author     James Morris <jmorris@namei.org>    2011-03-07 18:55:06 -0500
committer  James Morris <jmorris@namei.org>    2011-03-07 18:55:06 -0500
commit     1cc26bada9f6807814806db2f0d78792eecdac71 (patch)
tree       5509b5139db04af6c13db0a580c84116a4a54039 /drivers/net
parent     eae61f3c829439f8f9121b5cd48a14be04df451f (diff)
parent     214d93b02c4fe93638ad268613c9702a81ed9192 (diff)
Merge branch 'master'; commit 'v2.6.38-rc7' into next
Diffstat (limited to 'drivers/net')
215 files changed, 7100 insertions, 3130 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 3fda24a28d2f..03823327db25 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1944,19 +1944,12 @@ config 68360_ENET | |||
1944 | config FEC | 1944 | config FEC |
1945 | bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)" | 1945 | bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)" |
1946 | depends on M523x || M527x || M5272 || M528x || M520x || M532x || \ | 1946 | depends on M523x || M527x || M5272 || M528x || M520x || M532x || \ |
1947 | MACH_MX27 || ARCH_MX35 || ARCH_MX25 || ARCH_MX5 | 1947 | MACH_MX27 || ARCH_MX35 || ARCH_MX25 || ARCH_MX5 || SOC_IMX28 |
1948 | select PHYLIB | 1948 | select PHYLIB |
1949 | help | 1949 | help |
1950 | Say Y here if you want to use the built-in 10/100 Fast ethernet | 1950 | Say Y here if you want to use the built-in 10/100 Fast ethernet |
1951 | controller on some Motorola ColdFire and Freescale i.MX processors. | 1951 | controller on some Motorola ColdFire and Freescale i.MX processors. |
1952 | 1952 | ||
1953 | config FEC2 | ||
1954 | bool "Second FEC ethernet controller (on some ColdFire CPUs)" | ||
1955 | depends on FEC | ||
1956 | help | ||
1957 | Say Y here if you want to use the second built-in 10/100 Fast | ||
1958 | ethernet controller on some Motorola ColdFire processors. | ||
1959 | |||
1960 | config FEC_MPC52xx | 1953 | config FEC_MPC52xx |
1961 | tristate "MPC52xx FEC driver" | 1954 | tristate "MPC52xx FEC driver" |
1962 | depends on PPC_MPC52xx && PPC_BESTCOMM | 1955 | depends on PPC_MPC52xx && PPC_BESTCOMM |
@@ -2871,7 +2864,7 @@ config MLX4_CORE | |||
2871 | default n | 2864 | default n |
2872 | 2865 | ||
2873 | config MLX4_DEBUG | 2866 | config MLX4_DEBUG |
2874 | bool "Verbose debugging output" if (MLX4_CORE && EMBEDDED) | 2867 | bool "Verbose debugging output" if (MLX4_CORE && EXPERT) |
2875 | depends on MLX4_CORE | 2868 | depends on MLX4_CORE |
2876 | default y | 2869 | default y |
2877 | ---help--- | 2870 | ---help--- |
@@ -2970,6 +2963,7 @@ config TILE_NET | |||
2970 | config XEN_NETDEV_FRONTEND | 2963 | config XEN_NETDEV_FRONTEND |
2971 | tristate "Xen network device frontend driver" | 2964 | tristate "Xen network device frontend driver" |
2972 | depends on XEN | 2965 | depends on XEN |
2966 | select XEN_XENBUS_FRONTEND | ||
2973 | default y | 2967 | default y |
2974 | help | 2968 | help |
2975 | The network device frontend driver allows the kernel to | 2969 | The network device frontend driver allows the kernel to |
@@ -3395,8 +3389,7 @@ config NETCONSOLE | |||
3395 | 3389 | ||
3396 | config NETCONSOLE_DYNAMIC | 3390 | config NETCONSOLE_DYNAMIC |
3397 | bool "Dynamic reconfiguration of logging targets" | 3391 | bool "Dynamic reconfiguration of logging targets" |
3398 | depends on NETCONSOLE && SYSFS | 3392 | depends on NETCONSOLE && SYSFS && CONFIGFS_FS |
3399 | select CONFIGFS_FS | ||
3400 | help | 3393 | help |
3401 | This option enables the ability to dynamically reconfigure target | 3394 | This option enables the ability to dynamically reconfigure target |
3402 | parameters (interface, IP addresses, port numbers, MAC addresses) | 3395 | parameters (interface, IP addresses, port numbers, MAC addresses) |
diff --git a/drivers/net/arm/ks8695net.c b/drivers/net/arm/ks8695net.c
index 54c6d849cf25..aa07657744c3 100644
--- a/drivers/net/arm/ks8695net.c
+++ b/drivers/net/arm/ks8695net.c
@@ -854,12 +854,12 @@ ks8695_set_msglevel(struct net_device *ndev, u32 value) | |||
854 | } | 854 | } |
855 | 855 | ||
856 | /** | 856 | /** |
857 | * ks8695_get_settings - Get device-specific settings. | 857 | * ks8695_wan_get_settings - Get device-specific settings. |
858 | * @ndev: The network device to read settings from | 858 | * @ndev: The network device to read settings from |
859 | * @cmd: The ethtool structure to read into | 859 | * @cmd: The ethtool structure to read into |
860 | */ | 860 | */ |
861 | static int | 861 | static int |
862 | ks8695_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd) | 862 | ks8695_wan_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd) |
863 | { | 863 | { |
864 | struct ks8695_priv *ksp = netdev_priv(ndev); | 864 | struct ks8695_priv *ksp = netdev_priv(ndev); |
865 | u32 ctrl; | 865 | u32 ctrl; |
@@ -870,69 +870,50 @@ ks8695_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd) | |||
870 | SUPPORTED_TP | SUPPORTED_MII); | 870 | SUPPORTED_TP | SUPPORTED_MII); |
871 | cmd->transceiver = XCVR_INTERNAL; | 871 | cmd->transceiver = XCVR_INTERNAL; |
872 | 872 | ||
873 | /* Port specific extras */ | 873 | cmd->advertising = ADVERTISED_TP | ADVERTISED_MII; |
874 | switch (ksp->dtype) { | 874 | cmd->port = PORT_MII; |
875 | case KS8695_DTYPE_HPNA: | 875 | cmd->supported |= (SUPPORTED_Autoneg | SUPPORTED_Pause); |
876 | cmd->phy_address = 0; | 876 | cmd->phy_address = 0; |
877 | /* not supported for HPNA */ | ||
878 | cmd->autoneg = AUTONEG_DISABLE; | ||
879 | 877 | ||
880 | /* BUG: Erm, dtype hpna implies no phy regs */ | 878 | ctrl = readl(ksp->phyiface_regs + KS8695_WMC); |
881 | /* | 879 | if ((ctrl & WMC_WAND) == 0) { |
882 | ctrl = readl(KS8695_MISC_VA + KS8695_HMC); | 880 | /* auto-negotiation is enabled */ |
883 | cmd->speed = (ctrl & HMC_HSS) ? SPEED_100 : SPEED_10; | 881 | cmd->advertising |= ADVERTISED_Autoneg; |
884 | cmd->duplex = (ctrl & HMC_HDS) ? DUPLEX_FULL : DUPLEX_HALF; | 882 | if (ctrl & WMC_WANA100F) |
885 | */ | 883 | cmd->advertising |= ADVERTISED_100baseT_Full; |
886 | return -EOPNOTSUPP; | 884 | if (ctrl & WMC_WANA100H) |
887 | case KS8695_DTYPE_WAN: | 885 | cmd->advertising |= ADVERTISED_100baseT_Half; |
888 | cmd->advertising = ADVERTISED_TP | ADVERTISED_MII; | 886 | if (ctrl & WMC_WANA10F) |
889 | cmd->port = PORT_MII; | 887 | cmd->advertising |= ADVERTISED_10baseT_Full; |
890 | cmd->supported |= (SUPPORTED_Autoneg | SUPPORTED_Pause); | 888 | if (ctrl & WMC_WANA10H) |
891 | cmd->phy_address = 0; | 889 | cmd->advertising |= ADVERTISED_10baseT_Half; |
890 | if (ctrl & WMC_WANAP) | ||
891 | cmd->advertising |= ADVERTISED_Pause; | ||
892 | cmd->autoneg = AUTONEG_ENABLE; | ||
893 | |||
894 | cmd->speed = (ctrl & WMC_WSS) ? SPEED_100 : SPEED_10; | ||
895 | cmd->duplex = (ctrl & WMC_WDS) ? | ||
896 | DUPLEX_FULL : DUPLEX_HALF; | ||
897 | } else { | ||
898 | /* auto-negotiation is disabled */ | ||
899 | cmd->autoneg = AUTONEG_DISABLE; | ||
892 | 900 | ||
893 | ctrl = readl(ksp->phyiface_regs + KS8695_WMC); | 901 | cmd->speed = (ctrl & WMC_WANF100) ? |
894 | if ((ctrl & WMC_WAND) == 0) { | 902 | SPEED_100 : SPEED_10; |
895 | /* auto-negotiation is enabled */ | 903 | cmd->duplex = (ctrl & WMC_WANFF) ? |
896 | cmd->advertising |= ADVERTISED_Autoneg; | 904 | DUPLEX_FULL : DUPLEX_HALF; |
897 | if (ctrl & WMC_WANA100F) | ||
898 | cmd->advertising |= ADVERTISED_100baseT_Full; | ||
899 | if (ctrl & WMC_WANA100H) | ||
900 | cmd->advertising |= ADVERTISED_100baseT_Half; | ||
901 | if (ctrl & WMC_WANA10F) | ||
902 | cmd->advertising |= ADVERTISED_10baseT_Full; | ||
903 | if (ctrl & WMC_WANA10H) | ||
904 | cmd->advertising |= ADVERTISED_10baseT_Half; | ||
905 | if (ctrl & WMC_WANAP) | ||
906 | cmd->advertising |= ADVERTISED_Pause; | ||
907 | cmd->autoneg = AUTONEG_ENABLE; | ||
908 | |||
909 | cmd->speed = (ctrl & WMC_WSS) ? SPEED_100 : SPEED_10; | ||
910 | cmd->duplex = (ctrl & WMC_WDS) ? | ||
911 | DUPLEX_FULL : DUPLEX_HALF; | ||
912 | } else { | ||
913 | /* auto-negotiation is disabled */ | ||
914 | cmd->autoneg = AUTONEG_DISABLE; | ||
915 | |||
916 | cmd->speed = (ctrl & WMC_WANF100) ? | ||
917 | SPEED_100 : SPEED_10; | ||
918 | cmd->duplex = (ctrl & WMC_WANFF) ? | ||
919 | DUPLEX_FULL : DUPLEX_HALF; | ||
920 | } | ||
921 | break; | ||
922 | case KS8695_DTYPE_LAN: | ||
923 | return -EOPNOTSUPP; | ||
924 | } | 905 | } |
925 | 906 | ||
926 | return 0; | 907 | return 0; |
927 | } | 908 | } |
928 | 909 | ||
929 | /** | 910 | /** |
930 | * ks8695_set_settings - Set device-specific settings. | 911 | * ks8695_wan_set_settings - Set device-specific settings. |
931 | * @ndev: The network device to configure | 912 | * @ndev: The network device to configure |
932 | * @cmd: The settings to configure | 913 | * @cmd: The settings to configure |
933 | */ | 914 | */ |
934 | static int | 915 | static int |
935 | ks8695_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd) | 916 | ks8695_wan_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd) |
936 | { | 917 | { |
937 | struct ks8695_priv *ksp = netdev_priv(ndev); | 918 | struct ks8695_priv *ksp = netdev_priv(ndev); |
938 | u32 ctrl; | 919 | u32 ctrl; |
@@ -956,171 +937,85 @@ ks8695_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd) | |||
956 | ADVERTISED_100baseT_Full)) == 0) | 937 | ADVERTISED_100baseT_Full)) == 0) |
957 | return -EINVAL; | 938 | return -EINVAL; |
958 | 939 | ||
959 | switch (ksp->dtype) { | 940 | ctrl = readl(ksp->phyiface_regs + KS8695_WMC); |
960 | case KS8695_DTYPE_HPNA: | ||
961 | /* HPNA does not support auto-negotiation. */ | ||
962 | return -EINVAL; | ||
963 | case KS8695_DTYPE_WAN: | ||
964 | ctrl = readl(ksp->phyiface_regs + KS8695_WMC); | ||
965 | |||
966 | ctrl &= ~(WMC_WAND | WMC_WANA100F | WMC_WANA100H | | ||
967 | WMC_WANA10F | WMC_WANA10H); | ||
968 | if (cmd->advertising & ADVERTISED_100baseT_Full) | ||
969 | ctrl |= WMC_WANA100F; | ||
970 | if (cmd->advertising & ADVERTISED_100baseT_Half) | ||
971 | ctrl |= WMC_WANA100H; | ||
972 | if (cmd->advertising & ADVERTISED_10baseT_Full) | ||
973 | ctrl |= WMC_WANA10F; | ||
974 | if (cmd->advertising & ADVERTISED_10baseT_Half) | ||
975 | ctrl |= WMC_WANA10H; | ||
976 | |||
977 | /* force a re-negotiation */ | ||
978 | ctrl |= WMC_WANR; | ||
979 | writel(ctrl, ksp->phyiface_regs + KS8695_WMC); | ||
980 | break; | ||
981 | case KS8695_DTYPE_LAN: | ||
982 | return -EOPNOTSUPP; | ||
983 | } | ||
984 | 941 | ||
942 | ctrl &= ~(WMC_WAND | WMC_WANA100F | WMC_WANA100H | | ||
943 | WMC_WANA10F | WMC_WANA10H); | ||
944 | if (cmd->advertising & ADVERTISED_100baseT_Full) | ||
945 | ctrl |= WMC_WANA100F; | ||
946 | if (cmd->advertising & ADVERTISED_100baseT_Half) | ||
947 | ctrl |= WMC_WANA100H; | ||
948 | if (cmd->advertising & ADVERTISED_10baseT_Full) | ||
949 | ctrl |= WMC_WANA10F; | ||
950 | if (cmd->advertising & ADVERTISED_10baseT_Half) | ||
951 | ctrl |= WMC_WANA10H; | ||
952 | |||
953 | /* force a re-negotiation */ | ||
954 | ctrl |= WMC_WANR; | ||
955 | writel(ctrl, ksp->phyiface_regs + KS8695_WMC); | ||
985 | } else { | 956 | } else { |
986 | switch (ksp->dtype) { | 957 | ctrl = readl(ksp->phyiface_regs + KS8695_WMC); |
987 | case KS8695_DTYPE_HPNA: | 958 | |
988 | /* BUG: dtype_hpna implies no phy registers */ | 959 | /* disable auto-negotiation */ |
989 | /* | 960 | ctrl |= WMC_WAND; |
990 | ctrl = __raw_readl(KS8695_MISC_VA + KS8695_HMC); | 961 | ctrl &= ~(WMC_WANF100 | WMC_WANFF); |
991 | 962 | ||
992 | ctrl &= ~(HMC_HSS | HMC_HDS); | 963 | if (cmd->speed == SPEED_100) |
993 | if (cmd->speed == SPEED_100) | 964 | ctrl |= WMC_WANF100; |
994 | ctrl |= HMC_HSS; | 965 | if (cmd->duplex == DUPLEX_FULL) |
995 | if (cmd->duplex == DUPLEX_FULL) | 966 | ctrl |= WMC_WANFF; |
996 | ctrl |= HMC_HDS; | 967 | |
997 | 968 | writel(ctrl, ksp->phyiface_regs + KS8695_WMC); | |
998 | __raw_writel(ctrl, KS8695_MISC_VA + KS8695_HMC); | ||
999 | */ | ||
1000 | return -EOPNOTSUPP; | ||
1001 | case KS8695_DTYPE_WAN: | ||
1002 | ctrl = readl(ksp->phyiface_regs + KS8695_WMC); | ||
1003 | |||
1004 | /* disable auto-negotiation */ | ||
1005 | ctrl |= WMC_WAND; | ||
1006 | ctrl &= ~(WMC_WANF100 | WMC_WANFF); | ||
1007 | |||
1008 | if (cmd->speed == SPEED_100) | ||
1009 | ctrl |= WMC_WANF100; | ||
1010 | if (cmd->duplex == DUPLEX_FULL) | ||
1011 | ctrl |= WMC_WANFF; | ||
1012 | |||
1013 | writel(ctrl, ksp->phyiface_regs + KS8695_WMC); | ||
1014 | break; | ||
1015 | case KS8695_DTYPE_LAN: | ||
1016 | return -EOPNOTSUPP; | ||
1017 | } | ||
1018 | } | 969 | } |
1019 | 970 | ||
1020 | return 0; | 971 | return 0; |
1021 | } | 972 | } |
1022 | 973 | ||
1023 | /** | 974 | /** |
1024 | * ks8695_nwayreset - Restart the autonegotiation on the port. | 975 | * ks8695_wan_nwayreset - Restart the autonegotiation on the port. |
1025 | * @ndev: The network device to restart autonegotiation on | 976 | * @ndev: The network device to restart autonegotiation on |
1026 | */ | 977 | */ |
1027 | static int | 978 | static int |
1028 | ks8695_nwayreset(struct net_device *ndev) | 979 | ks8695_wan_nwayreset(struct net_device *ndev) |
1029 | { | 980 | { |
1030 | struct ks8695_priv *ksp = netdev_priv(ndev); | 981 | struct ks8695_priv *ksp = netdev_priv(ndev); |
1031 | u32 ctrl; | 982 | u32 ctrl; |
1032 | 983 | ||
1033 | switch (ksp->dtype) { | 984 | ctrl = readl(ksp->phyiface_regs + KS8695_WMC); |
1034 | case KS8695_DTYPE_HPNA: | ||
1035 | /* No phy means no autonegotiation on hpna */ | ||
1036 | return -EINVAL; | ||
1037 | case KS8695_DTYPE_WAN: | ||
1038 | ctrl = readl(ksp->phyiface_regs + KS8695_WMC); | ||
1039 | |||
1040 | if ((ctrl & WMC_WAND) == 0) | ||
1041 | writel(ctrl | WMC_WANR, | ||
1042 | ksp->phyiface_regs + KS8695_WMC); | ||
1043 | else | ||
1044 | /* auto-negotiation not enabled */ | ||
1045 | return -EINVAL; | ||
1046 | break; | ||
1047 | case KS8695_DTYPE_LAN: | ||
1048 | return -EOPNOTSUPP; | ||
1049 | } | ||
1050 | |||
1051 | return 0; | ||
1052 | } | ||
1053 | 985 | ||
1054 | /** | 986 | if ((ctrl & WMC_WAND) == 0) |
1055 | * ks8695_get_link - Retrieve link status of network interface | 987 | writel(ctrl | WMC_WANR, |
1056 | * @ndev: The network interface to retrive the link status of. | 988 | ksp->phyiface_regs + KS8695_WMC); |
1057 | */ | 989 | else |
1058 | static u32 | 990 | /* auto-negotiation not enabled */ |
1059 | ks8695_get_link(struct net_device *ndev) | 991 | return -EINVAL; |
1060 | { | ||
1061 | struct ks8695_priv *ksp = netdev_priv(ndev); | ||
1062 | u32 ctrl; | ||
1063 | 992 | ||
1064 | switch (ksp->dtype) { | ||
1065 | case KS8695_DTYPE_HPNA: | ||
1066 | /* HPNA always has link */ | ||
1067 | return 1; | ||
1068 | case KS8695_DTYPE_WAN: | ||
1069 | /* WAN we can read the PHY for */ | ||
1070 | ctrl = readl(ksp->phyiface_regs + KS8695_WMC); | ||
1071 | return ctrl & WMC_WLS; | ||
1072 | case KS8695_DTYPE_LAN: | ||
1073 | return -EOPNOTSUPP; | ||
1074 | } | ||
1075 | return 0; | 993 | return 0; |
1076 | } | 994 | } |
1077 | 995 | ||
1078 | /** | 996 | /** |
1079 | * ks8695_get_pause - Retrieve network pause/flow-control advertising | 997 | * ks8695_wan_get_pause - Retrieve network pause/flow-control advertising |
1080 | * @ndev: The device to retrieve settings from | 998 | * @ndev: The device to retrieve settings from |
1081 | * @param: The structure to fill out with the information | 999 | * @param: The structure to fill out with the information |
1082 | */ | 1000 | */ |
1083 | static void | 1001 | static void |
1084 | ks8695_get_pause(struct net_device *ndev, struct ethtool_pauseparam *param) | 1002 | ks8695_wan_get_pause(struct net_device *ndev, struct ethtool_pauseparam *param) |
1085 | { | 1003 | { |
1086 | struct ks8695_priv *ksp = netdev_priv(ndev); | 1004 | struct ks8695_priv *ksp = netdev_priv(ndev); |
1087 | u32 ctrl; | 1005 | u32 ctrl; |
1088 | 1006 | ||
1089 | switch (ksp->dtype) { | 1007 | ctrl = readl(ksp->phyiface_regs + KS8695_WMC); |
1090 | case KS8695_DTYPE_HPNA: | ||
1091 | /* No phy link on hpna to configure */ | ||
1092 | return; | ||
1093 | case KS8695_DTYPE_WAN: | ||
1094 | ctrl = readl(ksp->phyiface_regs + KS8695_WMC); | ||
1095 | |||
1096 | /* advertise Pause */ | ||
1097 | param->autoneg = (ctrl & WMC_WANAP); | ||
1098 | 1008 | ||
1099 | /* current Rx Flow-control */ | 1009 | /* advertise Pause */ |
1100 | ctrl = ks8695_readreg(ksp, KS8695_DRXC); | 1010 | param->autoneg = (ctrl & WMC_WANAP); |
1101 | param->rx_pause = (ctrl & DRXC_RFCE); | ||
1102 | 1011 | ||
1103 | /* current Tx Flow-control */ | 1012 | /* current Rx Flow-control */ |
1104 | ctrl = ks8695_readreg(ksp, KS8695_DTXC); | 1013 | ctrl = ks8695_readreg(ksp, KS8695_DRXC); |
1105 | param->tx_pause = (ctrl & DTXC_TFCE); | 1014 | param->rx_pause = (ctrl & DRXC_RFCE); |
1106 | break; | ||
1107 | case KS8695_DTYPE_LAN: | ||
1108 | /* The LAN's "phy" is a direct-attached switch */ | ||
1109 | return; | ||
1110 | } | ||
1111 | } | ||
1112 | 1015 | ||
1113 | /** | 1016 | /* current Tx Flow-control */ |
1114 | * ks8695_set_pause - Configure pause/flow-control | 1017 | ctrl = ks8695_readreg(ksp, KS8695_DTXC); |
1115 | * @ndev: The device to configure | 1018 | param->tx_pause = (ctrl & DTXC_TFCE); |
1116 | * @param: The pause parameters to set | ||
1117 | * | ||
1118 | * TODO: Implement this | ||
1119 | */ | ||
1120 | static int | ||
1121 | ks8695_set_pause(struct net_device *ndev, struct ethtool_pauseparam *param) | ||
1122 | { | ||
1123 | return -EOPNOTSUPP; | ||
1124 | } | 1019 | } |
1125 | 1020 | ||
1126 | /** | 1021 | /** |
@@ -1140,12 +1035,17 @@ ks8695_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info) | |||
1140 | static const struct ethtool_ops ks8695_ethtool_ops = { | 1035 | static const struct ethtool_ops ks8695_ethtool_ops = { |
1141 | .get_msglevel = ks8695_get_msglevel, | 1036 | .get_msglevel = ks8695_get_msglevel, |
1142 | .set_msglevel = ks8695_set_msglevel, | 1037 | .set_msglevel = ks8695_set_msglevel, |
1143 | .get_settings = ks8695_get_settings, | 1038 | .get_drvinfo = ks8695_get_drvinfo, |
1144 | .set_settings = ks8695_set_settings, | 1039 | }; |
1145 | .nway_reset = ks8695_nwayreset, | 1040 | |
1146 | .get_link = ks8695_get_link, | 1041 | static const struct ethtool_ops ks8695_wan_ethtool_ops = { |
1147 | .get_pauseparam = ks8695_get_pause, | 1042 | .get_msglevel = ks8695_get_msglevel, |
1148 | .set_pauseparam = ks8695_set_pause, | 1043 | .set_msglevel = ks8695_set_msglevel, |
1044 | .get_settings = ks8695_wan_get_settings, | ||
1045 | .set_settings = ks8695_wan_set_settings, | ||
1046 | .nway_reset = ks8695_wan_nwayreset, | ||
1047 | .get_link = ethtool_op_get_link, | ||
1048 | .get_pauseparam = ks8695_wan_get_pause, | ||
1149 | .get_drvinfo = ks8695_get_drvinfo, | 1049 | .get_drvinfo = ks8695_get_drvinfo, |
1150 | }; | 1050 | }; |
1151 | 1051 | ||
@@ -1541,7 +1441,6 @@ ks8695_probe(struct platform_device *pdev) | |||
1541 | 1441 | ||
1542 | /* driver system setup */ | 1442 | /* driver system setup */ |
1543 | ndev->netdev_ops = &ks8695_netdev_ops; | 1443 | ndev->netdev_ops = &ks8695_netdev_ops; |
1544 | SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops); | ||
1545 | ndev->watchdog_timeo = msecs_to_jiffies(watchdog); | 1444 | ndev->watchdog_timeo = msecs_to_jiffies(watchdog); |
1546 | 1445 | ||
1547 | netif_napi_add(ndev, &ksp->napi, ks8695_poll, NAPI_WEIGHT); | 1446 | netif_napi_add(ndev, &ksp->napi, ks8695_poll, NAPI_WEIGHT); |
@@ -1608,12 +1507,15 @@ ks8695_probe(struct platform_device *pdev) | |||
1608 | if (ksp->phyiface_regs && ksp->link_irq == -1) { | 1507 | if (ksp->phyiface_regs && ksp->link_irq == -1) { |
1609 | ks8695_init_switch(ksp); | 1508 | ks8695_init_switch(ksp); |
1610 | ksp->dtype = KS8695_DTYPE_LAN; | 1509 | ksp->dtype = KS8695_DTYPE_LAN; |
1510 | SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops); | ||
1611 | } else if (ksp->phyiface_regs && ksp->link_irq != -1) { | 1511 | } else if (ksp->phyiface_regs && ksp->link_irq != -1) { |
1612 | ks8695_init_wan_phy(ksp); | 1512 | ks8695_init_wan_phy(ksp); |
1613 | ksp->dtype = KS8695_DTYPE_WAN; | 1513 | ksp->dtype = KS8695_DTYPE_WAN; |
1514 | SET_ETHTOOL_OPS(ndev, &ks8695_wan_ethtool_ops); | ||
1614 | } else { | 1515 | } else { |
1615 | /* No initialisation since HPNA does not have a PHY */ | 1516 | /* No initialisation since HPNA does not have a PHY */ |
1616 | ksp->dtype = KS8695_DTYPE_HPNA; | 1517 | ksp->dtype = KS8695_DTYPE_HPNA; |
1518 | SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops); | ||
1617 | } | 1519 | } |
1618 | 1520 | ||
1619 | /* And bring up the net_device with the net core */ | 1521 | /* And bring up the net_device with the net core */ |
@@ -1742,7 +1644,7 @@ ks8695_cleanup(void) | |||
1742 | module_init(ks8695_init); | 1644 | module_init(ks8695_init); |
1743 | module_exit(ks8695_cleanup); | 1645 | module_exit(ks8695_cleanup); |
1744 | 1646 | ||
1745 | MODULE_AUTHOR("Simtec Electronics") | 1647 | MODULE_AUTHOR("Simtec Electronics"); |
1746 | MODULE_DESCRIPTION("Micrel KS8695 (Centaur) Ethernet driver"); | 1648 | MODULE_DESCRIPTION("Micrel KS8695 (Centaur) Ethernet driver"); |
1747 | MODULE_LICENSE("GPL"); | 1649 | MODULE_LICENSE("GPL"); |
1748 | MODULE_ALIAS("platform:" MODULENAME); | 1650 | MODULE_ALIAS("platform:" MODULENAME); |
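Editor's note (not part of the commit): the ks8695 hunks above split the single ethtool_ops table, whose handlers each switched on ksp->dtype and returned -EOPNOTSUPP for the LAN and HPNA ports, into a generic table plus a WAN-only table chosen once at probe time. A minimal standalone sketch of that probe-time dispatch follows; the struct and function names (eth_ops, pick_ops) are invented for illustration and are not the driver's:

```c
#include <stdio.h>

enum port_type { PORT_LAN, PORT_WAN, PORT_HPNA };

struct eth_ops {
	const char *name;
	int (*nway_reset)(void);	/* only provided where a real PHY exists */
};

static int wan_nway_reset(void) { return 0; }

/* generic table: no PHY-dependent callbacks at all */
static const struct eth_ops generic_ops = { .name = "generic" };
/* WAN table: the only port with a PHY the driver can drive */
static const struct eth_ops wan_ops = { .name = "wan", .nway_reset = wan_nway_reset };

/* probe-time selection replaces the per-call switch on the port type */
static const struct eth_ops *pick_ops(enum port_type t)
{
	return (t == PORT_WAN) ? &wan_ops : &generic_ops;
}

int main(void)
{
	printf("%s\n", pick_ops(PORT_WAN)->name);	/* wan */
	printf("%s\n", pick_ops(PORT_HPNA)->name);	/* generic */
	return 0;
}
```

Choosing the table once in probe() is what lets every handler above drop its KS8695_DTYPE_* switch and -EOPNOTSUPP fallthroughs.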
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index a699bbf20eb5..3824382faecc 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -48,6 +48,7 @@ static DEFINE_PCI_DEVICE_TABLE(atl1c_pci_tbl) = { | |||
48 | {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L2C_B)}, | 48 | {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L2C_B)}, |
49 | {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L2C_B2)}, | 49 | {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L2C_B2)}, |
50 | {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L1D)}, | 50 | {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L1D)}, |
51 | {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L1D_2_0)}, | ||
51 | /* required last entry */ | 52 | /* required last entry */ |
52 | { 0 } | 53 | { 0 } |
53 | }; | 54 | }; |
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 0c7811faf72c..a179cc6d79f2 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -1786,6 +1786,10 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter, | |||
1786 | spin_lock_bh(&adapter->mcc_lock); | 1786 | spin_lock_bh(&adapter->mcc_lock); |
1787 | 1787 | ||
1788 | wrb = wrb_from_mccq(adapter); | 1788 | wrb = wrb_from_mccq(adapter); |
1789 | if (!wrb) { | ||
1790 | status = -EBUSY; | ||
1791 | goto err; | ||
1792 | } | ||
1789 | req = nonemb_cmd->va; | 1793 | req = nonemb_cmd->va; |
1790 | sge = nonembedded_sgl(wrb); | 1794 | sge = nonembedded_sgl(wrb); |
1791 | 1795 | ||
@@ -1801,6 +1805,7 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter, | |||
1801 | 1805 | ||
1802 | status = be_mcc_notify_wait(adapter); | 1806 | status = be_mcc_notify_wait(adapter); |
1803 | 1807 | ||
1808 | err: | ||
1804 | spin_unlock_bh(&adapter->mcc_lock); | 1809 | spin_unlock_bh(&adapter->mcc_lock); |
1805 | return status; | 1810 | return status; |
1806 | } | 1811 | } |
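A hedged sketch of the control flow added above: take the lock, bail out through a single err label when no MCC work-request buffer is available, and release the lock on every path. A pthread mutex and the invented helpers get_wrb()/issue_cmd() stand in for spin_lock_bh() and the real MCC queue; only the shape of the error path mirrors the patch:

```c
#include <pthread.h>
#include <stddef.h>
#include <errno.h>

static pthread_mutex_t mcc_lock = PTHREAD_MUTEX_INITIALIZER;

static void *get_wrb(void) { return NULL; }		/* stand-in: queue exhausted */
static int issue_cmd(void *wrb) { (void)wrb; return 0; }

static int read_seeprom(void)
{
	int status;
	void *wrb;

	pthread_mutex_lock(&mcc_lock);

	wrb = get_wrb();
	if (!wrb) {
		status = -EBUSY;	/* report busy instead of dereferencing NULL */
		goto err;
	}

	status = issue_cmd(wrb);
err:
	pthread_mutex_unlock(&mcc_lock);	/* lock is dropped on every exit */
	return status;
}

int main(void)
{
	return read_seeprom() == -EBUSY ? 0 : 1;
}
```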
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index de40d3b7152f..28a32a6c8bf1 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -312,11 +312,9 @@ void be_link_status_update(struct be_adapter *adapter, bool link_up) | |||
312 | if (adapter->link_up != link_up) { | 312 | if (adapter->link_up != link_up) { |
313 | adapter->link_speed = -1; | 313 | adapter->link_speed = -1; |
314 | if (link_up) { | 314 | if (link_up) { |
315 | netif_start_queue(netdev); | ||
316 | netif_carrier_on(netdev); | 315 | netif_carrier_on(netdev); |
317 | printk(KERN_INFO "%s: Link up\n", netdev->name); | 316 | printk(KERN_INFO "%s: Link up\n", netdev->name); |
318 | } else { | 317 | } else { |
319 | netif_stop_queue(netdev); | ||
320 | netif_carrier_off(netdev); | 318 | netif_carrier_off(netdev); |
321 | printk(KERN_INFO "%s: Link down\n", netdev->name); | 319 | printk(KERN_INFO "%s: Link down\n", netdev->name); |
322 | } | 320 | } |
@@ -2628,8 +2626,6 @@ static void be_netdev_init(struct net_device *netdev) | |||
2628 | 2626 | ||
2629 | netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc, | 2627 | netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc, |
2630 | BE_NAPI_WEIGHT); | 2628 | BE_NAPI_WEIGHT); |
2631 | |||
2632 | netif_stop_queue(netdev); | ||
2633 | } | 2629 | } |
2634 | 2630 | ||
2635 | static void be_unmap_pci_bars(struct be_adapter *adapter) | 2631 | static void be_unmap_pci_bars(struct be_adapter *adapter) |
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index ce1e5e9d06f6..22abfb39d813 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -8,6 +8,11 @@ | |||
8 | * Licensed under the GPL-2 or later. | 8 | * Licensed under the GPL-2 or later. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #define DRV_VERSION "1.1" | ||
12 | #define DRV_DESC "Blackfin on-chip Ethernet MAC driver" | ||
13 | |||
14 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
15 | |||
11 | #include <linux/init.h> | 16 | #include <linux/init.h> |
12 | #include <linux/module.h> | 17 | #include <linux/module.h> |
13 | #include <linux/kernel.h> | 18 | #include <linux/kernel.h> |
@@ -41,12 +46,7 @@ | |||
41 | 46 | ||
42 | #include "bfin_mac.h" | 47 | #include "bfin_mac.h" |
43 | 48 | ||
44 | #define DRV_NAME "bfin_mac" | 49 | MODULE_AUTHOR("Bryan Wu, Luke Yang"); |
45 | #define DRV_VERSION "1.1" | ||
46 | #define DRV_AUTHOR "Bryan Wu, Luke Yang" | ||
47 | #define DRV_DESC "Blackfin on-chip Ethernet MAC driver" | ||
48 | |||
49 | MODULE_AUTHOR(DRV_AUTHOR); | ||
50 | MODULE_LICENSE("GPL"); | 50 | MODULE_LICENSE("GPL"); |
51 | MODULE_DESCRIPTION(DRV_DESC); | 51 | MODULE_DESCRIPTION(DRV_DESC); |
52 | MODULE_ALIAS("platform:bfin_mac"); | 52 | MODULE_ALIAS("platform:bfin_mac"); |
@@ -189,8 +189,7 @@ static int desc_list_init(void) | |||
189 | /* allocate a new skb for next time receive */ | 189 | /* allocate a new skb for next time receive */ |
190 | new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN); | 190 | new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN); |
191 | if (!new_skb) { | 191 | if (!new_skb) { |
192 | printk(KERN_NOTICE DRV_NAME | 192 | pr_notice("init: low on mem - packet dropped\n"); |
193 | ": init: low on mem - packet dropped\n"); | ||
194 | goto init_error; | 193 | goto init_error; |
195 | } | 194 | } |
196 | skb_reserve(new_skb, NET_IP_ALIGN); | 195 | skb_reserve(new_skb, NET_IP_ALIGN); |
@@ -240,7 +239,7 @@ static int desc_list_init(void) | |||
240 | 239 | ||
241 | init_error: | 240 | init_error: |
242 | desc_list_free(); | 241 | desc_list_free(); |
243 | printk(KERN_ERR DRV_NAME ": kmalloc failed\n"); | 242 | pr_err("kmalloc failed\n"); |
244 | return -ENOMEM; | 243 | return -ENOMEM; |
245 | } | 244 | } |
246 | 245 | ||
@@ -259,8 +258,7 @@ static int bfin_mdio_poll(void) | |||
259 | while ((bfin_read_EMAC_STAADD()) & STABUSY) { | 258 | while ((bfin_read_EMAC_STAADD()) & STABUSY) { |
260 | udelay(1); | 259 | udelay(1); |
261 | if (timeout_cnt-- < 0) { | 260 | if (timeout_cnt-- < 0) { |
262 | printk(KERN_ERR DRV_NAME | 261 | pr_err("wait MDC/MDIO transaction to complete timeout\n"); |
263 | ": wait MDC/MDIO transaction to complete timeout\n"); | ||
264 | return -ETIMEDOUT; | 262 | return -ETIMEDOUT; |
265 | } | 263 | } |
266 | } | 264 | } |
@@ -350,9 +348,9 @@ static void bfin_mac_adjust_link(struct net_device *dev) | |||
350 | opmode &= ~RMII_10; | 348 | opmode &= ~RMII_10; |
351 | break; | 349 | break; |
352 | default: | 350 | default: |
353 | printk(KERN_WARNING | 351 | netdev_warn(dev, |
354 | "%s: Ack! Speed (%d) is not 10/100!\n", | 352 | "Ack! Speed (%d) is not 10/100!\n", |
355 | DRV_NAME, phydev->speed); | 353 | phydev->speed); |
356 | break; | 354 | break; |
357 | } | 355 | } |
358 | bfin_write_EMAC_OPMODE(opmode); | 356 | bfin_write_EMAC_OPMODE(opmode); |
@@ -417,14 +415,13 @@ static int mii_probe(struct net_device *dev, int phy_mode) | |||
417 | 415 | ||
418 | /* now we are supposed to have a proper phydev, to attach to... */ | 416 | /* now we are supposed to have a proper phydev, to attach to... */ |
419 | if (!phydev) { | 417 | if (!phydev) { |
420 | printk(KERN_INFO "%s: Don't found any phy device at all\n", | 418 | netdev_err(dev, "no phy device found\n"); |
421 | dev->name); | ||
422 | return -ENODEV; | 419 | return -ENODEV; |
423 | } | 420 | } |
424 | 421 | ||
425 | if (phy_mode != PHY_INTERFACE_MODE_RMII && | 422 | if (phy_mode != PHY_INTERFACE_MODE_RMII && |
426 | phy_mode != PHY_INTERFACE_MODE_MII) { | 423 | phy_mode != PHY_INTERFACE_MODE_MII) { |
427 | printk(KERN_INFO "%s: Invalid phy interface mode\n", dev->name); | 424 | netdev_err(dev, "invalid phy interface mode\n"); |
428 | return -EINVAL; | 425 | return -EINVAL; |
429 | } | 426 | } |
430 | 427 | ||
@@ -432,7 +429,7 @@ static int mii_probe(struct net_device *dev, int phy_mode) | |||
432 | 0, phy_mode); | 429 | 0, phy_mode); |
433 | 430 | ||
434 | if (IS_ERR(phydev)) { | 431 | if (IS_ERR(phydev)) { |
435 | printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); | 432 | netdev_err(dev, "could not attach PHY\n"); |
436 | return PTR_ERR(phydev); | 433 | return PTR_ERR(phydev); |
437 | } | 434 | } |
438 | 435 | ||
@@ -453,11 +450,10 @@ static int mii_probe(struct net_device *dev, int phy_mode) | |||
453 | lp->old_duplex = -1; | 450 | lp->old_duplex = -1; |
454 | lp->phydev = phydev; | 451 | lp->phydev = phydev; |
455 | 452 | ||
456 | printk(KERN_INFO "%s: attached PHY driver [%s] " | 453 | pr_info("attached PHY driver [%s] " |
457 | "(mii_bus:phy_addr=%s, irq=%d, mdc_clk=%dHz(mdc_div=%d)" | 454 | "(mii_bus:phy_addr=%s, irq=%d, mdc_clk=%dHz(mdc_div=%d)@sclk=%dMHz)\n", |
458 | "@sclk=%dMHz)\n", | 455 | phydev->drv->name, dev_name(&phydev->dev), phydev->irq, |
459 | DRV_NAME, phydev->drv->name, dev_name(&phydev->dev), phydev->irq, | 456 | MDC_CLK, mdc_div, sclk/1000000); |
460 | MDC_CLK, mdc_div, sclk/1000000); | ||
461 | 457 | ||
462 | return 0; | 458 | return 0; |
463 | } | 459 | } |
@@ -502,7 +498,7 @@ bfin_mac_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
502 | static void bfin_mac_ethtool_getdrvinfo(struct net_device *dev, | 498 | static void bfin_mac_ethtool_getdrvinfo(struct net_device *dev, |
503 | struct ethtool_drvinfo *info) | 499 | struct ethtool_drvinfo *info) |
504 | { | 500 | { |
505 | strcpy(info->driver, DRV_NAME); | 501 | strcpy(info->driver, KBUILD_MODNAME); |
506 | strcpy(info->version, DRV_VERSION); | 502 | strcpy(info->version, DRV_VERSION); |
507 | strcpy(info->fw_version, "N/A"); | 503 | strcpy(info->fw_version, "N/A"); |
508 | strcpy(info->bus_info, dev_name(&dev->dev)); | 504 | strcpy(info->bus_info, dev_name(&dev->dev)); |
@@ -562,7 +558,7 @@ static const struct ethtool_ops bfin_mac_ethtool_ops = { | |||
562 | }; | 558 | }; |
563 | 559 | ||
564 | /**************************************************************************/ | 560 | /**************************************************************************/ |
565 | void setup_system_regs(struct net_device *dev) | 561 | static void setup_system_regs(struct net_device *dev) |
566 | { | 562 | { |
567 | struct bfin_mac_local *lp = netdev_priv(dev); | 563 | struct bfin_mac_local *lp = netdev_priv(dev); |
568 | int i; | 564 | int i; |
@@ -592,6 +588,10 @@ void setup_system_regs(struct net_device *dev) | |||
592 | 588 | ||
593 | bfin_write_EMAC_MMC_CTL(RSTC | CROLL); | 589 | bfin_write_EMAC_MMC_CTL(RSTC | CROLL); |
594 | 590 | ||
591 | /* Set vlan regs to let 1522 bytes long packets pass through */ | ||
592 | bfin_write_EMAC_VLAN1(lp->vlan1_mask); | ||
593 | bfin_write_EMAC_VLAN2(lp->vlan2_mask); | ||
594 | |||
595 | /* Initialize the TX DMA channel registers */ | 595 | /* Initialize the TX DMA channel registers */ |
596 | bfin_write_DMA2_X_COUNT(0); | 596 | bfin_write_DMA2_X_COUNT(0); |
597 | bfin_write_DMA2_X_MODIFY(4); | 597 | bfin_write_DMA2_X_MODIFY(4); |
@@ -827,8 +827,7 @@ static void bfin_tx_hwtstamp(struct net_device *netdev, struct sk_buff *skb) | |||
827 | while ((!(bfin_read_EMAC_PTP_ISTAT() & TXTL)) && (--timeout_cnt)) | 827 | while ((!(bfin_read_EMAC_PTP_ISTAT() & TXTL)) && (--timeout_cnt)) |
828 | udelay(1); | 828 | udelay(1); |
829 | if (timeout_cnt == 0) | 829 | if (timeout_cnt == 0) |
830 | printk(KERN_ERR DRV_NAME | 830 | netdev_err(netdev, "timestamp the TX packet failed\n"); |
831 | ": fails to timestamp the TX packet\n"); | ||
832 | else { | 831 | else { |
833 | struct skb_shared_hwtstamps shhwtstamps; | 832 | struct skb_shared_hwtstamps shhwtstamps; |
834 | u64 ns; | 833 | u64 ns; |
@@ -1083,8 +1082,7 @@ static void bfin_mac_rx(struct net_device *dev) | |||
1083 | * we which case we simply drop the packet | 1082 | * we which case we simply drop the packet |
1084 | */ | 1083 | */ |
1085 | if (current_rx_ptr->status.status_word & RX_ERROR_MASK) { | 1084 | if (current_rx_ptr->status.status_word & RX_ERROR_MASK) { |
1086 | printk(KERN_NOTICE DRV_NAME | 1085 | netdev_notice(dev, "rx: receive error - packet dropped\n"); |
1087 | ": rx: receive error - packet dropped\n"); | ||
1088 | dev->stats.rx_dropped++; | 1086 | dev->stats.rx_dropped++; |
1089 | goto out; | 1087 | goto out; |
1090 | } | 1088 | } |
@@ -1094,8 +1092,7 @@ static void bfin_mac_rx(struct net_device *dev) | |||
1094 | 1092 | ||
1095 | new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN); | 1093 | new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN); |
1096 | if (!new_skb) { | 1094 | if (!new_skb) { |
1097 | printk(KERN_NOTICE DRV_NAME | 1095 | netdev_notice(dev, "rx: low on mem - packet dropped\n"); |
1098 | ": rx: low on mem - packet dropped\n"); | ||
1099 | dev->stats.rx_dropped++; | 1096 | dev->stats.rx_dropped++; |
1100 | goto out; | 1097 | goto out; |
1101 | } | 1098 | } |
@@ -1213,7 +1210,7 @@ static int bfin_mac_enable(struct phy_device *phydev) | |||
1213 | int ret; | 1210 | int ret; |
1214 | u32 opmode; | 1211 | u32 opmode; |
1215 | 1212 | ||
1216 | pr_debug("%s: %s\n", DRV_NAME, __func__); | 1213 | pr_debug("%s\n", __func__); |
1217 | 1214 | ||
1218 | /* Set RX DMA */ | 1215 | /* Set RX DMA */ |
1219 | bfin_write_DMA1_NEXT_DESC_PTR(&(rx_list_head->desc_a)); | 1216 | bfin_write_DMA1_NEXT_DESC_PTR(&(rx_list_head->desc_a)); |
@@ -1287,19 +1284,12 @@ static void bfin_mac_multicast_hash(struct net_device *dev) | |||
1287 | { | 1284 | { |
1288 | u32 emac_hashhi, emac_hashlo; | 1285 | u32 emac_hashhi, emac_hashlo; |
1289 | struct netdev_hw_addr *ha; | 1286 | struct netdev_hw_addr *ha; |
1290 | char *addrs; | ||
1291 | u32 crc; | 1287 | u32 crc; |
1292 | 1288 | ||
1293 | emac_hashhi = emac_hashlo = 0; | 1289 | emac_hashhi = emac_hashlo = 0; |
1294 | 1290 | ||
1295 | netdev_for_each_mc_addr(ha, dev) { | 1291 | netdev_for_each_mc_addr(ha, dev) { |
1296 | addrs = ha->addr; | 1292 | crc = ether_crc(ETH_ALEN, ha->addr); |
1297 | |||
1298 | /* skip non-multicast addresses */ | ||
1299 | if (!(*addrs & 1)) | ||
1300 | continue; | ||
1301 | |||
1302 | crc = ether_crc(ETH_ALEN, addrs); | ||
1303 | crc >>= 26; | 1293 | crc >>= 26; |
1304 | 1294 | ||
1305 | if (crc & 0x20) | 1295 | if (crc & 0x20) |
@@ -1323,7 +1313,7 @@ static void bfin_mac_set_multicast_list(struct net_device *dev) | |||
1323 | u32 sysctl; | 1313 | u32 sysctl; |
1324 | 1314 | ||
1325 | if (dev->flags & IFF_PROMISC) { | 1315 | if (dev->flags & IFF_PROMISC) { |
1326 | printk(KERN_INFO "%s: set to promisc mode\n", dev->name); | 1316 | netdev_info(dev, "set promisc mode\n"); |
1327 | sysctl = bfin_read_EMAC_OPMODE(); | 1317 | sysctl = bfin_read_EMAC_OPMODE(); |
1328 | sysctl |= PR; | 1318 | sysctl |= PR; |
1329 | bfin_write_EMAC_OPMODE(sysctl); | 1319 | bfin_write_EMAC_OPMODE(sysctl); |
@@ -1393,7 +1383,7 @@ static int bfin_mac_open(struct net_device *dev) | |||
1393 | * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx | 1383 | * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx |
1394 | */ | 1384 | */ |
1395 | if (!is_valid_ether_addr(dev->dev_addr)) { | 1385 | if (!is_valid_ether_addr(dev->dev_addr)) { |
1396 | printk(KERN_WARNING DRV_NAME ": no valid ethernet hw addr\n"); | 1386 | netdev_warn(dev, "no valid ethernet hw addr\n"); |
1397 | return -EINVAL; | 1387 | return -EINVAL; |
1398 | } | 1388 | } |
1399 | 1389 | ||
@@ -1527,6 +1517,9 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev) | |||
1527 | goto out_err_mii_probe; | 1517 | goto out_err_mii_probe; |
1528 | } | 1518 | } |
1529 | 1519 | ||
1520 | lp->vlan1_mask = ETH_P_8021Q | mii_bus_data->vlan1_mask; | ||
1521 | lp->vlan2_mask = ETH_P_8021Q | mii_bus_data->vlan2_mask; | ||
1522 | |||
1530 | /* Fill in the fields of the device structure with ethernet values. */ | 1523 | /* Fill in the fields of the device structure with ethernet values. */ |
1531 | ether_setup(ndev); | 1524 | ether_setup(ndev); |
1532 | 1525 | ||
@@ -1558,7 +1551,7 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev) | |||
1558 | bfin_mac_hwtstamp_init(ndev); | 1551 | bfin_mac_hwtstamp_init(ndev); |
1559 | 1552 | ||
1560 | /* now, print out the card info, in a short format.. */ | 1553 | /* now, print out the card info, in a short format.. */ |
1561 | dev_info(&pdev->dev, "%s, Version %s\n", DRV_DESC, DRV_VERSION); | 1554 | netdev_info(ndev, "%s, Version %s\n", DRV_DESC, DRV_VERSION); |
1562 | 1555 | ||
1563 | return 0; | 1556 | return 0; |
1564 | 1557 | ||
@@ -1650,7 +1643,7 @@ static int __devinit bfin_mii_bus_probe(struct platform_device *pdev) | |||
1650 | * so set the GPIO pins to Ethernet mode | 1643 | * so set the GPIO pins to Ethernet mode |
1651 | */ | 1644 | */ |
1652 | pin_req = mii_bus_pd->mac_peripherals; | 1645 | pin_req = mii_bus_pd->mac_peripherals; |
1653 | rc = peripheral_request_list(pin_req, DRV_NAME); | 1646 | rc = peripheral_request_list(pin_req, KBUILD_MODNAME); |
1654 | if (rc) { | 1647 | if (rc) { |
1655 | dev_err(&pdev->dev, "Requesting peripherals failed!\n"); | 1648 | dev_err(&pdev->dev, "Requesting peripherals failed!\n"); |
1656 | return rc; | 1649 | return rc; |
@@ -1739,7 +1732,7 @@ static struct platform_driver bfin_mac_driver = { | |||
1739 | .resume = bfin_mac_resume, | 1732 | .resume = bfin_mac_resume, |
1740 | .suspend = bfin_mac_suspend, | 1733 | .suspend = bfin_mac_suspend, |
1741 | .driver = { | 1734 | .driver = { |
1742 | .name = DRV_NAME, | 1735 | .name = KBUILD_MODNAME, |
1743 | .owner = THIS_MODULE, | 1736 | .owner = THIS_MODULE, |
1744 | }, | 1737 | }, |
1745 | }; | 1738 | }; |
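The bfin_mac conversion above relies on the kernel's pr_fmt() convention: define pr_fmt() before the printing helpers are pulled in and every pr_*() message picks up the module-name prefix automatically, which is why the hand-rolled KERN_* plus DRV_NAME strings could be deleted. A small userspace approximation, with printf standing in for printk and pr_notice() redefined locally just for the demo:

```c
#include <stdio.h>

#define KBUILD_MODNAME "bfin_mac"

/* must be defined before any pr_*() helper expands a format string */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define pr_notice(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	pr_notice("init: low on mem - packet dropped\n");
	/* prints: bfin_mac: init: low on mem - packet dropped */
	return 0;
}
```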
diff --git a/drivers/net/bfin_mac.h b/drivers/net/bfin_mac.h
index aed68bed2365..f8559ac9a403 100644
--- a/drivers/net/bfin_mac.h
+++ b/drivers/net/bfin_mac.h
@@ -17,7 +17,14 @@ | |||
17 | #include <linux/etherdevice.h> | 17 | #include <linux/etherdevice.h> |
18 | #include <linux/bfin_mac.h> | 18 | #include <linux/bfin_mac.h> |
19 | 19 | ||
20 | /* | ||
21 | * Disable hardware checksum for bug #5600 if writeback cache is | ||
22 | * enabled. Otherwise, a corrupted RX packet will be sent up the | ||
23 | * stack without an error mark. | ||
24 | */ | ||
25 | #ifndef CONFIG_BFIN_EXTMEM_WRITEBACK | ||
20 | #define BFIN_MAC_CSUM_OFFLOAD | 26 | #define BFIN_MAC_CSUM_OFFLOAD |
27 | #endif | ||
21 | 28 | ||
22 | #define TX_RECLAIM_JIFFIES (HZ / 5) | 29 | #define TX_RECLAIM_JIFFIES (HZ / 5) |
23 | 30 | ||
@@ -68,7 +75,6 @@ struct bfin_mac_local { | |||
68 | */ | 75 | */ |
69 | struct net_device_stats stats; | 76 | struct net_device_stats stats; |
70 | 77 | ||
71 | unsigned char Mac[6]; /* MAC address of the board */ | ||
72 | spinlock_t lock; | 78 | spinlock_t lock; |
73 | 79 | ||
74 | int wol; /* Wake On Lan */ | 80 | int wol; /* Wake On Lan */ |
@@ -76,6 +82,9 @@ struct bfin_mac_local { | |||
76 | struct timer_list tx_reclaim_timer; | 82 | struct timer_list tx_reclaim_timer; |
77 | struct net_device *ndev; | 83 | struct net_device *ndev; |
78 | 84 | ||
85 | /* Data for EMAC_VLAN1 regs */ | ||
86 | u16 vlan1_mask, vlan2_mask; | ||
87 | |||
79 | /* MII and PHY stuffs */ | 88 | /* MII and PHY stuffs */ |
80 | int old_link; /* used by bf537_adjust_link */ | 89 | int old_link; /* used by bf537_adjust_link */ |
81 | int old_speed; | 90 | int old_speed; |
diff --git a/drivers/net/bna/bnad_ethtool.c b/drivers/net/bna/bnad_ethtool.c
index 99be5ae91991..142d6047da27 100644
--- a/drivers/net/bna/bnad_ethtool.c
+++ b/drivers/net/bna/bnad_ethtool.c
@@ -275,7 +275,6 @@ bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) | |||
275 | 275 | ||
276 | ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL); | 276 | ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL); |
277 | if (ioc_attr) { | 277 | if (ioc_attr) { |
278 | memset(ioc_attr, 0, sizeof(*ioc_attr)); | ||
279 | spin_lock_irqsave(&bnad->bna_lock, flags); | 278 | spin_lock_irqsave(&bnad->bna_lock, flags); |
280 | bfa_nw_ioc_get_attr(&bnad->bna.device.ioc, ioc_attr); | 279 | bfa_nw_ioc_get_attr(&bnad->bna.device.ioc, ioc_attr); |
281 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | 280 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
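Not driver code, just the rationale for the one-line removal above: kzalloc() already returns zeroed memory, so the follow-up memset(ioc_attr, 0, sizeof(*ioc_attr)) did no additional work. calloc() plays the same role in this userspace check:

```c
#include <stdio.h>
#include <stdlib.h>

struct ioc_attr { int fields[8]; };

int main(void)
{
	struct ioc_attr *attr = calloc(1, sizeof(*attr));	/* zeroed allocation */

	if (!attr)
		return 1;
	printf("first field after calloc: %d\n", attr->fields[0]);	/* prints 0 */
	free(attr);
	return 0;
}
```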
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index df99edf3464a..0ba59d5aeb7f 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -7553,6 +7553,10 @@ bnx2_set_flags(struct net_device *dev, u32 data) | |||
7553 | !(data & ETH_FLAG_RXVLAN)) | 7553 | !(data & ETH_FLAG_RXVLAN)) |
7554 | return -EINVAL; | 7554 | return -EINVAL; |
7555 | 7555 | ||
7556 | /* TSO with VLAN tag won't work with current firmware */ | ||
7557 | if (!(data & ETH_FLAG_TXVLAN)) | ||
7558 | return -EINVAL; | ||
7559 | |||
7556 | rc = ethtool_op_set_flags(dev, data, ETH_FLAG_RXHASH | ETH_FLAG_RXVLAN | | 7560 | rc = ethtool_op_set_flags(dev, data, ETH_FLAG_RXHASH | ETH_FLAG_RXVLAN | |
7557 | ETH_FLAG_TXVLAN); | 7561 | ETH_FLAG_TXVLAN); |
7558 | if (rc) | 7562 | if (rc) |
@@ -7962,11 +7966,8 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) | |||
7962 | 7966 | ||
7963 | /* AER (Advanced Error Reporting) hooks */ | 7967 | /* AER (Advanced Error Reporting) hooks */ |
7964 | err = pci_enable_pcie_error_reporting(pdev); | 7968 | err = pci_enable_pcie_error_reporting(pdev); |
7965 | if (err) { | 7969 | if (!err) |
7966 | dev_err(&pdev->dev, "pci_enable_pcie_error_reporting " | 7970 | bp->flags |= BNX2_FLAG_AER_ENABLED; |
7967 | "failed 0x%x\n", err); | ||
7968 | /* non-fatal, continue */ | ||
7969 | } | ||
7970 | 7971 | ||
7971 | } else { | 7972 | } else { |
7972 | bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX); | 7973 | bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX); |
@@ -8229,8 +8230,10 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) | |||
8229 | return 0; | 8230 | return 0; |
8230 | 8231 | ||
8231 | err_out_unmap: | 8232 | err_out_unmap: |
8232 | if (bp->flags & BNX2_FLAG_PCIE) | 8233 | if (bp->flags & BNX2_FLAG_AER_ENABLED) { |
8233 | pci_disable_pcie_error_reporting(pdev); | 8234 | pci_disable_pcie_error_reporting(pdev); |
8235 | bp->flags &= ~BNX2_FLAG_AER_ENABLED; | ||
8236 | } | ||
8234 | 8237 | ||
8235 | if (bp->regview) { | 8238 | if (bp->regview) { |
8236 | iounmap(bp->regview); | 8239 | iounmap(bp->regview); |
@@ -8418,8 +8421,10 @@ bnx2_remove_one(struct pci_dev *pdev) | |||
8418 | 8421 | ||
8419 | kfree(bp->temp_stats_blk); | 8422 | kfree(bp->temp_stats_blk); |
8420 | 8423 | ||
8421 | if (bp->flags & BNX2_FLAG_PCIE) | 8424 | if (bp->flags & BNX2_FLAG_AER_ENABLED) { |
8422 | pci_disable_pcie_error_reporting(pdev); | 8425 | pci_disable_pcie_error_reporting(pdev); |
8426 | bp->flags &= ~BNX2_FLAG_AER_ENABLED; | ||
8427 | } | ||
8423 | 8428 | ||
8424 | free_netdev(dev); | 8429 | free_netdev(dev); |
8425 | 8430 | ||
@@ -8535,7 +8540,7 @@ static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev) | |||
8535 | } | 8540 | } |
8536 | rtnl_unlock(); | 8541 | rtnl_unlock(); |
8537 | 8542 | ||
8538 | if (!(bp->flags & BNX2_FLAG_PCIE)) | 8543 | if (!(bp->flags & BNX2_FLAG_AER_ENABLED)) |
8539 | return result; | 8544 | return result; |
8540 | 8545 | ||
8541 | err = pci_cleanup_aer_uncorrect_error_status(pdev); | 8546 | err = pci_cleanup_aer_uncorrect_error_status(pdev); |
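A sketch, with invented names, of the bookkeeping pattern the bnx2 change adopts: record in a dedicated flag that AER reporting was actually enabled, and make teardown conditional on that flag rather than on the broader PCIE capability flag:

```c
#include <stdio.h>

#define FLAG_AER_ENABLED 0x00004000	/* mirrors BNX2_FLAG_AER_ENABLED */

struct board { unsigned int flags; };

static int enable_aer(void) { return -1; }	/* pretend the platform lacks AER */
static void disable_aer(void) { puts("AER disabled"); }

static void board_init(struct board *bp)
{
	/* remember success; failure is non-fatal and simply leaves the flag clear */
	if (!enable_aer())
		bp->flags |= FLAG_AER_ENABLED;
}

static void board_remove(struct board *bp)
{
	/* only undo what init actually managed to enable */
	if (bp->flags & FLAG_AER_ENABLED) {
		disable_aer();
		bp->flags &= ~FLAG_AER_ENABLED;
	}
}

int main(void)
{
	struct board bp = { 0 };

	board_init(&bp);
	board_remove(&bp);	/* prints nothing here, since enable_aer() failed */
	return 0;
}
```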
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 5488a2e82fe9..f459fb2f9add 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -6741,6 +6741,7 @@ struct bnx2 { | |||
6741 | #define BNX2_FLAG_JUMBO_BROKEN 0x00000800 | 6741 | #define BNX2_FLAG_JUMBO_BROKEN 0x00000800 |
6742 | #define BNX2_FLAG_CAN_KEEP_VLAN 0x00001000 | 6742 | #define BNX2_FLAG_CAN_KEEP_VLAN 0x00001000 |
6743 | #define BNX2_FLAG_BROKEN_STATS 0x00002000 | 6743 | #define BNX2_FLAG_BROKEN_STATS 0x00002000 |
6744 | #define BNX2_FLAG_AER_ENABLED 0x00004000 | ||
6744 | 6745 | ||
6745 | struct bnx2_napi bnx2_napi[BNX2_MAX_MSIX_VEC]; | 6746 | struct bnx2_napi bnx2_napi[BNX2_MAX_MSIX_VEC]; |
6746 | 6747 | ||
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 77d6c8d6d86b..7897d114b290 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -22,8 +22,8 @@ | |||
22 | * (you will need to reboot afterwards) */ | 22 | * (you will need to reboot afterwards) */ |
23 | /* #define BNX2X_STOP_ON_ERROR */ | 23 | /* #define BNX2X_STOP_ON_ERROR */ |
24 | 24 | ||
25 | #define DRV_MODULE_VERSION "1.62.00-3" | 25 | #define DRV_MODULE_VERSION "1.62.00-6" |
26 | #define DRV_MODULE_RELDATE "2010/12/21" | 26 | #define DRV_MODULE_RELDATE "2011/01/30" |
27 | #define BNX2X_BC_VER 0x040200 | 27 | #define BNX2X_BC_VER 0x040200 |
28 | 28 | ||
29 | #define BNX2X_MULTI_QUEUE | 29 | #define BNX2X_MULTI_QUEUE |
@@ -636,6 +636,7 @@ struct bnx2x_common { | |||
636 | 636 | ||
637 | #define CHIP_METAL(bp) (bp->common.chip_id & 0x00000ff0) | 637 | #define CHIP_METAL(bp) (bp->common.chip_id & 0x00000ff0) |
638 | #define CHIP_BOND_ID(bp) (bp->common.chip_id & 0x0000000f) | 638 | #define CHIP_BOND_ID(bp) (bp->common.chip_id & 0x0000000f) |
639 | #define CHIP_PARITY_ENABLED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) | ||
639 | 640 | ||
640 | int flash_size; | 641 | int flash_size; |
641 | #define NVRAM_1MB_SIZE 0x20000 /* 1M bit in bytes */ | 642 | #define NVRAM_1MB_SIZE 0x20000 /* 1M bit in bytes */ |
@@ -1414,12 +1415,12 @@ struct bnx2x_func_init_params { | |||
1414 | else | 1415 | else |
1415 | 1416 | ||
1416 | /* skip rx queue | 1417 | /* skip rx queue |
1417 | * if FCOE l2 support is diabled and this is the fcoe L2 queue | 1418 | * if FCOE l2 support is disabled and this is the fcoe L2 queue |
1418 | */ | 1419 | */ |
1419 | #define skip_rx_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx)) | 1420 | #define skip_rx_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx)) |
1420 | 1421 | ||
1421 | /* skip tx queue | 1422 | /* skip tx queue |
1422 | * if FCOE l2 support is diabled and this is the fcoe L2 queue | 1423 | * if FCOE l2 support is disabled and this is the fcoe L2 queue |
1423 | */ | 1424 | */ |
1424 | #define skip_tx_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx)) | 1425 | #define skip_tx_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx)) |
1425 | 1426 | ||
@@ -1612,19 +1613,23 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, | |||
1612 | #define BNX2X_BTR 4 | 1613 | #define BNX2X_BTR 4 |
1613 | #define MAX_SPQ_PENDING 8 | 1614 | #define MAX_SPQ_PENDING 8 |
1614 | 1615 | ||
1615 | 1616 | /* CMNG constants, as derived from system spec calculations */ | |
1616 | /* CMNG constants | 1617 | /* default MIN rate in case VNIC min rate is configured to zero - 100Mbps */ |
1617 | derived from lab experiments, and not from system spec calculations !!! */ | 1618 | #define DEF_MIN_RATE 100 |
1618 | #define DEF_MIN_RATE 100 | ||
1619 | /* resolution of the rate shaping timer - 100 usec */ | 1619 | /* resolution of the rate shaping timer - 100 usec */ |
1620 | #define RS_PERIODIC_TIMEOUT_USEC 100 | 1620 | #define RS_PERIODIC_TIMEOUT_USEC 100 |
1621 | /* resolution of fairness algorithm in usecs - | ||
1622 | coefficient for calculating the actual t fair */ | ||
1623 | #define T_FAIR_COEF 10000000 | ||
1624 | /* number of bytes in single QM arbitration cycle - | 1621 | /* number of bytes in single QM arbitration cycle - |
1625 | coefficient for calculating the fairness timer */ | 1622 | * coefficient for calculating the fairness timer */ |
1626 | #define QM_ARB_BYTES 40000 | 1623 | #define QM_ARB_BYTES 160000 |
1627 | #define FAIR_MEM 2 | 1624 | /* resolution of Min algorithm 1:100 */ |
1625 | #define MIN_RES 100 | ||
1626 | /* how many bytes above threshold for the minimal credit of Min algorithm*/ | ||
1627 | #define MIN_ABOVE_THRESH 32768 | ||
1628 | /* Fairness algorithm integration time coefficient - | ||
1629 | * for calculating the actual Tfair */ | ||
1630 | #define T_FAIR_COEF ((MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES) | ||
1631 | /* Memory of fairness algorithm . 2 cycles */ | ||
1632 | #define FAIR_MEM 2 | ||
1628 | 1633 | ||
1629 | 1634 | ||
1630 | #define ATTN_NIG_FOR_FUNC (1L << 8) | 1635 | #define ATTN_NIG_FOR_FUNC (1L << 8) |
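A quick arithmetic check (not driver code) of the reworked CMNG constants above: with QM_ARB_BYTES raised to 160000 and the new MIN_RES/MIN_ABOVE_THRESH values, T_FAIR_COEF is now derived from the other constants instead of being the old literal 10000000:

```c
#include <stdio.h>

#define QM_ARB_BYTES		160000
#define MIN_RES			100
#define MIN_ABOVE_THRESH	32768
#define T_FAIR_COEF		((MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES)

int main(void)
{
	/* (32768 + 160000) * 8 * 100 = 154214400 */
	printf("T_FAIR_COEF = %d\n", T_FAIR_COEF);
	return 0;
}
```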
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index 710ce5d04c53..93798129061b 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -259,10 +259,44 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue, | |||
259 | #endif | 259 | #endif |
260 | } | 260 | } |
261 | 261 | ||
262 | /* Timestamp option length allowed for TPA aggregation: | ||
263 | * | ||
264 | * nop nop kind length echo val | ||
265 | */ | ||
266 | #define TPA_TSTAMP_OPT_LEN 12 | ||
267 | /** | ||
268 | * Calculate the approximate value of the MSS for this | ||
269 | * aggregation using its first packet. | ||
270 | * | ||
271 | * @param bp | ||
272 | * @param parsing_flags Parsing flags from the START CQE | ||
273 | * @param len_on_bd Total length of the first packet for the | ||
274 | * aggregation. | ||
275 | */ | ||
276 | static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags, | ||
277 | u16 len_on_bd) | ||
278 | { | ||
279 | /* A TPA aggregation won't have IP options or TCP options | ||
280 | * other than timestamp. | ||
281 | */ | ||
282 | u16 hdrs_len = ETH_HLEN + sizeof(struct iphdr) + sizeof(struct tcphdr); | ||
283 | |||
284 | |||
285 | /* Check if there was a TCP timestamp; if there is one, it will | ||
286 | * always be 12 bytes long: nop nop kind length echo val. | ||
287 | * | ||
288 | * Otherwise FW would close the aggregation. | ||
289 | */ | ||
290 | if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG) | ||
291 | hdrs_len += TPA_TSTAMP_OPT_LEN; | ||
292 | |||
293 | return len_on_bd - hdrs_len; | ||
294 | } | ||
295 | |||
262 | static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, | 296 | static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, |
263 | struct sk_buff *skb, | 297 | struct sk_buff *skb, |
264 | struct eth_fast_path_rx_cqe *fp_cqe, | 298 | struct eth_fast_path_rx_cqe *fp_cqe, |
265 | u16 cqe_idx) | 299 | u16 cqe_idx, u16 parsing_flags) |
266 | { | 300 | { |
267 | struct sw_rx_page *rx_pg, old_rx_pg; | 301 | struct sw_rx_page *rx_pg, old_rx_pg; |
268 | u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd); | 302 | u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd); |
@@ -275,8 +309,8 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, | |||
275 | 309 | ||
276 | /* This is needed in order to enable forwarding support */ | 310 | /* This is needed in order to enable forwarding support */ |
277 | if (frag_size) | 311 | if (frag_size) |
278 | skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE, | 312 | skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp, parsing_flags, |
279 | max(frag_size, (u32)len_on_bd)); | 313 | len_on_bd); |
280 | 314 | ||
281 | #ifdef BNX2X_STOP_ON_ERROR | 315 | #ifdef BNX2X_STOP_ON_ERROR |
282 | if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) { | 316 | if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) { |
@@ -344,6 +378,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, | |||
344 | if (likely(new_skb)) { | 378 | if (likely(new_skb)) { |
345 | /* fix ip xsum and give it to the stack */ | 379 | /* fix ip xsum and give it to the stack */ |
346 | /* (no need to map the new skb) */ | 380 | /* (no need to map the new skb) */ |
381 | u16 parsing_flags = | ||
382 | le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags); | ||
347 | 383 | ||
348 | prefetch(skb); | 384 | prefetch(skb); |
349 | prefetch(((char *)(skb)) + L1_CACHE_BYTES); | 385 | prefetch(((char *)(skb)) + L1_CACHE_BYTES); |
@@ -373,9 +409,9 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, | |||
373 | } | 409 | } |
374 | 410 | ||
375 | if (!bnx2x_fill_frag_skb(bp, fp, skb, | 411 | if (!bnx2x_fill_frag_skb(bp, fp, skb, |
376 | &cqe->fast_path_cqe, cqe_idx)) { | 412 | &cqe->fast_path_cqe, cqe_idx, |
377 | if ((le16_to_cpu(cqe->fast_path_cqe. | 413 | parsing_flags)) { |
378 | pars_flags.flags) & PARSING_FLAGS_VLAN)) | 414 | if (parsing_flags & PARSING_FLAGS_VLAN) |
379 | __vlan_hwaccel_put_tag(skb, | 415 | __vlan_hwaccel_put_tag(skb, |
380 | le16_to_cpu(cqe->fast_path_cqe. | 416 | le16_to_cpu(cqe->fast_path_cqe. |
381 | vlan_tag)); | 417 | vlan_tag)); |
@@ -703,19 +739,20 @@ u16 bnx2x_get_mf_speed(struct bnx2x *bp) | |||
703 | { | 739 | { |
704 | u16 line_speed = bp->link_vars.line_speed; | 740 | u16 line_speed = bp->link_vars.line_speed; |
705 | if (IS_MF(bp)) { | 741 | if (IS_MF(bp)) { |
706 | u16 maxCfg = (bp->mf_config[BP_VN(bp)] & | 742 | u16 maxCfg = bnx2x_extract_max_cfg(bp, |
707 | FUNC_MF_CFG_MAX_BW_MASK) >> | 743 | bp->mf_config[BP_VN(bp)]); |
708 | FUNC_MF_CFG_MAX_BW_SHIFT; | 744 | |
709 | /* Calculate the current MAX line speed limit for the DCC | 745 | /* Calculate the current MAX line speed limit for the MF |
710 | * capable devices | 746 | * devices |
711 | */ | 747 | */ |
712 | if (IS_MF_SD(bp)) { | 748 | if (IS_MF_SI(bp)) |
749 | line_speed = (line_speed * maxCfg) / 100; | ||
750 | else { /* SD mode */ | ||
713 | u16 vn_max_rate = maxCfg * 100; | 751 | u16 vn_max_rate = maxCfg * 100; |
714 | 752 | ||
715 | if (vn_max_rate < line_speed) | 753 | if (vn_max_rate < line_speed) |
716 | line_speed = vn_max_rate; | 754 | line_speed = vn_max_rate; |
717 | } else /* IS_MF_SI(bp)) */ | 755 | } |
718 | line_speed = (line_speed * maxCfg) / 100; | ||
719 | } | 756 | } |
720 | 757 | ||
721 | return line_speed; | 758 | return line_speed; |
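A standalone restatement of the MSS estimate that bnx2x_set_lro_mss() introduces above, assuming the usual 14/20/20-byte Ethernet/IPv4/TCP header sizes; FLAG_TSTAMP below is an invented stand-in for PARSING_FLAGS_TIME_STAMP_EXIST_FLAG, not the real bit value:

```c
#include <stdio.h>
#include <stdint.h>

#define ETH_HLEN		14
#define IPV4_HDR_LEN		20	/* sizeof(struct iphdr) with no options */
#define TCP_HDR_LEN		20	/* sizeof(struct tcphdr) with no options */
#define TPA_TSTAMP_OPT_LEN	12	/* nop nop kind length echo val */
#define FLAG_TSTAMP		0x1	/* stand-in flag bit for the example */

static uint16_t lro_mss(uint16_t parsing_flags, uint16_t len_on_bd)
{
	uint16_t hdrs_len = ETH_HLEN + IPV4_HDR_LEN + TCP_HDR_LEN;

	/* per the comment in the patch, firmware closes the aggregation for any
	 * other TCP option, so the only optional cost is the 12-byte timestamp */
	if (parsing_flags & FLAG_TSTAMP)
		hdrs_len += TPA_TSTAMP_OPT_LEN;

	return len_on_bd - hdrs_len;
}

int main(void)
{
	/* 1514-byte first frame: MSS 1460 without timestamps, 1448 with them */
	printf("%u %u\n", lro_mss(0, 1514), lro_mss(FLAG_TSTAMP, 1514));
	return 0;
}
```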
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h
index 03eb4d68e6bb..326ba44b3ded 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/bnx2x/bnx2x_cmn.h
@@ -1044,4 +1044,24 @@ static inline void storm_memset_cmng(struct bnx2x *bp, | |||
1044 | void bnx2x_acquire_phy_lock(struct bnx2x *bp); | 1044 | void bnx2x_acquire_phy_lock(struct bnx2x *bp); |
1045 | void bnx2x_release_phy_lock(struct bnx2x *bp); | 1045 | void bnx2x_release_phy_lock(struct bnx2x *bp); |
1046 | 1046 | ||
1047 | /** | ||
1048 | * Extracts MAX BW part from MF configuration. | ||
1049 | * | ||
1050 | * @param bp | ||
1051 | * @param mf_cfg | ||
1052 | * | ||
1053 | * @return u16 | ||
1054 | */ | ||
1055 | static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg) | ||
1056 | { | ||
1057 | u16 max_cfg = (mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >> | ||
1058 | FUNC_MF_CFG_MAX_BW_SHIFT; | ||
1059 | if (!max_cfg) { | ||
1060 | BNX2X_ERR("Illegal configuration detected for Max BW - " | ||
1061 | "using 100 instead\n"); | ||
1062 | max_cfg = 100; | ||
1063 | } | ||
1064 | return max_cfg; | ||
1065 | } | ||
1066 | |||
1047 | #endif /* BNX2X_CMN_H */ | 1067 | #endif /* BNX2X_CMN_H */ |
diff --git a/drivers/net/bnx2x/bnx2x_dump.h b/drivers/net/bnx2x/bnx2x_dump.h
index dc18c25ca9e5..fb3ff7c4d7ca 100644
--- a/drivers/net/bnx2x/bnx2x_dump.h
+++ b/drivers/net/bnx2x/bnx2x_dump.h
@@ -1,10 +1,16 @@ | |||
1 | /* bnx2x_dump.h: Broadcom Everest network driver. | 1 | /* bnx2x_dump.h: Broadcom Everest network driver. |
2 | * | 2 | * |
3 | * Copyright (c) 2009 Broadcom Corporation | 3 | * Copyright (c) 2011 Broadcom Corporation |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify | 5 | * Unless you and Broadcom execute a separate written software license |
6 | * it under the terms of the GNU General Public License as published by | 6 | * agreement governing use of this software, this software is licensed to you |
7 | * the Free Software Foundation. | 7 | * under the terms of the GNU General Public License version 2, available |
8 | * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). | ||
9 | * | ||
10 | * Notwithstanding the above, under no circumstances may you combine this | ||
11 | * software in any way with any other Broadcom software provided under a | ||
12 | * license other than the GPL, without Broadcom's express prior written | ||
13 | * consent. | ||
8 | */ | 14 | */ |
9 | 15 | ||
10 | 16 | ||
@@ -17,53 +23,53 @@ | |||
17 | #define BNX2X_DUMP_H | 23 | #define BNX2X_DUMP_H |
18 | 24 | ||
19 | 25 | ||
20 | struct dump_sign { | ||
21 | u32 time_stamp; | ||
22 | u32 diag_ver; | ||
23 | u32 grc_dump_ver; | ||
24 | }; | ||
25 | 26 | ||
26 | #define TSTORM_WAITP_ADDR 0x1b8a80 | 27 | /*definitions */ |
27 | #define CSTORM_WAITP_ADDR 0x238a80 | 28 | #define XSTORM_WAITP_ADDR 0x2b8a80 |
28 | #define XSTORM_WAITP_ADDR 0x2b8a80 | 29 | #define TSTORM_WAITP_ADDR 0x1b8a80 |
29 | #define USTORM_WAITP_ADDR 0x338a80 | 30 | #define USTORM_WAITP_ADDR 0x338a80 |
30 | #define TSTORM_CAM_MODE 0x1b1440 | 31 | #define CSTORM_WAITP_ADDR 0x238a80 |
32 | #define TSTORM_CAM_MODE 0x1B1440 | ||
31 | 33 | ||
32 | #define RI_E1 0x1 | 34 | #define MAX_TIMER_PENDING 200 |
33 | #define RI_E1H 0x2 | 35 | #define TIMER_SCAN_DONT_CARE 0xFF |
36 | #define RI_E1 0x1 | ||
37 | #define RI_E1H 0x2 | ||
34 | #define RI_E2 0x4 | 38 | #define RI_E2 0x4 |
35 | #define RI_ONLINE 0x100 | 39 | #define RI_ONLINE 0x100 |
36 | #define RI_PATH0_DUMP 0x200 | 40 | #define RI_PATH0_DUMP 0x200 |
37 | #define RI_PATH1_DUMP 0x400 | 41 | #define RI_PATH1_DUMP 0x400 |
38 | #define RI_E1_OFFLINE (RI_E1) | 42 | #define RI_E1_OFFLINE (RI_E1) |
39 | #define RI_E1_ONLINE (RI_E1 | RI_ONLINE) | 43 | #define RI_E1_ONLINE (RI_E1 | RI_ONLINE) |
40 | #define RI_E1H_OFFLINE (RI_E1H) | 44 | #define RI_E1H_OFFLINE (RI_E1H) |
41 | #define RI_E1H_ONLINE (RI_E1H | RI_ONLINE) | 45 | #define RI_E1H_ONLINE (RI_E1H | RI_ONLINE) |
42 | #define RI_E2_OFFLINE (RI_E2) | 46 | #define RI_E2_OFFLINE (RI_E2) |
43 | #define RI_E2_ONLINE (RI_E2 | RI_ONLINE) | 47 | #define RI_E2_ONLINE (RI_E2 | RI_ONLINE) |
44 | #define RI_E1E1H_OFFLINE (RI_E1 | RI_E1H) | 48 | #define RI_E1E1H_OFFLINE (RI_E1 | RI_E1H) |
45 | #define RI_E1E1H_ONLINE (RI_E1 | RI_E1H | RI_ONLINE) | 49 | #define RI_E1E1H_ONLINE (RI_E1 | RI_E1H | RI_ONLINE) |
46 | #define RI_E1HE2_OFFLINE (RI_E2 | RI_E1H) | 50 | #define RI_E1HE2_OFFLINE (RI_E2 | RI_E1H) |
47 | #define RI_E1HE2_ONLINE (RI_E2 | RI_E1H | RI_ONLINE) | 51 | #define RI_E1HE2_ONLINE (RI_E2 | RI_E1H | RI_ONLINE) |
48 | #define RI_E1E2_OFFLINE (RI_E2 | RI_E1) | 52 | #define RI_E1E2_OFFLINE (RI_E2 | RI_E1) |
49 | #define RI_E1E2_ONLINE (RI_E2 | RI_E1 | RI_ONLINE) | 53 | #define RI_E1E2_ONLINE (RI_E2 | RI_E1 | RI_ONLINE) |
50 | #define RI_ALL_OFFLINE (RI_E1 | RI_E1H | RI_E2) | 54 | #define RI_ALL_OFFLINE (RI_E1 | RI_E1H | RI_E2) |
51 | #define RI_ALL_ONLINE (RI_E1 | RI_E1H | RI_E2 | RI_ONLINE) | 55 | #define RI_ALL_ONLINE (RI_E1 | RI_E1H | RI_E2 | RI_ONLINE) |
52 | |||
53 | #define MAX_TIMER_PENDING 200 | ||
54 | #define TIMER_SCAN_DONT_CARE 0xFF | ||
55 | 56 | ||
57 | struct dump_sign { | ||
58 | u32 time_stamp; | ||
59 | u32 diag_ver; | ||
60 | u32 grc_dump_ver; | ||
61 | }; | ||
56 | 62 | ||
57 | struct dump_hdr { | 63 | struct dump_hdr { |
58 | u32 hdr_size; /* in dwords, excluding this field */ | 64 | u32 hdr_size; /* in dwords, excluding this field */ |
59 | struct dump_sign dump_sign; | 65 | struct dump_sign dump_sign; |
60 | u32 xstorm_waitp; | 66 | u32 xstorm_waitp; |
61 | u32 tstorm_waitp; | 67 | u32 tstorm_waitp; |
62 | u32 ustorm_waitp; | 68 | u32 ustorm_waitp; |
63 | u32 cstorm_waitp; | 69 | u32 cstorm_waitp; |
64 | u16 info; | 70 | u16 info; |
65 | u8 idle_chk; | 71 | u8 idle_chk; |
66 | u8 reserved; | 72 | u8 reserved; |
67 | }; | 73 | }; |
68 | 74 | ||
69 | struct reg_addr { | 75 | struct reg_addr { |
@@ -80,202 +86,185 @@ struct wreg_addr { | |||
80 | u16 info; | 86 | u16 info; |
81 | }; | 87 | }; |
82 | 88 | ||
83 | 89 | #define REGS_COUNT 834 | |
84 | #define REGS_COUNT 558 | ||
85 | static const struct reg_addr reg_addrs[REGS_COUNT] = { | 90 | static const struct reg_addr reg_addrs[REGS_COUNT] = { |
86 | { 0x2000, 341, RI_ALL_ONLINE }, { 0x2800, 103, RI_ALL_ONLINE }, | 91 | { 0x2000, 341, RI_ALL_ONLINE }, { 0x2800, 103, RI_ALL_ONLINE }, |
87 | { 0x3000, 287, RI_ALL_ONLINE }, { 0x3800, 331, RI_ALL_ONLINE }, | 92 | { 0x3000, 287, RI_ALL_ONLINE }, { 0x3800, 331, RI_ALL_ONLINE }, |
88 | { 0x8800, 6, RI_E1_ONLINE }, { 0xa000, 223, RI_ALL_ONLINE }, | 93 | { 0x8800, 6, RI_ALL_ONLINE }, { 0x8818, 1, RI_E1HE2_ONLINE }, |
89 | { 0xa388, 1, RI_ALL_ONLINE }, { 0xa398, 1, RI_ALL_ONLINE }, | 94 | { 0x9000, 164, RI_E2_ONLINE }, { 0x9400, 33, RI_E2_ONLINE }, |
90 | { 0xa39c, 7, RI_E1H_ONLINE }, { 0xa3c0, 3, RI_E1H_ONLINE }, | 95 | { 0xa000, 27, RI_ALL_ONLINE }, { 0xa06c, 1, RI_E1E1H_ONLINE }, |
91 | { 0xa3d0, 1, RI_E1H_ONLINE }, { 0xa3d8, 1, RI_E1H_ONLINE }, | 96 | { 0xa070, 71, RI_ALL_ONLINE }, { 0xa18c, 4, RI_E1E1H_ONLINE }, |
92 | { 0xa3e0, 1, RI_E1H_ONLINE }, { 0xa3e8, 1, RI_E1H_ONLINE }, | 97 | { 0xa19c, 62, RI_ALL_ONLINE }, { 0xa294, 2, RI_E1E1H_ONLINE }, |
93 | { 0xa3f0, 1, RI_E1H_ONLINE }, { 0xa3f8, 1, RI_E1H_ONLINE }, | 98 | { 0xa29c, 56, RI_ALL_ONLINE }, { 0xa39c, 7, RI_E1HE2_ONLINE }, |
94 | { 0xa400, 69, RI_ALL_ONLINE }, { 0xa518, 1, RI_ALL_ONLINE }, | 99 | { 0xa3c0, 3, RI_E1HE2_ONLINE }, { 0xa3d0, 1, RI_E1HE2_ONLINE }, |
95 | { 0xa520, 1, RI_ALL_ONLINE }, { 0xa528, 1, RI_ALL_ONLINE }, | 100 | { 0xa3d8, 1, RI_E1HE2_ONLINE }, { 0xa3e0, 1, RI_E1HE2_ONLINE }, |
96 | { 0xa530, 1, RI_ALL_ONLINE }, { 0xa538, 1, RI_ALL_ONLINE }, | 101 | { 0xa3e8, 1, RI_E1HE2_ONLINE }, { 0xa3f0, 1, RI_E1HE2_ONLINE }, |
97 | { 0xa540, 1, RI_ALL_ONLINE }, { 0xa548, 1, RI_ALL_ONLINE }, | 102 | { 0xa3f8, 1, RI_E1HE2_ONLINE }, { 0xa400, 43, RI_ALL_ONLINE }, |
98 | { 0xa550, 1, RI_ALL_ONLINE }, { 0xa558, 1, RI_ALL_ONLINE }, | 103 | { 0xa4ac, 2, RI_E1E1H_ONLINE }, { 0xa4b4, 1, RI_ALL_ONLINE }, |
99 | { 0xa560, 1, RI_ALL_ONLINE }, { 0xa568, 1, RI_ALL_ONLINE }, | 104 | { 0xa4b8, 2, RI_E1E1H_ONLINE }, { 0xa4c0, 3, RI_ALL_ONLINE }, |
100 | { 0xa570, 1, RI_ALL_ONLINE }, { 0xa580, 1, RI_ALL_ONLINE }, | 105 | { 0xa4cc, 5, RI_E1E1H_ONLINE }, { 0xa4e0, 9, RI_ALL_ONLINE }, |
101 | { 0xa590, 1, RI_ALL_ONLINE }, { 0xa5a0, 1, RI_ALL_ONLINE }, | 106 | { 0xa504, 1, RI_E1E1H_ONLINE }, { 0xa508, 3, RI_ALL_ONLINE }, |
102 | { 0xa5c0, 1, RI_ALL_ONLINE }, { 0xa5e0, 1, RI_E1H_ONLINE }, | 107 | { 0xa518, 1, RI_ALL_ONLINE }, { 0xa520, 1, RI_ALL_ONLINE }, |
103 | { 0xa5e8, 1, RI_E1H_ONLINE }, { 0xa5f0, 1, RI_E1H_ONLINE }, | 108 | { 0xa528, 1, RI_ALL_ONLINE }, { 0xa530, 1, RI_ALL_ONLINE }, |
104 | { 0xa5f8, 10, RI_E1H_ONLINE }, { 0x10000, 236, RI_ALL_ONLINE }, | 109 | { 0xa538, 1, RI_ALL_ONLINE }, { 0xa540, 1, RI_ALL_ONLINE }, |
105 | { 0x103bc, 1, RI_ALL_ONLINE }, { 0x103cc, 1, RI_ALL_ONLINE }, | 110 | { 0xa548, 1, RI_E1E1H_ONLINE }, { 0xa550, 1, RI_E1E1H_ONLINE }, |
106 | { 0x103dc, 1, RI_ALL_ONLINE }, { 0x10400, 57, RI_ALL_ONLINE }, | 111 | { 0xa558, 1, RI_E1E1H_ONLINE }, { 0xa560, 1, RI_E1E1H_ONLINE }, |
107 | { 0x104e8, 2, RI_ALL_ONLINE }, { 0x104f4, 2, RI_ALL_ONLINE }, | 112 | { 0xa568, 1, RI_E1E1H_ONLINE }, { 0xa570, 1, RI_ALL_ONLINE }, |
108 | { 0x10500, 146, RI_ALL_ONLINE }, { 0x10750, 2, RI_ALL_ONLINE }, | 113 | { 0xa580, 1, RI_ALL_ONLINE }, { 0xa590, 1, RI_ALL_ONLINE }, |
109 | { 0x10760, 2, RI_ALL_ONLINE }, { 0x10770, 2, RI_ALL_ONLINE }, | 114 | { 0xa5a0, 1, RI_ALL_ONLINE }, { 0xa5c0, 1, RI_ALL_ONLINE }, |
110 | { 0x10780, 2, RI_ALL_ONLINE }, { 0x10790, 2, RI_ALL_ONLINE }, | 115 | { 0xa5e0, 1, RI_E1HE2_ONLINE }, { 0xa5e8, 1, RI_E1HE2_ONLINE }, |
111 | { 0x107a0, 2, RI_ALL_ONLINE }, { 0x107b0, 2, RI_ALL_ONLINE }, | 116 | { 0xa5f0, 1, RI_E1HE2_ONLINE }, { 0xa5f8, 10, RI_E1HE2_ONLINE }, |
112 | { 0x107c0, 2, RI_ALL_ONLINE }, { 0x107d0, 2, RI_ALL_ONLINE }, | 117 | { 0xa620, 111, RI_E2_ONLINE }, { 0xa800, 51, RI_E2_ONLINE }, |
113 | { 0x107e0, 2, RI_ALL_ONLINE }, { 0x10880, 2, RI_ALL_ONLINE }, | 118 | { 0xa8d4, 4, RI_E2_ONLINE }, { 0xa8e8, 1, RI_E2_ONLINE }, |
114 | { 0x10900, 2, RI_ALL_ONLINE }, { 0x12000, 1, RI_ALL_ONLINE }, | 119 | { 0xa8f0, 1, RI_E2_ONLINE }, { 0x10000, 236, RI_ALL_ONLINE }, |
115 | { 0x14000, 1, RI_ALL_ONLINE }, { 0x16000, 26, RI_E1H_ONLINE }, | 120 | { 0x10400, 57, RI_ALL_ONLINE }, { 0x104e8, 2, RI_ALL_ONLINE }, |
116 | { 0x16070, 18, RI_E1H_ONLINE }, { 0x160c0, 27, RI_E1H_ONLINE }, | 121 | { 0x104f4, 2, RI_ALL_ONLINE }, { 0x10500, 146, RI_ALL_ONLINE }, |
117 | { 0x16140, 1, RI_E1H_ONLINE }, { 0x16160, 1, RI_E1H_ONLINE }, | 122 | { 0x10750, 2, RI_ALL_ONLINE }, { 0x10760, 2, RI_ALL_ONLINE }, |
118 | { 0x16180, 2, RI_E1H_ONLINE }, { 0x161c0, 2, RI_E1H_ONLINE }, | 123 | { 0x10770, 2, RI_ALL_ONLINE }, { 0x10780, 2, RI_ALL_ONLINE }, |
119 | { 0x16204, 5, RI_E1H_ONLINE }, { 0x18000, 1, RI_E1H_ONLINE }, | 124 | { 0x10790, 2, RI_ALL_ONLINE }, { 0x107a0, 2, RI_ALL_ONLINE }, |
120 | { 0x18008, 1, RI_E1H_ONLINE }, { 0x20000, 24, RI_ALL_ONLINE }, | 125 | { 0x107b0, 2, RI_ALL_ONLINE }, { 0x107c0, 2, RI_ALL_ONLINE }, |
121 | { 0x20060, 8, RI_ALL_ONLINE }, { 0x20080, 138, RI_ALL_ONLINE }, | 126 | { 0x107d0, 2, RI_ALL_ONLINE }, { 0x107e0, 2, RI_ALL_ONLINE }, |
122 | { 0x202b4, 1, RI_ALL_ONLINE }, { 0x202c4, 1, RI_ALL_ONLINE }, | 127 | { 0x10880, 2, RI_ALL_ONLINE }, { 0x10900, 2, RI_ALL_ONLINE }, |
123 | { 0x20400, 2, RI_ALL_ONLINE }, { 0x2040c, 8, RI_ALL_ONLINE }, | 128 | { 0x16000, 26, RI_E1HE2_ONLINE }, { 0x16070, 18, RI_E1HE2_ONLINE }, |
124 | { 0x2042c, 18, RI_E1H_ONLINE }, { 0x20480, 1, RI_ALL_ONLINE }, | 129 | { 0x160c0, 27, RI_E1HE2_ONLINE }, { 0x16140, 1, RI_E1HE2_ONLINE }, |
125 | { 0x20500, 1, RI_ALL_ONLINE }, { 0x20600, 1, RI_ALL_ONLINE }, | 130 | { 0x16160, 1, RI_E1HE2_ONLINE }, { 0x16180, 2, RI_E1HE2_ONLINE }, |
126 | { 0x28000, 1, RI_ALL_ONLINE }, { 0x28004, 8191, RI_ALL_OFFLINE }, | 131 | { 0x161c0, 2, RI_E1HE2_ONLINE }, { 0x16204, 5, RI_E1HE2_ONLINE }, |
127 | { 0x30000, 1, RI_ALL_ONLINE }, { 0x30004, 16383, RI_ALL_OFFLINE }, | 132 | { 0x18000, 1, RI_E1HE2_ONLINE }, { 0x18008, 1, RI_E1HE2_ONLINE }, |
128 | { 0x40000, 98, RI_ALL_ONLINE }, { 0x40194, 1, RI_ALL_ONLINE }, | 133 | { 0x18010, 35, RI_E2_ONLINE }, { 0x180a4, 2, RI_E2_ONLINE }, |
129 | { 0x401a4, 1, RI_ALL_ONLINE }, { 0x401a8, 11, RI_E1H_ONLINE }, | 134 | { 0x180c0, 191, RI_E2_ONLINE }, { 0x18440, 1, RI_E2_ONLINE }, |
130 | { 0x40200, 4, RI_ALL_ONLINE }, { 0x40400, 43, RI_ALL_ONLINE }, | 135 | { 0x18460, 1, RI_E2_ONLINE }, { 0x18480, 2, RI_E2_ONLINE }, |
131 | { 0x404b8, 1, RI_ALL_ONLINE }, { 0x404c8, 1, RI_ALL_ONLINE }, | 136 | { 0x184c0, 2, RI_E2_ONLINE }, { 0x18500, 15, RI_E2_ONLINE }, |
132 | { 0x404cc, 3, RI_E1H_ONLINE }, { 0x40500, 2, RI_ALL_ONLINE }, | 137 | { 0x20000, 24, RI_ALL_ONLINE }, { 0x20060, 8, RI_ALL_ONLINE }, |
138 | { 0x20080, 94, RI_ALL_ONLINE }, { 0x201f8, 1, RI_E1E1H_ONLINE }, | ||
139 | { 0x201fc, 1, RI_ALL_ONLINE }, { 0x20200, 1, RI_E1E1H_ONLINE }, | ||
140 | { 0x20204, 1, RI_ALL_ONLINE }, { 0x20208, 1, RI_E1E1H_ONLINE }, | ||
141 | { 0x2020c, 39, RI_ALL_ONLINE }, { 0x202c8, 1, RI_E2_ONLINE }, | ||
142 | { 0x202d8, 4, RI_E2_ONLINE }, { 0x20400, 2, RI_ALL_ONLINE }, | ||
143 | { 0x2040c, 8, RI_ALL_ONLINE }, { 0x2042c, 18, RI_E1HE2_ONLINE }, | ||
144 | { 0x20480, 1, RI_ALL_ONLINE }, { 0x20500, 1, RI_ALL_ONLINE }, | ||
145 | { 0x20600, 1, RI_ALL_ONLINE }, { 0x28000, 1, RI_ALL_ONLINE }, | ||
146 | { 0x28004, 8191, RI_ALL_OFFLINE }, { 0x30000, 1, RI_ALL_ONLINE }, | ||
147 | { 0x30004, 16383, RI_ALL_OFFLINE }, { 0x40000, 98, RI_ALL_ONLINE }, | ||
148 | { 0x401a8, 8, RI_E1HE2_ONLINE }, { 0x401c8, 1, RI_E1H_ONLINE }, | ||
149 | { 0x401cc, 2, RI_E1HE2_ONLINE }, { 0x401d4, 2, RI_E2_ONLINE }, | ||
150 | { 0x40200, 4, RI_ALL_ONLINE }, { 0x40220, 18, RI_E2_ONLINE }, | ||
151 | { 0x40400, 43, RI_ALL_ONLINE }, { 0x404cc, 3, RI_E1HE2_ONLINE }, | ||
152 | { 0x404e0, 1, RI_E2_ONLINE }, { 0x40500, 2, RI_ALL_ONLINE }, | ||
133 | { 0x40510, 2, RI_ALL_ONLINE }, { 0x40520, 2, RI_ALL_ONLINE }, | 153 | { 0x40510, 2, RI_ALL_ONLINE }, { 0x40520, 2, RI_ALL_ONLINE }, |
134 | { 0x40530, 2, RI_ALL_ONLINE }, { 0x40540, 2, RI_ALL_ONLINE }, | 154 | { 0x40530, 2, RI_ALL_ONLINE }, { 0x40540, 2, RI_ALL_ONLINE }, |
135 | { 0x42000, 164, RI_ALL_ONLINE }, { 0x4229c, 1, RI_ALL_ONLINE }, | 155 | { 0x40550, 10, RI_E2_ONLINE }, { 0x40610, 2, RI_E2_ONLINE }, |
136 | { 0x422ac, 1, RI_ALL_ONLINE }, { 0x422bc, 1, RI_ALL_ONLINE }, | 156 | { 0x42000, 164, RI_ALL_ONLINE }, { 0x422c0, 4, RI_E2_ONLINE }, |
137 | { 0x422d4, 5, RI_E1H_ONLINE }, { 0x42400, 49, RI_ALL_ONLINE }, | 157 | { 0x422d4, 5, RI_E1HE2_ONLINE }, { 0x422e8, 1, RI_E2_ONLINE }, |
138 | { 0x424c8, 38, RI_ALL_ONLINE }, { 0x42568, 2, RI_ALL_ONLINE }, | 158 | { 0x42400, 49, RI_ALL_ONLINE }, { 0x424c8, 38, RI_ALL_ONLINE }, |
139 | { 0x42800, 1, RI_ALL_ONLINE }, { 0x50000, 20, RI_ALL_ONLINE }, | 159 | { 0x42568, 2, RI_ALL_ONLINE }, { 0x42640, 5, RI_E2_ONLINE }, |
140 | { 0x50050, 8, RI_ALL_ONLINE }, { 0x50070, 88, RI_ALL_ONLINE }, | 160 | { 0x42800, 1, RI_ALL_ONLINE }, { 0x50000, 1, RI_ALL_ONLINE }, |
141 | { 0x501dc, 1, RI_ALL_ONLINE }, { 0x501ec, 1, RI_ALL_ONLINE }, | 161 | { 0x50004, 19, RI_ALL_ONLINE }, { 0x50050, 8, RI_ALL_ONLINE }, |
142 | { 0x501f0, 4, RI_E1H_ONLINE }, { 0x50200, 2, RI_ALL_ONLINE }, | 162 | { 0x50070, 88, RI_ALL_ONLINE }, { 0x501f0, 4, RI_E1HE2_ONLINE }, |
143 | { 0x5020c, 7, RI_ALL_ONLINE }, { 0x50228, 6, RI_E1H_ONLINE }, | 163 | { 0x50200, 2, RI_ALL_ONLINE }, { 0x5020c, 7, RI_ALL_ONLINE }, |
144 | { 0x50240, 1, RI_ALL_ONLINE }, { 0x50280, 1, RI_ALL_ONLINE }, | 164 | { 0x50228, 6, RI_E1HE2_ONLINE }, { 0x50240, 1, RI_ALL_ONLINE }, |
165 | { 0x50280, 1, RI_ALL_ONLINE }, { 0x50300, 1, RI_E2_ONLINE }, | ||
166 | { 0x5030c, 1, RI_E2_ONLINE }, { 0x50318, 1, RI_E2_ONLINE }, | ||
167 | { 0x5031c, 1, RI_E2_ONLINE }, { 0x50320, 2, RI_E2_ONLINE }, | ||
145 | { 0x52000, 1, RI_ALL_ONLINE }, { 0x54000, 1, RI_ALL_ONLINE }, | 168 | { 0x52000, 1, RI_ALL_ONLINE }, { 0x54000, 1, RI_ALL_ONLINE }, |
146 | { 0x54004, 3327, RI_ALL_OFFLINE }, { 0x58000, 1, RI_ALL_ONLINE }, | 169 | { 0x54004, 3327, RI_ALL_OFFLINE }, { 0x58000, 1, RI_ALL_ONLINE }, |
147 | { 0x58004, 8191, RI_ALL_OFFLINE }, { 0x60000, 71, RI_ALL_ONLINE }, | 170 | { 0x58004, 8191, RI_E1E1H_OFFLINE }, { 0x60000, 26, RI_ALL_ONLINE }, |
148 | { 0x60128, 1, RI_ALL_ONLINE }, { 0x60138, 1, RI_ALL_ONLINE }, | 171 | { 0x60068, 8, RI_E1E1H_ONLINE }, { 0x60088, 12, RI_ALL_ONLINE }, |
149 | { 0x6013c, 24, RI_E1H_ONLINE }, { 0x60200, 1, RI_ALL_ONLINE }, | 172 | { 0x600b8, 9, RI_E1E1H_ONLINE }, { 0x600dc, 1, RI_ALL_ONLINE }, |
173 | { 0x600e0, 5, RI_E1E1H_ONLINE }, { 0x600f4, 1, RI_ALL_ONLINE }, | ||
174 | { 0x600f8, 1, RI_E1E1H_ONLINE }, { 0x600fc, 8, RI_ALL_ONLINE }, | ||
175 | { 0x6013c, 24, RI_E1H_ONLINE }, { 0x6019c, 2, RI_E2_ONLINE }, | ||
176 | { 0x601ac, 18, RI_E2_ONLINE }, { 0x60200, 1, RI_ALL_ONLINE }, | ||
177 | { 0x60204, 2, RI_ALL_OFFLINE }, { 0x60210, 13, RI_E2_ONLINE }, | ||
150 | { 0x61000, 1, RI_ALL_ONLINE }, { 0x61004, 511, RI_ALL_OFFLINE }, | 178 | { 0x61000, 1, RI_ALL_ONLINE }, { 0x61004, 511, RI_ALL_OFFLINE }, |
151 | { 0x70000, 8, RI_ALL_ONLINE }, { 0x70020, 21496, RI_ALL_OFFLINE }, | 179 | { 0x70000, 8, RI_ALL_ONLINE }, { 0x70020, 8184, RI_ALL_OFFLINE }, |
152 | { 0x85000, 3, RI_ALL_ONLINE }, { 0x8500c, 4, RI_ALL_OFFLINE }, | 180 | { 0x85000, 3, RI_ALL_ONLINE }, { 0x8501c, 7, RI_ALL_ONLINE }, |
153 | { 0x8501c, 7, RI_ALL_ONLINE }, { 0x85038, 4, RI_ALL_OFFLINE }, | 181 | { 0x85048, 1, RI_ALL_ONLINE }, { 0x85200, 32, RI_ALL_ONLINE }, |
154 | { 0x85048, 1, RI_ALL_ONLINE }, { 0x8504c, 109, RI_ALL_OFFLINE }, | 182 | { 0xc1000, 7, RI_ALL_ONLINE }, { 0xc103c, 2, RI_E2_ONLINE }, |
155 | { 0x85200, 32, RI_ALL_ONLINE }, { 0x85280, 11104, RI_ALL_OFFLINE }, | 183 | { 0xc1800, 2, RI_ALL_ONLINE }, { 0xc2000, 164, RI_ALL_ONLINE }, |
156 | { 0xa0000, 16384, RI_ALL_ONLINE }, { 0xb0000, 16384, RI_E1H_ONLINE }, | 184 | { 0xc22c0, 5, RI_E2_ONLINE }, { 0xc22d8, 4, RI_E2_ONLINE }, |
157 | { 0xc1000, 7, RI_ALL_ONLINE }, { 0xc1028, 1, RI_ALL_ONLINE }, | ||
158 | { 0xc1038, 1, RI_ALL_ONLINE }, { 0xc1800, 2, RI_ALL_ONLINE }, | ||
159 | { 0xc2000, 164, RI_ALL_ONLINE }, { 0xc229c, 1, RI_ALL_ONLINE }, | ||
160 | { 0xc22ac, 1, RI_ALL_ONLINE }, { 0xc22bc, 1, RI_ALL_ONLINE }, | ||
161 | { 0xc2400, 49, RI_ALL_ONLINE }, { 0xc24c8, 38, RI_ALL_ONLINE }, | 185 | { 0xc2400, 49, RI_ALL_ONLINE }, { 0xc24c8, 38, RI_ALL_ONLINE }, |
162 | { 0xc2568, 2, RI_ALL_ONLINE }, { 0xc2600, 1, RI_ALL_ONLINE }, | 186 | { 0xc2568, 2, RI_ALL_ONLINE }, { 0xc2600, 1, RI_ALL_ONLINE }, |
163 | { 0xc4000, 165, RI_ALL_ONLINE }, { 0xc42a0, 1, RI_ALL_ONLINE }, | 187 | { 0xc4000, 165, RI_ALL_ONLINE }, { 0xc42d8, 2, RI_E2_ONLINE }, |
164 | { 0xc42b0, 1, RI_ALL_ONLINE }, { 0xc42c0, 1, RI_ALL_ONLINE }, | 188 | { 0xc42e0, 7, RI_E1HE2_ONLINE }, { 0xc42fc, 1, RI_E2_ONLINE }, |
165 | { 0xc42e0, 7, RI_E1H_ONLINE }, { 0xc4400, 51, RI_ALL_ONLINE }, | 189 | { 0xc4400, 51, RI_ALL_ONLINE }, { 0xc44d0, 38, RI_ALL_ONLINE }, |
166 | { 0xc44d0, 38, RI_ALL_ONLINE }, { 0xc4570, 2, RI_ALL_ONLINE }, | 190 | { 0xc4570, 2, RI_ALL_ONLINE }, { 0xc4578, 5, RI_E2_ONLINE }, |
167 | { 0xc4600, 1, RI_ALL_ONLINE }, { 0xd0000, 19, RI_ALL_ONLINE }, | 191 | { 0xc4600, 1, RI_ALL_ONLINE }, { 0xd0000, 19, RI_ALL_ONLINE }, |
168 | { 0xd004c, 8, RI_ALL_ONLINE }, { 0xd006c, 91, RI_ALL_ONLINE }, | 192 | { 0xd004c, 8, RI_ALL_ONLINE }, { 0xd006c, 91, RI_ALL_ONLINE }, |
169 | { 0xd01e4, 1, RI_ALL_ONLINE }, { 0xd01f4, 1, RI_ALL_ONLINE }, | 193 | { 0xd01fc, 1, RI_E2_ONLINE }, { 0xd0200, 2, RI_ALL_ONLINE }, |
170 | { 0xd0200, 2, RI_ALL_ONLINE }, { 0xd020c, 7, RI_ALL_ONLINE }, | 194 | { 0xd020c, 7, RI_ALL_ONLINE }, { 0xd0228, 18, RI_E1HE2_ONLINE }, |
171 | { 0xd0228, 18, RI_E1H_ONLINE }, { 0xd0280, 1, RI_ALL_ONLINE }, | 195 | { 0xd0280, 1, RI_ALL_ONLINE }, { 0xd0300, 1, RI_ALL_ONLINE }, |
172 | { 0xd0300, 1, RI_ALL_ONLINE }, { 0xd0400, 1, RI_ALL_ONLINE }, | 196 | { 0xd0400, 1, RI_ALL_ONLINE }, { 0xd4000, 1, RI_ALL_ONLINE }, |
173 | { 0xd4000, 1, RI_ALL_ONLINE }, { 0xd4004, 2559, RI_ALL_OFFLINE }, | 197 | { 0xd4004, 2559, RI_ALL_OFFLINE }, { 0xd8000, 1, RI_ALL_ONLINE }, |
174 | { 0xd8000, 1, RI_ALL_ONLINE }, { 0xd8004, 8191, RI_ALL_OFFLINE }, | 198 | { 0xd8004, 8191, RI_ALL_OFFLINE }, { 0xe0000, 21, RI_ALL_ONLINE }, |
175 | { 0xe0000, 21, RI_ALL_ONLINE }, { 0xe0054, 8, RI_ALL_ONLINE }, | 199 | { 0xe0054, 8, RI_ALL_ONLINE }, { 0xe0074, 49, RI_ALL_ONLINE }, |
176 | { 0xe0074, 85, RI_ALL_ONLINE }, { 0xe01d4, 1, RI_ALL_ONLINE }, | 200 | { 0xe0138, 1, RI_E1E1H_ONLINE }, { 0xe013c, 35, RI_ALL_ONLINE }, |
177 | { 0xe01e4, 1, RI_ALL_ONLINE }, { 0xe0200, 2, RI_ALL_ONLINE }, | 201 | { 0xe01f4, 2, RI_E2_ONLINE }, { 0xe0200, 2, RI_ALL_ONLINE }, |
178 | { 0xe020c, 8, RI_ALL_ONLINE }, { 0xe022c, 18, RI_E1H_ONLINE }, | 202 | { 0xe020c, 8, RI_ALL_ONLINE }, { 0xe022c, 18, RI_E1HE2_ONLINE }, |
179 | { 0xe0280, 1, RI_ALL_ONLINE }, { 0xe0300, 1, RI_ALL_ONLINE }, | 203 | { 0xe0280, 1, RI_ALL_ONLINE }, { 0xe0300, 1, RI_ALL_ONLINE }, |
180 | { 0xe1000, 1, RI_ALL_ONLINE }, { 0xe2000, 1, RI_ALL_ONLINE }, | 204 | { 0xe1000, 1, RI_ALL_ONLINE }, { 0xe2000, 1, RI_ALL_ONLINE }, |
181 | { 0xe2004, 2047, RI_ALL_OFFLINE }, { 0xf0000, 1, RI_ALL_ONLINE }, | 205 | { 0xe2004, 2047, RI_ALL_OFFLINE }, { 0xf0000, 1, RI_ALL_ONLINE }, |
182 | { 0xf0004, 16383, RI_ALL_OFFLINE }, { 0x101000, 12, RI_ALL_ONLINE }, | 206 | { 0xf0004, 16383, RI_ALL_OFFLINE }, { 0x101000, 12, RI_ALL_ONLINE }, |
183 | { 0x10103c, 1, RI_ALL_ONLINE }, { 0x10104c, 1, RI_ALL_ONLINE }, | 207 | { 0x101050, 1, RI_E1HE2_ONLINE }, { 0x101054, 3, RI_E2_ONLINE }, |
184 | { 0x101050, 1, RI_E1H_ONLINE }, { 0x101100, 1, RI_ALL_ONLINE }, | 208 | { 0x101100, 1, RI_ALL_ONLINE }, { 0x101800, 8, RI_ALL_ONLINE }, |
185 | { 0x101800, 8, RI_ALL_ONLINE }, { 0x102000, 18, RI_ALL_ONLINE }, | 209 | { 0x102000, 18, RI_ALL_ONLINE }, { 0x102068, 6, RI_E2_ONLINE }, |
186 | { 0x102054, 1, RI_ALL_ONLINE }, { 0x102064, 1, RI_ALL_ONLINE }, | ||
187 | { 0x102080, 17, RI_ALL_ONLINE }, { 0x1020c8, 8, RI_E1H_ONLINE }, | 210 | { 0x102080, 17, RI_ALL_ONLINE }, { 0x1020c8, 8, RI_E1H_ONLINE }, |
188 | { 0x102400, 1, RI_ALL_ONLINE }, { 0x103000, 26, RI_ALL_ONLINE }, | 211 | { 0x1020e8, 9, RI_E2_ONLINE }, { 0x102400, 1, RI_ALL_ONLINE }, |
189 | { 0x103074, 1, RI_ALL_ONLINE }, { 0x103084, 1, RI_ALL_ONLINE }, | 212 | { 0x103000, 26, RI_ALL_ONLINE }, { 0x103098, 5, RI_E1HE2_ONLINE }, |
190 | { 0x103094, 1, RI_ALL_ONLINE }, { 0x103098, 5, RI_E1H_ONLINE }, | 213 | { 0x1030ac, 10, RI_E2_ONLINE }, { 0x1030d8, 8, RI_E2_ONLINE }, |
214 | { 0x103400, 1, RI_E2_ONLINE }, { 0x103404, 135, RI_E2_OFFLINE }, | ||
191 | { 0x103800, 8, RI_ALL_ONLINE }, { 0x104000, 63, RI_ALL_ONLINE }, | 215 | { 0x103800, 8, RI_ALL_ONLINE }, { 0x104000, 63, RI_ALL_ONLINE }, |
192 | { 0x104108, 1, RI_ALL_ONLINE }, { 0x104118, 1, RI_ALL_ONLINE }, | 216 | { 0x10411c, 16, RI_E2_ONLINE }, { 0x104200, 17, RI_ALL_ONLINE }, |
193 | { 0x104200, 17, RI_ALL_ONLINE }, { 0x104400, 64, RI_ALL_ONLINE }, | 217 | { 0x104400, 64, RI_ALL_ONLINE }, { 0x104500, 192, RI_ALL_OFFLINE }, |
194 | { 0x104500, 192, RI_ALL_OFFLINE }, { 0x104800, 64, RI_ALL_ONLINE }, | 218 | { 0x104800, 64, RI_ALL_ONLINE }, { 0x104900, 192, RI_ALL_OFFLINE }, |
195 | { 0x104900, 192, RI_ALL_OFFLINE }, { 0x105000, 7, RI_ALL_ONLINE }, | 219 | { 0x105000, 256, RI_ALL_ONLINE }, { 0x105400, 768, RI_ALL_OFFLINE }, |
196 | { 0x10501c, 1, RI_ALL_OFFLINE }, { 0x105020, 3, RI_ALL_ONLINE }, | 220 | { 0x107000, 7, RI_E2_ONLINE }, { 0x108000, 33, RI_E1E1H_ONLINE }, |
197 | { 0x10502c, 1, RI_ALL_OFFLINE }, { 0x105030, 3, RI_ALL_ONLINE }, | 221 | { 0x1080ac, 5, RI_E1H_ONLINE }, { 0x108100, 5, RI_E1E1H_ONLINE }, |
198 | { 0x10503c, 1, RI_ALL_OFFLINE }, { 0x105040, 3, RI_ALL_ONLINE }, | 222 | { 0x108120, 5, RI_E1E1H_ONLINE }, { 0x108200, 74, RI_E1E1H_ONLINE }, |
199 | { 0x10504c, 1, RI_ALL_OFFLINE }, { 0x105050, 3, RI_ALL_ONLINE }, | 223 | { 0x108400, 74, RI_E1E1H_ONLINE }, { 0x108800, 152, RI_E1E1H_ONLINE }, |
200 | { 0x10505c, 1, RI_ALL_OFFLINE }, { 0x105060, 3, RI_ALL_ONLINE }, | 224 | { 0x110000, 111, RI_E2_ONLINE }, { 0x110200, 4, RI_E2_ONLINE }, |
201 | { 0x10506c, 1, RI_ALL_OFFLINE }, { 0x105070, 3, RI_ALL_ONLINE }, | 225 | { 0x120000, 2, RI_ALL_ONLINE }, { 0x120008, 4, RI_ALL_ONLINE }, |
202 | { 0x10507c, 1, RI_ALL_OFFLINE }, { 0x105080, 3, RI_ALL_ONLINE }, | 226 | { 0x120018, 3, RI_ALL_ONLINE }, { 0x120024, 4, RI_ALL_ONLINE }, |
203 | { 0x10508c, 1, RI_ALL_OFFLINE }, { 0x105090, 3, RI_ALL_ONLINE }, | 227 | { 0x120034, 3, RI_ALL_ONLINE }, { 0x120040, 4, RI_ALL_ONLINE }, |
204 | { 0x10509c, 1, RI_ALL_OFFLINE }, { 0x1050a0, 3, RI_ALL_ONLINE }, | 228 | { 0x120050, 3, RI_ALL_ONLINE }, { 0x12005c, 4, RI_ALL_ONLINE }, |
205 | { 0x1050ac, 1, RI_ALL_OFFLINE }, { 0x1050b0, 3, RI_ALL_ONLINE }, | 229 | { 0x12006c, 3, RI_ALL_ONLINE }, { 0x120078, 4, RI_ALL_ONLINE }, |
206 | { 0x1050bc, 1, RI_ALL_OFFLINE }, { 0x1050c0, 3, RI_ALL_ONLINE }, | 230 | { 0x120088, 3, RI_ALL_ONLINE }, { 0x120094, 4, RI_ALL_ONLINE }, |
207 | { 0x1050cc, 1, RI_ALL_OFFLINE }, { 0x1050d0, 3, RI_ALL_ONLINE }, | 231 | { 0x1200a4, 3, RI_ALL_ONLINE }, { 0x1200b0, 4, RI_ALL_ONLINE }, |
208 | { 0x1050dc, 1, RI_ALL_OFFLINE }, { 0x1050e0, 3, RI_ALL_ONLINE }, | 232 | { 0x1200c0, 3, RI_ALL_ONLINE }, { 0x1200cc, 4, RI_ALL_ONLINE }, |
209 | { 0x1050ec, 1, RI_ALL_OFFLINE }, { 0x1050f0, 3, RI_ALL_ONLINE }, | 233 | { 0x1200dc, 3, RI_ALL_ONLINE }, { 0x1200e8, 4, RI_ALL_ONLINE }, |
210 | { 0x1050fc, 1, RI_ALL_OFFLINE }, { 0x105100, 3, RI_ALL_ONLINE }, | 234 | { 0x1200f8, 3, RI_ALL_ONLINE }, { 0x120104, 4, RI_ALL_ONLINE }, |
211 | { 0x10510c, 1, RI_ALL_OFFLINE }, { 0x105110, 3, RI_ALL_ONLINE }, | 235 | { 0x120114, 1, RI_ALL_ONLINE }, { 0x120118, 22, RI_ALL_ONLINE }, |
212 | { 0x10511c, 1, RI_ALL_OFFLINE }, { 0x105120, 3, RI_ALL_ONLINE }, | 236 | { 0x120170, 2, RI_E1E1H_ONLINE }, { 0x120178, 243, RI_ALL_ONLINE }, |
213 | { 0x10512c, 1, RI_ALL_OFFLINE }, { 0x105130, 3, RI_ALL_ONLINE }, | 237 | { 0x120544, 4, RI_E1E1H_ONLINE }, { 0x120554, 7, RI_ALL_ONLINE }, |
214 | { 0x10513c, 1, RI_ALL_OFFLINE }, { 0x105140, 3, RI_ALL_ONLINE }, | 238 | { 0x12059c, 6, RI_E1HE2_ONLINE }, { 0x1205b4, 1, RI_E1HE2_ONLINE }, |
215 | { 0x10514c, 1, RI_ALL_OFFLINE }, { 0x105150, 3, RI_ALL_ONLINE }, | 239 | { 0x1205b8, 16, RI_E1HE2_ONLINE }, { 0x1205f8, 4, RI_E2_ONLINE }, |
216 | { 0x10515c, 1, RI_ALL_OFFLINE }, { 0x105160, 3, RI_ALL_ONLINE }, | 240 | { 0x120618, 1, RI_E2_ONLINE }, { 0x12061c, 20, RI_E1HE2_ONLINE }, |
217 | { 0x10516c, 1, RI_ALL_OFFLINE }, { 0x105170, 3, RI_ALL_ONLINE }, | 241 | { 0x12066c, 11, RI_E1HE2_ONLINE }, { 0x120698, 5, RI_E2_ONLINE }, |
218 | { 0x10517c, 1, RI_ALL_OFFLINE }, { 0x105180, 3, RI_ALL_ONLINE }, | 242 | { 0x1206b0, 76, RI_E2_ONLINE }, { 0x1207fc, 1, RI_E2_ONLINE }, |
219 | { 0x10518c, 1, RI_ALL_OFFLINE }, { 0x105190, 3, RI_ALL_ONLINE }, | 243 | { 0x120808, 66, RI_ALL_ONLINE }, { 0x120910, 7, RI_E2_ONLINE }, |
220 | { 0x10519c, 1, RI_ALL_OFFLINE }, { 0x1051a0, 3, RI_ALL_ONLINE }, | 244 | { 0x120930, 9, RI_E2_ONLINE }, { 0x120a00, 2, RI_ALL_ONLINE }, |
221 | { 0x1051ac, 1, RI_ALL_OFFLINE }, { 0x1051b0, 3, RI_ALL_ONLINE }, | 245 | { 0x122000, 2, RI_ALL_ONLINE }, { 0x122008, 2046, RI_E1_OFFLINE }, |
222 | { 0x1051bc, 1, RI_ALL_OFFLINE }, { 0x1051c0, 3, RI_ALL_ONLINE }, | 246 | { 0x128000, 2, RI_E1HE2_ONLINE }, { 0x128008, 6142, RI_E1HE2_OFFLINE }, |
223 | { 0x1051cc, 1, RI_ALL_OFFLINE }, { 0x1051d0, 3, RI_ALL_ONLINE }, | 247 | { 0x130000, 35, RI_E2_ONLINE }, { 0x130100, 29, RI_E2_ONLINE }, |
224 | { 0x1051dc, 1, RI_ALL_OFFLINE }, { 0x1051e0, 3, RI_ALL_ONLINE }, | 248 | { 0x130180, 1, RI_E2_ONLINE }, { 0x130200, 1, RI_E2_ONLINE }, |
225 | { 0x1051ec, 1, RI_ALL_OFFLINE }, { 0x1051f0, 3, RI_ALL_ONLINE }, | 249 | { 0x130280, 1, RI_E2_ONLINE }, { 0x130300, 5, RI_E2_ONLINE }, |
226 | { 0x1051fc, 1, RI_ALL_OFFLINE }, { 0x105200, 3, RI_ALL_ONLINE }, | 250 | { 0x130380, 1, RI_E2_ONLINE }, { 0x130400, 1, RI_E2_ONLINE }, |
227 | { 0x10520c, 1, RI_ALL_OFFLINE }, { 0x105210, 3, RI_ALL_ONLINE }, | 251 | { 0x130480, 5, RI_E2_ONLINE }, { 0x130800, 72, RI_E2_ONLINE }, |
228 | { 0x10521c, 1, RI_ALL_OFFLINE }, { 0x105220, 3, RI_ALL_ONLINE }, | 252 | { 0x131000, 136, RI_E2_ONLINE }, { 0x132000, 148, RI_E2_ONLINE }, |
229 | { 0x10522c, 1, RI_ALL_OFFLINE }, { 0x105230, 3, RI_ALL_ONLINE }, | 253 | { 0x134000, 544, RI_E2_ONLINE }, { 0x140000, 64, RI_ALL_ONLINE }, |
230 | { 0x10523c, 1, RI_ALL_OFFLINE }, { 0x105240, 3, RI_ALL_ONLINE }, | 254 | { 0x140100, 5, RI_E1E1H_ONLINE }, { 0x140114, 45, RI_ALL_ONLINE }, |
231 | { 0x10524c, 1, RI_ALL_OFFLINE }, { 0x105250, 3, RI_ALL_ONLINE }, | 255 | { 0x140200, 6, RI_ALL_ONLINE }, { 0x140220, 4, RI_E2_ONLINE }, |
232 | { 0x10525c, 1, RI_ALL_OFFLINE }, { 0x105260, 3, RI_ALL_ONLINE }, | 256 | { 0x140240, 4, RI_E2_ONLINE }, { 0x140260, 4, RI_E2_ONLINE }, |
233 | { 0x10526c, 1, RI_ALL_OFFLINE }, { 0x105270, 3, RI_ALL_ONLINE }, | 257 | { 0x140280, 4, RI_E2_ONLINE }, { 0x1402a0, 4, RI_E2_ONLINE }, |
234 | { 0x10527c, 1, RI_ALL_OFFLINE }, { 0x105280, 3, RI_ALL_ONLINE }, | 258 | { 0x1402c0, 4, RI_E2_ONLINE }, { 0x1402e0, 13, RI_E2_ONLINE }, |
235 | { 0x10528c, 1, RI_ALL_OFFLINE }, { 0x105290, 3, RI_ALL_ONLINE }, | 259 | { 0x144000, 4, RI_E1E1H_ONLINE }, { 0x148000, 4, RI_E1E1H_ONLINE }, |
236 | { 0x10529c, 1, RI_ALL_OFFLINE }, { 0x1052a0, 3, RI_ALL_ONLINE }, | 260 | { 0x14c000, 4, RI_E1E1H_ONLINE }, { 0x150000, 4, RI_E1E1H_ONLINE }, |
237 | { 0x1052ac, 1, RI_ALL_OFFLINE }, { 0x1052b0, 3, RI_ALL_ONLINE }, | 261 | { 0x154000, 4, RI_E1E1H_ONLINE }, { 0x158000, 4, RI_E1E1H_ONLINE }, |
238 | { 0x1052bc, 1, RI_ALL_OFFLINE }, { 0x1052c0, 3, RI_ALL_ONLINE }, | 262 | { 0x15c000, 2, RI_E1HE2_ONLINE }, { 0x15c008, 5, RI_E1H_ONLINE }, |
239 | { 0x1052cc, 1, RI_ALL_OFFLINE }, { 0x1052d0, 3, RI_ALL_ONLINE }, | 263 | { 0x15c020, 27, RI_E2_ONLINE }, { 0x15c090, 13, RI_E2_ONLINE }, |
240 | { 0x1052dc, 1, RI_ALL_OFFLINE }, { 0x1052e0, 3, RI_ALL_ONLINE }, | 264 | { 0x15c0c8, 34, RI_E2_ONLINE }, { 0x161000, 7, RI_ALL_ONLINE }, |
241 | { 0x1052ec, 1, RI_ALL_OFFLINE }, { 0x1052f0, 3, RI_ALL_ONLINE }, | 265 | { 0x16103c, 2, RI_E2_ONLINE }, { 0x161800, 2, RI_ALL_ONLINE }, |
242 | { 0x1052fc, 1, RI_ALL_OFFLINE }, { 0x105300, 3, RI_ALL_ONLINE }, | 266 | { 0x164000, 60, RI_ALL_ONLINE }, { 0x164110, 2, RI_E1HE2_ONLINE }, |
243 | { 0x10530c, 1, RI_ALL_OFFLINE }, { 0x105310, 3, RI_ALL_ONLINE }, | 267 | { 0x164118, 15, RI_E2_ONLINE }, { 0x164200, 1, RI_ALL_ONLINE }, |
244 | { 0x10531c, 1, RI_ALL_OFFLINE }, { 0x105320, 3, RI_ALL_ONLINE }, | ||
245 | { 0x10532c, 1, RI_ALL_OFFLINE }, { 0x105330, 3, RI_ALL_ONLINE }, | ||
246 | { 0x10533c, 1, RI_ALL_OFFLINE }, { 0x105340, 3, RI_ALL_ONLINE }, | ||
247 | { 0x10534c, 1, RI_ALL_OFFLINE }, { 0x105350, 3, RI_ALL_ONLINE }, | ||
248 | { 0x10535c, 1, RI_ALL_OFFLINE }, { 0x105360, 3, RI_ALL_ONLINE }, | ||
249 | { 0x10536c, 1, RI_ALL_OFFLINE }, { 0x105370, 3, RI_ALL_ONLINE }, | ||
250 | { 0x10537c, 1, RI_ALL_OFFLINE }, { 0x105380, 3, RI_ALL_ONLINE }, | ||
251 | { 0x10538c, 1, RI_ALL_OFFLINE }, { 0x105390, 3, RI_ALL_ONLINE }, | ||
252 | { 0x10539c, 1, RI_ALL_OFFLINE }, { 0x1053a0, 3, RI_ALL_ONLINE }, | ||
253 | { 0x1053ac, 1, RI_ALL_OFFLINE }, { 0x1053b0, 3, RI_ALL_ONLINE }, | ||
254 | { 0x1053bc, 1, RI_ALL_OFFLINE }, { 0x1053c0, 3, RI_ALL_ONLINE }, | ||
255 | { 0x1053cc, 1, RI_ALL_OFFLINE }, { 0x1053d0, 3, RI_ALL_ONLINE }, | ||
256 | { 0x1053dc, 1, RI_ALL_OFFLINE }, { 0x1053e0, 3, RI_ALL_ONLINE }, | ||
257 | { 0x1053ec, 1, RI_ALL_OFFLINE }, { 0x1053f0, 3, RI_ALL_ONLINE }, | ||
258 | { 0x1053fc, 769, RI_ALL_OFFLINE }, { 0x108000, 33, RI_ALL_ONLINE }, | ||
259 | { 0x108090, 1, RI_ALL_ONLINE }, { 0x1080a0, 1, RI_ALL_ONLINE }, | ||
260 | { 0x1080ac, 5, RI_E1H_ONLINE }, { 0x108100, 5, RI_ALL_ONLINE }, | ||
261 | { 0x108120, 5, RI_ALL_ONLINE }, { 0x108200, 74, RI_ALL_ONLINE }, | ||
262 | { 0x108400, 74, RI_ALL_ONLINE }, { 0x108800, 152, RI_ALL_ONLINE }, | ||
263 | { 0x109000, 1, RI_ALL_ONLINE }, { 0x120000, 347, RI_ALL_ONLINE }, | ||
264 | { 0x120578, 1, RI_ALL_ONLINE }, { 0x120588, 1, RI_ALL_ONLINE }, | ||
265 | { 0x120598, 1, RI_ALL_ONLINE }, { 0x12059c, 23, RI_E1H_ONLINE }, | ||
266 | { 0x120614, 1, RI_E1H_ONLINE }, { 0x12061c, 30, RI_E1H_ONLINE }, | ||
267 | { 0x12080c, 65, RI_ALL_ONLINE }, { 0x120a00, 2, RI_ALL_ONLINE }, | ||
268 | { 0x122000, 2, RI_ALL_ONLINE }, { 0x128000, 2, RI_E1H_ONLINE }, | ||
269 | { 0x140000, 114, RI_ALL_ONLINE }, { 0x1401d4, 1, RI_ALL_ONLINE }, | ||
270 | { 0x1401e4, 1, RI_ALL_ONLINE }, { 0x140200, 6, RI_ALL_ONLINE }, | ||
271 | { 0x144000, 4, RI_ALL_ONLINE }, { 0x148000, 4, RI_ALL_ONLINE }, | ||
272 | { 0x14c000, 4, RI_ALL_ONLINE }, { 0x150000, 4, RI_ALL_ONLINE }, | ||
273 | { 0x154000, 4, RI_ALL_ONLINE }, { 0x158000, 4, RI_ALL_ONLINE }, | ||
274 | { 0x15c000, 7, RI_E1H_ONLINE }, { 0x161000, 7, RI_ALL_ONLINE }, | ||
275 | { 0x161028, 1, RI_ALL_ONLINE }, { 0x161038, 1, RI_ALL_ONLINE }, | ||
276 | { 0x161800, 2, RI_ALL_ONLINE }, { 0x164000, 60, RI_ALL_ONLINE }, | ||
277 | { 0x1640fc, 1, RI_ALL_ONLINE }, { 0x16410c, 1, RI_ALL_ONLINE }, | ||
278 | { 0x164110, 2, RI_E1H_ONLINE }, { 0x164200, 1, RI_ALL_ONLINE }, | ||
279 | { 0x164208, 1, RI_ALL_ONLINE }, { 0x164210, 1, RI_ALL_ONLINE }, | 268 | { 0x164208, 1, RI_ALL_ONLINE }, { 0x164210, 1, RI_ALL_ONLINE }, |
280 | { 0x164218, 1, RI_ALL_ONLINE }, { 0x164220, 1, RI_ALL_ONLINE }, | 269 | { 0x164218, 1, RI_ALL_ONLINE }, { 0x164220, 1, RI_ALL_ONLINE }, |
281 | { 0x164228, 1, RI_ALL_ONLINE }, { 0x164230, 1, RI_ALL_ONLINE }, | 270 | { 0x164228, 1, RI_ALL_ONLINE }, { 0x164230, 1, RI_ALL_ONLINE }, |
@@ -284,169 +273,298 @@ static const struct reg_addr reg_addrs[REGS_COUNT] = { | |||
284 | { 0x164258, 1, RI_ALL_ONLINE }, { 0x164260, 1, RI_ALL_ONLINE }, | 273 | { 0x164258, 1, RI_ALL_ONLINE }, { 0x164260, 1, RI_ALL_ONLINE }, |
285 | { 0x164270, 2, RI_ALL_ONLINE }, { 0x164280, 2, RI_ALL_ONLINE }, | 274 | { 0x164270, 2, RI_ALL_ONLINE }, { 0x164280, 2, RI_ALL_ONLINE }, |
286 | { 0x164800, 2, RI_ALL_ONLINE }, { 0x165000, 2, RI_ALL_ONLINE }, | 275 | { 0x164800, 2, RI_ALL_ONLINE }, { 0x165000, 2, RI_ALL_ONLINE }, |
287 | { 0x166000, 164, RI_ALL_ONLINE }, { 0x16629c, 1, RI_ALL_ONLINE }, | 276 | { 0x166000, 164, RI_ALL_ONLINE }, { 0x1662cc, 7, RI_E2_ONLINE }, |
288 | { 0x1662ac, 1, RI_ALL_ONLINE }, { 0x1662bc, 1, RI_ALL_ONLINE }, | ||
289 | { 0x166400, 49, RI_ALL_ONLINE }, { 0x1664c8, 38, RI_ALL_ONLINE }, | 277 | { 0x166400, 49, RI_ALL_ONLINE }, { 0x1664c8, 38, RI_ALL_ONLINE }, |
290 | { 0x166568, 2, RI_ALL_ONLINE }, { 0x166800, 1, RI_ALL_ONLINE }, | 278 | { 0x166568, 2, RI_ALL_ONLINE }, { 0x166570, 5, RI_E2_ONLINE }, |
291 | { 0x168000, 270, RI_ALL_ONLINE }, { 0x168444, 1, RI_ALL_ONLINE }, | 279 | { 0x166800, 1, RI_ALL_ONLINE }, { 0x168000, 137, RI_ALL_ONLINE }, |
292 | { 0x168454, 1, RI_ALL_ONLINE }, { 0x168800, 19, RI_ALL_ONLINE }, | 280 | { 0x168224, 2, RI_E1E1H_ONLINE }, { 0x16822c, 29, RI_ALL_ONLINE }, |
293 | { 0x168900, 1, RI_ALL_ONLINE }, { 0x168a00, 128, RI_ALL_ONLINE }, | 281 | { 0x1682a0, 12, RI_E1E1H_ONLINE }, { 0x1682d0, 12, RI_ALL_ONLINE }, |
294 | { 0x16a000, 1, RI_ALL_ONLINE }, { 0x16a004, 1535, RI_ALL_OFFLINE }, | 282 | { 0x168300, 2, RI_E1E1H_ONLINE }, { 0x168308, 68, RI_ALL_ONLINE }, |
295 | { 0x16c000, 1, RI_ALL_ONLINE }, { 0x16c004, 1535, RI_ALL_OFFLINE }, | 283 | { 0x168418, 2, RI_E1E1H_ONLINE }, { 0x168420, 6, RI_ALL_ONLINE }, |
296 | { 0x16e000, 16, RI_E1H_ONLINE }, { 0x16e100, 1, RI_E1H_ONLINE }, | 284 | { 0x168800, 19, RI_ALL_ONLINE }, { 0x168900, 1, RI_ALL_ONLINE }, |
297 | { 0x16e200, 2, RI_E1H_ONLINE }, { 0x16e400, 183, RI_E1H_ONLINE }, | 285 | { 0x168a00, 128, RI_ALL_ONLINE }, { 0x16a000, 1, RI_ALL_ONLINE }, |
298 | { 0x170000, 93, RI_ALL_ONLINE }, { 0x170180, 1, RI_ALL_ONLINE }, | 286 | { 0x16a004, 1535, RI_ALL_OFFLINE }, { 0x16c000, 1, RI_ALL_ONLINE }, |
299 | { 0x170190, 1, RI_ALL_ONLINE }, { 0x170200, 4, RI_ALL_ONLINE }, | 287 | { 0x16c004, 1535, RI_ALL_OFFLINE }, { 0x16e000, 16, RI_E1H_ONLINE }, |
300 | { 0x170214, 1, RI_ALL_ONLINE }, { 0x178000, 1, RI_ALL_ONLINE }, | 288 | { 0x16e040, 8, RI_E2_ONLINE }, { 0x16e100, 1, RI_E1H_ONLINE }, |
301 | { 0x180000, 61, RI_ALL_ONLINE }, { 0x180100, 1, RI_ALL_ONLINE }, | 289 | { 0x16e200, 2, RI_E1H_ONLINE }, { 0x16e400, 161, RI_E1H_ONLINE }, |
302 | { 0x180110, 1, RI_ALL_ONLINE }, { 0x180120, 1, RI_ALL_ONLINE }, | 290 | { 0x16e684, 2, RI_E1HE2_ONLINE }, { 0x16e68c, 12, RI_E1H_ONLINE }, |
303 | { 0x180130, 1, RI_ALL_ONLINE }, { 0x18013c, 2, RI_E1H_ONLINE }, | 291 | { 0x16e6bc, 4, RI_E1HE2_ONLINE }, { 0x16e6cc, 4, RI_E1H_ONLINE }, |
304 | { 0x180200, 58, RI_ALL_ONLINE }, { 0x180340, 4, RI_ALL_ONLINE }, | 292 | { 0x16e6e0, 12, RI_E2_ONLINE }, { 0x16e768, 17, RI_E2_ONLINE }, |
305 | { 0x180400, 1, RI_ALL_ONLINE }, { 0x180404, 255, RI_ALL_OFFLINE }, | 293 | { 0x170000, 24, RI_ALL_ONLINE }, { 0x170060, 4, RI_E1E1H_ONLINE }, |
294 | { 0x170070, 65, RI_ALL_ONLINE }, { 0x170194, 11, RI_E2_ONLINE }, | ||
295 | { 0x1701c4, 1, RI_E2_ONLINE }, { 0x1701cc, 7, RI_E2_ONLINE }, | ||
296 | { 0x1701ec, 1, RI_E2_ONLINE }, { 0x1701f4, 1, RI_E2_ONLINE }, | ||
297 | { 0x170200, 4, RI_ALL_ONLINE }, { 0x170214, 1, RI_ALL_ONLINE }, | ||
298 | { 0x170218, 77, RI_E2_ONLINE }, { 0x170400, 64, RI_E2_ONLINE }, | ||
299 | { 0x178000, 1, RI_ALL_ONLINE }, { 0x180000, 61, RI_ALL_ONLINE }, | ||
300 | { 0x18013c, 2, RI_E1HE2_ONLINE }, { 0x180200, 58, RI_ALL_ONLINE }, | ||
301 | { 0x180340, 4, RI_ALL_ONLINE }, { 0x180380, 1, RI_E2_ONLINE }, | ||
302 | { 0x180388, 1, RI_E2_ONLINE }, { 0x180390, 1, RI_E2_ONLINE }, | ||
303 | { 0x180398, 1, RI_E2_ONLINE }, { 0x1803a0, 5, RI_E2_ONLINE }, | ||
304 | { 0x180400, 1, RI_ALL_ONLINE }, { 0x180404, 255, RI_E1E1H_OFFLINE }, | ||
306 | { 0x181000, 4, RI_ALL_ONLINE }, { 0x181010, 1020, RI_ALL_OFFLINE }, | 305 | { 0x181000, 4, RI_ALL_ONLINE }, { 0x181010, 1020, RI_ALL_OFFLINE }, |
307 | { 0x1a0000, 1, RI_ALL_ONLINE }, { 0x1a0004, 1023, RI_ALL_OFFLINE }, | 306 | { 0x1a0000, 1, RI_ALL_ONLINE }, { 0x1a0004, 5631, RI_ALL_OFFLINE }, |
308 | { 0x1a1000, 1, RI_ALL_ONLINE }, { 0x1a1004, 4607, RI_ALL_OFFLINE }, | 307 | { 0x1a5800, 2560, RI_E1HE2_OFFLINE }, { 0x1a8000, 1, RI_ALL_ONLINE }, |
309 | { 0x1a5800, 2560, RI_E1H_OFFLINE }, { 0x1a8000, 64, RI_ALL_OFFLINE }, | 308 | { 0x1a8004, 8191, RI_E1HE2_OFFLINE }, { 0x1b0000, 1, RI_ALL_ONLINE }, |
310 | { 0x1a8100, 1984, RI_E1H_OFFLINE }, { 0x1aa000, 1, RI_E1H_ONLINE }, | 309 | { 0x1b0004, 15, RI_E1H_OFFLINE }, { 0x1b0040, 1, RI_E1HE2_ONLINE }, |
311 | { 0x1aa004, 6655, RI_E1H_OFFLINE }, { 0x1b1800, 128, RI_ALL_OFFLINE }, | 310 | { 0x1b0044, 239, RI_E1H_OFFLINE }, { 0x1b0400, 1, RI_ALL_ONLINE }, |
312 | { 0x1b1c00, 128, RI_ALL_OFFLINE }, { 0x1b2000, 1, RI_ALL_OFFLINE }, | 311 | { 0x1b0404, 255, RI_E1H_OFFLINE }, { 0x1b0800, 1, RI_ALL_ONLINE }, |
313 | { 0x1b2400, 64, RI_E1H_OFFLINE }, { 0x1b8200, 1, RI_ALL_ONLINE }, | 312 | { 0x1b0840, 1, RI_E1HE2_ONLINE }, { 0x1b0c00, 1, RI_ALL_ONLINE }, |
313 | { 0x1b1000, 1, RI_ALL_ONLINE }, { 0x1b1040, 1, RI_E1HE2_ONLINE }, | ||
314 | { 0x1b1400, 1, RI_ALL_ONLINE }, { 0x1b1440, 1, RI_E1HE2_ONLINE }, | ||
315 | { 0x1b1480, 1, RI_E1HE2_ONLINE }, { 0x1b14c0, 1, RI_E1HE2_ONLINE }, | ||
316 | { 0x1b1800, 128, RI_ALL_OFFLINE }, { 0x1b1c00, 128, RI_ALL_OFFLINE }, | ||
317 | { 0x1b2000, 1, RI_ALL_ONLINE }, { 0x1b2400, 1, RI_E1HE2_ONLINE }, | ||
318 | { 0x1b2404, 5631, RI_E2_OFFLINE }, { 0x1b8000, 1, RI_ALL_ONLINE }, | ||
319 | { 0x1b8040, 1, RI_ALL_ONLINE }, { 0x1b8080, 1, RI_ALL_ONLINE }, | ||
320 | { 0x1b80c0, 1, RI_ALL_ONLINE }, { 0x1b8100, 1, RI_ALL_ONLINE }, | ||
321 | { 0x1b8140, 1, RI_ALL_ONLINE }, { 0x1b8180, 1, RI_ALL_ONLINE }, | ||
322 | { 0x1b81c0, 1, RI_ALL_ONLINE }, { 0x1b8200, 1, RI_ALL_ONLINE }, | ||
314 | { 0x1b8240, 1, RI_ALL_ONLINE }, { 0x1b8280, 1, RI_ALL_ONLINE }, | 323 | { 0x1b8240, 1, RI_ALL_ONLINE }, { 0x1b8280, 1, RI_ALL_ONLINE }, |
315 | { 0x1b82c0, 1, RI_ALL_ONLINE }, { 0x1b8a00, 1, RI_ALL_ONLINE }, | 324 | { 0x1b82c0, 1, RI_ALL_ONLINE }, { 0x1b8300, 1, RI_ALL_ONLINE }, |
316 | { 0x1b8a80, 1, RI_ALL_ONLINE }, { 0x1c0000, 2, RI_ALL_ONLINE }, | 325 | { 0x1b8340, 1, RI_ALL_ONLINE }, { 0x1b8380, 1, RI_ALL_ONLINE }, |
317 | { 0x200000, 65, RI_ALL_ONLINE }, { 0x200110, 1, RI_ALL_ONLINE }, | 326 | { 0x1b83c0, 1, RI_ALL_ONLINE }, { 0x1b8400, 1, RI_ALL_ONLINE }, |
318 | { 0x200120, 1, RI_ALL_ONLINE }, { 0x200130, 1, RI_ALL_ONLINE }, | 327 | { 0x1b8440, 1, RI_ALL_ONLINE }, { 0x1b8480, 1, RI_ALL_ONLINE }, |
319 | { 0x200140, 1, RI_ALL_ONLINE }, { 0x20014c, 2, RI_E1H_ONLINE }, | 328 | { 0x1b84c0, 1, RI_ALL_ONLINE }, { 0x1b8500, 1, RI_ALL_ONLINE }, |
320 | { 0x200200, 58, RI_ALL_ONLINE }, { 0x200340, 4, RI_ALL_ONLINE }, | 329 | { 0x1b8540, 1, RI_ALL_ONLINE }, { 0x1b8580, 1, RI_ALL_ONLINE }, |
321 | { 0x200400, 1, RI_ALL_ONLINE }, { 0x200404, 255, RI_ALL_OFFLINE }, | 330 | { 0x1b85c0, 19, RI_E2_ONLINE }, { 0x1b8800, 1, RI_ALL_ONLINE }, |
322 | { 0x202000, 4, RI_ALL_ONLINE }, { 0x202010, 2044, RI_ALL_OFFLINE }, | 331 | { 0x1b8840, 1, RI_ALL_ONLINE }, { 0x1b8880, 1, RI_ALL_ONLINE }, |
323 | { 0x220000, 1, RI_ALL_ONLINE }, { 0x220004, 1023, RI_ALL_OFFLINE }, | 332 | { 0x1b88c0, 1, RI_ALL_ONLINE }, { 0x1b8900, 1, RI_ALL_ONLINE }, |
324 | { 0x221000, 1, RI_ALL_ONLINE }, { 0x221004, 4607, RI_ALL_OFFLINE }, | 333 | { 0x1b8940, 1, RI_ALL_ONLINE }, { 0x1b8980, 1, RI_ALL_ONLINE }, |
325 | { 0x225800, 1536, RI_E1H_OFFLINE }, { 0x227000, 1, RI_E1H_ONLINE }, | 334 | { 0x1b89c0, 1, RI_ALL_ONLINE }, { 0x1b8a00, 1, RI_ALL_ONLINE }, |
326 | { 0x227004, 1023, RI_E1H_OFFLINE }, { 0x228000, 64, RI_ALL_OFFLINE }, | 335 | { 0x1b8a40, 1, RI_ALL_ONLINE }, { 0x1b8a80, 1, RI_ALL_ONLINE }, |
327 | { 0x228100, 8640, RI_E1H_OFFLINE }, { 0x231800, 128, RI_ALL_OFFLINE }, | 336 | { 0x1b8ac0, 1, RI_ALL_ONLINE }, { 0x1b8b00, 1, RI_ALL_ONLINE }, |
328 | { 0x231c00, 128, RI_ALL_OFFLINE }, { 0x232000, 1, RI_ALL_OFFLINE }, | 337 | { 0x1b8b40, 1, RI_ALL_ONLINE }, { 0x1b8b80, 1, RI_ALL_ONLINE }, |
329 | { 0x232400, 64, RI_E1H_OFFLINE }, { 0x238200, 1, RI_ALL_ONLINE }, | 338 | { 0x1b8bc0, 1, RI_ALL_ONLINE }, { 0x1b8c00, 1, RI_ALL_ONLINE }, |
330 | { 0x238240, 1, RI_ALL_ONLINE }, { 0x238280, 1, RI_ALL_ONLINE }, | 339 | { 0x1b8c40, 1, RI_ALL_ONLINE }, { 0x1b8c80, 1, RI_ALL_ONLINE }, |
331 | { 0x2382c0, 1, RI_ALL_ONLINE }, { 0x238a00, 1, RI_ALL_ONLINE }, | 340 | { 0x1b8cc0, 1, RI_ALL_ONLINE }, { 0x1b8cc4, 1, RI_E2_ONLINE }, |
332 | { 0x238a80, 1, RI_ALL_ONLINE }, { 0x240000, 2, RI_ALL_ONLINE }, | 341 | { 0x1b8d00, 1, RI_ALL_ONLINE }, { 0x1b8d40, 1, RI_ALL_ONLINE }, |
333 | { 0x280000, 65, RI_ALL_ONLINE }, { 0x280110, 1, RI_ALL_ONLINE }, | 342 | { 0x1b8d80, 1, RI_ALL_ONLINE }, { 0x1b8dc0, 1, RI_ALL_ONLINE }, |
334 | { 0x280120, 1, RI_ALL_ONLINE }, { 0x280130, 1, RI_ALL_ONLINE }, | 343 | { 0x1b8e00, 1, RI_ALL_ONLINE }, { 0x1b8e40, 1, RI_ALL_ONLINE }, |
335 | { 0x280140, 1, RI_ALL_ONLINE }, { 0x28014c, 2, RI_E1H_ONLINE }, | 344 | { 0x1b8e80, 1, RI_ALL_ONLINE }, { 0x1b8e84, 1, RI_E2_ONLINE }, |
336 | { 0x280200, 58, RI_ALL_ONLINE }, { 0x280340, 4, RI_ALL_ONLINE }, | 345 | { 0x1b8ec0, 1, RI_E1HE2_ONLINE }, { 0x1b8f00, 1, RI_E1HE2_ONLINE }, |
337 | { 0x280400, 1, RI_ALL_ONLINE }, { 0x280404, 255, RI_ALL_OFFLINE }, | 346 | { 0x1b8f40, 1, RI_E1HE2_ONLINE }, { 0x1b8f80, 1, RI_E1HE2_ONLINE }, |
338 | { 0x282000, 4, RI_ALL_ONLINE }, { 0x282010, 2044, RI_ALL_OFFLINE }, | 347 | { 0x1b8fc0, 1, RI_E1HE2_ONLINE }, { 0x1b8fc4, 2, RI_E2_ONLINE }, |
339 | { 0x2a0000, 1, RI_ALL_ONLINE }, { 0x2a0004, 1023, RI_ALL_OFFLINE }, | 348 | { 0x1b8fd0, 6, RI_E2_ONLINE }, { 0x1b9000, 1, RI_E2_ONLINE }, |
340 | { 0x2a1000, 1, RI_ALL_ONLINE }, { 0x2a1004, 4607, RI_ALL_OFFLINE }, | 349 | { 0x1b9040, 3, RI_E2_ONLINE }, { 0x1b9400, 14, RI_E2_ONLINE }, |
341 | { 0x2a5800, 2560, RI_E1H_OFFLINE }, { 0x2a8000, 64, RI_ALL_OFFLINE }, | 350 | { 0x1b943c, 19, RI_E2_ONLINE }, { 0x1b9490, 10, RI_E2_ONLINE }, |
342 | { 0x2a8100, 960, RI_E1H_OFFLINE }, { 0x2a9000, 1, RI_E1H_ONLINE }, | 351 | { 0x1c0000, 2, RI_ALL_ONLINE }, { 0x200000, 65, RI_ALL_ONLINE }, |
343 | { 0x2a9004, 7679, RI_E1H_OFFLINE }, { 0x2b1800, 128, RI_ALL_OFFLINE }, | 352 | { 0x20014c, 2, RI_E1HE2_ONLINE }, { 0x200200, 58, RI_ALL_ONLINE }, |
344 | { 0x2b1c00, 128, RI_ALL_OFFLINE }, { 0x2b2000, 1, RI_ALL_OFFLINE }, | 353 | { 0x200340, 4, RI_ALL_ONLINE }, { 0x200380, 1, RI_E2_ONLINE }, |
345 | { 0x2b2400, 64, RI_E1H_OFFLINE }, { 0x2b8200, 1, RI_ALL_ONLINE }, | 354 | { 0x200388, 1, RI_E2_ONLINE }, { 0x200390, 1, RI_E2_ONLINE }, |
346 | { 0x2b8240, 1, RI_ALL_ONLINE }, { 0x2b8280, 1, RI_ALL_ONLINE }, | 355 | { 0x200398, 1, RI_E2_ONLINE }, { 0x2003a0, 1, RI_E2_ONLINE }, |
347 | { 0x2b82c0, 1, RI_ALL_ONLINE }, { 0x2b8a00, 1, RI_ALL_ONLINE }, | 356 | { 0x2003a8, 2, RI_E2_ONLINE }, { 0x200400, 1, RI_ALL_ONLINE }, |
348 | { 0x2b8a80, 1, RI_ALL_ONLINE }, { 0x2c0000, 2, RI_ALL_ONLINE }, | 357 | { 0x200404, 255, RI_E1E1H_OFFLINE }, { 0x202000, 4, RI_ALL_ONLINE }, |
349 | { 0x300000, 65, RI_ALL_ONLINE }, { 0x300110, 1, RI_ALL_ONLINE }, | 358 | { 0x202010, 2044, RI_ALL_OFFLINE }, { 0x220000, 1, RI_ALL_ONLINE }, |
350 | { 0x300120, 1, RI_ALL_ONLINE }, { 0x300130, 1, RI_ALL_ONLINE }, | 359 | { 0x220004, 5631, RI_ALL_OFFLINE }, { 0x225800, 2560, RI_E1HE2_OFFLINE}, |
351 | { 0x300140, 1, RI_ALL_ONLINE }, { 0x30014c, 2, RI_E1H_ONLINE }, | 360 | { 0x228000, 1, RI_ALL_ONLINE }, { 0x228004, 8191, RI_E1HE2_OFFLINE }, |
361 | { 0x230000, 1, RI_ALL_ONLINE }, { 0x230004, 15, RI_E1H_OFFLINE }, | ||
362 | { 0x230040, 1, RI_E1HE2_ONLINE }, { 0x230044, 239, RI_E1H_OFFLINE }, | ||
363 | { 0x230400, 1, RI_ALL_ONLINE }, { 0x230404, 255, RI_E1H_OFFLINE }, | ||
364 | { 0x230800, 1, RI_ALL_ONLINE }, { 0x230840, 1, RI_E1HE2_ONLINE }, | ||
365 | { 0x230c00, 1, RI_ALL_ONLINE }, { 0x231000, 1, RI_ALL_ONLINE }, | ||
366 | { 0x231040, 1, RI_E1HE2_ONLINE }, { 0x231400, 1, RI_ALL_ONLINE }, | ||
367 | { 0x231440, 1, RI_E1HE2_ONLINE }, { 0x231480, 1, RI_E1HE2_ONLINE }, | ||
368 | { 0x2314c0, 1, RI_E1HE2_ONLINE }, { 0x231800, 128, RI_ALL_OFFLINE }, | ||
369 | { 0x231c00, 128, RI_ALL_OFFLINE }, { 0x232000, 1, RI_ALL_ONLINE }, | ||
370 | { 0x232400, 1, RI_E1HE2_ONLINE }, { 0x232404, 5631, RI_E2_OFFLINE }, | ||
371 | { 0x238000, 1, RI_ALL_ONLINE }, { 0x238040, 1, RI_ALL_ONLINE }, | ||
372 | { 0x238080, 1, RI_ALL_ONLINE }, { 0x2380c0, 1, RI_ALL_ONLINE }, | ||
373 | { 0x238100, 1, RI_ALL_ONLINE }, { 0x238140, 1, RI_ALL_ONLINE }, | ||
374 | { 0x238180, 1, RI_ALL_ONLINE }, { 0x2381c0, 1, RI_ALL_ONLINE }, | ||
375 | { 0x238200, 1, RI_ALL_ONLINE }, { 0x238240, 1, RI_ALL_ONLINE }, | ||
376 | { 0x238280, 1, RI_ALL_ONLINE }, { 0x2382c0, 1, RI_ALL_ONLINE }, | ||
377 | { 0x238300, 1, RI_ALL_ONLINE }, { 0x238340, 1, RI_ALL_ONLINE }, | ||
378 | { 0x238380, 1, RI_ALL_ONLINE }, { 0x2383c0, 1, RI_ALL_ONLINE }, | ||
379 | { 0x238400, 1, RI_ALL_ONLINE }, { 0x238440, 1, RI_ALL_ONLINE }, | ||
380 | { 0x238480, 1, RI_ALL_ONLINE }, { 0x2384c0, 1, RI_ALL_ONLINE }, | ||
381 | { 0x238500, 1, RI_ALL_ONLINE }, { 0x238540, 1, RI_ALL_ONLINE }, | ||
382 | { 0x238580, 1, RI_ALL_ONLINE }, { 0x2385c0, 19, RI_E2_ONLINE }, | ||
383 | { 0x238800, 1, RI_ALL_ONLINE }, { 0x238840, 1, RI_ALL_ONLINE }, | ||
384 | { 0x238880, 1, RI_ALL_ONLINE }, { 0x2388c0, 1, RI_ALL_ONLINE }, | ||
385 | { 0x238900, 1, RI_ALL_ONLINE }, { 0x238940, 1, RI_ALL_ONLINE }, | ||
386 | { 0x238980, 1, RI_ALL_ONLINE }, { 0x2389c0, 1, RI_ALL_ONLINE }, | ||
387 | { 0x238a00, 1, RI_ALL_ONLINE }, { 0x238a40, 1, RI_ALL_ONLINE }, | ||
388 | { 0x238a80, 1, RI_ALL_ONLINE }, { 0x238ac0, 1, RI_ALL_ONLINE }, | ||
389 | { 0x238b00, 1, RI_ALL_ONLINE }, { 0x238b40, 1, RI_ALL_ONLINE }, | ||
390 | { 0x238b80, 1, RI_ALL_ONLINE }, { 0x238bc0, 1, RI_ALL_ONLINE }, | ||
391 | { 0x238c00, 1, RI_ALL_ONLINE }, { 0x238c40, 1, RI_ALL_ONLINE }, | ||
392 | { 0x238c80, 1, RI_ALL_ONLINE }, { 0x238cc0, 1, RI_ALL_ONLINE }, | ||
393 | { 0x238cc4, 1, RI_E2_ONLINE }, { 0x238d00, 1, RI_ALL_ONLINE }, | ||
394 | { 0x238d40, 1, RI_ALL_ONLINE }, { 0x238d80, 1, RI_ALL_ONLINE }, | ||
395 | { 0x238dc0, 1, RI_ALL_ONLINE }, { 0x238e00, 1, RI_ALL_ONLINE }, | ||
396 | { 0x238e40, 1, RI_ALL_ONLINE }, { 0x238e80, 1, RI_ALL_ONLINE }, | ||
397 | { 0x238e84, 1, RI_E2_ONLINE }, { 0x238ec0, 1, RI_E1HE2_ONLINE }, | ||
398 | { 0x238f00, 1, RI_E1HE2_ONLINE }, { 0x238f40, 1, RI_E1HE2_ONLINE }, | ||
399 | { 0x238f80, 1, RI_E1HE2_ONLINE }, { 0x238fc0, 1, RI_E1HE2_ONLINE }, | ||
400 | { 0x238fc4, 2, RI_E2_ONLINE }, { 0x238fd0, 6, RI_E2_ONLINE }, | ||
401 | { 0x239000, 1, RI_E2_ONLINE }, { 0x239040, 3, RI_E2_ONLINE }, | ||
402 | { 0x240000, 2, RI_ALL_ONLINE }, { 0x280000, 65, RI_ALL_ONLINE }, | ||
403 | { 0x28014c, 2, RI_E1HE2_ONLINE }, { 0x280200, 58, RI_ALL_ONLINE }, | ||
404 | { 0x280340, 4, RI_ALL_ONLINE }, { 0x280380, 1, RI_E2_ONLINE }, | ||
405 | { 0x280388, 1, RI_E2_ONLINE }, { 0x280390, 1, RI_E2_ONLINE }, | ||
406 | { 0x280398, 1, RI_E2_ONLINE }, { 0x2803a0, 1, RI_E2_ONLINE }, | ||
407 | { 0x2803a8, 2, RI_E2_ONLINE }, { 0x280400, 1, RI_ALL_ONLINE }, | ||
408 | { 0x280404, 255, RI_E1E1H_OFFLINE }, { 0x282000, 4, RI_ALL_ONLINE }, | ||
409 | { 0x282010, 2044, RI_ALL_OFFLINE }, { 0x2a0000, 1, RI_ALL_ONLINE }, | ||
410 | { 0x2a0004, 5631, RI_ALL_OFFLINE }, { 0x2a5800, 2560, RI_E1HE2_OFFLINE}, | ||
411 | { 0x2a8000, 1, RI_ALL_ONLINE }, { 0x2a8004, 8191, RI_E1HE2_OFFLINE }, | ||
412 | { 0x2b0000, 1, RI_ALL_ONLINE }, { 0x2b0004, 15, RI_E1H_OFFLINE }, | ||
413 | { 0x2b0040, 1, RI_E1HE2_ONLINE }, { 0x2b0044, 239, RI_E1H_OFFLINE }, | ||
414 | { 0x2b0400, 1, RI_ALL_ONLINE }, { 0x2b0404, 255, RI_E1H_OFFLINE }, | ||
415 | { 0x2b0800, 1, RI_ALL_ONLINE }, { 0x2b0840, 1, RI_E1HE2_ONLINE }, | ||
416 | { 0x2b0c00, 1, RI_ALL_ONLINE }, { 0x2b1000, 1, RI_ALL_ONLINE }, | ||
417 | { 0x2b1040, 1, RI_E1HE2_ONLINE }, { 0x2b1400, 1, RI_ALL_ONLINE }, | ||
418 | { 0x2b1440, 1, RI_E1HE2_ONLINE }, { 0x2b1480, 1, RI_E1HE2_ONLINE }, | ||
419 | { 0x2b14c0, 1, RI_E1HE2_ONLINE }, { 0x2b1800, 128, RI_ALL_OFFLINE }, | ||
420 | { 0x2b1c00, 128, RI_ALL_OFFLINE }, { 0x2b2000, 1, RI_ALL_ONLINE }, | ||
421 | { 0x2b2400, 1, RI_E1HE2_ONLINE }, { 0x2b2404, 5631, RI_E2_OFFLINE }, | ||
422 | { 0x2b8000, 1, RI_ALL_ONLINE }, { 0x2b8040, 1, RI_ALL_ONLINE }, | ||
423 | { 0x2b8080, 1, RI_ALL_ONLINE }, { 0x2b80c0, 1, RI_ALL_ONLINE }, | ||
424 | { 0x2b8100, 1, RI_ALL_ONLINE }, { 0x2b8140, 1, RI_ALL_ONLINE }, | ||
425 | { 0x2b8180, 1, RI_ALL_ONLINE }, { 0x2b81c0, 1, RI_ALL_ONLINE }, | ||
426 | { 0x2b8200, 1, RI_ALL_ONLINE }, { 0x2b8240, 1, RI_ALL_ONLINE }, | ||
427 | { 0x2b8280, 1, RI_ALL_ONLINE }, { 0x2b82c0, 1, RI_ALL_ONLINE }, | ||
428 | { 0x2b8300, 1, RI_ALL_ONLINE }, { 0x2b8340, 1, RI_ALL_ONLINE }, | ||
429 | { 0x2b8380, 1, RI_ALL_ONLINE }, { 0x2b83c0, 1, RI_ALL_ONLINE }, | ||
430 | { 0x2b8400, 1, RI_ALL_ONLINE }, { 0x2b8440, 1, RI_ALL_ONLINE }, | ||
431 | { 0x2b8480, 1, RI_ALL_ONLINE }, { 0x2b84c0, 1, RI_ALL_ONLINE }, | ||
432 | { 0x2b8500, 1, RI_ALL_ONLINE }, { 0x2b8540, 1, RI_ALL_ONLINE }, | ||
433 | { 0x2b8580, 1, RI_ALL_ONLINE }, { 0x2b85c0, 19, RI_E2_ONLINE }, | ||
434 | { 0x2b8800, 1, RI_ALL_ONLINE }, { 0x2b8840, 1, RI_ALL_ONLINE }, | ||
435 | { 0x2b8880, 1, RI_ALL_ONLINE }, { 0x2b88c0, 1, RI_ALL_ONLINE }, | ||
436 | { 0x2b8900, 1, RI_ALL_ONLINE }, { 0x2b8940, 1, RI_ALL_ONLINE }, | ||
437 | { 0x2b8980, 1, RI_ALL_ONLINE }, { 0x2b89c0, 1, RI_ALL_ONLINE }, | ||
438 | { 0x2b8a00, 1, RI_ALL_ONLINE }, { 0x2b8a40, 1, RI_ALL_ONLINE }, | ||
439 | { 0x2b8a80, 1, RI_ALL_ONLINE }, { 0x2b8ac0, 1, RI_ALL_ONLINE }, | ||
440 | { 0x2b8b00, 1, RI_ALL_ONLINE }, { 0x2b8b40, 1, RI_ALL_ONLINE }, | ||
441 | { 0x2b8b80, 1, RI_ALL_ONLINE }, { 0x2b8bc0, 1, RI_ALL_ONLINE }, | ||
442 | { 0x2b8c00, 1, RI_ALL_ONLINE }, { 0x2b8c40, 1, RI_ALL_ONLINE }, | ||
443 | { 0x2b8c80, 1, RI_ALL_ONLINE }, { 0x2b8cc0, 1, RI_ALL_ONLINE }, | ||
444 | { 0x2b8cc4, 1, RI_E2_ONLINE }, { 0x2b8d00, 1, RI_ALL_ONLINE }, | ||
445 | { 0x2b8d40, 1, RI_ALL_ONLINE }, { 0x2b8d80, 1, RI_ALL_ONLINE }, | ||
446 | { 0x2b8dc0, 1, RI_ALL_ONLINE }, { 0x2b8e00, 1, RI_ALL_ONLINE }, | ||
447 | { 0x2b8e40, 1, RI_ALL_ONLINE }, { 0x2b8e80, 1, RI_ALL_ONLINE }, | ||
448 | { 0x2b8e84, 1, RI_E2_ONLINE }, { 0x2b8ec0, 1, RI_E1HE2_ONLINE }, | ||
449 | { 0x2b8f00, 1, RI_E1HE2_ONLINE }, { 0x2b8f40, 1, RI_E1HE2_ONLINE }, | ||
450 | { 0x2b8f80, 1, RI_E1HE2_ONLINE }, { 0x2b8fc0, 1, RI_E1HE2_ONLINE }, | ||
451 | { 0x2b8fc4, 2, RI_E2_ONLINE }, { 0x2b8fd0, 6, RI_E2_ONLINE }, | ||
452 | { 0x2b9000, 1, RI_E2_ONLINE }, { 0x2b9040, 3, RI_E2_ONLINE }, | ||
453 | { 0x2b9400, 14, RI_E2_ONLINE }, { 0x2b943c, 19, RI_E2_ONLINE }, | ||
454 | { 0x2b9490, 10, RI_E2_ONLINE }, { 0x2c0000, 2, RI_ALL_ONLINE }, | ||
455 | { 0x300000, 65, RI_ALL_ONLINE }, { 0x30014c, 2, RI_E1HE2_ONLINE }, | ||
352 | { 0x300200, 58, RI_ALL_ONLINE }, { 0x300340, 4, RI_ALL_ONLINE }, | 456 | { 0x300200, 58, RI_ALL_ONLINE }, { 0x300340, 4, RI_ALL_ONLINE }, |
353 | { 0x300400, 1, RI_ALL_ONLINE }, { 0x300404, 255, RI_ALL_OFFLINE }, | 457 | { 0x300380, 1, RI_E2_ONLINE }, { 0x300388, 1, RI_E2_ONLINE }, |
458 | { 0x300390, 1, RI_E2_ONLINE }, { 0x300398, 1, RI_E2_ONLINE }, | ||
459 | { 0x3003a0, 1, RI_E2_ONLINE }, { 0x3003a8, 2, RI_E2_ONLINE }, | ||
460 | { 0x300400, 1, RI_ALL_ONLINE }, { 0x300404, 255, RI_E1E1H_OFFLINE }, | ||
354 | { 0x302000, 4, RI_ALL_ONLINE }, { 0x302010, 2044, RI_ALL_OFFLINE }, | 461 | { 0x302000, 4, RI_ALL_ONLINE }, { 0x302010, 2044, RI_ALL_OFFLINE }, |
355 | { 0x320000, 1, RI_ALL_ONLINE }, { 0x320004, 1023, RI_ALL_OFFLINE }, | 462 | { 0x320000, 1, RI_ALL_ONLINE }, { 0x320004, 5631, RI_ALL_OFFLINE }, |
356 | { 0x321000, 1, RI_ALL_ONLINE }, { 0x321004, 4607, RI_ALL_OFFLINE }, | 463 | { 0x325800, 2560, RI_E1HE2_OFFLINE }, { 0x328000, 1, RI_ALL_ONLINE }, |
357 | { 0x325800, 2560, RI_E1H_OFFLINE }, { 0x328000, 64, RI_ALL_OFFLINE }, | 464 | { 0x328004, 8191, RI_E1HE2_OFFLINE }, { 0x330000, 1, RI_ALL_ONLINE }, |
358 | { 0x328100, 536, RI_E1H_OFFLINE }, { 0x328960, 1, RI_E1H_ONLINE }, | 465 | { 0x330004, 15, RI_E1H_OFFLINE }, { 0x330040, 1, RI_E1HE2_ONLINE }, |
359 | { 0x328964, 8103, RI_E1H_OFFLINE }, { 0x331800, 128, RI_ALL_OFFLINE }, | 466 | { 0x330044, 239, RI_E1H_OFFLINE }, { 0x330400, 1, RI_ALL_ONLINE }, |
360 | { 0x331c00, 128, RI_ALL_OFFLINE }, { 0x332000, 1, RI_ALL_OFFLINE }, | 467 | { 0x330404, 255, RI_E1H_OFFLINE }, { 0x330800, 1, RI_ALL_ONLINE }, |
361 | { 0x332400, 64, RI_E1H_OFFLINE }, { 0x338200, 1, RI_ALL_ONLINE }, | 468 | { 0x330840, 1, RI_E1HE2_ONLINE }, { 0x330c00, 1, RI_ALL_ONLINE }, |
469 | { 0x331000, 1, RI_ALL_ONLINE }, { 0x331040, 1, RI_E1HE2_ONLINE }, | ||
470 | { 0x331400, 1, RI_ALL_ONLINE }, { 0x331440, 1, RI_E1HE2_ONLINE }, | ||
471 | { 0x331480, 1, RI_E1HE2_ONLINE }, { 0x3314c0, 1, RI_E1HE2_ONLINE }, | ||
472 | { 0x331800, 128, RI_ALL_OFFLINE }, { 0x331c00, 128, RI_ALL_OFFLINE }, | ||
473 | { 0x332000, 1, RI_ALL_ONLINE }, { 0x332400, 1, RI_E1HE2_ONLINE }, | ||
474 | { 0x332404, 5631, RI_E2_OFFLINE }, { 0x338000, 1, RI_ALL_ONLINE }, | ||
475 | { 0x338040, 1, RI_ALL_ONLINE }, { 0x338080, 1, RI_ALL_ONLINE }, | ||
476 | { 0x3380c0, 1, RI_ALL_ONLINE }, { 0x338100, 1, RI_ALL_ONLINE }, | ||
477 | { 0x338140, 1, RI_ALL_ONLINE }, { 0x338180, 1, RI_ALL_ONLINE }, | ||
478 | { 0x3381c0, 1, RI_ALL_ONLINE }, { 0x338200, 1, RI_ALL_ONLINE }, | ||
362 | { 0x338240, 1, RI_ALL_ONLINE }, { 0x338280, 1, RI_ALL_ONLINE }, | 479 | { 0x338240, 1, RI_ALL_ONLINE }, { 0x338280, 1, RI_ALL_ONLINE }, |
363 | { 0x3382c0, 1, RI_ALL_ONLINE }, { 0x338a00, 1, RI_ALL_ONLINE }, | 480 | { 0x3382c0, 1, RI_ALL_ONLINE }, { 0x338300, 1, RI_ALL_ONLINE }, |
364 | { 0x338a80, 1, RI_ALL_ONLINE }, { 0x340000, 2, RI_ALL_ONLINE } | 481 | { 0x338340, 1, RI_ALL_ONLINE }, { 0x338380, 1, RI_ALL_ONLINE }, |
482 | { 0x3383c0, 1, RI_ALL_ONLINE }, { 0x338400, 1, RI_ALL_ONLINE }, | ||
483 | { 0x338440, 1, RI_ALL_ONLINE }, { 0x338480, 1, RI_ALL_ONLINE }, | ||
484 | { 0x3384c0, 1, RI_ALL_ONLINE }, { 0x338500, 1, RI_ALL_ONLINE }, | ||
485 | { 0x338540, 1, RI_ALL_ONLINE }, { 0x338580, 1, RI_ALL_ONLINE }, | ||
486 | { 0x3385c0, 19, RI_E2_ONLINE }, { 0x338800, 1, RI_ALL_ONLINE }, | ||
487 | { 0x338840, 1, RI_ALL_ONLINE }, { 0x338880, 1, RI_ALL_ONLINE }, | ||
488 | { 0x3388c0, 1, RI_ALL_ONLINE }, { 0x338900, 1, RI_ALL_ONLINE }, | ||
489 | { 0x338940, 1, RI_ALL_ONLINE }, { 0x338980, 1, RI_ALL_ONLINE }, | ||
490 | { 0x3389c0, 1, RI_ALL_ONLINE }, { 0x338a00, 1, RI_ALL_ONLINE }, | ||
491 | { 0x338a40, 1, RI_ALL_ONLINE }, { 0x338a80, 1, RI_ALL_ONLINE }, | ||
492 | { 0x338ac0, 1, RI_ALL_ONLINE }, { 0x338b00, 1, RI_ALL_ONLINE }, | ||
493 | { 0x338b40, 1, RI_ALL_ONLINE }, { 0x338b80, 1, RI_ALL_ONLINE }, | ||
494 | { 0x338bc0, 1, RI_ALL_ONLINE }, { 0x338c00, 1, RI_ALL_ONLINE }, | ||
495 | { 0x338c40, 1, RI_ALL_ONLINE }, { 0x338c80, 1, RI_ALL_ONLINE }, | ||
496 | { 0x338cc0, 1, RI_ALL_ONLINE }, { 0x338cc4, 1, RI_E2_ONLINE }, | ||
497 | { 0x338d00, 1, RI_ALL_ONLINE }, { 0x338d40, 1, RI_ALL_ONLINE }, | ||
498 | { 0x338d80, 1, RI_ALL_ONLINE }, { 0x338dc0, 1, RI_ALL_ONLINE }, | ||
499 | { 0x338e00, 1, RI_ALL_ONLINE }, { 0x338e40, 1, RI_ALL_ONLINE }, | ||
500 | { 0x338e80, 1, RI_ALL_ONLINE }, { 0x338e84, 1, RI_E2_ONLINE }, | ||
501 | { 0x338ec0, 1, RI_E1HE2_ONLINE }, { 0x338f00, 1, RI_E1HE2_ONLINE }, | ||
502 | { 0x338f40, 1, RI_E1HE2_ONLINE }, { 0x338f80, 1, RI_E1HE2_ONLINE }, | ||
503 | { 0x338fc0, 1, RI_E1HE2_ONLINE }, { 0x338fc4, 2, RI_E2_ONLINE }, | ||
504 | { 0x338fd0, 6, RI_E2_ONLINE }, { 0x339000, 1, RI_E2_ONLINE }, | ||
505 | { 0x339040, 3, RI_E2_ONLINE }, { 0x340000, 2, RI_ALL_ONLINE }, | ||
365 | }; | 506 | }; |
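The reg_addrs table above is data rather than logic: each entry gives a base address, a length in 32-bit words, and RI_* flags describing which chip revisions expose the block and whether it is safe to read while the device is online. A hedged sketch of how such a table is typically walked when producing a register dump; the accessor and chip selection below are illustrative stand-ins, not the driver's actual dump code:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct reg_addr {
	uint32_t addr;
	uint32_t size;	/* number of consecutive 32-bit words */
	uint16_t info;	/* RI_* flags: chip bits plus RI_ONLINE */
};

#define RI_E1     0x1
#define RI_E1H    0x2
#define RI_E2     0x4
#define RI_ONLINE 0x100

/* Stand-in for the MMIO read; the real driver goes through its own
 * register accessor. Here it just echoes the address for demo output. */
static uint32_t read_reg32(uint32_t addr) { return addr; }

static size_t dump_regs(const struct reg_addr *tbl, size_t count,
			uint16_t chip_bit, uint32_t *out)
{
	size_t n = 0;

	for (size_t i = 0; i < count; i++) {
		/* skip entries not valid for this chip or not safe online */
		if (!(tbl[i].info & chip_bit) || !(tbl[i].info & RI_ONLINE))
			continue;
		for (uint32_t j = 0; j < tbl[i].size; j++)
			out[n++] = read_reg32(tbl[i].addr + 4 * j);
	}
	return n;
}

int main(void)
{
	const struct reg_addr tbl[] = {
		{ 0x2000, 3, RI_E1 | RI_E1H | RI_E2 | RI_ONLINE },
		{ 0x9000, 2, RI_E2 | RI_ONLINE },	/* E2-only block */
	};
	uint32_t buf[16];
	size_t n = dump_regs(tbl, 2, RI_E1H, buf);	/* E1H skips 0x9000 */

	printf("dumped %zu words\n", n);
	return 0;
}

The idle_addrs table that follows uses the same entry format.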
366 | 507 | ||
367 | 508 | #define IDLE_REGS_COUNT 237 | |
368 | #define IDLE_REGS_COUNT 277 | ||
369 | static const struct reg_addr idle_addrs[IDLE_REGS_COUNT] = { | 509 | static const struct reg_addr idle_addrs[IDLE_REGS_COUNT] = { |
370 | { 0x2114, 1, RI_ALL_ONLINE }, { 0x2120, 1, RI_ALL_ONLINE }, | 510 | { 0x2104, 1, RI_ALL_ONLINE }, { 0x2110, 2, RI_ALL_ONLINE }, |
371 | { 0x212c, 4, RI_ALL_ONLINE }, { 0x2814, 1, RI_ALL_ONLINE }, | 511 | { 0x211c, 8, RI_ALL_ONLINE }, { 0x2814, 1, RI_ALL_ONLINE }, |
372 | { 0x281c, 2, RI_ALL_ONLINE }, { 0xa38c, 1, RI_ALL_ONLINE }, | 512 | { 0x281c, 2, RI_ALL_ONLINE }, { 0x2854, 1, RI_ALL_ONLINE }, |
513 | { 0x285c, 1, RI_ALL_ONLINE }, { 0x9010, 7, RI_E2_ONLINE }, | ||
514 | { 0x9030, 1, RI_E2_ONLINE }, { 0x9068, 16, RI_E2_ONLINE }, | ||
515 | { 0x9230, 2, RI_E2_ONLINE }, { 0x9244, 1, RI_E2_ONLINE }, | ||
516 | { 0x9298, 1, RI_E2_ONLINE }, { 0x92a8, 1, RI_E2_ONLINE }, | ||
517 | { 0xa38c, 1, RI_ALL_ONLINE }, { 0xa3c4, 1, RI_E1HE2_ONLINE }, | ||
373 | { 0xa408, 1, RI_ALL_ONLINE }, { 0xa42c, 12, RI_ALL_ONLINE }, | 518 | { 0xa408, 1, RI_ALL_ONLINE }, { 0xa42c, 12, RI_ALL_ONLINE }, |
374 | { 0xa600, 5, RI_E1H_ONLINE }, { 0xa618, 1, RI_E1H_ONLINE }, | 519 | { 0xa600, 5, RI_E1HE2_ONLINE }, { 0xa618, 1, RI_E1HE2_ONLINE }, |
375 | { 0xc09c, 1, RI_ALL_ONLINE }, { 0x103b0, 1, RI_ALL_ONLINE }, | 520 | { 0xa714, 1, RI_E2_ONLINE }, { 0xa720, 1, RI_E2_ONLINE }, |
376 | { 0x103c0, 1, RI_ALL_ONLINE }, { 0x103d0, 1, RI_E1H_ONLINE }, | 521 | { 0xa750, 1, RI_E2_ONLINE }, { 0xc09c, 1, RI_E1E1H_ONLINE }, |
377 | { 0x2021c, 11, RI_ALL_ONLINE }, { 0x202a8, 1, RI_ALL_ONLINE }, | 522 | { 0x103b0, 1, RI_ALL_ONLINE }, { 0x103c0, 1, RI_ALL_ONLINE }, |
378 | { 0x202b8, 1, RI_ALL_ONLINE }, { 0x20404, 1, RI_ALL_ONLINE }, | 523 | { 0x103d0, 1, RI_E1H_ONLINE }, { 0x183bc, 1, RI_E2_ONLINE }, |
379 | { 0x2040c, 2, RI_ALL_ONLINE }, { 0x2041c, 2, RI_ALL_ONLINE }, | 524 | { 0x183cc, 1, RI_E2_ONLINE }, { 0x2021c, 11, RI_ALL_ONLINE }, |
380 | { 0x40154, 14, RI_ALL_ONLINE }, { 0x40198, 1, RI_ALL_ONLINE }, | 525 | { 0x202a8, 1, RI_ALL_ONLINE }, { 0x202b8, 1, RI_ALL_ONLINE }, |
381 | { 0x404ac, 1, RI_ALL_ONLINE }, { 0x404bc, 1, RI_ALL_ONLINE }, | 526 | { 0x20404, 1, RI_ALL_ONLINE }, { 0x2040c, 2, RI_ALL_ONLINE }, |
382 | { 0x42290, 1, RI_ALL_ONLINE }, { 0x422a0, 1, RI_ALL_ONLINE }, | 527 | { 0x2041c, 2, RI_ALL_ONLINE }, { 0x40154, 14, RI_ALL_ONLINE }, |
383 | { 0x422b0, 1, RI_ALL_ONLINE }, { 0x42548, 1, RI_ALL_ONLINE }, | 528 | { 0x40198, 1, RI_ALL_ONLINE }, { 0x404ac, 1, RI_ALL_ONLINE }, |
384 | { 0x42550, 1, RI_ALL_ONLINE }, { 0x42558, 1, RI_ALL_ONLINE }, | 529 | { 0x404bc, 1, RI_ALL_ONLINE }, { 0x42290, 1, RI_ALL_ONLINE }, |
385 | { 0x50160, 8, RI_ALL_ONLINE }, { 0x501d0, 1, RI_ALL_ONLINE }, | 530 | { 0x422a0, 1, RI_ALL_ONLINE }, { 0x422b0, 1, RI_ALL_ONLINE }, |
386 | { 0x501e0, 1, RI_ALL_ONLINE }, { 0x50204, 1, RI_ALL_ONLINE }, | 531 | { 0x42548, 1, RI_ALL_ONLINE }, { 0x42550, 1, RI_ALL_ONLINE }, |
387 | { 0x5020c, 2, RI_ALL_ONLINE }, { 0x5021c, 1, RI_ALL_ONLINE }, | 532 | { 0x42558, 1, RI_ALL_ONLINE }, { 0x50160, 8, RI_ALL_ONLINE }, |
388 | { 0x60090, 1, RI_ALL_ONLINE }, { 0x6011c, 1, RI_ALL_ONLINE }, | 533 | { 0x501d0, 1, RI_ALL_ONLINE }, { 0x501e0, 1, RI_ALL_ONLINE }, |
389 | { 0x6012c, 1, RI_ALL_ONLINE }, { 0xc101c, 1, RI_ALL_ONLINE }, | 534 | { 0x50204, 1, RI_ALL_ONLINE }, { 0x5020c, 2, RI_ALL_ONLINE }, |
390 | { 0xc102c, 1, RI_ALL_ONLINE }, { 0xc2290, 1, RI_ALL_ONLINE }, | 535 | { 0x5021c, 1, RI_ALL_ONLINE }, { 0x60090, 1, RI_ALL_ONLINE }, |
391 | { 0xc22a0, 1, RI_ALL_ONLINE }, { 0xc22b0, 1, RI_ALL_ONLINE }, | 536 | { 0x6011c, 1, RI_ALL_ONLINE }, { 0x6012c, 1, RI_ALL_ONLINE }, |
392 | { 0xc2548, 1, RI_ALL_ONLINE }, { 0xc2550, 1, RI_ALL_ONLINE }, | 537 | { 0xc101c, 1, RI_ALL_ONLINE }, { 0xc102c, 1, RI_ALL_ONLINE }, |
393 | { 0xc2558, 1, RI_ALL_ONLINE }, { 0xc4294, 1, RI_ALL_ONLINE }, | 538 | { 0xc2290, 1, RI_ALL_ONLINE }, { 0xc22a0, 1, RI_ALL_ONLINE }, |
394 | { 0xc42a4, 1, RI_ALL_ONLINE }, { 0xc42b4, 1, RI_ALL_ONLINE }, | 539 | { 0xc22b0, 1, RI_ALL_ONLINE }, { 0xc2548, 1, RI_ALL_ONLINE }, |
395 | { 0xc4550, 1, RI_ALL_ONLINE }, { 0xc4558, 1, RI_ALL_ONLINE }, | 540 | { 0xc2550, 1, RI_ALL_ONLINE }, { 0xc2558, 1, RI_ALL_ONLINE }, |
396 | { 0xc4560, 1, RI_ALL_ONLINE }, { 0xd016c, 8, RI_ALL_ONLINE }, | 541 | { 0xc4294, 1, RI_ALL_ONLINE }, { 0xc42a4, 1, RI_ALL_ONLINE }, |
397 | { 0xd01d8, 1, RI_ALL_ONLINE }, { 0xd01e8, 1, RI_ALL_ONLINE }, | 542 | { 0xc42b4, 1, RI_ALL_ONLINE }, { 0xc4550, 1, RI_ALL_ONLINE }, |
398 | { 0xd0204, 1, RI_ALL_ONLINE }, { 0xd020c, 3, RI_ALL_ONLINE }, | 543 | { 0xc4558, 1, RI_ALL_ONLINE }, { 0xc4560, 1, RI_ALL_ONLINE }, |
399 | { 0xe0154, 8, RI_ALL_ONLINE }, { 0xe01c8, 1, RI_ALL_ONLINE }, | 544 | { 0xd016c, 8, RI_ALL_ONLINE }, { 0xd01d8, 1, RI_ALL_ONLINE }, |
400 | { 0xe01d8, 1, RI_ALL_ONLINE }, { 0xe0204, 1, RI_ALL_ONLINE }, | 545 | { 0xd01e8, 1, RI_ALL_ONLINE }, { 0xd0204, 1, RI_ALL_ONLINE }, |
401 | { 0xe020c, 2, RI_ALL_ONLINE }, { 0xe021c, 2, RI_ALL_ONLINE }, | 546 | { 0xd020c, 3, RI_ALL_ONLINE }, { 0xe0154, 8, RI_ALL_ONLINE }, |
402 | { 0x101014, 1, RI_ALL_ONLINE }, { 0x101030, 1, RI_ALL_ONLINE }, | 547 | { 0xe01c8, 1, RI_ALL_ONLINE }, { 0xe01d8, 1, RI_ALL_ONLINE }, |
403 | { 0x101040, 1, RI_ALL_ONLINE }, { 0x102058, 1, RI_ALL_ONLINE }, | 548 | { 0xe0204, 1, RI_ALL_ONLINE }, { 0xe020c, 2, RI_ALL_ONLINE }, |
404 | { 0x102080, 16, RI_ALL_ONLINE }, { 0x103004, 2, RI_ALL_ONLINE }, | 549 | { 0xe021c, 2, RI_ALL_ONLINE }, { 0x101014, 1, RI_ALL_ONLINE }, |
405 | { 0x103068, 1, RI_ALL_ONLINE }, { 0x103078, 1, RI_ALL_ONLINE }, | 550 | { 0x101030, 1, RI_ALL_ONLINE }, { 0x101040, 1, RI_ALL_ONLINE }, |
406 | { 0x103088, 1, RI_ALL_ONLINE }, { 0x10309c, 2, RI_E1H_ONLINE }, | 551 | { 0x102058, 1, RI_ALL_ONLINE }, { 0x102080, 16, RI_ALL_ONLINE }, |
552 | { 0x103004, 2, RI_ALL_ONLINE }, { 0x103068, 1, RI_ALL_ONLINE }, | ||
553 | { 0x103078, 1, RI_ALL_ONLINE }, { 0x103088, 1, RI_ALL_ONLINE }, | ||
554 | { 0x10309c, 2, RI_E1HE2_ONLINE }, { 0x1030b8, 2, RI_E2_ONLINE }, | ||
555 | { 0x1030cc, 1, RI_E2_ONLINE }, { 0x1030e0, 1, RI_E2_ONLINE }, | ||
407 | { 0x104004, 1, RI_ALL_ONLINE }, { 0x104018, 1, RI_ALL_ONLINE }, | 556 | { 0x104004, 1, RI_ALL_ONLINE }, { 0x104018, 1, RI_ALL_ONLINE }, |
408 | { 0x104020, 1, RI_ALL_ONLINE }, { 0x10403c, 1, RI_ALL_ONLINE }, | 557 | { 0x104020, 1, RI_ALL_ONLINE }, { 0x10403c, 1, RI_ALL_ONLINE }, |
409 | { 0x1040fc, 1, RI_ALL_ONLINE }, { 0x10410c, 1, RI_ALL_ONLINE }, | 558 | { 0x1040fc, 1, RI_ALL_ONLINE }, { 0x10410c, 1, RI_ALL_ONLINE }, |
410 | { 0x104400, 64, RI_ALL_ONLINE }, { 0x104800, 64, RI_ALL_ONLINE }, | 559 | { 0x104400, 64, RI_ALL_ONLINE }, { 0x104800, 64, RI_ALL_ONLINE }, |
411 | { 0x105000, 3, RI_ALL_ONLINE }, { 0x105010, 3, RI_ALL_ONLINE }, | 560 | { 0x105000, 256, RI_ALL_ONLINE }, { 0x108094, 1, RI_E1E1H_ONLINE }, |
412 | { 0x105020, 3, RI_ALL_ONLINE }, { 0x105030, 3, RI_ALL_ONLINE }, | 561 | { 0x1201b0, 2, RI_ALL_ONLINE }, { 0x12032c, 1, RI_ALL_ONLINE }, |
413 | { 0x105040, 3, RI_ALL_ONLINE }, { 0x105050, 3, RI_ALL_ONLINE }, | 562 | { 0x12036c, 3, RI_ALL_ONLINE }, { 0x120408, 2, RI_ALL_ONLINE }, |
414 | { 0x105060, 3, RI_ALL_ONLINE }, { 0x105070, 3, RI_ALL_ONLINE }, | 563 | { 0x120414, 15, RI_ALL_ONLINE }, { 0x120478, 2, RI_ALL_ONLINE }, |
415 | { 0x105080, 3, RI_ALL_ONLINE }, { 0x105090, 3, RI_ALL_ONLINE }, | 564 | { 0x12052c, 1, RI_ALL_ONLINE }, { 0x120564, 3, RI_ALL_ONLINE }, |
416 | { 0x1050a0, 3, RI_ALL_ONLINE }, { 0x1050b0, 3, RI_ALL_ONLINE }, | 565 | { 0x12057c, 1, RI_ALL_ONLINE }, { 0x12058c, 1, RI_ALL_ONLINE }, |
417 | { 0x1050c0, 3, RI_ALL_ONLINE }, { 0x1050d0, 3, RI_ALL_ONLINE }, | 566 | { 0x120608, 1, RI_E1HE2_ONLINE }, { 0x120738, 1, RI_E2_ONLINE }, |
418 | { 0x1050e0, 3, RI_ALL_ONLINE }, { 0x1050f0, 3, RI_ALL_ONLINE }, | 567 | { 0x120778, 2, RI_E2_ONLINE }, { 0x120808, 3, RI_ALL_ONLINE }, |
419 | { 0x105100, 3, RI_ALL_ONLINE }, { 0x105110, 3, RI_ALL_ONLINE }, | ||
420 | { 0x105120, 3, RI_ALL_ONLINE }, { 0x105130, 3, RI_ALL_ONLINE }, | ||
421 | { 0x105140, 3, RI_ALL_ONLINE }, { 0x105150, 3, RI_ALL_ONLINE }, | ||
422 | { 0x105160, 3, RI_ALL_ONLINE }, { 0x105170, 3, RI_ALL_ONLINE }, | ||
423 | { 0x105180, 3, RI_ALL_ONLINE }, { 0x105190, 3, RI_ALL_ONLINE }, | ||
424 | { 0x1051a0, 3, RI_ALL_ONLINE }, { 0x1051b0, 3, RI_ALL_ONLINE }, | ||
425 | { 0x1051c0, 3, RI_ALL_ONLINE }, { 0x1051d0, 3, RI_ALL_ONLINE }, | ||
426 | { 0x1051e0, 3, RI_ALL_ONLINE }, { 0x1051f0, 3, RI_ALL_ONLINE }, | ||
427 | { 0x105200, 3, RI_ALL_ONLINE }, { 0x105210, 3, RI_ALL_ONLINE }, | ||
428 | { 0x105220, 3, RI_ALL_ONLINE }, { 0x105230, 3, RI_ALL_ONLINE }, | ||
429 | { 0x105240, 3, RI_ALL_ONLINE }, { 0x105250, 3, RI_ALL_ONLINE }, | ||
430 | { 0x105260, 3, RI_ALL_ONLINE }, { 0x105270, 3, RI_ALL_ONLINE }, | ||
431 | { 0x105280, 3, RI_ALL_ONLINE }, { 0x105290, 3, RI_ALL_ONLINE }, | ||
432 | { 0x1052a0, 3, RI_ALL_ONLINE }, { 0x1052b0, 3, RI_ALL_ONLINE }, | ||
433 | { 0x1052c0, 3, RI_ALL_ONLINE }, { 0x1052d0, 3, RI_ALL_ONLINE }, | ||
434 | { 0x1052e0, 3, RI_ALL_ONLINE }, { 0x1052f0, 3, RI_ALL_ONLINE }, | ||
435 | { 0x105300, 3, RI_ALL_ONLINE }, { 0x105310, 3, RI_ALL_ONLINE }, | ||
436 | { 0x105320, 3, RI_ALL_ONLINE }, { 0x105330, 3, RI_ALL_ONLINE }, | ||
437 | { 0x105340, 3, RI_ALL_ONLINE }, { 0x105350, 3, RI_ALL_ONLINE }, | ||
438 | { 0x105360, 3, RI_ALL_ONLINE }, { 0x105370, 3, RI_ALL_ONLINE }, | ||
439 | { 0x105380, 3, RI_ALL_ONLINE }, { 0x105390, 3, RI_ALL_ONLINE }, | ||
440 | { 0x1053a0, 3, RI_ALL_ONLINE }, { 0x1053b0, 3, RI_ALL_ONLINE }, | ||
441 | { 0x1053c0, 3, RI_ALL_ONLINE }, { 0x1053d0, 3, RI_ALL_ONLINE }, | ||
442 | { 0x1053e0, 3, RI_ALL_ONLINE }, { 0x1053f0, 3, RI_ALL_ONLINE }, | ||
443 | { 0x108094, 1, RI_ALL_ONLINE }, { 0x1201b0, 2, RI_ALL_ONLINE }, | ||
444 | { 0x12032c, 1, RI_ALL_ONLINE }, { 0x12036c, 3, RI_ALL_ONLINE }, | ||
445 | { 0x120408, 2, RI_ALL_ONLINE }, { 0x120414, 15, RI_ALL_ONLINE }, | ||
446 | { 0x120478, 2, RI_ALL_ONLINE }, { 0x12052c, 1, RI_ALL_ONLINE }, | ||
447 | { 0x120564, 3, RI_ALL_ONLINE }, { 0x12057c, 1, RI_ALL_ONLINE }, | ||
448 | { 0x12058c, 1, RI_ALL_ONLINE }, { 0x120608, 1, RI_E1H_ONLINE }, | ||
449 | { 0x120808, 1, RI_E1_ONLINE }, { 0x12080c, 2, RI_ALL_ONLINE }, | ||
450 | { 0x120818, 1, RI_ALL_ONLINE }, { 0x120820, 1, RI_ALL_ONLINE }, | 568 | { 0x120818, 1, RI_ALL_ONLINE }, { 0x120820, 1, RI_ALL_ONLINE }, |
451 | { 0x120828, 1, RI_ALL_ONLINE }, { 0x120830, 1, RI_ALL_ONLINE }, | 569 | { 0x120828, 1, RI_ALL_ONLINE }, { 0x120830, 1, RI_ALL_ONLINE }, |
452 | { 0x120838, 1, RI_ALL_ONLINE }, { 0x120840, 1, RI_ALL_ONLINE }, | 570 | { 0x120838, 1, RI_ALL_ONLINE }, { 0x120840, 1, RI_ALL_ONLINE }, |
@@ -462,48 +580,50 @@ static const struct reg_addr idle_addrs[IDLE_REGS_COUNT] = { | |||
462 | { 0x1208d8, 1, RI_ALL_ONLINE }, { 0x1208e0, 1, RI_ALL_ONLINE }, | 580 | { 0x1208d8, 1, RI_ALL_ONLINE }, { 0x1208e0, 1, RI_ALL_ONLINE }, |
463 | { 0x1208e8, 1, RI_ALL_ONLINE }, { 0x1208f0, 1, RI_ALL_ONLINE }, | 581 | { 0x1208e8, 1, RI_ALL_ONLINE }, { 0x1208f0, 1, RI_ALL_ONLINE }, |
464 | { 0x1208f8, 1, RI_ALL_ONLINE }, { 0x120900, 1, RI_ALL_ONLINE }, | 582 | { 0x1208f8, 1, RI_ALL_ONLINE }, { 0x120900, 1, RI_ALL_ONLINE }, |
465 | { 0x120908, 1, RI_ALL_ONLINE }, { 0x14005c, 2, RI_ALL_ONLINE }, | 583 | { 0x120908, 1, RI_ALL_ONLINE }, { 0x120940, 5, RI_E2_ONLINE }, |
466 | { 0x1400d0, 2, RI_ALL_ONLINE }, { 0x1400e0, 1, RI_ALL_ONLINE }, | 584 | { 0x130030, 1, RI_E2_ONLINE }, { 0x13004c, 3, RI_E2_ONLINE }, |
467 | { 0x1401c8, 1, RI_ALL_ONLINE }, { 0x140200, 6, RI_ALL_ONLINE }, | 585 | { 0x130064, 2, RI_E2_ONLINE }, { 0x13009c, 1, RI_E2_ONLINE }, |
468 | { 0x16101c, 1, RI_ALL_ONLINE }, { 0x16102c, 1, RI_ALL_ONLINE }, | 586 | { 0x130130, 1, RI_E2_ONLINE }, { 0x13016c, 1, RI_E2_ONLINE }, |
469 | { 0x164014, 2, RI_ALL_ONLINE }, { 0x1640f0, 1, RI_ALL_ONLINE }, | 587 | { 0x130300, 1, RI_E2_ONLINE }, { 0x130480, 1, RI_E2_ONLINE }, |
470 | { 0x166290, 1, RI_ALL_ONLINE }, { 0x1662a0, 1, RI_ALL_ONLINE }, | 588 | { 0x14005c, 2, RI_ALL_ONLINE }, { 0x1400d0, 2, RI_ALL_ONLINE }, |
471 | { 0x1662b0, 1, RI_ALL_ONLINE }, { 0x166548, 1, RI_ALL_ONLINE }, | 589 | { 0x1400e0, 1, RI_ALL_ONLINE }, { 0x1401c8, 1, RI_ALL_ONLINE }, |
472 | { 0x166550, 1, RI_ALL_ONLINE }, { 0x166558, 1, RI_ALL_ONLINE }, | 590 | { 0x140200, 6, RI_ALL_ONLINE }, { 0x16101c, 1, RI_ALL_ONLINE }, |
473 | { 0x168000, 1, RI_ALL_ONLINE }, { 0x168008, 1, RI_ALL_ONLINE }, | 591 | { 0x16102c, 1, RI_ALL_ONLINE }, { 0x164014, 2, RI_ALL_ONLINE }, |
474 | { 0x168010, 1, RI_ALL_ONLINE }, { 0x168018, 1, RI_ALL_ONLINE }, | 592 | { 0x1640f0, 1, RI_ALL_ONLINE }, { 0x166290, 1, RI_ALL_ONLINE }, |
475 | { 0x168028, 2, RI_ALL_ONLINE }, { 0x168058, 4, RI_ALL_ONLINE }, | 593 | { 0x1662a0, 1, RI_ALL_ONLINE }, { 0x1662b0, 1, RI_ALL_ONLINE }, |
476 | { 0x168070, 1, RI_ALL_ONLINE }, { 0x168238, 1, RI_ALL_ONLINE }, | 594 | { 0x166548, 1, RI_ALL_ONLINE }, { 0x166550, 1, RI_ALL_ONLINE }, |
477 | { 0x1682d0, 2, RI_ALL_ONLINE }, { 0x1682e0, 1, RI_ALL_ONLINE }, | 595 | { 0x166558, 1, RI_ALL_ONLINE }, { 0x168000, 1, RI_ALL_ONLINE }, |
478 | { 0x168300, 67, RI_ALL_ONLINE }, { 0x168410, 2, RI_ALL_ONLINE }, | 596 | { 0x168008, 1, RI_ALL_ONLINE }, { 0x168010, 1, RI_ALL_ONLINE }, |
597 | { 0x168018, 1, RI_ALL_ONLINE }, { 0x168028, 2, RI_ALL_ONLINE }, | ||
598 | { 0x168058, 4, RI_ALL_ONLINE }, { 0x168070, 1, RI_ALL_ONLINE }, | ||
599 | { 0x168238, 1, RI_ALL_ONLINE }, { 0x1682d0, 2, RI_ALL_ONLINE }, | ||
600 | { 0x1682e0, 1, RI_ALL_ONLINE }, { 0x168300, 2, RI_E1E1H_ONLINE }, | ||
601 | { 0x168308, 65, RI_ALL_ONLINE }, { 0x168410, 2, RI_ALL_ONLINE }, | ||
479 | { 0x168438, 1, RI_ALL_ONLINE }, { 0x168448, 1, RI_ALL_ONLINE }, | 602 | { 0x168438, 1, RI_ALL_ONLINE }, { 0x168448, 1, RI_ALL_ONLINE }, |
480 | { 0x168a00, 128, RI_ALL_ONLINE }, { 0x16e200, 128, RI_E1H_ONLINE }, | 603 | { 0x168a00, 128, RI_ALL_ONLINE }, { 0x16e200, 128, RI_E1H_ONLINE }, |
481 | { 0x16e404, 2, RI_E1H_ONLINE }, { 0x16e584, 70, RI_E1H_ONLINE }, | 604 | { 0x16e404, 2, RI_E1H_ONLINE }, { 0x16e584, 64, RI_E1H_ONLINE }, |
482 | { 0x1700a4, 1, RI_ALL_ONLINE }, { 0x1700ac, 2, RI_ALL_ONLINE }, | 605 | { 0x16e684, 2, RI_E1HE2_ONLINE }, { 0x16e68c, 4, RI_E1H_ONLINE }, |
483 | { 0x1700c0, 1, RI_ALL_ONLINE }, { 0x170174, 1, RI_ALL_ONLINE }, | 606 | { 0x16e6fc, 4, RI_E2_ONLINE }, { 0x1700a4, 1, RI_ALL_ONLINE }, |
484 | { 0x170184, 1, RI_ALL_ONLINE }, { 0x1800f4, 1, RI_ALL_ONLINE }, | 607 | { 0x1700ac, 2, RI_ALL_ONLINE }, { 0x1700c0, 1, RI_ALL_ONLINE }, |
485 | { 0x180104, 1, RI_ALL_ONLINE }, { 0x180114, 1, RI_ALL_ONLINE }, | 608 | { 0x170174, 1, RI_ALL_ONLINE }, { 0x170184, 1, RI_ALL_ONLINE }, |
486 | { 0x180124, 1, RI_ALL_ONLINE }, { 0x18026c, 1, RI_ALL_ONLINE }, | 609 | { 0x1800f4, 1, RI_ALL_ONLINE }, { 0x180104, 1, RI_ALL_ONLINE }, |
487 | { 0x1802a0, 1, RI_ALL_ONLINE }, { 0x1a1000, 1, RI_ALL_ONLINE }, | 610 | { 0x180114, 1, RI_ALL_ONLINE }, { 0x180124, 1, RI_ALL_ONLINE }, |
488 | { 0x1aa000, 1, RI_E1H_ONLINE }, { 0x1b8000, 1, RI_ALL_ONLINE }, | 611 | { 0x18026c, 1, RI_ALL_ONLINE }, { 0x1802a0, 1, RI_ALL_ONLINE }, |
489 | { 0x1b8040, 1, RI_ALL_ONLINE }, { 0x1b8080, 1, RI_ALL_ONLINE }, | 612 | { 0x1b8000, 1, RI_ALL_ONLINE }, { 0x1b8040, 1, RI_ALL_ONLINE }, |
490 | { 0x1b80c0, 1, RI_ALL_ONLINE }, { 0x200104, 1, RI_ALL_ONLINE }, | 613 | { 0x1b8080, 1, RI_ALL_ONLINE }, { 0x1b80c0, 1, RI_ALL_ONLINE }, |
491 | { 0x200114, 1, RI_ALL_ONLINE }, { 0x200124, 1, RI_ALL_ONLINE }, | 614 | { 0x200104, 1, RI_ALL_ONLINE }, { 0x200114, 1, RI_ALL_ONLINE }, |
492 | { 0x200134, 1, RI_ALL_ONLINE }, { 0x20026c, 1, RI_ALL_ONLINE }, | 615 | { 0x200124, 1, RI_ALL_ONLINE }, { 0x200134, 1, RI_ALL_ONLINE }, |
493 | { 0x2002a0, 1, RI_ALL_ONLINE }, { 0x221000, 1, RI_ALL_ONLINE }, | 616 | { 0x20026c, 1, RI_ALL_ONLINE }, { 0x2002a0, 1, RI_ALL_ONLINE }, |
494 | { 0x227000, 1, RI_E1H_ONLINE }, { 0x238000, 1, RI_ALL_ONLINE }, | 617 | { 0x238000, 1, RI_ALL_ONLINE }, { 0x238040, 1, RI_ALL_ONLINE }, |
495 | { 0x238040, 1, RI_ALL_ONLINE }, { 0x238080, 1, RI_ALL_ONLINE }, | 618 | { 0x238080, 1, RI_ALL_ONLINE }, { 0x2380c0, 1, RI_ALL_ONLINE }, |
496 | { 0x2380c0, 1, RI_ALL_ONLINE }, { 0x280104, 1, RI_ALL_ONLINE }, | 619 | { 0x280104, 1, RI_ALL_ONLINE }, { 0x280114, 1, RI_ALL_ONLINE }, |
497 | { 0x280114, 1, RI_ALL_ONLINE }, { 0x280124, 1, RI_ALL_ONLINE }, | 620 | { 0x280124, 1, RI_ALL_ONLINE }, { 0x280134, 1, RI_ALL_ONLINE }, |
498 | { 0x280134, 1, RI_ALL_ONLINE }, { 0x28026c, 1, RI_ALL_ONLINE }, | 621 | { 0x28026c, 1, RI_ALL_ONLINE }, { 0x2802a0, 1, RI_ALL_ONLINE }, |
499 | { 0x2802a0, 1, RI_ALL_ONLINE }, { 0x2a1000, 1, RI_ALL_ONLINE }, | 622 | { 0x2b8000, 1, RI_ALL_ONLINE }, { 0x2b8040, 1, RI_ALL_ONLINE }, |
500 | { 0x2a9000, 1, RI_E1H_ONLINE }, { 0x2b8000, 1, RI_ALL_ONLINE }, | 623 | { 0x2b8080, 1, RI_ALL_ONLINE }, { 0x300104, 1, RI_ALL_ONLINE }, |
501 | { 0x2b8040, 1, RI_ALL_ONLINE }, { 0x2b8080, 1, RI_ALL_ONLINE }, | ||
502 | { 0x2b80c0, 1, RI_ALL_ONLINE }, { 0x300104, 1, RI_ALL_ONLINE }, | ||
503 | { 0x300114, 1, RI_ALL_ONLINE }, { 0x300124, 1, RI_ALL_ONLINE }, | 624 | { 0x300114, 1, RI_ALL_ONLINE }, { 0x300124, 1, RI_ALL_ONLINE }, |
504 | { 0x300134, 1, RI_ALL_ONLINE }, { 0x30026c, 1, RI_ALL_ONLINE }, | 625 | { 0x300134, 1, RI_ALL_ONLINE }, { 0x30026c, 1, RI_ALL_ONLINE }, |
505 | { 0x3002a0, 1, RI_ALL_ONLINE }, { 0x321000, 1, RI_ALL_ONLINE }, | 626 | { 0x3002a0, 1, RI_ALL_ONLINE }, { 0x338000, 1, RI_ALL_ONLINE }, |
506 | { 0x328960, 1, RI_E1H_ONLINE }, { 0x338000, 1, RI_ALL_ONLINE }, | ||
507 | { 0x338040, 1, RI_ALL_ONLINE }, { 0x338080, 1, RI_ALL_ONLINE }, | 627 | { 0x338040, 1, RI_ALL_ONLINE }, { 0x338080, 1, RI_ALL_ONLINE }, |
508 | { 0x3380c0, 1, RI_ALL_ONLINE } | 628 | { 0x3380c0, 1, RI_ALL_ONLINE } |
509 | }; | 629 | }; |
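Each entry in these address tables is a { address, word count, presence flags } triple; the RI_*_ONLINE flags say which chip families (E1, E1H, E2) actually implement the range, so a dump walker only has to skip ranges the running chip does not expose. A minimal sketch of such a walk, assuming the reg_addr fields are named addr/size/info and using the IS_E2_ONLINE()/REG_RD() helpers that appear later in bnx2x_ethtool.c (the loop body itself is illustrative, not the driver's exact code):

        /* Illustrative only: copy every register range that is marked
         * online for an E2 chip into the dump buffer.  Real code picks
         * the IS_*_ONLINE() predicate matching the running chip.
         */
        static u32 *dump_reg_table(struct bnx2x *bp,
                                   const struct reg_addr *tbl, int count,
                                   u32 *out)
        {
                int i, j;

                for (i = 0; i < count; i++) {
                        if (!IS_E2_ONLINE(tbl[i].info))
                                continue;               /* range absent on this chip */
                        for (j = 0; j < tbl[i].size; j++)
                                *out++ = REG_RD(bp, tbl[i].addr + j * 4);
                }
                return out;
        }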
@@ -515,7 +635,6 @@ static const struct wreg_addr wreg_addrs_e1[WREGS_COUNT_E1] = { | |||
515 | { 0x1b0c00, 192, 1, read_reg_e1_0, RI_E1_OFFLINE } | 635 | { 0x1b0c00, 192, 1, read_reg_e1_0, RI_E1_OFFLINE } |
516 | }; | 636 | }; |
517 | 637 | ||
518 | |||
519 | #define WREGS_COUNT_E1H 1 | 638 | #define WREGS_COUNT_E1H 1 |
520 | static const u32 read_reg_e1h_0[] = { 0x1b1040, 0x1b1000 }; | 639 | static const u32 read_reg_e1h_0[] = { 0x1b1040, 0x1b1000 }; |
521 | 640 | ||
@@ -530,22 +649,53 @@ static const struct wreg_addr wreg_addrs_e2[WREGS_COUNT_E2] = { | |||
530 | { 0x1b0c00, 128, 2, read_reg_e2_0, RI_E2_OFFLINE } | 649 | { 0x1b0c00, 128, 2, read_reg_e2_0, RI_E2_OFFLINE } |
531 | }; | 650 | }; |
532 | 651 | ||
533 | static const struct dump_sign dump_sign_all = { 0x49aa93ee, 0x40835, 0x22 }; | 652 | static const struct dump_sign dump_sign_all = { 0x4d18b0a4, 0x60010, 0x3a }; |
534 | |||
535 | 653 | ||
536 | #define TIMER_REGS_COUNT_E1 2 | 654 | #define TIMER_REGS_COUNT_E1 2 |
537 | static const u32 timer_status_regs_e1[TIMER_REGS_COUNT_E1] = | ||
538 | { 0x164014, 0x164018 }; | ||
539 | static const u32 timer_scan_regs_e1[TIMER_REGS_COUNT_E1] = | ||
540 | { 0x1640d0, 0x1640d4 }; | ||
541 | 655 | ||
656 | static const u32 timer_status_regs_e1[TIMER_REGS_COUNT_E1] = { | ||
657 | 0x164014, 0x164018 }; | ||
658 | static const u32 timer_scan_regs_e1[TIMER_REGS_COUNT_E1] = { | ||
659 | 0x1640d0, 0x1640d4 }; | ||
542 | 660 | ||
543 | #define TIMER_REGS_COUNT_E1H 2 | 661 | #define TIMER_REGS_COUNT_E1H 2 |
544 | static const u32 timer_status_regs_e1h[TIMER_REGS_COUNT_E1H] = | ||
545 | { 0x164014, 0x164018 }; | ||
546 | static const u32 timer_scan_regs_e1h[TIMER_REGS_COUNT_E1H] = | ||
547 | { 0x1640d0, 0x1640d4 }; | ||
548 | 662 | ||
663 | static const u32 timer_status_regs_e1h[TIMER_REGS_COUNT_E1H] = { | ||
664 | 0x164014, 0x164018 }; | ||
665 | static const u32 timer_scan_regs_e1h[TIMER_REGS_COUNT_E1H] = { | ||
666 | 0x1640d0, 0x1640d4 }; | ||
667 | |||
668 | #define TIMER_REGS_COUNT_E2 2 | ||
669 | |||
670 | static const u32 timer_status_regs_e2[TIMER_REGS_COUNT_E2] = { | ||
671 | 0x164014, 0x164018 }; | ||
672 | static const u32 timer_scan_regs_e2[TIMER_REGS_COUNT_E2] = { | ||
673 | 0x1640d0, 0x1640d4 }; | ||
674 | |||
675 | #define PAGE_MODE_VALUES_E1 0 | ||
676 | |||
677 | #define PAGE_READ_REGS_E1 0 | ||
678 | |||
679 | #define PAGE_WRITE_REGS_E1 0 | ||
680 | |||
681 | static const u32 page_vals_e1[] = { 0 }; | ||
682 | |||
683 | static const u32 page_write_regs_e1[] = { 0 }; | ||
684 | |||
685 | static const struct reg_addr page_read_regs_e1[] = { { 0x0, 0, RI_E1_ONLINE } }; | ||
686 | |||
687 | #define PAGE_MODE_VALUES_E1H 0 | ||
688 | |||
689 | #define PAGE_READ_REGS_E1H 0 | ||
690 | |||
691 | #define PAGE_WRITE_REGS_E1H 0 | ||
692 | |||
693 | static const u32 page_vals_e1h[] = { 0 }; | ||
694 | |||
695 | static const u32 page_write_regs_e1h[] = { 0 }; | ||
696 | |||
697 | static const struct reg_addr page_read_regs_e1h[] = { | ||
698 | { 0x0, 0, RI_E1H_ONLINE } }; | ||
549 | 699 | ||
550 | #define PAGE_MODE_VALUES_E2 2 | 700 | #define PAGE_MODE_VALUES_E2 2 |
551 | 701 | ||
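The E1/E1H page tables above are deliberate stubs (all counts zero) so a single code path can serve every chip; only the E2 family actually uses paged register access. The idea — presumably what bnx2x_read_pages_regs_e2() in bnx2x_ethtool.c implements — is to program each page value into every page-write register and then read all online paged ranges. A sketch under that assumption (the page_vals_e2/page_write_regs_e2/page_read_regs_e2 array names mirror the E1/E1H placeholders above):

        /* Sketch, not the driver's exact function: with the zero counts
         * used for E1/E1H, every loop simply falls through.
         */
        static void read_paged_regs(struct bnx2x *bp, u32 **p)
        {
                int i, j, k, n;

                for (i = 0; i < PAGE_MODE_VALUES_E2; i++) {
                        /* select the page in all page-selection registers */
                        for (j = 0; j < PAGE_WRITE_REGS_E2; j++)
                                REG_WR(bp, page_write_regs_e2[j],
                                       page_vals_e2[i]);
                        /* read every paged range present on this chip */
                        for (k = 0; k < PAGE_READ_REGS_E2; k++) {
                                if (!IS_E2_ONLINE(page_read_regs_e2[k].info))
                                        continue;
                                for (n = 0; n < page_read_regs_e2[k].size; n++)
                                        *(*p)++ = REG_RD(bp,
                                                page_read_regs_e2[k].addr +
                                                n * 4);
                        }
                }
        }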
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c index 99c672d894ca..ef2919987a10 100644 --- a/drivers/net/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/bnx2x/bnx2x_ethtool.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include "bnx2x.h" | 24 | #include "bnx2x.h" |
25 | #include "bnx2x_cmn.h" | 25 | #include "bnx2x_cmn.h" |
26 | #include "bnx2x_dump.h" | 26 | #include "bnx2x_dump.h" |
27 | #include "bnx2x_init.h" | ||
27 | 28 | ||
28 | /* Note: in the format strings below %s is replaced by the queue-name which is | 29 | /* Note: in the format strings below %s is replaced by the queue-name which is |
29 | * either its index or 'fcoe' for the fcoe queue. Make sure the format string | 30 | * either its index or 'fcoe' for the fcoe queue. Make sure the format string |
@@ -237,7 +238,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
237 | speed |= (cmd->speed_hi << 16); | 238 | speed |= (cmd->speed_hi << 16); |
238 | 239 | ||
239 | if (IS_MF_SI(bp)) { | 240 | if (IS_MF_SI(bp)) { |
240 | u32 param = 0; | 241 | u32 param = 0, part; |
241 | u32 line_speed = bp->link_vars.line_speed; | 242 | u32 line_speed = bp->link_vars.line_speed; |
242 | 243 | ||
243 | /* use 10G if no link detected */ | 244 | /* use 10G if no link detected */ |
@@ -250,9 +251,11 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
250 | REQ_BC_VER_4_SET_MF_BW); | 251 | REQ_BC_VER_4_SET_MF_BW); |
251 | return -EINVAL; | 252 | return -EINVAL; |
252 | } | 253 | } |
253 | if (line_speed < speed) { | 254 | part = (speed * 100) / line_speed; |
254 | BNX2X_DEV_INFO("New speed should be less or equal " | 255 | if (line_speed < speed || !part) { |
255 | "to actual line speed\n"); | 256 | BNX2X_DEV_INFO("Speed setting should be in a range " |
257 | "from 1%% to 100%% " | ||
258 | "of actual line speed\n"); | ||
256 | return -EINVAL; | 259 | return -EINVAL; |
257 | } | 260 | } |
258 | /* load old values */ | 261 | /* load old values */ |
@@ -262,8 +265,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
262 | param &= FUNC_MF_CFG_MIN_BW_MASK; | 265 | param &= FUNC_MF_CFG_MIN_BW_MASK; |
263 | 266 | ||
264 | /* set new MAX value */ | 267 | /* set new MAX value */ |
265 | param |= (((speed * 100) / line_speed) | 268 | param |= (part << FUNC_MF_CFG_MAX_BW_SHIFT) |
266 | << FUNC_MF_CFG_MAX_BW_SHIFT) | ||
267 | & FUNC_MF_CFG_MAX_BW_MASK; | 269 | & FUNC_MF_CFG_MAX_BW_MASK; |
268 | 270 | ||
269 | bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, param); | 271 | bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, param); |
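The reworked check folds the percentage computation into a single "part" value so the same number is both validated and programmed: the requested speed must map to 1–100% of the current line speed. A worked example with illustrative numbers:

        /* line_speed = 10000 (10G link), requested speed = 2500:
         *   part = (2500 * 100) / 10000 = 25   -> accepted, MAX_BW = 25%
         * requested speed = 40:
         *   part = (40 * 100) / 10000 = 0      -> rejected by "!part" (< 1%)
         * requested speed = 12000:
         *   line_speed < speed                 -> rejected (over 100%)
         * The accepted value is then packed into the MAX_BW field:
         *   param |= (part << FUNC_MF_CFG_MAX_BW_SHIFT)
         *            & FUNC_MF_CFG_MAX_BW_MASK;
         */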
@@ -472,7 +474,7 @@ static int bnx2x_get_regs_len(struct net_device *dev) | |||
472 | { | 474 | { |
473 | struct bnx2x *bp = netdev_priv(dev); | 475 | struct bnx2x *bp = netdev_priv(dev); |
474 | int regdump_len = 0; | 476 | int regdump_len = 0; |
475 | int i; | 477 | int i, j, k; |
476 | 478 | ||
477 | if (CHIP_IS_E1(bp)) { | 479 | if (CHIP_IS_E1(bp)) { |
478 | for (i = 0; i < REGS_COUNT; i++) | 480 | for (i = 0; i < REGS_COUNT; i++) |
@@ -502,6 +504,15 @@ static int bnx2x_get_regs_len(struct net_device *dev) | |||
502 | if (IS_E2_ONLINE(wreg_addrs_e2[i].info)) | 504 | if (IS_E2_ONLINE(wreg_addrs_e2[i].info)) |
503 | regdump_len += wreg_addrs_e2[i].size * | 505 | regdump_len += wreg_addrs_e2[i].size * |
504 | (1 + wreg_addrs_e2[i].read_regs_count); | 506 | (1 + wreg_addrs_e2[i].read_regs_count); |
507 | |||
508 | for (i = 0; i < PAGE_MODE_VALUES_E2; i++) | ||
509 | for (j = 0; j < PAGE_WRITE_REGS_E2; j++) { | ||
510 | for (k = 0; k < PAGE_READ_REGS_E2; k++) | ||
511 | if (IS_E2_ONLINE(page_read_regs_e2[k]. | ||
512 | info)) | ||
513 | regdump_len += | ||
514 | page_read_regs_e2[k].size; | ||
515 | } | ||
505 | } | 516 | } |
506 | regdump_len *= 4; | 517 | regdump_len *= 4; |
507 | regdump_len += sizeof(struct dump_hdr); | 518 | regdump_len += sizeof(struct dump_hdr); |
@@ -539,6 +550,12 @@ static void bnx2x_get_regs(struct net_device *dev, | |||
539 | if (!netif_running(bp->dev)) | 550 | if (!netif_running(bp->dev)) |
540 | return; | 551 | return; |
541 | 552 | ||
553 | /* Disable parity attentions as long as following dump may | ||
554 | * cause false alarms by reading never written registers. We | ||
555 | * will re-enable parity attentions right after the dump. | ||
556 | */ | ||
557 | bnx2x_disable_blocks_parity(bp); | ||
558 | |||
542 | dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1; | 559 | dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1; |
543 | dump_hdr.dump_sign = dump_sign_all; | 560 | dump_hdr.dump_sign = dump_sign_all; |
544 | dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR); | 561 | dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR); |
@@ -580,6 +597,10 @@ static void bnx2x_get_regs(struct net_device *dev, | |||
580 | 597 | ||
581 | bnx2x_read_pages_regs_e2(bp, p); | 598 | bnx2x_read_pages_regs_e2(bp, p); |
582 | } | 599 | } |
600 | /* Re-enable parity attentions */ | ||
601 | bnx2x_clear_blocks_parity(bp); | ||
602 | if (CHIP_PARITY_ENABLED(bp)) | ||
603 | bnx2x_enable_blocks_parity(bp); | ||
583 | } | 604 | } |
584 | 605 | ||
585 | #define PHY_FW_VER_LEN 20 | 606 | #define PHY_FW_VER_LEN 20 |
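The register dump now runs with parity attentions masked, because many of the dumped registers are never written in normal operation and reading them can latch spurious parity errors. The ordering, in sketch form (the helpers are the new inlines added to bnx2x_init.h in this same commit):

        bnx2x_disable_blocks_parity(bp);  /* mask per-block and MCP parity */
        /* ... read all online, wide-bus and paged registers into *p ... */
        bnx2x_clear_blocks_parity(bp);    /* drop anything latched by the reads */
        if (CHIP_PARITY_ENABLED(bp))      /* re-arm only where parity is supported */
                bnx2x_enable_blocks_parity(bp);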
@@ -1761,9 +1782,7 @@ static int bnx2x_test_nvram(struct bnx2x *bp) | |||
1761 | { 0x100, 0x350 }, /* manuf_info */ | 1782 | { 0x100, 0x350 }, /* manuf_info */ |
1762 | { 0x450, 0xf0 }, /* feature_info */ | 1783 | { 0x450, 0xf0 }, /* feature_info */ |
1763 | { 0x640, 0x64 }, /* upgrade_key_info */ | 1784 | { 0x640, 0x64 }, /* upgrade_key_info */ |
1764 | { 0x6a4, 0x64 }, | ||
1765 | { 0x708, 0x70 }, /* manuf_key_info */ | 1785 | { 0x708, 0x70 }, /* manuf_key_info */ |
1766 | { 0x778, 0x70 }, | ||
1767 | { 0, 0 } | 1786 | { 0, 0 } |
1768 | }; | 1787 | }; |
1769 | __be32 buf[0x350 / 4]; | 1788 | __be32 buf[0x350 / 4]; |
@@ -1913,11 +1932,11 @@ static void bnx2x_self_test(struct net_device *dev, | |||
1913 | buf[4] = 1; | 1932 | buf[4] = 1; |
1914 | etest->flags |= ETH_TEST_FL_FAILED; | 1933 | etest->flags |= ETH_TEST_FL_FAILED; |
1915 | } | 1934 | } |
1916 | if (bp->port.pmf) | 1935 | |
1917 | if (bnx2x_link_test(bp, is_serdes) != 0) { | 1936 | if (bnx2x_link_test(bp, is_serdes) != 0) { |
1918 | buf[5] = 1; | 1937 | buf[5] = 1; |
1919 | etest->flags |= ETH_TEST_FL_FAILED; | 1938 | etest->flags |= ETH_TEST_FL_FAILED; |
1920 | } | 1939 | } |
1921 | 1940 | ||
1922 | #ifdef BNX2X_EXTRA_DEBUG | 1941 | #ifdef BNX2X_EXTRA_DEBUG |
1923 | bnx2x_panic_dump(bp); | 1942 | bnx2x_panic_dump(bp); |
diff --git a/drivers/net/bnx2x/bnx2x_hsi.h b/drivers/net/bnx2x/bnx2x_hsi.h index 6238d4f63989..548f5631c0dc 100644 --- a/drivers/net/bnx2x/bnx2x_hsi.h +++ b/drivers/net/bnx2x/bnx2x_hsi.h | |||
@@ -352,6 +352,10 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */ | |||
352 | #define PORT_HW_CFG_LANE_SWAP_CFG_31203120 0x0000d8d8 | 352 | #define PORT_HW_CFG_LANE_SWAP_CFG_31203120 0x0000d8d8 |
353 | /* forced only */ | 353 | /* forced only */ |
354 | #define PORT_HW_CFG_LANE_SWAP_CFG_32103210 0x0000e4e4 | 354 | #define PORT_HW_CFG_LANE_SWAP_CFG_32103210 0x0000e4e4 |
355 | /* Indicate whether to swap the external phy polarity */ | ||
356 | #define PORT_HW_CFG_SWAP_PHY_POLARITY_MASK 0x00010000 | ||
357 | #define PORT_HW_CFG_SWAP_PHY_POLARITY_DISABLED 0x00000000 | ||
358 | #define PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED 0x00010000 | ||
355 | 359 | ||
356 | u32 external_phy_config; | 360 | u32 external_phy_config; |
357 | #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK 0xff000000 | 361 | #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK 0xff000000 |
diff --git a/drivers/net/bnx2x/bnx2x_init.h b/drivers/net/bnx2x/bnx2x_init.h index a9d54874a559..fa6dbe3f2058 100644 --- a/drivers/net/bnx2x/bnx2x_init.h +++ b/drivers/net/bnx2x/bnx2x_init.h | |||
@@ -192,5 +192,225 @@ struct src_ent { | |||
192 | u64 next; | 192 | u64 next; |
193 | }; | 193 | }; |
194 | 194 | ||
195 | /**************************************************************************** | ||
196 | * Parity configuration | ||
197 | ****************************************************************************/ | ||
198 | #define BLOCK_PRTY_INFO(block, en_mask, m1, m1h, m2) \ | ||
199 | { \ | ||
200 | block##_REG_##block##_PRTY_MASK, \ | ||
201 | block##_REG_##block##_PRTY_STS_CLR, \ | ||
202 | en_mask, {m1, m1h, m2}, #block \ | ||
203 | } | ||
204 | |||
205 | #define BLOCK_PRTY_INFO_0(block, en_mask, m1, m1h, m2) \ | ||
206 | { \ | ||
207 | block##_REG_##block##_PRTY_MASK_0, \ | ||
208 | block##_REG_##block##_PRTY_STS_CLR_0, \ | ||
209 | en_mask, {m1, m1h, m2}, #block"_0" \ | ||
210 | } | ||
211 | |||
212 | #define BLOCK_PRTY_INFO_1(block, en_mask, m1, m1h, m2) \ | ||
213 | { \ | ||
214 | block##_REG_##block##_PRTY_MASK_1, \ | ||
215 | block##_REG_##block##_PRTY_STS_CLR_1, \ | ||
216 | en_mask, {m1, m1h, m2}, #block"_1" \ | ||
217 | } | ||
218 | |||
219 | static const struct { | ||
220 | u32 mask_addr; | ||
221 | u32 sts_clr_addr; | ||
222 | u32 en_mask; /* Mask to enable parity attentions */ | ||
223 | struct { | ||
224 | u32 e1; /* 57710 */ | ||
225 | u32 e1h; /* 57711 */ | ||
226 | u32 e2; /* 57712 */ | ||
227 | } reg_mask; /* Register mask (all valid bits) */ | ||
228 | char name[7]; /* Block's longest name is 6 characters long | ||
229 | * (name + suffix) | ||
230 | */ | ||
231 | } bnx2x_blocks_parity_data[] = { | ||
232 | /* bit 19 masked */ | ||
233 | /* REG_WR(bp, PXP_REG_PXP_PRTY_MASK, 0x80000); */ | ||
234 | /* bit 5,18,20-31 */ | ||
235 | /* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_0, 0xfff40020); */ | ||
236 | /* bit 5 */ | ||
237 | /* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_1, 0x20); */ | ||
238 | /* REG_WR(bp, HC_REG_HC_PRTY_MASK, 0x0); */ | ||
239 | /* REG_WR(bp, MISC_REG_MISC_PRTY_MASK, 0x0); */ | ||
240 | |||
241 | /* Block IGU, MISC, PXP and PXP2 parity errors as long as we don't | ||
242 | * want to handle "system kill" flow at the moment. | ||
243 | */ | ||
244 | BLOCK_PRTY_INFO(PXP, 0x7ffffff, 0x3ffffff, 0x3ffffff, 0x7ffffff), | ||
245 | BLOCK_PRTY_INFO_0(PXP2, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff), | ||
246 | BLOCK_PRTY_INFO_1(PXP2, 0x7ff, 0x7f, 0x7f, 0x7ff), | ||
247 | BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0), | ||
248 | BLOCK_PRTY_INFO(IGU, 0x7ff, 0, 0, 0x7ff), | ||
249 | BLOCK_PRTY_INFO(MISC, 0x1, 0x1, 0x1, 0x1), | ||
250 | BLOCK_PRTY_INFO(QM, 0, 0x1ff, 0xfff, 0xfff), | ||
251 | BLOCK_PRTY_INFO(DORQ, 0, 0x3, 0x3, 0x3), | ||
252 | {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, | ||
253 | GRCBASE_UPB + PB_REG_PB_PRTY_STS_CLR, 0, | ||
254 | {0xf, 0xf, 0xf}, "UPB"}, | ||
255 | {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, | ||
256 | GRCBASE_XPB + PB_REG_PB_PRTY_STS_CLR, 0, | ||
257 | {0xf, 0xf, 0xf}, "XPB"}, | ||
258 | BLOCK_PRTY_INFO(SRC, 0x4, 0x7, 0x7, 0x7), | ||
259 | BLOCK_PRTY_INFO(CDU, 0, 0x1f, 0x1f, 0x1f), | ||
260 | BLOCK_PRTY_INFO(CFC, 0, 0xf, 0xf, 0xf), | ||
261 | BLOCK_PRTY_INFO(DBG, 0, 0x1, 0x1, 0x1), | ||
262 | BLOCK_PRTY_INFO(DMAE, 0, 0xf, 0xf, 0xf), | ||
263 | BLOCK_PRTY_INFO(BRB1, 0, 0xf, 0xf, 0xf), | ||
264 | BLOCK_PRTY_INFO(PRS, (1<<6), 0xff, 0xff, 0xff), | ||
265 | BLOCK_PRTY_INFO(TSDM, 0x18, 0x7ff, 0x7ff, 0x7ff), | ||
266 | BLOCK_PRTY_INFO(CSDM, 0x8, 0x7ff, 0x7ff, 0x7ff), | ||
267 | BLOCK_PRTY_INFO(USDM, 0x38, 0x7ff, 0x7ff, 0x7ff), | ||
268 | BLOCK_PRTY_INFO(XSDM, 0x8, 0x7ff, 0x7ff, 0x7ff), | ||
269 | BLOCK_PRTY_INFO_0(TSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff), | ||
270 | BLOCK_PRTY_INFO_1(TSEM, 0, 0x3, 0x1f, 0x3f), | ||
271 | BLOCK_PRTY_INFO_0(USEM, 0, 0xffffffff, 0xffffffff, 0xffffffff), | ||
272 | BLOCK_PRTY_INFO_1(USEM, 0, 0x3, 0x1f, 0x1f), | ||
273 | BLOCK_PRTY_INFO_0(CSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff), | ||
274 | BLOCK_PRTY_INFO_1(CSEM, 0, 0x3, 0x1f, 0x1f), | ||
275 | BLOCK_PRTY_INFO_0(XSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff), | ||
276 | BLOCK_PRTY_INFO_1(XSEM, 0, 0x3, 0x1f, 0x3f), | ||
277 | }; | ||
278 | |||
279 | |||
280 | /* [28] MCP Latched rom_parity | ||
281 | * [29] MCP Latched ump_rx_parity | ||
282 | * [30] MCP Latched ump_tx_parity | ||
283 | * [31] MCP Latched scpad_parity | ||
284 | */ | ||
285 | #define MISC_AEU_ENABLE_MCP_PRTY_BITS \ | ||
286 | (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \ | ||
287 | AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \ | ||
288 | AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \ | ||
289 | AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY) | ||
290 | |||
291 | /* Below registers control the MCP parity attention output. When | ||
292 | * MISC_AEU_ENABLE_MCP_PRTY_BITS are set - attentions are | ||
293 | * enabled, when cleared - disabled. | ||
294 | */ | ||
295 | static const u32 mcp_attn_ctl_regs[] = { | ||
296 | MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0, | ||
297 | MISC_REG_AEU_ENABLE4_NIG_0, | ||
298 | MISC_REG_AEU_ENABLE4_PXP_0, | ||
299 | MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0, | ||
300 | MISC_REG_AEU_ENABLE4_NIG_1, | ||
301 | MISC_REG_AEU_ENABLE4_PXP_1 | ||
302 | }; | ||
303 | |||
304 | static inline void bnx2x_set_mcp_parity(struct bnx2x *bp, u8 enable) | ||
305 | { | ||
306 | int i; | ||
307 | u32 reg_val; | ||
308 | |||
309 | for (i = 0; i < ARRAY_SIZE(mcp_attn_ctl_regs); i++) { | ||
310 | reg_val = REG_RD(bp, mcp_attn_ctl_regs[i]); | ||
311 | |||
312 | if (enable) | ||
313 | reg_val |= MISC_AEU_ENABLE_MCP_PRTY_BITS; | ||
314 | else | ||
315 | reg_val &= ~MISC_AEU_ENABLE_MCP_PRTY_BITS; | ||
316 | |||
317 | REG_WR(bp, mcp_attn_ctl_regs[i], reg_val); | ||
318 | } | ||
319 | } | ||
320 | |||
321 | static inline u32 bnx2x_parity_reg_mask(struct bnx2x *bp, int idx) | ||
322 | { | ||
323 | if (CHIP_IS_E1(bp)) | ||
324 | return bnx2x_blocks_parity_data[idx].reg_mask.e1; | ||
325 | else if (CHIP_IS_E1H(bp)) | ||
326 | return bnx2x_blocks_parity_data[idx].reg_mask.e1h; | ||
327 | else | ||
328 | return bnx2x_blocks_parity_data[idx].reg_mask.e2; | ||
329 | } | ||
330 | |||
331 | static inline void bnx2x_disable_blocks_parity(struct bnx2x *bp) | ||
332 | { | ||
333 | int i; | ||
334 | |||
335 | for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) { | ||
336 | u32 dis_mask = bnx2x_parity_reg_mask(bp, i); | ||
337 | |||
338 | if (dis_mask) { | ||
339 | REG_WR(bp, bnx2x_blocks_parity_data[i].mask_addr, | ||
340 | dis_mask); | ||
341 | DP(NETIF_MSG_HW, "Setting parity mask " | ||
342 | "for %s to\t\t0x%x\n", | ||
343 | bnx2x_blocks_parity_data[i].name, dis_mask); | ||
344 | } | ||
345 | } | ||
346 | |||
347 | /* Disable MCP parity attentions */ | ||
348 | bnx2x_set_mcp_parity(bp, false); | ||
349 | } | ||
350 | |||
351 | /** | ||
352 | * Clear the parity error status registers. | ||
353 | */ | ||
354 | static inline void bnx2x_clear_blocks_parity(struct bnx2x *bp) | ||
355 | { | ||
356 | int i; | ||
357 | u32 reg_val, mcp_aeu_bits = | ||
358 | AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | | ||
359 | AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY | | ||
360 | AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | | ||
361 | AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY; | ||
362 | |||
363 | /* Clear SEM_FAST parities */ | ||
364 | REG_WR(bp, XSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1); | ||
365 | REG_WR(bp, TSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1); | ||
366 | REG_WR(bp, USEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1); | ||
367 | REG_WR(bp, CSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1); | ||
368 | |||
369 | for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) { | ||
370 | u32 reg_mask = bnx2x_parity_reg_mask(bp, i); | ||
371 | |||
372 | if (reg_mask) { | ||
373 | reg_val = REG_RD(bp, bnx2x_blocks_parity_data[i]. | ||
374 | sts_clr_addr); | ||
375 | if (reg_val & reg_mask) | ||
376 | DP(NETIF_MSG_HW, | ||
377 | "Parity errors in %s: 0x%x\n", | ||
378 | bnx2x_blocks_parity_data[i].name, | ||
379 | reg_val & reg_mask); | ||
380 | } | ||
381 | } | ||
382 | |||
383 | /* Check if there were parity attentions in MCP */ | ||
384 | reg_val = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_MCP); | ||
385 | if (reg_val & mcp_aeu_bits) | ||
386 | DP(NETIF_MSG_HW, "Parity error in MCP: 0x%x\n", | ||
387 | reg_val & mcp_aeu_bits); | ||
388 | |||
389 | /* Clear parity attentions in MCP: | ||
390 | * [7] clears Latched rom_parity | ||
391 | * [8] clears Latched ump_rx_parity | ||
392 | * [9] clears Latched ump_tx_parity | ||
393 | * [10] clears Latched scpad_parity (both ports) | ||
394 | */ | ||
395 | REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x780); | ||
396 | } | ||
397 | |||
398 | static inline void bnx2x_enable_blocks_parity(struct bnx2x *bp) | ||
399 | { | ||
400 | int i; | ||
401 | |||
402 | for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) { | ||
403 | u32 reg_mask = bnx2x_parity_reg_mask(bp, i); | ||
404 | |||
405 | if (reg_mask) | ||
406 | REG_WR(bp, bnx2x_blocks_parity_data[i].mask_addr, | ||
407 | bnx2x_blocks_parity_data[i].en_mask & reg_mask); | ||
408 | } | ||
409 | |||
410 | /* Enable MCP parity attentions */ | ||
411 | bnx2x_set_mcp_parity(bp, true); | ||
412 | } | ||
413 | |||
414 | |||
195 | #endif /* BNX2X_INIT_H */ | 415 | #endif /* BNX2X_INIT_H */ |
196 | 416 | ||
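The BLOCK_PRTY_INFO macros above build the mask and status-clear register names by token pasting, so one table line describes a whole hardware block. For reference, a hypothetical expansion of one entry (assuming the usual HC_REG_* register names):

        /* BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0) expands to roughly: */
        {
                HC_REG_HC_PRTY_MASK,      /* mask_addr                      */
                HC_REG_HC_PRTY_STS_CLR,   /* sts_clr_addr                   */
                0x7,                      /* en_mask: bits to re-enable     */
                { 0x7, 0x7, 0 },          /* reg_mask for E1 / E1H / E2     */
                "HC"                      /* name used in debug messages    */
        }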
diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c index 43b0de24f391..dd1210fddfff 100644 --- a/drivers/net/bnx2x/bnx2x_link.c +++ b/drivers/net/bnx2x/bnx2x_link.c | |||
@@ -1573,7 +1573,7 @@ static void bnx2x_set_aer_mmd_xgxs(struct link_params *params, | |||
1573 | 1573 | ||
1574 | offset = phy->addr + ser_lane; | 1574 | offset = phy->addr + ser_lane; |
1575 | if (CHIP_IS_E2(bp)) | 1575 | if (CHIP_IS_E2(bp)) |
1576 | aer_val = 0x2800 + offset - 1; | 1576 | aer_val = 0x3800 + offset - 1; |
1577 | else | 1577 | else |
1578 | aer_val = 0x3800 + offset; | 1578 | aer_val = 0x3800 + offset; |
1579 | CL45_WR_OVER_CL22(bp, phy, | 1579 | CL45_WR_OVER_CL22(bp, phy, |
@@ -3166,7 +3166,23 @@ u8 bnx2x_set_led(struct link_params *params, | |||
3166 | if (!vars->link_up) | 3166 | if (!vars->link_up) |
3167 | break; | 3167 | break; |
3168 | case LED_MODE_ON: | 3168 | case LED_MODE_ON: |
3169 | if (SINGLE_MEDIA_DIRECT(params)) { | 3169 | if (params->phy[EXT_PHY1].type == |
3170 | PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727 && | ||
3171 | CHIP_IS_E2(bp) && params->num_phys == 2) { | ||
3172 | /** | ||
3173 | * This is a work-around for E2+8727 Configurations | ||
3174 | */ | ||
3175 | if (mode == LED_MODE_ON || | ||
3176 | speed == SPEED_10000){ | ||
3177 | REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0); | ||
3178 | REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1); | ||
3179 | |||
3180 | tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED); | ||
3181 | EMAC_WR(bp, EMAC_REG_EMAC_LED, | ||
3182 | (tmp | EMAC_LED_OVERRIDE)); | ||
3183 | return rc; | ||
3184 | } | ||
3185 | } else if (SINGLE_MEDIA_DIRECT(params)) { | ||
3170 | /** | 3186 | /** |
3171 | * This is a work-around for HW issue found when link | 3187 | * This is a work-around for HW issue found when link |
3172 | * is up in CL73 | 3188 | * is up in CL73 |
@@ -3854,11 +3870,14 @@ static void bnx2x_8073_resolve_fc(struct bnx2x_phy *phy, | |||
3854 | pause_result); | 3870 | pause_result); |
3855 | } | 3871 | } |
3856 | } | 3872 | } |
3857 | 3873 | static u8 bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp, | |
3858 | static void bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp, | ||
3859 | struct bnx2x_phy *phy, | 3874 | struct bnx2x_phy *phy, |
3860 | u8 port) | 3875 | u8 port) |
3861 | { | 3876 | { |
3877 | u32 count = 0; | ||
3878 | u16 fw_ver1, fw_msgout; | ||
3879 | u8 rc = 0; | ||
3880 | |||
3862 | /* Boot port from external ROM */ | 3881 | /* Boot port from external ROM */ |
3863 | /* EDC grst */ | 3882 | /* EDC grst */ |
3864 | bnx2x_cl45_write(bp, phy, | 3883 | bnx2x_cl45_write(bp, phy, |
@@ -3888,56 +3907,45 @@ static void bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp, | |||
3888 | MDIO_PMA_REG_GEN_CTRL, | 3907 | MDIO_PMA_REG_GEN_CTRL, |
3889 | MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP); | 3908 | MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP); |
3890 | 3909 | ||
3891 | /* wait for 120ms for code download via SPI port */ | 3910 | /* Delay 100ms per the PHY specifications */ |
3892 | msleep(120); | 3911 | msleep(100); |
3912 | |||
3913 | /* 8073 sometimes taking longer to download */ | ||
3914 | do { | ||
3915 | count++; | ||
3916 | if (count > 300) { | ||
3917 | DP(NETIF_MSG_LINK, | ||
3918 | "bnx2x_8073_8727_external_rom_boot port %x:" | ||
3919 | "Download failed. fw version = 0x%x\n", | ||
3920 | port, fw_ver1); | ||
3921 | rc = -EINVAL; | ||
3922 | break; | ||
3923 | } | ||
3924 | |||
3925 | bnx2x_cl45_read(bp, phy, | ||
3926 | MDIO_PMA_DEVAD, | ||
3927 | MDIO_PMA_REG_ROM_VER1, &fw_ver1); | ||
3928 | bnx2x_cl45_read(bp, phy, | ||
3929 | MDIO_PMA_DEVAD, | ||
3930 | MDIO_PMA_REG_M8051_MSGOUT_REG, &fw_msgout); | ||
3931 | |||
3932 | msleep(1); | ||
3933 | } while (fw_ver1 == 0 || fw_ver1 == 0x4321 || | ||
3934 | ((fw_msgout & 0xff) != 0x03 && (phy->type == | ||
3935 | PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))); | ||
3893 | 3936 | ||
3894 | /* Clear ser_boot_ctl bit */ | 3937 | /* Clear ser_boot_ctl bit */ |
3895 | bnx2x_cl45_write(bp, phy, | 3938 | bnx2x_cl45_write(bp, phy, |
3896 | MDIO_PMA_DEVAD, | 3939 | MDIO_PMA_DEVAD, |
3897 | MDIO_PMA_REG_MISC_CTRL1, 0x0000); | 3940 | MDIO_PMA_REG_MISC_CTRL1, 0x0000); |
3898 | bnx2x_save_bcm_spirom_ver(bp, phy, port); | 3941 | bnx2x_save_bcm_spirom_ver(bp, phy, port); |
3899 | } | ||
3900 | 3942 | ||
3901 | static void bnx2x_8073_set_xaui_low_power_mode(struct bnx2x *bp, | 3943 | DP(NETIF_MSG_LINK, |
3902 | struct bnx2x_phy *phy) | 3944 | "bnx2x_8073_8727_external_rom_boot port %x:" |
3903 | { | 3945 | "Download complete. fw version = 0x%x\n", |
3904 | u16 val; | 3946 | port, fw_ver1); |
3905 | bnx2x_cl45_read(bp, phy, | ||
3906 | MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_CHIP_REV, &val); | ||
3907 | |||
3908 | if (val == 0) { | ||
3909 | /* Mustn't set low power mode in 8073 A0 */ | ||
3910 | return; | ||
3911 | } | ||
3912 | |||
3913 | /* Disable PLL sequencer (use read-modify-write to clear bit 13) */ | ||
3914 | bnx2x_cl45_read(bp, phy, | ||
3915 | MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, &val); | ||
3916 | val &= ~(1<<13); | ||
3917 | bnx2x_cl45_write(bp, phy, | ||
3918 | MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val); | ||
3919 | |||
3920 | /* PLL controls */ | ||
3921 | bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805E, 0x1077); | ||
3922 | bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805D, 0x0000); | ||
3923 | bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805C, 0x030B); | ||
3924 | bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805B, 0x1240); | ||
3925 | bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805A, 0x2490); | ||
3926 | |||
3927 | /* Tx Controls */ | ||
3928 | bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80A7, 0x0C74); | ||
3929 | bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80A6, 0x9041); | ||
3930 | bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80A5, 0x4640); | ||
3931 | |||
3932 | /* Rx Controls */ | ||
3933 | bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80FE, 0x01C4); | ||
3934 | bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80FD, 0x9249); | ||
3935 | bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80FC, 0x2015); | ||
3936 | 3947 | ||
3937 | /* Enable PLL sequencer (use read-modify-write to set bit 13) */ | 3948 | return rc; |
3938 | bnx2x_cl45_read(bp, phy, MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, &val); | ||
3939 | val |= (1<<13); | ||
3940 | bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val); | ||
3941 | } | 3949 | } |
3942 | 3950 | ||
3943 | /******************************************************************/ | 3951 | /******************************************************************/ |
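The ROM-boot helper now returns a status instead of void: after the fixed 100 ms delay it polls the firmware-version and message-out registers once per millisecond, giving up after roughly 300 attempts. The callers in the 8073/8727 common-init paths further down simply propagate the error. The polling condition, restated:

        /* Loop until the PHY reports a plausible firmware image:
         *   - fw_ver1 must be neither 0 nor the 0x4321 "no image" pattern;
         *   - for the 8073 only, the low byte of fw_msgout must reach 0x03.
         * More than 300 iterations (~300 ms on top of the initial 100 ms
         * sleep) is treated as a failed download and returns -EINVAL.
         */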
@@ -4098,8 +4106,6 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy, | |||
4098 | 4106 | ||
4099 | bnx2x_8073_set_pause_cl37(params, phy, vars); | 4107 | bnx2x_8073_set_pause_cl37(params, phy, vars); |
4100 | 4108 | ||
4101 | bnx2x_8073_set_xaui_low_power_mode(bp, phy); | ||
4102 | |||
4103 | bnx2x_cl45_read(bp, phy, | 4109 | bnx2x_cl45_read(bp, phy, |
4104 | MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1); | 4110 | MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1); |
4105 | 4111 | ||
@@ -4108,6 +4114,25 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy, | |||
4108 | 4114 | ||
4109 | DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1): 0x%x\n", tmp1); | 4115 | DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1): 0x%x\n", tmp1); |
4110 | 4116 | ||
4117 | /** | ||
4118 | * If this is forced speed, set to KR or KX (all other are not | ||
4119 | * supported) | ||
4120 | */ | ||
4121 | /* Swap polarity if required - Must be done only in non-1G mode */ | ||
4122 | if (params->lane_config & PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) { | ||
4123 | /* Configure the 8073 to swap _P and _N of the KR lines */ | ||
4124 | DP(NETIF_MSG_LINK, "Swapping polarity for the 8073\n"); | ||
4125 | /* 10G Rx/Tx and 1G Tx signal polarity swap */ | ||
4126 | bnx2x_cl45_read(bp, phy, | ||
4127 | MDIO_PMA_DEVAD, | ||
4128 | MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL, &val); | ||
4129 | bnx2x_cl45_write(bp, phy, | ||
4130 | MDIO_PMA_DEVAD, | ||
4131 | MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL, | ||
4132 | (val | (3<<9))); | ||
4133 | } | ||
4134 | |||
4135 | |||
4111 | /* Enable CL37 BAM */ | 4136 | /* Enable CL37 BAM */ |
4112 | if (REG_RD(bp, params->shmem_base + | 4137 | if (REG_RD(bp, params->shmem_base + |
4113 | offsetof(struct shmem_region, dev_info. | 4138 | offsetof(struct shmem_region, dev_info. |
@@ -4314,8 +4339,32 @@ static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy, | |||
4314 | } | 4339 | } |
4315 | 4340 | ||
4316 | if (link_up) { | 4341 | if (link_up) { |
4342 | /* Swap polarity if required */ | ||
4343 | if (params->lane_config & | ||
4344 | PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) { | ||
4345 | /* Configure the 8073 to swap P and N of the KR lines */ | ||
4346 | bnx2x_cl45_read(bp, phy, | ||
4347 | MDIO_XS_DEVAD, | ||
4348 | MDIO_XS_REG_8073_RX_CTRL_PCIE, &val1); | ||
4349 | /** | ||
4350 | * Set bit 3 to invert Rx in 1G mode and clear this bit | ||
4351 | * when it's in 10G mode. | ||
4352 | */ | ||
4353 | if (vars->line_speed == SPEED_1000) { | ||
4354 | DP(NETIF_MSG_LINK, "Swapping 1G polarity for" | ||
4355 | "the 8073\n"); | ||
4356 | val1 |= (1<<3); | ||
4357 | } else | ||
4358 | val1 &= ~(1<<3); | ||
4359 | |||
4360 | bnx2x_cl45_write(bp, phy, | ||
4361 | MDIO_XS_DEVAD, | ||
4362 | MDIO_XS_REG_8073_RX_CTRL_PCIE, | ||
4363 | val1); | ||
4364 | } | ||
4317 | bnx2x_ext_phy_10G_an_resolve(bp, phy, vars); | 4365 | bnx2x_ext_phy_10G_an_resolve(bp, phy, vars); |
4318 | bnx2x_8073_resolve_fc(phy, params, vars); | 4366 | bnx2x_8073_resolve_fc(phy, params, vars); |
4367 | vars->duplex = DUPLEX_FULL; | ||
4319 | } | 4368 | } |
4320 | return link_up; | 4369 | return link_up; |
4321 | } | 4370 | } |
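The two polarity-swap additions work together: at init time the 8073's optional digital-control register gets bits 9 and 10 set (the (3<<9) write) to swap P/N on the 10G Rx/Tx and 1G Tx paths, and at link-up time bit 3 of the XS Rx-control register is toggled so that Rx is also inverted in 1G mode but not in 10G mode. In mask form:

        /* Init time (PMA device, MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL):
         *   val |= (3 << 9);      sets bits 9..10, 10G Rx/Tx + 1G Tx swap
         * Link-up time (XS device, MDIO_XS_REG_8073_RX_CTRL_PCIE):
         *   1G link:  val |=  (1 << 3);    invert Rx
         *   10G link: val &= ~(1 << 3);    normal Rx
         */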
@@ -5062,6 +5111,7 @@ static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy, | |||
5062 | else | 5111 | else |
5063 | vars->line_speed = SPEED_10000; | 5112 | vars->line_speed = SPEED_10000; |
5064 | bnx2x_ext_phy_resolve_fc(phy, params, vars); | 5113 | bnx2x_ext_phy_resolve_fc(phy, params, vars); |
5114 | vars->duplex = DUPLEX_FULL; | ||
5065 | } | 5115 | } |
5066 | return link_up; | 5116 | return link_up; |
5067 | } | 5117 | } |
@@ -5758,8 +5808,11 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy, | |||
5758 | DP(NETIF_MSG_LINK, "port %x: External link is down\n", | 5808 | DP(NETIF_MSG_LINK, "port %x: External link is down\n", |
5759 | params->port); | 5809 | params->port); |
5760 | } | 5810 | } |
5761 | if (link_up) | 5811 | if (link_up) { |
5762 | bnx2x_ext_phy_resolve_fc(phy, params, vars); | 5812 | bnx2x_ext_phy_resolve_fc(phy, params, vars); |
5813 | vars->duplex = DUPLEX_FULL; | ||
5814 | DP(NETIF_MSG_LINK, "duplex = 0x%x\n", vars->duplex); | ||
5815 | } | ||
5763 | 5816 | ||
5764 | if ((DUAL_MEDIA(params)) && | 5817 | if ((DUAL_MEDIA(params)) && |
5765 | (phy->req_line_speed == SPEED_1000)) { | 5818 | (phy->req_line_speed == SPEED_1000)) { |
@@ -5875,10 +5928,26 @@ static void bnx2x_848xx_set_led(struct bnx2x *bp, | |||
5875 | MDIO_PMA_REG_8481_LED2_MASK, | 5928 | MDIO_PMA_REG_8481_LED2_MASK, |
5876 | 0x18); | 5929 | 0x18); |
5877 | 5930 | ||
5931 | /* Select activity source by Tx and Rx, as suggested by PHY AE */ | ||
5878 | bnx2x_cl45_write(bp, phy, | 5932 | bnx2x_cl45_write(bp, phy, |
5879 | MDIO_PMA_DEVAD, | 5933 | MDIO_PMA_DEVAD, |
5880 | MDIO_PMA_REG_8481_LED3_MASK, | 5934 | MDIO_PMA_REG_8481_LED3_MASK, |
5881 | 0x0040); | 5935 | 0x0006); |
5936 | |||
5937 | /* Select the closest activity blink rate to that in 10/100/1000 */ | ||
5938 | bnx2x_cl45_write(bp, phy, | ||
5939 | MDIO_PMA_DEVAD, | ||
5940 | MDIO_PMA_REG_8481_LED3_BLINK, | ||
5941 | 0); | ||
5942 | |||
5943 | bnx2x_cl45_read(bp, phy, | ||
5944 | MDIO_PMA_DEVAD, | ||
5945 | MDIO_PMA_REG_84823_CTL_LED_CTL_1, &val); | ||
5946 | val |= MDIO_PMA_REG_84823_LED3_STRETCH_EN; /* stretch_en for LED3*/ | ||
5947 | |||
5948 | bnx2x_cl45_write(bp, phy, | ||
5949 | MDIO_PMA_DEVAD, | ||
5950 | MDIO_PMA_REG_84823_CTL_LED_CTL_1, val); | ||
5882 | 5951 | ||
5883 | /* 'Interrupt Mask' */ | 5952 | /* 'Interrupt Mask' */ |
5884 | bnx2x_cl45_write(bp, phy, | 5953 | bnx2x_cl45_write(bp, phy, |
@@ -6126,6 +6195,7 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy, | |||
6126 | /* Check link 10G */ | 6195 | /* Check link 10G */ |
6127 | if (val2 & (1<<11)) { | 6196 | if (val2 & (1<<11)) { |
6128 | vars->line_speed = SPEED_10000; | 6197 | vars->line_speed = SPEED_10000; |
6198 | vars->duplex = DUPLEX_FULL; | ||
6129 | link_up = 1; | 6199 | link_up = 1; |
6130 | bnx2x_ext_phy_10G_an_resolve(bp, phy, vars); | 6200 | bnx2x_ext_phy_10G_an_resolve(bp, phy, vars); |
6131 | } else { /* Check Legacy speed link */ | 6201 | } else { /* Check Legacy speed link */ |
@@ -6405,6 +6475,18 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy, | |||
6405 | MDIO_PMA_DEVAD, | 6475 | MDIO_PMA_DEVAD, |
6406 | MDIO_PMA_REG_8481_LED1_MASK, | 6476 | MDIO_PMA_REG_8481_LED1_MASK, |
6407 | 0x80); | 6477 | 0x80); |
6478 | |||
6479 | /* Tell LED3 to blink on source */ | ||
6480 | bnx2x_cl45_read(bp, phy, | ||
6481 | MDIO_PMA_DEVAD, | ||
6482 | MDIO_PMA_REG_8481_LINK_SIGNAL, | ||
6483 | &val); | ||
6484 | val &= ~(7<<6); | ||
6485 | val |= (1<<6); /* A83B[8:6]= 1 */ | ||
6486 | bnx2x_cl45_write(bp, phy, | ||
6487 | MDIO_PMA_DEVAD, | ||
6488 | MDIO_PMA_REG_8481_LINK_SIGNAL, | ||
6489 | val); | ||
6408 | } | 6490 | } |
6409 | break; | 6491 | break; |
6410 | } | 6492 | } |
@@ -6489,6 +6571,7 @@ static u8 bnx2x_7101_read_status(struct bnx2x_phy *phy, | |||
6489 | MDIO_AN_DEVAD, MDIO_AN_REG_MASTER_STATUS, | 6571 | MDIO_AN_DEVAD, MDIO_AN_REG_MASTER_STATUS, |
6490 | &val2); | 6572 | &val2); |
6491 | vars->line_speed = SPEED_10000; | 6573 | vars->line_speed = SPEED_10000; |
6574 | vars->duplex = DUPLEX_FULL; | ||
6492 | DP(NETIF_MSG_LINK, "SFX7101 AN status 0x%x->Master=%x\n", | 6575 | DP(NETIF_MSG_LINK, "SFX7101 AN status 0x%x->Master=%x\n", |
6493 | val2, (val2 & (1<<14))); | 6576 | val2, (val2 & (1<<14))); |
6494 | bnx2x_ext_phy_10G_an_resolve(bp, phy, vars); | 6577 | bnx2x_ext_phy_10G_an_resolve(bp, phy, vars); |
@@ -7605,10 +7688,13 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, | |||
7605 | struct bnx2x_phy phy[PORT_MAX]; | 7688 | struct bnx2x_phy phy[PORT_MAX]; |
7606 | struct bnx2x_phy *phy_blk[PORT_MAX]; | 7689 | struct bnx2x_phy *phy_blk[PORT_MAX]; |
7607 | u16 val; | 7690 | u16 val; |
7608 | s8 port; | 7691 | s8 port = 0; |
7609 | s8 port_of_path = 0; | 7692 | s8 port_of_path = 0; |
7610 | 7693 | u32 swap_val, swap_override; | |
7611 | bnx2x_ext_phy_hw_reset(bp, 0); | 7694 | swap_val = REG_RD(bp, NIG_REG_PORT_SWAP); |
7695 | swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE); | ||
7696 | port ^= (swap_val && swap_override); | ||
7697 | bnx2x_ext_phy_hw_reset(bp, port); | ||
7612 | /* PART1 - Reset both phys */ | 7698 | /* PART1 - Reset both phys */ |
7613 | for (port = PORT_MAX - 1; port >= PORT_0; port--) { | 7699 | for (port = PORT_MAX - 1; port >= PORT_0; port--) { |
7614 | u32 shmem_base, shmem2_base; | 7700 | u32 shmem_base, shmem2_base; |
@@ -7663,7 +7749,6 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, | |||
7663 | 7749 | ||
7664 | /* PART2 - Download firmware to both phys */ | 7750 | /* PART2 - Download firmware to both phys */ |
7665 | for (port = PORT_MAX - 1; port >= PORT_0; port--) { | 7751 | for (port = PORT_MAX - 1; port >= PORT_0; port--) { |
7666 | u16 fw_ver1; | ||
7667 | if (CHIP_IS_E2(bp)) | 7752 | if (CHIP_IS_E2(bp)) |
7668 | port_of_path = 0; | 7753 | port_of_path = 0; |
7669 | else | 7754 | else |
@@ -7671,19 +7756,9 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, | |||
7671 | 7756 | ||
7672 | DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n", | 7757 | DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n", |
7673 | phy_blk[port]->addr); | 7758 | phy_blk[port]->addr); |
7674 | bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port], | 7759 | if (bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port], |
7675 | port_of_path); | 7760 | port_of_path)) |
7676 | |||
7677 | bnx2x_cl45_read(bp, phy_blk[port], | ||
7678 | MDIO_PMA_DEVAD, | ||
7679 | MDIO_PMA_REG_ROM_VER1, &fw_ver1); | ||
7680 | if (fw_ver1 == 0 || fw_ver1 == 0x4321) { | ||
7681 | DP(NETIF_MSG_LINK, | ||
7682 | "bnx2x_8073_common_init_phy port %x:" | ||
7683 | "Download failed. fw version = 0x%x\n", | ||
7684 | port, fw_ver1); | ||
7685 | return -EINVAL; | 7761 | return -EINVAL; |
7686 | } | ||
7687 | 7762 | ||
7688 | /* Only set bit 10 = 1 (Tx power down) */ | 7763 | /* Only set bit 10 = 1 (Tx power down) */ |
7689 | bnx2x_cl45_read(bp, phy_blk[port], | 7764 | bnx2x_cl45_read(bp, phy_blk[port], |
@@ -7848,27 +7923,17 @@ static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp, | |||
7848 | } | 7923 | } |
7849 | /* PART2 - Download firmware to both phys */ | 7924 | /* PART2 - Download firmware to both phys */ |
7850 | for (port = PORT_MAX - 1; port >= PORT_0; port--) { | 7925 | for (port = PORT_MAX - 1; port >= PORT_0; port--) { |
7851 | u16 fw_ver1; | ||
7852 | if (CHIP_IS_E2(bp)) | 7926 | if (CHIP_IS_E2(bp)) |
7853 | port_of_path = 0; | 7927 | port_of_path = 0; |
7854 | else | 7928 | else |
7855 | port_of_path = port; | 7929 | port_of_path = port; |
7856 | DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n", | 7930 | DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n", |
7857 | phy_blk[port]->addr); | 7931 | phy_blk[port]->addr); |
7858 | bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port], | 7932 | if (bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port], |
7859 | port_of_path); | 7933 | port_of_path)) |
7860 | bnx2x_cl45_read(bp, phy_blk[port], | ||
7861 | MDIO_PMA_DEVAD, | ||
7862 | MDIO_PMA_REG_ROM_VER1, &fw_ver1); | ||
7863 | if (fw_ver1 == 0 || fw_ver1 == 0x4321) { | ||
7864 | DP(NETIF_MSG_LINK, | ||
7865 | "bnx2x_8727_common_init_phy port %x:" | ||
7866 | "Download failed. fw version = 0x%x\n", | ||
7867 | port, fw_ver1); | ||
7868 | return -EINVAL; | 7934 | return -EINVAL; |
7869 | } | ||
7870 | } | ||
7871 | 7935 | ||
7936 | } | ||
7872 | return 0; | 7937 | return 0; |
7873 | } | 7938 | } |
7874 | 7939 | ||
@@ -7916,6 +7981,7 @@ u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[], | |||
7916 | u32 shmem2_base_path[], u32 chip_id) | 7981 | u32 shmem2_base_path[], u32 chip_id) |
7917 | { | 7982 | { |
7918 | u8 rc = 0; | 7983 | u8 rc = 0; |
7984 | u32 phy_ver; | ||
7919 | u8 phy_index; | 7985 | u8 phy_index; |
7920 | u32 ext_phy_type, ext_phy_config; | 7986 | u32 ext_phy_type, ext_phy_config; |
7921 | DP(NETIF_MSG_LINK, "Begin common phy init\n"); | 7987 | DP(NETIF_MSG_LINK, "Begin common phy init\n"); |
@@ -7923,6 +7989,16 @@ u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[], | |||
7923 | if (CHIP_REV_IS_EMUL(bp)) | 7989 | if (CHIP_REV_IS_EMUL(bp)) |
7924 | return 0; | 7990 | return 0; |
7925 | 7991 | ||
7992 | /* Check if common init was already done */ | ||
7993 | phy_ver = REG_RD(bp, shmem_base_path[0] + | ||
7994 | offsetof(struct shmem_region, | ||
7995 | port_mb[PORT_0].ext_phy_fw_version)); | ||
7996 | if (phy_ver) { | ||
7997 | DP(NETIF_MSG_LINK, "Not doing common init; phy ver is 0x%x\n", | ||
7998 | phy_ver); | ||
7999 | return 0; | ||
8000 | } | ||
8001 | |||
7926 | /* Read the ext_phy_type for arbitrary port(0) */ | 8002 | /* Read the ext_phy_type for arbitrary port(0) */ |
7927 | for (phy_index = EXT_PHY1; phy_index < MAX_PHYS; | 8003 | for (phy_index = EXT_PHY1; phy_index < MAX_PHYS; |
7928 | phy_index++) { | 8004 | phy_index++) { |
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c index 489a5512a04d..032ae184b605 100644 --- a/drivers/net/bnx2x/bnx2x_main.c +++ b/drivers/net/bnx2x/bnx2x_main.c | |||
@@ -1974,13 +1974,22 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn) | |||
1974 | vn_max_rate = 0; | 1974 | vn_max_rate = 0; |
1975 | 1975 | ||
1976 | } else { | 1976 | } else { |
1977 | u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg); | ||
1978 | |||
1977 | vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> | 1979 | vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> |
1978 | FUNC_MF_CFG_MIN_BW_SHIFT) * 100; | 1980 | FUNC_MF_CFG_MIN_BW_SHIFT) * 100; |
1979 | /* If min rate is zero - set it to 1 */ | 1981 | /* If fairness is enabled (not all min rates are zeroes) and |
1982 | if current min rate is zero - set it to 1. | ||
1983 | This is a requirement of the algorithm. */ | ||
1980 | if (bp->vn_weight_sum && (vn_min_rate == 0)) | 1984 | if (bp->vn_weight_sum && (vn_min_rate == 0)) |
1981 | vn_min_rate = DEF_MIN_RATE; | 1985 | vn_min_rate = DEF_MIN_RATE; |
1982 | vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >> | 1986 | |
1983 | FUNC_MF_CFG_MAX_BW_SHIFT) * 100; | 1987 | if (IS_MF_SI(bp)) |
1988 | /* maxCfg in percents of linkspeed */ | ||
1989 | vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100; | ||
1990 | else | ||
1991 | /* maxCfg is absolute in 100Mb units */ | ||
1992 | vn_max_rate = maxCfg * 100; | ||
1984 | } | 1993 | } |
1985 | 1994 | ||
1986 | DP(NETIF_MSG_IFUP, | 1995 | DP(NETIF_MSG_IFUP, |
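The max-rate units now depend on the multi-function mode: in switch-independent (SI) mode the configured value is a percentage of the actual line speed, while in the older mode it is an absolute count of 100 Mb/s units. With illustrative numbers:

        /* maxCfg = 30, line_speed = 10000 Mb/s:
         *   IS_MF_SI:  vn_max_rate = (10000 * 30) / 100 = 3000 Mb/s (30%)
         *   otherwise: vn_max_rate = 30 * 100           = 3000 Mb/s (fixed)
         * bnx2x_extract_max_cfg() (the helper named in this hunk) pulls
         * maxCfg out of the FUNC_MF_CFG_MAX_BW field of vn_cfg.
         */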
@@ -2006,7 +2015,8 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn) | |||
2006 | m_fair_vn.vn_credit_delta = | 2015 | m_fair_vn.vn_credit_delta = |
2007 | max_t(u32, (vn_min_rate * (T_FAIR_COEF / | 2016 | max_t(u32, (vn_min_rate * (T_FAIR_COEF / |
2008 | (8 * bp->vn_weight_sum))), | 2017 | (8 * bp->vn_weight_sum))), |
2009 | (bp->cmng.fair_vars.fair_threshold * 2)); | 2018 | (bp->cmng.fair_vars.fair_threshold + |
2019 | MIN_ABOVE_THRESH)); | ||
2010 | DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n", | 2020 | DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n", |
2011 | m_fair_vn.vn_credit_delta); | 2021 | m_fair_vn.vn_credit_delta); |
2012 | } | 2022 | } |
@@ -2301,15 +2311,10 @@ static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters) | |||
2301 | /* accept matched ucast */ | 2311 | /* accept matched ucast */ |
2302 | drop_all_ucast = 0; | 2312 | drop_all_ucast = 0; |
2303 | } | 2313 | } |
2304 | if (filters & BNX2X_ACCEPT_MULTICAST) { | 2314 | if (filters & BNX2X_ACCEPT_MULTICAST) |
2305 | /* accept matched mcast */ | 2315 | /* accept matched mcast */ |
2306 | drop_all_mcast = 0; | 2316 | drop_all_mcast = 0; |
2307 | if (IS_MF_SI(bp)) | 2317 | |
2308 | /* since mcast addresses won't arrive with ovlan, | ||
2309 | * fw needs to accept all of them in | ||
2310 | * switch-independent mode */ | ||
2311 | accp_all_mcast = 1; | ||
2312 | } | ||
2313 | if (filters & BNX2X_ACCEPT_ALL_UNICAST) { | 2318 | if (filters & BNX2X_ACCEPT_ALL_UNICAST) { |
2314 | /* accept all mcast */ | 2319 | /* accept all mcast */ |
2315 | drop_all_ucast = 0; | 2320 | drop_all_ucast = 0; |
@@ -3152,7 +3157,6 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn) | |||
3152 | #define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1) | 3157 | #define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1) |
3153 | #define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK) | 3158 | #define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK) |
3154 | #define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS | 3159 | #define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS |
3155 | #define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) | ||
3156 | 3160 | ||
3157 | /* | 3161 | /* |
3158 | * should be run under rtnl lock | 3162 | * should be run under rtnl lock |
@@ -3527,7 +3531,7 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted) | |||
3527 | try to handle this event */ | 3531 | try to handle this event */ |
3528 | bnx2x_acquire_alr(bp); | 3532 | bnx2x_acquire_alr(bp); |
3529 | 3533 | ||
3530 | if (bnx2x_chk_parity_attn(bp)) { | 3534 | if (CHIP_PARITY_ENABLED(bp) && bnx2x_chk_parity_attn(bp)) { |
3531 | bp->recovery_state = BNX2X_RECOVERY_INIT; | 3535 | bp->recovery_state = BNX2X_RECOVERY_INIT; |
3532 | bnx2x_set_reset_in_progress(bp); | 3536 | bnx2x_set_reset_in_progress(bp); |
3533 | schedule_delayed_work(&bp->reset_task, 0); | 3537 | schedule_delayed_work(&bp->reset_task, 0); |
@@ -4282,9 +4286,12 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp) | |||
4282 | def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST | | 4286 | def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST | |
4283 | BNX2X_ACCEPT_MULTICAST; | 4287 | BNX2X_ACCEPT_MULTICAST; |
4284 | #ifdef BCM_CNIC | 4288 | #ifdef BCM_CNIC |
4285 | cl_id = bnx2x_fcoe(bp, cl_id); | 4289 | if (!NO_FCOE(bp)) { |
4286 | bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST | | 4290 | cl_id = bnx2x_fcoe(bp, cl_id); |
4287 | BNX2X_ACCEPT_MULTICAST); | 4291 | bnx2x_rxq_set_mac_filters(bp, cl_id, |
4292 | BNX2X_ACCEPT_UNICAST | | ||
4293 | BNX2X_ACCEPT_MULTICAST); | ||
4294 | } | ||
4288 | #endif | 4295 | #endif |
4289 | break; | 4296 | break; |
4290 | 4297 | ||
@@ -4292,18 +4299,29 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp) | |||
4292 | def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST | | 4299 | def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST | |
4293 | BNX2X_ACCEPT_ALL_MULTICAST; | 4300 | BNX2X_ACCEPT_ALL_MULTICAST; |
4294 | #ifdef BCM_CNIC | 4301 | #ifdef BCM_CNIC |
4295 | cl_id = bnx2x_fcoe(bp, cl_id); | 4302 | /* |
4296 | bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST | | 4303 | * Prevent duplication of multicast packets by configuring FCoE |
4297 | BNX2X_ACCEPT_MULTICAST); | 4304 | * L2 Client to receive only matched unicast frames. |
4305 | */ | ||
4306 | if (!NO_FCOE(bp)) { | ||
4307 | cl_id = bnx2x_fcoe(bp, cl_id); | ||
4308 | bnx2x_rxq_set_mac_filters(bp, cl_id, | ||
4309 | BNX2X_ACCEPT_UNICAST); | ||
4310 | } | ||
4298 | #endif | 4311 | #endif |
4299 | break; | 4312 | break; |
4300 | 4313 | ||
4301 | case BNX2X_RX_MODE_PROMISC: | 4314 | case BNX2X_RX_MODE_PROMISC: |
4302 | def_q_filters |= BNX2X_PROMISCUOUS_MODE; | 4315 | def_q_filters |= BNX2X_PROMISCUOUS_MODE; |
4303 | #ifdef BCM_CNIC | 4316 | #ifdef BCM_CNIC |
4304 | cl_id = bnx2x_fcoe(bp, cl_id); | 4317 | /* |
4305 | bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST | | 4318 | * Prevent packets duplication by configuring DROP_ALL for FCoE |
4306 | BNX2X_ACCEPT_MULTICAST); | 4319 | * L2 Client. |
4320 | */ | ||
4321 | if (!NO_FCOE(bp)) { | ||
4322 | cl_id = bnx2x_fcoe(bp, cl_id); | ||
4323 | bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE); | ||
4324 | } | ||
4307 | #endif | 4325 | #endif |
4308 | /* pass management unicast packets as well */ | 4326 | /* pass management unicast packets as well */ |
4309 | llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST; | 4327 | llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST; |
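Editor's note: taken together, the hunks above give the FCoE L2 client its own filter policy per RX mode, and only when FCoE is actually present (!NO_FCOE): matched unicast plus multicast in the normal case, matched unicast only in the all-multicast case (so multicast frames are not duplicated), and drop-all in promiscuous mode. A condensed sketch of that mapping; only BNX2X_RX_MODE_PROMISC and BNX2X_ACCEPT_NONE appear verbatim above, the other case labels are assumed to follow the driver's rx-mode enum:

/* Sketch: per-mode filter choice for the FCoE L2 client, as added above. */
static void fcoe_client_set_filters(struct bnx2x *bp, u16 cl_id, u32 rx_mode)
{
	if (NO_FCOE(bp))
		return;			/* no FCoE client configured */

	cl_id = bnx2x_fcoe(bp, cl_id);

	switch (rx_mode) {
	case BNX2X_RX_MODE_NORMAL:
		bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST |
						     BNX2X_ACCEPT_MULTICAST);
		break;
	case BNX2X_RX_MODE_ALLMULTI:
		/* matched unicast only, so multicasts are not duplicated */
		bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST);
		break;
	case BNX2X_RX_MODE_PROMISC:
		/* drop everything on the FCoE client */
		bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
		break;
	}
}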
@@ -4754,7 +4772,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp) | |||
4754 | return 0; /* OK */ | 4772 | return 0; /* OK */ |
4755 | } | 4773 | } |
4756 | 4774 | ||
4757 | static void enable_blocks_attention(struct bnx2x *bp) | 4775 | static void bnx2x_enable_blocks_attention(struct bnx2x *bp) |
4758 | { | 4776 | { |
4759 | REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0); | 4777 | REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0); |
4760 | if (CHIP_IS_E2(bp)) | 4778 | if (CHIP_IS_E2(bp)) |
@@ -4808,53 +4826,9 @@ static void enable_blocks_attention(struct bnx2x *bp) | |||
4808 | REG_WR(bp, CDU_REG_CDU_INT_MASK, 0); | 4826 | REG_WR(bp, CDU_REG_CDU_INT_MASK, 0); |
4809 | REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0); | 4827 | REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0); |
4810 | /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */ | 4828 | /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */ |
4811 | REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */ | 4829 | REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */ |
4812 | } | 4830 | } |
4813 | 4831 | ||
4814 | static const struct { | ||
4815 | u32 addr; | ||
4816 | u32 mask; | ||
4817 | } bnx2x_parity_mask[] = { | ||
4818 | {PXP_REG_PXP_PRTY_MASK, 0x3ffffff}, | ||
4819 | {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff}, | ||
4820 | {PXP2_REG_PXP2_PRTY_MASK_1, 0x7f}, | ||
4821 | {HC_REG_HC_PRTY_MASK, 0x7}, | ||
4822 | {MISC_REG_MISC_PRTY_MASK, 0x1}, | ||
4823 | {QM_REG_QM_PRTY_MASK, 0x0}, | ||
4824 | {DORQ_REG_DORQ_PRTY_MASK, 0x0}, | ||
4825 | {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0}, | ||
4826 | {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0}, | ||
4827 | {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */ | ||
4828 | {CDU_REG_CDU_PRTY_MASK, 0x0}, | ||
4829 | {CFC_REG_CFC_PRTY_MASK, 0x0}, | ||
4830 | {DBG_REG_DBG_PRTY_MASK, 0x0}, | ||
4831 | {DMAE_REG_DMAE_PRTY_MASK, 0x0}, | ||
4832 | {BRB1_REG_BRB1_PRTY_MASK, 0x0}, | ||
4833 | {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */ | ||
4834 | {TSDM_REG_TSDM_PRTY_MASK, 0x18}, /* bit 3,4 */ | ||
4835 | {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */ | ||
4836 | {USDM_REG_USDM_PRTY_MASK, 0x38}, /* bit 3,4,5 */ | ||
4837 | {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */ | ||
4838 | {TSEM_REG_TSEM_PRTY_MASK_0, 0x0}, | ||
4839 | {TSEM_REG_TSEM_PRTY_MASK_1, 0x0}, | ||
4840 | {USEM_REG_USEM_PRTY_MASK_0, 0x0}, | ||
4841 | {USEM_REG_USEM_PRTY_MASK_1, 0x0}, | ||
4842 | {CSEM_REG_CSEM_PRTY_MASK_0, 0x0}, | ||
4843 | {CSEM_REG_CSEM_PRTY_MASK_1, 0x0}, | ||
4844 | {XSEM_REG_XSEM_PRTY_MASK_0, 0x0}, | ||
4845 | {XSEM_REG_XSEM_PRTY_MASK_1, 0x0} | ||
4846 | }; | ||
4847 | |||
4848 | static void enable_blocks_parity(struct bnx2x *bp) | ||
4849 | { | ||
4850 | int i; | ||
4851 | |||
4852 | for (i = 0; i < ARRAY_SIZE(bnx2x_parity_mask); i++) | ||
4853 | REG_WR(bp, bnx2x_parity_mask[i].addr, | ||
4854 | bnx2x_parity_mask[i].mask); | ||
4855 | } | ||
4856 | |||
4857 | |||
4858 | static void bnx2x_reset_common(struct bnx2x *bp) | 4832 | static void bnx2x_reset_common(struct bnx2x *bp) |
4859 | { | 4833 | { |
4860 | /* reset_common */ | 4834 | /* reset_common */ |
@@ -5082,7 +5056,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code) | |||
5082 | memset(&ilt_cli, 0, sizeof(struct ilt_client_info)); | 5056 | memset(&ilt_cli, 0, sizeof(struct ilt_client_info)); |
5083 | memset(&ilt, 0, sizeof(struct bnx2x_ilt)); | 5057 | memset(&ilt, 0, sizeof(struct bnx2x_ilt)); |
5084 | 5058 | ||
5085 | /* initalize dummy TM client */ | 5059 | /* initialize dummy TM client */ |
5086 | ilt_cli.start = 0; | 5060 | ilt_cli.start = 0; |
5087 | ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1; | 5061 | ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1; |
5088 | ilt_cli.client_num = ILT_CLIENT_TM; | 5062 | ilt_cli.client_num = ILT_CLIENT_TM; |
@@ -5341,18 +5315,14 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code) | |||
5341 | } | 5315 | } |
5342 | } | 5316 | } |
5343 | 5317 | ||
5344 | bp->port.need_hw_lock = bnx2x_hw_lock_required(bp, | ||
5345 | bp->common.shmem_base, | ||
5346 | bp->common.shmem2_base); | ||
5347 | |||
5348 | bnx2x_setup_fan_failure_detection(bp); | 5318 | bnx2x_setup_fan_failure_detection(bp); |
5349 | 5319 | ||
5350 | /* clear PXP2 attentions */ | 5320 | /* clear PXP2 attentions */ |
5351 | REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0); | 5321 | REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0); |
5352 | 5322 | ||
5353 | enable_blocks_attention(bp); | 5323 | bnx2x_enable_blocks_attention(bp); |
5354 | if (CHIP_PARITY_SUPPORTED(bp)) | 5324 | if (CHIP_PARITY_ENABLED(bp)) |
5355 | enable_blocks_parity(bp); | 5325 | bnx2x_enable_blocks_parity(bp); |
5356 | 5326 | ||
5357 | if (!BP_NOMCP(bp)) { | 5327 | if (!BP_NOMCP(bp)) { |
5358 | /* In E2 2-PORT mode, same ext phy is used for the two paths */ | 5328 | /* In E2 2-PORT mode, same ext phy is used for the two paths */ |
@@ -5548,9 +5518,6 @@ static int bnx2x_init_hw_port(struct bnx2x *bp) | |||
5548 | 5518 | ||
5549 | bnx2x_init_block(bp, MCP_BLOCK, init_stage); | 5519 | bnx2x_init_block(bp, MCP_BLOCK, init_stage); |
5550 | bnx2x_init_block(bp, DMAE_BLOCK, init_stage); | 5520 | bnx2x_init_block(bp, DMAE_BLOCK, init_stage); |
5551 | bp->port.need_hw_lock = bnx2x_hw_lock_required(bp, | ||
5552 | bp->common.shmem_base, | ||
5553 | bp->common.shmem2_base); | ||
5554 | if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base, | 5521 | if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base, |
5555 | bp->common.shmem2_base, port)) { | 5522 | bp->common.shmem2_base, port)) { |
5556 | u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : | 5523 | u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : |
@@ -8424,6 +8391,17 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp) | |||
8424 | (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) | 8391 | (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) |
8425 | bp->mdio.prtad = | 8392 | bp->mdio.prtad = |
8426 | XGXS_EXT_PHY_ADDR(ext_phy_config); | 8393 | XGXS_EXT_PHY_ADDR(ext_phy_config); |
8394 | |||
8395 | /* | ||
8396 | * Check if hw lock is required to access MDC/MDIO bus to the PHY(s) | ||
8397 | * In MF mode, it is set to cover self test cases | ||
8398 | */ | ||
8399 | if (IS_MF(bp)) | ||
8400 | bp->port.need_hw_lock = 1; | ||
8401 | else | ||
8402 | bp->port.need_hw_lock = bnx2x_hw_lock_required(bp, | ||
8403 | bp->common.shmem_base, | ||
8404 | bp->common.shmem2_base); | ||
8427 | } | 8405 | } |
8428 | 8406 | ||
8429 | static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp) | 8407 | static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp) |
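Editor's note: the addition above moves the MDC/MDIO hardware-lock decision into bnx2x_get_port_hwinfo(). In multi-function (MF) mode the lock is always requested, to cover the self-test paths; otherwise it is taken only when bnx2x_hw_lock_required() reports the board needs it. A sketch of that decision, assuming the driver's helpers (the wrapper itself is hypothetical):

/* Sketch of the decision added above, not actual driver code. */
static int bnx2x_need_mdio_hw_lock(struct bnx2x *bp)
{
	if (IS_MF(bp))
		return 1;	/* always lock in multi-function mode */

	return bnx2x_hw_lock_required(bp, bp->common.shmem_base,
				      bp->common.shmem2_base);
}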
@@ -8751,13 +8729,6 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp) | |||
8751 | dev_err(&bp->pdev->dev, "MCP disabled, " | 8729 | dev_err(&bp->pdev->dev, "MCP disabled, " |
8752 | "must load devices in order!\n"); | 8730 | "must load devices in order!\n"); |
8753 | 8731 | ||
8754 | /* Set multi queue mode */ | ||
8755 | if ((multi_mode != ETH_RSS_MODE_DISABLED) && | ||
8756 | ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) { | ||
8757 | dev_err(&bp->pdev->dev, "Multi disabled since int_mode " | ||
8758 | "requested is not MSI-X\n"); | ||
8759 | multi_mode = ETH_RSS_MODE_DISABLED; | ||
8760 | } | ||
8761 | bp->multi_mode = multi_mode; | 8732 | bp->multi_mode = multi_mode; |
8762 | bp->int_mode = int_mode; | 8733 | bp->int_mode = int_mode; |
8763 | 8734 | ||
@@ -9560,9 +9531,15 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev) | |||
9560 | /* Delete all NAPI objects */ | 9531 | /* Delete all NAPI objects */ |
9561 | bnx2x_del_all_napi(bp); | 9532 | bnx2x_del_all_napi(bp); |
9562 | 9533 | ||
9534 | /* Power on: we can't let PCI layer write to us while we are in D3 */ | ||
9535 | bnx2x_set_power_state(bp, PCI_D0); | ||
9536 | |||
9563 | /* Disable MSI/MSI-X */ | 9537 | /* Disable MSI/MSI-X */ |
9564 | bnx2x_disable_msi(bp); | 9538 | bnx2x_disable_msi(bp); |
9565 | 9539 | ||
9540 | /* Power off */ | ||
9541 | bnx2x_set_power_state(bp, PCI_D3hot); | ||
9542 | |||
9566 | /* Make sure RESET task is not scheduled before continuing */ | 9543 | /* Make sure RESET task is not scheduled before continuing */ |
9567 | cancel_delayed_work_sync(&bp->reset_task); | 9544 | cancel_delayed_work_sync(&bp->reset_task); |
9568 | 9545 | ||
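Editor's note: the two calls added above bracket the MSI/MSI-X teardown with explicit power-state transitions in bnx2x_remove_one(): the device is first brought back to D0, so the PCI layer never writes to it while it sits in D3, and it is put into D3hot once the vectors are released. A minimal ordering sketch using the driver's helpers:

/* Sketch: teardown ordering introduced above. */
static void bnx2x_teardown_irqs_powered(struct bnx2x *bp)
{
	bnx2x_set_power_state(bp, PCI_D0);	/* wake device before touching it */
	bnx2x_disable_msi(bp);			/* release MSI/MSI-X vectors */
	bnx2x_set_power_state(bp, PCI_D3hot);	/* then power it back down */
}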
diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h index bfd875b72906..e01330bb36c7 100644 --- a/drivers/net/bnx2x/bnx2x_reg.h +++ b/drivers/net/bnx2x/bnx2x_reg.h | |||
@@ -18,6 +18,8 @@ | |||
18 | * WR - Write Clear (write 1 to clear the bit) | 18 | * WR - Write Clear (write 1 to clear the bit) |
19 | * | 19 | * |
20 | */ | 20 | */ |
21 | #ifndef BNX2X_REG_H | ||
22 | #define BNX2X_REG_H | ||
21 | 23 | ||
22 | #define ATC_ATC_INT_STS_REG_ADDRESS_ERROR (0x1<<0) | 24 | #define ATC_ATC_INT_STS_REG_ADDRESS_ERROR (0x1<<0) |
23 | #define ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS (0x1<<2) | 25 | #define ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS (0x1<<2) |
@@ -39,6 +41,8 @@ | |||
39 | #define BRB1_REG_BRB1_PRTY_MASK 0x60138 | 41 | #define BRB1_REG_BRB1_PRTY_MASK 0x60138 |
40 | /* [R 4] Parity register #0 read */ | 42 | /* [R 4] Parity register #0 read */ |
41 | #define BRB1_REG_BRB1_PRTY_STS 0x6012c | 43 | #define BRB1_REG_BRB1_PRTY_STS 0x6012c |
44 | /* [RC 4] Parity register #0 read clear */ | ||
45 | #define BRB1_REG_BRB1_PRTY_STS_CLR 0x60130 | ||
42 | /* [RW 10] At address BRB1_IND_FREE_LIST_PRS_CRDT initialize free head. At | 46 | /* [RW 10] At address BRB1_IND_FREE_LIST_PRS_CRDT initialize free head. At |
43 | * address BRB1_IND_FREE_LIST_PRS_CRDT+1 initialize free tail. At address | 47 | * address BRB1_IND_FREE_LIST_PRS_CRDT+1 initialize free tail. At address |
44 | * BRB1_IND_FREE_LIST_PRS_CRDT+2 initialize parser initial credit. Warning - | 48 | * BRB1_IND_FREE_LIST_PRS_CRDT+2 initialize parser initial credit. Warning - |
@@ -132,8 +136,12 @@ | |||
132 | #define CCM_REG_CCM_INT_MASK 0xd01e4 | 136 | #define CCM_REG_CCM_INT_MASK 0xd01e4 |
133 | /* [R 11] Interrupt register #0 read */ | 137 | /* [R 11] Interrupt register #0 read */ |
134 | #define CCM_REG_CCM_INT_STS 0xd01d8 | 138 | #define CCM_REG_CCM_INT_STS 0xd01d8 |
139 | /* [RW 27] Parity mask register #0 read/write */ | ||
140 | #define CCM_REG_CCM_PRTY_MASK 0xd01f4 | ||
135 | /* [R 27] Parity register #0 read */ | 141 | /* [R 27] Parity register #0 read */ |
136 | #define CCM_REG_CCM_PRTY_STS 0xd01e8 | 142 | #define CCM_REG_CCM_PRTY_STS 0xd01e8 |
143 | /* [RC 27] Parity register #0 read clear */ | ||
144 | #define CCM_REG_CCM_PRTY_STS_CLR 0xd01ec | ||
137 | /* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS | 145 | /* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS |
138 | REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5). | 146 | REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5). |
139 | Is used to determine the number of the AG context REG-pairs written back; | 147 | Is used to determine the number of the AG context REG-pairs written back; |
@@ -350,6 +358,8 @@ | |||
350 | #define CDU_REG_CDU_PRTY_MASK 0x10104c | 358 | #define CDU_REG_CDU_PRTY_MASK 0x10104c |
351 | /* [R 5] Parity register #0 read */ | 359 | /* [R 5] Parity register #0 read */ |
352 | #define CDU_REG_CDU_PRTY_STS 0x101040 | 360 | #define CDU_REG_CDU_PRTY_STS 0x101040 |
361 | /* [RC 5] Parity register #0 read clear */ | ||
362 | #define CDU_REG_CDU_PRTY_STS_CLR 0x101044 | ||
353 | /* [RC 32] logging of error data in case of a CDU load error: | 363 | /* [RC 32] logging of error data in case of a CDU load error: |
354 | {expected_cid[15:0]; xpected_type[2:0]; xpected_region[2:0]; ctive_error; | 364 | {expected_cid[15:0]; xpected_type[2:0]; xpected_region[2:0]; ctive_error; |
355 | ype_error; ctual_active; ctual_compressed_context}; */ | 365 | ype_error; ctual_active; ctual_compressed_context}; */ |
@@ -381,6 +391,8 @@ | |||
381 | #define CFC_REG_CFC_PRTY_MASK 0x104118 | 391 | #define CFC_REG_CFC_PRTY_MASK 0x104118 |
382 | /* [R 4] Parity register #0 read */ | 392 | /* [R 4] Parity register #0 read */ |
383 | #define CFC_REG_CFC_PRTY_STS 0x10410c | 393 | #define CFC_REG_CFC_PRTY_STS 0x10410c |
394 | /* [RC 4] Parity register #0 read clear */ | ||
395 | #define CFC_REG_CFC_PRTY_STS_CLR 0x104110 | ||
384 | /* [RW 21] CID cam access (21:1 - Data; alid - 0) */ | 396 | /* [RW 21] CID cam access (21:1 - Data; alid - 0) */ |
385 | #define CFC_REG_CID_CAM 0x104800 | 397 | #define CFC_REG_CID_CAM 0x104800 |
386 | #define CFC_REG_CONTROL0 0x104028 | 398 | #define CFC_REG_CONTROL0 0x104028 |
@@ -466,6 +478,8 @@ | |||
466 | #define CSDM_REG_CSDM_PRTY_MASK 0xc22bc | 478 | #define CSDM_REG_CSDM_PRTY_MASK 0xc22bc |
467 | /* [R 11] Parity register #0 read */ | 479 | /* [R 11] Parity register #0 read */ |
468 | #define CSDM_REG_CSDM_PRTY_STS 0xc22b0 | 480 | #define CSDM_REG_CSDM_PRTY_STS 0xc22b0 |
481 | /* [RC 11] Parity register #0 read clear */ | ||
482 | #define CSDM_REG_CSDM_PRTY_STS_CLR 0xc22b4 | ||
469 | #define CSDM_REG_ENABLE_IN1 0xc2238 | 483 | #define CSDM_REG_ENABLE_IN1 0xc2238 |
470 | #define CSDM_REG_ENABLE_IN2 0xc223c | 484 | #define CSDM_REG_ENABLE_IN2 0xc223c |
471 | #define CSDM_REG_ENABLE_OUT1 0xc2240 | 485 | #define CSDM_REG_ENABLE_OUT1 0xc2240 |
@@ -556,6 +570,9 @@ | |||
556 | /* [R 32] Parity register #0 read */ | 570 | /* [R 32] Parity register #0 read */ |
557 | #define CSEM_REG_CSEM_PRTY_STS_0 0x200124 | 571 | #define CSEM_REG_CSEM_PRTY_STS_0 0x200124 |
558 | #define CSEM_REG_CSEM_PRTY_STS_1 0x200134 | 572 | #define CSEM_REG_CSEM_PRTY_STS_1 0x200134 |
573 | /* [RC 32] Parity register #0 read clear */ | ||
574 | #define CSEM_REG_CSEM_PRTY_STS_CLR_0 0x200128 | ||
575 | #define CSEM_REG_CSEM_PRTY_STS_CLR_1 0x200138 | ||
559 | #define CSEM_REG_ENABLE_IN 0x2000a4 | 576 | #define CSEM_REG_ENABLE_IN 0x2000a4 |
560 | #define CSEM_REG_ENABLE_OUT 0x2000a8 | 577 | #define CSEM_REG_ENABLE_OUT 0x2000a8 |
561 | /* [RW 32] This address space contains all registers and memories that are | 578 | /* [RW 32] This address space contains all registers and memories that are |
@@ -648,6 +665,8 @@ | |||
648 | #define DBG_REG_DBG_PRTY_MASK 0xc0a8 | 665 | #define DBG_REG_DBG_PRTY_MASK 0xc0a8 |
649 | /* [R 1] Parity register #0 read */ | 666 | /* [R 1] Parity register #0 read */ |
650 | #define DBG_REG_DBG_PRTY_STS 0xc09c | 667 | #define DBG_REG_DBG_PRTY_STS 0xc09c |
668 | /* [RC 1] Parity register #0 read clear */ | ||
669 | #define DBG_REG_DBG_PRTY_STS_CLR 0xc0a0 | ||
651 | /* [RW 1] When set the DMAE will process the commands as in E1.5. 1.The | 670 | /* [RW 1] When set the DMAE will process the commands as in E1.5. 1.The |
652 | * function that is used is always SRC-PCI; 2.VF_Valid = 0; 3.VFID=0; | 671 | * function that is used is always SRC-PCI; 2.VF_Valid = 0; 3.VFID=0; |
653 | * 4.Completion function=0; 5.Error handling=0 */ | 672 | * 4.Completion function=0; 5.Error handling=0 */ |
@@ -668,6 +687,8 @@ | |||
668 | #define DMAE_REG_DMAE_PRTY_MASK 0x102064 | 687 | #define DMAE_REG_DMAE_PRTY_MASK 0x102064 |
669 | /* [R 4] Parity register #0 read */ | 688 | /* [R 4] Parity register #0 read */ |
670 | #define DMAE_REG_DMAE_PRTY_STS 0x102058 | 689 | #define DMAE_REG_DMAE_PRTY_STS 0x102058 |
690 | /* [RC 4] Parity register #0 read clear */ | ||
691 | #define DMAE_REG_DMAE_PRTY_STS_CLR 0x10205c | ||
671 | /* [RW 1] Command 0 go. */ | 692 | /* [RW 1] Command 0 go. */ |
672 | #define DMAE_REG_GO_C0 0x102080 | 693 | #define DMAE_REG_GO_C0 0x102080 |
673 | /* [RW 1] Command 1 go. */ | 694 | /* [RW 1] Command 1 go. */ |
@@ -734,6 +755,8 @@ | |||
734 | #define DORQ_REG_DORQ_PRTY_MASK 0x170190 | 755 | #define DORQ_REG_DORQ_PRTY_MASK 0x170190 |
735 | /* [R 2] Parity register #0 read */ | 756 | /* [R 2] Parity register #0 read */ |
736 | #define DORQ_REG_DORQ_PRTY_STS 0x170184 | 757 | #define DORQ_REG_DORQ_PRTY_STS 0x170184 |
758 | /* [RC 2] Parity register #0 read clear */ | ||
759 | #define DORQ_REG_DORQ_PRTY_STS_CLR 0x170188 | ||
737 | /* [RW 8] The address to write the DPM CID to STORM. */ | 760 | /* [RW 8] The address to write the DPM CID to STORM. */ |
738 | #define DORQ_REG_DPM_CID_ADDR 0x170044 | 761 | #define DORQ_REG_DPM_CID_ADDR 0x170044 |
739 | /* [RW 5] The DPM mode CID extraction offset. */ | 762 | /* [RW 5] The DPM mode CID extraction offset. */ |
@@ -842,8 +865,12 @@ | |||
842 | /* [R 1] data availble for error memory. If this bit is clear do not red | 865 | /* [R 1] data availble for error memory. If this bit is clear do not red |
843 | * from error_handling_memory. */ | 866 | * from error_handling_memory. */ |
844 | #define IGU_REG_ERROR_HANDLING_DATA_VALID 0x130130 | 867 | #define IGU_REG_ERROR_HANDLING_DATA_VALID 0x130130 |
868 | /* [RW 11] Parity mask register #0 read/write */ | ||
869 | #define IGU_REG_IGU_PRTY_MASK 0x1300a8 | ||
845 | /* [R 11] Parity register #0 read */ | 870 | /* [R 11] Parity register #0 read */ |
846 | #define IGU_REG_IGU_PRTY_STS 0x13009c | 871 | #define IGU_REG_IGU_PRTY_STS 0x13009c |
872 | /* [RC 11] Parity register #0 read clear */ | ||
873 | #define IGU_REG_IGU_PRTY_STS_CLR 0x1300a0 | ||
847 | /* [R 4] Debug: int_handle_fsm */ | 874 | /* [R 4] Debug: int_handle_fsm */ |
848 | #define IGU_REG_INT_HANDLE_FSM 0x130050 | 875 | #define IGU_REG_INT_HANDLE_FSM 0x130050 |
849 | #define IGU_REG_LEADING_EDGE_LATCH 0x130134 | 876 | #define IGU_REG_LEADING_EDGE_LATCH 0x130134 |
@@ -1501,6 +1528,8 @@ | |||
1501 | #define MISC_REG_MISC_PRTY_MASK 0xa398 | 1528 | #define MISC_REG_MISC_PRTY_MASK 0xa398 |
1502 | /* [R 1] Parity register #0 read */ | 1529 | /* [R 1] Parity register #0 read */ |
1503 | #define MISC_REG_MISC_PRTY_STS 0xa38c | 1530 | #define MISC_REG_MISC_PRTY_STS 0xa38c |
1531 | /* [RC 1] Parity register #0 read clear */ | ||
1532 | #define MISC_REG_MISC_PRTY_STS_CLR 0xa390 | ||
1504 | #define MISC_REG_NIG_WOL_P0 0xa270 | 1533 | #define MISC_REG_NIG_WOL_P0 0xa270 |
1505 | #define MISC_REG_NIG_WOL_P1 0xa274 | 1534 | #define MISC_REG_NIG_WOL_P1 0xa274 |
1506 | /* [R 1] If set indicate that the pcie_rst_b was asserted without perst | 1535 | /* [R 1] If set indicate that the pcie_rst_b was asserted without perst |
@@ -1604,7 +1633,7 @@ | |||
1604 | (~misc_registers_sw_timer_cfg_4.sw_timer_cfg_4[1] ) is set */ | 1633 | (~misc_registers_sw_timer_cfg_4.sw_timer_cfg_4[1] ) is set */ |
1605 | #define MISC_REG_SW_TIMER_RELOAD_VAL_4 0xa2fc | 1634 | #define MISC_REG_SW_TIMER_RELOAD_VAL_4 0xa2fc |
1606 | /* [RW 32] the value of the counter for sw timers1-8. there are 8 addresses | 1635 | /* [RW 32] the value of the counter for sw timers1-8. there are 8 addresses |
1607 | in this register. addres 0 - timer 1; address 1 - timer 2, ... address 7 - | 1636 | in this register. address 0 - timer 1; address 1 - timer 2, ... address 7 - |
1608 | timer 8 */ | 1637 | timer 8 */ |
1609 | #define MISC_REG_SW_TIMER_VAL 0xa5c0 | 1638 | #define MISC_REG_SW_TIMER_VAL 0xa5c0 |
1610 | /* [RW 1] Set by the MCP to remember if one or more of the drivers is/are | 1639 | /* [RW 1] Set by the MCP to remember if one or more of the drivers is/are |
@@ -2082,6 +2111,10 @@ | |||
2082 | #define PBF_REG_PBF_INT_MASK 0x1401d4 | 2111 | #define PBF_REG_PBF_INT_MASK 0x1401d4 |
2083 | /* [R 5] Interrupt register #0 read */ | 2112 | /* [R 5] Interrupt register #0 read */ |
2084 | #define PBF_REG_PBF_INT_STS 0x1401c8 | 2113 | #define PBF_REG_PBF_INT_STS 0x1401c8 |
2114 | /* [RW 20] Parity mask register #0 read/write */ | ||
2115 | #define PBF_REG_PBF_PRTY_MASK 0x1401e4 | ||
2116 | /* [RC 20] Parity register #0 read clear */ | ||
2117 | #define PBF_REG_PBF_PRTY_STS_CLR 0x1401dc | ||
2085 | #define PB_REG_CONTROL 0 | 2118 | #define PB_REG_CONTROL 0 |
2086 | /* [RW 2] Interrupt mask register #0 read/write */ | 2119 | /* [RW 2] Interrupt mask register #0 read/write */ |
2087 | #define PB_REG_PB_INT_MASK 0x28 | 2120 | #define PB_REG_PB_INT_MASK 0x28 |
@@ -2091,6 +2124,8 @@ | |||
2091 | #define PB_REG_PB_PRTY_MASK 0x38 | 2124 | #define PB_REG_PB_PRTY_MASK 0x38 |
2092 | /* [R 4] Parity register #0 read */ | 2125 | /* [R 4] Parity register #0 read */ |
2093 | #define PB_REG_PB_PRTY_STS 0x2c | 2126 | #define PB_REG_PB_PRTY_STS 0x2c |
2127 | /* [RC 4] Parity register #0 read clear */ | ||
2128 | #define PB_REG_PB_PRTY_STS_CLR 0x30 | ||
2094 | #define PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR (0x1<<0) | 2129 | #define PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR (0x1<<0) |
2095 | #define PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW (0x1<<8) | 2130 | #define PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW (0x1<<8) |
2096 | #define PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR (0x1<<1) | 2131 | #define PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR (0x1<<1) |
@@ -2446,6 +2481,8 @@ | |||
2446 | #define PRS_REG_PRS_PRTY_MASK 0x401a4 | 2481 | #define PRS_REG_PRS_PRTY_MASK 0x401a4 |
2447 | /* [R 8] Parity register #0 read */ | 2482 | /* [R 8] Parity register #0 read */ |
2448 | #define PRS_REG_PRS_PRTY_STS 0x40198 | 2483 | #define PRS_REG_PRS_PRTY_STS 0x40198 |
2484 | /* [RC 8] Parity register #0 read clear */ | ||
2485 | #define PRS_REG_PRS_PRTY_STS_CLR 0x4019c | ||
2449 | /* [RW 8] Context region for pure acknowledge packets. Used in CFC load | 2486 | /* [RW 8] Context region for pure acknowledge packets. Used in CFC load |
2450 | request message */ | 2487 | request message */ |
2451 | #define PRS_REG_PURE_REGIONS 0x40024 | 2488 | #define PRS_REG_PURE_REGIONS 0x40024 |
@@ -2599,6 +2636,9 @@ | |||
2599 | /* [R 32] Parity register #0 read */ | 2636 | /* [R 32] Parity register #0 read */ |
2600 | #define PXP2_REG_PXP2_PRTY_STS_0 0x12057c | 2637 | #define PXP2_REG_PXP2_PRTY_STS_0 0x12057c |
2601 | #define PXP2_REG_PXP2_PRTY_STS_1 0x12058c | 2638 | #define PXP2_REG_PXP2_PRTY_STS_1 0x12058c |
2639 | /* [RC 32] Parity register #0 read clear */ | ||
2640 | #define PXP2_REG_PXP2_PRTY_STS_CLR_0 0x120580 | ||
2641 | #define PXP2_REG_PXP2_PRTY_STS_CLR_1 0x120590 | ||
2602 | /* [R 1] Debug only: The 'almost full' indication from each fifo (gives | 2642 | /* [R 1] Debug only: The 'almost full' indication from each fifo (gives |
2603 | indication about backpressure) */ | 2643 | indication about backpressure) */ |
2604 | #define PXP2_REG_RD_ALMOST_FULL_0 0x120424 | 2644 | #define PXP2_REG_RD_ALMOST_FULL_0 0x120424 |
@@ -3001,6 +3041,8 @@ | |||
3001 | #define PXP_REG_PXP_PRTY_MASK 0x103094 | 3041 | #define PXP_REG_PXP_PRTY_MASK 0x103094 |
3002 | /* [R 26] Parity register #0 read */ | 3042 | /* [R 26] Parity register #0 read */ |
3003 | #define PXP_REG_PXP_PRTY_STS 0x103088 | 3043 | #define PXP_REG_PXP_PRTY_STS 0x103088 |
3044 | /* [RC 27] Parity register #0 read clear */ | ||
3045 | #define PXP_REG_PXP_PRTY_STS_CLR 0x10308c | ||
3004 | /* [RW 4] The activity counter initial increment value sent in the load | 3046 | /* [RW 4] The activity counter initial increment value sent in the load |
3005 | request */ | 3047 | request */ |
3006 | #define QM_REG_ACTCTRINITVAL_0 0x168040 | 3048 | #define QM_REG_ACTCTRINITVAL_0 0x168040 |
@@ -3157,6 +3199,8 @@ | |||
3157 | #define QM_REG_QM_PRTY_MASK 0x168454 | 3199 | #define QM_REG_QM_PRTY_MASK 0x168454 |
3158 | /* [R 12] Parity register #0 read */ | 3200 | /* [R 12] Parity register #0 read */ |
3159 | #define QM_REG_QM_PRTY_STS 0x168448 | 3201 | #define QM_REG_QM_PRTY_STS 0x168448 |
3202 | /* [RC 12] Parity register #0 read clear */ | ||
3203 | #define QM_REG_QM_PRTY_STS_CLR 0x16844c | ||
3160 | /* [R 32] Current queues in pipeline: Queues from 32 to 63 */ | 3204 | /* [R 32] Current queues in pipeline: Queues from 32 to 63 */ |
3161 | #define QM_REG_QSTATUS_HIGH 0x16802c | 3205 | #define QM_REG_QSTATUS_HIGH 0x16802c |
3162 | /* [R 32] Current queues in pipeline: Queues from 96 to 127 */ | 3206 | /* [R 32] Current queues in pipeline: Queues from 96 to 127 */ |
@@ -3442,6 +3486,8 @@ | |||
3442 | #define QM_REG_WRRWEIGHTS_9 0x168848 | 3486 | #define QM_REG_WRRWEIGHTS_9 0x168848 |
3443 | /* [R 6] Keep the fill level of the fifo from write client 1 */ | 3487 | /* [R 6] Keep the fill level of the fifo from write client 1 */ |
3444 | #define QM_REG_XQM_WRC_FIFOLVL 0x168000 | 3488 | #define QM_REG_XQM_WRC_FIFOLVL 0x168000 |
3489 | /* [W 1] reset to parity interrupt */ | ||
3490 | #define SEM_FAST_REG_PARITY_RST 0x18840 | ||
3445 | #define SRC_REG_COUNTFREE0 0x40500 | 3491 | #define SRC_REG_COUNTFREE0 0x40500 |
3446 | /* [RW 1] If clr the searcher is compatible to E1 A0 - support only two | 3492 | /* [RW 1] If clr the searcher is compatible to E1 A0 - support only two |
3447 | ports. If set the searcher support 8 functions. */ | 3493 | ports. If set the searcher support 8 functions. */ |
@@ -3470,6 +3516,8 @@ | |||
3470 | #define SRC_REG_SRC_PRTY_MASK 0x404c8 | 3516 | #define SRC_REG_SRC_PRTY_MASK 0x404c8 |
3471 | /* [R 3] Parity register #0 read */ | 3517 | /* [R 3] Parity register #0 read */ |
3472 | #define SRC_REG_SRC_PRTY_STS 0x404bc | 3518 | #define SRC_REG_SRC_PRTY_STS 0x404bc |
3519 | /* [RC 3] Parity register #0 read clear */ | ||
3520 | #define SRC_REG_SRC_PRTY_STS_CLR 0x404c0 | ||
3473 | /* [R 4] Used to read the value of the XX protection CAM occupancy counter. */ | 3521 | /* [R 4] Used to read the value of the XX protection CAM occupancy counter. */ |
3474 | #define TCM_REG_CAM_OCCUP 0x5017c | 3522 | #define TCM_REG_CAM_OCCUP 0x5017c |
3475 | /* [RW 1] CDU AG read Interface enable. If 0 - the request input is | 3523 | /* [RW 1] CDU AG read Interface enable. If 0 - the request input is |
@@ -3596,8 +3644,12 @@ | |||
3596 | #define TCM_REG_TCM_INT_MASK 0x501dc | 3644 | #define TCM_REG_TCM_INT_MASK 0x501dc |
3597 | /* [R 11] Interrupt register #0 read */ | 3645 | /* [R 11] Interrupt register #0 read */ |
3598 | #define TCM_REG_TCM_INT_STS 0x501d0 | 3646 | #define TCM_REG_TCM_INT_STS 0x501d0 |
3647 | /* [RW 27] Parity mask register #0 read/write */ | ||
3648 | #define TCM_REG_TCM_PRTY_MASK 0x501ec | ||
3599 | /* [R 27] Parity register #0 read */ | 3649 | /* [R 27] Parity register #0 read */ |
3600 | #define TCM_REG_TCM_PRTY_STS 0x501e0 | 3650 | #define TCM_REG_TCM_PRTY_STS 0x501e0 |
3651 | /* [RC 27] Parity register #0 read clear */ | ||
3652 | #define TCM_REG_TCM_PRTY_STS_CLR 0x501e4 | ||
3601 | /* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS | 3653 | /* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS |
3602 | REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5). | 3654 | REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5). |
3603 | Is used to determine the number of the AG context REG-pairs written back; | 3655 | Is used to determine the number of the AG context REG-pairs written back; |
@@ -3755,6 +3807,10 @@ | |||
3755 | #define TM_REG_TM_INT_MASK 0x1640fc | 3807 | #define TM_REG_TM_INT_MASK 0x1640fc |
3756 | /* [R 1] Interrupt register #0 read */ | 3808 | /* [R 1] Interrupt register #0 read */ |
3757 | #define TM_REG_TM_INT_STS 0x1640f0 | 3809 | #define TM_REG_TM_INT_STS 0x1640f0 |
3810 | /* [RW 7] Parity mask register #0 read/write */ | ||
3811 | #define TM_REG_TM_PRTY_MASK 0x16410c | ||
3812 | /* [RC 7] Parity register #0 read clear */ | ||
3813 | #define TM_REG_TM_PRTY_STS_CLR 0x164104 | ||
3758 | /* [RW 8] The event id for aggregated interrupt 0 */ | 3814 | /* [RW 8] The event id for aggregated interrupt 0 */ |
3759 | #define TSDM_REG_AGG_INT_EVENT_0 0x42038 | 3815 | #define TSDM_REG_AGG_INT_EVENT_0 0x42038 |
3760 | #define TSDM_REG_AGG_INT_EVENT_1 0x4203c | 3816 | #define TSDM_REG_AGG_INT_EVENT_1 0x4203c |
@@ -3835,6 +3891,8 @@ | |||
3835 | #define TSDM_REG_TSDM_PRTY_MASK 0x422bc | 3891 | #define TSDM_REG_TSDM_PRTY_MASK 0x422bc |
3836 | /* [R 11] Parity register #0 read */ | 3892 | /* [R 11] Parity register #0 read */ |
3837 | #define TSDM_REG_TSDM_PRTY_STS 0x422b0 | 3893 | #define TSDM_REG_TSDM_PRTY_STS 0x422b0 |
3894 | /* [RC 11] Parity register #0 read clear */ | ||
3895 | #define TSDM_REG_TSDM_PRTY_STS_CLR 0x422b4 | ||
3838 | /* [RW 5] The number of time_slots in the arbitration cycle */ | 3896 | /* [RW 5] The number of time_slots in the arbitration cycle */ |
3839 | #define TSEM_REG_ARB_CYCLE_SIZE 0x180034 | 3897 | #define TSEM_REG_ARB_CYCLE_SIZE 0x180034 |
3840 | /* [RW 3] The source that is associated with arbitration element 0. Source | 3898 | /* [RW 3] The source that is associated with arbitration element 0. Source |
@@ -3914,6 +3972,9 @@ | |||
3914 | #define TSEM_REG_SLOW_EXT_STORE_EMPTY 0x1802a0 | 3972 | #define TSEM_REG_SLOW_EXT_STORE_EMPTY 0x1802a0 |
3915 | /* [RW 8] List of free threads . There is a bit per thread. */ | 3973 | /* [RW 8] List of free threads . There is a bit per thread. */ |
3916 | #define TSEM_REG_THREADS_LIST 0x1802e4 | 3974 | #define TSEM_REG_THREADS_LIST 0x1802e4 |
3975 | /* [RC 32] Parity register #0 read clear */ | ||
3976 | #define TSEM_REG_TSEM_PRTY_STS_CLR_0 0x180118 | ||
3977 | #define TSEM_REG_TSEM_PRTY_STS_CLR_1 0x180128 | ||
3917 | /* [RW 3] The arbitration scheme of time_slot 0 */ | 3978 | /* [RW 3] The arbitration scheme of time_slot 0 */ |
3918 | #define TSEM_REG_TS_0_AS 0x180038 | 3979 | #define TSEM_REG_TS_0_AS 0x180038 |
3919 | /* [RW 3] The arbitration scheme of time_slot 10 */ | 3980 | /* [RW 3] The arbitration scheme of time_slot 10 */ |
@@ -4116,6 +4177,8 @@ | |||
4116 | #define UCM_REG_UCM_INT_STS 0xe01c8 | 4177 | #define UCM_REG_UCM_INT_STS 0xe01c8 |
4117 | /* [R 27] Parity register #0 read */ | 4178 | /* [R 27] Parity register #0 read */ |
4118 | #define UCM_REG_UCM_PRTY_STS 0xe01d8 | 4179 | #define UCM_REG_UCM_PRTY_STS 0xe01d8 |
4180 | /* [RC 27] Parity register #0 read clear */ | ||
4181 | #define UCM_REG_UCM_PRTY_STS_CLR 0xe01dc | ||
4119 | /* [RW 2] The size of AG context region 0 in REG-pairs. Designates the MS | 4182 | /* [RW 2] The size of AG context region 0 in REG-pairs. Designates the MS |
4120 | REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5). | 4183 | REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5). |
4121 | Is used to determine the number of the AG context REG-pairs written back; | 4184 | Is used to determine the number of the AG context REG-pairs written back; |
@@ -4292,6 +4355,8 @@ | |||
4292 | #define USDM_REG_USDM_PRTY_MASK 0xc42c0 | 4355 | #define USDM_REG_USDM_PRTY_MASK 0xc42c0 |
4293 | /* [R 11] Parity register #0 read */ | 4356 | /* [R 11] Parity register #0 read */ |
4294 | #define USDM_REG_USDM_PRTY_STS 0xc42b4 | 4357 | #define USDM_REG_USDM_PRTY_STS 0xc42b4 |
4358 | /* [RC 11] Parity register #0 read clear */ | ||
4359 | #define USDM_REG_USDM_PRTY_STS_CLR 0xc42b8 | ||
4295 | /* [RW 5] The number of time_slots in the arbitration cycle */ | 4360 | /* [RW 5] The number of time_slots in the arbitration cycle */ |
4296 | #define USEM_REG_ARB_CYCLE_SIZE 0x300034 | 4361 | #define USEM_REG_ARB_CYCLE_SIZE 0x300034 |
4297 | /* [RW 3] The source that is associated with arbitration element 0. Source | 4362 | /* [RW 3] The source that is associated with arbitration element 0. Source |
@@ -4421,6 +4486,9 @@ | |||
4421 | /* [R 32] Parity register #0 read */ | 4486 | /* [R 32] Parity register #0 read */ |
4422 | #define USEM_REG_USEM_PRTY_STS_0 0x300124 | 4487 | #define USEM_REG_USEM_PRTY_STS_0 0x300124 |
4423 | #define USEM_REG_USEM_PRTY_STS_1 0x300134 | 4488 | #define USEM_REG_USEM_PRTY_STS_1 0x300134 |
4489 | /* [RC 32] Parity register #0 read clear */ | ||
4490 | #define USEM_REG_USEM_PRTY_STS_CLR_0 0x300128 | ||
4491 | #define USEM_REG_USEM_PRTY_STS_CLR_1 0x300138 | ||
4424 | /* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64 | 4492 | /* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64 |
4425 | * VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. */ | 4493 | * VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. */ |
4426 | #define USEM_REG_VFPF_ERR_NUM 0x300380 | 4494 | #define USEM_REG_VFPF_ERR_NUM 0x300380 |
@@ -4797,6 +4865,8 @@ | |||
4797 | #define XSDM_REG_XSDM_PRTY_MASK 0x1662bc | 4865 | #define XSDM_REG_XSDM_PRTY_MASK 0x1662bc |
4798 | /* [R 11] Parity register #0 read */ | 4866 | /* [R 11] Parity register #0 read */ |
4799 | #define XSDM_REG_XSDM_PRTY_STS 0x1662b0 | 4867 | #define XSDM_REG_XSDM_PRTY_STS 0x1662b0 |
4868 | /* [RC 11] Parity register #0 read clear */ | ||
4869 | #define XSDM_REG_XSDM_PRTY_STS_CLR 0x1662b4 | ||
4800 | /* [RW 5] The number of time_slots in the arbitration cycle */ | 4870 | /* [RW 5] The number of time_slots in the arbitration cycle */ |
4801 | #define XSEM_REG_ARB_CYCLE_SIZE 0x280034 | 4871 | #define XSEM_REG_ARB_CYCLE_SIZE 0x280034 |
4802 | /* [RW 3] The source that is associated with arbitration element 0. Source | 4872 | /* [RW 3] The source that is associated with arbitration element 0. Source |
@@ -4929,6 +4999,9 @@ | |||
4929 | /* [R 32] Parity register #0 read */ | 4999 | /* [R 32] Parity register #0 read */ |
4930 | #define XSEM_REG_XSEM_PRTY_STS_0 0x280124 | 5000 | #define XSEM_REG_XSEM_PRTY_STS_0 0x280124 |
4931 | #define XSEM_REG_XSEM_PRTY_STS_1 0x280134 | 5001 | #define XSEM_REG_XSEM_PRTY_STS_1 0x280134 |
5002 | /* [RC 32] Parity register #0 read clear */ | ||
5003 | #define XSEM_REG_XSEM_PRTY_STS_CLR_0 0x280128 | ||
5004 | #define XSEM_REG_XSEM_PRTY_STS_CLR_1 0x280138 | ||
4932 | #define MCPR_NVM_ACCESS_ENABLE_EN (1L<<0) | 5005 | #define MCPR_NVM_ACCESS_ENABLE_EN (1L<<0) |
4933 | #define MCPR_NVM_ACCESS_ENABLE_WR_EN (1L<<1) | 5006 | #define MCPR_NVM_ACCESS_ENABLE_WR_EN (1L<<1) |
4934 | #define MCPR_NVM_ADDR_NVM_ADDR_VALUE (0xffffffL<<0) | 5007 | #define MCPR_NVM_ADDR_NVM_ADDR_VALUE (0xffffffL<<0) |
@@ -6121,7 +6194,11 @@ Theotherbitsarereservedandshouldbezero*/ | |||
6121 | #define MDIO_CTL_REG_84823_MEDIA_PRIORITY_COPPER 0x0000 | 6194 | #define MDIO_CTL_REG_84823_MEDIA_PRIORITY_COPPER 0x0000 |
6122 | #define MDIO_CTL_REG_84823_MEDIA_PRIORITY_FIBER 0x0100 | 6195 | #define MDIO_CTL_REG_84823_MEDIA_PRIORITY_FIBER 0x0100 |
6123 | #define MDIO_CTL_REG_84823_MEDIA_FIBER_1G 0x1000 | 6196 | #define MDIO_CTL_REG_84823_MEDIA_FIBER_1G 0x1000 |
6197 | #define MDIO_CTL_REG_84823_USER_CTRL_REG 0x4005 | ||
6198 | #define MDIO_CTL_REG_84823_USER_CTRL_CMS 0x0080 | ||
6124 | 6199 | ||
6200 | #define MDIO_PMA_REG_84823_CTL_LED_CTL_1 0xa8e3 | ||
6201 | #define MDIO_PMA_REG_84823_LED3_STRETCH_EN 0x0080 | ||
6125 | 6202 | ||
6126 | #define IGU_FUNC_BASE 0x0400 | 6203 | #define IGU_FUNC_BASE 0x0400 |
6127 | 6204 | ||
@@ -6316,3 +6393,4 @@ static inline u8 calc_crc8(u32 data, u8 crc) | |||
6316 | } | 6393 | } |
6317 | 6394 | ||
6318 | 6395 | ||
6396 | #endif /* BNX2X_REG_H */ | ||
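Editor's note: most of the additions to bnx2x_reg.h are *_PRTY_STS_CLR offsets, i.e. read-clear ("RC") mirrors of the parity status registers, where a single read both returns and clears the latched parity bits. A hedged sketch of how such a register would typically be polled, assuming the driver's REG_RD() and DP() macros (the helper itself is illustrative):

/* Sketch: reading an RC register returns the latched bits and clears them. */
static u32 bnx2x_read_and_clear_parity(struct bnx2x *bp, u32 sts_clr_reg)
{
	u32 latched = REG_RD(bp, sts_clr_reg);	/* e.g. BRB1_REG_BRB1_PRTY_STS_CLR */

	if (latched)
		DP(NETIF_MSG_HW, "parity bits 0x%08x cleared on read\n", latched);

	return latched;
}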
diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c index 6e4d9b144cc4..3445ded6674f 100644 --- a/drivers/net/bnx2x/bnx2x_stats.c +++ b/drivers/net/bnx2x/bnx2x_stats.c | |||
@@ -158,6 +158,11 @@ static void bnx2x_storm_stats_post(struct bnx2x *bp) | |||
158 | 158 | ||
159 | spin_lock_bh(&bp->stats_lock); | 159 | spin_lock_bh(&bp->stats_lock); |
160 | 160 | ||
161 | if (bp->stats_pending) { | ||
162 | spin_unlock_bh(&bp->stats_lock); | ||
163 | return; | ||
164 | } | ||
165 | |||
161 | ramrod_data.drv_counter = bp->stats_counter++; | 166 | ramrod_data.drv_counter = bp->stats_counter++; |
162 | ramrod_data.collect_port = bp->port.pmf ? 1 : 0; | 167 | ramrod_data.collect_port = bp->port.pmf ? 1 : 0; |
163 | for_each_eth_queue(bp, i) | 168 | for_each_eth_queue(bp, i) |
@@ -1234,14 +1239,14 @@ void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event) | |||
1234 | if (unlikely(bp->panic)) | 1239 | if (unlikely(bp->panic)) |
1235 | return; | 1240 | return; |
1236 | 1241 | ||
1242 | bnx2x_stats_stm[bp->stats_state][event].action(bp); | ||
1243 | |||
1237 | /* Protect a state change flow */ | 1244 | /* Protect a state change flow */ |
1238 | spin_lock_bh(&bp->stats_lock); | 1245 | spin_lock_bh(&bp->stats_lock); |
1239 | state = bp->stats_state; | 1246 | state = bp->stats_state; |
1240 | bp->stats_state = bnx2x_stats_stm[state][event].next_state; | 1247 | bp->stats_state = bnx2x_stats_stm[state][event].next_state; |
1241 | spin_unlock_bh(&bp->stats_lock); | 1248 | spin_unlock_bh(&bp->stats_lock); |
1242 | 1249 | ||
1243 | bnx2x_stats_stm[state][event].action(bp); | ||
1244 | |||
1245 | if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp)) | 1250 | if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp)) |
1246 | DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n", | 1251 | DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n", |
1247 | state, event, bp->stats_state); | 1252 | state, event, bp->stats_state); |
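Editor's note: two related fixes land in bnx2x_stats.c above. bnx2x_storm_stats_post() now bails out under stats_lock when a previous statistics ramrod is still pending, and bnx2x_stats_handle() runs the state-machine action for the current state before recording the state transition under the lock. A sketch of the pending check in isolation; note the real code keeps the lock held while it builds and posts the ramrod, which this simplified helper does not show:

/* Sketch: skip posting a new stats ramrod while one is still outstanding. */
static bool bnx2x_stats_post_allowed(struct bnx2x *bp)
{
	bool allowed;

	spin_lock_bh(&bp->stats_lock);
	allowed = !bp->stats_pending;	/* previous ramrod not yet completed? */
	spin_unlock_bh(&bp->stats_lock);

	return allowed;
}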
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index 48cf24ff4e6f..1024ae158227 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c | |||
@@ -840,7 +840,7 @@ static int ad_lacpdu_send(struct port *port) | |||
840 | lacpdu_header = (struct lacpdu_header *)skb_put(skb, length); | 840 | lacpdu_header = (struct lacpdu_header *)skb_put(skb, length); |
841 | 841 | ||
842 | memcpy(lacpdu_header->hdr.h_dest, lacpdu_mcast_addr, ETH_ALEN); | 842 | memcpy(lacpdu_header->hdr.h_dest, lacpdu_mcast_addr, ETH_ALEN); |
843 | /* Note: source addres is set to be the member's PERMANENT address, | 843 | /* Note: source address is set to be the member's PERMANENT address, |
844 | because we use it to identify loopback lacpdus in receive. */ | 844 | because we use it to identify loopback lacpdus in receive. */ |
845 | memcpy(lacpdu_header->hdr.h_source, slave->perm_hwaddr, ETH_ALEN); | 845 | memcpy(lacpdu_header->hdr.h_source, slave->perm_hwaddr, ETH_ALEN); |
846 | lacpdu_header->hdr.h_proto = PKT_TYPE_LACPDU; | 846 | lacpdu_header->hdr.h_proto = PKT_TYPE_LACPDU; |
@@ -881,7 +881,7 @@ static int ad_marker_send(struct port *port, struct bond_marker *marker) | |||
881 | marker_header = (struct bond_marker_header *)skb_put(skb, length); | 881 | marker_header = (struct bond_marker_header *)skb_put(skb, length); |
882 | 882 | ||
883 | memcpy(marker_header->hdr.h_dest, lacpdu_mcast_addr, ETH_ALEN); | 883 | memcpy(marker_header->hdr.h_dest, lacpdu_mcast_addr, ETH_ALEN); |
884 | /* Note: source addres is set to be the member's PERMANENT address, | 884 | /* Note: source address is set to be the member's PERMANENT address, |
885 | because we use it to identify loopback MARKERs in receive. */ | 885 | because we use it to identify loopback MARKERs in receive. */ |
886 | memcpy(marker_header->hdr.h_source, slave->perm_hwaddr, ETH_ALEN); | 886 | memcpy(marker_header->hdr.h_source, slave->perm_hwaddr, ETH_ALEN); |
887 | marker_header->hdr.h_proto = PKT_TYPE_LACPDU; | 887 | marker_header->hdr.h_proto = PKT_TYPE_LACPDU; |
@@ -1916,7 +1916,7 @@ int bond_3ad_bind_slave(struct slave *slave) | |||
1916 | return -1; | 1916 | return -1; |
1917 | } | 1917 | } |
1918 | 1918 | ||
1919 | //check that the slave has not been intialized yet. | 1919 | //check that the slave has not been initialized yet. |
1920 | if (SLAVE_AD_INFO(slave).port.slave != slave) { | 1920 | if (SLAVE_AD_INFO(slave).port.slave != slave) { |
1921 | 1921 | ||
1922 | // port initialization | 1922 | // port initialization |
@@ -2470,6 +2470,10 @@ int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct net_device *dev, struct pac | |||
2470 | if (!(dev->flags & IFF_MASTER)) | 2470 | if (!(dev->flags & IFF_MASTER)) |
2471 | goto out; | 2471 | goto out; |
2472 | 2472 | ||
2473 | skb = skb_share_check(skb, GFP_ATOMIC); | ||
2474 | if (!skb) | ||
2475 | goto out; | ||
2476 | |||
2473 | if (!pskb_may_pull(skb, sizeof(struct lacpdu))) | 2477 | if (!pskb_may_pull(skb, sizeof(struct lacpdu))) |
2474 | goto out; | 2478 | goto out; |
2475 | 2479 | ||
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index f4e638c65129..5c6fba802f2b 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c | |||
@@ -326,6 +326,10 @@ static int rlb_arp_recv(struct sk_buff *skb, struct net_device *bond_dev, struct | |||
326 | goto out; | 326 | goto out; |
327 | } | 327 | } |
328 | 328 | ||
329 | skb = skb_share_check(skb, GFP_ATOMIC); | ||
330 | if (!skb) | ||
331 | goto out; | ||
332 | |||
329 | if (!pskb_may_pull(skb, arp_hdr_len(bond_dev))) | 333 | if (!pskb_may_pull(skb, arp_hdr_len(bond_dev))) |
330 | goto out; | 334 | goto out; |
331 | 335 | ||
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index b1025b85acf1..163e0b06eaa5 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -2733,6 +2733,10 @@ static int bond_arp_rcv(struct sk_buff *skb, struct net_device *dev, struct pack | |||
2733 | if (!slave || !slave_do_arp_validate(bond, slave)) | 2733 | if (!slave || !slave_do_arp_validate(bond, slave)) |
2734 | goto out_unlock; | 2734 | goto out_unlock; |
2735 | 2735 | ||
2736 | skb = skb_share_check(skb, GFP_ATOMIC); | ||
2737 | if (!skb) | ||
2738 | goto out_unlock; | ||
2739 | |||
2736 | if (!pskb_may_pull(skb, arp_hdr_len(dev))) | 2740 | if (!pskb_may_pull(skb, arp_hdr_len(dev))) |
2737 | goto out_unlock; | 2741 | goto out_unlock; |
2738 | 2742 | ||
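Editor's note: all three bonding receive hooks above gain the same guard. The skb is passed through skb_share_check() before pskb_may_pull(), so a shared skb is cloned (or the frame dropped on allocation failure) before the header may be modified. A sketch of the pattern in isolation, with a hypothetical handler name and error-return convention:

/* Sketch: unshare the skb before pulling/parsing headers (pattern added above). */
static int example_rcv(struct sk_buff *skb, unsigned int hdr_len)
{
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;		/* clone failed, frame is gone */

	if (!pskb_may_pull(skb, hdr_len)) {
		kfree_skb(skb);
		return -EINVAL;		/* frame too short for the header */
	}

	/* ... parse the now-private, linear header ... */
	kfree_skb(skb);
	return 0;
}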
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h index 4da384cc7603..31fe980e4e28 100644 --- a/drivers/net/bonding/bonding.h +++ b/drivers/net/bonding/bonding.h | |||
@@ -18,7 +18,6 @@ | |||
18 | #include <linux/timer.h> | 18 | #include <linux/timer.h> |
19 | #include <linux/proc_fs.h> | 19 | #include <linux/proc_fs.h> |
20 | #include <linux/if_bonding.h> | 20 | #include <linux/if_bonding.h> |
21 | #include <linux/kobject.h> | ||
22 | #include <linux/cpumask.h> | 21 | #include <linux/cpumask.h> |
23 | #include <linux/in6.h> | 22 | #include <linux/in6.h> |
24 | #include "bond_3ad.h" | 23 | #include "bond_3ad.h" |
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig index d5a9db60ade9..5dec456fd4a4 100644 --- a/drivers/net/can/Kconfig +++ b/drivers/net/can/Kconfig | |||
@@ -23,7 +23,7 @@ config CAN_SLCAN | |||
23 | 23 | ||
24 | As only the sending and receiving of CAN frames is implemented, this | 24 | As only the sending and receiving of CAN frames is implemented, this |
25 | driver should work with the (serial/USB) CAN hardware from: | 25 | driver should work with the (serial/USB) CAN hardware from: |
26 | www.canusb.com / www.can232.com / www.mictronic.com / www.canhack.de | 26 | www.canusb.com / www.can232.com / www.mictronics.de / www.canhack.de |
27 | 27 | ||
28 | Userspace tools to attach the SLCAN line discipline (slcan_attach, | 28 | Userspace tools to attach the SLCAN line discipline (slcan_attach, |
29 | slcand) can be found in the can-utils at the SocketCAN SVN, see | 29 | slcand) can be found in the can-utils at the SocketCAN SVN, see |
@@ -117,6 +117,8 @@ source "drivers/net/can/sja1000/Kconfig" | |||
117 | 117 | ||
118 | source "drivers/net/can/usb/Kconfig" | 118 | source "drivers/net/can/usb/Kconfig" |
119 | 119 | ||
120 | source "drivers/net/can/softing/Kconfig" | ||
121 | |||
120 | config CAN_DEBUG_DEVICES | 122 | config CAN_DEBUG_DEVICES |
121 | bool "CAN devices debugging messages" | 123 | bool "CAN devices debugging messages" |
122 | depends on CAN | 124 | depends on CAN |
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile index 07ca159ba3f9..53c82a71778e 100644 --- a/drivers/net/can/Makefile +++ b/drivers/net/can/Makefile | |||
@@ -9,6 +9,7 @@ obj-$(CONFIG_CAN_DEV) += can-dev.o | |||
9 | can-dev-y := dev.o | 9 | can-dev-y := dev.o |
10 | 10 | ||
11 | obj-y += usb/ | 11 | obj-y += usb/ |
12 | obj-y += softing/ | ||
12 | 13 | ||
13 | obj-$(CONFIG_CAN_SJA1000) += sja1000/ | 14 | obj-$(CONFIG_CAN_SJA1000) += sja1000/ |
14 | obj-$(CONFIG_CAN_MSCAN) += mscan/ | 15 | obj-$(CONFIG_CAN_MSCAN) += mscan/ |
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c index 7ef83d06f7ed..57d2ffbbb433 100644 --- a/drivers/net/can/at91_can.c +++ b/drivers/net/can/at91_can.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * at91_can.c - CAN network driver for AT91 SoC CAN controller | 2 | * at91_can.c - CAN network driver for AT91 SoC CAN controller |
3 | * | 3 | * |
4 | * (C) 2007 by Hans J. Koch <hjk@hansjkoch.de> | 4 | * (C) 2007 by Hans J. Koch <hjk@hansjkoch.de> |
5 | * (C) 2008, 2009, 2010 by Marc Kleine-Budde <kernel@pengutronix.de> | 5 | * (C) 2008, 2009, 2010, 2011 by Marc Kleine-Budde <kernel@pengutronix.de> |
6 | * | 6 | * |
7 | * This software may be distributed under the terms of the GNU General | 7 | * This software may be distributed under the terms of the GNU General |
8 | * Public License ("GPL") version 2 as distributed in the 'COPYING' | 8 | * Public License ("GPL") version 2 as distributed in the 'COPYING' |
@@ -30,6 +30,7 @@ | |||
30 | #include <linux/module.h> | 30 | #include <linux/module.h> |
31 | #include <linux/netdevice.h> | 31 | #include <linux/netdevice.h> |
32 | #include <linux/platform_device.h> | 32 | #include <linux/platform_device.h> |
33 | #include <linux/rtnetlink.h> | ||
33 | #include <linux/skbuff.h> | 34 | #include <linux/skbuff.h> |
34 | #include <linux/spinlock.h> | 35 | #include <linux/spinlock.h> |
35 | #include <linux/string.h> | 36 | #include <linux/string.h> |
@@ -40,22 +41,23 @@ | |||
40 | 41 | ||
41 | #include <mach/board.h> | 42 | #include <mach/board.h> |
42 | 43 | ||
43 | #define AT91_NAPI_WEIGHT 12 | 44 | #define AT91_NAPI_WEIGHT 11 |
44 | 45 | ||
45 | /* | 46 | /* |
46 | * RX/TX Mailbox split | 47 | * RX/TX Mailbox split |
47 | * don't dare to touch | 48 | * don't dare to touch |
48 | */ | 49 | */ |
49 | #define AT91_MB_RX_NUM 12 | 50 | #define AT91_MB_RX_NUM 11 |
50 | #define AT91_MB_TX_SHIFT 2 | 51 | #define AT91_MB_TX_SHIFT 2 |
51 | 52 | ||
52 | #define AT91_MB_RX_FIRST 0 | 53 | #define AT91_MB_RX_FIRST 1 |
53 | #define AT91_MB_RX_LAST (AT91_MB_RX_FIRST + AT91_MB_RX_NUM - 1) | 54 | #define AT91_MB_RX_LAST (AT91_MB_RX_FIRST + AT91_MB_RX_NUM - 1) |
54 | 55 | ||
55 | #define AT91_MB_RX_MASK(i) ((1 << (i)) - 1) | 56 | #define AT91_MB_RX_MASK(i) ((1 << (i)) - 1) |
56 | #define AT91_MB_RX_SPLIT 8 | 57 | #define AT91_MB_RX_SPLIT 8 |
57 | #define AT91_MB_RX_LOW_LAST (AT91_MB_RX_SPLIT - 1) | 58 | #define AT91_MB_RX_LOW_LAST (AT91_MB_RX_SPLIT - 1) |
58 | #define AT91_MB_RX_LOW_MASK (AT91_MB_RX_MASK(AT91_MB_RX_SPLIT)) | 59 | #define AT91_MB_RX_LOW_MASK (AT91_MB_RX_MASK(AT91_MB_RX_SPLIT) & \ |
60 | ~AT91_MB_RX_MASK(AT91_MB_RX_FIRST)) | ||
59 | 61 | ||
60 | #define AT91_MB_TX_NUM (1 << AT91_MB_TX_SHIFT) | 62 | #define AT91_MB_TX_NUM (1 << AT91_MB_TX_SHIFT) |
61 | #define AT91_MB_TX_FIRST (AT91_MB_RX_LAST + 1) | 63 | #define AT91_MB_TX_FIRST (AT91_MB_RX_LAST + 1) |
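Editor's note: with AT91_MB_RX_FIRST now 1 and AT91_MB_RX_NUM now 11, the resulting mailbox map is: mailbox 0 disabled (chip erratum), mailboxes 1-11 used for RX (1-7 as the "low" group, 8-11 as the "high" group, 11 in overwrite mode), and mailboxes 12-15 used for TX; AT91_MB_RX_LOW_MASK no longer includes bit 0. The derived values, worked out here with illustrative EX_* names rather than the driver's:

/* Sketch: the mailbox layout implied by the new constants. */
#define EX_MB_RX_FIRST		1				/* mb 0 disabled (erratum) */
#define EX_MB_RX_NUM		11
#define EX_MB_RX_LAST		(EX_MB_RX_FIRST + EX_MB_RX_NUM - 1)	/* = 11 */
#define EX_MB_RX_LOW_MASK	(((1 << 8) - 1) & ~((1 << 1) - 1))	/* = 0xfe, mbs 1-7 */
#define EX_MB_TX_FIRST		(EX_MB_RX_LAST + 1)			/* = 12, TX uses 12-15 */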
@@ -168,6 +170,8 @@ struct at91_priv { | |||
168 | 170 | ||
169 | struct clk *clk; | 171 | struct clk *clk; |
170 | struct at91_can_data *pdata; | 172 | struct at91_can_data *pdata; |
173 | |||
174 | canid_t mb0_id; | ||
171 | }; | 175 | }; |
172 | 176 | ||
173 | static struct can_bittiming_const at91_bittiming_const = { | 177 | static struct can_bittiming_const at91_bittiming_const = { |
@@ -220,6 +224,18 @@ static inline void set_mb_mode(const struct at91_priv *priv, unsigned int mb, | |||
220 | set_mb_mode_prio(priv, mb, mode, 0); | 224 | set_mb_mode_prio(priv, mb, mode, 0); |
221 | } | 225 | } |
222 | 226 | ||
227 | static inline u32 at91_can_id_to_reg_mid(canid_t can_id) | ||
228 | { | ||
229 | u32 reg_mid; | ||
230 | |||
231 | if (can_id & CAN_EFF_FLAG) | ||
232 | reg_mid = (can_id & CAN_EFF_MASK) | AT91_MID_MIDE; | ||
233 | else | ||
234 | reg_mid = (can_id & CAN_SFF_MASK) << 18; | ||
235 | |||
236 | return reg_mid; | ||
237 | } | ||
238 | |||
223 | /* | 239 | /* |
224 | * Swtich transceiver on or off | 240 | * Swtich transceiver on or off |
225 | */ | 241 | */ |
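Editor's note: the new at91_can_id_to_reg_mid() helper above centralizes the CAN-ID to MID-register encoding that at91_start_xmit() used to open-code: extended IDs keep their 29 bits and set AT91_MID_MIDE, standard IDs are shifted into bits 28:18. A small worked example, assuming the CAN constants from linux/can.h and the AT91 defines in this driver:

/* Sketch: what the helper produces for the two CAN ID formats. */
static void example_mid_encoding(void)
{
	u32 mid_std = at91_can_id_to_reg_mid(0x123);
	/* standard ID: 0x123 << 18 = 0x048c0000, MIDE bit clear */

	u32 mid_ext = at91_can_id_to_reg_mid(0x1abcdef0 | CAN_EFF_FLAG);
	/* extended ID: (0x1abcdef0 & CAN_EFF_MASK) | AT91_MID_MIDE */

	(void)mid_std;
	(void)mid_ext;
}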
@@ -233,12 +249,22 @@ static void at91_setup_mailboxes(struct net_device *dev) | |||
233 | { | 249 | { |
234 | struct at91_priv *priv = netdev_priv(dev); | 250 | struct at91_priv *priv = netdev_priv(dev); |
235 | unsigned int i; | 251 | unsigned int i; |
252 | u32 reg_mid; | ||
236 | 253 | ||
237 | /* | 254 | /* |
238 | * The first 12 mailboxes are used as a reception FIFO. The | 255 | * Due to a chip bug (errata 50.2.6.3 & 50.3.5.3) the first |
239 | * last mailbox is configured with overwrite option. The | 256 | * mailbox is disabled. The next 11 mailboxes are used as a |
240 | * overwrite flag indicates a FIFO overflow. | 257 | * reception FIFO. The last mailbox is configured with |
258 | * overwrite option. The overwrite flag indicates a FIFO | ||
259 | * overflow. | ||
241 | */ | 260 | */ |
261 | reg_mid = at91_can_id_to_reg_mid(priv->mb0_id); | ||
262 | for (i = 0; i < AT91_MB_RX_FIRST; i++) { | ||
263 | set_mb_mode(priv, i, AT91_MB_MODE_DISABLED); | ||
264 | at91_write(priv, AT91_MID(i), reg_mid); | ||
265 | at91_write(priv, AT91_MCR(i), 0x0); /* clear dlc */ | ||
266 | } | ||
267 | |||
242 | for (i = AT91_MB_RX_FIRST; i < AT91_MB_RX_LAST; i++) | 268 | for (i = AT91_MB_RX_FIRST; i < AT91_MB_RX_LAST; i++) |
243 | set_mb_mode(priv, i, AT91_MB_MODE_RX); | 269 | set_mb_mode(priv, i, AT91_MB_MODE_RX); |
244 | set_mb_mode(priv, AT91_MB_RX_LAST, AT91_MB_MODE_RX_OVRWR); | 270 | set_mb_mode(priv, AT91_MB_RX_LAST, AT91_MB_MODE_RX_OVRWR); |
@@ -254,7 +280,8 @@ static void at91_setup_mailboxes(struct net_device *dev) | |||
254 | set_mb_mode_prio(priv, i, AT91_MB_MODE_TX, 0); | 280 | set_mb_mode_prio(priv, i, AT91_MB_MODE_TX, 0); |
255 | 281 | ||
256 | /* Reset tx and rx helper pointers */ | 282 | /* Reset tx and rx helper pointers */ |
257 | priv->tx_next = priv->tx_echo = priv->rx_next = 0; | 283 | priv->tx_next = priv->tx_echo = 0; |
284 | priv->rx_next = AT91_MB_RX_FIRST; | ||
258 | } | 285 | } |
259 | 286 | ||
260 | static int at91_set_bittiming(struct net_device *dev) | 287 | static int at91_set_bittiming(struct net_device *dev) |
@@ -372,12 +399,7 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
372 | netdev_err(dev, "BUG! TX buffer full when queue awake!\n"); | 399 | netdev_err(dev, "BUG! TX buffer full when queue awake!\n"); |
373 | return NETDEV_TX_BUSY; | 400 | return NETDEV_TX_BUSY; |
374 | } | 401 | } |
375 | 402 | reg_mid = at91_can_id_to_reg_mid(cf->can_id); | |
376 | if (cf->can_id & CAN_EFF_FLAG) | ||
377 | reg_mid = (cf->can_id & CAN_EFF_MASK) | AT91_MID_MIDE; | ||
378 | else | ||
379 | reg_mid = (cf->can_id & CAN_SFF_MASK) << 18; | ||
380 | |||
381 | reg_mcr = ((cf->can_id & CAN_RTR_FLAG) ? AT91_MCR_MRTR : 0) | | 403 | reg_mcr = ((cf->can_id & CAN_RTR_FLAG) ? AT91_MCR_MRTR : 0) | |
382 | (cf->can_dlc << 16) | AT91_MCR_MTCR; | 404 | (cf->can_dlc << 16) | AT91_MCR_MTCR; |
383 | 405 | ||
@@ -539,27 +561,31 @@ static void at91_read_msg(struct net_device *dev, unsigned int mb) | |||
539 | * | 561 | * |
540 | * Theory of Operation: | 562 | * Theory of Operation: |
541 | * | 563 | * |
542 | * 12 of the 16 mailboxes on the chip are reserved for RX. we split | 564 | * 11 of the 16 mailboxes on the chip are reserved for RX. we split |
543 | * them into 2 groups. The lower group holds 8 and upper 4 mailboxes. | 565 | * them into 2 groups. The lower group holds 7 and upper 4 mailboxes. |
544 | * | 566 | * |
545 | * Like it or not, but the chip always saves a received CAN message | 567 | * Like it or not, but the chip always saves a received CAN message |
546 | * into the first free mailbox it finds (starting with the | 568 | * into the first free mailbox it finds (starting with the |
547 | * lowest). This makes it very difficult to read the messages in the | 569 | * lowest). This makes it very difficult to read the messages in the |
548 | * right order from the chip. This is how we work around that problem: | 570 | * right order from the chip. This is how we work around that problem: |
549 | * | 571 | * |
550 | * The first message goes into mb nr. 0 and issues an interrupt. All | 572 | * The first message goes into mb nr. 1 and issues an interrupt. All |
551 | * rx ints are disabled in the interrupt handler and a napi poll is | 573 | * rx ints are disabled in the interrupt handler and a napi poll is |
552 | * scheduled. We read the mailbox, but do _not_ reenable the mb (to | 574 | * scheduled. We read the mailbox, but do _not_ reenable the mb (to |
553 | * receive another message). | 575 | * receive another message). |
554 | * | 576 | * |
555 | * lower mbxs upper | 577 | * lower mbxs upper |
556 | * ______^______ __^__ | 578 | * ____^______ __^__ |
557 | * / \ / \ | 579 | * / \ / \ |
558 | * +-+-+-+-+-+-+-+-++-+-+-+-+ | 580 | * +-+-+-+-+-+-+-+-++-+-+-+-+ |
559 | * |x|x|x|x|x|x|x|x|| | | | | | 581 | * | |x|x|x|x|x|x|x|| | | | | |
560 | * +-+-+-+-+-+-+-+-++-+-+-+-+ | 582 | * +-+-+-+-+-+-+-+-++-+-+-+-+ |
561 | * 0 0 0 0 0 0 0 0 0 0 1 1 \ mail | 583 | * 0 0 0 0 0 0 0 0 0 0 1 1 \ mail |
562 | * 0 1 2 3 4 5 6 7 8 9 0 1 / box | 584 | * 0 1 2 3 4 5 6 7 8 9 0 1 / box |
585 | * ^ | ||
586 | * | | ||
587 | * \ | ||
588 | * unused, due to chip bug | ||
563 | * | 589 | * |
564 | * The variable priv->rx_next points to the next mailbox to read a | 590 | * The variable priv->rx_next points to the next mailbox to read a |
565 | * message from. As long we're in the lower mailboxes we just read the | 591 | * message from. As long we're in the lower mailboxes we just read the |
@@ -590,10 +616,10 @@ static int at91_poll_rx(struct net_device *dev, int quota) | |||
590 | "order of incoming frames cannot be guaranteed\n"); | 616 | "order of incoming frames cannot be guaranteed\n"); |
591 | 617 | ||
592 | again: | 618 | again: |
593 | for (mb = find_next_bit(addr, AT91_MB_RX_NUM, priv->rx_next); | 619 | for (mb = find_next_bit(addr, AT91_MB_RX_LAST + 1, priv->rx_next); |
594 | mb < AT91_MB_RX_NUM && quota > 0; | 620 | mb < AT91_MB_RX_LAST + 1 && quota > 0; |
595 | reg_sr = at91_read(priv, AT91_SR), | 621 | reg_sr = at91_read(priv, AT91_SR), |
596 | mb = find_next_bit(addr, AT91_MB_RX_NUM, ++priv->rx_next)) { | 622 | mb = find_next_bit(addr, AT91_MB_RX_LAST + 1, ++priv->rx_next)) { |
597 | at91_read_msg(dev, mb); | 623 | at91_read_msg(dev, mb); |
598 | 624 | ||
599 | /* reactivate mailboxes */ | 625 | /* reactivate mailboxes */ |
@@ -610,8 +636,8 @@ static int at91_poll_rx(struct net_device *dev, int quota) | |||
610 | 636 | ||
611 | /* upper group completed, look again in lower */ | 637 | /* upper group completed, look again in lower */ |
612 | if (priv->rx_next > AT91_MB_RX_LOW_LAST && | 638 | if (priv->rx_next > AT91_MB_RX_LOW_LAST && |
613 | quota > 0 && mb >= AT91_MB_RX_NUM) { | 639 | quota > 0 && mb > AT91_MB_RX_LAST) { |
614 | priv->rx_next = 0; | 640 | priv->rx_next = AT91_MB_RX_FIRST; |
615 | goto again; | 641 | goto again; |
616 | } | 642 | } |
617 | 643 | ||
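The theory-of-operation comment and the loop above boil down to one rule: scan the pending mailboxes strictly upwards, and wrap back to the first RX mailbox only after the upper group has been drained. A minimal, self-contained sketch of that scan order follows; it is not part of the driver, the first and last mailbox numbers (1 and 11) are hard-coded, and a plain bitmask stands in for the chip's status register.

#include <stdio.h>

#define RX_FIRST	1u
#define RX_LAST		11u

int main(void)
{
	unsigned int pending = 0x0ffe;	/* mailboxes 1..11 each hold a frame */
	unsigned int next = RX_FIRST, mb;
	int injected = 0;

	while (pending) {
		/* find the next pending mailbox at or above 'next' */
		for (mb = next; mb <= RX_LAST; mb++)
			if (pending & (1u << mb))
				break;
		if (mb > RX_LAST) {
			/* upper group drained: the lower group is re-armed
			 * and the scan restarts at the first RX mailbox */
			next = RX_FIRST;
			continue;
		}
		printf("read mailbox %u\n", mb);
		pending &= ~(1u << mb);
		if (!injected && mb == RX_LAST) {
			/* pretend one more frame got flagged in the lower
			 * group meanwhile, purely to exercise the wrap */
			pending |= 1u << RX_FIRST;
			injected = 1;
		}
		next = mb + 1;
	}
	return 0;
}

Compiled and run as is, it prints mailboxes 1 through 11 followed by 1 again, which is the ordering guarantee the comment is after.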
@@ -1037,6 +1063,64 @@ static const struct net_device_ops at91_netdev_ops = { | |||
1037 | .ndo_start_xmit = at91_start_xmit, | 1063 | .ndo_start_xmit = at91_start_xmit, |
1038 | }; | 1064 | }; |
1039 | 1065 | ||
1066 | static ssize_t at91_sysfs_show_mb0_id(struct device *dev, | ||
1067 | struct device_attribute *attr, char *buf) | ||
1068 | { | ||
1069 | struct at91_priv *priv = netdev_priv(to_net_dev(dev)); | ||
1070 | |||
1071 | if (priv->mb0_id & CAN_EFF_FLAG) | ||
1072 | return snprintf(buf, PAGE_SIZE, "0x%08x\n", priv->mb0_id); | ||
1073 | else | ||
1074 | return snprintf(buf, PAGE_SIZE, "0x%03x\n", priv->mb0_id); | ||
1075 | } | ||
1076 | |||
1077 | static ssize_t at91_sysfs_set_mb0_id(struct device *dev, | ||
1078 | struct device_attribute *attr, const char *buf, size_t count) | ||
1079 | { | ||
1080 | struct net_device *ndev = to_net_dev(dev); | ||
1081 | struct at91_priv *priv = netdev_priv(ndev); | ||
1082 | unsigned long can_id; | ||
1083 | ssize_t ret; | ||
1084 | int err; | ||
1085 | |||
1086 | rtnl_lock(); | ||
1087 | |||
1088 | if (ndev->flags & IFF_UP) { | ||
1089 | ret = -EBUSY; | ||
1090 | goto out; | ||
1091 | } | ||
1092 | |||
1093 | err = strict_strtoul(buf, 0, &can_id); | ||
1094 | if (err) { | ||
1095 | ret = err; | ||
1096 | goto out; | ||
1097 | } | ||
1098 | |||
1099 | if (can_id & CAN_EFF_FLAG) | ||
1100 | can_id &= CAN_EFF_MASK | CAN_EFF_FLAG; | ||
1101 | else | ||
1102 | can_id &= CAN_SFF_MASK; | ||
1103 | |||
1104 | priv->mb0_id = can_id; | ||
1105 | ret = count; | ||
1106 | |||
1107 | out: | ||
1108 | rtnl_unlock(); | ||
1109 | return ret; | ||
1110 | } | ||
1111 | |||
1112 | static DEVICE_ATTR(mb0_id, S_IWUSR | S_IRUGO, | ||
1113 | at91_sysfs_show_mb0_id, at91_sysfs_set_mb0_id); | ||
1114 | |||
1115 | static struct attribute *at91_sysfs_attrs[] = { | ||
1116 | &dev_attr_mb0_id.attr, | ||
1117 | NULL, | ||
1118 | }; | ||
1119 | |||
1120 | static struct attribute_group at91_sysfs_attr_group = { | ||
1121 | .attrs = at91_sysfs_attrs, | ||
1122 | }; | ||
1123 | |||
1040 | static int __devinit at91_can_probe(struct platform_device *pdev) | 1124 | static int __devinit at91_can_probe(struct platform_device *pdev) |
1041 | { | 1125 | { |
1042 | struct net_device *dev; | 1126 | struct net_device *dev; |
@@ -1082,6 +1166,7 @@ static int __devinit at91_can_probe(struct platform_device *pdev) | |||
1082 | dev->netdev_ops = &at91_netdev_ops; | 1166 | dev->netdev_ops = &at91_netdev_ops; |
1083 | dev->irq = irq; | 1167 | dev->irq = irq; |
1084 | dev->flags |= IFF_ECHO; | 1168 | dev->flags |= IFF_ECHO; |
1169 | dev->sysfs_groups[0] = &at91_sysfs_attr_group; | ||
1085 | 1170 | ||
1086 | priv = netdev_priv(dev); | 1171 | priv = netdev_priv(dev); |
1087 | priv->can.clock.freq = clk_get_rate(clk); | 1172 | priv->can.clock.freq = clk_get_rate(clk); |
@@ -1093,6 +1178,7 @@ static int __devinit at91_can_probe(struct platform_device *pdev) | |||
1093 | priv->dev = dev; | 1178 | priv->dev = dev; |
1094 | priv->clk = clk; | 1179 | priv->clk = clk; |
1095 | priv->pdata = pdev->dev.platform_data; | 1180 | priv->pdata = pdev->dev.platform_data; |
1181 | priv->mb0_id = 0x7ff; | ||
1096 | 1182 | ||
1097 | netif_napi_add(dev, &priv->napi, at91_poll, AT91_NAPI_WEIGHT); | 1183 | netif_napi_add(dev, &priv->napi, at91_poll, AT91_NAPI_WEIGHT); |
1098 | 1184 | ||
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c index b9a6d7a5a739..366f5cc050ae 100644 --- a/drivers/net/can/janz-ican3.c +++ b/drivers/net/can/janz-ican3.c | |||
@@ -1618,7 +1618,7 @@ static ssize_t ican3_sysfs_set_term(struct device *dev, | |||
1618 | return count; | 1618 | return count; |
1619 | } | 1619 | } |
1620 | 1620 | ||
1621 | static DEVICE_ATTR(termination, S_IWUGO | S_IRUGO, ican3_sysfs_show_term, | 1621 | static DEVICE_ATTR(termination, S_IWUSR | S_IRUGO, ican3_sysfs_show_term, |
1622 | ican3_sysfs_set_term); | 1622 | ican3_sysfs_set_term); |
1623 | 1623 | ||
1624 | static struct attribute *ican3_sysfs_attrs[] = { | 1624 | static struct attribute *ican3_sysfs_attrs[] = { |
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c index 7ab534aee452..7513c4523ac4 100644 --- a/drivers/net/can/mcp251x.c +++ b/drivers/net/can/mcp251x.c | |||
@@ -940,7 +940,7 @@ static int mcp251x_open(struct net_device *net) | |||
940 | goto open_unlock; | 940 | goto open_unlock; |
941 | } | 941 | } |
942 | 942 | ||
943 | priv->wq = create_freezeable_workqueue("mcp251x_wq"); | 943 | priv->wq = create_freezable_workqueue("mcp251x_wq"); |
944 | INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler); | 944 | INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler); |
945 | INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler); | 945 | INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler); |
946 | 946 | ||
diff --git a/drivers/net/can/mscan/Kconfig b/drivers/net/can/mscan/Kconfig index 27d1d398e25e..d38706958af6 100644 --- a/drivers/net/can/mscan/Kconfig +++ b/drivers/net/can/mscan/Kconfig | |||
@@ -1,5 +1,5 @@ | |||
1 | config CAN_MSCAN | 1 | config CAN_MSCAN |
2 | depends on CAN_DEV && (PPC || M68K || M68KNOMMU) | 2 | depends on CAN_DEV && (PPC || M68K) |
3 | tristate "Support for Freescale MSCAN based chips" | 3 | tristate "Support for Freescale MSCAN based chips" |
4 | ---help--- | 4 | ---help--- |
5 | The Motorola Scalable Controller Area Network (MSCAN) definition | 5 | The Motorola Scalable Controller Area Network (MSCAN) definition |
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c index c42e97268248..e54712b22c27 100644 --- a/drivers/net/can/pch_can.c +++ b/drivers/net/can/pch_can.c | |||
@@ -185,7 +185,7 @@ struct pch_can_priv { | |||
185 | 185 | ||
186 | static struct can_bittiming_const pch_can_bittiming_const = { | 186 | static struct can_bittiming_const pch_can_bittiming_const = { |
187 | .name = KBUILD_MODNAME, | 187 | .name = KBUILD_MODNAME, |
188 | .tseg1_min = 1, | 188 | .tseg1_min = 2, |
189 | .tseg1_max = 16, | 189 | .tseg1_max = 16, |
190 | .tseg2_min = 1, | 190 | .tseg2_min = 1, |
191 | .tseg2_max = 8, | 191 | .tseg2_max = 8, |
@@ -959,13 +959,13 @@ static void __devexit pch_can_remove(struct pci_dev *pdev) | |||
959 | struct pch_can_priv *priv = netdev_priv(ndev); | 959 | struct pch_can_priv *priv = netdev_priv(ndev); |
960 | 960 | ||
961 | unregister_candev(priv->ndev); | 961 | unregister_candev(priv->ndev); |
962 | pci_iounmap(pdev, priv->regs); | ||
963 | if (priv->use_msi) | 962 | if (priv->use_msi) |
964 | pci_disable_msi(priv->dev); | 963 | pci_disable_msi(priv->dev); |
965 | pci_release_regions(pdev); | 964 | pci_release_regions(pdev); |
966 | pci_disable_device(pdev); | 965 | pci_disable_device(pdev); |
967 | pci_set_drvdata(pdev, NULL); | 966 | pci_set_drvdata(pdev, NULL); |
968 | pch_can_reset(priv); | 967 | pch_can_reset(priv); |
968 | pci_iounmap(pdev, priv->regs); | ||
969 | free_candev(priv->ndev); | 969 | free_candev(priv->ndev); |
970 | } | 970 | } |
971 | 971 | ||
@@ -1238,6 +1238,7 @@ static int __devinit pch_can_probe(struct pci_dev *pdev, | |||
1238 | priv->use_msi = 0; | 1238 | priv->use_msi = 0; |
1239 | } else { | 1239 | } else { |
1240 | netdev_err(ndev, "PCH CAN opened with MSI\n"); | 1240 | netdev_err(ndev, "PCH CAN opened with MSI\n"); |
1241 | pci_set_master(pdev); | ||
1241 | priv->use_msi = 1; | 1242 | priv->use_msi = 1; |
1242 | } | 1243 | } |
1243 | 1244 | ||
diff --git a/drivers/net/can/softing/Kconfig b/drivers/net/can/softing/Kconfig new file mode 100644 index 000000000000..5de46a9a77bb --- /dev/null +++ b/drivers/net/can/softing/Kconfig | |||
@@ -0,0 +1,30 @@ | |||
1 | config CAN_SOFTING | ||
2 | tristate "Softing GmbH CAN generic support" | ||
3 | depends on CAN_DEV && HAS_IOMEM | ||
4 | ---help--- | ||
5 | Support for CAN cards from Softing GmbH & some cards | ||
6 | from Vector GmbH. | ||
7 | Softing GmbH CAN cards come with 1 or 2 physical busses. | ||
8 | Those cards typically use Dual Port RAM to communicate | ||
9 | with the host CPU. The interface is then identical for PCI | ||
10 | and PCMCIA cards. This driver operates on a platform device, | ||
11 | which has been created by the softing_cs or softing_pci driver. | ||
12 | Warning: | ||
13 | The API of the card does not allow fine control per bus, but | ||
14 | controls the 2 busses on the card together. | ||
15 | As such, some actions (start/stop/busoff recovery) on one bus | ||
16 | must temporarily bring down the other bus as well. | ||
17 | |||
18 | config CAN_SOFTING_CS | ||
19 | tristate "Softing GmbH CAN PCMCIA cards" | ||
20 | depends on PCMCIA | ||
21 | depends on CAN_SOFTING | ||
22 | ---help--- | ||
23 | Support for PCMCIA cards from Softing GmbH & some cards | ||
24 | from Vector GmbH. | ||
25 | You need firmware for these, which you can get at | ||
26 | http://developer.berlios.de/projects/socketcan/ | ||
27 | This version of the driver is written against | ||
28 | firmware version 4.6 (softing-fw-4.6-binaries.tar.gz) | ||
29 | In order to use the card as a CAN device, you need the Softing generic | ||
30 | support too. | ||
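Taken together, a configuration fragment that builds the generic layer and the PCMCIA front-end as modules might look like the following; the CAN core and PCMCIA options are listed only for context and reflect an assumed typical setup:

CONFIG_CAN=m
CONFIG_CAN_DEV=m
CONFIG_PCMCIA=m
CONFIG_CAN_SOFTING=m
CONFIG_CAN_SOFTING_CS=m

The firmware images mentioned in the help text are obtained separately and still have to be available to the firmware loader at runtime.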
diff --git a/drivers/net/can/softing/Makefile b/drivers/net/can/softing/Makefile new file mode 100644 index 000000000000..c5e5016c742e --- /dev/null +++ b/drivers/net/can/softing/Makefile | |||
@@ -0,0 +1,6 @@ | |||
1 | |||
2 | softing-y := softing_main.o softing_fw.o | ||
3 | obj-$(CONFIG_CAN_SOFTING) += softing.o | ||
4 | obj-$(CONFIG_CAN_SOFTING_CS) += softing_cs.o | ||
5 | |||
6 | ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG | ||
diff --git a/drivers/net/can/softing/softing.h b/drivers/net/can/softing/softing.h new file mode 100644 index 000000000000..7ec9f4db3d52 --- /dev/null +++ b/drivers/net/can/softing/softing.h | |||
@@ -0,0 +1,167 @@ | |||
1 | /* | ||
2 | * softing common interfaces | ||
3 | * | ||
4 | * by Kurt Van Dijck, 2008-2010 | ||
5 | */ | ||
6 | |||
7 | #include <linux/atomic.h> | ||
8 | #include <linux/netdevice.h> | ||
9 | #include <linux/ktime.h> | ||
10 | #include <linux/mutex.h> | ||
11 | #include <linux/spinlock.h> | ||
12 | #include <linux/can.h> | ||
13 | #include <linux/can/dev.h> | ||
14 | |||
15 | #include "softing_platform.h" | ||
16 | |||
17 | struct softing; | ||
18 | |||
19 | struct softing_priv { | ||
20 | struct can_priv can; /* must be the first member! */ | ||
21 | struct net_device *netdev; | ||
22 | struct softing *card; | ||
23 | struct { | ||
24 | int pending; | ||
25 | /* variables which hold the circular buffer */ | ||
26 | int echo_put; | ||
27 | int echo_get; | ||
28 | } tx; | ||
29 | struct can_bittiming_const btr_const; | ||
30 | int index; | ||
31 | uint8_t output; | ||
32 | uint16_t chip; | ||
33 | }; | ||
34 | #define netdev2softing(netdev) ((struct softing_priv *)netdev_priv(netdev)) | ||
35 | |||
36 | struct softing { | ||
37 | const struct softing_platform_data *pdat; | ||
38 | struct platform_device *pdev; | ||
39 | struct net_device *net[2]; | ||
40 | spinlock_t spin; /* protect this structure & DPRAM access */ | ||
41 | ktime_t ts_ref; | ||
42 | ktime_t ts_overflow; /* timestamp overflow value, in ktime */ | ||
43 | |||
44 | struct { | ||
45 | /* indication of firmware status */ | ||
46 | int up; | ||
47 | /* protection of the 'up' variable */ | ||
48 | struct mutex lock; | ||
49 | } fw; | ||
50 | struct { | ||
51 | int nr; | ||
52 | int requested; | ||
53 | int svc_count; | ||
54 | unsigned int dpram_position; | ||
55 | } irq; | ||
56 | struct { | ||
57 | int pending; | ||
58 | int last_bus; | ||
59 | /* | ||
60 | * keep the bus that last tx'd a message, | ||
61 | * in order to let every netdev queue resume | ||
62 | */ | ||
63 | } tx; | ||
64 | __iomem uint8_t *dpram; | ||
65 | unsigned long dpram_phys; | ||
66 | unsigned long dpram_size; | ||
67 | struct { | ||
68 | uint16_t fw_version, hw_version, license, serial; | ||
69 | uint16_t chip[2]; | ||
70 | unsigned int freq; /* remote cpu's operating frequency */ | ||
71 | } id; | ||
72 | }; | ||
73 | |||
74 | extern int softing_default_output(struct net_device *netdev); | ||
75 | |||
76 | extern ktime_t softing_raw2ktime(struct softing *card, u32 raw); | ||
77 | |||
78 | extern int softing_chip_poweron(struct softing *card); | ||
79 | |||
80 | extern int softing_bootloader_command(struct softing *card, int16_t cmd, | ||
81 | const char *msg); | ||
82 | |||
83 | /* Load firmware after reset */ | ||
84 | extern int softing_load_fw(const char *file, struct softing *card, | ||
85 | __iomem uint8_t *virt, unsigned int size, int offset); | ||
86 | |||
87 | /* Load final application firmware after bootloader */ | ||
88 | extern int softing_load_app_fw(const char *file, struct softing *card); | ||
89 | |||
90 | /* | ||
91 | * enable or disable irq | ||
92 | * only called with fw.lock locked | ||
93 | */ | ||
94 | extern int softing_enable_irq(struct softing *card, int enable); | ||
95 | |||
96 | /* start/stop 1 bus on card */ | ||
97 | extern int softing_startstop(struct net_device *netdev, int up); | ||
98 | |||
99 | /* netif_rx() */ | ||
100 | extern int softing_netdev_rx(struct net_device *netdev, | ||
101 | const struct can_frame *msg, ktime_t ktime); | ||
102 | |||
103 | /* SOFTING DPRAM mappings */ | ||
104 | #define DPRAM_RX 0x0000 | ||
105 | #define DPRAM_RX_SIZE 32 | ||
106 | #define DPRAM_RX_CNT 16 | ||
107 | #define DPRAM_RX_RD 0x0201 /* uint8_t */ | ||
108 | #define DPRAM_RX_WR 0x0205 /* uint8_t */ | ||
109 | #define DPRAM_RX_LOST 0x0207 /* uint8_t */ | ||
110 | |||
111 | #define DPRAM_FCT_PARAM 0x0300 /* int16_t [20] */ | ||
112 | #define DPRAM_FCT_RESULT 0x0328 /* int16_t */ | ||
113 | #define DPRAM_FCT_HOST 0x032b /* uint16_t */ | ||
114 | |||
115 | #define DPRAM_INFO_BUSSTATE 0x0331 /* uint16_t */ | ||
116 | #define DPRAM_INFO_BUSSTATE2 0x0335 /* uint16_t */ | ||
117 | #define DPRAM_INFO_ERRSTATE 0x0339 /* uint16_t */ | ||
118 | #define DPRAM_INFO_ERRSTATE2 0x033d /* uint16_t */ | ||
119 | #define DPRAM_RESET 0x0341 /* uint16_t */ | ||
120 | #define DPRAM_CLR_RECV_FIFO 0x0345 /* uint16_t */ | ||
121 | #define DPRAM_RESET_TIME 0x034d /* uint16_t */ | ||
122 | #define DPRAM_TIME 0x0350 /* uint64_t */ | ||
123 | #define DPRAM_WR_START 0x0358 /* uint8_t */ | ||
124 | #define DPRAM_WR_END 0x0359 /* uint8_t */ | ||
125 | #define DPRAM_RESET_RX_FIFO 0x0361 /* uint16_t */ | ||
126 | #define DPRAM_RESET_TX_FIFO 0x0364 /* uint8_t */ | ||
127 | #define DPRAM_READ_FIFO_LEVEL 0x0365 /* uint8_t */ | ||
128 | #define DPRAM_RX_FIFO_LEVEL 0x0366 /* uint16_t */ | ||
129 | #define DPRAM_TX_FIFO_LEVEL 0x0366 /* uint16_t */ | ||
130 | |||
131 | #define DPRAM_TX 0x0400 /* uint16_t */ | ||
132 | #define DPRAM_TX_SIZE 16 | ||
133 | #define DPRAM_TX_CNT 32 | ||
134 | #define DPRAM_TX_RD 0x0601 /* uint8_t */ | ||
135 | #define DPRAM_TX_WR 0x0605 /* uint8_t */ | ||
136 | |||
137 | #define DPRAM_COMMAND 0x07e0 /* uint16_t */ | ||
138 | #define DPRAM_RECEIPT 0x07f0 /* uint16_t */ | ||
139 | #define DPRAM_IRQ_TOHOST 0x07fe /* uint8_t */ | ||
140 | #define DPRAM_IRQ_TOCARD 0x07ff /* uint8_t */ | ||
141 | |||
142 | #define DPRAM_V2_RESET 0x0e00 /* uint8_t */ | ||
143 | #define DPRAM_V2_IRQ_TOHOST 0x0e02 /* uint8_t */ | ||
144 | |||
145 | #define TXMAX (DPRAM_TX_CNT - 1) | ||
146 | |||
147 | /* DPRAM return codes */ | ||
148 | #define RES_NONE 0 | ||
149 | #define RES_OK 1 | ||
150 | #define RES_NOK 2 | ||
151 | #define RES_UNKNOWN 3 | ||
152 | /* DPRAM flags */ | ||
153 | #define CMD_TX 0x01 | ||
154 | #define CMD_ACK 0x02 | ||
155 | #define CMD_XTD 0x04 | ||
156 | #define CMD_RTR 0x08 | ||
157 | #define CMD_ERR 0x10 | ||
158 | #define CMD_BUS2 0x80 | ||
159 | |||
160 | /* returned fifo entry bus state masks */ | ||
161 | #define SF_MASK_BUSOFF 0x80 | ||
162 | #define SF_MASK_EPASSIVE 0x60 | ||
163 | |||
164 | /* bus states */ | ||
165 | #define STATE_BUSOFF 2 | ||
166 | #define STATE_EPASSIVE 1 | ||
167 | #define STATE_EACTIVE 0 | ||
diff --git a/drivers/net/can/softing/softing_cs.c b/drivers/net/can/softing/softing_cs.c new file mode 100644 index 000000000000..c11bb4de8630 --- /dev/null +++ b/drivers/net/can/softing/softing_cs.c | |||
@@ -0,0 +1,360 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2008-2010 | ||
3 | * | ||
4 | * - Kurt Van Dijck, EIA Electronics | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the version 2 of the GNU General Public License | ||
8 | * as published by the Free Software Foundation | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
18 | */ | ||
19 | |||
20 | #include <linux/module.h> | ||
21 | #include <linux/kernel.h> | ||
22 | #include <linux/slab.h> | ||
23 | |||
24 | #include <pcmcia/cistpl.h> | ||
25 | #include <pcmcia/ds.h> | ||
26 | |||
27 | #include "softing_platform.h" | ||
28 | |||
29 | static int softingcs_index; | ||
30 | static spinlock_t softingcs_index_lock; | ||
31 | |||
32 | static int softingcs_reset(struct platform_device *pdev, int v); | ||
33 | static int softingcs_enable_irq(struct platform_device *pdev, int v); | ||
34 | |||
35 | /* | ||
36 | * platform_data descriptions | ||
37 | */ | ||
38 | #define MHZ (1000*1000) | ||
39 | static const struct softing_platform_data softingcs_platform_data[] = { | ||
40 | { | ||
41 | .name = "CANcard", | ||
42 | .manf = 0x0168, .prod = 0x001, | ||
43 | .generation = 1, | ||
44 | .nbus = 2, | ||
45 | .freq = 16 * MHZ, .max_brp = 32, .max_sjw = 4, | ||
46 | .dpram_size = 0x0800, | ||
47 | .boot = {0x0000, 0x000000, fw_dir "bcard.bin",}, | ||
48 | .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",}, | ||
49 | .app = {0x0010, 0x0d0000, fw_dir "cancard.bin",}, | ||
50 | .reset = softingcs_reset, | ||
51 | .enable_irq = softingcs_enable_irq, | ||
52 | }, { | ||
53 | .name = "CANcard-NEC", | ||
54 | .manf = 0x0168, .prod = 0x002, | ||
55 | .generation = 1, | ||
56 | .nbus = 2, | ||
57 | .freq = 16 * MHZ, .max_brp = 32, .max_sjw = 4, | ||
58 | .dpram_size = 0x0800, | ||
59 | .boot = {0x0000, 0x000000, fw_dir "bcard.bin",}, | ||
60 | .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",}, | ||
61 | .app = {0x0010, 0x0d0000, fw_dir "cancard.bin",}, | ||
62 | .reset = softingcs_reset, | ||
63 | .enable_irq = softingcs_enable_irq, | ||
64 | }, { | ||
65 | .name = "CANcard-SJA", | ||
66 | .manf = 0x0168, .prod = 0x004, | ||
67 | .generation = 1, | ||
68 | .nbus = 2, | ||
69 | .freq = 20 * MHZ, .max_brp = 32, .max_sjw = 4, | ||
70 | .dpram_size = 0x0800, | ||
71 | .boot = {0x0000, 0x000000, fw_dir "bcard.bin",}, | ||
72 | .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",}, | ||
73 | .app = {0x0010, 0x0d0000, fw_dir "cansja.bin",}, | ||
74 | .reset = softingcs_reset, | ||
75 | .enable_irq = softingcs_enable_irq, | ||
76 | }, { | ||
77 | .name = "CANcard-2", | ||
78 | .manf = 0x0168, .prod = 0x005, | ||
79 | .generation = 2, | ||
80 | .nbus = 2, | ||
81 | .freq = 24 * MHZ, .max_brp = 64, .max_sjw = 4, | ||
82 | .dpram_size = 0x1000, | ||
83 | .boot = {0x0000, 0x000000, fw_dir "bcard2.bin",}, | ||
84 | .load = {0x0120, 0x00f600, fw_dir "ldcard2.bin",}, | ||
85 | .app = {0x0010, 0x0d0000, fw_dir "cancrd2.bin",}, | ||
86 | .reset = softingcs_reset, | ||
87 | .enable_irq = NULL, | ||
88 | }, { | ||
89 | .name = "Vector-CANcard", | ||
90 | .manf = 0x0168, .prod = 0x081, | ||
91 | .generation = 1, | ||
92 | .nbus = 2, | ||
93 | .freq = 16 * MHZ, .max_brp = 64, .max_sjw = 4, | ||
94 | .dpram_size = 0x0800, | ||
95 | .boot = {0x0000, 0x000000, fw_dir "bcard.bin",}, | ||
96 | .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",}, | ||
97 | .app = {0x0010, 0x0d0000, fw_dir "cancard.bin",}, | ||
98 | .reset = softingcs_reset, | ||
99 | .enable_irq = softingcs_enable_irq, | ||
100 | }, { | ||
101 | .name = "Vector-CANcard-SJA", | ||
102 | .manf = 0x0168, .prod = 0x084, | ||
103 | .generation = 1, | ||
104 | .nbus = 2, | ||
105 | .freq = 20 * MHZ, .max_brp = 32, .max_sjw = 4, | ||
106 | .dpram_size = 0x0800, | ||
107 | .boot = {0x0000, 0x000000, fw_dir "bcard.bin",}, | ||
108 | .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",}, | ||
109 | .app = {0x0010, 0x0d0000, fw_dir "cansja.bin",}, | ||
110 | .reset = softingcs_reset, | ||
111 | .enable_irq = softingcs_enable_irq, | ||
112 | }, { | ||
113 | .name = "Vector-CANcard-2", | ||
114 | .manf = 0x0168, .prod = 0x085, | ||
115 | .generation = 2, | ||
116 | .nbus = 2, | ||
117 | .freq = 24 * MHZ, .max_brp = 64, .max_sjw = 4, | ||
118 | .dpram_size = 0x1000, | ||
119 | .boot = {0x0000, 0x000000, fw_dir "bcard2.bin",}, | ||
120 | .load = {0x0120, 0x00f600, fw_dir "ldcard2.bin",}, | ||
121 | .app = {0x0010, 0x0d0000, fw_dir "cancrd2.bin",}, | ||
122 | .reset = softingcs_reset, | ||
123 | .enable_irq = NULL, | ||
124 | }, { | ||
125 | .name = "EDICcard-NEC", | ||
126 | .manf = 0x0168, .prod = 0x102, | ||
127 | .generation = 1, | ||
128 | .nbus = 2, | ||
129 | .freq = 16 * MHZ, .max_brp = 64, .max_sjw = 4, | ||
130 | .dpram_size = 0x0800, | ||
131 | .boot = {0x0000, 0x000000, fw_dir "bcard.bin",}, | ||
132 | .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",}, | ||
133 | .app = {0x0010, 0x0d0000, fw_dir "cancard.bin",}, | ||
134 | .reset = softingcs_reset, | ||
135 | .enable_irq = softingcs_enable_irq, | ||
136 | }, { | ||
137 | .name = "EDICcard-2", | ||
138 | .manf = 0x0168, .prod = 0x105, | ||
139 | .generation = 2, | ||
140 | .nbus = 2, | ||
141 | .freq = 24 * MHZ, .max_brp = 64, .max_sjw = 4, | ||
142 | .dpram_size = 0x1000, | ||
143 | .boot = {0x0000, 0x000000, fw_dir "bcard2.bin",}, | ||
144 | .load = {0x0120, 0x00f600, fw_dir "ldcard2.bin",}, | ||
145 | .app = {0x0010, 0x0d0000, fw_dir "cancrd2.bin",}, | ||
146 | .reset = softingcs_reset, | ||
147 | .enable_irq = NULL, | ||
148 | }, { | ||
149 | 0, 0, | ||
150 | }, | ||
151 | }; | ||
152 | |||
153 | MODULE_FIRMWARE(fw_dir "bcard.bin"); | ||
154 | MODULE_FIRMWARE(fw_dir "ldcard.bin"); | ||
155 | MODULE_FIRMWARE(fw_dir "cancard.bin"); | ||
156 | MODULE_FIRMWARE(fw_dir "cansja.bin"); | ||
157 | |||
158 | MODULE_FIRMWARE(fw_dir "bcard2.bin"); | ||
159 | MODULE_FIRMWARE(fw_dir "ldcard2.bin"); | ||
160 | MODULE_FIRMWARE(fw_dir "cancrd2.bin"); | ||
161 | |||
162 | static __devinit const struct softing_platform_data | ||
163 | *softingcs_find_platform_data(unsigned int manf, unsigned int prod) | ||
164 | { | ||
165 | const struct softing_platform_data *lp; | ||
166 | |||
167 | for (lp = softingcs_platform_data; lp->manf; ++lp) { | ||
168 | if ((lp->manf == manf) && (lp->prod == prod)) | ||
169 | return lp; | ||
170 | } | ||
171 | return NULL; | ||
172 | } | ||
173 | |||
174 | /* | ||
175 | * platformdata callbacks | ||
176 | */ | ||
177 | static int softingcs_reset(struct platform_device *pdev, int v) | ||
178 | { | ||
179 | struct pcmcia_device *pcmcia = to_pcmcia_dev(pdev->dev.parent); | ||
180 | |||
181 | dev_dbg(&pdev->dev, "pcmcia config [2] %02x\n", v ? 0 : 0x20); | ||
182 | return pcmcia_write_config_byte(pcmcia, 2, v ? 0 : 0x20); | ||
183 | } | ||
184 | |||
185 | static int softingcs_enable_irq(struct platform_device *pdev, int v) | ||
186 | { | ||
187 | struct pcmcia_device *pcmcia = to_pcmcia_dev(pdev->dev.parent); | ||
188 | |||
189 | dev_dbg(&pdev->dev, "pcmcia config [0] %02x\n", v ? 0x60 : 0); | ||
190 | return pcmcia_write_config_byte(pcmcia, 0, v ? 0x60 : 0); | ||
191 | } | ||
192 | |||
193 | /* | ||
194 | * pcmcia check | ||
195 | */ | ||
196 | static __devinit int softingcs_probe_config(struct pcmcia_device *pcmcia, | ||
197 | void *priv_data) | ||
198 | { | ||
199 | struct softing_platform_data *pdat = priv_data; | ||
200 | struct resource *pres; | ||
201 | int memspeed = 0; | ||
202 | |||
203 | WARN_ON(!pdat); | ||
204 | pres = pcmcia->resource[PCMCIA_IOMEM_0]; | ||
205 | if (resource_size(pres) < 0x1000) | ||
206 | return -ERANGE; | ||
207 | |||
208 | pres->flags |= WIN_MEMORY_TYPE_CM | WIN_ENABLE; | ||
209 | if (pdat->generation < 2) { | ||
210 | pres->flags |= WIN_USE_WAIT | WIN_DATA_WIDTH_8; | ||
211 | memspeed = 3; | ||
212 | } else { | ||
213 | pres->flags |= WIN_DATA_WIDTH_16; | ||
214 | } | ||
215 | return pcmcia_request_window(pcmcia, pres, memspeed); | ||
216 | } | ||
217 | |||
218 | static __devexit void softingcs_remove(struct pcmcia_device *pcmcia) | ||
219 | { | ||
220 | struct platform_device *pdev = pcmcia->priv; | ||
221 | |||
222 | /* free bits */ | ||
223 | platform_device_unregister(pdev); | ||
224 | /* release pcmcia stuff */ | ||
225 | pcmcia_disable_device(pcmcia); | ||
226 | } | ||
227 | |||
228 | /* | ||
229 | * platform_device wrapper | ||
230 | * pdev->resource has 2 entries: io & irq | ||
231 | */ | ||
232 | static void softingcs_pdev_release(struct device *dev) | ||
233 | { | ||
234 | struct platform_device *pdev = to_platform_device(dev); | ||
235 | kfree(pdev); | ||
236 | } | ||
237 | |||
238 | static __devinit int softingcs_probe(struct pcmcia_device *pcmcia) | ||
239 | { | ||
240 | int ret; | ||
241 | struct platform_device *pdev; | ||
242 | const struct softing_platform_data *pdat; | ||
243 | struct resource *pres; | ||
244 | struct dev { | ||
245 | struct platform_device pdev; | ||
246 | struct resource res[2]; | ||
247 | } *dev; | ||
248 | |||
249 | /* find matching platform_data */ | ||
250 | pdat = softingcs_find_platform_data(pcmcia->manf_id, pcmcia->card_id); | ||
251 | if (!pdat) | ||
252 | return -ENOTTY; | ||
253 | |||
254 | /* setup pcmcia device */ | ||
255 | pcmcia->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IOMEM | | ||
256 | CONF_AUTO_SET_VPP | CONF_AUTO_CHECK_VCC; | ||
257 | ret = pcmcia_loop_config(pcmcia, softingcs_probe_config, (void *)pdat); | ||
258 | if (ret) | ||
259 | goto pcmcia_failed; | ||
260 | |||
261 | ret = pcmcia_enable_device(pcmcia); | ||
262 | if (ret < 0) | ||
263 | goto pcmcia_failed; | ||
264 | |||
265 | pres = pcmcia->resource[PCMCIA_IOMEM_0]; | ||
266 | if (!pres) { | ||
267 | ret = -EBADF; | ||
268 | goto pcmcia_bad; | ||
269 | } | ||
270 | |||
271 | /* create softing platform device */ | ||
272 | dev = kzalloc(sizeof(*dev), GFP_KERNEL); | ||
273 | if (!dev) { | ||
274 | ret = -ENOMEM; | ||
275 | goto mem_failed; | ||
276 | } | ||
277 | dev->pdev.resource = dev->res; | ||
278 | dev->pdev.num_resources = ARRAY_SIZE(dev->res); | ||
279 | dev->pdev.dev.release = softingcs_pdev_release; | ||
280 | |||
281 | pdev = &dev->pdev; | ||
282 | pdev->dev.platform_data = (void *)pdat; | ||
283 | pdev->dev.parent = &pcmcia->dev; | ||
284 | pcmcia->priv = pdev; | ||
285 | |||
286 | /* platform device resources */ | ||
287 | pdev->resource[0].flags = IORESOURCE_MEM; | ||
288 | pdev->resource[0].start = pres->start; | ||
289 | pdev->resource[0].end = pres->end; | ||
290 | |||
291 | pdev->resource[1].flags = IORESOURCE_IRQ; | ||
292 | pdev->resource[1].start = pcmcia->irq; | ||
293 | pdev->resource[1].end = pdev->resource[1].start; | ||
294 | |||
295 | /* platform device setup */ | ||
296 | spin_lock(&softingcs_index_lock); | ||
297 | pdev->id = softingcs_index++; | ||
298 | spin_unlock(&softingcs_index_lock); | ||
299 | pdev->name = "softing"; | ||
300 | dev_set_name(&pdev->dev, "softingcs.%i", pdev->id); | ||
301 | ret = platform_device_register(pdev); | ||
302 | if (ret < 0) | ||
303 | goto platform_failed; | ||
304 | |||
305 | dev_info(&pcmcia->dev, "created %s\n", dev_name(&pdev->dev)); | ||
306 | return 0; | ||
307 | |||
308 | platform_failed: | ||
309 | kfree(dev); | ||
310 | mem_failed: | ||
311 | pcmcia_bad: | ||
312 | pcmcia_failed: | ||
313 | pcmcia_disable_device(pcmcia); | ||
314 | pcmcia->priv = NULL; | ||
315 | return ret ?: -ENODEV; | ||
316 | } | ||
317 | |||
318 | static /*const*/ struct pcmcia_device_id softingcs_ids[] = { | ||
319 | /* softing */ | ||
320 | PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0001), | ||
321 | PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0002), | ||
322 | PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0004), | ||
323 | PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0005), | ||
324 | /* vector, manufacturer? */ | ||
325 | PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0081), | ||
326 | PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0084), | ||
327 | PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0085), | ||
328 | /* EDIC */ | ||
329 | PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0102), | ||
330 | PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0105), | ||
331 | PCMCIA_DEVICE_NULL, | ||
332 | }; | ||
333 | |||
334 | MODULE_DEVICE_TABLE(pcmcia, softingcs_ids); | ||
335 | |||
336 | static struct pcmcia_driver softingcs_driver = { | ||
337 | .owner = THIS_MODULE, | ||
338 | .name = "softingcs", | ||
339 | .id_table = softingcs_ids, | ||
340 | .probe = softingcs_probe, | ||
341 | .remove = __devexit_p(softingcs_remove), | ||
342 | }; | ||
343 | |||
344 | static int __init softingcs_start(void) | ||
345 | { | ||
346 | spin_lock_init(&softingcs_index_lock); | ||
347 | return pcmcia_register_driver(&softingcs_driver); | ||
348 | } | ||
349 | |||
350 | static void __exit softingcs_stop(void) | ||
351 | { | ||
352 | pcmcia_unregister_driver(&softingcs_driver); | ||
353 | } | ||
354 | |||
355 | module_init(softingcs_start); | ||
356 | module_exit(softingcs_stop); | ||
357 | |||
358 | MODULE_DESCRIPTION("softing CANcard driver" | ||
359 | ", links PCMCIA card to softing driver"); | ||
360 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/net/can/softing/softing_fw.c b/drivers/net/can/softing/softing_fw.c new file mode 100644 index 000000000000..b520784fb197 --- /dev/null +++ b/drivers/net/can/softing/softing_fw.c | |||
@@ -0,0 +1,691 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2008-2010 | ||
3 | * | ||
4 | * - Kurt Van Dijck, EIA Electronics | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the version 2 of the GNU General Public License | ||
8 | * as published by the Free Software Foundation | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
18 | */ | ||
19 | |||
20 | #include <linux/firmware.h> | ||
21 | #include <linux/sched.h> | ||
22 | #include <asm/div64.h> | ||
23 | |||
24 | #include "softing.h" | ||
25 | |||
26 | /* | ||
27 | * low level DPRAM command. | ||
28 | * Make sure that card->dpram[DPRAM_FCT_HOST] is preset | ||
29 | */ | ||
30 | static int _softing_fct_cmd(struct softing *card, int16_t cmd, uint16_t vector, | ||
31 | const char *msg) | ||
32 | { | ||
33 | int ret; | ||
34 | unsigned long stamp; | ||
35 | |||
36 | iowrite16(cmd, &card->dpram[DPRAM_FCT_PARAM]); | ||
37 | iowrite8(vector >> 8, &card->dpram[DPRAM_FCT_HOST + 1]); | ||
38 | iowrite8(vector, &card->dpram[DPRAM_FCT_HOST]); | ||
39 | /* be sure to flush this to the card */ | ||
40 | wmb(); | ||
41 | stamp = jiffies + 1 * HZ; | ||
42 | /* wait for card */ | ||
43 | do { | ||
44 | /* DPRAM_FCT_HOST is _not_ aligned */ | ||
45 | ret = ioread8(&card->dpram[DPRAM_FCT_HOST]) + | ||
46 | (ioread8(&card->dpram[DPRAM_FCT_HOST + 1]) << 8); | ||
47 | /* don't have any cached variables */ | ||
48 | rmb(); | ||
49 | if (ret == RES_OK) | ||
50 | /* read return-value now */ | ||
51 | return ioread16(&card->dpram[DPRAM_FCT_RESULT]); | ||
52 | |||
53 | if ((ret != vector) || time_after(jiffies, stamp)) | ||
54 | break; | ||
55 | /* process context => relax */ | ||
56 | usleep_range(500, 10000); | ||
57 | } while (1); | ||
58 | |||
59 | ret = (ret == RES_NONE) ? -ETIMEDOUT : -ECANCELED; | ||
60 | dev_alert(&card->pdev->dev, "firmware %s failed (%i)\n", msg, ret); | ||
61 | return ret; | ||
62 | } | ||
63 | |||
64 | static int softing_fct_cmd(struct softing *card, int16_t cmd, const char *msg) | ||
65 | { | ||
66 | int ret; | ||
67 | |||
68 | ret = _softing_fct_cmd(card, cmd, 0, msg); | ||
69 | if (ret > 0) { | ||
70 | dev_alert(&card->pdev->dev, "%s returned %u\n", msg, ret); | ||
71 | ret = -EIO; | ||
72 | } | ||
73 | return ret; | ||
74 | } | ||
75 | |||
76 | int softing_bootloader_command(struct softing *card, int16_t cmd, | ||
77 | const char *msg) | ||
78 | { | ||
79 | int ret; | ||
80 | unsigned long stamp; | ||
81 | |||
82 | iowrite16(RES_NONE, &card->dpram[DPRAM_RECEIPT]); | ||
83 | iowrite16(cmd, &card->dpram[DPRAM_COMMAND]); | ||
84 | /* be sure to flush this to the card */ | ||
85 | wmb(); | ||
86 | stamp = jiffies + 3 * HZ; | ||
87 | /* wait for card */ | ||
88 | do { | ||
89 | ret = ioread16(&card->dpram[DPRAM_RECEIPT]); | ||
90 | /* don't have any cached variables */ | ||
91 | rmb(); | ||
92 | if (ret == RES_OK) | ||
93 | return 0; | ||
94 | if (time_after(jiffies, stamp)) | ||
95 | break; | ||
96 | /* process context => relax */ | ||
97 | usleep_range(500, 10000); | ||
98 | } while (!signal_pending(current)); | ||
99 | |||
100 | ret = (ret == RES_NONE) ? -ETIMEDOUT : -ECANCELED; | ||
101 | dev_alert(&card->pdev->dev, "bootloader %s failed (%i)\n", msg, ret); | ||
102 | return ret; | ||
103 | } | ||
104 | |||
105 | static int fw_parse(const uint8_t **pmem, uint16_t *ptype, uint32_t *paddr, | ||
106 | uint16_t *plen, const uint8_t **pdat) | ||
107 | { | ||
108 | uint16_t checksum[2]; | ||
109 | const uint8_t *mem; | ||
110 | const uint8_t *end; | ||
111 | |||
112 | /* | ||
113 | * firmware records are a binary, unaligned stream composed of: | ||
114 | * uint16_t type; | ||
115 | * uint32_t addr; | ||
116 | * uint16_t len; | ||
117 | * uint8_t dat[len]; | ||
118 | * uint16_t checksum; | ||
119 | * all values in little endian. | ||
120 | * We could define a struct for this, with __attribute__((packed)), | ||
121 | * but would that solve the alignment in _all_ cases (cfr. the | ||
122 | * struct itself may be an odd address)? | ||
123 | * | ||
124 | * I chose to use leXX_to_cpup() since this solves both | ||
125 | * endianness & alignment. | ||
126 | */ | ||
127 | mem = *pmem; | ||
128 | *ptype = le16_to_cpup((void *)&mem[0]); | ||
129 | *paddr = le32_to_cpup((void *)&mem[2]); | ||
130 | *plen = le16_to_cpup((void *)&mem[6]); | ||
131 | *pdat = &mem[8]; | ||
132 | /* verify checksum */ | ||
133 | end = &mem[8 + *plen]; | ||
134 | checksum[0] = le16_to_cpup((void *)end); | ||
135 | for (checksum[1] = 0; mem < end; ++mem) | ||
136 | checksum[1] += *mem; | ||
137 | if (checksum[0] != checksum[1]) | ||
138 | return -EINVAL; | ||
139 | /* increment */ | ||
140 | *pmem += 10 + *plen; | ||
141 | return 0; | ||
142 | } | ||
143 | |||
144 | int softing_load_fw(const char *file, struct softing *card, | ||
145 | __iomem uint8_t *dpram, unsigned int size, int offset) | ||
146 | { | ||
147 | const struct firmware *fw; | ||
148 | int ret; | ||
149 | const uint8_t *mem, *end, *dat; | ||
150 | uint16_t type, len; | ||
151 | uint32_t addr; | ||
152 | uint8_t *buf = NULL; | ||
153 | int buflen = 0; | ||
154 | int8_t type_end = 0; | ||
155 | |||
156 | ret = request_firmware(&fw, file, &card->pdev->dev); | ||
157 | if (ret < 0) | ||
158 | return ret; | ||
159 | dev_dbg(&card->pdev->dev, "%s, firmware(%s) got %u bytes" | ||
160 | ", offset %c0x%04x\n", | ||
161 | card->pdat->name, file, (unsigned int)fw->size, | ||
162 | (offset >= 0) ? '+' : '-', (unsigned int)abs(offset)); | ||
163 | /* parse the firmware */ | ||
164 | mem = fw->data; | ||
165 | end = &mem[fw->size]; | ||
166 | /* look for header record */ | ||
167 | ret = fw_parse(&mem, &type, &addr, &len, &dat); | ||
168 | if (ret < 0) | ||
169 | goto failed; | ||
170 | if (type != 0xffff) | ||
171 | goto failed; | ||
172 | if (strncmp("Structured Binary Format, Softing GmbH", dat, len)) { | ||
173 | ret = -EINVAL; | ||
174 | goto failed; | ||
175 | } | ||
176 | /* ok, we had a header */ | ||
177 | while (mem < end) { | ||
178 | ret = fw_parse(&mem, &type, &addr, &len, &dat); | ||
179 | if (ret < 0) | ||
180 | goto failed; | ||
181 | if (type == 3) { | ||
182 | /* start address, not used here */ | ||
183 | continue; | ||
184 | } else if (type == 1) { | ||
185 | /* eof */ | ||
186 | type_end = 1; | ||
187 | break; | ||
188 | } else if (type != 0) { | ||
189 | ret = -EINVAL; | ||
190 | goto failed; | ||
191 | } | ||
192 | |||
193 | if ((addr + len + offset) > size) | ||
194 | goto failed; | ||
195 | memcpy_toio(&dpram[addr + offset], dat, len); | ||
196 | /* be sure to flush caches from IO space */ | ||
197 | mb(); | ||
198 | if (len > buflen) { | ||
199 | /* align buflen */ | ||
200 | buflen = (len + (1024-1)) & ~(1024-1); | ||
201 | buf = krealloc(buf, buflen, GFP_KERNEL); | ||
202 | if (!buf) { | ||
203 | ret = -ENOMEM; | ||
204 | goto failed; | ||
205 | } | ||
206 | } | ||
207 | /* verify record data */ | ||
208 | memcpy_fromio(buf, &dpram[addr + offset], len); | ||
209 | if (memcmp(buf, dat, len)) { | ||
210 | /* is not ok */ | ||
211 | dev_alert(&card->pdev->dev, "DPRAM readback failed\n"); | ||
212 | ret = -EIO; | ||
213 | goto failed; | ||
214 | } | ||
215 | } | ||
216 | if (!type_end) | ||
217 | /* no end record seen */ | ||
218 | goto failed; | ||
219 | ret = 0; | ||
220 | failed: | ||
221 | kfree(buf); | ||
222 | release_firmware(fw); | ||
223 | if (ret < 0) | ||
224 | dev_info(&card->pdev->dev, "firmware %s failed\n", file); | ||
225 | return ret; | ||
226 | } | ||
227 | |||
228 | int softing_load_app_fw(const char *file, struct softing *card) | ||
229 | { | ||
230 | const struct firmware *fw; | ||
231 | const uint8_t *mem, *end, *dat; | ||
232 | int ret, j; | ||
233 | uint16_t type, len; | ||
234 | uint32_t addr, start_addr = 0; | ||
235 | unsigned int sum, rx_sum; | ||
236 | int8_t type_end = 0, type_entrypoint = 0; | ||
237 | |||
238 | ret = request_firmware(&fw, file, &card->pdev->dev); | ||
239 | if (ret) { | ||
240 | dev_alert(&card->pdev->dev, "request_firmware(%s) got %i\n", | ||
241 | file, ret); | ||
242 | return ret; | ||
243 | } | ||
244 | dev_dbg(&card->pdev->dev, "firmware(%s) got %lu bytes\n", | ||
245 | file, (unsigned long)fw->size); | ||
246 | /* parse the firmware */ | ||
247 | mem = fw->data; | ||
248 | end = &mem[fw->size]; | ||
249 | /* look for header record */ | ||
250 | ret = fw_parse(&mem, &type, &addr, &len, &dat); | ||
251 | if (ret) | ||
252 | goto failed; | ||
253 | ret = -EINVAL; | ||
254 | if (type != 0xffff) { | ||
255 | dev_alert(&card->pdev->dev, "firmware starts with type 0x%x\n", | ||
256 | type); | ||
257 | goto failed; | ||
258 | } | ||
259 | if (strncmp("Structured Binary Format, Softing GmbH", dat, len)) { | ||
260 | dev_alert(&card->pdev->dev, "firmware string '%.*s' fault\n", | ||
261 | len, dat); | ||
262 | goto failed; | ||
263 | } | ||
264 | /* ok, we had a header */ | ||
265 | while (mem < end) { | ||
266 | ret = fw_parse(&mem, &type, &addr, &len, &dat); | ||
267 | if (ret) | ||
268 | goto failed; | ||
269 | |||
270 | if (type == 3) { | ||
271 | /* start address */ | ||
272 | start_addr = addr; | ||
273 | type_entrypoint = 1; | ||
274 | continue; | ||
275 | } else if (type == 1) { | ||
276 | /* eof */ | ||
277 | type_end = 1; | ||
278 | break; | ||
279 | } else if (type != 0) { | ||
280 | dev_alert(&card->pdev->dev, | ||
281 | "unknown record type 0x%04x\n", type); | ||
282 | ret = -EINVAL; | ||
283 | goto failed; | ||
284 | } | ||
285 | |||
286 | /* regular data */ | ||
287 | for (sum = 0, j = 0; j < len; ++j) | ||
288 | sum += dat[j]; | ||
289 | /* work in 16bit (target) */ | ||
290 | sum &= 0xffff; | ||
291 | |||
292 | memcpy_toio(&card->dpram[card->pdat->app.offs], dat, len); | ||
293 | iowrite32(card->pdat->app.offs + card->pdat->app.addr, | ||
294 | &card->dpram[DPRAM_COMMAND + 2]); | ||
295 | iowrite32(addr, &card->dpram[DPRAM_COMMAND + 6]); | ||
296 | iowrite16(len, &card->dpram[DPRAM_COMMAND + 10]); | ||
297 | iowrite8(1, &card->dpram[DPRAM_COMMAND + 12]); | ||
298 | ret = softing_bootloader_command(card, 1, "loading app."); | ||
299 | if (ret < 0) | ||
300 | goto failed; | ||
301 | /* verify checksum */ | ||
302 | rx_sum = ioread16(&card->dpram[DPRAM_RECEIPT + 2]); | ||
303 | if (rx_sum != sum) { | ||
304 | dev_alert(&card->pdev->dev, "SRAM seems to be damaged" | ||
305 | ", wanted 0x%04x, got 0x%04x\n", sum, rx_sum); | ||
306 | ret = -EIO; | ||
307 | goto failed; | ||
308 | } | ||
309 | } | ||
310 | if (!type_end || !type_entrypoint) | ||
311 | goto failed; | ||
312 | /* start application in card */ | ||
313 | iowrite32(start_addr, &card->dpram[DPRAM_COMMAND + 2]); | ||
314 | iowrite8(1, &card->dpram[DPRAM_COMMAND + 6]); | ||
315 | ret = softing_bootloader_command(card, 3, "start app."); | ||
316 | if (ret < 0) | ||
317 | goto failed; | ||
318 | ret = 0; | ||
319 | failed: | ||
320 | release_firmware(fw); | ||
321 | if (ret < 0) | ||
322 | dev_info(&card->pdev->dev, "firmware %s failed\n", file); | ||
323 | return ret; | ||
324 | } | ||
325 | |||
326 | static int softing_reset_chip(struct softing *card) | ||
327 | { | ||
328 | int ret; | ||
329 | |||
330 | do { | ||
331 | /* reset chip */ | ||
332 | iowrite8(0, &card->dpram[DPRAM_RESET_RX_FIFO]); | ||
333 | iowrite8(0, &card->dpram[DPRAM_RESET_RX_FIFO+1]); | ||
334 | iowrite8(1, &card->dpram[DPRAM_RESET]); | ||
335 | iowrite8(0, &card->dpram[DPRAM_RESET+1]); | ||
336 | |||
337 | ret = softing_fct_cmd(card, 0, "reset_can"); | ||
338 | if (!ret) | ||
339 | break; | ||
340 | if (signal_pending(current)) | ||
341 | /* don't wait any longer */ | ||
342 | break; | ||
343 | } while (1); | ||
344 | card->tx.pending = 0; | ||
345 | return ret; | ||
346 | } | ||
347 | |||
348 | int softing_chip_poweron(struct softing *card) | ||
349 | { | ||
350 | int ret; | ||
351 | /* sync */ | ||
352 | ret = _softing_fct_cmd(card, 99, 0x55, "sync-a"); | ||
353 | if (ret < 0) | ||
354 | goto failed; | ||
355 | |||
356 | ret = _softing_fct_cmd(card, 99, 0xaa, "sync-b"); | ||
357 | if (ret < 0) | ||
358 | goto failed; | ||
359 | |||
360 | ret = softing_reset_chip(card); | ||
361 | if (ret < 0) | ||
362 | goto failed; | ||
363 | /* get_serial */ | ||
364 | ret = softing_fct_cmd(card, 43, "get_serial_number"); | ||
365 | if (ret < 0) | ||
366 | goto failed; | ||
367 | card->id.serial = ioread32(&card->dpram[DPRAM_FCT_PARAM]); | ||
368 | /* get_version */ | ||
369 | ret = softing_fct_cmd(card, 12, "get_version"); | ||
370 | if (ret < 0) | ||
371 | goto failed; | ||
372 | card->id.fw_version = ioread16(&card->dpram[DPRAM_FCT_PARAM + 2]); | ||
373 | card->id.hw_version = ioread16(&card->dpram[DPRAM_FCT_PARAM + 4]); | ||
374 | card->id.license = ioread16(&card->dpram[DPRAM_FCT_PARAM + 6]); | ||
375 | card->id.chip[0] = ioread16(&card->dpram[DPRAM_FCT_PARAM + 8]); | ||
376 | card->id.chip[1] = ioread16(&card->dpram[DPRAM_FCT_PARAM + 10]); | ||
377 | return 0; | ||
378 | failed: | ||
379 | return ret; | ||
380 | } | ||
381 | |||
382 | static void softing_initialize_timestamp(struct softing *card) | ||
383 | { | ||
384 | uint64_t ovf; | ||
385 | |||
386 | card->ts_ref = ktime_get(); | ||
387 | |||
388 | /* 16MHz is the reference */ | ||
389 | ovf = 0x100000000ULL * 16; | ||
390 | do_div(ovf, card->pdat->freq ?: 16); | ||
391 | |||
392 | card->ts_overflow = ktime_add_us(ktime_set(0, 0), ovf); | ||
393 | } | ||
394 | |||
395 | ktime_t softing_raw2ktime(struct softing *card, u32 raw) | ||
396 | { | ||
397 | uint64_t rawl; | ||
398 | ktime_t now, real_offset; | ||
399 | ktime_t target; | ||
400 | ktime_t tmp; | ||
401 | |||
402 | now = ktime_get(); | ||
403 | real_offset = ktime_sub(ktime_get_real(), now); | ||
404 | |||
405 | /* find nsec from card */ | ||
406 | rawl = raw * 16; | ||
407 | do_div(rawl, card->pdat->freq ?: 16); | ||
408 | target = ktime_add_us(card->ts_ref, rawl); | ||
409 | /* test for overflows */ | ||
410 | tmp = ktime_add(target, card->ts_overflow); | ||
411 | while (unlikely(ktime_to_ns(tmp) > ktime_to_ns(now))) { | ||
412 | card->ts_ref = ktime_add(card->ts_ref, card->ts_overflow); | ||
413 | target = tmp; | ||
414 | tmp = ktime_add(target, card->ts_overflow); | ||
415 | } | ||
416 | return ktime_add(target, real_offset); | ||
417 | } | ||
418 | |||
419 | static inline int softing_error_reporting(struct net_device *netdev) | ||
420 | { | ||
421 | struct softing_priv *priv = netdev_priv(netdev); | ||
422 | |||
423 | return (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) | ||
424 | ? 1 : 0; | ||
425 | } | ||
426 | |||
427 | int softing_startstop(struct net_device *dev, int up) | ||
428 | { | ||
429 | int ret; | ||
430 | struct softing *card; | ||
431 | struct softing_priv *priv; | ||
432 | struct net_device *netdev; | ||
433 | int bus_bitmask_start; | ||
434 | int j, error_reporting; | ||
435 | struct can_frame msg; | ||
436 | const struct can_bittiming *bt; | ||
437 | |||
438 | priv = netdev_priv(dev); | ||
439 | card = priv->card; | ||
440 | |||
441 | if (!card->fw.up) | ||
442 | return -EIO; | ||
443 | |||
444 | ret = mutex_lock_interruptible(&card->fw.lock); | ||
445 | if (ret) | ||
446 | return ret; | ||
447 | |||
448 | bus_bitmask_start = 0; | ||
449 | if (dev && up) | ||
450 | /* prepare to start this bus as well */ | ||
451 | bus_bitmask_start |= (1 << priv->index); | ||
452 | /* bring netdevs down */ | ||
453 | for (j = 0; j < ARRAY_SIZE(card->net); ++j) { | ||
454 | netdev = card->net[j]; | ||
455 | if (!netdev) | ||
456 | continue; | ||
457 | priv = netdev_priv(netdev); | ||
458 | |||
459 | if (dev != netdev) | ||
460 | netif_stop_queue(netdev); | ||
461 | |||
462 | if (netif_running(netdev)) { | ||
463 | if (dev != netdev) | ||
464 | bus_bitmask_start |= (1 << j); | ||
465 | priv->tx.pending = 0; | ||
466 | priv->tx.echo_put = 0; | ||
467 | priv->tx.echo_get = 0; | ||
468 | /* | ||
469 | * this bus may just have called open_candev(), | ||
470 | * in which case calling close_candev() already | ||
471 | * looks odd, | ||
472 | * but we may also come here from busoff recovery, | ||
473 | * in which case the echo_skb _needs_ flushing too. | ||
474 | * just be sure to call open_candev() again | ||
475 | */ | ||
476 | close_candev(netdev); | ||
477 | } | ||
478 | priv->can.state = CAN_STATE_STOPPED; | ||
479 | } | ||
480 | card->tx.pending = 0; | ||
481 | |||
482 | softing_enable_irq(card, 0); | ||
483 | ret = softing_reset_chip(card); | ||
484 | if (ret) | ||
485 | goto failed; | ||
486 | if (!bus_bitmask_start) | ||
487 | /* no busses to be brought up */ | ||
488 | goto card_done; | ||
489 | |||
490 | if ((bus_bitmask_start & 1) && (bus_bitmask_start & 2) | ||
491 | && (softing_error_reporting(card->net[0]) | ||
492 | != softing_error_reporting(card->net[1]))) { | ||
493 | dev_alert(&card->pdev->dev, | ||
494 | "err_reporting flag differs for busses\n"); | ||
495 | goto invalid; | ||
496 | } | ||
497 | error_reporting = 0; | ||
498 | if (bus_bitmask_start & 1) { | ||
499 | netdev = card->net[0]; | ||
500 | priv = netdev_priv(netdev); | ||
501 | error_reporting += softing_error_reporting(netdev); | ||
502 | /* init chip 1 */ | ||
503 | bt = &priv->can.bittiming; | ||
504 | iowrite16(bt->brp, &card->dpram[DPRAM_FCT_PARAM + 2]); | ||
505 | iowrite16(bt->sjw, &card->dpram[DPRAM_FCT_PARAM + 4]); | ||
506 | iowrite16(bt->phase_seg1 + bt->prop_seg, | ||
507 | &card->dpram[DPRAM_FCT_PARAM + 6]); | ||
508 | iowrite16(bt->phase_seg2, &card->dpram[DPRAM_FCT_PARAM + 8]); | ||
509 | iowrite16((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) ? 1 : 0, | ||
510 | &card->dpram[DPRAM_FCT_PARAM + 10]); | ||
511 | ret = softing_fct_cmd(card, 1, "initialize_chip[0]"); | ||
512 | if (ret < 0) | ||
513 | goto failed; | ||
514 | /* set mode */ | ||
515 | iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 2]); | ||
516 | iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 4]); | ||
517 | ret = softing_fct_cmd(card, 3, "set_mode[0]"); | ||
518 | if (ret < 0) | ||
519 | goto failed; | ||
520 | /* set filter */ | ||
521 | /* 11bit id & mask */ | ||
522 | iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 2]); | ||
523 | iowrite16(0x07ff, &card->dpram[DPRAM_FCT_PARAM + 4]); | ||
524 | /* 29bit id.lo & mask.lo & id.hi & mask.hi */ | ||
525 | iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 6]); | ||
526 | iowrite16(0xffff, &card->dpram[DPRAM_FCT_PARAM + 8]); | ||
527 | iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 10]); | ||
528 | iowrite16(0x1fff, &card->dpram[DPRAM_FCT_PARAM + 12]); | ||
529 | ret = softing_fct_cmd(card, 7, "set_filter[0]"); | ||
530 | if (ret < 0) | ||
531 | goto failed; | ||
532 | /* set output control */ | ||
533 | iowrite16(priv->output, &card->dpram[DPRAM_FCT_PARAM + 2]); | ||
534 | ret = softing_fct_cmd(card, 5, "set_output[0]"); | ||
535 | if (ret < 0) | ||
536 | goto failed; | ||
537 | } | ||
538 | if (bus_bitmask_start & 2) { | ||
539 | netdev = card->net[1]; | ||
540 | priv = netdev_priv(netdev); | ||
541 | error_reporting += softing_error_reporting(netdev); | ||
542 | /* init chip2 */ | ||
543 | bt = &priv->can.bittiming; | ||
544 | iowrite16(bt->brp, &card->dpram[DPRAM_FCT_PARAM + 2]); | ||
545 | iowrite16(bt->sjw, &card->dpram[DPRAM_FCT_PARAM + 4]); | ||
546 | iowrite16(bt->phase_seg1 + bt->prop_seg, | ||
547 | &card->dpram[DPRAM_FCT_PARAM + 6]); | ||
548 | iowrite16(bt->phase_seg2, &card->dpram[DPRAM_FCT_PARAM + 8]); | ||
549 | iowrite16((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) ? 1 : 0, | ||
550 | &card->dpram[DPRAM_FCT_PARAM + 10]); | ||
551 | ret = softing_fct_cmd(card, 2, "initialize_chip[1]"); | ||
552 | if (ret < 0) | ||
553 | goto failed; | ||
554 | /* set mode2 */ | ||
555 | iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 2]); | ||
556 | iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 4]); | ||
557 | ret = softing_fct_cmd(card, 4, "set_mode[1]"); | ||
558 | if (ret < 0) | ||
559 | goto failed; | ||
560 | /* set filter2 */ | ||
561 | /* 11bit id & mask */ | ||
562 | iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 2]); | ||
563 | iowrite16(0x07ff, &card->dpram[DPRAM_FCT_PARAM + 4]); | ||
564 | /* 29bit id.lo & mask.lo & id.hi & mask.hi */ | ||
565 | iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 6]); | ||
566 | iowrite16(0xffff, &card->dpram[DPRAM_FCT_PARAM + 8]); | ||
567 | iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 10]); | ||
568 | iowrite16(0x1fff, &card->dpram[DPRAM_FCT_PARAM + 12]); | ||
569 | ret = softing_fct_cmd(card, 8, "set_filter[1]"); | ||
570 | if (ret < 0) | ||
571 | goto failed; | ||
572 | /* set output control2 */ | ||
573 | iowrite16(priv->output, &card->dpram[DPRAM_FCT_PARAM + 2]); | ||
574 | ret = softing_fct_cmd(card, 6, "set_output[1]"); | ||
575 | if (ret < 0) | ||
576 | goto failed; | ||
577 | } | ||
578 | /* enable_error_frame */ | ||
579 | /* | ||
580 | * Error reporting is switched off at the moment since | ||
581 | * the receiving of them is not yet 100% verified | ||
582 | * This should be enabled sooner or later | ||
583 | * | ||
584 | if (error_reporting) { | ||
585 | ret = softing_fct_cmd(card, 51, "enable_error_frame"); | ||
586 | if (ret < 0) | ||
587 | goto failed; | ||
588 | } | ||
589 | */ | ||
590 | /* initialize interface */ | ||
591 | iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 2]); | ||
592 | iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 4]); | ||
593 | iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 6]); | ||
594 | iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 8]); | ||
595 | iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 10]); | ||
596 | iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 12]); | ||
597 | iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 14]); | ||
598 | iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 16]); | ||
599 | iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 18]); | ||
600 | iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 20]); | ||
601 | ret = softing_fct_cmd(card, 17, "initialize_interface"); | ||
602 | if (ret < 0) | ||
603 | goto failed; | ||
604 | /* enable_fifo */ | ||
605 | ret = softing_fct_cmd(card, 36, "enable_fifo"); | ||
606 | if (ret < 0) | ||
607 | goto failed; | ||
608 | /* enable fifo tx ack */ | ||
609 | ret = softing_fct_cmd(card, 13, "fifo_tx_ack[0]"); | ||
610 | if (ret < 0) | ||
611 | goto failed; | ||
612 | /* enable fifo tx ack2 */ | ||
613 | ret = softing_fct_cmd(card, 14, "fifo_tx_ack[1]"); | ||
614 | if (ret < 0) | ||
615 | goto failed; | ||
616 | /* start_chip */ | ||
617 | ret = softing_fct_cmd(card, 11, "start_chip"); | ||
618 | if (ret < 0) | ||
619 | goto failed; | ||
620 | iowrite8(0, &card->dpram[DPRAM_INFO_BUSSTATE]); | ||
621 | iowrite8(0, &card->dpram[DPRAM_INFO_BUSSTATE2]); | ||
622 | if (card->pdat->generation < 2) { | ||
623 | iowrite8(0, &card->dpram[DPRAM_V2_IRQ_TOHOST]); | ||
624 | /* flush the DPRAM caches */ | ||
625 | wmb(); | ||
626 | } | ||
627 | |||
628 | softing_initialize_timestamp(card); | ||
629 | |||
630 | /* | ||
631 | * do socketcan notifications/status changes | ||
632 | * from here, no errors should occur, or the failed: part | ||
633 | * must be reviewed | ||
634 | */ | ||
635 | memset(&msg, 0, sizeof(msg)); | ||
636 | msg.can_id = CAN_ERR_FLAG | CAN_ERR_RESTARTED; | ||
637 | msg.can_dlc = CAN_ERR_DLC; | ||
638 | for (j = 0; j < ARRAY_SIZE(card->net); ++j) { | ||
639 | if (!(bus_bitmask_start & (1 << j))) | ||
640 | continue; | ||
641 | netdev = card->net[j]; | ||
642 | if (!netdev) | ||
643 | continue; | ||
644 | priv = netdev_priv(netdev); | ||
645 | priv->can.state = CAN_STATE_ERROR_ACTIVE; | ||
646 | open_candev(netdev); | ||
647 | if (dev != netdev) { | ||
648 | /* notify other busses on the restart */ | ||
649 | softing_netdev_rx(netdev, &msg, ktime_set(0, 0)); | ||
650 | ++priv->can.can_stats.restarts; | ||
651 | } | ||
652 | netif_wake_queue(netdev); | ||
653 | } | ||
654 | |||
655 | /* enable interrupts */ | ||
656 | ret = softing_enable_irq(card, 1); | ||
657 | if (ret) | ||
658 | goto failed; | ||
659 | card_done: | ||
660 | mutex_unlock(&card->fw.lock); | ||
661 | return 0; | ||
662 | invalid: | ||
663 | ret = -EINVAL; | ||
664 | failed: | ||
665 | softing_enable_irq(card, 0); | ||
666 | softing_reset_chip(card); | ||
667 | mutex_unlock(&card->fw.lock); | ||
668 | /* bring all other interfaces down */ | ||
669 | for (j = 0; j < ARRAY_SIZE(card->net); ++j) { | ||
670 | netdev = card->net[j]; | ||
671 | if (!netdev) | ||
672 | continue; | ||
673 | dev_close(netdev); | ||
674 | } | ||
675 | return ret; | ||
676 | } | ||
677 | |||
678 | int softing_default_output(struct net_device *netdev) | ||
679 | { | ||
680 | struct softing_priv *priv = netdev_priv(netdev); | ||
681 | struct softing *card = priv->card; | ||
682 | |||
683 | switch (priv->chip) { | ||
684 | case 1000: | ||
685 | return (card->pdat->generation < 2) ? 0xfb : 0xfa; | ||
686 | case 5: | ||
687 | return 0x60; | ||
688 | default: | ||
689 | return 0x40; | ||
690 | } | ||
691 | } | ||
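The record layout documented in fw_parse() above (a little-endian type/addr/len header, the payload, and a 16-bit checksum over header plus payload) is simple enough to reproduce on the host side. The sketch below is only an illustration; the helper name and sample values are made up and nothing in it is part of the driver.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* lay out one firmware record: type, addr, len, payload, checksum,
 * all little endian, as described in fw_parse() */
static size_t put_record(uint8_t *out, uint16_t type, uint32_t addr,
			 const uint8_t *dat, uint16_t len)
{
	uint16_t sum = 0;
	size_t i;

	out[0] = type & 0xff;
	out[1] = type >> 8;
	out[2] = addr & 0xff;
	out[3] = (addr >> 8) & 0xff;
	out[4] = (addr >> 16) & 0xff;
	out[5] = (addr >> 24) & 0xff;
	out[6] = len & 0xff;
	out[7] = len >> 8;
	memcpy(&out[8], dat, len);
	/* the checksum covers the 8 header bytes plus the payload */
	for (i = 0; i < 8u + len; i++)
		sum += out[i];
	out[8 + len] = sum & 0xff;
	out[9 + len] = sum >> 8;
	return 10u + len;	/* matches the "*pmem += 10 + *plen" step */
}

int main(void)
{
	static const uint8_t dat[4] = { 0xde, 0xad, 0xbe, 0xef };
	uint8_t buf[64];
	size_t n = put_record(buf, 0, 0x1000, dat, sizeof(dat));

	printf("record: %zu bytes, checksum 0x%02x%02x\n",
	       n, buf[n - 1], buf[n - 2]);
	return 0;
}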
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c new file mode 100644 index 000000000000..aeea9f9ff6e8 --- /dev/null +++ b/drivers/net/can/softing/softing_main.c | |||
@@ -0,0 +1,894 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2008-2010 | ||
3 | * | ||
4 | * - Kurt Van Dijck, EIA Electronics | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the version 2 of the GNU General Public License | ||
8 | * as published by the Free Software Foundation | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
18 | */ | ||
19 | |||
20 | #include <linux/version.h> | ||
21 | #include <linux/module.h> | ||
22 | #include <linux/init.h> | ||
23 | #include <linux/interrupt.h> | ||
24 | |||
25 | #include "softing.h" | ||
26 | |||
27 | #define TX_ECHO_SKB_MAX (((TXMAX+1)/2)-1) | ||
28 | |||
29 | /* | ||
30 | * test whether a specific CAN netdev | ||
31 | * is online (i.e. up and running, not sleeping, not bus-off) | ||
32 | */ | ||
33 | static inline int canif_is_active(struct net_device *netdev) | ||
34 | { | ||
35 | struct can_priv *can = netdev_priv(netdev); | ||
36 | |||
37 | if (!netif_running(netdev)) | ||
38 | return 0; | ||
39 | return (can->state <= CAN_STATE_ERROR_PASSIVE); | ||
40 | } | ||
41 | |||
42 | /* reset DPRAM */ | ||
43 | static inline void softing_set_reset_dpram(struct softing *card) | ||
44 | { | ||
45 | if (card->pdat->generation >= 2) { | ||
46 | spin_lock_bh(&card->spin); | ||
47 | iowrite8(ioread8(&card->dpram[DPRAM_V2_RESET]) & ~1, | ||
48 | &card->dpram[DPRAM_V2_RESET]); | ||
49 | spin_unlock_bh(&card->spin); | ||
50 | } | ||
51 | } | ||
52 | |||
53 | static inline void softing_clr_reset_dpram(struct softing *card) | ||
54 | { | ||
55 | if (card->pdat->generation >= 2) { | ||
56 | spin_lock_bh(&card->spin); | ||
57 | iowrite8(ioread8(&card->dpram[DPRAM_V2_RESET]) | 1, | ||
58 | &card->dpram[DPRAM_V2_RESET]); | ||
59 | spin_unlock_bh(&card->spin); | ||
60 | } | ||
61 | } | ||
62 | |||
63 | /* trigger the tx queue-ing */ | ||
64 | static netdev_tx_t softing_netdev_start_xmit(struct sk_buff *skb, | ||
65 | struct net_device *dev) | ||
66 | { | ||
67 | struct softing_priv *priv = netdev_priv(dev); | ||
68 | struct softing *card = priv->card; | ||
69 | int ret; | ||
70 | uint8_t *ptr; | ||
71 | uint8_t fifo_wr, fifo_rd; | ||
72 | struct can_frame *cf = (struct can_frame *)skb->data; | ||
73 | uint8_t buf[DPRAM_TX_SIZE]; | ||
74 | |||
75 | if (can_dropped_invalid_skb(dev, skb)) | ||
76 | return NETDEV_TX_OK; | ||
77 | |||
78 | spin_lock(&card->spin); | ||
79 | |||
80 | ret = NETDEV_TX_BUSY; | ||
81 | if (!card->fw.up || | ||
82 | (card->tx.pending >= TXMAX) || | ||
83 | (priv->tx.pending >= TX_ECHO_SKB_MAX)) | ||
84 | goto xmit_done; | ||
85 | fifo_wr = ioread8(&card->dpram[DPRAM_TX_WR]); | ||
86 | fifo_rd = ioread8(&card->dpram[DPRAM_TX_RD]); | ||
87 | if (fifo_wr == fifo_rd) | ||
88 | /* fifo full */ | ||
89 | goto xmit_done; | ||
90 | memset(buf, 0, sizeof(buf)); | ||
91 | ptr = buf; | ||
92 | *ptr = CMD_TX; | ||
93 | if (cf->can_id & CAN_RTR_FLAG) | ||
94 | *ptr |= CMD_RTR; | ||
95 | if (cf->can_id & CAN_EFF_FLAG) | ||
96 | *ptr |= CMD_XTD; | ||
97 | if (priv->index) | ||
98 | *ptr |= CMD_BUS2; | ||
99 | ++ptr; | ||
100 | *ptr++ = cf->can_dlc; | ||
101 | *ptr++ = (cf->can_id >> 0); | ||
102 | *ptr++ = (cf->can_id >> 8); | ||
103 | if (cf->can_id & CAN_EFF_FLAG) { | ||
104 | *ptr++ = (cf->can_id >> 16); | ||
105 | *ptr++ = (cf->can_id >> 24); | ||
106 | } else { | ||
107 | /* increment 1, not 2 as you might think */ | ||
108 | ptr += 1; | ||
109 | } | ||
110 | if (!(cf->can_id & CAN_RTR_FLAG)) | ||
111 | memcpy(ptr, &cf->data[0], cf->can_dlc); | ||
112 | memcpy_toio(&card->dpram[DPRAM_TX + DPRAM_TX_SIZE * fifo_wr], | ||
113 | buf, DPRAM_TX_SIZE); | ||
114 | if (++fifo_wr >= DPRAM_TX_CNT) | ||
115 | fifo_wr = 0; | ||
116 | iowrite8(fifo_wr, &card->dpram[DPRAM_TX_WR]); | ||
117 | card->tx.last_bus = priv->index; | ||
118 | ++card->tx.pending; | ||
119 | ++priv->tx.pending; | ||
120 | can_put_echo_skb(skb, dev, priv->tx.echo_put); | ||
121 | ++priv->tx.echo_put; | ||
122 | if (priv->tx.echo_put >= TX_ECHO_SKB_MAX) | ||
123 | priv->tx.echo_put = 0; | ||
124 | /* can_put_echo_skb() saves the skb, safe to return TX_OK */ | ||
125 | ret = NETDEV_TX_OK; | ||
126 | xmit_done: | ||
127 | spin_unlock(&card->spin); | ||
128 | if (card->tx.pending >= TXMAX) { | ||
129 | int j; | ||
130 | for (j = 0; j < ARRAY_SIZE(card->net); ++j) { | ||
131 | if (card->net[j]) | ||
132 | netif_stop_queue(card->net[j]); | ||
133 | } | ||
134 | } | ||
135 | if (ret != NETDEV_TX_OK) | ||
136 | netif_stop_queue(dev); | ||
137 | |||
138 | return ret; | ||
139 | } | ||
140 | |||
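The packing above lays the TX record out as: command byte, dlc, two id bytes, then either two more id bytes (extended frames) or a single pad byte, and finally the data -- which is why the pointer advances by 1 rather than 2 for standard ids. A small userspace sketch of the same layout (the SK_CMD_* values and the 16-byte record size are stand-ins here; the real CMD_* and DPRAM_TX_SIZE constants live in softing.h):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SK_CMD_TX	0x01	/* placeholder bit values, real CMD_* are in softing.h */
#define SK_CMD_RTR	0x02
#define SK_CMD_XTD	0x04

/* pack a CAN frame the same way softing_netdev_start_xmit() fills the DPRAM record */
static size_t pack_tx_record(uint8_t *buf, uint32_t id, int eff, int rtr,
			     uint8_t dlc, const uint8_t *data)
{
	uint8_t *ptr = buf;

	*ptr = SK_CMD_TX;
	if (rtr)
		*ptr |= SK_CMD_RTR;
	if (eff)
		*ptr |= SK_CMD_XTD;
	++ptr;
	*ptr++ = dlc;
	*ptr++ = id >> 0;
	*ptr++ = id >> 8;
	if (eff) {
		*ptr++ = id >> 16;
		*ptr++ = id >> 24;
	} else {
		ptr += 1;	/* one pad byte: data starts at offset 5, not 4 */
	}
	if (!rtr)
		memcpy(ptr, data, dlc);
	return (size_t)(ptr - buf) + (rtr ? 0 : dlc);
}

int main(void)
{
	uint8_t rec[16] = { 0 }, payload[8] = { 0xde, 0xad };

	printf("used %zu bytes\n", pack_tx_record(rec, 0x123, 0, 0, 2, payload));
	return 0;
}

With this layout, data for a standard frame always starts at offset 5 and for an extended frame at offset 6.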
141 | /* | ||
142 | * shortcut for skb delivery | ||
143 | */ | ||
144 | int softing_netdev_rx(struct net_device *netdev, const struct can_frame *msg, | ||
145 | ktime_t ktime) | ||
146 | { | ||
147 | struct sk_buff *skb; | ||
148 | struct can_frame *cf; | ||
149 | |||
150 | skb = alloc_can_skb(netdev, &cf); | ||
151 | if (!skb) | ||
152 | return -ENOMEM; | ||
153 | memcpy(cf, msg, sizeof(*msg)); | ||
154 | skb->tstamp = ktime; | ||
155 | return netif_rx(skb); | ||
156 | } | ||
157 | |||
158 | /* | ||
159 | * softing_handle_1 | ||
160 | * pop 1 entry from the DPRAM queue, and process | ||
161 | */ | ||
162 | static int softing_handle_1(struct softing *card) | ||
163 | { | ||
164 | struct net_device *netdev; | ||
165 | struct softing_priv *priv; | ||
166 | ktime_t ktime; | ||
167 | struct can_frame msg; | ||
168 | int cnt = 0, lost_msg; | ||
169 | uint8_t fifo_rd, fifo_wr, cmd; | ||
170 | uint8_t *ptr; | ||
171 | uint32_t tmp_u32; | ||
172 | uint8_t buf[DPRAM_RX_SIZE]; | ||
173 | |||
174 | memset(&msg, 0, sizeof(msg)); | ||
175 | /* test for lost msgs */ | ||
176 | lost_msg = ioread8(&card->dpram[DPRAM_RX_LOST]); | ||
177 | if (lost_msg) { | ||
178 | int j; | ||
179 | /* reset condition */ | ||
180 | iowrite8(0, &card->dpram[DPRAM_RX_LOST]); | ||
181 | /* prepare msg */ | ||
182 | msg.can_id = CAN_ERR_FLAG | CAN_ERR_CRTL; | ||
183 | msg.can_dlc = CAN_ERR_DLC; | ||
184 | msg.data[1] = CAN_ERR_CRTL_RX_OVERFLOW; | ||
185 | /* | ||
186 | * service all busses; we don't know which one it applies to, | ||
187 | * but only service busses that are online | ||
188 | */ | ||
189 | for (j = 0; j < ARRAY_SIZE(card->net); ++j) { | ||
190 | netdev = card->net[j]; | ||
191 | if (!netdev) | ||
192 | continue; | ||
193 | if (!canif_is_active(netdev)) | ||
194 | /* a dead bus has no overflows */ | ||
195 | continue; | ||
196 | ++netdev->stats.rx_over_errors; | ||
197 | softing_netdev_rx(netdev, &msg, ktime_set(0, 0)); | ||
198 | } | ||
199 | /* prepare for other use */ | ||
200 | memset(&msg, 0, sizeof(msg)); | ||
201 | ++cnt; | ||
202 | } | ||
203 | |||
204 | fifo_rd = ioread8(&card->dpram[DPRAM_RX_RD]); | ||
205 | fifo_wr = ioread8(&card->dpram[DPRAM_RX_WR]); | ||
206 | |||
207 | if (++fifo_rd >= DPRAM_RX_CNT) | ||
208 | fifo_rd = 0; | ||
209 | if (fifo_wr == fifo_rd) | ||
210 | return cnt; | ||
211 | |||
212 | memcpy_fromio(buf, &card->dpram[DPRAM_RX + DPRAM_RX_SIZE*fifo_rd], | ||
213 | DPRAM_RX_SIZE); | ||
214 | mb(); | ||
215 | /* trigger dual port RAM */ | ||
216 | iowrite8(fifo_rd, &card->dpram[DPRAM_RX_RD]); | ||
217 | |||
218 | ptr = buf; | ||
219 | cmd = *ptr++; | ||
220 | if (cmd == 0xff) | ||
221 | /* not quite useful; the card has probably gone away */ | ||
222 | return 0; | ||
223 | netdev = card->net[0]; | ||
224 | if (cmd & CMD_BUS2) | ||
225 | netdev = card->net[1]; | ||
226 | priv = netdev_priv(netdev); | ||
227 | |||
228 | if (cmd & CMD_ERR) { | ||
229 | uint8_t can_state, state; | ||
230 | |||
231 | state = *ptr++; | ||
232 | |||
233 | msg.can_id = CAN_ERR_FLAG; | ||
234 | msg.can_dlc = CAN_ERR_DLC; | ||
235 | |||
236 | if (state & SF_MASK_BUSOFF) { | ||
237 | can_state = CAN_STATE_BUS_OFF; | ||
238 | msg.can_id |= CAN_ERR_BUSOFF; | ||
239 | state = STATE_BUSOFF; | ||
240 | } else if (state & SF_MASK_EPASSIVE) { | ||
241 | can_state = CAN_STATE_ERROR_PASSIVE; | ||
242 | msg.can_id |= CAN_ERR_CRTL; | ||
243 | msg.data[1] = CAN_ERR_CRTL_TX_PASSIVE; | ||
244 | state = STATE_EPASSIVE; | ||
245 | } else { | ||
246 | can_state = CAN_STATE_ERROR_ACTIVE; | ||
247 | msg.can_id |= CAN_ERR_CRTL; | ||
248 | state = STATE_EACTIVE; | ||
249 | } | ||
250 | /* update DPRAM */ | ||
251 | iowrite8(state, &card->dpram[priv->index ? | ||
252 | DPRAM_INFO_BUSSTATE2 : DPRAM_INFO_BUSSTATE]); | ||
253 | /* timestamp */ | ||
254 | tmp_u32 = le32_to_cpup((void *)ptr); | ||
255 | ptr += 4; | ||
256 | ktime = softing_raw2ktime(card, tmp_u32); | ||
257 | |||
258 | ++netdev->stats.rx_errors; | ||
259 | /* update internal status */ | ||
260 | if (can_state != priv->can.state) { | ||
261 | priv->can.state = can_state; | ||
262 | if (can_state == CAN_STATE_ERROR_PASSIVE) | ||
263 | ++priv->can.can_stats.error_passive; | ||
264 | else if (can_state == CAN_STATE_BUS_OFF) { | ||
265 | /* this calls can_close_cleanup() */ | ||
266 | can_bus_off(netdev); | ||
267 | netif_stop_queue(netdev); | ||
268 | } | ||
269 | /* trigger socketcan */ | ||
270 | softing_netdev_rx(netdev, &msg, ktime); | ||
271 | } | ||
272 | |||
273 | } else { | ||
274 | if (cmd & CMD_RTR) | ||
275 | msg.can_id |= CAN_RTR_FLAG; | ||
276 | msg.can_dlc = get_can_dlc(*ptr++); | ||
277 | if (cmd & CMD_XTD) { | ||
278 | msg.can_id |= CAN_EFF_FLAG; | ||
279 | msg.can_id |= le32_to_cpup((void *)ptr); | ||
280 | ptr += 4; | ||
281 | } else { | ||
282 | msg.can_id |= le16_to_cpup((void *)ptr); | ||
283 | ptr += 2; | ||
284 | } | ||
285 | /* timestamp */ | ||
286 | tmp_u32 = le32_to_cpup((void *)ptr); | ||
287 | ptr += 4; | ||
288 | ktime = softing_raw2ktime(card, tmp_u32); | ||
289 | if (!(msg.can_id & CAN_RTR_FLAG)) | ||
290 | memcpy(&msg.data[0], ptr, 8); | ||
291 | ptr += 8; | ||
292 | /* update socket */ | ||
293 | if (cmd & CMD_ACK) { | ||
294 | /* acknowledge; this was a tx msg */ | ||
295 | struct sk_buff *skb; | ||
296 | skb = priv->can.echo_skb[priv->tx.echo_get]; | ||
297 | if (skb) | ||
298 | skb->tstamp = ktime; | ||
299 | can_get_echo_skb(netdev, priv->tx.echo_get); | ||
300 | ++priv->tx.echo_get; | ||
301 | if (priv->tx.echo_get >= TX_ECHO_SKB_MAX) | ||
302 | priv->tx.echo_get = 0; | ||
303 | if (priv->tx.pending) | ||
304 | --priv->tx.pending; | ||
305 | if (card->tx.pending) | ||
306 | --card->tx.pending; | ||
307 | ++netdev->stats.tx_packets; | ||
308 | if (!(msg.can_id & CAN_RTR_FLAG)) | ||
309 | netdev->stats.tx_bytes += msg.can_dlc; | ||
310 | } else { | ||
311 | int ret; | ||
312 | |||
313 | ret = softing_netdev_rx(netdev, &msg, ktime); | ||
314 | if (ret == NET_RX_SUCCESS) { | ||
315 | ++netdev->stats.rx_packets; | ||
316 | if (!(msg.can_id & CAN_RTR_FLAG)) | ||
317 | netdev->stats.rx_bytes += msg.can_dlc; | ||
318 | } else { | ||
319 | ++netdev->stats.rx_dropped; | ||
320 | } | ||
321 | } | ||
322 | } | ||
323 | ++cnt; | ||
324 | return cnt; | ||
325 | } | ||
326 | |||
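For the receive direction, softing_handle_1() above reads the same kind of record back: a command byte, then for data frames a dlc, a little-endian identifier (2 or 4 bytes), a little-endian raw timestamp and up to 8 data bytes. A userspace sketch of that data-frame parse (the SK_CMD_* bits are placeholders for the real CMD_* constants; the error-frame and CMD_ACK branches are left out):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SK_CMD_RTR	0x02	/* placeholder bit values */
#define SK_CMD_XTD	0x04

struct rx_frame {
	uint32_t id;
	uint32_t timestamp;	/* raw card ticks, before softing_raw2ktime() */
	uint8_t dlc;
	uint8_t data[8];
};

static uint32_t get_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

/* parse one DPRAM RX record the way softing_handle_1() does for data frames */
static void parse_rx_record(const uint8_t *rec, struct rx_frame *out)
{
	const uint8_t *ptr = rec;
	uint8_t cmd = *ptr++;

	out->dlc = *ptr++;
	if (cmd & SK_CMD_XTD) {
		out->id = get_le32(ptr);
		ptr += 4;
	} else {
		out->id = (uint32_t)ptr[0] | ((uint32_t)ptr[1] << 8);
		ptr += 2;
	}
	out->timestamp = get_le32(ptr);
	ptr += 4;
	if (!(cmd & SK_CMD_RTR))
		memcpy(out->data, ptr, 8);
}

int main(void)
{
	/* standard frame: id 0x123, dlc 2, timestamp 16, data de ad */
	uint8_t rec[24] = { 0x00, 2, 0x23, 0x01, 0x10, 0x00, 0x00, 0x00, 0xde, 0xad };
	struct rx_frame f;

	parse_rx_record(rec, &f);
	printf("id 0x%x dlc %u ts %u\n", f.id, (unsigned)f.dlc, f.timestamp);
	return 0;
}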
327 | /* | ||
328 | * real interrupt handler | ||
329 | */ | ||
330 | static irqreturn_t softing_irq_thread(int irq, void *dev_id) | ||
331 | { | ||
332 | struct softing *card = (struct softing *)dev_id; | ||
333 | struct net_device *netdev; | ||
334 | struct softing_priv *priv; | ||
335 | int j, offset, work_done; | ||
336 | |||
337 | work_done = 0; | ||
338 | spin_lock_bh(&card->spin); | ||
339 | while (softing_handle_1(card) > 0) { | ||
340 | ++card->irq.svc_count; | ||
341 | ++work_done; | ||
342 | } | ||
343 | spin_unlock_bh(&card->spin); | ||
344 | /* resume tx queues */ | ||
345 | offset = card->tx.last_bus; | ||
346 | for (j = 0; j < ARRAY_SIZE(card->net); ++j) { | ||
347 | if (card->tx.pending >= TXMAX) | ||
348 | break; | ||
349 | netdev = card->net[(j + offset + 1) % card->pdat->nbus]; | ||
350 | if (!netdev) | ||
351 | continue; | ||
352 | priv = netdev_priv(netdev); | ||
353 | if (!canif_is_active(netdev)) | ||
354 | /* it makes no sense to wake dead busses */ | ||
355 | continue; | ||
356 | if (priv->tx.pending >= TX_ECHO_SKB_MAX) | ||
357 | continue; | ||
358 | ++work_done; | ||
359 | netif_wake_queue(netdev); | ||
360 | } | ||
361 | return work_done ? IRQ_HANDLED : IRQ_NONE; | ||
362 | } | ||
363 | |||
364 | /* | ||
365 | * interrupt routines: | ||
366 | * schedule the 'real interrupt handler' | ||
367 | */ | ||
368 | static irqreturn_t softing_irq_v2(int irq, void *dev_id) | ||
369 | { | ||
370 | struct softing *card = (struct softing *)dev_id; | ||
371 | uint8_t ir; | ||
372 | |||
373 | ir = ioread8(&card->dpram[DPRAM_V2_IRQ_TOHOST]); | ||
374 | iowrite8(0, &card->dpram[DPRAM_V2_IRQ_TOHOST]); | ||
375 | return (1 == ir) ? IRQ_WAKE_THREAD : IRQ_NONE; | ||
376 | } | ||
377 | |||
378 | static irqreturn_t softing_irq_v1(int irq, void *dev_id) | ||
379 | { | ||
380 | struct softing *card = (struct softing *)dev_id; | ||
381 | uint8_t ir; | ||
382 | |||
383 | ir = ioread8(&card->dpram[DPRAM_IRQ_TOHOST]); | ||
384 | iowrite8(0, &card->dpram[DPRAM_IRQ_TOHOST]); | ||
385 | return ir ? IRQ_WAKE_THREAD : IRQ_NONE; | ||
386 | } | ||
387 | |||
388 | /* | ||
389 | * netdev/candev inter-operability | ||
390 | */ | ||
391 | static int softing_netdev_open(struct net_device *ndev) | ||
392 | { | ||
393 | int ret; | ||
394 | |||
395 | /* check or determine and set bittime */ | ||
396 | ret = open_candev(ndev); | ||
397 | if (!ret) | ||
398 | ret = softing_startstop(ndev, 1); | ||
399 | return ret; | ||
400 | } | ||
401 | |||
402 | static int softing_netdev_stop(struct net_device *ndev) | ||
403 | { | ||
404 | int ret; | ||
405 | |||
406 | netif_stop_queue(ndev); | ||
407 | |||
408 | /* softing cycle does close_candev() */ | ||
409 | ret = softing_startstop(ndev, 0); | ||
410 | return ret; | ||
411 | } | ||
412 | |||
413 | static int softing_candev_set_mode(struct net_device *ndev, enum can_mode mode) | ||
414 | { | ||
415 | int ret; | ||
416 | |||
417 | switch (mode) { | ||
418 | case CAN_MODE_START: | ||
419 | /* softing_startstop does close_candev() */ | ||
420 | ret = softing_startstop(ndev, 1); | ||
421 | return ret; | ||
422 | case CAN_MODE_STOP: | ||
423 | case CAN_MODE_SLEEP: | ||
424 | return -EOPNOTSUPP; | ||
425 | } | ||
426 | return 0; | ||
427 | } | ||
428 | |||
429 | /* | ||
430 | * Softing device management helpers | ||
431 | */ | ||
432 | int softing_enable_irq(struct softing *card, int enable) | ||
433 | { | ||
434 | int ret; | ||
435 | |||
436 | if (!card->irq.nr) { | ||
437 | return 0; | ||
438 | } else if (card->irq.requested && !enable) { | ||
439 | free_irq(card->irq.nr, card); | ||
440 | card->irq.requested = 0; | ||
441 | } else if (!card->irq.requested && enable) { | ||
442 | ret = request_threaded_irq(card->irq.nr, | ||
443 | (card->pdat->generation >= 2) ? | ||
444 | softing_irq_v2 : softing_irq_v1, | ||
445 | softing_irq_thread, IRQF_SHARED, | ||
446 | dev_name(&card->pdev->dev), card); | ||
447 | if (ret) { | ||
448 | dev_alert(&card->pdev->dev, | ||
449 | "request_threaded_irq(%u) failed\n", | ||
450 | card->irq.nr); | ||
451 | return ret; | ||
452 | } | ||
453 | card->irq.requested = 1; | ||
454 | } | ||
455 | return 0; | ||
456 | } | ||
457 | |||
458 | static void softing_card_shutdown(struct softing *card) | ||
459 | { | ||
460 | int fw_up = 0; | ||
461 | |||
462 | if (mutex_lock_interruptible(&card->fw.lock)) | ||
463 | /* return -ERESTARTSYS */; | ||
464 | fw_up = card->fw.up; | ||
465 | card->fw.up = 0; | ||
466 | |||
467 | if (card->irq.requested && card->irq.nr) { | ||
468 | free_irq(card->irq.nr, card); | ||
469 | card->irq.requested = 0; | ||
470 | } | ||
471 | if (fw_up) { | ||
472 | if (card->pdat->enable_irq) | ||
473 | card->pdat->enable_irq(card->pdev, 0); | ||
474 | softing_set_reset_dpram(card); | ||
475 | if (card->pdat->reset) | ||
476 | card->pdat->reset(card->pdev, 1); | ||
477 | } | ||
478 | mutex_unlock(&card->fw.lock); | ||
479 | } | ||
480 | |||
481 | static __devinit int softing_card_boot(struct softing *card) | ||
482 | { | ||
483 | int ret, j; | ||
484 | static const uint8_t stream[] = { | ||
485 | 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, }; | ||
486 | unsigned char back[sizeof(stream)]; | ||
487 | |||
488 | if (mutex_lock_interruptible(&card->fw.lock)) | ||
489 | return -ERESTARTSYS; | ||
490 | if (card->fw.up) { | ||
491 | mutex_unlock(&card->fw.lock); | ||
492 | return 0; | ||
493 | } | ||
494 | /* reset board */ | ||
495 | if (card->pdat->enable_irq) | ||
496 | card->pdat->enable_irq(card->pdev, 1); | ||
497 | /* boot card */ | ||
498 | softing_set_reset_dpram(card); | ||
499 | if (card->pdat->reset) | ||
500 | card->pdat->reset(card->pdev, 1); | ||
501 | for (j = 0; (j + sizeof(stream)) < card->dpram_size; | ||
502 | j += sizeof(stream)) { | ||
503 | |||
504 | memcpy_toio(&card->dpram[j], stream, sizeof(stream)); | ||
505 | /* flush IO cache */ | ||
506 | mb(); | ||
507 | memcpy_fromio(back, &card->dpram[j], sizeof(stream)); | ||
508 | |||
509 | if (!memcmp(back, stream, sizeof(stream))) | ||
510 | continue; | ||
511 | /* memory is not equal */ | ||
512 | dev_alert(&card->pdev->dev, "dpram failed at 0x%04x\n", j); | ||
513 | ret = -EIO; | ||
514 | goto failed; | ||
515 | } | ||
516 | wmb(); | ||
517 | /* load boot firmware */ | ||
518 | ret = softing_load_fw(card->pdat->boot.fw, card, card->dpram, | ||
519 | card->dpram_size, | ||
520 | card->pdat->boot.offs - card->pdat->boot.addr); | ||
521 | if (ret < 0) | ||
522 | goto failed; | ||
523 | /* load loader firmware */ | ||
524 | ret = softing_load_fw(card->pdat->load.fw, card, card->dpram, | ||
525 | card->dpram_size, | ||
526 | card->pdat->load.offs - card->pdat->load.addr); | ||
527 | if (ret < 0) | ||
528 | goto failed; | ||
529 | |||
530 | if (card->pdat->reset) | ||
531 | card->pdat->reset(card->pdev, 0); | ||
532 | softing_clr_reset_dpram(card); | ||
533 | ret = softing_bootloader_command(card, 0, "card boot"); | ||
534 | if (ret < 0) | ||
535 | goto failed; | ||
536 | ret = softing_load_app_fw(card->pdat->app.fw, card); | ||
537 | if (ret < 0) | ||
538 | goto failed; | ||
539 | |||
540 | ret = softing_chip_poweron(card); | ||
541 | if (ret < 0) | ||
542 | goto failed; | ||
543 | |||
544 | card->fw.up = 1; | ||
545 | mutex_unlock(&card->fw.lock); | ||
546 | return 0; | ||
547 | failed: | ||
548 | card->fw.up = 0; | ||
549 | if (card->pdat->enable_irq) | ||
550 | card->pdat->enable_irq(card->pdev, 0); | ||
551 | softing_set_reset_dpram(card); | ||
552 | if (card->pdat->reset) | ||
553 | card->pdat->reset(card->pdev, 1); | ||
554 | mutex_unlock(&card->fw.lock); | ||
555 | return ret; | ||
556 | } | ||
557 | |||
558 | /* | ||
559 | * netdev sysfs | ||
560 | */ | ||
561 | static ssize_t show_channel(struct device *dev, struct device_attribute *attr, | ||
562 | char *buf) | ||
563 | { | ||
564 | struct net_device *ndev = to_net_dev(dev); | ||
565 | struct softing_priv *priv = netdev2softing(ndev); | ||
566 | |||
567 | return sprintf(buf, "%i\n", priv->index); | ||
568 | } | ||
569 | |||
570 | static ssize_t show_chip(struct device *dev, struct device_attribute *attr, | ||
571 | char *buf) | ||
572 | { | ||
573 | struct net_device *ndev = to_net_dev(dev); | ||
574 | struct softing_priv *priv = netdev2softing(ndev); | ||
575 | |||
576 | return sprintf(buf, "%i\n", priv->chip); | ||
577 | } | ||
578 | |||
579 | static ssize_t show_output(struct device *dev, struct device_attribute *attr, | ||
580 | char *buf) | ||
581 | { | ||
582 | struct net_device *ndev = to_net_dev(dev); | ||
583 | struct softing_priv *priv = netdev2softing(ndev); | ||
584 | |||
585 | return sprintf(buf, "0x%02x\n", priv->output); | ||
586 | } | ||
587 | |||
588 | static ssize_t store_output(struct device *dev, struct device_attribute *attr, | ||
589 | const char *buf, size_t count) | ||
590 | { | ||
591 | struct net_device *ndev = to_net_dev(dev); | ||
592 | struct softing_priv *priv = netdev2softing(ndev); | ||
593 | struct softing *card = priv->card; | ||
594 | unsigned long val; | ||
595 | int ret; | ||
596 | |||
597 | ret = strict_strtoul(buf, 0, &val); | ||
598 | if (ret < 0) | ||
599 | return ret; | ||
600 | val &= 0xFF; | ||
601 | |||
602 | ret = mutex_lock_interruptible(&card->fw.lock); | ||
603 | if (ret) | ||
604 | return -ERESTARTSYS; | ||
605 | if (netif_running(ndev)) { | ||
606 | mutex_unlock(&card->fw.lock); | ||
607 | return -EBUSY; | ||
608 | } | ||
609 | priv->output = val; | ||
610 | mutex_unlock(&card->fw.lock); | ||
611 | return count; | ||
612 | } | ||
613 | |||
614 | static const DEVICE_ATTR(channel, S_IRUGO, show_channel, NULL); | ||
615 | static const DEVICE_ATTR(chip, S_IRUGO, show_chip, NULL); | ||
616 | static const DEVICE_ATTR(output, S_IRUGO | S_IWUSR, show_output, store_output); | ||
617 | |||
618 | static const struct attribute *const netdev_sysfs_attrs[] = { | ||
619 | &dev_attr_channel.attr, | ||
620 | &dev_attr_chip.attr, | ||
621 | &dev_attr_output.attr, | ||
622 | NULL, | ||
623 | }; | ||
624 | static const struct attribute_group netdev_sysfs_group = { | ||
625 | .name = NULL, | ||
626 | .attrs = (struct attribute **)netdev_sysfs_attrs, | ||
627 | }; | ||
628 | |||
629 | static const struct net_device_ops softing_netdev_ops = { | ||
630 | .ndo_open = softing_netdev_open, | ||
631 | .ndo_stop = softing_netdev_stop, | ||
632 | .ndo_start_xmit = softing_netdev_start_xmit, | ||
633 | }; | ||
634 | |||
635 | static const struct can_bittiming_const softing_btr_const = { | ||
636 | .name = "softing", | ||
637 | .tseg1_min = 1, | ||
638 | .tseg1_max = 16, | ||
639 | .tseg2_min = 1, | ||
640 | .tseg2_max = 8, | ||
641 | .sjw_max = 4, /* overruled */ | ||
642 | .brp_min = 1, | ||
643 | .brp_max = 32, /* overruled */ | ||
644 | .brp_inc = 1, | ||
645 | }; | ||
646 | |||
647 | |||
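The constants above bound what the socketcan bit-timing calculation may pick; brp_max and sjw_max are then overruled per card in softing_netdev_create() below. Assuming the usual CAN convention that one bit is 1 (sync) + tseg1 + tseg2 time quanta, a quick sanity check of an achievable rate with the 8 MHz clock set below:

#include <stdio.h>

int main(void)
{
	unsigned int clock = 8000000;	/* priv->can.clock.freq in softing_netdev_create() */
	unsigned int brp = 1, tseg1 = 13, tseg2 = 2;
	/* one bit = sync(1) + tseg1 + tseg2 time quanta of brp/clock each */
	unsigned int bitrate = clock / (brp * (1 + tseg1 + tseg2));

	printf("%u bit/s\n", bitrate);	/* 500000 */
	return 0;
}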
648 | static __devinit struct net_device *softing_netdev_create(struct softing *card, | ||
649 | uint16_t chip_id) | ||
650 | { | ||
651 | struct net_device *netdev; | ||
652 | struct softing_priv *priv; | ||
653 | |||
654 | netdev = alloc_candev(sizeof(*priv), TX_ECHO_SKB_MAX); | ||
655 | if (!netdev) { | ||
656 | dev_alert(&card->pdev->dev, "alloc_candev failed\n"); | ||
657 | return NULL; | ||
658 | } | ||
659 | priv = netdev_priv(netdev); | ||
660 | priv->netdev = netdev; | ||
661 | priv->card = card; | ||
662 | memcpy(&priv->btr_const, &softing_btr_const, sizeof(priv->btr_const)); | ||
663 | priv->btr_const.brp_max = card->pdat->max_brp; | ||
664 | priv->btr_const.sjw_max = card->pdat->max_sjw; | ||
665 | priv->can.bittiming_const = &priv->btr_const; | ||
666 | priv->can.clock.freq = 8000000; | ||
667 | priv->chip = chip_id; | ||
668 | priv->output = softing_default_output(netdev); | ||
669 | SET_NETDEV_DEV(netdev, &card->pdev->dev); | ||
670 | |||
671 | netdev->flags |= IFF_ECHO; | ||
672 | netdev->netdev_ops = &softing_netdev_ops; | ||
673 | priv->can.do_set_mode = softing_candev_set_mode; | ||
674 | priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES; | ||
675 | |||
676 | return netdev; | ||
677 | } | ||
678 | |||
679 | static __devinit int softing_netdev_register(struct net_device *netdev) | ||
680 | { | ||
681 | int ret; | ||
682 | |||
683 | netdev->sysfs_groups[0] = &netdev_sysfs_group; | ||
684 | ret = register_candev(netdev); | ||
685 | if (ret) { | ||
686 | dev_alert(&netdev->dev, "register failed\n"); | ||
687 | return ret; | ||
688 | } | ||
689 | return 0; | ||
690 | } | ||
691 | |||
692 | static void softing_netdev_cleanup(struct net_device *netdev) | ||
693 | { | ||
694 | unregister_candev(netdev); | ||
695 | free_candev(netdev); | ||
696 | } | ||
697 | |||
698 | /* | ||
699 | * sysfs for Platform device | ||
700 | */ | ||
701 | #define DEV_ATTR_RO(name, member) \ | ||
702 | static ssize_t show_##name(struct device *dev, \ | ||
703 | struct device_attribute *attr, char *buf) \ | ||
704 | { \ | ||
705 | struct softing *card = platform_get_drvdata(to_platform_device(dev)); \ | ||
706 | return sprintf(buf, "%u\n", card->member); \ | ||
707 | } \ | ||
708 | static DEVICE_ATTR(name, 0444, show_##name, NULL) | ||
709 | |||
710 | #define DEV_ATTR_RO_STR(name, member) \ | ||
711 | static ssize_t show_##name(struct device *dev, \ | ||
712 | struct device_attribute *attr, char *buf) \ | ||
713 | { \ | ||
714 | struct softing *card = platform_get_drvdata(to_platform_device(dev)); \ | ||
715 | return sprintf(buf, "%s\n", card->member); \ | ||
716 | } \ | ||
717 | static DEVICE_ATTR(name, 0444, show_##name, NULL) | ||
718 | |||
719 | DEV_ATTR_RO(serial, id.serial); | ||
720 | DEV_ATTR_RO_STR(firmware, pdat->app.fw); | ||
721 | DEV_ATTR_RO(firmware_version, id.fw_version); | ||
722 | DEV_ATTR_RO_STR(hardware, pdat->name); | ||
723 | DEV_ATTR_RO(hardware_version, id.hw_version); | ||
724 | DEV_ATTR_RO(license, id.license); | ||
725 | DEV_ATTR_RO(frequency, id.freq); | ||
726 | DEV_ATTR_RO(txpending, tx.pending); | ||
727 | |||
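To make the macro concrete, DEV_ATTR_RO(serial, id.serial) above expands to roughly the following; each such attribute then shows up as a read-only file in the platform device's sysfs directory:

static ssize_t show_serial(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct softing *card = platform_get_drvdata(to_platform_device(dev));
	return sprintf(buf, "%u\n", card->id.serial);
}
static DEVICE_ATTR(serial, 0444, show_serial, NULL);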
728 | static struct attribute *softing_pdev_attrs[] = { | ||
729 | &dev_attr_serial.attr, | ||
730 | &dev_attr_firmware.attr, | ||
731 | &dev_attr_firmware_version.attr, | ||
732 | &dev_attr_hardware.attr, | ||
733 | &dev_attr_hardware_version.attr, | ||
734 | &dev_attr_license.attr, | ||
735 | &dev_attr_frequency.attr, | ||
736 | &dev_attr_txpending.attr, | ||
737 | NULL, | ||
738 | }; | ||
739 | |||
740 | static const struct attribute_group softing_pdev_group = { | ||
741 | .name = NULL, | ||
742 | .attrs = softing_pdev_attrs, | ||
743 | }; | ||
744 | |||
745 | /* | ||
746 | * platform driver | ||
747 | */ | ||
748 | static __devexit int softing_pdev_remove(struct platform_device *pdev) | ||
749 | { | ||
750 | struct softing *card = platform_get_drvdata(pdev); | ||
751 | int j; | ||
752 | |||
753 | /* first, disable card */ | ||
754 | softing_card_shutdown(card); | ||
755 | |||
756 | for (j = 0; j < ARRAY_SIZE(card->net); ++j) { | ||
757 | if (!card->net[j]) | ||
758 | continue; | ||
759 | softing_netdev_cleanup(card->net[j]); | ||
760 | card->net[j] = NULL; | ||
761 | } | ||
762 | sysfs_remove_group(&pdev->dev.kobj, &softing_pdev_group); | ||
763 | |||
764 | iounmap(card->dpram); | ||
765 | kfree(card); | ||
766 | return 0; | ||
767 | } | ||
768 | |||
769 | static __devinit int softing_pdev_probe(struct platform_device *pdev) | ||
770 | { | ||
771 | const struct softing_platform_data *pdat = pdev->dev.platform_data; | ||
772 | struct softing *card; | ||
773 | struct net_device *netdev; | ||
774 | struct softing_priv *priv; | ||
775 | struct resource *pres; | ||
776 | int ret; | ||
777 | int j; | ||
778 | |||
779 | if (!pdat) { | ||
780 | dev_warn(&pdev->dev, "no platform data\n"); | ||
781 | return -EINVAL; | ||
782 | } | ||
783 | if (pdat->nbus > ARRAY_SIZE(card->net)) { | ||
784 | dev_warn(&pdev->dev, "%u nets??\n", pdat->nbus); | ||
785 | return -EINVAL; | ||
786 | } | ||
787 | |||
788 | card = kzalloc(sizeof(*card), GFP_KERNEL); | ||
789 | if (!card) | ||
790 | return -ENOMEM; | ||
791 | card->pdat = pdat; | ||
792 | card->pdev = pdev; | ||
793 | platform_set_drvdata(pdev, card); | ||
794 | mutex_init(&card->fw.lock); | ||
795 | spin_lock_init(&card->spin); | ||
796 | |||
797 | ret = -EINVAL; | ||
798 | pres = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
799 | if (!pres) | ||
800 | goto platform_resource_failed; | ||
801 | card->dpram_phys = pres->start; | ||
802 | card->dpram_size = pres->end - pres->start + 1; | ||
803 | card->dpram = ioremap_nocache(card->dpram_phys, card->dpram_size); | ||
804 | if (!card->dpram) { | ||
805 | dev_alert(&card->pdev->dev, "dpram ioremap failed\n"); | ||
806 | goto ioremap_failed; | ||
807 | } | ||
808 | |||
809 | pres = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | ||
810 | if (pres) | ||
811 | card->irq.nr = pres->start; | ||
812 | |||
813 | /* reset card */ | ||
814 | ret = softing_card_boot(card); | ||
815 | if (ret < 0) { | ||
816 | dev_alert(&pdev->dev, "failed to boot\n"); | ||
817 | goto boot_failed; | ||
818 | } | ||
819 | |||
820 | /* only now are the chips known */ | ||
821 | card->id.freq = card->pdat->freq; | ||
822 | |||
823 | ret = sysfs_create_group(&pdev->dev.kobj, &softing_pdev_group); | ||
824 | if (ret < 0) { | ||
825 | dev_alert(&card->pdev->dev, "sysfs failed\n"); | ||
826 | goto sysfs_failed; | ||
827 | } | ||
828 | |||
829 | ret = -ENOMEM; | ||
830 | for (j = 0; j < ARRAY_SIZE(card->net); ++j) { | ||
831 | card->net[j] = netdev = | ||
832 | softing_netdev_create(card, card->id.chip[j]); | ||
833 | if (!netdev) { | ||
834 | dev_alert(&pdev->dev, "failed to make can[%i]\n", j); | ||
835 | goto netdev_failed; | ||
836 | } | ||
837 | priv = netdev_priv(card->net[j]); | ||
838 | priv->index = j; | ||
839 | ret = softing_netdev_register(netdev); | ||
840 | if (ret) { | ||
841 | free_candev(netdev); | ||
842 | card->net[j] = NULL; | ||
843 | dev_alert(&card->pdev->dev, | ||
844 | "failed to register can[%i]\n", j); | ||
845 | goto netdev_failed; | ||
846 | } | ||
847 | } | ||
848 | dev_info(&card->pdev->dev, "%s ready.\n", card->pdat->name); | ||
849 | return 0; | ||
850 | |||
851 | netdev_failed: | ||
852 | for (j = 0; j < ARRAY_SIZE(card->net); ++j) { | ||
853 | if (!card->net[j]) | ||
854 | continue; | ||
855 | softing_netdev_cleanup(card->net[j]); | ||
856 | } | ||
857 | sysfs_remove_group(&pdev->dev.kobj, &softing_pdev_group); | ||
858 | sysfs_failed: | ||
859 | softing_card_shutdown(card); | ||
860 | boot_failed: | ||
861 | iounmap(card->dpram); | ||
862 | ioremap_failed: | ||
863 | platform_resource_failed: | ||
864 | kfree(card); | ||
865 | return ret; | ||
866 | } | ||
867 | |||
868 | static struct platform_driver softing_driver = { | ||
869 | .driver = { | ||
870 | .name = "softing", | ||
871 | .owner = THIS_MODULE, | ||
872 | }, | ||
873 | .probe = softing_pdev_probe, | ||
874 | .remove = __devexit_p(softing_pdev_remove), | ||
875 | }; | ||
876 | |||
877 | MODULE_ALIAS("platform:softing"); | ||
878 | |||
879 | static int __init softing_start(void) | ||
880 | { | ||
881 | return platform_driver_register(&softing_driver); | ||
882 | } | ||
883 | |||
884 | static void __exit softing_stop(void) | ||
885 | { | ||
886 | platform_driver_unregister(&softing_driver); | ||
887 | } | ||
888 | |||
889 | module_init(softing_start); | ||
890 | module_exit(softing_stop); | ||
891 | |||
892 | MODULE_DESCRIPTION("Softing DPRAM CAN driver"); | ||
893 | MODULE_AUTHOR("Kurt Van Dijck <kurt.van.dijck@eia.be>"); | ||
894 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/net/can/softing/softing_platform.h b/drivers/net/can/softing/softing_platform.h new file mode 100644 index 000000000000..ebbf69815623 --- /dev/null +++ b/drivers/net/can/softing/softing_platform.h | |||
@@ -0,0 +1,40 @@ | |||
1 | |||
2 | #include <linux/platform_device.h> | ||
3 | |||
4 | #ifndef _SOFTING_DEVICE_H_ | ||
5 | #define _SOFTING_DEVICE_H_ | ||
6 | |||
7 | /* softing firmware directory prefix */ | ||
8 | #define fw_dir "softing-4.6/" | ||
9 | |||
10 | struct softing_platform_data { | ||
11 | unsigned int manf; | ||
12 | unsigned int prod; | ||
13 | /* | ||
14 | * generation | ||
15 | * 1st with NEC or SJA1000 | ||
16 | * 8bit, exclusive interrupt, ... | ||
17 | * 2nd only SJA1000 | ||
18 | * 16bit, shared interrupt | ||
19 | */ | ||
20 | int generation; | ||
21 | int nbus; /* # busses on device */ | ||
22 | unsigned int freq; /* operating frequency in Hz */ | ||
23 | unsigned int max_brp; | ||
24 | unsigned int max_sjw; | ||
25 | unsigned long dpram_size; | ||
26 | const char *name; | ||
27 | struct { | ||
28 | unsigned long offs; | ||
29 | unsigned long addr; | ||
30 | const char *fw; | ||
31 | } boot, load, app; | ||
32 | /* | ||
33 | * reset() function | ||
34 | * bring pdev in or out of reset, depending on value | ||
35 | */ | ||
36 | int (*reset)(struct platform_device *pdev, int value); | ||
37 | int (*enable_irq)(struct platform_device *pdev, int value); | ||
38 | }; | ||
39 | |||
40 | #endif | ||
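For illustration only, a carrier or bus driver could hand this structure to the "softing" platform driver roughly as sketched below; every identifier, address, firmware name and parameter value here is made up, the real ones come from the card-specific support code:

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include "softing_platform.h"

static struct softing_platform_data example_pdat = {
	.manf		= 0x0168,		/* hypothetical ids */
	.prod		= 0x0001,
	.generation	= 2,
	.nbus		= 2,
	.freq		= 16000000,		/* 16 MHz */
	.max_brp	= 64,
	.max_sjw	= 4,
	.dpram_size	= 0x1000,
	.name		= "example card",
	.boot	= { .offs = 0x0000, .addr = 0x0000, .fw = fw_dir "bcard.bin", },
	.load	= { .offs = 0x0000, .addr = 0x0000, .fw = fw_dir "ldcard.bin", },
	.app	= { .offs = 0x0000, .addr = 0x0000, .fw = fw_dir "cancard.bin", },
	/* .reset and .enable_irq may stay NULL if the card needs neither */
};

static struct resource example_res[] = {
	{
		.start	= 0xd0000000,		/* DPRAM window */
		.end	= 0xd0000fff,
		.flags	= IORESOURCE_MEM,
	}, {
		.start	= 42,			/* interrupt line */
		.end	= 42,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device example_dev = {
	.name			= "softing",	/* matches softing_driver above */
	.id			= -1,
	.resource		= example_res,
	.num_resources		= ARRAY_SIZE(example_res),
	.dev.platform_data	= &example_pdat,
};

Registering example_dev with platform_device_register() would then trigger softing_pdev_probe() in softing_main.c.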
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c index 7206ab2cbbf8..3437613f0454 100644 --- a/drivers/net/cassini.c +++ b/drivers/net/cassini.c | |||
@@ -3203,7 +3203,7 @@ static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr, | |||
3203 | int phy_type = CAS_PHY_MII_MDIO0; /* default phy type */ | 3203 | int phy_type = CAS_PHY_MII_MDIO0; /* default phy type */ |
3204 | int mac_off = 0; | 3204 | int mac_off = 0; |
3205 | 3205 | ||
3206 | #if defined(CONFIG_OF) | 3206 | #if defined(CONFIG_SPARC) |
3207 | const unsigned char *addr; | 3207 | const unsigned char *addr; |
3208 | #endif | 3208 | #endif |
3209 | 3209 | ||
@@ -3354,7 +3354,7 @@ use_random_mac_addr: | |||
3354 | if (found & VPD_FOUND_MAC) | 3354 | if (found & VPD_FOUND_MAC) |
3355 | goto done; | 3355 | goto done; |
3356 | 3356 | ||
3357 | #if defined(CONFIG_OF) | 3357 | #if defined(CONFIG_SPARC) |
3358 | addr = of_get_property(cp->of_node, "local-mac-address", NULL); | 3358 | addr = of_get_property(cp->of_node, "local-mac-address", NULL); |
3359 | if (addr != NULL) { | 3359 | if (addr != NULL) { |
3360 | memcpy(dev_addr, addr, 6); | 3360 | memcpy(dev_addr, addr, 6); |
@@ -5031,7 +5031,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev, | |||
5031 | cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE : | 5031 | cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE : |
5032 | cassini_debug; | 5032 | cassini_debug; |
5033 | 5033 | ||
5034 | #if defined(CONFIG_OF) | 5034 | #if defined(CONFIG_SPARC) |
5035 | cp->of_node = pci_device_to_OF_node(pdev); | 5035 | cp->of_node = pci_device_to_OF_node(pdev); |
5036 | #endif | 5036 | #endif |
5037 | 5037 | ||
diff --git a/drivers/net/chelsio/subr.c b/drivers/net/chelsio/subr.c index 63ebf76d2390..8a43c7e19701 100644 --- a/drivers/net/chelsio/subr.c +++ b/drivers/net/chelsio/subr.c | |||
@@ -556,7 +556,7 @@ struct chelsio_vpd_t { | |||
556 | #define EEPROM_MAX_POLL 4 | 556 | #define EEPROM_MAX_POLL 4 |
557 | 557 | ||
558 | /* | 558 | /* |
559 | * Read SEEPROM. A zero is written to the flag register when the addres is | 559 | * Read SEEPROM. A zero is written to the flag register when the address is |
560 | * written to the Control register. The hardware device will set the flag to a | 560 | * written to the Control register. The hardware device will set the flag to a |
561 | * one when 4B have been transferred to the Data register. | 561 | * one when 4B have been transferred to the Data register. |
562 | */ | 562 | */ |
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c index 263a2944566f..302be4aa69d6 100644 --- a/drivers/net/cnic.c +++ b/drivers/net/cnic.c | |||
@@ -699,13 +699,13 @@ static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma) | |||
699 | static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma) | 699 | static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma) |
700 | { | 700 | { |
701 | int i; | 701 | int i; |
702 | u32 *page_table = dma->pgtbl; | 702 | __le32 *page_table = (__le32 *) dma->pgtbl; |
703 | 703 | ||
704 | for (i = 0; i < dma->num_pages; i++) { | 704 | for (i = 0; i < dma->num_pages; i++) { |
705 | /* Each entry needs to be in big endian format. */ | 705 | /* Each entry needs to be in big endian format. */ |
706 | *page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32); | 706 | *page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32); |
707 | page_table++; | 707 | page_table++; |
708 | *page_table = (u32) dma->pg_map_arr[i]; | 708 | *page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff); |
709 | page_table++; | 709 | page_table++; |
710 | } | 710 | } |
711 | } | 711 | } |
@@ -713,13 +713,13 @@ static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma) | |||
713 | static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma) | 713 | static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma) |
714 | { | 714 | { |
715 | int i; | 715 | int i; |
716 | u32 *page_table = dma->pgtbl; | 716 | __le32 *page_table = (__le32 *) dma->pgtbl; |
717 | 717 | ||
718 | for (i = 0; i < dma->num_pages; i++) { | 718 | for (i = 0; i < dma->num_pages; i++) { |
719 | /* Each entry needs to be in little endian format. */ | 719 | /* Each entry needs to be in little endian format. */ |
720 | *page_table = dma->pg_map_arr[i] & 0xffffffff; | 720 | *page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff); |
721 | page_table++; | 721 | page_table++; |
722 | *page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32); | 722 | *page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32); |
723 | page_table++; | 723 | page_table++; |
724 | } | 724 | } |
725 | } | 725 | } |
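The cnic change above replaces plain u32 stores with explicit cpu_to_le32() conversions, so the byte order of each page-table word no longer depends on the host CPU. A userspace sketch of the same split of a 64-bit DMA address into two explicitly little-endian 32-bit words (htole32 from <endian.h> standing in for the kernel's cpu_to_le32):

#define _DEFAULT_SOURCE
#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint64_t dma_addr = 0x0000000123456789ULL;
	uint32_t table[2];
	unsigned char raw[8];

	/* high word first, then low word, each stored little-endian */
	table[0] = htole32((uint32_t)(dma_addr >> 32));
	table[1] = htole32((uint32_t)(dma_addr & 0xffffffff));
	memcpy(raw, table, sizeof(raw));

	for (int i = 0; i < 8; i++)
		printf("%02x ", raw[i]);
	printf("\n");	/* 01 00 00 00 89 67 45 23 on any host */
	return 0;
}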
@@ -2760,6 +2760,8 @@ static u32 cnic_service_bnx2_queues(struct cnic_dev *dev) | |||
2760 | u32 status_idx = (u16) *cp->kcq1.status_idx_ptr; | 2760 | u32 status_idx = (u16) *cp->kcq1.status_idx_ptr; |
2761 | int kcqe_cnt; | 2761 | int kcqe_cnt; |
2762 | 2762 | ||
2763 | /* status block index must be read before reading other fields */ | ||
2764 | rmb(); | ||
2763 | cp->kwq_con_idx = *cp->kwq_con_idx_ptr; | 2765 | cp->kwq_con_idx = *cp->kwq_con_idx_ptr; |
2764 | 2766 | ||
2765 | while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) { | 2767 | while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) { |
@@ -2770,6 +2772,8 @@ static u32 cnic_service_bnx2_queues(struct cnic_dev *dev) | |||
2770 | barrier(); | 2772 | barrier(); |
2771 | if (status_idx != *cp->kcq1.status_idx_ptr) { | 2773 | if (status_idx != *cp->kcq1.status_idx_ptr) { |
2772 | status_idx = (u16) *cp->kcq1.status_idx_ptr; | 2774 | status_idx = (u16) *cp->kcq1.status_idx_ptr; |
2775 | /* status block index must be read first */ | ||
2776 | rmb(); | ||
2773 | cp->kwq_con_idx = *cp->kwq_con_idx_ptr; | 2777 | cp->kwq_con_idx = *cp->kwq_con_idx_ptr; |
2774 | } else | 2778 | } else |
2775 | break; | 2779 | break; |
@@ -2888,6 +2892,8 @@ static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info) | |||
2888 | u32 last_status = *info->status_idx_ptr; | 2892 | u32 last_status = *info->status_idx_ptr; |
2889 | int kcqe_cnt; | 2893 | int kcqe_cnt; |
2890 | 2894 | ||
2895 | /* status block index must be read before reading the KCQ */ | ||
2896 | rmb(); | ||
2891 | while ((kcqe_cnt = cnic_get_kcqes(dev, info))) { | 2897 | while ((kcqe_cnt = cnic_get_kcqes(dev, info))) { |
2892 | 2898 | ||
2893 | service_kcqes(dev, kcqe_cnt); | 2899 | service_kcqes(dev, kcqe_cnt); |
@@ -2898,6 +2904,8 @@ static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info) | |||
2898 | break; | 2904 | break; |
2899 | 2905 | ||
2900 | last_status = *info->status_idx_ptr; | 2906 | last_status = *info->status_idx_ptr; |
2907 | /* status block index must be read before reading the KCQ */ | ||
2908 | rmb(); | ||
2901 | } | 2909 | } |
2902 | return last_status; | 2910 | return last_status; |
2903 | } | 2911 | } |
@@ -2906,26 +2914,35 @@ static void cnic_service_bnx2x_bh(unsigned long data) | |||
2906 | { | 2914 | { |
2907 | struct cnic_dev *dev = (struct cnic_dev *) data; | 2915 | struct cnic_dev *dev = (struct cnic_dev *) data; |
2908 | struct cnic_local *cp = dev->cnic_priv; | 2916 | struct cnic_local *cp = dev->cnic_priv; |
2909 | u32 status_idx; | 2917 | u32 status_idx, new_status_idx; |
2910 | 2918 | ||
2911 | if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) | 2919 | if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) |
2912 | return; | 2920 | return; |
2913 | 2921 | ||
2914 | status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1); | 2922 | while (1) { |
2923 | status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1); | ||
2915 | 2924 | ||
2916 | CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx + MAX_KCQ_IDX); | 2925 | CNIC_WR16(dev, cp->kcq1.io_addr, |
2926 | cp->kcq1.sw_prod_idx + MAX_KCQ_IDX); | ||
2917 | 2927 | ||
2918 | if (BNX2X_CHIP_IS_E2(cp->chip_id)) { | 2928 | if (!BNX2X_CHIP_IS_E2(cp->chip_id)) { |
2919 | status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2); | 2929 | cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID, |
2930 | status_idx, IGU_INT_ENABLE, 1); | ||
2931 | break; | ||
2932 | } | ||
2933 | |||
2934 | new_status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2); | ||
2935 | |||
2936 | if (new_status_idx != status_idx) | ||
2937 | continue; | ||
2920 | 2938 | ||
2921 | CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx + | 2939 | CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx + |
2922 | MAX_KCQ_IDX); | 2940 | MAX_KCQ_IDX); |
2923 | 2941 | ||
2924 | cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, | 2942 | cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, |
2925 | status_idx, IGU_INT_ENABLE, 1); | 2943 | status_idx, IGU_INT_ENABLE, 1); |
2926 | } else { | 2944 | |
2927 | cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID, | 2945 | break; |
2928 | status_idx, IGU_INT_ENABLE, 1); | ||
2929 | } | 2946 | } |
2930 | } | 2947 | } |
2931 | 2948 | ||
diff --git a/drivers/net/cxgb3/mc5.c b/drivers/net/cxgb3/mc5.c index a8766fb2f9ab..e13b7fe9d082 100644 --- a/drivers/net/cxgb3/mc5.c +++ b/drivers/net/cxgb3/mc5.c | |||
@@ -318,7 +318,7 @@ static void mc5_dbgi_mode_disable(const struct mc5 *mc5) | |||
318 | 318 | ||
319 | /* | 319 | /* |
320 | * Initialization that requires the OS and protocol layers to already | 320 | * Initialization that requires the OS and protocol layers to already |
321 | * be intialized goes here. | 321 | * be initialized goes here. |
322 | */ | 322 | */ |
323 | int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters, | 323 | int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters, |
324 | unsigned int nroutes) | 324 | unsigned int nroutes) |
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c index ec8579a0a808..d55db6b38e7b 100644 --- a/drivers/net/cxgb3/t3_hw.c +++ b/drivers/net/cxgb3/t3_hw.c | |||
@@ -607,7 +607,7 @@ struct t3_vpd { | |||
607 | * | 607 | * |
608 | * Read a 32-bit word from a location in VPD EEPROM using the card's PCI | 608 | * Read a 32-bit word from a location in VPD EEPROM using the card's PCI |
609 | * VPD ROM capability. A zero is written to the flag bit when the | 609 | * VPD ROM capability. A zero is written to the flag bit when the |
610 | * addres is written to the control register. The hardware device will | 610 | * address is written to the control register. The hardware device will |
611 | * set the flag to 1 when 4 bytes have been read into the data register. | 611 | * set the flag to 1 when 4 bytes have been read into the data register. |
612 | */ | 612 | */ |
613 | int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data) | 613 | int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data) |
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c index 059c1eec8c3f..ec35d458102c 100644 --- a/drivers/net/cxgb4/cxgb4_main.c +++ b/drivers/net/cxgb4/cxgb4_main.c | |||
@@ -2710,6 +2710,8 @@ static int cxgb_open(struct net_device *dev) | |||
2710 | struct port_info *pi = netdev_priv(dev); | 2710 | struct port_info *pi = netdev_priv(dev); |
2711 | struct adapter *adapter = pi->adapter; | 2711 | struct adapter *adapter = pi->adapter; |
2712 | 2712 | ||
2713 | netif_carrier_off(dev); | ||
2714 | |||
2713 | if (!(adapter->flags & FULL_INIT_DONE)) { | 2715 | if (!(adapter->flags & FULL_INIT_DONE)) { |
2714 | err = cxgb_up(adapter); | 2716 | err = cxgb_up(adapter); |
2715 | if (err < 0) | 2717 | if (err < 0) |
@@ -3661,7 +3663,6 @@ static int __devinit init_one(struct pci_dev *pdev, | |||
3661 | pi->xact_addr_filt = -1; | 3663 | pi->xact_addr_filt = -1; |
3662 | pi->rx_offload = RX_CSO; | 3664 | pi->rx_offload = RX_CSO; |
3663 | pi->port_id = i; | 3665 | pi->port_id = i; |
3664 | netif_carrier_off(netdev); | ||
3665 | netdev->irq = pdev->irq; | 3666 | netdev->irq = pdev->irq; |
3666 | 3667 | ||
3667 | netdev->features |= NETIF_F_SG | TSO_FLAGS; | 3668 | netdev->features |= NETIF_F_SG | TSO_FLAGS; |
diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/cxgb4vf/cxgb4vf_main.c index 3c403f895750..6aad64df4dcb 100644 --- a/drivers/net/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/cxgb4vf/cxgb4vf_main.c | |||
@@ -749,13 +749,19 @@ static int cxgb4vf_open(struct net_device *dev) | |||
749 | netif_set_real_num_tx_queues(dev, pi->nqsets); | 749 | netif_set_real_num_tx_queues(dev, pi->nqsets); |
750 | err = netif_set_real_num_rx_queues(dev, pi->nqsets); | 750 | err = netif_set_real_num_rx_queues(dev, pi->nqsets); |
751 | if (err) | 751 | if (err) |
752 | return err; | 752 | goto err_unwind; |
753 | set_bit(pi->port_id, &adapter->open_device_map); | ||
754 | err = link_start(dev); | 753 | err = link_start(dev); |
755 | if (err) | 754 | if (err) |
756 | return err; | 755 | goto err_unwind; |
756 | |||
757 | netif_tx_start_all_queues(dev); | 757 | netif_tx_start_all_queues(dev); |
758 | set_bit(pi->port_id, &adapter->open_device_map); | ||
758 | return 0; | 759 | return 0; |
760 | |||
761 | err_unwind: | ||
762 | if (adapter->open_device_map == 0) | ||
763 | adapter_down(adapter); | ||
764 | return err; | ||
759 | } | 765 | } |
760 | 766 | ||
761 | /* | 767 | /* |
@@ -764,13 +770,12 @@ static int cxgb4vf_open(struct net_device *dev) | |||
764 | */ | 770 | */ |
765 | static int cxgb4vf_stop(struct net_device *dev) | 771 | static int cxgb4vf_stop(struct net_device *dev) |
766 | { | 772 | { |
767 | int ret; | ||
768 | struct port_info *pi = netdev_priv(dev); | 773 | struct port_info *pi = netdev_priv(dev); |
769 | struct adapter *adapter = pi->adapter; | 774 | struct adapter *adapter = pi->adapter; |
770 | 775 | ||
771 | netif_tx_stop_all_queues(dev); | 776 | netif_tx_stop_all_queues(dev); |
772 | netif_carrier_off(dev); | 777 | netif_carrier_off(dev); |
773 | ret = t4vf_enable_vi(adapter, pi->viid, false, false); | 778 | t4vf_enable_vi(adapter, pi->viid, false, false); |
774 | pi->link_cfg.link_ok = 0; | 779 | pi->link_cfg.link_ok = 0; |
775 | 780 | ||
776 | clear_bit(pi->port_id, &adapter->open_device_map); | 781 | clear_bit(pi->port_id, &adapter->open_device_map); |
@@ -2035,7 +2040,7 @@ static int __devinit setup_debugfs(struct adapter *adapter) | |||
2035 | { | 2040 | { |
2036 | int i; | 2041 | int i; |
2037 | 2042 | ||
2038 | BUG_ON(adapter->debugfs_root == NULL); | 2043 | BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root)); |
2039 | 2044 | ||
2040 | /* | 2045 | /* |
2041 | * Debugfs support is best effort. | 2046 | * Debugfs support is best effort. |
@@ -2056,7 +2061,7 @@ static int __devinit setup_debugfs(struct adapter *adapter) | |||
2056 | */ | 2061 | */ |
2057 | static void cleanup_debugfs(struct adapter *adapter) | 2062 | static void cleanup_debugfs(struct adapter *adapter) |
2058 | { | 2063 | { |
2059 | BUG_ON(adapter->debugfs_root == NULL); | 2064 | BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root)); |
2060 | 2065 | ||
2061 | /* | 2066 | /* |
2062 | * Unlike our sister routine cleanup_proc(), we don't need to remove | 2067 | * Unlike our sister routine cleanup_proc(), we don't need to remove |
@@ -2484,17 +2489,6 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev, | |||
2484 | struct net_device *netdev; | 2489 | struct net_device *netdev; |
2485 | 2490 | ||
2486 | /* | 2491 | /* |
2487 | * Vet our module parameters. | ||
2488 | */ | ||
2489 | if (msi != MSI_MSIX && msi != MSI_MSI) { | ||
2490 | dev_err(&pdev->dev, "bad module parameter msi=%d; must be %d" | ||
2491 | " (MSI-X or MSI) or %d (MSI)\n", msi, MSI_MSIX, | ||
2492 | MSI_MSI); | ||
2493 | err = -EINVAL; | ||
2494 | goto err_out; | ||
2495 | } | ||
2496 | |||
2497 | /* | ||
2498 | * Print our driver banner the first time we're called to initialize a | 2492 | * Print our driver banner the first time we're called to initialize a |
2499 | * device. | 2493 | * device. |
2500 | */ | 2494 | */ |
@@ -2706,11 +2700,11 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev, | |||
2706 | /* | 2700 | /* |
2707 | * Set up our debugfs entries. | 2701 | * Set up our debugfs entries. |
2708 | */ | 2702 | */ |
2709 | if (cxgb4vf_debugfs_root) { | 2703 | if (!IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) { |
2710 | adapter->debugfs_root = | 2704 | adapter->debugfs_root = |
2711 | debugfs_create_dir(pci_name(pdev), | 2705 | debugfs_create_dir(pci_name(pdev), |
2712 | cxgb4vf_debugfs_root); | 2706 | cxgb4vf_debugfs_root); |
2713 | if (adapter->debugfs_root == NULL) | 2707 | if (IS_ERR_OR_NULL(adapter->debugfs_root)) |
2714 | dev_warn(&pdev->dev, "could not create debugfs" | 2708 | dev_warn(&pdev->dev, "could not create debugfs" |
2715 | " directory"); | 2709 | " directory"); |
2716 | else | 2710 | else |
@@ -2765,7 +2759,7 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev, | |||
2765 | */ | 2759 | */ |
2766 | 2760 | ||
2767 | err_free_debugfs: | 2761 | err_free_debugfs: |
2768 | if (adapter->debugfs_root) { | 2762 | if (!IS_ERR_OR_NULL(adapter->debugfs_root)) { |
2769 | cleanup_debugfs(adapter); | 2763 | cleanup_debugfs(adapter); |
2770 | debugfs_remove_recursive(adapter->debugfs_root); | 2764 | debugfs_remove_recursive(adapter->debugfs_root); |
2771 | } | 2765 | } |
@@ -2797,7 +2791,6 @@ err_release_regions: | |||
2797 | err_disable_device: | 2791 | err_disable_device: |
2798 | pci_disable_device(pdev); | 2792 | pci_disable_device(pdev); |
2799 | 2793 | ||
2800 | err_out: | ||
2801 | return err; | 2794 | return err; |
2802 | } | 2795 | } |
2803 | 2796 | ||
@@ -2835,7 +2828,7 @@ static void __devexit cxgb4vf_pci_remove(struct pci_dev *pdev) | |||
2835 | /* | 2828 | /* |
2836 | * Tear down our debugfs entries. | 2829 | * Tear down our debugfs entries. |
2837 | */ | 2830 | */ |
2838 | if (adapter->debugfs_root) { | 2831 | if (!IS_ERR_OR_NULL(adapter->debugfs_root)) { |
2839 | cleanup_debugfs(adapter); | 2832 | cleanup_debugfs(adapter); |
2840 | debugfs_remove_recursive(adapter->debugfs_root); | 2833 | debugfs_remove_recursive(adapter->debugfs_root); |
2841 | } | 2834 | } |
@@ -2869,6 +2862,46 @@ static void __devexit cxgb4vf_pci_remove(struct pci_dev *pdev) | |||
2869 | } | 2862 | } |
2870 | 2863 | ||
2871 | /* | 2864 | /* |
2865 | * "Shutdown" quiesce the device, stopping Ingress Packet and Interrupt | ||
2866 | * delivery. | ||
2867 | */ | ||
2868 | static void __devexit cxgb4vf_pci_shutdown(struct pci_dev *pdev) | ||
2869 | { | ||
2870 | struct adapter *adapter; | ||
2871 | int pidx; | ||
2872 | |||
2873 | adapter = pci_get_drvdata(pdev); | ||
2874 | if (!adapter) | ||
2875 | return; | ||
2876 | |||
2877 | /* | ||
2878 | * Disable all Virtual Interfaces. This will shut down the | ||
2879 | * delivery of all ingress packets into the chip for these | ||
2880 | * Virtual Interfaces. | ||
2881 | */ | ||
2882 | for_each_port(adapter, pidx) { | ||
2883 | struct net_device *netdev; | ||
2884 | struct port_info *pi; | ||
2885 | |||
2886 | if (!test_bit(pidx, &adapter->registered_device_map)) | ||
2887 | continue; | ||
2888 | |||
2889 | netdev = adapter->port[pidx]; | ||
2890 | if (!netdev) | ||
2891 | continue; | ||
2892 | |||
2893 | pi = netdev_priv(netdev); | ||
2894 | t4vf_enable_vi(adapter, pi->viid, false, false); | ||
2895 | } | ||
2896 | |||
2897 | /* | ||
2898 | * Free up all Queues which will prevent further DMA and | ||
2899 | * Interrupts allowing various internal pathways to drain. | ||
2900 | */ | ||
2901 | t4vf_free_sge_resources(adapter); | ||
2902 | } | ||
2903 | |||
2904 | /* | ||
2872 | * PCI Device registration data structures. | 2905 | * PCI Device registration data structures. |
2873 | */ | 2906 | */ |
2874 | #define CH_DEVICE(devid, idx) \ | 2907 | #define CH_DEVICE(devid, idx) \ |
@@ -2901,6 +2934,7 @@ static struct pci_driver cxgb4vf_driver = { | |||
2901 | .id_table = cxgb4vf_pci_tbl, | 2934 | .id_table = cxgb4vf_pci_tbl, |
2902 | .probe = cxgb4vf_pci_probe, | 2935 | .probe = cxgb4vf_pci_probe, |
2903 | .remove = __devexit_p(cxgb4vf_pci_remove), | 2936 | .remove = __devexit_p(cxgb4vf_pci_remove), |
2937 | .shutdown = __devexit_p(cxgb4vf_pci_shutdown), | ||
2904 | }; | 2938 | }; |
2905 | 2939 | ||
2906 | /* | 2940 | /* |
@@ -2910,14 +2944,25 @@ static int __init cxgb4vf_module_init(void) | |||
2910 | { | 2944 | { |
2911 | int ret; | 2945 | int ret; |
2912 | 2946 | ||
2947 | /* | ||
2948 | * Vet our module parameters. | ||
2949 | */ | ||
2950 | if (msi != MSI_MSIX && msi != MSI_MSI) { | ||
2951 | printk(KERN_WARNING KBUILD_MODNAME | ||
2952 | ": bad module parameter msi=%d; must be %d" | ||
2953 | " (MSI-X or MSI) or %d (MSI)\n", | ||
2954 | msi, MSI_MSIX, MSI_MSI); | ||
2955 | return -EINVAL; | ||
2956 | } | ||
2957 | |||
2913 | /* Debugfs support is optional, just warn if this fails */ | 2958 | /* Debugfs support is optional, just warn if this fails */ |
2914 | cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL); | 2959 | cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL); |
2915 | if (!cxgb4vf_debugfs_root) | 2960 | if (IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) |
2916 | printk(KERN_WARNING KBUILD_MODNAME ": could not create" | 2961 | printk(KERN_WARNING KBUILD_MODNAME ": could not create" |
2917 | " debugfs entry, continuing\n"); | 2962 | " debugfs entry, continuing\n"); |
2918 | 2963 | ||
2919 | ret = pci_register_driver(&cxgb4vf_driver); | 2964 | ret = pci_register_driver(&cxgb4vf_driver); |
2920 | if (ret < 0) | 2965 | if (ret < 0 && !IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) |
2921 | debugfs_remove(cxgb4vf_debugfs_root); | 2966 | debugfs_remove(cxgb4vf_debugfs_root); |
2922 | return ret; | 2967 | return ret; |
2923 | } | 2968 | } |
diff --git a/drivers/net/cxgb4vf/t4vf_hw.c b/drivers/net/cxgb4vf/t4vf_hw.c index e4bec78c8e3f..192db226ec7f 100644 --- a/drivers/net/cxgb4vf/t4vf_hw.c +++ b/drivers/net/cxgb4vf/t4vf_hw.c | |||
@@ -147,9 +147,20 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size, | |||
147 | /* | 147 | /* |
148 | * Write the command array into the Mailbox Data register array and | 148 | * Write the command array into the Mailbox Data register array and |
149 | * transfer ownership of the mailbox to the firmware. | 149 | * transfer ownership of the mailbox to the firmware. |
150 | * | ||
151 | * For the VFs, the Mailbox Data "registers" are actually backed by | ||
152 | * T4's "MA" interface rather than PL Registers (as is the case for | ||
153 | * the PFs). Because these are in different coherency domains, the | ||
154 | * write to the VF's PL-register-backed Mailbox Control can race in | ||
155 | * front of the writes to the MA-backed VF Mailbox Data "registers". | ||
156 | * So we need to do a read-back on at least one byte of the VF Mailbox | ||
157 | * Data registers before doing the write to the VF Mailbox Control | ||
158 | * register. | ||
150 | */ | 159 | */ |
151 | for (i = 0, p = cmd; i < size; i += 8) | 160 | for (i = 0, p = cmd; i < size; i += 8) |
152 | t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++)); | 161 | t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++)); |
162 | t4_read_reg(adapter, mbox_data); /* flush write */ | ||
163 | |||
153 | t4_write_reg(adapter, mbox_ctl, | 164 | t4_write_reg(adapter, mbox_ctl, |
154 | MBMSGVALID | MBOWNER(MBOX_OWNER_FW)); | 165 | MBMSGVALID | MBOWNER(MBOX_OWNER_FW)); |
155 | t4_read_reg(adapter, mbox_ctl); /* flush write */ | 166 | t4_read_reg(adapter, mbox_ctl); /* flush write */ |
@@ -160,7 +171,7 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size, | |||
160 | delay_idx = 0; | 171 | delay_idx = 0; |
161 | ms = delay[0]; | 172 | ms = delay[0]; |
162 | 173 | ||
163 | for (i = 0; i < 500; i += ms) { | 174 | for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) { |
164 | if (sleep_ok) { | 175 | if (sleep_ok) { |
165 | ms = delay[delay_idx]; | 176 | ms = delay[delay_idx]; |
166 | if (delay_idx < ARRAY_SIZE(delay) - 1) | 177 | if (delay_idx < ARRAY_SIZE(delay) - 1) |
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c index 2a628d17d178..7018bfe408a4 100644 --- a/drivers/net/davinci_emac.c +++ b/drivers/net/davinci_emac.c | |||
@@ -1008,7 +1008,7 @@ static void emac_rx_handler(void *token, int len, int status) | |||
1008 | int ret; | 1008 | int ret; |
1009 | 1009 | ||
1010 | /* free and bail if we are shutting down */ | 1010 | /* free and bail if we are shutting down */ |
1011 | if (unlikely(!netif_running(ndev))) { | 1011 | if (unlikely(!netif_running(ndev) || !netif_carrier_ok(ndev))) { |
1012 | dev_kfree_skb_any(skb); | 1012 | dev_kfree_skb_any(skb); |
1013 | return; | 1013 | return; |
1014 | } | 1014 | } |
diff --git a/drivers/net/depca.c b/drivers/net/depca.c index 1b48b68ad4fd..8b0084d17c8c 100644 --- a/drivers/net/depca.c +++ b/drivers/net/depca.c | |||
@@ -1094,7 +1094,7 @@ static int depca_rx(struct net_device *dev) | |||
1094 | } | 1094 | } |
1095 | } | 1095 | } |
1096 | /* Change buffer ownership for this last frame, back to the adapter */ | 1096 | /* Change buffer ownership for this last frame, back to the adapter */ |
1097 | for (; lp->rx_old != entry; lp->rx_old = (++lp->rx_old) & lp->rxRingMask) { | 1097 | for (; lp->rx_old != entry; lp->rx_old = (lp->rx_old + 1) & lp->rxRingMask) { |
1098 | writel(readl(&lp->rx_ring[lp->rx_old].base) | R_OWN, &lp->rx_ring[lp->rx_old].base); | 1098 | writel(readl(&lp->rx_ring[lp->rx_old].base) | R_OWN, &lp->rx_ring[lp->rx_old].base); |
1099 | } | 1099 | } |
1100 | writel(readl(&lp->rx_ring[entry].base) | R_OWN, &lp->rx_ring[entry].base); | 1100 | writel(readl(&lp->rx_ring[entry].base) | R_OWN, &lp->rx_ring[entry].base); |
@@ -1103,7 +1103,7 @@ static int depca_rx(struct net_device *dev) | |||
1103 | /* | 1103 | /* |
1104 | ** Update entry information | 1104 | ** Update entry information |
1105 | */ | 1105 | */ |
1106 | lp->rx_new = (++lp->rx_new) & lp->rxRingMask; | 1106 | lp->rx_new = (lp->rx_new + 1) & lp->rxRingMask; |
1107 | } | 1107 | } |
1108 | 1108 | ||
1109 | return 0; | 1109 | return 0; |
@@ -1148,7 +1148,7 @@ static int depca_tx(struct net_device *dev) | |||
1148 | } | 1148 | } |
1149 | 1149 | ||
1150 | /* Update all the pointers */ | 1150 | /* Update all the pointers */ |
1151 | lp->tx_old = (++lp->tx_old) & lp->txRingMask; | 1151 | lp->tx_old = (lp->tx_old + 1) & lp->txRingMask; |
1152 | } | 1152 | } |
1153 | 1153 | ||
1154 | return 0; | 1154 | return 0; |
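The old index-advance expressions such as lp->tx_old = (++lp->tx_old) & lp->txRingMask modify lp->tx_old twice in one expression without an intervening sequence point, which is undefined behaviour in C (the case gcc's -Wsequence-point warns about), so the result depends on evaluation order. Computing the new index from the current value once, as the replacement lines do, is well defined. A minimal sketch of the pattern as a hypothetical helper (not part of the driver):

	/* Hypothetical helper: advance an index around a power-of-two ring
	 * without modifying it twice in a single expression. */
	static inline unsigned int ring_next(unsigned int idx, unsigned int mask)
	{
		return (idx + 1) & mask;	/* idx is only read here */
	}

	/* usage: lp->tx_old = ring_next(lp->tx_old, lp->txRingMask); */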
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c index e1a8216ff692..c05db6046050 100644 --- a/drivers/net/dl2k.c +++ b/drivers/net/dl2k.c | |||
@@ -1753,8 +1753,6 @@ rio_close (struct net_device *dev) | |||
1753 | 1753 | ||
1754 | /* Free all the skbuffs in the queue. */ | 1754 | /* Free all the skbuffs in the queue. */ |
1755 | for (i = 0; i < RX_RING_SIZE; i++) { | 1755 | for (i = 0; i < RX_RING_SIZE; i++) { |
1756 | np->rx_ring[i].status = 0; | ||
1757 | np->rx_ring[i].fraginfo = 0; | ||
1758 | skb = np->rx_skbuff[i]; | 1756 | skb = np->rx_skbuff[i]; |
1759 | if (skb) { | 1757 | if (skb) { |
1760 | pci_unmap_single(np->pdev, | 1758 | pci_unmap_single(np->pdev, |
@@ -1763,6 +1761,8 @@ rio_close (struct net_device *dev) | |||
1763 | dev_kfree_skb (skb); | 1761 | dev_kfree_skb (skb); |
1764 | np->rx_skbuff[i] = NULL; | 1762 | np->rx_skbuff[i] = NULL; |
1765 | } | 1763 | } |
1764 | np->rx_ring[i].status = 0; | ||
1765 | np->rx_ring[i].fraginfo = 0; | ||
1766 | } | 1766 | } |
1767 | for (i = 0; i < TX_RING_SIZE; i++) { | 1767 | for (i = 0; i < TX_RING_SIZE; i++) { |
1768 | skb = np->tx_skbuff[i]; | 1768 | skb = np->tx_skbuff[i]; |
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c index 2d4c4fc1d900..461dd6f905f7 100644 --- a/drivers/net/dm9000.c +++ b/drivers/net/dm9000.c | |||
@@ -802,10 +802,7 @@ dm9000_init_dm9000(struct net_device *dev) | |||
802 | /* Checksum mode */ | 802 | /* Checksum mode */ |
803 | dm9000_set_rx_csum_unlocked(dev, db->rx_csum); | 803 | dm9000_set_rx_csum_unlocked(dev, db->rx_csum); |
804 | 804 | ||
805 | /* GPIO0 on pre-activate PHY */ | ||
806 | iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */ | ||
807 | iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */ | 805 | iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */ |
808 | iow(db, DM9000_GPR, 0); /* Enable PHY */ | ||
809 | 806 | ||
810 | ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0; | 807 | ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0; |
811 | 808 | ||
@@ -852,8 +849,8 @@ static void dm9000_timeout(struct net_device *dev) | |||
852 | unsigned long flags; | 849 | unsigned long flags; |
853 | 850 | ||
854 | /* Save previous register address */ | 851 | /* Save previous register address */ |
855 | reg_save = readb(db->io_addr); | ||
856 | spin_lock_irqsave(&db->lock, flags); | 852 | spin_lock_irqsave(&db->lock, flags); |
853 | reg_save = readb(db->io_addr); | ||
857 | 854 | ||
858 | netif_stop_queue(dev); | 855 | netif_stop_queue(dev); |
859 | dm9000_reset(db); | 856 | dm9000_reset(db); |
@@ -1194,6 +1191,10 @@ dm9000_open(struct net_device *dev) | |||
1194 | if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev)) | 1191 | if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev)) |
1195 | return -EAGAIN; | 1192 | return -EAGAIN; |
1196 | 1193 | ||
1194 | /* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */ | ||
1195 | iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */ | ||
1196 | mdelay(1); /* delay needs by DM9000B */ | ||
1197 | |||
1197 | /* Initialize DM9000 board */ | 1198 | /* Initialize DM9000 board */ |
1198 | dm9000_reset(db); | 1199 | dm9000_reset(db); |
1199 | dm9000_init_dm9000(dev); | 1200 | dm9000_init_dm9000(dev); |
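Two separate dm9000 fixes are folded in here. First, dm9000_timeout() now saves the current register index with readb(db->io_addr) only after taking db->lock, so a concurrent register access cannot change the index between the save and the restore. Second, powering up the PHY through DM9000_GPR moves out of dm9000_init_dm9000() into dm9000_open(), with a 1 ms delay that the DM9000B requires, because register 1Fh is not set up by the chip reset itself. A condensed, assumed view of the resulting open path (error handling and the surrounding calls omitted):

	/* dm9000_open() after this change (sketch): power the PHY, wait,
	 * then reset and reinitialise the chip. */
	iow(db, DM9000_GPR, 0);		/* REG_1F bit0: activate the PHY */
	mdelay(1);			/* settling time needed by DM9000B */
	dm9000_reset(db);
	dm9000_init_dm9000(dev);	/* no longer touches DM9000_GPR */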
diff --git a/drivers/net/dnet.c b/drivers/net/dnet.c index 9d8a20b72fa9..8318ea06cb6d 100644 --- a/drivers/net/dnet.c +++ b/drivers/net/dnet.c | |||
@@ -337,8 +337,6 @@ static int dnet_mii_init(struct dnet *bp) | |||
337 | for (i = 0; i < PHY_MAX_ADDR; i++) | 337 | for (i = 0; i < PHY_MAX_ADDR; i++) |
338 | bp->mii_bus->irq[i] = PHY_POLL; | 338 | bp->mii_bus->irq[i] = PHY_POLL; |
339 | 339 | ||
340 | platform_set_drvdata(bp->dev, bp->mii_bus); | ||
341 | |||
342 | if (mdiobus_register(bp->mii_bus)) { | 340 | if (mdiobus_register(bp->mii_bus)) { |
343 | err = -ENXIO; | 341 | err = -ENXIO; |
344 | goto err_out_free_mdio_irq; | 342 | goto err_out_free_mdio_irq; |
@@ -863,6 +861,7 @@ static int __devinit dnet_probe(struct platform_device *pdev) | |||
863 | bp = netdev_priv(dev); | 861 | bp = netdev_priv(dev); |
864 | bp->dev = dev; | 862 | bp->dev = dev; |
865 | 863 | ||
864 | platform_set_drvdata(pdev, dev); | ||
866 | SET_NETDEV_DEV(dev, &pdev->dev); | 865 | SET_NETDEV_DEV(dev, &pdev->dev); |
867 | 866 | ||
868 | spin_lock_init(&bp->lock); | 867 | spin_lock_init(&bp->lock); |
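dnet_mii_init() was passing bp->dev, a struct net_device * assigned in probe, to platform_set_drvdata(), which takes the platform device; the call is dropped and dnet_probe() now stores the net_device as the platform device's driver data, the usual pattern so later callbacks can recover it. An assumed counterpart on the remove/suspend side (not part of this hunk):

	/* Assumed usage elsewhere in the driver: recover the netdev that
	 * probe stored with platform_set_drvdata(pdev, dev). */
	struct net_device *dev = platform_get_drvdata(pdev);
	struct dnet *bp = netdev_priv(dev);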
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c index 77d08e697b74..7501d977d992 100644 --- a/drivers/net/e1000/e1000_hw.c +++ b/drivers/net/e1000/e1000_hw.c | |||
@@ -124,16 +124,22 @@ static s32 e1000_set_phy_type(struct e1000_hw *hw) | |||
124 | case M88E1000_I_PHY_ID: | 124 | case M88E1000_I_PHY_ID: |
125 | case M88E1011_I_PHY_ID: | 125 | case M88E1011_I_PHY_ID: |
126 | case M88E1111_I_PHY_ID: | 126 | case M88E1111_I_PHY_ID: |
127 | case M88E1118_E_PHY_ID: | ||
127 | hw->phy_type = e1000_phy_m88; | 128 | hw->phy_type = e1000_phy_m88; |
128 | break; | 129 | break; |
129 | case IGP01E1000_I_PHY_ID: | 130 | case IGP01E1000_I_PHY_ID: |
130 | if (hw->mac_type == e1000_82541 || | 131 | if (hw->mac_type == e1000_82541 || |
131 | hw->mac_type == e1000_82541_rev_2 || | 132 | hw->mac_type == e1000_82541_rev_2 || |
132 | hw->mac_type == e1000_82547 || | 133 | hw->mac_type == e1000_82547 || |
133 | hw->mac_type == e1000_82547_rev_2) { | 134 | hw->mac_type == e1000_82547_rev_2) |
134 | hw->phy_type = e1000_phy_igp; | 135 | hw->phy_type = e1000_phy_igp; |
135 | break; | 136 | break; |
136 | } | 137 | case RTL8211B_PHY_ID: |
138 | hw->phy_type = e1000_phy_8211; | ||
139 | break; | ||
140 | case RTL8201N_PHY_ID: | ||
141 | hw->phy_type = e1000_phy_8201; | ||
142 | break; | ||
137 | default: | 143 | default: |
138 | /* Should never have loaded on this device */ | 144 | /* Should never have loaded on this device */ |
139 | hw->phy_type = e1000_phy_undefined; | 145 | hw->phy_type = e1000_phy_undefined; |
@@ -318,6 +324,9 @@ s32 e1000_set_mac_type(struct e1000_hw *hw) | |||
318 | case E1000_DEV_ID_82547GI: | 324 | case E1000_DEV_ID_82547GI: |
319 | hw->mac_type = e1000_82547_rev_2; | 325 | hw->mac_type = e1000_82547_rev_2; |
320 | break; | 326 | break; |
327 | case E1000_DEV_ID_INTEL_CE4100_GBE: | ||
328 | hw->mac_type = e1000_ce4100; | ||
329 | break; | ||
321 | default: | 330 | default: |
322 | /* Should never have loaded on this device */ | 331 | /* Should never have loaded on this device */ |
323 | return -E1000_ERR_MAC_TYPE; | 332 | return -E1000_ERR_MAC_TYPE; |
@@ -372,6 +381,9 @@ void e1000_set_media_type(struct e1000_hw *hw) | |||
372 | case e1000_82542_rev2_1: | 381 | case e1000_82542_rev2_1: |
373 | hw->media_type = e1000_media_type_fiber; | 382 | hw->media_type = e1000_media_type_fiber; |
374 | break; | 383 | break; |
384 | case e1000_ce4100: | ||
385 | hw->media_type = e1000_media_type_copper; | ||
386 | break; | ||
375 | default: | 387 | default: |
376 | status = er32(STATUS); | 388 | status = er32(STATUS); |
377 | if (status & E1000_STATUS_TBIMODE) { | 389 | if (status & E1000_STATUS_TBIMODE) { |
@@ -460,6 +472,7 @@ s32 e1000_reset_hw(struct e1000_hw *hw) | |||
460 | /* Reset is performed on a shadow of the control register */ | 472 | /* Reset is performed on a shadow of the control register */ |
461 | ew32(CTRL_DUP, (ctrl | E1000_CTRL_RST)); | 473 | ew32(CTRL_DUP, (ctrl | E1000_CTRL_RST)); |
462 | break; | 474 | break; |
475 | case e1000_ce4100: | ||
463 | default: | 476 | default: |
464 | ew32(CTRL, (ctrl | E1000_CTRL_RST)); | 477 | ew32(CTRL, (ctrl | E1000_CTRL_RST)); |
465 | break; | 478 | break; |
@@ -952,6 +965,67 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw) | |||
952 | } | 965 | } |
953 | 966 | ||
954 | /** | 967 | /** |
968 | * e1000_copper_link_rtl_setup - Copper link setup for e1000_phy_rtl series. | ||
969 | * @hw: Struct containing variables accessed by shared code | ||
970 | * | ||
971 | * Commits changes to PHY configuration by calling e1000_phy_reset(). | ||
972 | */ | ||
973 | static s32 e1000_copper_link_rtl_setup(struct e1000_hw *hw) | ||
974 | { | ||
975 | s32 ret_val; | ||
976 | |||
977 | /* SW reset the PHY so all changes take effect */ | ||
978 | ret_val = e1000_phy_reset(hw); | ||
979 | if (ret_val) { | ||
980 | e_dbg("Error Resetting the PHY\n"); | ||
981 | return ret_val; | ||
982 | } | ||
983 | |||
984 | return E1000_SUCCESS; | ||
985 | } | ||
986 | |||
987 | static s32 gbe_dhg_phy_setup(struct e1000_hw *hw) | ||
988 | { | ||
989 | s32 ret_val; | ||
990 | u32 ctrl_aux; | ||
991 | |||
992 | switch (hw->phy_type) { | ||
993 | case e1000_phy_8211: | ||
994 | ret_val = e1000_copper_link_rtl_setup(hw); | ||
995 | if (ret_val) { | ||
996 | e_dbg("e1000_copper_link_rtl_setup failed!\n"); | ||
997 | return ret_val; | ||
998 | } | ||
999 | break; | ||
1000 | case e1000_phy_8201: | ||
1001 | /* Set RMII mode */ | ||
1002 | ctrl_aux = er32(CTL_AUX); | ||
1003 | ctrl_aux |= E1000_CTL_AUX_RMII; | ||
1004 | ew32(CTL_AUX, ctrl_aux); | ||
1005 | E1000_WRITE_FLUSH(); | ||
1006 | |||
1007 | /* Disable the J/K bits required for receive */ | ||
1008 | ctrl_aux = er32(CTL_AUX); | ||
1009 | ctrl_aux |= 0x4; | ||
1010 | ctrl_aux &= ~0x2; | ||
1011 | ew32(CTL_AUX, ctrl_aux); | ||
1012 | E1000_WRITE_FLUSH(); | ||
1013 | ret_val = e1000_copper_link_rtl_setup(hw); | ||
1014 | |||
1015 | if (ret_val) { | ||
1016 | e_dbg("e1000_copper_link_rtl_setup failed!\n"); | ||
1017 | return ret_val; | ||
1018 | } | ||
1019 | break; | ||
1020 | default: | ||
1021 | e_dbg("Error Resetting the PHY\n"); | ||
1022 | return E1000_ERR_PHY_TYPE; | ||
1023 | } | ||
1024 | |||
1025 | return E1000_SUCCESS; | ||
1026 | } | ||
1027 | |||
1028 | /** | ||
955 | * e1000_copper_link_preconfig - early configuration for copper | 1029 | * e1000_copper_link_preconfig - early configuration for copper |
956 | * @hw: Struct containing variables accessed by shared code | 1030 | * @hw: Struct containing variables accessed by shared code |
957 | * | 1031 | * |
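The CE4100 (Intel Media SoC) variant of this MAC sits behind Realtek RTL8211B/RTL8201N or Marvell 88E1118 PHYs rather than the usual Marvell/IGP parts, so copper link setup gains gbe_dhg_phy_setup(): for the RTL8201N it switches the MAC to RMII and adjusts the J/K receive bits through the CE4100-only auxiliary control register, and in both RTL cases it commits the configuration with a PHY soft reset. A small illustration, assuming the new CTL_AUX defines from e1000_hw.h are used the same way the hunk above does:

	/* Switch the MAC to RMII for a 10/100-only RTL8201N (sketch mirroring
	 * gbe_dhg_phy_setup(): read-modify-write of the aux control register). */
	u32 ctrl_aux = er32(CTL_AUX);
	ctrl_aux |= E1000_CTL_AUX_RMII;
	ew32(CTL_AUX, ctrl_aux);
	E1000_WRITE_FLUSH();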
@@ -1286,6 +1360,10 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw) | |||
1286 | if (hw->autoneg_advertised == 0) | 1360 | if (hw->autoneg_advertised == 0) |
1287 | hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT; | 1361 | hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT; |
1288 | 1362 | ||
1363 | /* IFE/RTL8201N PHY only supports 10/100 */ | ||
1364 | if (hw->phy_type == e1000_phy_8201) | ||
1365 | hw->autoneg_advertised &= AUTONEG_ADVERTISE_10_100_ALL; | ||
1366 | |||
1289 | e_dbg("Reconfiguring auto-neg advertisement params\n"); | 1367 | e_dbg("Reconfiguring auto-neg advertisement params\n"); |
1290 | ret_val = e1000_phy_setup_autoneg(hw); | 1368 | ret_val = e1000_phy_setup_autoneg(hw); |
1291 | if (ret_val) { | 1369 | if (ret_val) { |
@@ -1341,7 +1419,7 @@ static s32 e1000_copper_link_postconfig(struct e1000_hw *hw) | |||
1341 | s32 ret_val; | 1419 | s32 ret_val; |
1342 | e_dbg("e1000_copper_link_postconfig"); | 1420 | e_dbg("e1000_copper_link_postconfig"); |
1343 | 1421 | ||
1344 | if (hw->mac_type >= e1000_82544) { | 1422 | if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100)) { |
1345 | e1000_config_collision_dist(hw); | 1423 | e1000_config_collision_dist(hw); |
1346 | } else { | 1424 | } else { |
1347 | ret_val = e1000_config_mac_to_phy(hw); | 1425 | ret_val = e1000_config_mac_to_phy(hw); |
@@ -1395,6 +1473,12 @@ static s32 e1000_setup_copper_link(struct e1000_hw *hw) | |||
1395 | ret_val = e1000_copper_link_mgp_setup(hw); | 1473 | ret_val = e1000_copper_link_mgp_setup(hw); |
1396 | if (ret_val) | 1474 | if (ret_val) |
1397 | return ret_val; | 1475 | return ret_val; |
1476 | } else { | ||
1477 | ret_val = gbe_dhg_phy_setup(hw); | ||
1478 | if (ret_val) { | ||
1479 | e_dbg("gbe_dhg_phy_setup failed!\n"); | ||
1480 | return ret_val; | ||
1481 | } | ||
1398 | } | 1482 | } |
1399 | 1483 | ||
1400 | if (hw->autoneg) { | 1484 | if (hw->autoneg) { |
@@ -1461,10 +1545,11 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) | |||
1461 | return ret_val; | 1545 | return ret_val; |
1462 | 1546 | ||
1463 | /* Read the MII 1000Base-T Control Register (Address 9). */ | 1547 | /* Read the MII 1000Base-T Control Register (Address 9). */ |
1464 | ret_val = | 1548 | ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg); |
1465 | e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg); | ||
1466 | if (ret_val) | 1549 | if (ret_val) |
1467 | return ret_val; | 1550 | return ret_val; |
1551 | else if (hw->phy_type == e1000_phy_8201) | ||
1552 | mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK; | ||
1468 | 1553 | ||
1469 | /* Need to parse both autoneg_advertised and fc and set up | 1554 | /* Need to parse both autoneg_advertised and fc and set up |
1470 | * the appropriate PHY registers. First we will parse for | 1555 | * the appropriate PHY registers. First we will parse for |
@@ -1577,9 +1662,14 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) | |||
1577 | 1662 | ||
1578 | e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); | 1663 | e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); |
1579 | 1664 | ||
1580 | ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg); | 1665 | if (hw->phy_type == e1000_phy_8201) { |
1581 | if (ret_val) | 1666 | mii_1000t_ctrl_reg = 0; |
1582 | return ret_val; | 1667 | } else { |
1668 | ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, | ||
1669 | mii_1000t_ctrl_reg); | ||
1670 | if (ret_val) | ||
1671 | return ret_val; | ||
1672 | } | ||
1583 | 1673 | ||
1584 | return E1000_SUCCESS; | 1674 | return E1000_SUCCESS; |
1585 | } | 1675 | } |
@@ -1860,7 +1950,7 @@ static s32 e1000_config_mac_to_phy(struct e1000_hw *hw) | |||
1860 | 1950 | ||
1861 | /* 82544 or newer MAC, Auto Speed Detection takes care of | 1951 | /* 82544 or newer MAC, Auto Speed Detection takes care of |
1862 | * MAC speed/duplex configuration.*/ | 1952 | * MAC speed/duplex configuration.*/ |
1863 | if (hw->mac_type >= e1000_82544) | 1953 | if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100)) |
1864 | return E1000_SUCCESS; | 1954 | return E1000_SUCCESS; |
1865 | 1955 | ||
1866 | /* Read the Device Control Register and set the bits to Force Speed | 1956 | /* Read the Device Control Register and set the bits to Force Speed |
@@ -1870,27 +1960,49 @@ static s32 e1000_config_mac_to_phy(struct e1000_hw *hw) | |||
1870 | ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); | 1960 | ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); |
1871 | ctrl &= ~(E1000_CTRL_SPD_SEL | E1000_CTRL_ILOS); | 1961 | ctrl &= ~(E1000_CTRL_SPD_SEL | E1000_CTRL_ILOS); |
1872 | 1962 | ||
1873 | /* Set up duplex in the Device Control and Transmit Control | 1963 | switch (hw->phy_type) { |
1874 | * registers depending on negotiated values. | 1964 | case e1000_phy_8201: |
1875 | */ | 1965 | ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data); |
1876 | ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); | 1966 | if (ret_val) |
1877 | if (ret_val) | 1967 | return ret_val; |
1878 | return ret_val; | ||
1879 | 1968 | ||
1880 | if (phy_data & M88E1000_PSSR_DPLX) | 1969 | if (phy_data & RTL_PHY_CTRL_FD) |
1881 | ctrl |= E1000_CTRL_FD; | 1970 | ctrl |= E1000_CTRL_FD; |
1882 | else | 1971 | else |
1883 | ctrl &= ~E1000_CTRL_FD; | 1972 | ctrl &= ~E1000_CTRL_FD; |
1884 | 1973 | ||
1885 | e1000_config_collision_dist(hw); | 1974 | if (phy_data & RTL_PHY_CTRL_SPD_100) |
1975 | ctrl |= E1000_CTRL_SPD_100; | ||
1976 | else | ||
1977 | ctrl |= E1000_CTRL_SPD_10; | ||
1886 | 1978 | ||
1887 | /* Set up speed in the Device Control register depending on | 1979 | e1000_config_collision_dist(hw); |
1888 | * negotiated values. | 1980 | break; |
1889 | */ | 1981 | default: |
1890 | if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) | 1982 | /* Set up duplex in the Device Control and Transmit Control |
1891 | ctrl |= E1000_CTRL_SPD_1000; | 1983 | * registers depending on negotiated values. |
1892 | else if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_100MBS) | 1984 | */ |
1893 | ctrl |= E1000_CTRL_SPD_100; | 1985 | ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, |
1986 | &phy_data); | ||
1987 | if (ret_val) | ||
1988 | return ret_val; | ||
1989 | |||
1990 | if (phy_data & M88E1000_PSSR_DPLX) | ||
1991 | ctrl |= E1000_CTRL_FD; | ||
1992 | else | ||
1993 | ctrl &= ~E1000_CTRL_FD; | ||
1994 | |||
1995 | e1000_config_collision_dist(hw); | ||
1996 | |||
1997 | /* Set up speed in the Device Control register depending on | ||
1998 | * negotiated values. | ||
1999 | */ | ||
2000 | if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) | ||
2001 | ctrl |= E1000_CTRL_SPD_1000; | ||
2002 | else if ((phy_data & M88E1000_PSSR_SPEED) == | ||
2003 | M88E1000_PSSR_100MBS) | ||
2004 | ctrl |= E1000_CTRL_SPD_100; | ||
2005 | } | ||
1894 | 2006 | ||
1895 | /* Write the configured values back to the Device Control Reg. */ | 2007 | /* Write the configured values back to the Device Control Reg. */ |
1896 | ew32(CTRL, ctrl); | 2008 | ew32(CTRL, ctrl); |
@@ -2401,7 +2513,8 @@ s32 e1000_check_for_link(struct e1000_hw *hw) | |||
2401 | * speed/duplex on the MAC to the current PHY speed/duplex | 2513 | * speed/duplex on the MAC to the current PHY speed/duplex |
2402 | * settings. | 2514 | * settings. |
2403 | */ | 2515 | */ |
2404 | if (hw->mac_type >= e1000_82544) | 2516 | if ((hw->mac_type >= e1000_82544) && |
2517 | (hw->mac_type != e1000_ce4100)) | ||
2405 | e1000_config_collision_dist(hw); | 2518 | e1000_config_collision_dist(hw); |
2406 | else { | 2519 | else { |
2407 | ret_val = e1000_config_mac_to_phy(hw); | 2520 | ret_val = e1000_config_mac_to_phy(hw); |
@@ -2738,7 +2851,7 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, | |||
2738 | { | 2851 | { |
2739 | u32 i; | 2852 | u32 i; |
2740 | u32 mdic = 0; | 2853 | u32 mdic = 0; |
2741 | const u32 phy_addr = 1; | 2854 | const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1; |
2742 | 2855 | ||
2743 | e_dbg("e1000_read_phy_reg_ex"); | 2856 | e_dbg("e1000_read_phy_reg_ex"); |
2744 | 2857 | ||
@@ -2752,28 +2865,61 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, | |||
2752 | * Control register. The MAC will take care of interfacing with the | 2865 | * Control register. The MAC will take care of interfacing with the |
2753 | * PHY to retrieve the desired data. | 2866 | * PHY to retrieve the desired data. |
2754 | */ | 2867 | */ |
2755 | mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) | | 2868 | if (hw->mac_type == e1000_ce4100) { |
2756 | (phy_addr << E1000_MDIC_PHY_SHIFT) | | 2869 | mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) | |
2757 | (E1000_MDIC_OP_READ)); | 2870 | (phy_addr << E1000_MDIC_PHY_SHIFT) | |
2871 | (INTEL_CE_GBE_MDIC_OP_READ) | | ||
2872 | (INTEL_CE_GBE_MDIC_GO)); | ||
2758 | 2873 | ||
2759 | ew32(MDIC, mdic); | 2874 | writel(mdic, E1000_MDIO_CMD); |
2760 | 2875 | ||
2761 | /* Poll the ready bit to see if the MDI read completed */ | 2876 | /* Poll the ready bit to see if the MDI read |
2762 | for (i = 0; i < 64; i++) { | 2877 | * completed |
2763 | udelay(50); | 2878 | */ |
2764 | mdic = er32(MDIC); | 2879 | for (i = 0; i < 64; i++) { |
2765 | if (mdic & E1000_MDIC_READY) | 2880 | udelay(50); |
2766 | break; | 2881 | mdic = readl(E1000_MDIO_CMD); |
2767 | } | 2882 | if (!(mdic & INTEL_CE_GBE_MDIC_GO)) |
2768 | if (!(mdic & E1000_MDIC_READY)) { | 2883 | break; |
2769 | e_dbg("MDI Read did not complete\n"); | 2884 | } |
2770 | return -E1000_ERR_PHY; | 2885 | |
2771 | } | 2886 | if (mdic & INTEL_CE_GBE_MDIC_GO) { |
2772 | if (mdic & E1000_MDIC_ERROR) { | 2887 | e_dbg("MDI Read did not complete\n"); |
2773 | e_dbg("MDI Error\n"); | 2888 | return -E1000_ERR_PHY; |
2774 | return -E1000_ERR_PHY; | 2889 | } |
2890 | |||
2891 | mdic = readl(E1000_MDIO_STS); | ||
2892 | if (mdic & INTEL_CE_GBE_MDIC_READ_ERROR) { | ||
2893 | e_dbg("MDI Read Error\n"); | ||
2894 | return -E1000_ERR_PHY; | ||
2895 | } | ||
2896 | *phy_data = (u16) mdic; | ||
2897 | } else { | ||
2898 | mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) | | ||
2899 | (phy_addr << E1000_MDIC_PHY_SHIFT) | | ||
2900 | (E1000_MDIC_OP_READ)); | ||
2901 | |||
2902 | ew32(MDIC, mdic); | ||
2903 | |||
2904 | /* Poll the ready bit to see if the MDI read | ||
2905 | * completed | ||
2906 | */ | ||
2907 | for (i = 0; i < 64; i++) { | ||
2908 | udelay(50); | ||
2909 | mdic = er32(MDIC); | ||
2910 | if (mdic & E1000_MDIC_READY) | ||
2911 | break; | ||
2912 | } | ||
2913 | if (!(mdic & E1000_MDIC_READY)) { | ||
2914 | e_dbg("MDI Read did not complete\n"); | ||
2915 | return -E1000_ERR_PHY; | ||
2916 | } | ||
2917 | if (mdic & E1000_MDIC_ERROR) { | ||
2918 | e_dbg("MDI Error\n"); | ||
2919 | return -E1000_ERR_PHY; | ||
2920 | } | ||
2921 | *phy_data = (u16) mdic; | ||
2775 | } | 2922 | } |
2776 | *phy_data = (u16) mdic; | ||
2777 | } else { | 2923 | } else { |
2778 | /* We must first send a preamble through the MDIO pin to signal the | 2924 | /* We must first send a preamble through the MDIO pin to signal the |
2779 | * beginning of an MII instruction. This is done by sending 32 | 2925 | * beginning of an MII instruction. This is done by sending 32 |
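On CE4100 the MDIO state machine is not driven through the MDIC register at all: a separate register block (mapped to ce4100_gbe_mdio_base_virt from BAR 1 during probe) exposes E1000_MDIO_CMD and E1000_MDIO_STS, a command is kicked off by setting INTEL_CE_GBE_MDIC_GO, and completion is signalled by that bit clearing rather than by a READY bit being set; read errors are then reported in the status register. A hypothetical helper distilling the poll pattern the read and write paths above now share (names from this commit; the helper itself is not in the driver):

	/* Wait for the CE4100 MDIO engine to accept a command: the hardware
	 * clears the GO bit when it is done. */
	static s32 ce4100_mdic_wait(void)
	{
		u32 mdic;
		u32 i;

		for (i = 0; i < 64; i++) {
			udelay(50);
			mdic = readl(E1000_MDIO_CMD);
			if (!(mdic & INTEL_CE_GBE_MDIC_GO))
				return E1000_SUCCESS;
		}
		return -E1000_ERR_PHY;	/* GO still set: command timed out */
	}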
@@ -2840,7 +2986,7 @@ static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, | |||
2840 | { | 2986 | { |
2841 | u32 i; | 2987 | u32 i; |
2842 | u32 mdic = 0; | 2988 | u32 mdic = 0; |
2843 | const u32 phy_addr = 1; | 2989 | const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1; |
2844 | 2990 | ||
2845 | e_dbg("e1000_write_phy_reg_ex"); | 2991 | e_dbg("e1000_write_phy_reg_ex"); |
2846 | 2992 | ||
@@ -2850,27 +2996,54 @@ static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, | |||
2850 | } | 2996 | } |
2851 | 2997 | ||
2852 | if (hw->mac_type > e1000_82543) { | 2998 | if (hw->mac_type > e1000_82543) { |
2853 | /* Set up Op-code, Phy Address, register address, and data intended | 2999 | /* Set up Op-code, Phy Address, register address, and data |
2854 | * for the PHY register in the MDI Control register. The MAC will take | 3000 | * intended for the PHY register in the MDI Control register. |
2855 | * care of interfacing with the PHY to send the desired data. | 3001 | * The MAC will take care of interfacing with the PHY to send |
3002 | * the desired data. | ||
2856 | */ | 3003 | */ |
2857 | mdic = (((u32) phy_data) | | 3004 | if (hw->mac_type == e1000_ce4100) { |
2858 | (reg_addr << E1000_MDIC_REG_SHIFT) | | 3005 | mdic = (((u32) phy_data) | |
2859 | (phy_addr << E1000_MDIC_PHY_SHIFT) | | 3006 | (reg_addr << E1000_MDIC_REG_SHIFT) | |
2860 | (E1000_MDIC_OP_WRITE)); | 3007 | (phy_addr << E1000_MDIC_PHY_SHIFT) | |
3008 | (INTEL_CE_GBE_MDIC_OP_WRITE) | | ||
3009 | (INTEL_CE_GBE_MDIC_GO)); | ||
2861 | 3010 | ||
2862 | ew32(MDIC, mdic); | 3011 | writel(mdic, E1000_MDIO_CMD); |
2863 | 3012 | ||
2864 | /* Poll the ready bit to see if the MDI read completed */ | 3013 | /* Poll the ready bit to see if the MDI read |
2865 | for (i = 0; i < 641; i++) { | 3014 | * completed |
2866 | udelay(5); | 3015 | */ |
2867 | mdic = er32(MDIC); | 3016 | for (i = 0; i < 640; i++) { |
2868 | if (mdic & E1000_MDIC_READY) | 3017 | udelay(5); |
2869 | break; | 3018 | mdic = readl(E1000_MDIO_CMD); |
2870 | } | 3019 | if (!(mdic & INTEL_CE_GBE_MDIC_GO)) |
2871 | if (!(mdic & E1000_MDIC_READY)) { | 3020 | break; |
2872 | e_dbg("MDI Write did not complete\n"); | 3021 | } |
2873 | return -E1000_ERR_PHY; | 3022 | if (mdic & INTEL_CE_GBE_MDIC_GO) { |
3023 | e_dbg("MDI Write did not complete\n"); | ||
3024 | return -E1000_ERR_PHY; | ||
3025 | } | ||
3026 | } else { | ||
3027 | mdic = (((u32) phy_data) | | ||
3028 | (reg_addr << E1000_MDIC_REG_SHIFT) | | ||
3029 | (phy_addr << E1000_MDIC_PHY_SHIFT) | | ||
3030 | (E1000_MDIC_OP_WRITE)); | ||
3031 | |||
3032 | ew32(MDIC, mdic); | ||
3033 | |||
3034 | /* Poll the ready bit to see if the MDI read | ||
3035 | * completed | ||
3036 | */ | ||
3037 | for (i = 0; i < 641; i++) { | ||
3038 | udelay(5); | ||
3039 | mdic = er32(MDIC); | ||
3040 | if (mdic & E1000_MDIC_READY) | ||
3041 | break; | ||
3042 | } | ||
3043 | if (!(mdic & E1000_MDIC_READY)) { | ||
3044 | e_dbg("MDI Write did not complete\n"); | ||
3045 | return -E1000_ERR_PHY; | ||
3046 | } | ||
2874 | } | 3047 | } |
2875 | } else { | 3048 | } else { |
2876 | /* We'll need to use the SW defined pins to shift the write command | 3049 | /* We'll need to use the SW defined pins to shift the write command |
@@ -3048,6 +3221,12 @@ static s32 e1000_detect_gig_phy(struct e1000_hw *hw) | |||
3048 | if (hw->phy_id == M88E1011_I_PHY_ID) | 3221 | if (hw->phy_id == M88E1011_I_PHY_ID) |
3049 | match = true; | 3222 | match = true; |
3050 | break; | 3223 | break; |
3224 | case e1000_ce4100: | ||
3225 | if ((hw->phy_id == RTL8211B_PHY_ID) || | ||
3226 | (hw->phy_id == RTL8201N_PHY_ID) || | ||
3227 | (hw->phy_id == M88E1118_E_PHY_ID)) | ||
3228 | match = true; | ||
3229 | break; | ||
3051 | case e1000_82541: | 3230 | case e1000_82541: |
3052 | case e1000_82541_rev_2: | 3231 | case e1000_82541_rev_2: |
3053 | case e1000_82547: | 3232 | case e1000_82547: |
@@ -3291,6 +3470,9 @@ s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info) | |||
3291 | 3470 | ||
3292 | if (hw->phy_type == e1000_phy_igp) | 3471 | if (hw->phy_type == e1000_phy_igp) |
3293 | return e1000_phy_igp_get_info(hw, phy_info); | 3472 | return e1000_phy_igp_get_info(hw, phy_info); |
3473 | else if ((hw->phy_type == e1000_phy_8211) || | ||
3474 | (hw->phy_type == e1000_phy_8201)) | ||
3475 | return E1000_SUCCESS; | ||
3294 | else | 3476 | else |
3295 | return e1000_phy_m88_get_info(hw, phy_info); | 3477 | return e1000_phy_m88_get_info(hw, phy_info); |
3296 | } | 3478 | } |
@@ -3742,6 +3924,12 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, | |||
3742 | 3924 | ||
3743 | e_dbg("e1000_read_eeprom"); | 3925 | e_dbg("e1000_read_eeprom"); |
3744 | 3926 | ||
3927 | if (hw->mac_type == e1000_ce4100) { | ||
3928 | GBE_CONFIG_FLASH_READ(GBE_CONFIG_BASE_VIRT, offset, words, | ||
3929 | data); | ||
3930 | return E1000_SUCCESS; | ||
3931 | } | ||
3932 | |||
3745 | /* If eeprom is not yet detected, do so now */ | 3933 | /* If eeprom is not yet detected, do so now */ |
3746 | if (eeprom->word_size == 0) | 3934 | if (eeprom->word_size == 0) |
3747 | e1000_init_eeprom_params(hw); | 3935 | e1000_init_eeprom_params(hw); |
@@ -3904,6 +4092,12 @@ static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, | |||
3904 | 4092 | ||
3905 | e_dbg("e1000_write_eeprom"); | 4093 | e_dbg("e1000_write_eeprom"); |
3906 | 4094 | ||
4095 | if (hw->mac_type == e1000_ce4100) { | ||
4096 | GBE_CONFIG_FLASH_WRITE(GBE_CONFIG_BASE_VIRT, offset, words, | ||
4097 | data); | ||
4098 | return E1000_SUCCESS; | ||
4099 | } | ||
4100 | |||
3907 | /* If eeprom is not yet detected, do so now */ | 4101 | /* If eeprom is not yet detected, do so now */ |
3908 | if (eeprom->word_size == 0) | 4102 | if (eeprom->word_size == 0) |
3909 | e1000_init_eeprom_params(hw); | 4103 | e1000_init_eeprom_params(hw); |
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h index ecd9f6c6bcd5..c70b23d52284 100644 --- a/drivers/net/e1000/e1000_hw.h +++ b/drivers/net/e1000/e1000_hw.h | |||
@@ -41,7 +41,7 @@ struct e1000_hw; | |||
41 | struct e1000_hw_stats; | 41 | struct e1000_hw_stats; |
42 | 42 | ||
43 | /* Enumerated types specific to the e1000 hardware */ | 43 | /* Enumerated types specific to the e1000 hardware */ |
44 | /* Media Access Controlers */ | 44 | /* Media Access Controllers */ |
45 | typedef enum { | 45 | typedef enum { |
46 | e1000_undefined = 0, | 46 | e1000_undefined = 0, |
47 | e1000_82542_rev2_0, | 47 | e1000_82542_rev2_0, |
@@ -52,6 +52,7 @@ typedef enum { | |||
52 | e1000_82545, | 52 | e1000_82545, |
53 | e1000_82545_rev_3, | 53 | e1000_82545_rev_3, |
54 | e1000_82546, | 54 | e1000_82546, |
55 | e1000_ce4100, | ||
55 | e1000_82546_rev_3, | 56 | e1000_82546_rev_3, |
56 | e1000_82541, | 57 | e1000_82541, |
57 | e1000_82541_rev_2, | 58 | e1000_82541_rev_2, |
@@ -209,9 +210,11 @@ typedef enum { | |||
209 | } e1000_1000t_rx_status; | 210 | } e1000_1000t_rx_status; |
210 | 211 | ||
211 | typedef enum { | 212 | typedef enum { |
212 | e1000_phy_m88 = 0, | 213 | e1000_phy_m88 = 0, |
213 | e1000_phy_igp, | 214 | e1000_phy_igp, |
214 | e1000_phy_undefined = 0xFF | 215 | e1000_phy_8211, |
216 | e1000_phy_8201, | ||
217 | e1000_phy_undefined = 0xFF | ||
215 | } e1000_phy_type; | 218 | } e1000_phy_type; |
216 | 219 | ||
217 | typedef enum { | 220 | typedef enum { |
@@ -442,6 +445,7 @@ void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value); | |||
442 | #define E1000_DEV_ID_82547EI 0x1019 | 445 | #define E1000_DEV_ID_82547EI 0x1019 |
443 | #define E1000_DEV_ID_82547EI_MOBILE 0x101A | 446 | #define E1000_DEV_ID_82547EI_MOBILE 0x101A |
444 | #define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5 | 447 | #define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5 |
448 | #define E1000_DEV_ID_INTEL_CE4100_GBE 0x2E6E | ||
445 | 449 | ||
446 | #define NODE_ADDRESS_SIZE 6 | 450 | #define NODE_ADDRESS_SIZE 6 |
447 | #define ETH_LENGTH_OF_ADDRESS 6 | 451 | #define ETH_LENGTH_OF_ADDRESS 6 |
@@ -808,6 +812,16 @@ struct e1000_ffvt_entry { | |||
808 | #define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */ | 812 | #define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */ |
809 | #define E1000_FLA 0x0001C /* Flash Access - RW */ | 813 | #define E1000_FLA 0x0001C /* Flash Access - RW */ |
810 | #define E1000_MDIC 0x00020 /* MDI Control - RW */ | 814 | #define E1000_MDIC 0x00020 /* MDI Control - RW */ |
815 | |||
816 | extern void __iomem *ce4100_gbe_mdio_base_virt; | ||
817 | #define INTEL_CE_GBE_MDIO_RCOMP_BASE (ce4100_gbe_mdio_base_virt) | ||
818 | #define E1000_MDIO_STS (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0) | ||
819 | #define E1000_MDIO_CMD (INTEL_CE_GBE_MDIO_RCOMP_BASE + 4) | ||
820 | #define E1000_MDIO_DRV (INTEL_CE_GBE_MDIO_RCOMP_BASE + 8) | ||
821 | #define E1000_MDC_CMD (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0xC) | ||
822 | #define E1000_RCOMP_CTL (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0x20) | ||
823 | #define E1000_RCOMP_STS (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0x24) | ||
824 | |||
811 | #define E1000_SCTL 0x00024 /* SerDes Control - RW */ | 825 | #define E1000_SCTL 0x00024 /* SerDes Control - RW */ |
812 | #define E1000_FEXTNVM 0x00028 /* Future Extended NVM register */ | 826 | #define E1000_FEXTNVM 0x00028 /* Future Extended NVM register */ |
813 | #define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ | 827 | #define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ |
@@ -820,6 +834,34 @@ struct e1000_ffvt_entry { | |||
820 | #define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */ | 834 | #define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */ |
821 | #define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */ | 835 | #define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */ |
822 | #define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */ | 836 | #define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */ |
837 | |||
838 | /* Auxiliary Control Register. This register is CE4100 specific, | ||
839 | * RMII/RGMII function is switched by this register - RW | ||
840 | * Following are bits definitions of the Auxiliary Control Register | ||
841 | */ | ||
842 | #define E1000_CTL_AUX 0x000E0 | ||
843 | #define E1000_CTL_AUX_END_SEL_SHIFT 10 | ||
844 | #define E1000_CTL_AUX_ENDIANESS_SHIFT 8 | ||
845 | #define E1000_CTL_AUX_RGMII_RMII_SHIFT 0 | ||
846 | |||
847 | /* descriptor and packet transfer use CTL_AUX.ENDIANESS */ | ||
848 | #define E1000_CTL_AUX_DES_PKT (0x0 << E1000_CTL_AUX_END_SEL_SHIFT) | ||
849 | /* descriptor use CTL_AUX.ENDIANESS, packet use default */ | ||
850 | #define E1000_CTL_AUX_DES (0x1 << E1000_CTL_AUX_END_SEL_SHIFT) | ||
851 | /* descriptor use default, packet use CTL_AUX.ENDIANESS */ | ||
852 | #define E1000_CTL_AUX_PKT (0x2 << E1000_CTL_AUX_END_SEL_SHIFT) | ||
853 | /* all use CTL_AUX.ENDIANESS */ | ||
854 | #define E1000_CTL_AUX_ALL (0x3 << E1000_CTL_AUX_END_SEL_SHIFT) | ||
855 | |||
856 | #define E1000_CTL_AUX_RGMII (0x0 << E1000_CTL_AUX_RGMII_RMII_SHIFT) | ||
857 | #define E1000_CTL_AUX_RMII (0x1 << E1000_CTL_AUX_RGMII_RMII_SHIFT) | ||
858 | |||
859 | /* LW little endian, Byte big endian */ | ||
860 | #define E1000_CTL_AUX_LWLE_BBE (0x0 << E1000_CTL_AUX_ENDIANESS_SHIFT) | ||
861 | #define E1000_CTL_AUX_LWLE_BLE (0x1 << E1000_CTL_AUX_ENDIANESS_SHIFT) | ||
862 | #define E1000_CTL_AUX_LWBE_BBE (0x2 << E1000_CTL_AUX_ENDIANESS_SHIFT) | ||
863 | #define E1000_CTL_AUX_LWBE_BLE (0x3 << E1000_CTL_AUX_ENDIANESS_SHIFT) | ||
864 | |||
823 | #define E1000_RCTL 0x00100 /* RX Control - RW */ | 865 | #define E1000_RCTL 0x00100 /* RX Control - RW */ |
824 | #define E1000_RDTR1 0x02820 /* RX Delay Timer (1) - RW */ | 866 | #define E1000_RDTR1 0x02820 /* RX Delay Timer (1) - RW */ |
825 | #define E1000_RDBAL1 0x02900 /* RX Descriptor Base Address Low (1) - RW */ | 867 | #define E1000_RDBAL1 0x02900 /* RX Descriptor Base Address Low (1) - RW */ |
@@ -1011,6 +1053,7 @@ struct e1000_ffvt_entry { | |||
1011 | * in more current versions of the 8254x. Despite the difference in location, | 1053 | * in more current versions of the 8254x. Despite the difference in location, |
1012 | * the registers function in the same manner. | 1054 | * the registers function in the same manner. |
1013 | */ | 1055 | */ |
1056 | #define E1000_82542_CTL_AUX E1000_CTL_AUX | ||
1014 | #define E1000_82542_CTRL E1000_CTRL | 1057 | #define E1000_82542_CTRL E1000_CTRL |
1015 | #define E1000_82542_CTRL_DUP E1000_CTRL_DUP | 1058 | #define E1000_82542_CTRL_DUP E1000_CTRL_DUP |
1016 | #define E1000_82542_STATUS E1000_STATUS | 1059 | #define E1000_82542_STATUS E1000_STATUS |
@@ -1571,6 +1614,11 @@ struct e1000_hw { | |||
1571 | #define E1000_MDIC_INT_EN 0x20000000 | 1614 | #define E1000_MDIC_INT_EN 0x20000000 |
1572 | #define E1000_MDIC_ERROR 0x40000000 | 1615 | #define E1000_MDIC_ERROR 0x40000000 |
1573 | 1616 | ||
1617 | #define INTEL_CE_GBE_MDIC_OP_WRITE 0x04000000 | ||
1618 | #define INTEL_CE_GBE_MDIC_OP_READ 0x00000000 | ||
1619 | #define INTEL_CE_GBE_MDIC_GO 0x80000000 | ||
1620 | #define INTEL_CE_GBE_MDIC_READ_ERROR 0x80000000 | ||
1621 | |||
1574 | #define E1000_KUMCTRLSTA_MASK 0x0000FFFF | 1622 | #define E1000_KUMCTRLSTA_MASK 0x0000FFFF |
1575 | #define E1000_KUMCTRLSTA_OFFSET 0x001F0000 | 1623 | #define E1000_KUMCTRLSTA_OFFSET 0x001F0000 |
1576 | #define E1000_KUMCTRLSTA_OFFSET_SHIFT 16 | 1624 | #define E1000_KUMCTRLSTA_OFFSET_SHIFT 16 |
@@ -2869,8 +2917,14 @@ struct e1000_host_command_info { | |||
2869 | #define M88E1000_14_PHY_ID M88E1000_E_PHY_ID | 2917 | #define M88E1000_14_PHY_ID M88E1000_E_PHY_ID |
2870 | #define M88E1011_I_REV_4 0x04 | 2918 | #define M88E1011_I_REV_4 0x04 |
2871 | #define M88E1111_I_PHY_ID 0x01410CC0 | 2919 | #define M88E1111_I_PHY_ID 0x01410CC0 |
2920 | #define M88E1118_E_PHY_ID 0x01410E40 | ||
2872 | #define L1LXT971A_PHY_ID 0x001378E0 | 2921 | #define L1LXT971A_PHY_ID 0x001378E0 |
2873 | 2922 | ||
2923 | #define RTL8211B_PHY_ID 0x001CC910 | ||
2924 | #define RTL8201N_PHY_ID 0x8200 | ||
2925 | #define RTL_PHY_CTRL_FD 0x0100 /* Full duplex.0=half; 1=full */ | ||
2926 | #define RTL_PHY_CTRL_SPD_100 0x200000 /* Force 100Mb */ | ||
2927 | |||
2874 | /* Bits... | 2928 | /* Bits... |
2875 | * 15-5: page | 2929 | * 15-5: page |
2876 | * 4-0: register offset | 2930 | * 4-0: register offset |
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index 340e12d2e4a9..bfab14092d2c 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c | |||
@@ -28,6 +28,12 @@ | |||
28 | 28 | ||
29 | #include "e1000.h" | 29 | #include "e1000.h" |
30 | #include <net/ip6_checksum.h> | 30 | #include <net/ip6_checksum.h> |
31 | #include <linux/io.h> | ||
32 | |||
33 | /* Intel Media SOC GbE MDIO physical base address */ | ||
34 | static unsigned long ce4100_gbe_mdio_base_phy; | ||
35 | /* Intel Media SOC GbE MDIO virtual base address */ | ||
36 | void __iomem *ce4100_gbe_mdio_base_virt; | ||
31 | 37 | ||
32 | char e1000_driver_name[] = "e1000"; | 38 | char e1000_driver_name[] = "e1000"; |
33 | static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver"; | 39 | static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver"; |
@@ -79,6 +85,7 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = { | |||
79 | INTEL_E1000_ETHERNET_DEVICE(0x108A), | 85 | INTEL_E1000_ETHERNET_DEVICE(0x108A), |
80 | INTEL_E1000_ETHERNET_DEVICE(0x1099), | 86 | INTEL_E1000_ETHERNET_DEVICE(0x1099), |
81 | INTEL_E1000_ETHERNET_DEVICE(0x10B5), | 87 | INTEL_E1000_ETHERNET_DEVICE(0x10B5), |
88 | INTEL_E1000_ETHERNET_DEVICE(0x2E6E), | ||
82 | /* required last entry */ | 89 | /* required last entry */ |
83 | {0,} | 90 | {0,} |
84 | }; | 91 | }; |
@@ -459,6 +466,7 @@ static void e1000_power_down_phy(struct e1000_adapter *adapter) | |||
459 | case e1000_82545: | 466 | case e1000_82545: |
460 | case e1000_82545_rev_3: | 467 | case e1000_82545_rev_3: |
461 | case e1000_82546: | 468 | case e1000_82546: |
469 | case e1000_ce4100: | ||
462 | case e1000_82546_rev_3: | 470 | case e1000_82546_rev_3: |
463 | case e1000_82541: | 471 | case e1000_82541: |
464 | case e1000_82541_rev_2: | 472 | case e1000_82541_rev_2: |
@@ -573,6 +581,7 @@ void e1000_reset(struct e1000_adapter *adapter) | |||
573 | case e1000_82545: | 581 | case e1000_82545: |
574 | case e1000_82545_rev_3: | 582 | case e1000_82545_rev_3: |
575 | case e1000_82546: | 583 | case e1000_82546: |
584 | case e1000_ce4100: | ||
576 | case e1000_82546_rev_3: | 585 | case e1000_82546_rev_3: |
577 | pba = E1000_PBA_48K; | 586 | pba = E1000_PBA_48K; |
578 | break; | 587 | break; |
@@ -894,6 +903,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev, | |||
894 | static int global_quad_port_a = 0; /* global ksp3 port a indication */ | 903 | static int global_quad_port_a = 0; /* global ksp3 port a indication */ |
895 | int i, err, pci_using_dac; | 904 | int i, err, pci_using_dac; |
896 | u16 eeprom_data = 0; | 905 | u16 eeprom_data = 0; |
906 | u16 tmp = 0; | ||
897 | u16 eeprom_apme_mask = E1000_EEPROM_APME; | 907 | u16 eeprom_apme_mask = E1000_EEPROM_APME; |
898 | int bars, need_ioport; | 908 | int bars, need_ioport; |
899 | 909 | ||
@@ -996,6 +1006,14 @@ static int __devinit e1000_probe(struct pci_dev *pdev, | |||
996 | goto err_sw_init; | 1006 | goto err_sw_init; |
997 | 1007 | ||
998 | err = -EIO; | 1008 | err = -EIO; |
1009 | if (hw->mac_type == e1000_ce4100) { | ||
1010 | ce4100_gbe_mdio_base_phy = pci_resource_start(pdev, BAR_1); | ||
1011 | ce4100_gbe_mdio_base_virt = ioremap(ce4100_gbe_mdio_base_phy, | ||
1012 | pci_resource_len(pdev, BAR_1)); | ||
1013 | |||
1014 | if (!ce4100_gbe_mdio_base_virt) | ||
1015 | goto err_mdio_ioremap; | ||
1016 | } | ||
999 | 1017 | ||
1000 | if (hw->mac_type >= e1000_82543) { | 1018 | if (hw->mac_type >= e1000_82543) { |
1001 | netdev->features = NETIF_F_SG | | 1019 | netdev->features = NETIF_F_SG | |
@@ -1135,6 +1153,20 @@ static int __devinit e1000_probe(struct pci_dev *pdev, | |||
1135 | adapter->wol = adapter->eeprom_wol; | 1153 | adapter->wol = adapter->eeprom_wol; |
1136 | device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); | 1154 | device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); |
1137 | 1155 | ||
1156 | /* Auto detect PHY address */ | ||
1157 | if (hw->mac_type == e1000_ce4100) { | ||
1158 | for (i = 0; i < 32; i++) { | ||
1159 | hw->phy_addr = i; | ||
1160 | e1000_read_phy_reg(hw, PHY_ID2, &tmp); | ||
1161 | if (tmp == 0 || tmp == 0xFF) { | ||
1162 | if (i == 31) | ||
1163 | goto err_eeprom; | ||
1164 | continue; | ||
1165 | } else | ||
1166 | break; | ||
1167 | } | ||
1168 | } | ||
1169 | |||
1138 | /* reset the hardware with the new settings */ | 1170 | /* reset the hardware with the new settings */ |
1139 | e1000_reset(adapter); | 1171 | e1000_reset(adapter); |
1140 | 1172 | ||
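Unlike the PCI NICs, which hard-code PHY address 1, the CE4100 boards wire the external PHY at a board-dependent MDIO address, so probe scans all 32 addresses and keeps the first one whose PHY_ID2 register returns something other than 0x0000 or 0xFFFF (what an unpopulated address typically floats to). The same scan, reshaped as a hypothetical helper for clarity (not how the driver structures it):

	/* Find the first MDIO address that answers with a plausible PHY_ID2. */
	static s32 e1000_scan_phy_addr(struct e1000_hw *hw)
	{
		u16 id2;
		u32 addr;

		for (addr = 0; addr < 32; addr++) {
			hw->phy_addr = addr;
			e1000_read_phy_reg(hw, PHY_ID2, &id2);
			if (id2 != 0 && id2 != 0xFFFF)
				return E1000_SUCCESS;	/* a PHY responded here */
		}
		return -E1000_ERR_PHY;		/* nothing answered on the bus */
	}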
@@ -1171,6 +1203,8 @@ err_eeprom: | |||
1171 | kfree(adapter->rx_ring); | 1203 | kfree(adapter->rx_ring); |
1172 | err_dma: | 1204 | err_dma: |
1173 | err_sw_init: | 1205 | err_sw_init: |
1206 | err_mdio_ioremap: | ||
1207 | iounmap(ce4100_gbe_mdio_base_virt); | ||
1174 | iounmap(hw->hw_addr); | 1208 | iounmap(hw->hw_addr); |
1175 | err_ioremap: | 1209 | err_ioremap: |
1176 | free_netdev(netdev); | 1210 | free_netdev(netdev); |
@@ -1409,6 +1443,7 @@ static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start, | |||
1409 | /* First rev 82545 and 82546 need to not allow any memory | 1443 | /* First rev 82545 and 82546 need to not allow any memory |
1410 | * write location to cross 64k boundary due to errata 23 */ | 1444 | * write location to cross 64k boundary due to errata 23 */ |
1411 | if (hw->mac_type == e1000_82545 || | 1445 | if (hw->mac_type == e1000_82545 || |
1446 | hw->mac_type == e1000_ce4100 || | ||
1412 | hw->mac_type == e1000_82546) { | 1447 | hw->mac_type == e1000_82546) { |
1413 | return ((begin ^ (end - 1)) >> 16) != 0 ? false : true; | 1448 | return ((begin ^ (end - 1)) >> 16) != 0 ? false : true; |
1414 | } | 1449 | } |
@@ -2198,7 +2233,7 @@ static void e1000_set_rx_mode(struct net_device *netdev) | |||
2198 | * addresses take precedence to avoid disabling unicast filtering | 2233 | * addresses take precedence to avoid disabling unicast filtering |
2199 | * when possible. | 2234 | * when possible. |
2200 | * | 2235 | * |
2201 | * RAR 0 is used for the station MAC adddress | 2236 | * RAR 0 is used for the station MAC address |
2202 | * if there are not 14 addresses, go ahead and clear the filters | 2237 | * if there are not 14 addresses, go ahead and clear the filters |
2203 | */ | 2238 | */ |
2204 | i = 1; | 2239 | i = 1; |
@@ -3443,9 +3478,17 @@ static irqreturn_t e1000_intr(int irq, void *data) | |||
3443 | struct e1000_hw *hw = &adapter->hw; | 3478 | struct e1000_hw *hw = &adapter->hw; |
3444 | u32 icr = er32(ICR); | 3479 | u32 icr = er32(ICR); |
3445 | 3480 | ||
3446 | if (unlikely((!icr) || test_bit(__E1000_DOWN, &adapter->flags))) | 3481 | if (unlikely((!icr))) |
3447 | return IRQ_NONE; /* Not our interrupt */ | 3482 | return IRQ_NONE; /* Not our interrupt */ |
3448 | 3483 | ||
3484 | /* | ||
3485 | * we might have caused the interrupt, but the above | ||
3486 | * read cleared it, and just in case the driver is | ||
3487 | * down there is nothing to do so return handled | ||
3488 | */ | ||
3489 | if (unlikely(test_bit(__E1000_DOWN, &adapter->flags))) | ||
3490 | return IRQ_HANDLED; | ||
3491 | |||
3449 | if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { | 3492 | if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { |
3450 | hw->get_link_status = 1; | 3493 | hw->get_link_status = 1; |
3451 | /* guard against interrupt when we're going down */ | 3494 | /* guard against interrupt when we're going down */ |
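Reading ICR is what clears it, so once the handler has seen a non-zero value the interrupt really was this device's and has already been acknowledged; returning IRQ_NONE just because the adapter is being brought down would make the IRQ core count it as spurious. The reordered check therefore answers IRQ_NONE only for a genuinely empty ICR and IRQ_HANDLED when the device is down. In outline (a sketch of the two early returns above):

	u32 icr = er32(ICR);			/* read-to-clear */
	if (unlikely(!icr))
		return IRQ_NONE;		/* shared line, not our interrupt */
	if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
		return IRQ_HANDLED;		/* ours, already cleared, nothing to do */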
diff --git a/drivers/net/e1000/e1000_osdep.h b/drivers/net/e1000/e1000_osdep.h index edd1c75aa895..33e7c45a4fe4 100644 --- a/drivers/net/e1000/e1000_osdep.h +++ b/drivers/net/e1000/e1000_osdep.h | |||
@@ -34,12 +34,22 @@ | |||
34 | #ifndef _E1000_OSDEP_H_ | 34 | #ifndef _E1000_OSDEP_H_ |
35 | #define _E1000_OSDEP_H_ | 35 | #define _E1000_OSDEP_H_ |
36 | 36 | ||
37 | #include <linux/types.h> | ||
38 | #include <linux/pci.h> | ||
39 | #include <linux/delay.h> | ||
40 | #include <asm/io.h> | 37 | #include <asm/io.h> |
41 | #include <linux/interrupt.h> | 38 | |
42 | #include <linux/sched.h> | 39 | #define CONFIG_RAM_BASE 0x60000 |
40 | #define GBE_CONFIG_OFFSET 0x0 | ||
41 | |||
42 | #define GBE_CONFIG_RAM_BASE \ | ||
43 | ((unsigned int)(CONFIG_RAM_BASE + GBE_CONFIG_OFFSET)) | ||
44 | |||
45 | #define GBE_CONFIG_BASE_VIRT \ | ||
46 | ((void __iomem *)phys_to_virt(GBE_CONFIG_RAM_BASE)) | ||
47 | |||
48 | #define GBE_CONFIG_FLASH_WRITE(base, offset, count, data) \ | ||
49 | (iowrite16_rep(base + offset, data, count)) | ||
50 | |||
51 | #define GBE_CONFIG_FLASH_READ(base, offset, count, data) \ | ||
52 | (ioread16_rep(base + (offset << 1), data, count)) | ||
43 | 53 | ||
44 | #define er32(reg) \ | 54 | #define er32(reg) \ |
45 | (readl(hw->hw_addr + ((hw->mac_type >= e1000_82543) \ | 55 | (readl(hw->hw_addr + ((hw->mac_type >= e1000_82543) \ |
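For CE4100 the NVM image is not behind an EEPROM controller but sits in a fixed memory window (CONFIG_RAM_BASE), so the osdep header maps it with phys_to_virt() and provides 16-bit streaming accessors built on ioread16_rep()/iowrite16_rep(). The EEPROM read and write paths in e1000_hw.c short-circuit to these macros when mac_type is e1000_ce4100, as in this condensed excerpt of e1000_do_read_eeprom() from earlier in the commit:

	if (hw->mac_type == e1000_ce4100) {
		GBE_CONFIG_FLASH_READ(GBE_CONFIG_BASE_VIRT, offset, words, data);
		return E1000_SUCCESS;
	}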
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c index e57e4097ef1b..89a69035e538 100644 --- a/drivers/net/e1000e/82571.c +++ b/drivers/net/e1000e/82571.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel PRO/1000 Linux driver | 3 | Intel PRO/1000 Linux driver |
4 | Copyright(c) 1999 - 2010 Intel Corporation. | 4 | Copyright(c) 1999 - 2011 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
@@ -78,6 +78,8 @@ static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw); | |||
78 | static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw); | 78 | static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw); |
79 | static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw); | 79 | static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw); |
80 | static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw); | 80 | static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw); |
81 | static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active); | ||
82 | static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active); | ||
81 | 83 | ||
82 | /** | 84 | /** |
83 | * e1000_init_phy_params_82571 - Init PHY func ptrs. | 85 | * e1000_init_phy_params_82571 - Init PHY func ptrs. |
@@ -113,6 +115,8 @@ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw) | |||
113 | phy->type = e1000_phy_bm; | 115 | phy->type = e1000_phy_bm; |
114 | phy->ops.acquire = e1000_get_hw_semaphore_82574; | 116 | phy->ops.acquire = e1000_get_hw_semaphore_82574; |
115 | phy->ops.release = e1000_put_hw_semaphore_82574; | 117 | phy->ops.release = e1000_put_hw_semaphore_82574; |
118 | phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82574; | ||
119 | phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82574; | ||
116 | break; | 120 | break; |
117 | default: | 121 | default: |
118 | return -E1000_ERR_PHY; | 122 | return -E1000_ERR_PHY; |
@@ -121,29 +125,36 @@ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw) | |||
121 | 125 | ||
122 | /* This can only be done after all function pointers are setup. */ | 126 | /* This can only be done after all function pointers are setup. */ |
123 | ret_val = e1000_get_phy_id_82571(hw); | 127 | ret_val = e1000_get_phy_id_82571(hw); |
128 | if (ret_val) { | ||
129 | e_dbg("Error getting PHY ID\n"); | ||
130 | return ret_val; | ||
131 | } | ||
124 | 132 | ||
125 | /* Verify phy id */ | 133 | /* Verify phy id */ |
126 | switch (hw->mac.type) { | 134 | switch (hw->mac.type) { |
127 | case e1000_82571: | 135 | case e1000_82571: |
128 | case e1000_82572: | 136 | case e1000_82572: |
129 | if (phy->id != IGP01E1000_I_PHY_ID) | 137 | if (phy->id != IGP01E1000_I_PHY_ID) |
130 | return -E1000_ERR_PHY; | 138 | ret_val = -E1000_ERR_PHY; |
131 | break; | 139 | break; |
132 | case e1000_82573: | 140 | case e1000_82573: |
133 | if (phy->id != M88E1111_I_PHY_ID) | 141 | if (phy->id != M88E1111_I_PHY_ID) |
134 | return -E1000_ERR_PHY; | 142 | ret_val = -E1000_ERR_PHY; |
135 | break; | 143 | break; |
136 | case e1000_82574: | 144 | case e1000_82574: |
137 | case e1000_82583: | 145 | case e1000_82583: |
138 | if (phy->id != BME1000_E_PHY_ID_R2) | 146 | if (phy->id != BME1000_E_PHY_ID_R2) |
139 | return -E1000_ERR_PHY; | 147 | ret_val = -E1000_ERR_PHY; |
140 | break; | 148 | break; |
141 | default: | 149 | default: |
142 | return -E1000_ERR_PHY; | 150 | ret_val = -E1000_ERR_PHY; |
143 | break; | 151 | break; |
144 | } | 152 | } |
145 | 153 | ||
146 | return 0; | 154 | if (ret_val) |
155 | e_dbg("PHY ID unknown: type = 0x%08x\n", phy->id); | ||
156 | |||
157 | return ret_val; | ||
147 | } | 158 | } |
148 | 159 | ||
149 | /** | 160 | /** |
@@ -317,7 +328,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter) | |||
317 | 328 | ||
318 | /* | 329 | /* |
319 | * Ensure that the inter-port SWSM.SMBI lock bit is clear before | 330 | * Ensure that the inter-port SWSM.SMBI lock bit is clear before |
320 | * first NVM or PHY acess. This should be done for single-port | 331 | * first NVM or PHY access. This should be done for single-port |
321 | * devices, and for one port only on dual-port devices so that | 332 | * devices, and for one port only on dual-port devices so that |
322 | * for those devices we can still use the SMBI lock to synchronize | 333 | * for those devices we can still use the SMBI lock to synchronize |
323 | * inter-port accesses to the PHY & NVM. | 334 | * inter-port accesses to the PHY & NVM. |
@@ -649,6 +660,58 @@ static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw) | |||
649 | } | 660 | } |
650 | 661 | ||
651 | /** | 662 | /** |
663 | * e1000_set_d0_lplu_state_82574 - Set Low Power Linkup D0 state | ||
664 | * @hw: pointer to the HW structure | ||
665 | * @active: true to enable LPLU, false to disable | ||
666 | * | ||
667 | * Sets the LPLU D0 state according to the active flag. | ||
668 | * LPLU will not be activated unless the | ||
669 | * device autonegotiation advertisement meets standards of | ||
670 | * either 10 or 10/100 or 10/100/1000 at all duplexes. | ||
671 | * This is a function pointer entry point only called by | ||
672 | * PHY setup routines. | ||
673 | **/ | ||
674 | static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active) | ||
675 | { | ||
676 | u16 data = er32(POEMB); | ||
677 | |||
678 | if (active) | ||
679 | data |= E1000_PHY_CTRL_D0A_LPLU; | ||
680 | else | ||
681 | data &= ~E1000_PHY_CTRL_D0A_LPLU; | ||
682 | |||
683 | ew32(POEMB, data); | ||
684 | return 0; | ||
685 | } | ||
686 | |||
687 | /** | ||
688 | * e1000_set_d3_lplu_state_82574 - Sets low power link up state for D3 | ||
689 | * @hw: pointer to the HW structure | ||
690 | * @active: boolean used to enable/disable lplu | ||
691 | * | ||
692 | * The low power link up (lplu) state is set to the power management level D3 | ||
693 | * when active is true, else clear lplu for D3. LPLU | ||
694 | * is used during Dx states where the power conservation is most important. | ||
695 | * During driver activity, SmartSpeed should be enabled so performance is | ||
696 | * maintained. | ||
697 | **/ | ||
698 | static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active) | ||
699 | { | ||
700 | u16 data = er32(POEMB); | ||
701 | |||
702 | if (!active) { | ||
703 | data &= ~E1000_PHY_CTRL_NOND0A_LPLU; | ||
704 | } else if ((hw->phy.autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || | ||
705 | (hw->phy.autoneg_advertised == E1000_ALL_NOT_GIG) || | ||
706 | (hw->phy.autoneg_advertised == E1000_ALL_10_SPEED)) { | ||
707 | data |= E1000_PHY_CTRL_NOND0A_LPLU; | ||
708 | } | ||
709 | |||
710 | ew32(POEMB, data); | ||
711 | return 0; | ||
712 | } | ||
713 | |||
714 | /** | ||
652 | * e1000_acquire_nvm_82571 - Request for access to the EEPROM | 715 | * e1000_acquire_nvm_82571 - Request for access to the EEPROM |
653 | * @hw: pointer to the HW structure | 716 | * @hw: pointer to the HW structure |
654 | * | 717 | * |
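The 82574 and 82583 expose Low Power Link Up control through the MAC's POEMB register instead of through PHY registers, so dedicated handlers are wired into phy.ops: the D0 variant simply mirrors the active flag, while the D3 variant only enables LPLU when the autonegotiation advertisement is one of the standard sets that includes 10 Mb (10, 10/100, or 10/100/1000 at all duplexes), so the link can actually renegotiate down to a low-power speed. An assumed call shape for the new ops — the call sites live in shared e1000e code, not in this hunk:

	/* Entering a D3 power state: let the MAC-specific handler decide
	 * whether LPLU should be turned on. */
	if (hw->phy.ops.set_d3_lplu_state)
		hw->phy.ops.set_d3_lplu_state(hw, true);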
@@ -956,7 +1019,7 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active) | |||
956 | **/ | 1019 | **/ |
957 | static s32 e1000_reset_hw_82571(struct e1000_hw *hw) | 1020 | static s32 e1000_reset_hw_82571(struct e1000_hw *hw) |
958 | { | 1021 | { |
959 | u32 ctrl, ctrl_ext, icr; | 1022 | u32 ctrl, ctrl_ext; |
960 | s32 ret_val; | 1023 | s32 ret_val; |
961 | 1024 | ||
962 | /* | 1025 | /* |
@@ -1040,7 +1103,7 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw) | |||
1040 | 1103 | ||
1041 | /* Clear any pending interrupt events. */ | 1104 | /* Clear any pending interrupt events. */ |
1042 | ew32(IMC, 0xffffffff); | 1105 | ew32(IMC, 0xffffffff); |
1043 | icr = er32(ICR); | 1106 | er32(ICR); |
1044 | 1107 | ||
1045 | if (hw->mac.type == e1000_82571) { | 1108 | if (hw->mac.type == e1000_82571) { |
1046 | /* Install any alternate MAC address into RAR0 */ | 1109 | /* Install any alternate MAC address into RAR0 */ |
@@ -1247,7 +1310,7 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw) | |||
1247 | * apply workaround for hardware errata documented in errata | 1310 | * apply workaround for hardware errata documented in errata |
1248 | * docs Fixes issue where some error prone or unreliable PCIe | 1311 | * docs Fixes issue where some error prone or unreliable PCIe |
1249 | * completions are occurring, particularly with ASPM enabled. | 1312 | * completions are occurring, particularly with ASPM enabled. |
1250 | * Without fix, issue can cause tx timeouts. | 1313 | * Without fix, issue can cause Tx timeouts. |
1251 | */ | 1314 | */ |
1252 | reg = er32(GCR2); | 1315 | reg = er32(GCR2); |
1253 | reg |= 1; | 1316 | reg |= 1; |
diff --git a/drivers/net/e1000e/Makefile b/drivers/net/e1000e/Makefile index 360c91369f35..28519acacd2d 100644 --- a/drivers/net/e1000e/Makefile +++ b/drivers/net/e1000e/Makefile | |||
@@ -1,7 +1,7 @@ | |||
1 | ################################################################################ | 1 | ################################################################################ |
2 | # | 2 | # |
3 | # Intel PRO/1000 Linux driver | 3 | # Intel PRO/1000 Linux driver |
4 | # Copyright(c) 1999 - 2008 Intel Corporation. | 4 | # Copyright(c) 1999 - 2011 Intel Corporation. |
5 | # | 5 | # |
6 | # This program is free software; you can redistribute it and/or modify it | 6 | # This program is free software; you can redistribute it and/or modify it |
7 | # under the terms and conditions of the GNU General Public License, | 7 | # under the terms and conditions of the GNU General Public License, |
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h index 7245dc2e0b7c..13149983d07e 100644 --- a/drivers/net/e1000e/defines.h +++ b/drivers/net/e1000e/defines.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel PRO/1000 Linux driver | 3 | Intel PRO/1000 Linux driver |
4 | Copyright(c) 1999 - 2010 Intel Corporation. | 4 | Copyright(c) 1999 - 2011 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h index 2c913b8e9116..e610e1369053 100644 --- a/drivers/net/e1000e/e1000.h +++ b/drivers/net/e1000e/e1000.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel PRO/1000 Linux driver | 3 | Intel PRO/1000 Linux driver |
4 | Copyright(c) 1999 - 2010 Intel Corporation. | 4 | Copyright(c) 1999 - 2011 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
@@ -38,6 +38,7 @@ | |||
38 | #include <linux/netdevice.h> | 38 | #include <linux/netdevice.h> |
39 | #include <linux/pci.h> | 39 | #include <linux/pci.h> |
40 | #include <linux/pci-aspm.h> | 40 | #include <linux/pci-aspm.h> |
41 | #include <linux/crc32.h> | ||
41 | 42 | ||
42 | #include "hw.h" | 43 | #include "hw.h" |
43 | 44 | ||
@@ -496,6 +497,8 @@ extern void e1000e_free_tx_resources(struct e1000_adapter *adapter); | |||
496 | extern void e1000e_update_stats(struct e1000_adapter *adapter); | 497 | extern void e1000e_update_stats(struct e1000_adapter *adapter); |
497 | extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter); | 498 | extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter); |
498 | extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter); | 499 | extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter); |
500 | extern void e1000e_get_hw_control(struct e1000_adapter *adapter); | ||
501 | extern void e1000e_release_hw_control(struct e1000_adapter *adapter); | ||
499 | extern void e1000e_disable_aspm(struct pci_dev *pdev, u16 state); | 502 | extern void e1000e_disable_aspm(struct pci_dev *pdev, u16 state); |
500 | 503 | ||
501 | extern unsigned int copybreak; | 504 | extern unsigned int copybreak; |
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c index b18c644e13d1..2fefa820302b 100644 --- a/drivers/net/e1000e/es2lan.c +++ b/drivers/net/e1000e/es2lan.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel PRO/1000 Linux driver | 3 | Intel PRO/1000 Linux driver |
4 | Copyright(c) 1999 - 2010 Intel Corporation. | 4 | Copyright(c) 1999 - 2011 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
@@ -784,7 +784,7 @@ static s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed, | |||
784 | **/ | 784 | **/ |
785 | static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw) | 785 | static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw) |
786 | { | 786 | { |
787 | u32 ctrl, icr; | 787 | u32 ctrl; |
788 | s32 ret_val; | 788 | s32 ret_val; |
789 | 789 | ||
790 | /* | 790 | /* |
@@ -818,7 +818,7 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw) | |||
818 | 818 | ||
819 | /* Clear any pending interrupt events. */ | 819 | /* Clear any pending interrupt events. */ |
820 | ew32(IMC, 0xffffffff); | 820 | ew32(IMC, 0xffffffff); |
821 | icr = er32(ICR); | 821 | er32(ICR); |
822 | 822 | ||
823 | ret_val = e1000_check_alt_mac_addr_generic(hw); | 823 | ret_val = e1000_check_alt_mac_addr_generic(hw); |
824 | 824 | ||
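Note on this hunk: ICR is a read-to-clear register, so the read itself is the acknowledgement and the returned value never mattered; dropping the unused `icr` local just silences a set-but-unused warning. A minimal sketch of the pattern, using the driver's er32()/ew32() accessors as shown above:

	ew32(IMC, 0xffffffff);	/* mask all interrupt causes */
	er32(ICR);		/* read-to-clear: the read acknowledges any pending
				 * events; the value is intentionally discarded */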
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c index affcacf6f5a9..fa08b6336cfb 100644 --- a/drivers/net/e1000e/ethtool.c +++ b/drivers/net/e1000e/ethtool.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel PRO/1000 Linux driver | 3 | Intel PRO/1000 Linux driver |
4 | Copyright(c) 1999 - 2010 Intel Corporation. | 4 | Copyright(c) 1999 - 2011 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
@@ -624,20 +624,24 @@ static void e1000_get_drvinfo(struct net_device *netdev, | |||
624 | struct e1000_adapter *adapter = netdev_priv(netdev); | 624 | struct e1000_adapter *adapter = netdev_priv(netdev); |
625 | char firmware_version[32]; | 625 | char firmware_version[32]; |
626 | 626 | ||
627 | strncpy(drvinfo->driver, e1000e_driver_name, 32); | 627 | strncpy(drvinfo->driver, e1000e_driver_name, |
628 | strncpy(drvinfo->version, e1000e_driver_version, 32); | 628 | sizeof(drvinfo->driver) - 1); |
629 | strncpy(drvinfo->version, e1000e_driver_version, | ||
630 | sizeof(drvinfo->version) - 1); | ||
629 | 631 | ||
630 | /* | 632 | /* |
631 | * EEPROM image version # is reported as firmware version # for | 633 | * EEPROM image version # is reported as firmware version # for |
632 | * PCI-E controllers | 634 | * PCI-E controllers |
633 | */ | 635 | */ |
634 | sprintf(firmware_version, "%d.%d-%d", | 636 | snprintf(firmware_version, sizeof(firmware_version), "%d.%d-%d", |
635 | (adapter->eeprom_vers & 0xF000) >> 12, | 637 | (adapter->eeprom_vers & 0xF000) >> 12, |
636 | (adapter->eeprom_vers & 0x0FF0) >> 4, | 638 | (adapter->eeprom_vers & 0x0FF0) >> 4, |
637 | (adapter->eeprom_vers & 0x000F)); | 639 | (adapter->eeprom_vers & 0x000F)); |
638 | 640 | ||
639 | strncpy(drvinfo->fw_version, firmware_version, 32); | 641 | strncpy(drvinfo->fw_version, firmware_version, |
640 | strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); | 642 | sizeof(drvinfo->fw_version) - 1); |
643 | strncpy(drvinfo->bus_info, pci_name(adapter->pdev), | ||
644 | sizeof(drvinfo->bus_info) - 1); | ||
641 | drvinfo->regdump_len = e1000_get_regs_len(netdev); | 645 | drvinfo->regdump_len = e1000_get_regs_len(netdev); |
642 | drvinfo->eedump_len = e1000_get_eeprom_len(netdev); | 646 | drvinfo->eedump_len = e1000_get_eeprom_len(netdev); |
643 | } | 647 | } |
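The hunk above drops the hard-coded 32-byte copy lengths in favour of sizes taken from the destination fields and builds the firmware version with snprintf() instead of sprintf(). The reason for the sizeof(dst) - 1 bound is sketched below; the helper is illustrative and not part of the driver:

	#include <linux/string.h>

	/* strncpy() does not NUL-terminate when the source fills the buffer, so
	 * copy at most dst_len - 1 bytes and terminate explicitly (the ethtool
	 * core zeroes the drvinfo struct before the callback, which has the same
	 * effect in practice). */
	static void copy_bounded(char *dst, size_t dst_len, const char *src)
	{
		strncpy(dst, src, dst_len - 1);
		dst[dst_len - 1] = '\0';
	}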
@@ -1704,6 +1708,19 @@ static void e1000_diag_test(struct net_device *netdev, | |||
1704 | bool if_running = netif_running(netdev); | 1708 | bool if_running = netif_running(netdev); |
1705 | 1709 | ||
1706 | set_bit(__E1000_TESTING, &adapter->state); | 1710 | set_bit(__E1000_TESTING, &adapter->state); |
1711 | |||
1712 | if (!if_running) { | ||
1713 | /* Get control of and reset hardware */ | ||
1714 | if (adapter->flags & FLAG_HAS_AMT) | ||
1715 | e1000e_get_hw_control(adapter); | ||
1716 | |||
1717 | e1000e_power_up_phy(adapter); | ||
1718 | |||
1719 | adapter->hw.phy.autoneg_wait_to_complete = 1; | ||
1720 | e1000e_reset(adapter); | ||
1721 | adapter->hw.phy.autoneg_wait_to_complete = 0; | ||
1722 | } | ||
1723 | |||
1707 | if (eth_test->flags == ETH_TEST_FL_OFFLINE) { | 1724 | if (eth_test->flags == ETH_TEST_FL_OFFLINE) { |
1708 | /* Offline tests */ | 1725 | /* Offline tests */ |
1709 | 1726 | ||
@@ -1717,8 +1734,6 @@ static void e1000_diag_test(struct net_device *netdev, | |||
1717 | if (if_running) | 1734 | if (if_running) |
1718 | /* indicate we're in test mode */ | 1735 | /* indicate we're in test mode */ |
1719 | dev_close(netdev); | 1736 | dev_close(netdev); |
1720 | else | ||
1721 | e1000e_reset(adapter); | ||
1722 | 1737 | ||
1723 | if (e1000_reg_test(adapter, &data[0])) | 1738 | if (e1000_reg_test(adapter, &data[0])) |
1724 | eth_test->flags |= ETH_TEST_FL_FAILED; | 1739 | eth_test->flags |= ETH_TEST_FL_FAILED; |
@@ -1732,8 +1747,6 @@ static void e1000_diag_test(struct net_device *netdev, | |||
1732 | eth_test->flags |= ETH_TEST_FL_FAILED; | 1747 | eth_test->flags |= ETH_TEST_FL_FAILED; |
1733 | 1748 | ||
1734 | e1000e_reset(adapter); | 1749 | e1000e_reset(adapter); |
1735 | /* make sure the phy is powered up */ | ||
1736 | e1000e_power_up_phy(adapter); | ||
1737 | if (e1000_loopback_test(adapter, &data[3])) | 1750 | if (e1000_loopback_test(adapter, &data[3])) |
1738 | eth_test->flags |= ETH_TEST_FL_FAILED; | 1751 | eth_test->flags |= ETH_TEST_FL_FAILED; |
1739 | 1752 | ||
@@ -1755,28 +1768,29 @@ static void e1000_diag_test(struct net_device *netdev, | |||
1755 | if (if_running) | 1768 | if (if_running) |
1756 | dev_open(netdev); | 1769 | dev_open(netdev); |
1757 | } else { | 1770 | } else { |
1758 | if (!if_running && (adapter->flags & FLAG_HAS_AMT)) { | 1771 | /* Online tests */ |
1759 | clear_bit(__E1000_TESTING, &adapter->state); | ||
1760 | dev_open(netdev); | ||
1761 | set_bit(__E1000_TESTING, &adapter->state); | ||
1762 | } | ||
1763 | 1772 | ||
1764 | e_info("online testing starting\n"); | 1773 | e_info("online testing starting\n"); |
1765 | /* Online tests */ | ||
1766 | if (e1000_link_test(adapter, &data[4])) | ||
1767 | eth_test->flags |= ETH_TEST_FL_FAILED; | ||
1768 | 1774 | ||
1769 | /* Online tests aren't run; pass by default */ | 1775 | /* register, eeprom, intr and loopback tests not run online */ |
1770 | data[0] = 0; | 1776 | data[0] = 0; |
1771 | data[1] = 0; | 1777 | data[1] = 0; |
1772 | data[2] = 0; | 1778 | data[2] = 0; |
1773 | data[3] = 0; | 1779 | data[3] = 0; |
1774 | 1780 | ||
1775 | if (!if_running && (adapter->flags & FLAG_HAS_AMT)) | 1781 | if (e1000_link_test(adapter, &data[4])) |
1776 | dev_close(netdev); | 1782 | eth_test->flags |= ETH_TEST_FL_FAILED; |
1777 | 1783 | ||
1778 | clear_bit(__E1000_TESTING, &adapter->state); | 1784 | clear_bit(__E1000_TESTING, &adapter->state); |
1779 | } | 1785 | } |
1786 | |||
1787 | if (!if_running) { | ||
1788 | e1000e_reset(adapter); | ||
1789 | |||
1790 | if (adapter->flags & FLAG_HAS_AMT) | ||
1791 | e1000e_release_hw_control(adapter); | ||
1792 | } | ||
1793 | |||
1780 | msleep_interruptible(4 * 1000); | 1794 | msleep_interruptible(4 * 1000); |
1781 | } | 1795 | } |
1782 | 1796 | ||
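Taken together, the ethtool hunks above rework the self-test bracketing: when the interface is not running, the driver now takes hardware control from the manageability firmware (AMT), powers up the PHY and resets before the tests, and hands control back afterwards, instead of bouncing the interface open/closed. A simplified outline of the resulting flow (helper names are the driver's, the body is illustrative):

	set_bit(__E1000_TESTING, &adapter->state);
	if (!if_running) {
		if (adapter->flags & FLAG_HAS_AMT)
			e1000e_get_hw_control(adapter);	/* keep AMT from fighting the test */
		e1000e_power_up_phy(adapter);
		e1000e_reset(adapter);
	}
	/* ... offline: register/eeprom/interrupt/loopback/link tests,
	 *     online: link test only ... */
	clear_bit(__E1000_TESTING, &adapter->state);
	if (!if_running) {
		e1000e_reset(adapter);
		if (adapter->flags & FLAG_HAS_AMT)
			e1000e_release_hw_control(adapter);	/* give the h/w back to f/w */
	}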
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h index ba302a5c2c30..bc0860a598c9 100644 --- a/drivers/net/e1000e/hw.h +++ b/drivers/net/e1000e/hw.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel PRO/1000 Linux driver | 3 | Intel PRO/1000 Linux driver |
4 | Copyright(c) 1999 - 2010 Intel Corporation. | 4 | Copyright(c) 1999 - 2011 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
@@ -83,6 +83,7 @@ enum e1e_registers { | |||
83 | E1000_EXTCNF_CTRL = 0x00F00, /* Extended Configuration Control */ | 83 | E1000_EXTCNF_CTRL = 0x00F00, /* Extended Configuration Control */ |
84 | E1000_EXTCNF_SIZE = 0x00F08, /* Extended Configuration Size */ | 84 | E1000_EXTCNF_SIZE = 0x00F08, /* Extended Configuration Size */ |
85 | E1000_PHY_CTRL = 0x00F10, /* PHY Control Register in CSR */ | 85 | E1000_PHY_CTRL = 0x00F10, /* PHY Control Register in CSR */ |
86 | #define E1000_POEMB E1000_PHY_CTRL /* PHY OEM Bits */ | ||
86 | E1000_PBA = 0x01000, /* Packet Buffer Allocation - RW */ | 87 | E1000_PBA = 0x01000, /* Packet Buffer Allocation - RW */ |
87 | E1000_PBS = 0x01008, /* Packet Buffer Size */ | 88 | E1000_PBS = 0x01008, /* Packet Buffer Size */ |
88 | E1000_EEMNGCTL = 0x01010, /* MNG EEprom Control */ | 89 | E1000_EEMNGCTL = 0x01010, /* MNG EEprom Control */ |
@@ -101,7 +102,7 @@ enum e1e_registers { | |||
101 | E1000_RDTR = 0x02820, /* Rx Delay Timer - RW */ | 102 | E1000_RDTR = 0x02820, /* Rx Delay Timer - RW */ |
102 | E1000_RXDCTL_BASE = 0x02828, /* Rx Descriptor Control - RW */ | 103 | E1000_RXDCTL_BASE = 0x02828, /* Rx Descriptor Control - RW */ |
103 | #define E1000_RXDCTL(_n) (E1000_RXDCTL_BASE + (_n << 8)) | 104 | #define E1000_RXDCTL(_n) (E1000_RXDCTL_BASE + (_n << 8)) |
104 | E1000_RADV = 0x0282C, /* RX Interrupt Absolute Delay Timer - RW */ | 105 | E1000_RADV = 0x0282C, /* Rx Interrupt Absolute Delay Timer - RW */ |
105 | 106 | ||
106 | /* Convenience macros | 107 | /* Convenience macros |
107 | * | 108 | * |
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c index d86cc0832720..fb46974cfec1 100644 --- a/drivers/net/e1000e/ich8lan.c +++ b/drivers/net/e1000e/ich8lan.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel PRO/1000 Linux driver | 3 | Intel PRO/1000 Linux driver |
4 | Copyright(c) 1999 - 2010 Intel Corporation. | 4 | Copyright(c) 1999 - 2011 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
@@ -321,7 +321,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) | |||
321 | } | 321 | } |
322 | 322 | ||
323 | /* | 323 | /* |
324 | * Reset the PHY before any acccess to it. Doing so, ensures that | 324 | * Reset the PHY before any access to it. Doing so, ensures that |
325 | * the PHY is in a known good state before we read/write PHY registers. | 325 | * the PHY is in a known good state before we read/write PHY registers. |
326 | * The generic reset is sufficient here, because we haven't determined | 326 | * The generic reset is sufficient here, because we haven't determined |
327 | * the PHY type yet. | 327 | * the PHY type yet. |
@@ -1395,22 +1395,6 @@ void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw) | |||
1395 | } | 1395 | } |
1396 | } | 1396 | } |
1397 | 1397 | ||
1398 | static u32 e1000_calc_rx_da_crc(u8 mac[]) | ||
1399 | { | ||
1400 | u32 poly = 0xEDB88320; /* Polynomial for 802.3 CRC calculation */ | ||
1401 | u32 i, j, mask, crc; | ||
1402 | |||
1403 | crc = 0xffffffff; | ||
1404 | for (i = 0; i < 6; i++) { | ||
1405 | crc = crc ^ mac[i]; | ||
1406 | for (j = 8; j > 0; j--) { | ||
1407 | mask = (crc & 1) * (-1); | ||
1408 | crc = (crc >> 1) ^ (poly & mask); | ||
1409 | } | ||
1410 | } | ||
1411 | return ~crc; | ||
1412 | } | ||
1413 | |||
1414 | /** | 1398 | /** |
1415 | * e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation | 1399 | * e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation |
1416 | * with 82579 PHY | 1400 | * with 82579 PHY |
@@ -1453,8 +1437,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable) | |||
1453 | mac_addr[4] = (addr_high & 0xFF); | 1437 | mac_addr[4] = (addr_high & 0xFF); |
1454 | mac_addr[5] = ((addr_high >> 8) & 0xFF); | 1438 | mac_addr[5] = ((addr_high >> 8) & 0xFF); |
1455 | 1439 | ||
1456 | ew32(PCH_RAICC(i), | 1440 | ew32(PCH_RAICC(i), ~ether_crc_le(ETH_ALEN, mac_addr)); |
1457 | e1000_calc_rx_da_crc(mac_addr)); | ||
1458 | } | 1441 | } |
1459 | 1442 | ||
1460 | /* Write Rx addresses to the PHY */ | 1443 | /* Write Rx addresses to the PHY */ |
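This hunk, together with the #include <linux/crc32.h> added to e1000.h earlier in the diff, replaces the open-coded 802.3 CRC with a kernel helper. The removed loop computed a reflected CRC-32 (polynomial 0xEDB88320) seeded with 0xffffffff and returned its complement; ~ether_crc_le(ETH_ALEN, mac_addr) yields the same value because ether_crc_le() is a thin wrapper around crc32_le(). A sketch of the equivalence (the helper below is illustrative):

	#include <linux/crc32.h>
	#include <linux/etherdevice.h>

	/* ether_crc_le(len, p) is essentially crc32_le(~0, p, len), so the value
	 * written to PCH_RAICC(i) stays bit-for-bit what the removed helper gave. */
	static u32 rx_da_crc(const u8 mac[ETH_ALEN])
	{
		return ~ether_crc_le(ETH_ALEN, mac);
	}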
@@ -2977,7 +2960,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) | |||
2977 | { | 2960 | { |
2978 | struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; | 2961 | struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; |
2979 | u16 reg; | 2962 | u16 reg; |
2980 | u32 ctrl, icr, kab; | 2963 | u32 ctrl, kab; |
2981 | s32 ret_val; | 2964 | s32 ret_val; |
2982 | 2965 | ||
2983 | /* | 2966 | /* |
@@ -3067,7 +3050,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) | |||
3067 | ew32(CRC_OFFSET, 0x65656565); | 3050 | ew32(CRC_OFFSET, 0x65656565); |
3068 | 3051 | ||
3069 | ew32(IMC, 0xffffffff); | 3052 | ew32(IMC, 0xffffffff); |
3070 | icr = er32(ICR); | 3053 | er32(ICR); |
3071 | 3054 | ||
3072 | kab = er32(KABGTXD); | 3055 | kab = er32(KABGTXD); |
3073 | kab |= E1000_KABGTXD_BGSQLBIAS; | 3056 | kab |= E1000_KABGTXD_BGSQLBIAS; |
@@ -3118,7 +3101,7 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw) | |||
3118 | * Reset the phy after disabling host wakeup to reset the Rx buffer. | 3101 | * Reset the phy after disabling host wakeup to reset the Rx buffer. |
3119 | */ | 3102 | */ |
3120 | if (hw->phy.type == e1000_phy_82578) { | 3103 | if (hw->phy.type == e1000_phy_82578) { |
3121 | hw->phy.ops.read_reg(hw, BM_WUC, &i); | 3104 | e1e_rphy(hw, BM_WUC, &i); |
3122 | ret_val = e1000_phy_hw_reset_ich8lan(hw); | 3105 | ret_val = e1000_phy_hw_reset_ich8lan(hw); |
3123 | if (ret_val) | 3106 | if (ret_val) |
3124 | return ret_val; | 3107 | return ret_val; |
@@ -3276,9 +3259,8 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw) | |||
3276 | (hw->phy.type == e1000_phy_82577)) { | 3259 | (hw->phy.type == e1000_phy_82577)) { |
3277 | ew32(FCRTV_PCH, hw->fc.refresh_time); | 3260 | ew32(FCRTV_PCH, hw->fc.refresh_time); |
3278 | 3261 | ||
3279 | ret_val = hw->phy.ops.write_reg(hw, | 3262 | ret_val = e1e_wphy(hw, PHY_REG(BM_PORT_CTRL_PAGE, 27), |
3280 | PHY_REG(BM_PORT_CTRL_PAGE, 27), | 3263 | hw->fc.pause_time); |
3281 | hw->fc.pause_time); | ||
3282 | if (ret_val) | 3264 | if (ret_val) |
3283 | return ret_val; | 3265 | return ret_val; |
3284 | } | 3266 | } |
@@ -3342,8 +3324,7 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw) | |||
3342 | return ret_val; | 3324 | return ret_val; |
3343 | break; | 3325 | break; |
3344 | case e1000_phy_ife: | 3326 | case e1000_phy_ife: |
3345 | ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, | 3327 | ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, ®_data); |
3346 | ®_data); | ||
3347 | if (ret_val) | 3328 | if (ret_val) |
3348 | return ret_val; | 3329 | return ret_val; |
3349 | 3330 | ||
@@ -3361,8 +3342,7 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw) | |||
3361 | reg_data |= IFE_PMC_AUTO_MDIX; | 3342 | reg_data |= IFE_PMC_AUTO_MDIX; |
3362 | break; | 3343 | break; |
3363 | } | 3344 | } |
3364 | ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL, | 3345 | ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, reg_data); |
3365 | reg_data); | ||
3366 | if (ret_val) | 3346 | if (ret_val) |
3367 | return ret_val; | 3347 | return ret_val; |
3368 | break; | 3348 | break; |
@@ -3646,7 +3626,8 @@ static s32 e1000_led_off_ich8lan(struct e1000_hw *hw) | |||
3646 | { | 3626 | { |
3647 | if (hw->phy.type == e1000_phy_ife) | 3627 | if (hw->phy.type == e1000_phy_ife) |
3648 | return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, | 3628 | return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, |
3649 | (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF)); | 3629 | (IFE_PSCL_PROBE_MODE | |
3630 | IFE_PSCL_PROBE_LEDS_OFF)); | ||
3650 | 3631 | ||
3651 | ew32(LEDCTL, hw->mac.ledctl_mode1); | 3632 | ew32(LEDCTL, hw->mac.ledctl_mode1); |
3652 | return 0; | 3633 | return 0; |
@@ -3660,8 +3641,7 @@ static s32 e1000_led_off_ich8lan(struct e1000_hw *hw) | |||
3660 | **/ | 3641 | **/ |
3661 | static s32 e1000_setup_led_pchlan(struct e1000_hw *hw) | 3642 | static s32 e1000_setup_led_pchlan(struct e1000_hw *hw) |
3662 | { | 3643 | { |
3663 | return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, | 3644 | return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_mode1); |
3664 | (u16)hw->mac.ledctl_mode1); | ||
3665 | } | 3645 | } |
3666 | 3646 | ||
3667 | /** | 3647 | /** |
@@ -3672,8 +3652,7 @@ static s32 e1000_setup_led_pchlan(struct e1000_hw *hw) | |||
3672 | **/ | 3652 | **/ |
3673 | static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw) | 3653 | static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw) |
3674 | { | 3654 | { |
3675 | return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, | 3655 | return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_default); |
3676 | (u16)hw->mac.ledctl_default); | ||
3677 | } | 3656 | } |
3678 | 3657 | ||
3679 | /** | 3658 | /** |
@@ -3704,7 +3683,7 @@ static s32 e1000_led_on_pchlan(struct e1000_hw *hw) | |||
3704 | } | 3683 | } |
3705 | } | 3684 | } |
3706 | 3685 | ||
3707 | return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data); | 3686 | return e1e_wphy(hw, HV_LED_CONFIG, data); |
3708 | } | 3687 | } |
3709 | 3688 | ||
3710 | /** | 3689 | /** |
@@ -3735,7 +3714,7 @@ static s32 e1000_led_off_pchlan(struct e1000_hw *hw) | |||
3735 | } | 3714 | } |
3736 | } | 3715 | } |
3737 | 3716 | ||
3738 | return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data); | 3717 | return e1e_wphy(hw, HV_LED_CONFIG, data); |
3739 | } | 3718 | } |
3740 | 3719 | ||
3741 | /** | 3720 | /** |
@@ -3844,20 +3823,20 @@ static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw) | |||
3844 | if ((hw->phy.type == e1000_phy_82578) || | 3823 | if ((hw->phy.type == e1000_phy_82578) || |
3845 | (hw->phy.type == e1000_phy_82579) || | 3824 | (hw->phy.type == e1000_phy_82579) || |
3846 | (hw->phy.type == e1000_phy_82577)) { | 3825 | (hw->phy.type == e1000_phy_82577)) { |
3847 | hw->phy.ops.read_reg(hw, HV_SCC_UPPER, &phy_data); | 3826 | e1e_rphy(hw, HV_SCC_UPPER, &phy_data); |
3848 | hw->phy.ops.read_reg(hw, HV_SCC_LOWER, &phy_data); | 3827 | e1e_rphy(hw, HV_SCC_LOWER, &phy_data); |
3849 | hw->phy.ops.read_reg(hw, HV_ECOL_UPPER, &phy_data); | 3828 | e1e_rphy(hw, HV_ECOL_UPPER, &phy_data); |
3850 | hw->phy.ops.read_reg(hw, HV_ECOL_LOWER, &phy_data); | 3829 | e1e_rphy(hw, HV_ECOL_LOWER, &phy_data); |
3851 | hw->phy.ops.read_reg(hw, HV_MCC_UPPER, &phy_data); | 3830 | e1e_rphy(hw, HV_MCC_UPPER, &phy_data); |
3852 | hw->phy.ops.read_reg(hw, HV_MCC_LOWER, &phy_data); | 3831 | e1e_rphy(hw, HV_MCC_LOWER, &phy_data); |
3853 | hw->phy.ops.read_reg(hw, HV_LATECOL_UPPER, &phy_data); | 3832 | e1e_rphy(hw, HV_LATECOL_UPPER, &phy_data); |
3854 | hw->phy.ops.read_reg(hw, HV_LATECOL_LOWER, &phy_data); | 3833 | e1e_rphy(hw, HV_LATECOL_LOWER, &phy_data); |
3855 | hw->phy.ops.read_reg(hw, HV_COLC_UPPER, &phy_data); | 3834 | e1e_rphy(hw, HV_COLC_UPPER, &phy_data); |
3856 | hw->phy.ops.read_reg(hw, HV_COLC_LOWER, &phy_data); | 3835 | e1e_rphy(hw, HV_COLC_LOWER, &phy_data); |
3857 | hw->phy.ops.read_reg(hw, HV_DC_UPPER, &phy_data); | 3836 | e1e_rphy(hw, HV_DC_UPPER, &phy_data); |
3858 | hw->phy.ops.read_reg(hw, HV_DC_LOWER, &phy_data); | 3837 | e1e_rphy(hw, HV_DC_LOWER, &phy_data); |
3859 | hw->phy.ops.read_reg(hw, HV_TNCRS_UPPER, &phy_data); | 3838 | e1e_rphy(hw, HV_TNCRS_UPPER, &phy_data); |
3860 | hw->phy.ops.read_reg(hw, HV_TNCRS_LOWER, &phy_data); | 3839 | e1e_rphy(hw, HV_TNCRS_LOWER, &phy_data); |
3861 | } | 3840 | } |
3862 | } | 3841 | } |
3863 | 3842 | ||
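The conversion above is purely mechanical: e1e_rphy()/e1e_wphy() are the driver's shorthand for the PHY register ops, and the reads are kept only because these collision/statistics registers clear on read. For reference, e1000.h defines the shorthands roughly as:

	#define e1e_rphy(hw, offset, data) ((hw)->phy.ops.read_reg((hw), (offset), (data)))
	#define e1e_wphy(hw, offset, data) ((hw)->phy.ops.write_reg((hw), (offset), (data)))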
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c index 7e55170a601e..68aa1749bf66 100644 --- a/drivers/net/e1000e/lib.c +++ b/drivers/net/e1000e/lib.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel PRO/1000 Linux driver | 3 | Intel PRO/1000 Linux driver |
4 | Copyright(c) 1999 - 2010 Intel Corporation. | 4 | Copyright(c) 1999 - 2011 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
@@ -533,7 +533,7 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw) | |||
533 | mac->autoneg_failed = 1; | 533 | mac->autoneg_failed = 1; |
534 | return 0; | 534 | return 0; |
535 | } | 535 | } |
536 | e_dbg("NOT RXing /C/, disable AutoNeg and force link.\n"); | 536 | e_dbg("NOT Rx'ing /C/, disable AutoNeg and force link.\n"); |
537 | 537 | ||
538 | /* Disable auto-negotiation in the TXCW register */ | 538 | /* Disable auto-negotiation in the TXCW register */ |
539 | ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); | 539 | ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); |
@@ -556,7 +556,7 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw) | |||
556 | * and disable forced link in the Device Control register | 556 | * and disable forced link in the Device Control register |
557 | * in an attempt to auto-negotiate with our link partner. | 557 | * in an attempt to auto-negotiate with our link partner. |
558 | */ | 558 | */ |
559 | e_dbg("RXing /C/, enable AutoNeg and stop forcing link.\n"); | 559 | e_dbg("Rx'ing /C/, enable AutoNeg and stop forcing link.\n"); |
560 | ew32(TXCW, mac->txcw); | 560 | ew32(TXCW, mac->txcw); |
561 | ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); | 561 | ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); |
562 | 562 | ||
@@ -598,7 +598,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw) | |||
598 | mac->autoneg_failed = 1; | 598 | mac->autoneg_failed = 1; |
599 | return 0; | 599 | return 0; |
600 | } | 600 | } |
601 | e_dbg("NOT RXing /C/, disable AutoNeg and force link.\n"); | 601 | e_dbg("NOT Rx'ing /C/, disable AutoNeg and force link.\n"); |
602 | 602 | ||
603 | /* Disable auto-negotiation in the TXCW register */ | 603 | /* Disable auto-negotiation in the TXCW register */ |
604 | ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); | 604 | ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); |
@@ -621,7 +621,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw) | |||
621 | * and disable forced link in the Device Control register | 621 | * and disable forced link in the Device Control register |
622 | * in an attempt to auto-negotiate with our link partner. | 622 | * in an attempt to auto-negotiate with our link partner. |
623 | */ | 623 | */ |
624 | e_dbg("RXing /C/, enable AutoNeg and stop forcing link.\n"); | 624 | e_dbg("Rx'ing /C/, enable AutoNeg and stop forcing link.\n"); |
625 | ew32(TXCW, mac->txcw); | 625 | ew32(TXCW, mac->txcw); |
626 | ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); | 626 | ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); |
627 | 627 | ||
@@ -800,9 +800,9 @@ static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw) | |||
800 | * The possible values of the "fc" parameter are: | 800 | * The possible values of the "fc" parameter are: |
801 | * 0: Flow control is completely disabled | 801 | * 0: Flow control is completely disabled |
802 | * 1: Rx flow control is enabled (we can receive pause frames, | 802 | * 1: Rx flow control is enabled (we can receive pause frames, |
803 | * but not send pause frames). | 803 | * but not send pause frames). |
804 | * 2: Tx flow control is enabled (we can send pause frames but we | 804 | * 2: Tx flow control is enabled (we can send pause frames but we |
805 | * do not support receiving pause frames). | 805 | * do not support receiving pause frames). |
806 | * 3: Both Rx and Tx flow control (symmetric) are enabled. | 806 | * 3: Both Rx and Tx flow control (symmetric) are enabled. |
807 | */ | 807 | */ |
808 | switch (hw->fc.current_mode) { | 808 | switch (hw->fc.current_mode) { |
@@ -1031,9 +1031,9 @@ s32 e1000e_force_mac_fc(struct e1000_hw *hw) | |||
1031 | * The possible values of the "fc" parameter are: | 1031 | * The possible values of the "fc" parameter are: |
1032 | * 0: Flow control is completely disabled | 1032 | * 0: Flow control is completely disabled |
1033 | * 1: Rx flow control is enabled (we can receive pause | 1033 | * 1: Rx flow control is enabled (we can receive pause |
1034 | * frames but not send pause frames). | 1034 | * frames but not send pause frames). |
1035 | * 2: Tx flow control is enabled (we can send pause frames | 1035 | * 2: Tx flow control is enabled (we can send pause frames |
1036 | * frames but we do not receive pause frames). | 1036 | * frames but we do not receive pause frames). |
1037 | * 3: Both Rx and Tx flow control (symmetric) is enabled. | 1037 | * 3: Both Rx and Tx flow control (symmetric) is enabled. |
1038 | * other: No other values should be possible at this point. | 1038 | * other: No other values should be possible at this point. |
1039 | */ | 1039 | */ |
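Both hunks above only re-indent the comment's continuation lines; the four modes the comment enumerates correspond to the driver's e1000_fc_* values, roughly as in this illustrative switch (mirroring how the surrounding code dispatches on hw->fc.current_mode):

	switch (hw->fc.current_mode) {
	case e1000_fc_none:	/* 0: flow control completely disabled */
		break;
	case e1000_fc_rx_pause:	/* 1: honour received PAUSE frames, never send them */
		break;
	case e1000_fc_tx_pause:	/* 2: send PAUSE frames, ignore received ones */
		break;
	case e1000_fc_full:	/* 3: symmetric Rx and Tx flow control */
		break;
	}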
@@ -1135,7 +1135,8 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) | |||
1135 | ret_val = e1e_rphy(hw, PHY_AUTONEG_ADV, &mii_nway_adv_reg); | 1135 | ret_val = e1e_rphy(hw, PHY_AUTONEG_ADV, &mii_nway_adv_reg); |
1136 | if (ret_val) | 1136 | if (ret_val) |
1137 | return ret_val; | 1137 | return ret_val; |
1138 | ret_val = e1e_rphy(hw, PHY_LP_ABILITY, &mii_nway_lp_ability_reg); | 1138 | ret_val = |
1139 | e1e_rphy(hw, PHY_LP_ABILITY, &mii_nway_lp_ability_reg); | ||
1139 | if (ret_val) | 1140 | if (ret_val) |
1140 | return ret_val; | 1141 | return ret_val; |
1141 | 1142 | ||
@@ -1188,7 +1189,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) | |||
1188 | } else { | 1189 | } else { |
1189 | hw->fc.current_mode = e1000_fc_rx_pause; | 1190 | hw->fc.current_mode = e1000_fc_rx_pause; |
1190 | e_dbg("Flow Control = " | 1191 | e_dbg("Flow Control = " |
1191 | "RX PAUSE frames only.\r\n"); | 1192 | "Rx PAUSE frames only.\r\n"); |
1192 | } | 1193 | } |
1193 | } | 1194 | } |
1194 | /* | 1195 | /* |
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index fe50242aa9e6..2e5022849f18 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel PRO/1000 Linux driver | 3 | Intel PRO/1000 Linux driver |
4 | Copyright(c) 1999 - 2010 Intel Corporation. | 4 | Copyright(c) 1999 - 2011 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
@@ -77,17 +77,17 @@ struct e1000_reg_info { | |||
77 | char *name; | 77 | char *name; |
78 | }; | 78 | }; |
79 | 79 | ||
80 | #define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */ | 80 | #define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */ |
81 | #define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */ | 81 | #define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */ |
82 | #define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */ | 82 | #define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */ |
83 | #define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */ | 83 | #define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */ |
84 | #define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */ | 84 | #define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */ |
85 | 85 | ||
86 | #define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */ | 86 | #define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */ |
87 | #define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */ | 87 | #define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */ |
88 | #define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */ | 88 | #define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */ |
89 | #define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */ | 89 | #define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */ |
90 | #define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */ | 90 | #define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */ |
91 | 91 | ||
92 | static const struct e1000_reg_info e1000_reg_info_tbl[] = { | 92 | static const struct e1000_reg_info e1000_reg_info_tbl[] = { |
93 | 93 | ||
@@ -99,7 +99,7 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = { | |||
99 | /* Interrupt Registers */ | 99 | /* Interrupt Registers */ |
100 | {E1000_ICR, "ICR"}, | 100 | {E1000_ICR, "ICR"}, |
101 | 101 | ||
102 | /* RX Registers */ | 102 | /* Rx Registers */ |
103 | {E1000_RCTL, "RCTL"}, | 103 | {E1000_RCTL, "RCTL"}, |
104 | {E1000_RDLEN, "RDLEN"}, | 104 | {E1000_RDLEN, "RDLEN"}, |
105 | {E1000_RDH, "RDH"}, | 105 | {E1000_RDH, "RDH"}, |
@@ -115,7 +115,7 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = { | |||
115 | {E1000_RDFTS, "RDFTS"}, | 115 | {E1000_RDFTS, "RDFTS"}, |
116 | {E1000_RDFPC, "RDFPC"}, | 116 | {E1000_RDFPC, "RDFPC"}, |
117 | 117 | ||
118 | /* TX Registers */ | 118 | /* Tx Registers */ |
119 | {E1000_TCTL, "TCTL"}, | 119 | {E1000_TCTL, "TCTL"}, |
120 | {E1000_TDBAL, "TDBAL"}, | 120 | {E1000_TDBAL, "TDBAL"}, |
121 | {E1000_TDBAH, "TDBAH"}, | 121 | {E1000_TDBAH, "TDBAH"}, |
@@ -160,7 +160,7 @@ static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo) | |||
160 | break; | 160 | break; |
161 | default: | 161 | default: |
162 | printk(KERN_INFO "%-15s %08x\n", | 162 | printk(KERN_INFO "%-15s %08x\n", |
163 | reginfo->name, __er32(hw, reginfo->ofs)); | 163 | reginfo->name, __er32(hw, reginfo->ofs)); |
164 | return; | 164 | return; |
165 | } | 165 | } |
166 | 166 | ||
@@ -171,9 +171,8 @@ static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo) | |||
171 | printk(KERN_CONT "\n"); | 171 | printk(KERN_CONT "\n"); |
172 | } | 172 | } |
173 | 173 | ||
174 | |||
175 | /* | 174 | /* |
176 | * e1000e_dump - Print registers, tx-ring and rx-ring | 175 | * e1000e_dump - Print registers, Tx-ring and Rx-ring |
177 | */ | 176 | */ |
178 | static void e1000e_dump(struct e1000_adapter *adapter) | 177 | static void e1000e_dump(struct e1000_adapter *adapter) |
179 | { | 178 | { |
@@ -182,12 +181,20 @@ static void e1000e_dump(struct e1000_adapter *adapter) | |||
182 | struct e1000_reg_info *reginfo; | 181 | struct e1000_reg_info *reginfo; |
183 | struct e1000_ring *tx_ring = adapter->tx_ring; | 182 | struct e1000_ring *tx_ring = adapter->tx_ring; |
184 | struct e1000_tx_desc *tx_desc; | 183 | struct e1000_tx_desc *tx_desc; |
185 | struct my_u0 { u64 a; u64 b; } *u0; | 184 | struct my_u0 { |
185 | u64 a; | ||
186 | u64 b; | ||
187 | } *u0; | ||
186 | struct e1000_buffer *buffer_info; | 188 | struct e1000_buffer *buffer_info; |
187 | struct e1000_ring *rx_ring = adapter->rx_ring; | 189 | struct e1000_ring *rx_ring = adapter->rx_ring; |
188 | union e1000_rx_desc_packet_split *rx_desc_ps; | 190 | union e1000_rx_desc_packet_split *rx_desc_ps; |
189 | struct e1000_rx_desc *rx_desc; | 191 | struct e1000_rx_desc *rx_desc; |
190 | struct my_u1 { u64 a; u64 b; u64 c; u64 d; } *u1; | 192 | struct my_u1 { |
193 | u64 a; | ||
194 | u64 b; | ||
195 | u64 c; | ||
196 | u64 d; | ||
197 | } *u1; | ||
191 | u32 staterr; | 198 | u32 staterr; |
192 | int i = 0; | 199 | int i = 0; |
193 | 200 | ||
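The reflow above only expands the one-line overlay structs; their purpose is to let e1000e_dump() print a descriptor as raw little-endian quadwords. A minimal sketch of that trick (field names as in the hunk; the print format is illustrative):

	struct my_u0 { u64 a; u64 b; } *u0;

	u0 = (struct my_u0 *)tx_desc;	/* view the 16-byte descriptor as two quadwords */
	printk(KERN_INFO "T[0x%03X] %016llX %016llX\n", i,
	       (unsigned long long)le64_to_cpu(u0->a),
	       (unsigned long long)le64_to_cpu(u0->b));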
@@ -198,12 +205,10 @@ static void e1000e_dump(struct e1000_adapter *adapter) | |||
198 | if (netdev) { | 205 | if (netdev) { |
199 | dev_info(&adapter->pdev->dev, "Net device Info\n"); | 206 | dev_info(&adapter->pdev->dev, "Net device Info\n"); |
200 | printk(KERN_INFO "Device Name state " | 207 | printk(KERN_INFO "Device Name state " |
201 | "trans_start last_rx\n"); | 208 | "trans_start last_rx\n"); |
202 | printk(KERN_INFO "%-15s %016lX %016lX %016lX\n", | 209 | printk(KERN_INFO "%-15s %016lX %016lX %016lX\n", |
203 | netdev->name, | 210 | netdev->name, netdev->state, netdev->trans_start, |
204 | netdev->state, | 211 | netdev->last_rx); |
205 | netdev->trans_start, | ||
206 | netdev->last_rx); | ||
207 | } | 212 | } |
208 | 213 | ||
209 | /* Print Registers */ | 214 | /* Print Registers */ |
@@ -214,26 +219,26 @@ static void e1000e_dump(struct e1000_adapter *adapter) | |||
214 | e1000_regdump(hw, reginfo); | 219 | e1000_regdump(hw, reginfo); |
215 | } | 220 | } |
216 | 221 | ||
217 | /* Print TX Ring Summary */ | 222 | /* Print Tx Ring Summary */ |
218 | if (!netdev || !netif_running(netdev)) | 223 | if (!netdev || !netif_running(netdev)) |
219 | goto exit; | 224 | goto exit; |
220 | 225 | ||
221 | dev_info(&adapter->pdev->dev, "TX Rings Summary\n"); | 226 | dev_info(&adapter->pdev->dev, "Tx Ring Summary\n"); |
222 | printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ]" | 227 | printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ]" |
223 | " leng ntw timestamp\n"); | 228 | " leng ntw timestamp\n"); |
224 | buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean]; | 229 | buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean]; |
225 | printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n", | 230 | printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n", |
226 | 0, tx_ring->next_to_use, tx_ring->next_to_clean, | 231 | 0, tx_ring->next_to_use, tx_ring->next_to_clean, |
227 | (unsigned long long)buffer_info->dma, | 232 | (unsigned long long)buffer_info->dma, |
228 | buffer_info->length, | 233 | buffer_info->length, |
229 | buffer_info->next_to_watch, | 234 | buffer_info->next_to_watch, |
230 | (unsigned long long)buffer_info->time_stamp); | 235 | (unsigned long long)buffer_info->time_stamp); |
231 | 236 | ||
232 | /* Print TX Rings */ | 237 | /* Print Tx Ring */ |
233 | if (!netif_msg_tx_done(adapter)) | 238 | if (!netif_msg_tx_done(adapter)) |
234 | goto rx_ring_summary; | 239 | goto rx_ring_summary; |
235 | 240 | ||
236 | dev_info(&adapter->pdev->dev, "TX Rings Dump\n"); | 241 | dev_info(&adapter->pdev->dev, "Tx Ring Dump\n"); |
237 | 242 | ||
238 | /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended) | 243 | /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended) |
239 | * | 244 | * |
@@ -263,22 +268,22 @@ static void e1000e_dump(struct e1000_adapter *adapter) | |||
263 | * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 | 268 | * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 |
264 | */ | 269 | */ |
265 | printk(KERN_INFO "Tl[desc] [address 63:0 ] [SpeCssSCmCsLen]" | 270 | printk(KERN_INFO "Tl[desc] [address 63:0 ] [SpeCssSCmCsLen]" |
266 | " [bi->dma ] leng ntw timestamp bi->skb " | 271 | " [bi->dma ] leng ntw timestamp bi->skb " |
267 | "<-- Legacy format\n"); | 272 | "<-- Legacy format\n"); |
268 | printk(KERN_INFO "Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen]" | 273 | printk(KERN_INFO "Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen]" |
269 | " [bi->dma ] leng ntw timestamp bi->skb " | 274 | " [bi->dma ] leng ntw timestamp bi->skb " |
270 | "<-- Ext Context format\n"); | 275 | "<-- Ext Context format\n"); |
271 | printk(KERN_INFO "Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen]" | 276 | printk(KERN_INFO "Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen]" |
272 | " [bi->dma ] leng ntw timestamp bi->skb " | 277 | " [bi->dma ] leng ntw timestamp bi->skb " |
273 | "<-- Ext Data format\n"); | 278 | "<-- Ext Data format\n"); |
274 | for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { | 279 | for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { |
275 | tx_desc = E1000_TX_DESC(*tx_ring, i); | 280 | tx_desc = E1000_TX_DESC(*tx_ring, i); |
276 | buffer_info = &tx_ring->buffer_info[i]; | 281 | buffer_info = &tx_ring->buffer_info[i]; |
277 | u0 = (struct my_u0 *)tx_desc; | 282 | u0 = (struct my_u0 *)tx_desc; |
278 | printk(KERN_INFO "T%c[0x%03X] %016llX %016llX %016llX " | 283 | printk(KERN_INFO "T%c[0x%03X] %016llX %016llX %016llX " |
279 | "%04X %3X %016llX %p", | 284 | "%04X %3X %016llX %p", |
280 | (!(le64_to_cpu(u0->b) & (1<<29)) ? 'l' : | 285 | (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' : |
281 | ((le64_to_cpu(u0->b) & (1<<20)) ? 'd' : 'c')), i, | 286 | ((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')), i, |
282 | (unsigned long long)le64_to_cpu(u0->a), | 287 | (unsigned long long)le64_to_cpu(u0->a), |
283 | (unsigned long long)le64_to_cpu(u0->b), | 288 | (unsigned long long)le64_to_cpu(u0->b), |
284 | (unsigned long long)buffer_info->dma, | 289 | (unsigned long long)buffer_info->dma, |
@@ -296,22 +301,22 @@ static void e1000e_dump(struct e1000_adapter *adapter) | |||
296 | 301 | ||
297 | if (netif_msg_pktdata(adapter) && buffer_info->dma != 0) | 302 | if (netif_msg_pktdata(adapter) && buffer_info->dma != 0) |
298 | print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, | 303 | print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, |
299 | 16, 1, phys_to_virt(buffer_info->dma), | 304 | 16, 1, phys_to_virt(buffer_info->dma), |
300 | buffer_info->length, true); | 305 | buffer_info->length, true); |
301 | } | 306 | } |
302 | 307 | ||
303 | /* Print RX Rings Summary */ | 308 | /* Print Rx Ring Summary */ |
304 | rx_ring_summary: | 309 | rx_ring_summary: |
305 | dev_info(&adapter->pdev->dev, "RX Rings Summary\n"); | 310 | dev_info(&adapter->pdev->dev, "Rx Ring Summary\n"); |
306 | printk(KERN_INFO "Queue [NTU] [NTC]\n"); | 311 | printk(KERN_INFO "Queue [NTU] [NTC]\n"); |
307 | printk(KERN_INFO " %5d %5X %5X\n", 0, | 312 | printk(KERN_INFO " %5d %5X %5X\n", 0, |
308 | rx_ring->next_to_use, rx_ring->next_to_clean); | 313 | rx_ring->next_to_use, rx_ring->next_to_clean); |
309 | 314 | ||
310 | /* Print RX Rings */ | 315 | /* Print Rx Ring */ |
311 | if (!netif_msg_rx_status(adapter)) | 316 | if (!netif_msg_rx_status(adapter)) |
312 | goto exit; | 317 | goto exit; |
313 | 318 | ||
314 | dev_info(&adapter->pdev->dev, "RX Rings Dump\n"); | 319 | dev_info(&adapter->pdev->dev, "Rx Ring Dump\n"); |
315 | switch (adapter->rx_ps_pages) { | 320 | switch (adapter->rx_ps_pages) { |
316 | case 1: | 321 | case 1: |
317 | case 2: | 322 | case 2: |
@@ -329,7 +334,7 @@ rx_ring_summary: | |||
329 | * +-----------------------------------------------------+ | 334 | * +-----------------------------------------------------+ |
330 | */ | 335 | */ |
331 | printk(KERN_INFO "R [desc] [buffer 0 63:0 ] " | 336 | printk(KERN_INFO "R [desc] [buffer 0 63:0 ] " |
332 | "[buffer 1 63:0 ] " | 337 | "[buffer 1 63:0 ] " |
333 | "[buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] " | 338 | "[buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] " |
334 | "[bi->skb] <-- Ext Pkt Split format\n"); | 339 | "[bi->skb] <-- Ext Pkt Split format\n"); |
335 | /* [Extended] Receive Descriptor (Write-Back) Format | 340 | /* [Extended] Receive Descriptor (Write-Back) Format |
@@ -344,7 +349,7 @@ rx_ring_summary: | |||
344 | * 63 48 47 32 31 20 19 0 | 349 | * 63 48 47 32 31 20 19 0 |
345 | */ | 350 | */ |
346 | printk(KERN_INFO "RWB[desc] [ck ipid mrqhsh] " | 351 | printk(KERN_INFO "RWB[desc] [ck ipid mrqhsh] " |
347 | "[vl l0 ee es] " | 352 | "[vl l0 ee es] " |
348 | "[ l3 l2 l1 hs] [reserved ] ---------------- " | 353 | "[ l3 l2 l1 hs] [reserved ] ---------------- " |
349 | "[bi->skb] <-- Ext Rx Write-Back format\n"); | 354 | "[bi->skb] <-- Ext Rx Write-Back format\n"); |
350 | for (i = 0; i < rx_ring->count; i++) { | 355 | for (i = 0; i < rx_ring->count; i++) { |
@@ -352,26 +357,26 @@ rx_ring_summary: | |||
352 | rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i); | 357 | rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i); |
353 | u1 = (struct my_u1 *)rx_desc_ps; | 358 | u1 = (struct my_u1 *)rx_desc_ps; |
354 | staterr = | 359 | staterr = |
355 | le32_to_cpu(rx_desc_ps->wb.middle.status_error); | 360 | le32_to_cpu(rx_desc_ps->wb.middle.status_error); |
356 | if (staterr & E1000_RXD_STAT_DD) { | 361 | if (staterr & E1000_RXD_STAT_DD) { |
357 | /* Descriptor Done */ | 362 | /* Descriptor Done */ |
358 | printk(KERN_INFO "RWB[0x%03X] %016llX " | 363 | printk(KERN_INFO "RWB[0x%03X] %016llX " |
359 | "%016llX %016llX %016llX " | 364 | "%016llX %016llX %016llX " |
360 | "---------------- %p", i, | 365 | "---------------- %p", i, |
361 | (unsigned long long)le64_to_cpu(u1->a), | 366 | (unsigned long long)le64_to_cpu(u1->a), |
362 | (unsigned long long)le64_to_cpu(u1->b), | 367 | (unsigned long long)le64_to_cpu(u1->b), |
363 | (unsigned long long)le64_to_cpu(u1->c), | 368 | (unsigned long long)le64_to_cpu(u1->c), |
364 | (unsigned long long)le64_to_cpu(u1->d), | 369 | (unsigned long long)le64_to_cpu(u1->d), |
365 | buffer_info->skb); | 370 | buffer_info->skb); |
366 | } else { | 371 | } else { |
367 | printk(KERN_INFO "R [0x%03X] %016llX " | 372 | printk(KERN_INFO "R [0x%03X] %016llX " |
368 | "%016llX %016llX %016llX %016llX %p", i, | 373 | "%016llX %016llX %016llX %016llX %p", i, |
369 | (unsigned long long)le64_to_cpu(u1->a), | 374 | (unsigned long long)le64_to_cpu(u1->a), |
370 | (unsigned long long)le64_to_cpu(u1->b), | 375 | (unsigned long long)le64_to_cpu(u1->b), |
371 | (unsigned long long)le64_to_cpu(u1->c), | 376 | (unsigned long long)le64_to_cpu(u1->c), |
372 | (unsigned long long)le64_to_cpu(u1->d), | 377 | (unsigned long long)le64_to_cpu(u1->d), |
373 | (unsigned long long)buffer_info->dma, | 378 | (unsigned long long)buffer_info->dma, |
374 | buffer_info->skb); | 379 | buffer_info->skb); |
375 | 380 | ||
376 | if (netif_msg_pktdata(adapter)) | 381 | if (netif_msg_pktdata(adapter)) |
377 | print_hex_dump(KERN_INFO, "", | 382 | print_hex_dump(KERN_INFO, "", |
@@ -400,18 +405,18 @@ rx_ring_summary: | |||
400 | * 63 48 47 40 39 32 31 16 15 0 | 405 | * 63 48 47 40 39 32 31 16 15 0 |
401 | */ | 406 | */ |
402 | printk(KERN_INFO "Rl[desc] [address 63:0 ] " | 407 | printk(KERN_INFO "Rl[desc] [address 63:0 ] " |
403 | "[vl er S cks ln] [bi->dma ] [bi->skb] " | 408 | "[vl er S cks ln] [bi->dma ] [bi->skb] " |
404 | "<-- Legacy format\n"); | 409 | "<-- Legacy format\n"); |
405 | for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) { | 410 | for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) { |
406 | rx_desc = E1000_RX_DESC(*rx_ring, i); | 411 | rx_desc = E1000_RX_DESC(*rx_ring, i); |
407 | buffer_info = &rx_ring->buffer_info[i]; | 412 | buffer_info = &rx_ring->buffer_info[i]; |
408 | u0 = (struct my_u0 *)rx_desc; | 413 | u0 = (struct my_u0 *)rx_desc; |
409 | printk(KERN_INFO "Rl[0x%03X] %016llX %016llX " | 414 | printk(KERN_INFO "Rl[0x%03X] %016llX %016llX " |
410 | "%016llX %p", i, | 415 | "%016llX %p", i, |
411 | (unsigned long long)le64_to_cpu(u0->a), | 416 | (unsigned long long)le64_to_cpu(u0->a), |
412 | (unsigned long long)le64_to_cpu(u0->b), | 417 | (unsigned long long)le64_to_cpu(u0->b), |
413 | (unsigned long long)buffer_info->dma, | 418 | (unsigned long long)buffer_info->dma, |
414 | buffer_info->skb); | 419 | buffer_info->skb); |
415 | if (i == rx_ring->next_to_use) | 420 | if (i == rx_ring->next_to_use) |
416 | printk(KERN_CONT " NTU\n"); | 421 | printk(KERN_CONT " NTU\n"); |
417 | else if (i == rx_ring->next_to_clean) | 422 | else if (i == rx_ring->next_to_clean) |
@@ -421,9 +426,10 @@ rx_ring_summary: | |||
421 | 426 | ||
422 | if (netif_msg_pktdata(adapter)) | 427 | if (netif_msg_pktdata(adapter)) |
423 | print_hex_dump(KERN_INFO, "", | 428 | print_hex_dump(KERN_INFO, "", |
424 | DUMP_PREFIX_ADDRESS, | 429 | DUMP_PREFIX_ADDRESS, |
425 | 16, 1, phys_to_virt(buffer_info->dma), | 430 | 16, 1, |
426 | adapter->rx_buffer_len, true); | 431 | phys_to_virt(buffer_info->dma), |
432 | adapter->rx_buffer_len, true); | ||
427 | } | 433 | } |
428 | } | 434 | } |
429 | 435 | ||
@@ -450,8 +456,7 @@ static int e1000_desc_unused(struct e1000_ring *ring) | |||
450 | * @skb: pointer to sk_buff to be indicated to stack | 456 | * @skb: pointer to sk_buff to be indicated to stack |
451 | **/ | 457 | **/ |
452 | static void e1000_receive_skb(struct e1000_adapter *adapter, | 458 | static void e1000_receive_skb(struct e1000_adapter *adapter, |
453 | struct net_device *netdev, | 459 | struct net_device *netdev, struct sk_buff *skb, |
454 | struct sk_buff *skb, | ||
455 | u8 status, __le16 vlan) | 460 | u8 status, __le16 vlan) |
456 | { | 461 | { |
457 | skb->protocol = eth_type_trans(skb, netdev); | 462 | skb->protocol = eth_type_trans(skb, netdev); |
@@ -464,7 +469,7 @@ static void e1000_receive_skb(struct e1000_adapter *adapter, | |||
464 | } | 469 | } |
465 | 470 | ||
466 | /** | 471 | /** |
467 | * e1000_rx_checksum - Receive Checksum Offload for 82543 | 472 | * e1000_rx_checksum - Receive Checksum Offload |
468 | * @adapter: board private structure | 473 | * @adapter: board private structure |
469 | * @status_err: receive descriptor status and error fields | 474 | * @status_err: receive descriptor status and error fields |
470 | * @csum: receive descriptor csum field | 475 | * @csum: receive descriptor csum field |
@@ -548,7 +553,7 @@ map_skb: | |||
548 | adapter->rx_buffer_len, | 553 | adapter->rx_buffer_len, |
549 | DMA_FROM_DEVICE); | 554 | DMA_FROM_DEVICE); |
550 | if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { | 555 | if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { |
551 | dev_err(&pdev->dev, "RX DMA map failed\n"); | 556 | dev_err(&pdev->dev, "Rx DMA map failed\n"); |
552 | adapter->rx_dma_failed++; | 557 | adapter->rx_dma_failed++; |
553 | break; | 558 | break; |
554 | } | 559 | } |
@@ -601,7 +606,8 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, | |||
601 | ps_page = &buffer_info->ps_pages[j]; | 606 | ps_page = &buffer_info->ps_pages[j]; |
602 | if (j >= adapter->rx_ps_pages) { | 607 | if (j >= adapter->rx_ps_pages) { |
603 | /* all unused desc entries get hw null ptr */ | 608 | /* all unused desc entries get hw null ptr */ |
604 | rx_desc->read.buffer_addr[j+1] = ~cpu_to_le64(0); | 609 | rx_desc->read.buffer_addr[j + 1] = |
610 | ~cpu_to_le64(0); | ||
605 | continue; | 611 | continue; |
606 | } | 612 | } |
607 | if (!ps_page->page) { | 613 | if (!ps_page->page) { |
@@ -617,7 +623,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, | |||
617 | if (dma_mapping_error(&pdev->dev, | 623 | if (dma_mapping_error(&pdev->dev, |
618 | ps_page->dma)) { | 624 | ps_page->dma)) { |
619 | dev_err(&adapter->pdev->dev, | 625 | dev_err(&adapter->pdev->dev, |
620 | "RX DMA page map failed\n"); | 626 | "Rx DMA page map failed\n"); |
621 | adapter->rx_dma_failed++; | 627 | adapter->rx_dma_failed++; |
622 | goto no_buffers; | 628 | goto no_buffers; |
623 | } | 629 | } |
@@ -627,8 +633,8 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, | |||
627 | * didn't change because each write-back | 633 | * didn't change because each write-back |
628 | * erases this info. | 634 | * erases this info. |
629 | */ | 635 | */ |
630 | rx_desc->read.buffer_addr[j+1] = | 636 | rx_desc->read.buffer_addr[j + 1] = |
631 | cpu_to_le64(ps_page->dma); | 637 | cpu_to_le64(ps_page->dma); |
632 | } | 638 | } |
633 | 639 | ||
634 | skb = netdev_alloc_skb_ip_align(netdev, | 640 | skb = netdev_alloc_skb_ip_align(netdev, |
@@ -644,7 +650,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, | |||
644 | adapter->rx_ps_bsize0, | 650 | adapter->rx_ps_bsize0, |
645 | DMA_FROM_DEVICE); | 651 | DMA_FROM_DEVICE); |
646 | if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { | 652 | if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { |
647 | dev_err(&pdev->dev, "RX DMA map failed\n"); | 653 | dev_err(&pdev->dev, "Rx DMA map failed\n"); |
648 | adapter->rx_dma_failed++; | 654 | adapter->rx_dma_failed++; |
649 | /* cleanup skb */ | 655 | /* cleanup skb */ |
650 | dev_kfree_skb_any(skb); | 656 | dev_kfree_skb_any(skb); |
@@ -662,7 +668,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, | |||
662 | * such as IA-64). | 668 | * such as IA-64). |
663 | */ | 669 | */ |
664 | wmb(); | 670 | wmb(); |
665 | writel(i<<1, adapter->hw.hw_addr + rx_ring->tail); | 671 | writel(i << 1, adapter->hw.hw_addr + rx_ring->tail); |
666 | } | 672 | } |
667 | 673 | ||
668 | i++; | 674 | i++; |
@@ -931,6 +937,9 @@ static void e1000_print_hw_hang(struct work_struct *work) | |||
931 | u16 phy_status, phy_1000t_status, phy_ext_status; | 937 | u16 phy_status, phy_1000t_status, phy_ext_status; |
932 | u16 pci_status; | 938 | u16 pci_status; |
933 | 939 | ||
940 | if (test_bit(__E1000_DOWN, &adapter->state)) | ||
941 | return; | ||
942 | |||
934 | e1e_rphy(hw, PHY_STATUS, &phy_status); | 943 | e1e_rphy(hw, PHY_STATUS, &phy_status); |
935 | e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status); | 944 | e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status); |
936 | e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status); | 945 | e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status); |
@@ -1106,11 +1115,10 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | |||
1106 | cleaned = 1; | 1115 | cleaned = 1; |
1107 | cleaned_count++; | 1116 | cleaned_count++; |
1108 | dma_unmap_single(&pdev->dev, buffer_info->dma, | 1117 | dma_unmap_single(&pdev->dev, buffer_info->dma, |
1109 | adapter->rx_ps_bsize0, | 1118 | adapter->rx_ps_bsize0, DMA_FROM_DEVICE); |
1110 | DMA_FROM_DEVICE); | ||
1111 | buffer_info->dma = 0; | 1119 | buffer_info->dma = 0; |
1112 | 1120 | ||
1113 | /* see !EOP comment in other rx routine */ | 1121 | /* see !EOP comment in other Rx routine */ |
1114 | if (!(staterr & E1000_RXD_STAT_EOP)) | 1122 | if (!(staterr & E1000_RXD_STAT_EOP)) |
1115 | adapter->flags2 |= FLAG2_IS_DISCARDING; | 1123 | adapter->flags2 |= FLAG2_IS_DISCARDING; |
1116 | 1124 | ||
@@ -1501,6 +1509,9 @@ static void e1000e_downshift_workaround(struct work_struct *work) | |||
1501 | struct e1000_adapter *adapter = container_of(work, | 1509 | struct e1000_adapter *adapter = container_of(work, |
1502 | struct e1000_adapter, downshift_task); | 1510 | struct e1000_adapter, downshift_task); |
1503 | 1511 | ||
1512 | if (test_bit(__E1000_DOWN, &adapter->state)) | ||
1513 | return; | ||
1514 | |||
1504 | e1000e_gig_downshift_workaround_ich8lan(&adapter->hw); | 1515 | e1000e_gig_downshift_workaround_ich8lan(&adapter->hw); |
1505 | } | 1516 | } |
1506 | 1517 | ||
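The two small hunks above add the same guard to deferred work handlers: bail out early if the adapter is being torn down, so a queued task never touches the PHY of a device that is mid-shutdown. Sketch of the pattern (types and flags from the driver headers; body illustrative):

	static void some_workaround_task(struct work_struct *work)
	{
		struct e1000_adapter *adapter =
			container_of(work, struct e1000_adapter, downshift_task);

		if (test_bit(__E1000_DOWN, &adapter->state))
			return;		/* interface going down; skip register access */

		/* ... safe to touch the hardware here ... */
	}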
@@ -1980,15 +1991,15 @@ static void e1000_irq_enable(struct e1000_adapter *adapter) | |||
1980 | } | 1991 | } |
1981 | 1992 | ||
1982 | /** | 1993 | /** |
1983 | * e1000_get_hw_control - get control of the h/w from f/w | 1994 | * e1000e_get_hw_control - get control of the h/w from f/w |
1984 | * @adapter: address of board private structure | 1995 | * @adapter: address of board private structure |
1985 | * | 1996 | * |
1986 | * e1000_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit. | 1997 | * e1000e_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit. |
1987 | * For ASF and Pass Through versions of f/w this means that | 1998 | * For ASF and Pass Through versions of f/w this means that |
1988 | * the driver is loaded. For AMT version (only with 82573) | 1999 | * the driver is loaded. For AMT version (only with 82573) |
1989 | * of the f/w this means that the network i/f is open. | 2000 | * of the f/w this means that the network i/f is open. |
1990 | **/ | 2001 | **/ |
1991 | static void e1000_get_hw_control(struct e1000_adapter *adapter) | 2002 | void e1000e_get_hw_control(struct e1000_adapter *adapter) |
1992 | { | 2003 | { |
1993 | struct e1000_hw *hw = &adapter->hw; | 2004 | struct e1000_hw *hw = &adapter->hw; |
1994 | u32 ctrl_ext; | 2005 | u32 ctrl_ext; |
@@ -2005,16 +2016,16 @@ static void e1000_get_hw_control(struct e1000_adapter *adapter) | |||
2005 | } | 2016 | } |
2006 | 2017 | ||
2007 | /** | 2018 | /** |
2008 | * e1000_release_hw_control - release control of the h/w to f/w | 2019 | * e1000e_release_hw_control - release control of the h/w to f/w |
2009 | * @adapter: address of board private structure | 2020 | * @adapter: address of board private structure |
2010 | * | 2021 | * |
2011 | * e1000_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit. | 2022 | * e1000e_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit. |
2012 | * For ASF and Pass Through versions of f/w this means that the | 2023 | * For ASF and Pass Through versions of f/w this means that the |
2013 | * driver is no longer loaded. For AMT version (only with 82573) i | 2024 | * driver is no longer loaded. For AMT version (only with 82573) i |
2014 | * of the f/w this means that the network i/f is closed. | 2025 | * of the f/w this means that the network i/f is closed. |
2015 | * | 2026 | * |
2016 | **/ | 2027 | **/ |
2017 | static void e1000_release_hw_control(struct e1000_adapter *adapter) | 2028 | void e1000e_release_hw_control(struct e1000_adapter *adapter) |
2018 | { | 2029 | { |
2019 | struct e1000_hw *hw = &adapter->hw; | 2030 | struct e1000_hw *hw = &adapter->hw; |
2020 | u32 ctrl_ext; | 2031 | u32 ctrl_ext; |
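These hunks rename the pair to the exported e1000e_* form (matching the declarations added to e1000.h earlier in the diff) so that the ethtool self-test can call them. As the comments describe, the functions toggle the DRV_LOAD handshake bit so ASF/AMT firmware knows whether the OS driver owns the device. A rough sketch of the CTRL_EXT path only; 82573-class parts use the SWSM register instead, and the bodies here are illustrative:

	void e1000e_get_hw_control(struct e1000_adapter *adapter)
	{
		struct e1000_hw *hw = &adapter->hw;
		u32 ctrl_ext = er32(CTRL_EXT);

		ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);	/* driver owns h/w */
	}

	void e1000e_release_hw_control(struct e1000_adapter *adapter)
	{
		struct e1000_hw *hw = &adapter->hw;
		u32 ctrl_ext = er32(CTRL_EXT);

		ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);	/* f/w owns h/w */
	}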
@@ -2445,7 +2456,7 @@ static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) | |||
2445 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && | 2456 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && |
2446 | (vid == adapter->mng_vlan_id)) { | 2457 | (vid == adapter->mng_vlan_id)) { |
2447 | /* release control to f/w */ | 2458 | /* release control to f/w */ |
2448 | e1000_release_hw_control(adapter); | 2459 | e1000e_release_hw_control(adapter); |
2449 | return; | 2460 | return; |
2450 | } | 2461 | } |
2451 | 2462 | ||
@@ -2610,7 +2621,7 @@ static void e1000_init_manageability_pt(struct e1000_adapter *adapter) | |||
2610 | } | 2621 | } |
2611 | 2622 | ||
2612 | /** | 2623 | /** |
2613 | * e1000_configure_tx - Configure 8254x Transmit Unit after Reset | 2624 | * e1000_configure_tx - Configure Transmit Unit after Reset |
2614 | * @adapter: board private structure | 2625 | * @adapter: board private structure |
2615 | * | 2626 | * |
2616 | * Configure the Tx unit of the MAC after a reset. | 2627 | * Configure the Tx unit of the MAC after a reset. |
@@ -2663,7 +2674,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) | |||
2663 | * hthresh = 1 ==> prefetch when one or more available | 2674 | * hthresh = 1 ==> prefetch when one or more available |
2664 | * pthresh = 0x1f ==> prefetch if internal cache 31 or less | 2675 | * pthresh = 0x1f ==> prefetch if internal cache 31 or less |
2665 | * BEWARE: this seems to work but should be considered first if | 2676 | * BEWARE: this seems to work but should be considered first if |
2666 | * there are tx hangs or other tx related bugs | 2677 | * there are Tx hangs or other Tx related bugs |
2667 | */ | 2678 | */ |
2668 | txdctl |= E1000_TXDCTL_DMA_BURST_ENABLE; | 2679 | txdctl |= E1000_TXDCTL_DMA_BURST_ENABLE; |
2669 | ew32(TXDCTL(0), txdctl); | 2680 | ew32(TXDCTL(0), txdctl); |
@@ -2734,6 +2745,9 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) | |||
2734 | ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true); | 2745 | ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true); |
2735 | else | 2746 | else |
2736 | ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false); | 2747 | ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false); |
2748 | |||
2749 | if (ret_val) | ||
2750 | e_dbg("failed to enable jumbo frame workaround mode\n"); | ||
2737 | } | 2751 | } |
2738 | 2752 | ||
2739 | /* Program MC offset vector base */ | 2753 | /* Program MC offset vector base */ |
@@ -2874,7 +2888,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) | |||
2874 | if (adapter->rx_ps_pages) { | 2888 | if (adapter->rx_ps_pages) { |
2875 | /* this is a 32 byte descriptor */ | 2889 | /* this is a 32 byte descriptor */ |
2876 | rdlen = rx_ring->count * | 2890 | rdlen = rx_ring->count * |
2877 | sizeof(union e1000_rx_desc_packet_split); | 2891 | sizeof(union e1000_rx_desc_packet_split); |
2878 | adapter->clean_rx = e1000_clean_rx_irq_ps; | 2892 | adapter->clean_rx = e1000_clean_rx_irq_ps; |
2879 | adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps; | 2893 | adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps; |
2880 | } else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) { | 2894 | } else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) { |
@@ -2897,7 +2911,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) | |||
2897 | /* | 2911 | /* |
2898 | * set the writeback threshold (only takes effect if the RDTR | 2912 | * set the writeback threshold (only takes effect if the RDTR |
2899 | * is set). set GRAN=1 and write back up to 0x4 worth, and | 2913 | * is set). set GRAN=1 and write back up to 0x4 worth, and |
2900 | * enable prefetching of 0x20 rx descriptors | 2914 | * enable prefetching of 0x20 Rx descriptors |
2901 | * granularity = 01 | 2915 | * granularity = 01 |
2902 | * wthresh = 04, | 2916 | * wthresh = 04, |
2903 | * hthresh = 04, | 2917 | * hthresh = 04, |
@@ -2978,12 +2992,10 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) | |||
2978 | * excessive C-state transition latencies result in | 2992 | * excessive C-state transition latencies result in |
2979 | * dropped transactions. | 2993 | * dropped transactions. |
2980 | */ | 2994 | */ |
2981 | pm_qos_update_request( | 2995 | pm_qos_update_request(&adapter->netdev->pm_qos_req, 55); |
2982 | &adapter->netdev->pm_qos_req, 55); | ||
2983 | } else { | 2996 | } else { |
2984 | pm_qos_update_request( | 2997 | pm_qos_update_request(&adapter->netdev->pm_qos_req, |
2985 | &adapter->netdev->pm_qos_req, | 2998 | PM_QOS_DEFAULT_VALUE); |
2986 | PM_QOS_DEFAULT_VALUE); | ||
2987 | } | 2999 | } |
2988 | } | 3000 | } |
2989 | 3001 | ||
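[Editorial note, not part of the commit] The reflowed calls above update a CPU DMA latency request while jumbo frames are in use, so deep C-state transitions cannot stall receive DMA. A hedged sketch of the request lifecycle as the PM QoS API looked in kernels of this vintage; jumbo_qos_req is an illustrative stand-in for the netdev's pm_qos_req member:

	static struct pm_qos_request_list jumbo_qos_req;	/* illustrative holder */

	/* at init: start with no constraint */
	pm_qos_add_request(&jumbo_qos_req, PM_QOS_CPU_DMA_LATENCY,
			   PM_QOS_DEFAULT_VALUE);

	/* jumbo Rx enabled: cap C-state exit latency at 55 usec */
	pm_qos_update_request(&jumbo_qos_req, 55);

	/* back to a normal MTU: relax the constraint again */
	pm_qos_update_request(&jumbo_qos_req, PM_QOS_DEFAULT_VALUE);

	/* at teardown */
	pm_qos_remove_request(&jumbo_qos_req);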
@@ -3149,7 +3161,7 @@ void e1000e_reset(struct e1000_adapter *adapter) | |||
3149 | /* lower 16 bits has Rx packet buffer allocation size in KB */ | 3161 | /* lower 16 bits has Rx packet buffer allocation size in KB */ |
3150 | pba &= 0xffff; | 3162 | pba &= 0xffff; |
3151 | /* | 3163 | /* |
3152 | * the Tx fifo also stores 16 bytes of information about the tx | 3164 | * the Tx fifo also stores 16 bytes of information about the Tx |
3153 | * but don't include ethernet FCS because hardware appends it | 3165 | * but don't include ethernet FCS because hardware appends it |
3154 | */ | 3166 | */ |
3155 | min_tx_space = (adapter->max_frame_size + | 3167 | min_tx_space = (adapter->max_frame_size + |
@@ -3172,7 +3184,7 @@ void e1000e_reset(struct e1000_adapter *adapter) | |||
3172 | pba -= min_tx_space - tx_space; | 3184 | pba -= min_tx_space - tx_space; |
3173 | 3185 | ||
3174 | /* | 3186 | /* |
3175 | * if short on Rx space, Rx wins and must trump tx | 3187 | * if short on Rx space, Rx wins and must trump Tx |
3176 | * adjustment or use Early Receive if available | 3188 | * adjustment or use Early Receive if available |
3177 | */ | 3189 | */ |
3178 | if ((pba < min_rx_space) && | 3190 | if ((pba < min_rx_space) && |
@@ -3184,7 +3196,6 @@ void e1000e_reset(struct e1000_adapter *adapter) | |||
3184 | ew32(PBA, pba); | 3196 | ew32(PBA, pba); |
3185 | } | 3197 | } |
3186 | 3198 | ||
3187 | |||
3188 | /* | 3199 | /* |
3189 | * flow control settings | 3200 | * flow control settings |
3190 | * | 3201 | * |
@@ -3272,7 +3283,7 @@ void e1000e_reset(struct e1000_adapter *adapter) | |||
3272 | * that the network interface is in control | 3283 | * that the network interface is in control |
3273 | */ | 3284 | */ |
3274 | if (adapter->flags & FLAG_HAS_AMT) | 3285 | if (adapter->flags & FLAG_HAS_AMT) |
3275 | e1000_get_hw_control(adapter); | 3286 | e1000e_get_hw_control(adapter); |
3276 | 3287 | ||
3277 | ew32(WUC, 0); | 3288 | ew32(WUC, 0); |
3278 | 3289 | ||
@@ -3285,6 +3296,13 @@ void e1000e_reset(struct e1000_adapter *adapter) | |||
3285 | ew32(VET, ETH_P_8021Q); | 3296 | ew32(VET, ETH_P_8021Q); |
3286 | 3297 | ||
3287 | e1000e_reset_adaptive(hw); | 3298 | e1000e_reset_adaptive(hw); |
3299 | |||
3300 | if (!netif_running(adapter->netdev) && | ||
3301 | !test_bit(__E1000_TESTING, &adapter->state)) { | ||
3302 | e1000_power_down_phy(adapter); | ||
3303 | return; | ||
3304 | } | ||
3305 | |||
3288 | e1000_get_phy_info(hw); | 3306 | e1000_get_phy_info(hw); |
3289 | 3307 | ||
3290 | if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) && | 3308 | if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) && |
@@ -3326,6 +3344,21 @@ int e1000e_up(struct e1000_adapter *adapter) | |||
3326 | return 0; | 3344 | return 0; |
3327 | } | 3345 | } |
3328 | 3346 | ||
3347 | static void e1000e_flush_descriptors(struct e1000_adapter *adapter) | ||
3348 | { | ||
3349 | struct e1000_hw *hw = &adapter->hw; | ||
3350 | |||
3351 | if (!(adapter->flags2 & FLAG2_DMA_BURST)) | ||
3352 | return; | ||
3353 | |||
3354 | /* flush pending descriptor writebacks to memory */ | ||
3355 | ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); | ||
3356 | ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD); | ||
3357 | |||
3358 | /* execute the writes immediately */ | ||
3359 | e1e_flush(); | ||
3360 | } | ||
3361 | |||
3329 | void e1000e_down(struct e1000_adapter *adapter) | 3362 | void e1000e_down(struct e1000_adapter *adapter) |
3330 | { | 3363 | { |
3331 | struct net_device *netdev = adapter->netdev; | 3364 | struct net_device *netdev = adapter->netdev; |
@@ -3365,6 +3398,9 @@ void e1000e_down(struct e1000_adapter *adapter) | |||
3365 | 3398 | ||
3366 | if (!pci_channel_offline(adapter->pdev)) | 3399 | if (!pci_channel_offline(adapter->pdev)) |
3367 | e1000e_reset(adapter); | 3400 | e1000e_reset(adapter); |
3401 | |||
3402 | e1000e_flush_descriptors(adapter); | ||
3403 | |||
3368 | e1000_clean_tx_ring(adapter); | 3404 | e1000_clean_tx_ring(adapter); |
3369 | e1000_clean_rx_ring(adapter); | 3405 | e1000_clean_rx_ring(adapter); |
3370 | 3406 | ||
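[Editorial note, not part of the commit] With the e1000e_flush_descriptors() helper added earlier in this patch, e1000e_down() forces any coalesced descriptor writebacks out to memory before the rings are walked and freed. In simplified form, the teardown order this hunk establishes is:

	if (!pci_channel_offline(adapter->pdev))
		e1000e_reset(adapter);		/* stop the MAC/DMA */

	e1000e_flush_descriptors(adapter);	/* FPD: push pending writebacks */

	e1000_clean_tx_ring(adapter);		/* rings are now consistent */
	e1000_clean_rx_ring(adapter);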
@@ -3570,7 +3606,7 @@ static int e1000_open(struct net_device *netdev) | |||
3570 | * interface is now open and reset the part to a known state. | 3606 | * interface is now open and reset the part to a known state. |
3571 | */ | 3607 | */ |
3572 | if (adapter->flags & FLAG_HAS_AMT) { | 3608 | if (adapter->flags & FLAG_HAS_AMT) { |
3573 | e1000_get_hw_control(adapter); | 3609 | e1000e_get_hw_control(adapter); |
3574 | e1000e_reset(adapter); | 3610 | e1000e_reset(adapter); |
3575 | } | 3611 | } |
3576 | 3612 | ||
@@ -3634,7 +3670,7 @@ static int e1000_open(struct net_device *netdev) | |||
3634 | return 0; | 3670 | return 0; |
3635 | 3671 | ||
3636 | err_req_irq: | 3672 | err_req_irq: |
3637 | e1000_release_hw_control(adapter); | 3673 | e1000e_release_hw_control(adapter); |
3638 | e1000_power_down_phy(adapter); | 3674 | e1000_power_down_phy(adapter); |
3639 | e1000e_free_rx_resources(adapter); | 3675 | e1000e_free_rx_resources(adapter); |
3640 | err_setup_rx: | 3676 | err_setup_rx: |
@@ -3689,8 +3725,9 @@ static int e1000_close(struct net_device *netdev) | |||
3689 | * If AMT is enabled, let the firmware know that the network | 3725 | * If AMT is enabled, let the firmware know that the network |
3690 | * interface is now closed | 3726 | * interface is now closed |
3691 | */ | 3727 | */ |
3692 | if (adapter->flags & FLAG_HAS_AMT) | 3728 | if ((adapter->flags & FLAG_HAS_AMT) && |
3693 | e1000_release_hw_control(adapter); | 3729 | !test_bit(__E1000_TESTING, &adapter->state)) |
3730 | e1000e_release_hw_control(adapter); | ||
3694 | 3731 | ||
3695 | if ((adapter->flags & FLAG_HAS_ERT) || | 3732 | if ((adapter->flags & FLAG_HAS_ERT) || |
3696 | (adapter->hw.mac.type == e1000_pch2lan)) | 3733 | (adapter->hw.mac.type == e1000_pch2lan)) |
@@ -3752,6 +3789,10 @@ static void e1000e_update_phy_task(struct work_struct *work) | |||
3752 | { | 3789 | { |
3753 | struct e1000_adapter *adapter = container_of(work, | 3790 | struct e1000_adapter *adapter = container_of(work, |
3754 | struct e1000_adapter, update_phy_task); | 3791 | struct e1000_adapter, update_phy_task); |
3792 | |||
3793 | if (test_bit(__E1000_DOWN, &adapter->state)) | ||
3794 | return; | ||
3795 | |||
3755 | e1000_get_phy_info(&adapter->hw); | 3796 | e1000_get_phy_info(&adapter->hw); |
3756 | } | 3797 | } |
3757 | 3798 | ||
@@ -3762,6 +3803,10 @@ static void e1000e_update_phy_task(struct work_struct *work) | |||
3762 | static void e1000_update_phy_info(unsigned long data) | 3803 | static void e1000_update_phy_info(unsigned long data) |
3763 | { | 3804 | { |
3764 | struct e1000_adapter *adapter = (struct e1000_adapter *) data; | 3805 | struct e1000_adapter *adapter = (struct e1000_adapter *) data; |
3806 | |||
3807 | if (test_bit(__E1000_DOWN, &adapter->state)) | ||
3808 | return; | ||
3809 | |||
3765 | schedule_work(&adapter->update_phy_task); | 3810 | schedule_work(&adapter->update_phy_task); |
3766 | } | 3811 | } |
3767 | 3812 | ||
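[Editorial note, not part of the commit] The same early-return guard is added to the PHY update timer and work item here, and to the watchdog and reset tasks below: deferred contexts test __E1000_DOWN before touching the adapter, since they can still fire while e1000e_down() is tearing the interface apart. The pattern, sketched with a made-up handler name:

	static void some_deferred_task(struct work_struct *work)
	{
		struct e1000_adapter *adapter =
			container_of(work, struct e1000_adapter, update_phy_task);

		if (test_bit(__E1000_DOWN, &adapter->state))
			return;		/* adapter is going down, don't touch h/w */

		/* ... normal deferred work ... */
	}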
@@ -4029,11 +4074,11 @@ static void e1000_print_link_info(struct e1000_adapter *adapter) | |||
4029 | adapter->netdev->name, | 4074 | adapter->netdev->name, |
4030 | adapter->link_speed, | 4075 | adapter->link_speed, |
4031 | (adapter->link_duplex == FULL_DUPLEX) ? | 4076 | (adapter->link_duplex == FULL_DUPLEX) ? |
4032 | "Full Duplex" : "Half Duplex", | 4077 | "Full Duplex" : "Half Duplex", |
4033 | ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ? | 4078 | ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ? |
4034 | "RX/TX" : | 4079 | "Rx/Tx" : |
4035 | ((ctrl & E1000_CTRL_RFCE) ? "RX" : | 4080 | ((ctrl & E1000_CTRL_RFCE) ? "Rx" : |
4036 | ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None" ))); | 4081 | ((ctrl & E1000_CTRL_TFCE) ? "Tx" : "None"))); |
4037 | } | 4082 | } |
4038 | 4083 | ||
4039 | static bool e1000e_has_link(struct e1000_adapter *adapter) | 4084 | static bool e1000e_has_link(struct e1000_adapter *adapter) |
@@ -4136,6 +4181,9 @@ static void e1000_watchdog_task(struct work_struct *work) | |||
4136 | u32 link, tctl; | 4181 | u32 link, tctl; |
4137 | int tx_pending = 0; | 4182 | int tx_pending = 0; |
4138 | 4183 | ||
4184 | if (test_bit(__E1000_DOWN, &adapter->state)) | ||
4185 | return; | ||
4186 | |||
4139 | link = e1000e_has_link(adapter); | 4187 | link = e1000e_has_link(adapter); |
4140 | if ((netif_carrier_ok(netdev)) && link) { | 4188 | if ((netif_carrier_ok(netdev)) && link) { |
4141 | /* Cancel scheduled suspend requests. */ | 4189 | /* Cancel scheduled suspend requests. */ |
@@ -4296,7 +4344,6 @@ link_up: | |||
4296 | * to get done, so reset controller to flush Tx. | 4344 | * to get done, so reset controller to flush Tx. |
4297 | * (Do the reset outside of interrupt context). | 4345 | * (Do the reset outside of interrupt context). |
4298 | */ | 4346 | */ |
4299 | adapter->tx_timeout_count++; | ||
4300 | schedule_work(&adapter->reset_task); | 4347 | schedule_work(&adapter->reset_task); |
4301 | /* return immediately since reset is imminent */ | 4348 | /* return immediately since reset is imminent */ |
4302 | return; | 4349 | return; |
@@ -4325,19 +4372,12 @@ link_up: | |||
4325 | else | 4372 | else |
4326 | ew32(ICS, E1000_ICS_RXDMT0); | 4373 | ew32(ICS, E1000_ICS_RXDMT0); |
4327 | 4374 | ||
4375 | /* flush pending descriptors to memory before detecting Tx hang */ | ||
4376 | e1000e_flush_descriptors(adapter); | ||
4377 | |||
4328 | /* Force detection of hung controller every watchdog period */ | 4378 | /* Force detection of hung controller every watchdog period */ |
4329 | adapter->detect_tx_hung = 1; | 4379 | adapter->detect_tx_hung = 1; |
4330 | 4380 | ||
4331 | /* flush partial descriptors to memory before detecting tx hang */ | ||
4332 | if (adapter->flags2 & FLAG2_DMA_BURST) { | ||
4333 | ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); | ||
4334 | ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD); | ||
4335 | /* | ||
4336 | * no need to flush the writes because the timeout code does | ||
4337 | * an er32 first thing | ||
4338 | */ | ||
4339 | } | ||
4340 | |||
4341 | /* | 4381 | /* |
4342 | * With 82571 controllers, LAA may be overwritten due to controller | 4382 | * With 82571 controllers, LAA may be overwritten due to controller |
4343 | * reset from the other port. Set the appropriate LAA in RAR[0] | 4383 | * reset from the other port. Set the appropriate LAA in RAR[0] |
@@ -4519,7 +4559,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter, | |||
4519 | buffer_info->next_to_watch = i; | 4559 | buffer_info->next_to_watch = i; |
4520 | buffer_info->dma = dma_map_single(&pdev->dev, | 4560 | buffer_info->dma = dma_map_single(&pdev->dev, |
4521 | skb->data + offset, | 4561 | skb->data + offset, |
4522 | size, DMA_TO_DEVICE); | 4562 | size, DMA_TO_DEVICE); |
4523 | buffer_info->mapped_as_page = false; | 4563 | buffer_info->mapped_as_page = false; |
4524 | if (dma_mapping_error(&pdev->dev, buffer_info->dma)) | 4564 | if (dma_mapping_error(&pdev->dev, buffer_info->dma)) |
4525 | goto dma_error; | 4565 | goto dma_error; |
@@ -4566,7 +4606,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter, | |||
4566 | } | 4606 | } |
4567 | } | 4607 | } |
4568 | 4608 | ||
4569 | segs = skb_shinfo(skb)->gso_segs ?: 1; | 4609 | segs = skb_shinfo(skb)->gso_segs ? : 1; |
4570 | /* multiply data chunks by size of headers */ | 4610 | /* multiply data chunks by size of headers */ |
4571 | bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len; | 4611 | bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len; |
4572 | 4612 | ||
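[Editorial note, not part of the commit] The byte accounting above charges the protocol headers once per generated segment, because TSO replicates the skb_headlen() bytes in front of every segment it emits. A worked example with illustrative numbers:

	/*
	 * gso_segs = 10, skb_headlen(skb) = 66, skb->len = 64066:
	 *   bytecount = (10 - 1) * 66 + 64066 = 64660
	 * i.e. the 66 header bytes are counted ten times in total, once
	 * for each segment that will appear on the wire.
	 */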
@@ -4578,13 +4618,13 @@ static int e1000_tx_map(struct e1000_adapter *adapter, | |||
4578 | return count; | 4618 | return count; |
4579 | 4619 | ||
4580 | dma_error: | 4620 | dma_error: |
4581 | dev_err(&pdev->dev, "TX DMA map failed\n"); | 4621 | dev_err(&pdev->dev, "Tx DMA map failed\n"); |
4582 | buffer_info->dma = 0; | 4622 | buffer_info->dma = 0; |
4583 | if (count) | 4623 | if (count) |
4584 | count--; | 4624 | count--; |
4585 | 4625 | ||
4586 | while (count--) { | 4626 | while (count--) { |
4587 | if (i==0) | 4627 | if (i == 0) |
4588 | i += tx_ring->count; | 4628 | i += tx_ring->count; |
4589 | i--; | 4629 | i--; |
4590 | buffer_info = &tx_ring->buffer_info[i]; | 4630 | buffer_info = &tx_ring->buffer_info[i]; |
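[Editorial note, not part of the commit] On a mapping failure the code above walks the ring backwards from the last successfully mapped buffer, wrapping at index 0, and releases each mapping. A sketch of the full unwind, assuming a helper along the lines of the driver's e1000_put_txbuf() that unmaps and clears a single buffer_info:

	dma_error:
		dev_err(&pdev->dev, "Tx DMA map failed\n");
		buffer_info->dma = 0;
		if (count)
			count--;			/* current entry never got mapped */

		while (count--) {
			if (i == 0)
				i += tx_ring->count;	/* wrap back to the ring end */
			i--;
			buffer_info = &tx_ring->buffer_info[i];
			e1000_put_txbuf(adapter, buffer_info);	/* unmap + clear */
		}
		return 0;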
@@ -4875,6 +4915,10 @@ static void e1000_reset_task(struct work_struct *work) | |||
4875 | struct e1000_adapter *adapter; | 4915 | struct e1000_adapter *adapter; |
4876 | adapter = container_of(work, struct e1000_adapter, reset_task); | 4916 | adapter = container_of(work, struct e1000_adapter, reset_task); |
4877 | 4917 | ||
4918 | /* don't run the task if already down */ | ||
4919 | if (test_bit(__E1000_DOWN, &adapter->state)) | ||
4920 | return; | ||
4921 | |||
4878 | if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) && | 4922 | if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) && |
4879 | (adapter->flags & FLAG_RX_RESTART_NOW))) { | 4923 | (adapter->flags & FLAG_RX_RESTART_NOW))) { |
4880 | e1000e_dump(adapter); | 4924 | e1000e_dump(adapter); |
@@ -5209,7 +5253,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake, | |||
5209 | * Release control of h/w to f/w. If f/w is AMT enabled, this | 5253 | * Release control of h/w to f/w. If f/w is AMT enabled, this |
5210 | * would have already happened in close and is redundant. | 5254 | * would have already happened in close and is redundant. |
5211 | */ | 5255 | */ |
5212 | e1000_release_hw_control(adapter); | 5256 | e1000e_release_hw_control(adapter); |
5213 | 5257 | ||
5214 | pci_disable_device(pdev); | 5258 | pci_disable_device(pdev); |
5215 | 5259 | ||
@@ -5366,7 +5410,7 @@ static int __e1000_resume(struct pci_dev *pdev) | |||
5366 | * under the control of the driver. | 5410 | * under the control of the driver. |
5367 | */ | 5411 | */ |
5368 | if (!(adapter->flags & FLAG_HAS_AMT)) | 5412 | if (!(adapter->flags & FLAG_HAS_AMT)) |
5369 | e1000_get_hw_control(adapter); | 5413 | e1000e_get_hw_control(adapter); |
5370 | 5414 | ||
5371 | return 0; | 5415 | return 0; |
5372 | } | 5416 | } |
@@ -5613,7 +5657,7 @@ static void e1000_io_resume(struct pci_dev *pdev) | |||
5613 | * under the control of the driver. | 5657 | * under the control of the driver. |
5614 | */ | 5658 | */ |
5615 | if (!(adapter->flags & FLAG_HAS_AMT)) | 5659 | if (!(adapter->flags & FLAG_HAS_AMT)) |
5616 | e1000_get_hw_control(adapter); | 5660 | e1000e_get_hw_control(adapter); |
5617 | 5661 | ||
5618 | } | 5662 | } |
5619 | 5663 | ||
@@ -5636,7 +5680,7 @@ static void e1000_print_device_info(struct e1000_adapter *adapter) | |||
5636 | ret_val = e1000_read_pba_string_generic(hw, pba_str, | 5680 | ret_val = e1000_read_pba_string_generic(hw, pba_str, |
5637 | E1000_PBANUM_LENGTH); | 5681 | E1000_PBANUM_LENGTH); |
5638 | if (ret_val) | 5682 | if (ret_val) |
5639 | strcpy(pba_str, "Unknown"); | 5683 | strncpy((char *)pba_str, "Unknown", sizeof(pba_str) - 1); |
5640 | e_info("MAC: %d, PHY: %d, PBA No: %s\n", | 5684 | e_info("MAC: %d, PHY: %d, PBA No: %s\n", |
5641 | hw->mac.type, hw->phy.type, pba_str); | 5685 | hw->mac.type, hw->phy.type, pba_str); |
5642 | } | 5686 | } |
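[Editorial note, not part of the commit] Both strcpy() replacements in this patch copy at most sizeof(buf) - 1 bytes. Note that strncpy() does not NUL-terminate when the source fills the buffer, so the idiom relies on the destination's last byte already being (or being made) zero. A sketch of the pba_str case, with the zero initialiser added here for illustration:

	u8 pba_str[E1000_PBANUM_LENGTH] = {0};	/* zeroed, so always terminated */

	if (e1000_read_pba_string_generic(hw, pba_str, sizeof(pba_str)))
		strncpy((char *)pba_str, "Unknown", sizeof(pba_str) - 1);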
@@ -5923,7 +5967,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev, | |||
5923 | /* APME bit in EEPROM is mapped to WUC.APME */ | 5967 | /* APME bit in EEPROM is mapped to WUC.APME */ |
5924 | eeprom_data = er32(WUC); | 5968 | eeprom_data = er32(WUC); |
5925 | eeprom_apme_mask = E1000_WUC_APME; | 5969 | eeprom_apme_mask = E1000_WUC_APME; |
5926 | if (eeprom_data & E1000_WUC_PHY_WAKE) | 5970 | if ((hw->mac.type > e1000_ich10lan) && |
5971 | (eeprom_data & E1000_WUC_PHY_WAKE)) | ||
5927 | adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP; | 5972 | adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP; |
5928 | } else if (adapter->flags & FLAG_APME_IN_CTRL3) { | 5973 | } else if (adapter->flags & FLAG_APME_IN_CTRL3) { |
5929 | if (adapter->flags & FLAG_APME_CHECK_PORT_B && | 5974 | if (adapter->flags & FLAG_APME_CHECK_PORT_B && |
@@ -5963,9 +6008,9 @@ static int __devinit e1000_probe(struct pci_dev *pdev, | |||
5963 | * under the control of the driver. | 6008 | * under the control of the driver. |
5964 | */ | 6009 | */ |
5965 | if (!(adapter->flags & FLAG_HAS_AMT)) | 6010 | if (!(adapter->flags & FLAG_HAS_AMT)) |
5966 | e1000_get_hw_control(adapter); | 6011 | e1000e_get_hw_control(adapter); |
5967 | 6012 | ||
5968 | strcpy(netdev->name, "eth%d"); | 6013 | strncpy(netdev->name, "eth%d", sizeof(netdev->name) - 1); |
5969 | err = register_netdev(netdev); | 6014 | err = register_netdev(netdev); |
5970 | if (err) | 6015 | if (err) |
5971 | goto err_register; | 6016 | goto err_register; |
@@ -5982,12 +6027,11 @@ static int __devinit e1000_probe(struct pci_dev *pdev, | |||
5982 | 6027 | ||
5983 | err_register: | 6028 | err_register: |
5984 | if (!(adapter->flags & FLAG_HAS_AMT)) | 6029 | if (!(adapter->flags & FLAG_HAS_AMT)) |
5985 | e1000_release_hw_control(adapter); | 6030 | e1000e_release_hw_control(adapter); |
5986 | err_eeprom: | 6031 | err_eeprom: |
5987 | if (!e1000_check_reset_block(&adapter->hw)) | 6032 | if (!e1000_check_reset_block(&adapter->hw)) |
5988 | e1000_phy_hw_reset(&adapter->hw); | 6033 | e1000_phy_hw_reset(&adapter->hw); |
5989 | err_hw_init: | 6034 | err_hw_init: |
5990 | |||
5991 | kfree(adapter->tx_ring); | 6035 | kfree(adapter->tx_ring); |
5992 | kfree(adapter->rx_ring); | 6036 | kfree(adapter->rx_ring); |
5993 | err_sw_init: | 6037 | err_sw_init: |
@@ -6053,7 +6097,7 @@ static void __devexit e1000_remove(struct pci_dev *pdev) | |||
6053 | * Release control of h/w to f/w. If f/w is AMT enabled, this | 6097 | * Release control of h/w to f/w. If f/w is AMT enabled, this |
6054 | * would have already happened in close and is redundant. | 6098 | * would have already happened in close and is redundant. |
6055 | */ | 6099 | */ |
6056 | e1000_release_hw_control(adapter); | 6100 | e1000e_release_hw_control(adapter); |
6057 | 6101 | ||
6058 | e1000e_reset_interrupt_capability(adapter); | 6102 | e1000e_reset_interrupt_capability(adapter); |
6059 | kfree(adapter->tx_ring); | 6103 | kfree(adapter->tx_ring); |
@@ -6184,7 +6228,7 @@ static int __init e1000_init_module(void) | |||
6184 | int ret; | 6228 | int ret; |
6185 | pr_info("Intel(R) PRO/1000 Network Driver - %s\n", | 6229 | pr_info("Intel(R) PRO/1000 Network Driver - %s\n", |
6186 | e1000e_driver_version); | 6230 | e1000e_driver_version); |
6187 | pr_info("Copyright (c) 1999 - 2010 Intel Corporation.\n"); | 6231 | pr_info("Copyright(c) 1999 - 2011 Intel Corporation.\n"); |
6188 | ret = pci_register_driver(&e1000_driver); | 6232 | ret = pci_register_driver(&e1000_driver); |
6189 | 6233 | ||
6190 | return ret; | 6234 | return ret; |
diff --git a/drivers/net/e1000e/param.c b/drivers/net/e1000e/param.c index a9612b0e4bca..4dd9b63273f6 100644 --- a/drivers/net/e1000e/param.c +++ b/drivers/net/e1000e/param.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel PRO/1000 Linux driver | 3 | Intel PRO/1000 Linux driver |
4 | Copyright(c) 1999 - 2010 Intel Corporation. | 4 | Copyright(c) 1999 - 2011 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
@@ -62,10 +62,9 @@ MODULE_PARM_DESC(copybreak, | |||
62 | module_param_array_named(X, X, int, &num_##X, 0); \ | 62 | module_param_array_named(X, X, int, &num_##X, 0); \ |
63 | MODULE_PARM_DESC(X, desc); | 63 | MODULE_PARM_DESC(X, desc); |
64 | 64 | ||
65 | |||
66 | /* | 65 | /* |
67 | * Transmit Interrupt Delay in units of 1.024 microseconds | 66 | * Transmit Interrupt Delay in units of 1.024 microseconds |
68 | * Tx interrupt delay needs to typically be set to something non zero | 67 | * Tx interrupt delay needs to typically be set to something non-zero |
69 | * | 68 | * |
70 | * Valid Range: 0-65535 | 69 | * Valid Range: 0-65535 |
71 | */ | 70 | */ |
@@ -112,6 +111,7 @@ E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate"); | |||
112 | #define DEFAULT_ITR 3 | 111 | #define DEFAULT_ITR 3 |
113 | #define MAX_ITR 100000 | 112 | #define MAX_ITR 100000 |
114 | #define MIN_ITR 100 | 113 | #define MIN_ITR 100 |
114 | |||
115 | /* IntMode (Interrupt Mode) | 115 | /* IntMode (Interrupt Mode) |
116 | * | 116 | * |
117 | * Valid Range: 0 - 2 | 117 | * Valid Range: 0 - 2 |
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c index 1781efeb55e3..6bea051b134b 100644 --- a/drivers/net/e1000e/phy.c +++ b/drivers/net/e1000e/phy.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel PRO/1000 Linux driver | 3 | Intel PRO/1000 Linux driver |
4 | Copyright(c) 1999 - 2010 Intel Corporation. | 4 | Copyright(c) 1999 - 2011 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
@@ -637,12 +637,11 @@ s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data) | |||
637 | **/ | 637 | **/ |
638 | s32 e1000_copper_link_setup_82577(struct e1000_hw *hw) | 638 | s32 e1000_copper_link_setup_82577(struct e1000_hw *hw) |
639 | { | 639 | { |
640 | struct e1000_phy_info *phy = &hw->phy; | ||
641 | s32 ret_val; | 640 | s32 ret_val; |
642 | u16 phy_data; | 641 | u16 phy_data; |
643 | 642 | ||
644 | /* Enable CRS on TX. This must be set for half-duplex operation. */ | 643 | /* Enable CRS on Tx. This must be set for half-duplex operation. */ |
645 | ret_val = phy->ops.read_reg(hw, I82577_CFG_REG, &phy_data); | 644 | ret_val = e1e_rphy(hw, I82577_CFG_REG, &phy_data); |
646 | if (ret_val) | 645 | if (ret_val) |
647 | goto out; | 646 | goto out; |
648 | 647 | ||
@@ -651,7 +650,7 @@ s32 e1000_copper_link_setup_82577(struct e1000_hw *hw) | |||
651 | /* Enable downshift */ | 650 | /* Enable downshift */ |
652 | phy_data |= I82577_CFG_ENABLE_DOWNSHIFT; | 651 | phy_data |= I82577_CFG_ENABLE_DOWNSHIFT; |
653 | 652 | ||
654 | ret_val = phy->ops.write_reg(hw, I82577_CFG_REG, phy_data); | 653 | ret_val = e1e_wphy(hw, I82577_CFG_REG, phy_data); |
655 | 654 | ||
656 | out: | 655 | out: |
657 | return ret_val; | 656 | return ret_val; |
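[Editorial note, not part of the commit] Throughout phy.c this patch folds phy->ops.read_reg()/write_reg() calls into the driver's existing e1e_rphy()/e1e_wphy() shorthands, which also lets several multi-line calls collapse onto one line. The resulting read-modify-write shape, as in the hunk above:

	ret_val = e1e_rphy(hw, I82577_CFG_REG, &phy_data);
	if (ret_val)
		goto out;

	/* set the desired bits, e.g. enable downshift */
	phy_data |= I82577_CFG_ENABLE_DOWNSHIFT;

	ret_val = e1e_wphy(hw, I82577_CFG_REG, phy_data);
out:
	return ret_val;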
@@ -774,16 +773,14 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw) | |||
774 | } | 773 | } |
775 | 774 | ||
776 | if (phy->type == e1000_phy_82578) { | 775 | if (phy->type == e1000_phy_82578) { |
777 | ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, | 776 | ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); |
778 | &phy_data); | ||
779 | if (ret_val) | 777 | if (ret_val) |
780 | return ret_val; | 778 | return ret_val; |
781 | 779 | ||
782 | /* 82578 PHY - set the downshift count to 1x. */ | 780 | /* 82578 PHY - set the downshift count to 1x. */ |
783 | phy_data |= I82578_EPSCR_DOWNSHIFT_ENABLE; | 781 | phy_data |= I82578_EPSCR_DOWNSHIFT_ENABLE; |
784 | phy_data &= ~I82578_EPSCR_DOWNSHIFT_COUNTER_MASK; | 782 | phy_data &= ~I82578_EPSCR_DOWNSHIFT_COUNTER_MASK; |
785 | ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, | 783 | ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data); |
786 | phy_data); | ||
787 | if (ret_val) | 784 | if (ret_val) |
788 | return ret_val; | 785 | return ret_val; |
789 | } | 786 | } |
@@ -1319,9 +1316,8 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw) | |||
1319 | * We didn't get link. | 1316 | * We didn't get link. |
1320 | * Reset the DSP and cross our fingers. | 1317 | * Reset the DSP and cross our fingers. |
1321 | */ | 1318 | */ |
1322 | ret_val = e1e_wphy(hw, | 1319 | ret_val = e1e_wphy(hw, M88E1000_PHY_PAGE_SELECT, |
1323 | M88E1000_PHY_PAGE_SELECT, | 1320 | 0x001d); |
1324 | 0x001d); | ||
1325 | if (ret_val) | 1321 | if (ret_val) |
1326 | return ret_val; | 1322 | return ret_val; |
1327 | ret_val = e1000e_phy_reset_dsp(hw); | 1323 | ret_val = e1000e_phy_reset_dsp(hw); |
@@ -2990,7 +2986,7 @@ s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 data) | |||
2990 | } | 2986 | } |
2991 | 2987 | ||
2992 | /** | 2988 | /** |
2993 | * e1000_get_phy_addr_for_hv_page - Get PHY adrress based on page | 2989 | * e1000_get_phy_addr_for_hv_page - Get PHY address based on page |
2994 | * @page: page to be accessed | 2990 | * @page: page to be accessed |
2995 | **/ | 2991 | **/ |
2996 | static u32 e1000_get_phy_addr_for_hv_page(u32 page) | 2992 | static u32 e1000_get_phy_addr_for_hv_page(u32 page) |
@@ -3071,12 +3067,12 @@ s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw) | |||
3071 | goto out; | 3067 | goto out; |
3072 | 3068 | ||
3073 | /* Do not apply workaround if in PHY loopback bit 14 set */ | 3069 | /* Do not apply workaround if in PHY loopback bit 14 set */ |
3074 | hw->phy.ops.read_reg(hw, PHY_CONTROL, &data); | 3070 | e1e_rphy(hw, PHY_CONTROL, &data); |
3075 | if (data & PHY_CONTROL_LB) | 3071 | if (data & PHY_CONTROL_LB) |
3076 | goto out; | 3072 | goto out; |
3077 | 3073 | ||
3078 | /* check if link is up and at 1Gbps */ | 3074 | /* check if link is up and at 1Gbps */ |
3079 | ret_val = hw->phy.ops.read_reg(hw, BM_CS_STATUS, &data); | 3075 | ret_val = e1e_rphy(hw, BM_CS_STATUS, &data); |
3080 | if (ret_val) | 3076 | if (ret_val) |
3081 | goto out; | 3077 | goto out; |
3082 | 3078 | ||
@@ -3092,14 +3088,12 @@ s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw) | |||
3092 | mdelay(200); | 3088 | mdelay(200); |
3093 | 3089 | ||
3094 | /* flush the packets in the fifo buffer */ | 3090 | /* flush the packets in the fifo buffer */ |
3095 | ret_val = hw->phy.ops.write_reg(hw, HV_MUX_DATA_CTRL, | 3091 | ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC | |
3096 | HV_MUX_DATA_CTRL_GEN_TO_MAC | | 3092 | HV_MUX_DATA_CTRL_FORCE_SPEED); |
3097 | HV_MUX_DATA_CTRL_FORCE_SPEED); | ||
3098 | if (ret_val) | 3093 | if (ret_val) |
3099 | goto out; | 3094 | goto out; |
3100 | 3095 | ||
3101 | ret_val = hw->phy.ops.write_reg(hw, HV_MUX_DATA_CTRL, | 3096 | ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC); |
3102 | HV_MUX_DATA_CTRL_GEN_TO_MAC); | ||
3103 | 3097 | ||
3104 | out: | 3098 | out: |
3105 | return ret_val; | 3099 | return ret_val; |
@@ -3119,7 +3113,7 @@ s32 e1000_check_polarity_82577(struct e1000_hw *hw) | |||
3119 | s32 ret_val; | 3113 | s32 ret_val; |
3120 | u16 data; | 3114 | u16 data; |
3121 | 3115 | ||
3122 | ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data); | 3116 | ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data); |
3123 | 3117 | ||
3124 | if (!ret_val) | 3118 | if (!ret_val) |
3125 | phy->cable_polarity = (data & I82577_PHY_STATUS2_REV_POLARITY) | 3119 | phy->cable_polarity = (data & I82577_PHY_STATUS2_REV_POLARITY) |
@@ -3142,13 +3136,13 @@ s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw) | |||
3142 | u16 phy_data; | 3136 | u16 phy_data; |
3143 | bool link; | 3137 | bool link; |
3144 | 3138 | ||
3145 | ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); | 3139 | ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data); |
3146 | if (ret_val) | 3140 | if (ret_val) |
3147 | goto out; | 3141 | goto out; |
3148 | 3142 | ||
3149 | e1000e_phy_force_speed_duplex_setup(hw, &phy_data); | 3143 | e1000e_phy_force_speed_duplex_setup(hw, &phy_data); |
3150 | 3144 | ||
3151 | ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); | 3145 | ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data); |
3152 | if (ret_val) | 3146 | if (ret_val) |
3153 | goto out; | 3147 | goto out; |
3154 | 3148 | ||
@@ -3212,7 +3206,7 @@ s32 e1000_get_phy_info_82577(struct e1000_hw *hw) | |||
3212 | if (ret_val) | 3206 | if (ret_val) |
3213 | goto out; | 3207 | goto out; |
3214 | 3208 | ||
3215 | ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data); | 3209 | ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data); |
3216 | if (ret_val) | 3210 | if (ret_val) |
3217 | goto out; | 3211 | goto out; |
3218 | 3212 | ||
@@ -3224,7 +3218,7 @@ s32 e1000_get_phy_info_82577(struct e1000_hw *hw) | |||
3224 | if (ret_val) | 3218 | if (ret_val) |
3225 | goto out; | 3219 | goto out; |
3226 | 3220 | ||
3227 | ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data); | 3221 | ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &data); |
3228 | if (ret_val) | 3222 | if (ret_val) |
3229 | goto out; | 3223 | goto out; |
3230 | 3224 | ||
@@ -3258,7 +3252,7 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw) | |||
3258 | s32 ret_val; | 3252 | s32 ret_val; |
3259 | u16 phy_data, length; | 3253 | u16 phy_data, length; |
3260 | 3254 | ||
3261 | ret_val = phy->ops.read_reg(hw, I82577_PHY_DIAG_STATUS, &phy_data); | 3255 | ret_val = e1e_rphy(hw, I82577_PHY_DIAG_STATUS, &phy_data); |
3262 | if (ret_val) | 3256 | if (ret_val) |
3263 | goto out; | 3257 | goto out; |
3264 | 3258 | ||
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c index 4fa8d2a4aef3..eb35951a2442 100644 --- a/drivers/net/eepro.c +++ b/drivers/net/eepro.c | |||
@@ -1761,7 +1761,7 @@ module_param_array(io, int, NULL, 0); | |||
1761 | module_param_array(irq, int, NULL, 0); | 1761 | module_param_array(irq, int, NULL, 0); |
1762 | module_param_array(mem, int, NULL, 0); | 1762 | module_param_array(mem, int, NULL, 0); |
1763 | module_param(autodetect, int, 0); | 1763 | module_param(autodetect, int, 0); |
1764 | MODULE_PARM_DESC(io, "EtherExpress Pro/10 I/O base addres(es)"); | 1764 | MODULE_PARM_DESC(io, "EtherExpress Pro/10 I/O base address(es)"); |
1765 | MODULE_PARM_DESC(irq, "EtherExpress Pro/10 IRQ number(s)"); | 1765 | MODULE_PARM_DESC(irq, "EtherExpress Pro/10 IRQ number(s)"); |
1766 | MODULE_PARM_DESC(mem, "EtherExpress Pro/10 Rx buffer size(es) in kB (3-29)"); | 1766 | MODULE_PARM_DESC(mem, "EtherExpress Pro/10 Rx buffer size(es) in kB (3-29)"); |
1767 | MODULE_PARM_DESC(autodetect, "EtherExpress Pro/10 force board(s) detection (0-1)"); | 1767 | MODULE_PARM_DESC(autodetect, "EtherExpress Pro/10 force board(s) detection (0-1)"); |
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h index a724a2d14506..6c7257bd73fc 100644 --- a/drivers/net/ehea/ehea.h +++ b/drivers/net/ehea/ehea.h | |||
@@ -40,7 +40,7 @@ | |||
40 | #include <asm/io.h> | 40 | #include <asm/io.h> |
41 | 41 | ||
42 | #define DRV_NAME "ehea" | 42 | #define DRV_NAME "ehea" |
43 | #define DRV_VERSION "EHEA_0106" | 43 | #define DRV_VERSION "EHEA_0107" |
44 | 44 | ||
45 | /* eHEA capability flags */ | 45 | /* eHEA capability flags */ |
46 | #define DLPAR_PORT_ADD_REM 1 | 46 | #define DLPAR_PORT_ADD_REM 1 |
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c index 1032b5bbe238..f75d3144b8a5 100644 --- a/drivers/net/ehea/ehea_main.c +++ b/drivers/net/ehea/ehea_main.c | |||
@@ -437,7 +437,7 @@ static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a) | |||
437 | } | 437 | } |
438 | } | 438 | } |
439 | /* Ring doorbell */ | 439 | /* Ring doorbell */ |
440 | ehea_update_rq1a(pr->qp, i); | 440 | ehea_update_rq1a(pr->qp, i - 1); |
441 | } | 441 | } |
442 | 442 | ||
443 | static int ehea_refill_rq_def(struct ehea_port_res *pr, | 443 | static int ehea_refill_rq_def(struct ehea_port_res *pr, |
@@ -1329,9 +1329,7 @@ static int ehea_fill_port_res(struct ehea_port_res *pr) | |||
1329 | int ret; | 1329 | int ret; |
1330 | struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr; | 1330 | struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr; |
1331 | 1331 | ||
1332 | ehea_init_fill_rq1(pr, init_attr->act_nr_rwqes_rq1 | 1332 | ehea_init_fill_rq1(pr, pr->rq1_skba.len); |
1333 | - init_attr->act_nr_rwqes_rq2 | ||
1334 | - init_attr->act_nr_rwqes_rq3 - 1); | ||
1335 | 1333 | ||
1336 | ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1); | 1334 | ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1); |
1337 | 1335 | ||
diff --git a/drivers/net/enc28j60.c b/drivers/net/enc28j60.c index 112c5aa9af7f..907b05a1c659 100644 --- a/drivers/net/enc28j60.c +++ b/drivers/net/enc28j60.c | |||
@@ -812,7 +812,7 @@ static void enc28j60_read_tsv(struct enc28j60_net *priv, u8 tsv[TSV_SIZE]) | |||
812 | if (netif_msg_hw(priv)) | 812 | if (netif_msg_hw(priv)) |
813 | printk(KERN_DEBUG DRV_NAME ": reading TSV at addr:0x%04x\n", | 813 | printk(KERN_DEBUG DRV_NAME ": reading TSV at addr:0x%04x\n", |
814 | endptr + 1); | 814 | endptr + 1); |
815 | enc28j60_mem_read(priv, endptr + 1, sizeof(tsv), tsv); | 815 | enc28j60_mem_read(priv, endptr + 1, TSV_SIZE, tsv); |
816 | } | 816 | } |
817 | 817 | ||
818 | static void enc28j60_dump_tsv(struct enc28j60_net *priv, const char *msg, | 818 | static void enc28j60_dump_tsv(struct enc28j60_net *priv, const char *msg, |
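[Editorial note, not part of the commit] The one-token change above fixes a classic C pitfall: tsv is declared as u8 tsv[TSV_SIZE] in the parameter list, but an array parameter decays to a pointer, so sizeof(tsv) inside the function evaluates to 4 or 8, not TSV_SIZE, and the status-vector read was silently truncated. A stand-alone, user-space illustration of the bug class (names and size made up):

	#include <stdio.h>

	#define TSV_SIZE 100

	static void show(unsigned char tsv[TSV_SIZE])
	{
		/* 'tsv' is really 'unsigned char *', so this prints 4 or 8 */
		printf("inside callee: sizeof(tsv) = %zu\n", sizeof(tsv));
	}

	int main(void)
	{
		unsigned char tsv[TSV_SIZE];

		printf("at caller:     sizeof(tsv) = %zu\n", sizeof(tsv)); /* 100 */
		show(tsv);
		return 0;
	}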
diff --git a/drivers/net/fec.c b/drivers/net/fec.c index cce32d43175f..cd0282d5d40f 100644 --- a/drivers/net/fec.c +++ b/drivers/net/fec.c | |||
@@ -17,6 +17,8 @@ | |||
17 | * | 17 | * |
18 | * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be) | 18 | * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be) |
19 | * Copyright (c) 2004-2006 Macq Electronique SA. | 19 | * Copyright (c) 2004-2006 Macq Electronique SA. |
20 | * | ||
21 | * Copyright (C) 2010 Freescale Semiconductor, Inc. | ||
20 | */ | 22 | */ |
21 | 23 | ||
22 | #include <linux/module.h> | 24 | #include <linux/module.h> |
@@ -45,29 +47,42 @@ | |||
45 | 47 | ||
46 | #include <asm/cacheflush.h> | 48 | #include <asm/cacheflush.h> |
47 | 49 | ||
48 | #ifndef CONFIG_ARCH_MXC | 50 | #ifndef CONFIG_ARM |
49 | #include <asm/coldfire.h> | 51 | #include <asm/coldfire.h> |
50 | #include <asm/mcfsim.h> | 52 | #include <asm/mcfsim.h> |
51 | #endif | 53 | #endif |
52 | 54 | ||
53 | #include "fec.h" | 55 | #include "fec.h" |
54 | 56 | ||
55 | #ifdef CONFIG_ARCH_MXC | 57 | #if defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28) |
56 | #include <mach/hardware.h> | ||
57 | #define FEC_ALIGNMENT 0xf | 58 | #define FEC_ALIGNMENT 0xf |
58 | #else | 59 | #else |
59 | #define FEC_ALIGNMENT 0x3 | 60 | #define FEC_ALIGNMENT 0x3 |
60 | #endif | 61 | #endif |
61 | 62 | ||
62 | /* | 63 | #define DRIVER_NAME "fec" |
63 | * Define the fixed address of the FEC hardware. | 64 | |
64 | */ | 65 | /* Controller is ENET-MAC */ |
65 | #if defined(CONFIG_M5272) | 66 | #define FEC_QUIRK_ENET_MAC (1 << 0) |
67 | /* Controller needs driver to swap frame */ | ||
68 | #define FEC_QUIRK_SWAP_FRAME (1 << 1) | ||
66 | 69 | ||
67 | static unsigned char fec_mac_default[] = { | 70 | static struct platform_device_id fec_devtype[] = { |
68 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, | 71 | { |
72 | .name = DRIVER_NAME, | ||
73 | .driver_data = 0, | ||
74 | }, { | ||
75 | .name = "imx28-fec", | ||
76 | .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME, | ||
77 | }, | ||
78 | { } | ||
69 | }; | 79 | }; |
70 | 80 | ||
81 | static unsigned char macaddr[ETH_ALEN]; | ||
82 | module_param_array(macaddr, byte, NULL, 0); | ||
83 | MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address"); | ||
84 | |||
85 | #if defined(CONFIG_M5272) | ||
71 | /* | 86 | /* |
72 | * Some hardware gets it MAC address out of local flash memory. | 87 | * Some hardware gets it MAC address out of local flash memory. |
73 | * if this is non-zero then assume it is the address to get MAC from. | 88 | * if this is non-zero then assume it is the address to get MAC from. |
@@ -133,7 +148,8 @@ static unsigned char fec_mac_default[] = { | |||
133 | * account when setting it. | 148 | * account when setting it. |
134 | */ | 149 | */ |
135 | #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ | 150 | #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ |
136 | defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARCH_MXC) | 151 | defined(CONFIG_M520x) || defined(CONFIG_M532x) || \ |
152 | defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28) | ||
137 | #define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16) | 153 | #define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16) |
138 | #else | 154 | #else |
139 | #define OPT_FRAME_SIZE 0 | 155 | #define OPT_FRAME_SIZE 0 |
@@ -186,7 +202,6 @@ struct fec_enet_private { | |||
186 | int mii_timeout; | 202 | int mii_timeout; |
187 | uint phy_speed; | 203 | uint phy_speed; |
188 | phy_interface_t phy_interface; | 204 | phy_interface_t phy_interface; |
189 | int index; | ||
190 | int link; | 205 | int link; |
191 | int full_duplex; | 206 | int full_duplex; |
192 | struct completion mdio_done; | 207 | struct completion mdio_done; |
@@ -213,10 +228,23 @@ static void fec_stop(struct net_device *dev); | |||
213 | /* Transmitter timeout */ | 228 | /* Transmitter timeout */ |
214 | #define TX_TIMEOUT (2 * HZ) | 229 | #define TX_TIMEOUT (2 * HZ) |
215 | 230 | ||
231 | static void *swap_buffer(void *bufaddr, int len) | ||
232 | { | ||
233 | int i; | ||
234 | unsigned int *buf = bufaddr; | ||
235 | |||
236 | for (i = 0; i < (len + 3) / 4; i++, buf++) | ||
237 | *buf = cpu_to_be32(*buf); | ||
238 | |||
239 | return bufaddr; | ||
240 | } | ||
241 | |||
216 | static netdev_tx_t | 242 | static netdev_tx_t |
217 | fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) | 243 | fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) |
218 | { | 244 | { |
219 | struct fec_enet_private *fep = netdev_priv(dev); | 245 | struct fec_enet_private *fep = netdev_priv(dev); |
246 | const struct platform_device_id *id_entry = | ||
247 | platform_get_device_id(fep->pdev); | ||
220 | struct bufdesc *bdp; | 248 | struct bufdesc *bdp; |
221 | void *bufaddr; | 249 | void *bufaddr; |
222 | unsigned short status; | 250 | unsigned short status; |
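[Editorial note, not part of the commit] swap_buffer() added above services the FEC_QUIRK_SWAP_FRAME entry in the new id table: on the affected ENET-MAC integration the controller sees frame data in the opposite byte order from the little-endian ARM core, so every 32-bit word is byte-swapped before transmit and again after receive. A worked example with made-up bytes:

	/*
	 * On a little-endian CPU, 8 payload bytes:
	 *   before swap_buffer(buf, 8):  11 22 33 44 55 66 77 88
	 *   after  swap_buffer(buf, 8):  44 33 22 11 88 77 66 55
	 * (len + 3) / 4 = 2 words are converted with cpu_to_be32(), and
	 * running the same helper on the Rx path undoes the swap.
	 */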
@@ -261,6 +289,14 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
261 | bufaddr = fep->tx_bounce[index]; | 289 | bufaddr = fep->tx_bounce[index]; |
262 | } | 290 | } |
263 | 291 | ||
292 | /* | ||
293 | * Some design made an incorrect assumption on endian mode of | ||
294 | * the system that it's running on. As the result, driver has to | ||
295 | * swap every frame going to and coming from the controller. | ||
296 | */ | ||
297 | if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) | ||
298 | swap_buffer(bufaddr, skb->len); | ||
299 | |||
264 | /* Save skb pointer */ | 300 | /* Save skb pointer */ |
265 | fep->tx_skbuff[fep->skb_cur] = skb; | 301 | fep->tx_skbuff[fep->skb_cur] = skb; |
266 | 302 | ||
@@ -429,6 +465,8 @@ static void | |||
429 | fec_enet_rx(struct net_device *dev) | 465 | fec_enet_rx(struct net_device *dev) |
430 | { | 466 | { |
431 | struct fec_enet_private *fep = netdev_priv(dev); | 467 | struct fec_enet_private *fep = netdev_priv(dev); |
468 | const struct platform_device_id *id_entry = | ||
469 | platform_get_device_id(fep->pdev); | ||
432 | struct bufdesc *bdp; | 470 | struct bufdesc *bdp; |
433 | unsigned short status; | 471 | unsigned short status; |
434 | struct sk_buff *skb; | 472 | struct sk_buff *skb; |
@@ -492,6 +530,9 @@ fec_enet_rx(struct net_device *dev) | |||
492 | dma_unmap_single(NULL, bdp->cbd_bufaddr, bdp->cbd_datlen, | 530 | dma_unmap_single(NULL, bdp->cbd_bufaddr, bdp->cbd_datlen, |
493 | DMA_FROM_DEVICE); | 531 | DMA_FROM_DEVICE); |
494 | 532 | ||
533 | if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) | ||
534 | swap_buffer(data, pkt_len); | ||
535 | |||
495 | /* This does 16 byte alignment, exactly what we need. | 536 | /* This does 16 byte alignment, exactly what we need. |
496 | * The packet length includes FCS, but we don't want to | 537 | * The packet length includes FCS, but we don't want to |
497 | * include that when passing upstream as it messes up | 538 | * include that when passing upstream as it messes up |
@@ -538,37 +579,50 @@ rx_processing_done: | |||
538 | } | 579 | } |
539 | 580 | ||
540 | /* ------------------------------------------------------------------------- */ | 581 | /* ------------------------------------------------------------------------- */ |
541 | #ifdef CONFIG_M5272 | ||
542 | static void __inline__ fec_get_mac(struct net_device *dev) | 582 | static void __inline__ fec_get_mac(struct net_device *dev) |
543 | { | 583 | { |
544 | struct fec_enet_private *fep = netdev_priv(dev); | 584 | struct fec_enet_private *fep = netdev_priv(dev); |
585 | struct fec_platform_data *pdata = fep->pdev->dev.platform_data; | ||
545 | unsigned char *iap, tmpaddr[ETH_ALEN]; | 586 | unsigned char *iap, tmpaddr[ETH_ALEN]; |
546 | 587 | ||
547 | if (FEC_FLASHMAC) { | 588 | /* |
548 | /* | 589 | * try to get mac address in following order: |
549 | * Get MAC address from FLASH. | 590 | * |
550 | * If it is all 1's or 0's, use the default. | 591 | * 1) module parameter via kernel command line in form |
551 | */ | 592 | * fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0 |
552 | iap = (unsigned char *)FEC_FLASHMAC; | 593 | */ |
553 | if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) && | 594 | iap = macaddr; |
554 | (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0)) | 595 | |
555 | iap = fec_mac_default; | 596 | /* |
556 | if ((iap[0] == 0xff) && (iap[1] == 0xff) && (iap[2] == 0xff) && | 597 | * 2) from flash or fuse (via platform data) |
557 | (iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff)) | 598 | */ |
558 | iap = fec_mac_default; | 599 | if (!is_valid_ether_addr(iap)) { |
559 | } else { | 600 | #ifdef CONFIG_M5272 |
560 | *((unsigned long *) &tmpaddr[0]) = readl(fep->hwp + FEC_ADDR_LOW); | 601 | if (FEC_FLASHMAC) |
561 | *((unsigned short *) &tmpaddr[4]) = (readl(fep->hwp + FEC_ADDR_HIGH) >> 16); | 602 | iap = (unsigned char *)FEC_FLASHMAC; |
603 | #else | ||
604 | if (pdata) | ||
605 | memcpy(iap, pdata->mac, ETH_ALEN); | ||
606 | #endif | ||
607 | } | ||
608 | |||
609 | /* | ||
610 | * 3) FEC mac registers set by bootloader | ||
611 | */ | ||
612 | if (!is_valid_ether_addr(iap)) { | ||
613 | *((unsigned long *) &tmpaddr[0]) = | ||
614 | be32_to_cpu(readl(fep->hwp + FEC_ADDR_LOW)); | ||
615 | *((unsigned short *) &tmpaddr[4]) = | ||
616 | be16_to_cpu(readl(fep->hwp + FEC_ADDR_HIGH) >> 16); | ||
562 | iap = &tmpaddr[0]; | 617 | iap = &tmpaddr[0]; |
563 | } | 618 | } |
564 | 619 | ||
565 | memcpy(dev->dev_addr, iap, ETH_ALEN); | 620 | memcpy(dev->dev_addr, iap, ETH_ALEN); |
566 | 621 | ||
567 | /* Adjust MAC if using default MAC address */ | 622 | /* Adjust MAC if using macaddr */ |
568 | if (iap == fec_mac_default) | 623 | if (iap == macaddr) |
569 | dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index; | 624 | dev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->pdev->id; |
570 | } | 625 | } |
571 | #endif | ||
572 | 626 | ||
573 | /* ------------------------------------------------------------------------- */ | 627 | /* ------------------------------------------------------------------------- */ |
574 | 628 | ||
@@ -651,8 +705,8 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum, | |||
651 | fep->mii_timeout = 0; | 705 | fep->mii_timeout = 0; |
652 | init_completion(&fep->mdio_done); | 706 | init_completion(&fep->mdio_done); |
653 | 707 | ||
654 | /* start a read op */ | 708 | /* start a write op */ |
655 | writel(FEC_MMFR_ST | FEC_MMFR_OP_READ | | 709 | writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE | |
656 | FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) | | 710 | FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) | |
657 | FEC_MMFR_TA | FEC_MMFR_DATA(value), | 711 | FEC_MMFR_TA | FEC_MMFR_DATA(value), |
658 | fep->hwp + FEC_MII_DATA); | 712 | fep->hwp + FEC_MII_DATA); |
@@ -681,6 +735,7 @@ static int fec_enet_mii_probe(struct net_device *dev) | |||
681 | char mdio_bus_id[MII_BUS_ID_SIZE]; | 735 | char mdio_bus_id[MII_BUS_ID_SIZE]; |
682 | char phy_name[MII_BUS_ID_SIZE + 3]; | 736 | char phy_name[MII_BUS_ID_SIZE + 3]; |
683 | int phy_id; | 737 | int phy_id; |
738 | int dev_id = fep->pdev->id; | ||
684 | 739 | ||
685 | fep->phy_dev = NULL; | 740 | fep->phy_dev = NULL; |
686 | 741 | ||
@@ -692,6 +747,8 @@ static int fec_enet_mii_probe(struct net_device *dev) | |||
692 | continue; | 747 | continue; |
693 | if (fep->mii_bus->phy_map[phy_id]->phy_id == 0) | 748 | if (fep->mii_bus->phy_map[phy_id]->phy_id == 0) |
694 | continue; | 749 | continue; |
750 | if (dev_id--) | ||
751 | continue; | ||
695 | strncpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE); | 752 | strncpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE); |
696 | break; | 753 | break; |
697 | } | 754 | } |
@@ -729,10 +786,35 @@ static int fec_enet_mii_probe(struct net_device *dev) | |||
729 | 786 | ||
730 | static int fec_enet_mii_init(struct platform_device *pdev) | 787 | static int fec_enet_mii_init(struct platform_device *pdev) |
731 | { | 788 | { |
789 | static struct mii_bus *fec0_mii_bus; | ||
732 | struct net_device *dev = platform_get_drvdata(pdev); | 790 | struct net_device *dev = platform_get_drvdata(pdev); |
733 | struct fec_enet_private *fep = netdev_priv(dev); | 791 | struct fec_enet_private *fep = netdev_priv(dev); |
792 | const struct platform_device_id *id_entry = | ||
793 | platform_get_device_id(fep->pdev); | ||
734 | int err = -ENXIO, i; | 794 | int err = -ENXIO, i; |
735 | 795 | ||
796 | /* | ||
797 | * The dual fec interfaces are not equivalent with enet-mac. | ||
798 | * Here are the differences: | ||
799 | * | ||
800 | * - fec0 supports MII & RMII modes while fec1 only supports RMII | ||
801 | * - fec0 acts as the 1588 time master while fec1 is slave | ||
802 | * - external phys can only be configured by fec0 | ||
803 | * | ||
804 | * That is to say fec1 can not work independently. It only works | ||
805 | * when fec0 is working. The reason behind this design is that the | ||
806 | * second interface is added primarily for Switch mode. | ||
807 | * | ||
808 | * Because of the last point above, both phys are attached on fec0 | ||
809 | * mdio interface in board design, and need to be configured by | ||
810 | * fec0 mii_bus. | ||
811 | */ | ||
812 | if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && pdev->id) { | ||
813 | /* fec1 uses fec0 mii_bus */ | ||
814 | fep->mii_bus = fec0_mii_bus; | ||
815 | return 0; | ||
816 | } | ||
817 | |||
736 | fep->mii_timeout = 0; | 818 | fep->mii_timeout = 0; |
737 | 819 | ||
738 | /* | 820 | /* |
@@ -769,6 +851,10 @@ static int fec_enet_mii_init(struct platform_device *pdev) | |||
769 | if (mdiobus_register(fep->mii_bus)) | 851 | if (mdiobus_register(fep->mii_bus)) |
770 | goto err_out_free_mdio_irq; | 852 | goto err_out_free_mdio_irq; |
771 | 853 | ||
854 | /* save fec0 mii_bus */ | ||
855 | if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) | ||
856 | fec0_mii_bus = fep->mii_bus; | ||
857 | |||
772 | return 0; | 858 | return 0; |
773 | 859 | ||
774 | err_out_free_mdio_irq: | 860 | err_out_free_mdio_irq: |
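[Editorial note, not part of the commit] The early return added to fec_enet_mii_init() implements the sharing described in the new comment: only fec0 registers an MDIO bus, and a later probe of fec1 on an ENET-MAC SoC simply borrows it. Condensed to the two ends of that arrangement:

	static struct mii_bus *fec0_mii_bus;	/* cached by fec0's probe */

	if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && pdev->id) {
		fep->mii_bus = fec0_mii_bus;	/* fec1: reuse fec0's bus */
		return 0;
	}

	/* fec0 (or a non ENET-MAC part) allocates and registers its own
	 * bus as before, then caches it for a later fec1 probe: */
	if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
		fec0_mii_bus = fep->mii_bus;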
@@ -1067,9 +1153,8 @@ static const struct net_device_ops fec_netdev_ops = { | |||
1067 | /* | 1153 | /* |
1068 | * XXX: We need to clean up on failure exits here. | 1154 | * XXX: We need to clean up on failure exits here. |
1069 | * | 1155 | * |
1070 | * index is only used in legacy code | ||
1071 | */ | 1156 | */ |
1072 | static int fec_enet_init(struct net_device *dev, int index) | 1157 | static int fec_enet_init(struct net_device *dev) |
1073 | { | 1158 | { |
1074 | struct fec_enet_private *fep = netdev_priv(dev); | 1159 | struct fec_enet_private *fep = netdev_priv(dev); |
1075 | struct bufdesc *cbd_base; | 1160 | struct bufdesc *cbd_base; |
@@ -1086,26 +1171,11 @@ static int fec_enet_init(struct net_device *dev, int index) | |||
1086 | 1171 | ||
1087 | spin_lock_init(&fep->hw_lock); | 1172 | spin_lock_init(&fep->hw_lock); |
1088 | 1173 | ||
1089 | fep->index = index; | ||
1090 | fep->hwp = (void __iomem *)dev->base_addr; | 1174 | fep->hwp = (void __iomem *)dev->base_addr; |
1091 | fep->netdev = dev; | 1175 | fep->netdev = dev; |
1092 | 1176 | ||
1093 | /* Set the Ethernet address */ | 1177 | /* Get the Ethernet address */ |
1094 | #ifdef CONFIG_M5272 | ||
1095 | fec_get_mac(dev); | 1178 | fec_get_mac(dev); |
1096 | #else | ||
1097 | { | ||
1098 | unsigned long l; | ||
1099 | l = readl(fep->hwp + FEC_ADDR_LOW); | ||
1100 | dev->dev_addr[0] = (unsigned char)((l & 0xFF000000) >> 24); | ||
1101 | dev->dev_addr[1] = (unsigned char)((l & 0x00FF0000) >> 16); | ||
1102 | dev->dev_addr[2] = (unsigned char)((l & 0x0000FF00) >> 8); | ||
1103 | dev->dev_addr[3] = (unsigned char)((l & 0x000000FF) >> 0); | ||
1104 | l = readl(fep->hwp + FEC_ADDR_HIGH); | ||
1105 | dev->dev_addr[4] = (unsigned char)((l & 0xFF000000) >> 24); | ||
1106 | dev->dev_addr[5] = (unsigned char)((l & 0x00FF0000) >> 16); | ||
1107 | } | ||
1108 | #endif | ||
1109 | 1179 | ||
1110 | /* Set receive and transmit descriptor base. */ | 1180 | /* Set receive and transmit descriptor base. */ |
1111 | fep->rx_bd_base = cbd_base; | 1181 | fep->rx_bd_base = cbd_base; |
@@ -1156,12 +1226,25 @@ static void | |||
1156 | fec_restart(struct net_device *dev, int duplex) | 1226 | fec_restart(struct net_device *dev, int duplex) |
1157 | { | 1227 | { |
1158 | struct fec_enet_private *fep = netdev_priv(dev); | 1228 | struct fec_enet_private *fep = netdev_priv(dev); |
1229 | const struct platform_device_id *id_entry = | ||
1230 | platform_get_device_id(fep->pdev); | ||
1159 | int i; | 1231 | int i; |
1232 | u32 val, temp_mac[2]; | ||
1160 | 1233 | ||
1161 | /* Whack a reset. We should wait for this. */ | 1234 | /* Whack a reset. We should wait for this. */ |
1162 | writel(1, fep->hwp + FEC_ECNTRL); | 1235 | writel(1, fep->hwp + FEC_ECNTRL); |
1163 | udelay(10); | 1236 | udelay(10); |
1164 | 1237 | ||
1238 | /* | ||
1239 | * enet-mac reset will reset mac address registers too, | ||
1240 | * so need to reconfigure it. | ||
1241 | */ | ||
1242 | if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) { | ||
1243 | memcpy(&temp_mac, dev->dev_addr, ETH_ALEN); | ||
1244 | writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW); | ||
1245 | writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH); | ||
1246 | } | ||
1247 | |||
1165 | /* Clear any outstanding interrupt. */ | 1248 | /* Clear any outstanding interrupt. */ |
1166 | writel(0xffc00000, fep->hwp + FEC_IEVENT); | 1249 | writel(0xffc00000, fep->hwp + FEC_IEVENT); |
1167 | 1250 | ||
@@ -1208,20 +1291,45 @@ fec_restart(struct net_device *dev, int duplex) | |||
1208 | /* Set MII speed */ | 1291 | /* Set MII speed */ |
1209 | writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); | 1292 | writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); |
1210 | 1293 | ||
1211 | #ifdef FEC_MIIGSK_ENR | 1294 | /* |
1212 | if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) { | 1295 | * The phy interface and speed need to get configured |
1213 | /* disable the gasket and wait */ | 1296 | * differently on enet-mac. |
1214 | writel(0, fep->hwp + FEC_MIIGSK_ENR); | 1297 | */ |
1215 | while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4) | 1298 | if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) { |
1216 | udelay(1); | 1299 | val = readl(fep->hwp + FEC_R_CNTRL); |
1217 | 1300 | ||
1218 | /* configure the gasket: RMII, 50 MHz, no loopback, no echo */ | 1301 | /* MII or RMII */ |
1219 | writel(1, fep->hwp + FEC_MIIGSK_CFGR); | 1302 | if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) |
1303 | val |= (1 << 8); | ||
1304 | else | ||
1305 | val &= ~(1 << 8); | ||
1220 | 1306 | ||
1221 | /* re-enable the gasket */ | 1307 | /* 10M or 100M */ |
1222 | writel(2, fep->hwp + FEC_MIIGSK_ENR); | 1308 | if (fep->phy_dev && fep->phy_dev->speed == SPEED_100) |
1223 | } | 1309 | val &= ~(1 << 9); |
1310 | else | ||
1311 | val |= (1 << 9); | ||
1312 | |||
1313 | writel(val, fep->hwp + FEC_R_CNTRL); | ||
1314 | } else { | ||
1315 | #ifdef FEC_MIIGSK_ENR | ||
1316 | if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) { | ||
1317 | /* disable the gasket and wait */ | ||
1318 | writel(0, fep->hwp + FEC_MIIGSK_ENR); | ||
1319 | while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4) | ||
1320 | udelay(1); | ||
1321 | |||
1322 | /* | ||
1323 | * configure the gasket: | ||
1324 | * RMII, 50 MHz, no loopback, no echo | ||
1325 | */ | ||
1326 | writel(1, fep->hwp + FEC_MIIGSK_CFGR); | ||
1327 | |||
1328 | /* re-enable the gasket */ | ||
1329 | writel(2, fep->hwp + FEC_MIIGSK_ENR); | ||
1330 | } | ||
1224 | #endif | 1331 | #endif |
1332 | } | ||
1225 | 1333 | ||
1226 | /* And last, enable the transmit and receive processing */ | 1334 | /* And last, enable the transmit and receive processing */ |
1227 | writel(2, fep->hwp + FEC_ECNTRL); | 1335 | writel(2, fep->hwp + FEC_ECNTRL); |
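[Editorial note, not part of the commit] For ENET-MAC parts the PHY interface type and speed are no longer configured through the MIIGSK gasket but through two bits of R_CNTRL, as the hunk above shows with raw shifts. Summarised (the bit names are illustrative, taken from the behaviour rather than a header):

	/*
	 *   R_CNTRL bit 8: 1 = RMII interface, 0 = MII
	 *   R_CNTRL bit 9: 1 = 10 Mbit/s,      0 = 100 Mbit/s
	 *
	 * e.g. RMII at 100 Mbit/s:  val |= (1 << 8);  val &= ~(1 << 9);
	 */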
@@ -1316,7 +1424,7 @@ fec_probe(struct platform_device *pdev) | |||
1316 | } | 1424 | } |
1317 | clk_enable(fep->clk); | 1425 | clk_enable(fep->clk); |
1318 | 1426 | ||
1319 | ret = fec_enet_init(ndev, 0); | 1427 | ret = fec_enet_init(ndev); |
1320 | if (ret) | 1428 | if (ret) |
1321 | goto failed_init; | 1429 | goto failed_init; |
1322 | 1430 | ||
@@ -1380,8 +1488,10 @@ fec_suspend(struct device *dev) | |||
1380 | 1488 | ||
1381 | if (ndev) { | 1489 | if (ndev) { |
1382 | fep = netdev_priv(ndev); | 1490 | fep = netdev_priv(ndev); |
1383 | if (netif_running(ndev)) | 1491 | if (netif_running(ndev)) { |
1384 | fec_enet_close(ndev); | 1492 | fec_stop(ndev); |
1493 | netif_device_detach(ndev); | ||
1494 | } | ||
1385 | clk_disable(fep->clk); | 1495 | clk_disable(fep->clk); |
1386 | } | 1496 | } |
1387 | return 0; | 1497 | return 0; |
@@ -1396,8 +1506,10 @@ fec_resume(struct device *dev) | |||
1396 | if (ndev) { | 1506 | if (ndev) { |
1397 | fep = netdev_priv(ndev); | 1507 | fep = netdev_priv(ndev); |
1398 | clk_enable(fep->clk); | 1508 | clk_enable(fep->clk); |
1399 | if (netif_running(ndev)) | 1509 | if (netif_running(ndev)) { |
1400 | fec_enet_open(ndev); | 1510 | fec_restart(ndev, fep->full_duplex); |
1511 | netif_device_attach(ndev); | ||
1512 | } | ||
1401 | } | 1513 | } |
1402 | return 0; | 1514 | return 0; |
1403 | } | 1515 | } |
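The suspend/resume hunks above stop tearing the interface down with fec_enet_close()/fec_enet_open() and instead quiesce the MAC and detach the net device, restarting and re-attaching it on resume. A rough outline of that pattern, assuming fec_stop() and fec_restart() are the driver's own stop/start helpers; illustrative, not a drop-in replacement.

	/* outline of the detach/attach suspend pattern; not the actual driver code */
	#include <linux/netdevice.h>
	#include <linux/clk.h>

	void fec_stop(struct net_device *ndev);              /* driver-local helper */
	void fec_restart(struct net_device *ndev, int duplex); /* driver-local helper */

	static int example_suspend(struct net_device *ndev, struct clk *clk)
	{
		if (netif_running(ndev)) {
			fec_stop(ndev);            /* quiesce hardware first */
			netif_device_detach(ndev); /* then tell the stack it is gone */
		}
		clk_disable(clk);
		return 0;
	}

	static int example_resume(struct net_device *ndev, struct clk *clk, int full_duplex)
	{
		clk_enable(clk);
		if (netif_running(ndev)) {
			fec_restart(ndev, full_duplex); /* reprogram before TX resumes */
			netif_device_attach(ndev);
		}
		return 0;
	}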
@@ -1414,12 +1526,13 @@ static const struct dev_pm_ops fec_pm_ops = { | |||
1414 | 1526 | ||
1415 | static struct platform_driver fec_driver = { | 1527 | static struct platform_driver fec_driver = { |
1416 | .driver = { | 1528 | .driver = { |
1417 | .name = "fec", | 1529 | .name = DRIVER_NAME, |
1418 | .owner = THIS_MODULE, | 1530 | .owner = THIS_MODULE, |
1419 | #ifdef CONFIG_PM | 1531 | #ifdef CONFIG_PM |
1420 | .pm = &fec_pm_ops, | 1532 | .pm = &fec_pm_ops, |
1421 | #endif | 1533 | #endif |
1422 | }, | 1534 | }, |
1535 | .id_table = fec_devtype, | ||
1423 | .probe = fec_probe, | 1536 | .probe = fec_probe, |
1424 | .remove = __devexit_p(fec_drv_remove), | 1537 | .remove = __devexit_p(fec_drv_remove), |
1425 | }; | 1538 | }; |
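The new .id_table lets the platform core match several device names and hand the driver a per-variant driver_data word; fec_restart() above tests that word for FEC_QUIRK_ENET_MAC. A sketch of how such a table is typically consumed in probe via platform_get_device_id(); the device names and quirk value here are illustrative.

	/* sketch of platform_device_id driver_data quirks; names/values illustrative */
	#include <linux/platform_device.h>

	#define FEC_QUIRK_ENET_MAC (1 << 0)  /* assumed value: ENET-MAC register layout */

	static struct platform_device_id example_devtype[] = {
		{ .name = "fec", .driver_data = 0 },
		{ .name = "example-enet", .driver_data = FEC_QUIRK_ENET_MAC },
		{ /* sentinel */ }
	};

	static int example_probe(struct platform_device *pdev)
	{
		const struct platform_device_id *id_entry = platform_get_device_id(pdev);

		if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
			/* take the enet-mac specific configuration path */
		}
		return 0;
	}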
diff --git a/drivers/net/fec.h b/drivers/net/fec.h index 2c48b25668d5..ace318df4c8d 100644 --- a/drivers/net/fec.h +++ b/drivers/net/fec.h | |||
@@ -14,7 +14,8 @@ | |||
14 | /****************************************************************************/ | 14 | /****************************************************************************/ |
15 | 15 | ||
16 | #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ | 16 | #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ |
17 | defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARCH_MXC) | 17 | defined(CONFIG_M520x) || defined(CONFIG_M532x) || \ |
18 | defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28) | ||
18 | /* | 19 | /* |
19 | * Just figures, Motorola would have to change the offsets for | 20 | * Just figures, Motorola would have to change the offsets for |
20 | * registers in the same peripheral device on different models | 21 | * registers in the same peripheral device on different models |
@@ -78,7 +79,7 @@ | |||
78 | /* | 79 | /* |
79 | * Define the buffer descriptor structure. | 80 | * Define the buffer descriptor structure. |
80 | */ | 81 | */ |
81 | #ifdef CONFIG_ARCH_MXC | 82 | #if defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28) |
82 | struct bufdesc { | 83 | struct bufdesc { |
83 | unsigned short cbd_datlen; /* Data length */ | 84 | unsigned short cbd_datlen; /* Data length */ |
84 | unsigned short cbd_sc; /* Control and status info */ | 85 | unsigned short cbd_sc; /* Control and status info */ |
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c index cd2d72d825df..9c0b1bac6af6 100644 --- a/drivers/net/forcedeth.c +++ b/drivers/net/forcedeth.c | |||
@@ -3949,6 +3949,7 @@ static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo) | |||
3949 | writel(flags, base + NvRegWakeUpFlags); | 3949 | writel(flags, base + NvRegWakeUpFlags); |
3950 | spin_unlock_irq(&np->lock); | 3950 | spin_unlock_irq(&np->lock); |
3951 | } | 3951 | } |
3952 | device_set_wakeup_enable(&np->pci_dev->dev, np->wolenabled); | ||
3952 | return 0; | 3953 | return 0; |
3953 | } | 3954 | } |
3954 | 3955 | ||
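The hunk above makes the ethtool WoL handler propagate the setting to the PM core with device_set_wakeup_enable(), replacing the unconditional device_init_wakeup() that the next hunk removes from probe. A rough sketch of that handler shape, with a hypothetical private struct standing in for the driver's fe_priv; only the device_set_wakeup_enable() call mirrors the patch.

	/* sketch of keeping PM-core wakeup state in sync with the NIC WoL setting */
	#include <linux/ethtool.h>
	#include <linux/netdevice.h>
	#include <linux/pci.h>

	struct example_priv {
		struct pci_dev *pci_dev;
		int wolenabled;
	};

	static int example_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
	{
		struct example_priv *np = netdev_priv(dev);

		np->wolenabled = (wolinfo->wolopts & WAKE_MAGIC) ? 1 : 0;
		/* ...program the NIC's wake-up flag register here... */

		/* keep the PM core's view in sync with the hardware setting */
		device_set_wakeup_enable(&np->pci_dev->dev, np->wolenabled);
		return 0;
	}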
@@ -5488,14 +5489,10 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i | |||
5488 | /* set mac address */ | 5489 | /* set mac address */ |
5489 | nv_copy_mac_to_hw(dev); | 5490 | nv_copy_mac_to_hw(dev); |
5490 | 5491 | ||
5491 | /* Workaround current PCI init glitch: wakeup bits aren't | ||
5492 | * being set from PCI PM capability. | ||
5493 | */ | ||
5494 | device_init_wakeup(&pci_dev->dev, 1); | ||
5495 | |||
5496 | /* disable WOL */ | 5492 | /* disable WOL */ |
5497 | writel(0, base + NvRegWakeUpFlags); | 5493 | writel(0, base + NvRegWakeUpFlags); |
5498 | np->wolenabled = 0; | 5494 | np->wolenabled = 0; |
5495 | device_set_wakeup_enable(&pci_dev->dev, false); | ||
5499 | 5496 | ||
5500 | if (id->driver_data & DEV_HAS_POWER_CNTRL) { | 5497 | if (id->driver_data & DEV_HAS_POWER_CNTRL) { |
5501 | 5498 | ||
@@ -5648,6 +5645,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i | |||
5648 | goto out_error; | 5645 | goto out_error; |
5649 | } | 5646 | } |
5650 | 5647 | ||
5648 | netif_carrier_off(dev); | ||
5649 | |||
5651 | dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n", | 5650 | dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n", |
5652 | dev->name, np->phy_oui, np->phyaddr, dev->dev_addr); | 5651 | dev->name, np->phy_oui, np->phyaddr, dev->dev_addr); |
5653 | 5652 | ||
@@ -5746,8 +5745,9 @@ static void __devexit nv_remove(struct pci_dev *pci_dev) | |||
5746 | } | 5745 | } |
5747 | 5746 | ||
5748 | #ifdef CONFIG_PM | 5747 | #ifdef CONFIG_PM |
5749 | static int nv_suspend(struct pci_dev *pdev, pm_message_t state) | 5748 | static int nv_suspend(struct device *device) |
5750 | { | 5749 | { |
5750 | struct pci_dev *pdev = to_pci_dev(device); | ||
5751 | struct net_device *dev = pci_get_drvdata(pdev); | 5751 | struct net_device *dev = pci_get_drvdata(pdev); |
5752 | struct fe_priv *np = netdev_priv(dev); | 5752 | struct fe_priv *np = netdev_priv(dev); |
5753 | u8 __iomem *base = get_hwbase(dev); | 5753 | u8 __iomem *base = get_hwbase(dev); |
@@ -5763,25 +5763,17 @@ static int nv_suspend(struct pci_dev *pdev, pm_message_t state) | |||
5763 | for (i = 0; i <= np->register_size/sizeof(u32); i++) | 5763 | for (i = 0; i <= np->register_size/sizeof(u32); i++) |
5764 | np->saved_config_space[i] = readl(base + i*sizeof(u32)); | 5764 | np->saved_config_space[i] = readl(base + i*sizeof(u32)); |
5765 | 5765 | ||
5766 | pci_save_state(pdev); | ||
5767 | pci_enable_wake(pdev, pci_choose_state(pdev, state), np->wolenabled); | ||
5768 | pci_disable_device(pdev); | ||
5769 | pci_set_power_state(pdev, pci_choose_state(pdev, state)); | ||
5770 | return 0; | 5766 | return 0; |
5771 | } | 5767 | } |
5772 | 5768 | ||
5773 | static int nv_resume(struct pci_dev *pdev) | 5769 | static int nv_resume(struct device *device) |
5774 | { | 5770 | { |
5771 | struct pci_dev *pdev = to_pci_dev(device); | ||
5775 | struct net_device *dev = pci_get_drvdata(pdev); | 5772 | struct net_device *dev = pci_get_drvdata(pdev); |
5776 | struct fe_priv *np = netdev_priv(dev); | 5773 | struct fe_priv *np = netdev_priv(dev); |
5777 | u8 __iomem *base = get_hwbase(dev); | 5774 | u8 __iomem *base = get_hwbase(dev); |
5778 | int i, rc = 0; | 5775 | int i, rc = 0; |
5779 | 5776 | ||
5780 | pci_set_power_state(pdev, PCI_D0); | ||
5781 | pci_restore_state(pdev); | ||
5782 | /* ack any pending wake events, disable PME */ | ||
5783 | pci_enable_wake(pdev, PCI_D0, 0); | ||
5784 | |||
5785 | /* restore non-pci configuration space */ | 5777 | /* restore non-pci configuration space */ |
5786 | for (i = 0; i <= np->register_size/sizeof(u32); i++) | 5778 | for (i = 0; i <= np->register_size/sizeof(u32); i++) |
5787 | writel(np->saved_config_space[i], base+i*sizeof(u32)); | 5779 | writel(np->saved_config_space[i], base+i*sizeof(u32)); |
@@ -5800,6 +5792,9 @@ static int nv_resume(struct pci_dev *pdev) | |||
5800 | return rc; | 5792 | return rc; |
5801 | } | 5793 | } |
5802 | 5794 | ||
5795 | static SIMPLE_DEV_PM_OPS(nv_pm_ops, nv_suspend, nv_resume); | ||
5796 | #define NV_PM_OPS (&nv_pm_ops) | ||
5797 | |||
5803 | static void nv_shutdown(struct pci_dev *pdev) | 5798 | static void nv_shutdown(struct pci_dev *pdev) |
5804 | { | 5799 | { |
5805 | struct net_device *dev = pci_get_drvdata(pdev); | 5800 | struct net_device *dev = pci_get_drvdata(pdev); |
@@ -5822,15 +5817,13 @@ static void nv_shutdown(struct pci_dev *pdev) | |||
5822 | * only put the device into D3 if we really go for poweroff. | 5817 | * only put the device into D3 if we really go for poweroff. |
5823 | */ | 5818 | */ |
5824 | if (system_state == SYSTEM_POWER_OFF) { | 5819 | if (system_state == SYSTEM_POWER_OFF) { |
5825 | if (pci_enable_wake(pdev, PCI_D3cold, np->wolenabled)) | 5820 | pci_wake_from_d3(pdev, np->wolenabled); |
5826 | pci_enable_wake(pdev, PCI_D3hot, np->wolenabled); | ||
5827 | pci_set_power_state(pdev, PCI_D3hot); | 5821 | pci_set_power_state(pdev, PCI_D3hot); |
5828 | } | 5822 | } |
5829 | } | 5823 | } |
5830 | #else | 5824 | #else |
5831 | #define nv_suspend NULL | 5825 | #define NV_PM_OPS NULL |
5832 | #define nv_shutdown NULL | 5826 | #define nv_shutdown NULL |
5833 | #define nv_resume NULL | ||
5834 | #endif /* CONFIG_PM */ | 5827 | #endif /* CONFIG_PM */ |
5835 | 5828 | ||
5836 | static DEFINE_PCI_DEVICE_TABLE(pci_tbl) = { | 5829 | static DEFINE_PCI_DEVICE_TABLE(pci_tbl) = { |
@@ -6002,9 +5995,8 @@ static struct pci_driver driver = { | |||
6002 | .id_table = pci_tbl, | 5995 | .id_table = pci_tbl, |
6003 | .probe = nv_probe, | 5996 | .probe = nv_probe, |
6004 | .remove = __devexit_p(nv_remove), | 5997 | .remove = __devexit_p(nv_remove), |
6005 | .suspend = nv_suspend, | ||
6006 | .resume = nv_resume, | ||
6007 | .shutdown = nv_shutdown, | 5998 | .shutdown = nv_shutdown, |
5999 | .driver.pm = NV_PM_OPS, | ||
6008 | }; | 6000 | }; |
6009 | 6001 | ||
6010 | static int __init init_nic(void) | 6002 | static int __init init_nic(void) |
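The forcedeth hunks above convert the legacy PCI .suspend/.resume entry points to dev_pm_ops: pci_save_state(), pci_enable_wake() and the D-state transitions move into the PCI core, and the driver callbacks only touch device-specific state. The general shape of that conversion; the function bodies here are placeholders, not the forcedeth logic.

	/* sketch of the dev_pm_ops conversion pattern; bodies are placeholders */
	#include <linux/pci.h>
	#include <linux/pm.h>
	#include <linux/netdevice.h>

	static int example_suspend(struct device *device)
	{
		struct pci_dev *pdev = to_pci_dev(device);
		struct net_device *ndev = pci_get_drvdata(pdev);

		netif_device_detach(ndev);   /* save driver-private state here */
		return 0;
	}

	static int example_resume(struct device *device)
	{
		struct pci_dev *pdev = to_pci_dev(device);
		struct net_device *ndev = pci_get_drvdata(pdev);

		netif_device_attach(ndev);   /* restore driver-private state here */
		return 0;
	}

	static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);

	static struct pci_driver example_driver = {
		/* .name, .id_table, .probe, .remove as before */
		.driver.pm = &example_pm_ops,
	};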
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c index d684f187de57..7a1f3d0ffa78 100644 --- a/drivers/net/fs_enet/fs_enet-main.c +++ b/drivers/net/fs_enet/fs_enet-main.c | |||
@@ -40,6 +40,7 @@ | |||
40 | #include <linux/of_mdio.h> | 40 | #include <linux/of_mdio.h> |
41 | #include <linux/of_platform.h> | 41 | #include <linux/of_platform.h> |
42 | #include <linux/of_gpio.h> | 42 | #include <linux/of_gpio.h> |
43 | #include <linux/of_net.h> | ||
43 | 44 | ||
44 | #include <linux/vmalloc.h> | 45 | #include <linux/vmalloc.h> |
45 | #include <asm/pgtable.h> | 46 | #include <asm/pgtable.h> |
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index 45c4b7bfcf39..5ed8f9f9419f 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c | |||
@@ -95,6 +95,7 @@ | |||
95 | #include <linux/phy.h> | 95 | #include <linux/phy.h> |
96 | #include <linux/phy_fixed.h> | 96 | #include <linux/phy_fixed.h> |
97 | #include <linux/of.h> | 97 | #include <linux/of.h> |
98 | #include <linux/of_net.h> | ||
98 | 99 | ||
99 | #include "gianfar.h" | 100 | #include "gianfar.h" |
100 | #include "fsl_pq_mdio.h" | 101 | #include "fsl_pq_mdio.h" |
@@ -433,7 +434,6 @@ static void gfar_init_mac(struct net_device *ndev) | |||
433 | static struct net_device_stats *gfar_get_stats(struct net_device *dev) | 434 | static struct net_device_stats *gfar_get_stats(struct net_device *dev) |
434 | { | 435 | { |
435 | struct gfar_private *priv = netdev_priv(dev); | 436 | struct gfar_private *priv = netdev_priv(dev); |
436 | struct netdev_queue *txq; | ||
437 | unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0; | 437 | unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0; |
438 | unsigned long tx_packets = 0, tx_bytes = 0; | 438 | unsigned long tx_packets = 0, tx_bytes = 0; |
439 | int i = 0; | 439 | int i = 0; |
@@ -449,9 +449,8 @@ static struct net_device_stats *gfar_get_stats(struct net_device *dev) | |||
449 | dev->stats.rx_dropped = rx_dropped; | 449 | dev->stats.rx_dropped = rx_dropped; |
450 | 450 | ||
451 | for (i = 0; i < priv->num_tx_queues; i++) { | 451 | for (i = 0; i < priv->num_tx_queues; i++) { |
452 | txq = netdev_get_tx_queue(dev, i); | 452 | tx_bytes += priv->tx_queue[i]->stats.tx_bytes; |
453 | tx_bytes += txq->tx_bytes; | 453 | tx_packets += priv->tx_queue[i]->stats.tx_packets; |
454 | tx_packets += txq->tx_packets; | ||
455 | } | 454 | } |
456 | 455 | ||
457 | dev->stats.tx_bytes = tx_bytes; | 456 | dev->stats.tx_bytes = tx_bytes; |
@@ -1921,7 +1920,7 @@ int startup_gfar(struct net_device *ndev) | |||
1921 | if (err) { | 1920 | if (err) { |
1922 | for (j = 0; j < i; j++) | 1921 | for (j = 0; j < i; j++) |
1923 | free_grp_irqs(&priv->gfargrp[j]); | 1922 | free_grp_irqs(&priv->gfargrp[j]); |
1924 | goto irq_fail; | 1923 | goto irq_fail; |
1925 | } | 1924 | } |
1926 | } | 1925 | } |
1927 | 1926 | ||
@@ -2108,8 +2107,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2108 | } | 2107 | } |
2109 | 2108 | ||
2110 | /* Update transmit stats */ | 2109 | /* Update transmit stats */ |
2111 | txq->tx_bytes += skb->len; | 2110 | tx_queue->stats.tx_bytes += skb->len; |
2112 | txq->tx_packets ++; | 2111 | tx_queue->stats.tx_packets++; |
2113 | 2112 | ||
2114 | txbdp = txbdp_start = tx_queue->cur_tx; | 2113 | txbdp = txbdp_start = tx_queue->cur_tx; |
2115 | lstatus = txbdp->lstatus; | 2114 | lstatus = txbdp->lstatus; |
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h index 68984eb88ae0..54de4135e932 100644 --- a/drivers/net/gianfar.h +++ b/drivers/net/gianfar.h | |||
@@ -907,12 +907,21 @@ enum { | |||
907 | MQ_MG_MODE | 907 | MQ_MG_MODE |
908 | }; | 908 | }; |
909 | 909 | ||
910 | /* | ||
911 | * Per TX queue stats | ||
912 | */ | ||
913 | struct tx_q_stats { | ||
914 | unsigned long tx_packets; | ||
915 | unsigned long tx_bytes; | ||
916 | }; | ||
917 | |||
910 | /** | 918 | /** |
911 | * struct gfar_priv_tx_q - per tx queue structure | 919 | * struct gfar_priv_tx_q - per tx queue structure |
912 | * @txlock: per queue tx spin lock | 920 | * @txlock: per queue tx spin lock |
913 | * @tx_skbuff:skb pointers | 921 | * @tx_skbuff:skb pointers |
914 | * @skb_curtx: to be used skb pointer | 922 | * @skb_curtx: to be used skb pointer |
915 | * @skb_dirtytx:the last used skb pointer | 923 | * @skb_dirtytx:the last used skb pointer |
924 | * @stats: bytes/packets stats | ||
916 | * @qindex: index of this queue | 925 | * @qindex: index of this queue |
917 | * @dev: back pointer to the dev structure | 926 | * @dev: back pointer to the dev structure |
918 | * @grp: back pointer to the group to which this queue belongs | 927 | * @grp: back pointer to the group to which this queue belongs |
@@ -934,6 +943,7 @@ struct gfar_priv_tx_q { | |||
934 | struct txbd8 *tx_bd_base; | 943 | struct txbd8 *tx_bd_base; |
935 | struct txbd8 *cur_tx; | 944 | struct txbd8 *cur_tx; |
936 | struct txbd8 *dirty_tx; | 945 | struct txbd8 *dirty_tx; |
946 | struct tx_q_stats stats; | ||
937 | struct net_device *dev; | 947 | struct net_device *dev; |
938 | struct gfar_priv_grp *grp; | 948 | struct gfar_priv_grp *grp; |
939 | u16 skb_curtx; | 949 | u16 skb_curtx; |
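The gianfar hunks stop borrowing tx_bytes/tx_packets from struct netdev_queue and instead keep a small per-queue counter struct in the driver, summed in gfar_get_stats(). A standalone (plain userspace C) illustration of that accounting, with simplified structure names.

	/* standalone illustration of per-queue TX accounting and aggregation */
	#include <stdio.h>

	struct tx_q_stats {
		unsigned long tx_packets;
		unsigned long tx_bytes;
	};

	#define NUM_TX_QUEUES 4
	static struct tx_q_stats tx_queue[NUM_TX_QUEUES];

	/* called for every transmitted frame in the real driver's start_xmit */
	static void account_tx(int queue, unsigned long len)
	{
		tx_queue[queue].tx_bytes += len;
		tx_queue[queue].tx_packets++;
	}

	int main(void)
	{
		unsigned long tx_bytes = 0, tx_packets = 0;
		int i;

		account_tx(0, 1514);
		account_tx(3, 60);

		/* aggregation as done in gfar_get_stats() */
		for (i = 0; i < NUM_TX_QUEUES; i++) {
			tx_bytes += tx_queue[i].tx_bytes;
			tx_packets += tx_queue[i].tx_packets;
		}
		printf("%lu packets, %lu bytes\n", tx_packets, tx_bytes);
		return 0;
	}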
diff --git a/drivers/net/greth.c b/drivers/net/greth.c index 27d6960ce09e..fdb0333f5cb6 100644 --- a/drivers/net/greth.c +++ b/drivers/net/greth.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * Aeroflex Gaisler GRETH 10/100/1G Ethernet MAC. | 2 | * Aeroflex Gaisler GRETH 10/100/1G Ethernet MAC. |
3 | * | 3 | * |
4 | * 2005-2009 (c) Aeroflex Gaisler AB | 4 | * 2005-2010 (c) Aeroflex Gaisler AB |
5 | * | 5 | * |
6 | * This driver supports GRETH 10/100 and GRETH 10/100/1G Ethernet MACs | 6 | * This driver supports GRETH 10/100 and GRETH 10/100/1G Ethernet MACs |
7 | * available in the GRLIB VHDL IP core library. | 7 | * available in the GRLIB VHDL IP core library. |
@@ -356,6 +356,8 @@ static int greth_open(struct net_device *dev) | |||
356 | dev_dbg(&dev->dev, " starting queue\n"); | 356 | dev_dbg(&dev->dev, " starting queue\n"); |
357 | netif_start_queue(dev); | 357 | netif_start_queue(dev); |
358 | 358 | ||
359 | GRETH_REGSAVE(greth->regs->status, 0xFF); | ||
360 | |||
359 | napi_enable(&greth->napi); | 361 | napi_enable(&greth->napi); |
360 | 362 | ||
361 | greth_enable_irqs(greth); | 363 | greth_enable_irqs(greth); |
@@ -371,7 +373,9 @@ static int greth_close(struct net_device *dev) | |||
371 | 373 | ||
372 | napi_disable(&greth->napi); | 374 | napi_disable(&greth->napi); |
373 | 375 | ||
376 | greth_disable_irqs(greth); | ||
374 | greth_disable_tx(greth); | 377 | greth_disable_tx(greth); |
378 | greth_disable_rx(greth); | ||
375 | 379 | ||
376 | netif_stop_queue(dev); | 380 | netif_stop_queue(dev); |
377 | 381 | ||
@@ -388,12 +392,20 @@ greth_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
388 | struct greth_private *greth = netdev_priv(dev); | 392 | struct greth_private *greth = netdev_priv(dev); |
389 | struct greth_bd *bdp; | 393 | struct greth_bd *bdp; |
390 | int err = NETDEV_TX_OK; | 394 | int err = NETDEV_TX_OK; |
391 | u32 status, dma_addr; | 395 | u32 status, dma_addr, ctrl; |
396 | unsigned long flags; | ||
392 | 397 | ||
393 | bdp = greth->tx_bd_base + greth->tx_next; | 398 | /* Clean TX Ring */ |
399 | greth_clean_tx(greth->netdev); | ||
394 | 400 | ||
395 | if (unlikely(greth->tx_free <= 0)) { | 401 | if (unlikely(greth->tx_free <= 0)) { |
402 | spin_lock_irqsave(&greth->devlock, flags);/*save from poll/irq*/ | ||
403 | ctrl = GRETH_REGLOAD(greth->regs->control); | ||
404 | /* Enable TX IRQ only if not already in poll() routine */ | ||
405 | if (ctrl & GRETH_RXI) | ||
406 | GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI); | ||
396 | netif_stop_queue(dev); | 407 | netif_stop_queue(dev); |
408 | spin_unlock_irqrestore(&greth->devlock, flags); | ||
397 | return NETDEV_TX_BUSY; | 409 | return NETDEV_TX_BUSY; |
398 | } | 410 | } |
399 | 411 | ||
@@ -406,13 +418,14 @@ greth_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
406 | goto out; | 418 | goto out; |
407 | } | 419 | } |
408 | 420 | ||
421 | bdp = greth->tx_bd_base + greth->tx_next; | ||
409 | dma_addr = greth_read_bd(&bdp->addr); | 422 | dma_addr = greth_read_bd(&bdp->addr); |
410 | 423 | ||
411 | memcpy((unsigned char *) phys_to_virt(dma_addr), skb->data, skb->len); | 424 | memcpy((unsigned char *) phys_to_virt(dma_addr), skb->data, skb->len); |
412 | 425 | ||
413 | dma_sync_single_for_device(greth->dev, dma_addr, skb->len, DMA_TO_DEVICE); | 426 | dma_sync_single_for_device(greth->dev, dma_addr, skb->len, DMA_TO_DEVICE); |
414 | 427 | ||
415 | status = GRETH_BD_EN | (skb->len & GRETH_BD_LEN); | 428 | status = GRETH_BD_EN | GRETH_BD_IE | (skb->len & GRETH_BD_LEN); |
416 | 429 | ||
417 | /* Wrap around descriptor ring */ | 430 | /* Wrap around descriptor ring */ |
418 | if (greth->tx_next == GRETH_TXBD_NUM_MASK) { | 431 | if (greth->tx_next == GRETH_TXBD_NUM_MASK) { |
@@ -422,22 +435,11 @@ greth_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
422 | greth->tx_next = NEXT_TX(greth->tx_next); | 435 | greth->tx_next = NEXT_TX(greth->tx_next); |
423 | greth->tx_free--; | 436 | greth->tx_free--; |
424 | 437 | ||
425 | /* No more descriptors */ | ||
426 | if (unlikely(greth->tx_free == 0)) { | ||
427 | |||
428 | /* Free transmitted descriptors */ | ||
429 | greth_clean_tx(dev); | ||
430 | |||
431 | /* If nothing was cleaned, stop queue & wait for irq */ | ||
432 | if (unlikely(greth->tx_free == 0)) { | ||
433 | status |= GRETH_BD_IE; | ||
434 | netif_stop_queue(dev); | ||
435 | } | ||
436 | } | ||
437 | |||
438 | /* Write descriptor control word and enable transmission */ | 438 | /* Write descriptor control word and enable transmission */ |
439 | greth_write_bd(&bdp->stat, status); | 439 | greth_write_bd(&bdp->stat, status); |
440 | spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/ | ||
440 | greth_enable_tx(greth); | 441 | greth_enable_tx(greth); |
442 | spin_unlock_irqrestore(&greth->devlock, flags); | ||
441 | 443 | ||
442 | out: | 444 | out: |
443 | dev_kfree_skb(skb); | 445 | dev_kfree_skb(skb); |
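When the TX ring is full, the hunk above now arms the TX-completion interrupt and stops the queue under the device spinlock, and it only enables the TX IRQ if RX IRQs are currently on (i.e. the NAPI poll routine is not already in charge of interrupt handling). An outline of that path; it leans on the driver's own types and register macros and is only meant to show the locking and the control-register test.

	/* outline of the ring-full path; relies on greth driver-local definitions */
	static int example_ring_full(struct net_device *dev, struct greth_private *greth)
	{
		unsigned long flags;
		u32 ctrl;

		spin_lock_irqsave(&greth->devlock, flags);
		ctrl = GRETH_REGLOAD(greth->regs->control);

		/* arm TX-completion IRQ only if RX IRQs are on, i.e. poll() is not
		 * currently running and handling completions itself */
		if (ctrl & GRETH_RXI)
			GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI);

		netif_stop_queue(dev);
		spin_unlock_irqrestore(&greth->devlock, flags);

		return NETDEV_TX_BUSY;
	}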
@@ -450,13 +452,23 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev) | |||
450 | { | 452 | { |
451 | struct greth_private *greth = netdev_priv(dev); | 453 | struct greth_private *greth = netdev_priv(dev); |
452 | struct greth_bd *bdp; | 454 | struct greth_bd *bdp; |
453 | u32 status = 0, dma_addr; | 455 | u32 status = 0, dma_addr, ctrl; |
454 | int curr_tx, nr_frags, i, err = NETDEV_TX_OK; | 456 | int curr_tx, nr_frags, i, err = NETDEV_TX_OK; |
457 | unsigned long flags; | ||
455 | 458 | ||
456 | nr_frags = skb_shinfo(skb)->nr_frags; | 459 | nr_frags = skb_shinfo(skb)->nr_frags; |
457 | 460 | ||
461 | /* Clean TX Ring */ | ||
462 | greth_clean_tx_gbit(dev); | ||
463 | |||
458 | if (greth->tx_free < nr_frags + 1) { | 464 | if (greth->tx_free < nr_frags + 1) { |
465 | spin_lock_irqsave(&greth->devlock, flags);/*save from poll/irq*/ | ||
466 | ctrl = GRETH_REGLOAD(greth->regs->control); | ||
467 | /* Enable TX IRQ only if not already in poll() routine */ | ||
468 | if (ctrl & GRETH_RXI) | ||
469 | GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI); | ||
459 | netif_stop_queue(dev); | 470 | netif_stop_queue(dev); |
471 | spin_unlock_irqrestore(&greth->devlock, flags); | ||
460 | err = NETDEV_TX_BUSY; | 472 | err = NETDEV_TX_BUSY; |
461 | goto out; | 473 | goto out; |
462 | } | 474 | } |
@@ -499,7 +511,7 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev) | |||
499 | greth->tx_skbuff[curr_tx] = NULL; | 511 | greth->tx_skbuff[curr_tx] = NULL; |
500 | bdp = greth->tx_bd_base + curr_tx; | 512 | bdp = greth->tx_bd_base + curr_tx; |
501 | 513 | ||
502 | status = GRETH_TXBD_CSALL; | 514 | status = GRETH_TXBD_CSALL | GRETH_BD_EN; |
503 | status |= frag->size & GRETH_BD_LEN; | 515 | status |= frag->size & GRETH_BD_LEN; |
504 | 516 | ||
505 | /* Wrap around descriptor ring */ | 517 | /* Wrap around descriptor ring */ |
@@ -509,14 +521,8 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev) | |||
509 | /* More fragments left */ | 521 | /* More fragments left */ |
510 | if (i < nr_frags - 1) | 522 | if (i < nr_frags - 1) |
511 | status |= GRETH_TXBD_MORE; | 523 | status |= GRETH_TXBD_MORE; |
512 | 524 | else | |
513 | /* ... last fragment, check if out of descriptors */ | 525 | status |= GRETH_BD_IE; /* enable IRQ on last fragment */ |
514 | else if (greth->tx_free - nr_frags - 1 < (MAX_SKB_FRAGS + 1)) { | ||
515 | |||
516 | /* Enable interrupts and stop queue */ | ||
517 | status |= GRETH_BD_IE; | ||
518 | netif_stop_queue(dev); | ||
519 | } | ||
520 | 526 | ||
521 | greth_write_bd(&bdp->stat, status); | 527 | greth_write_bd(&bdp->stat, status); |
522 | 528 | ||
@@ -536,26 +542,29 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev) | |||
536 | 542 | ||
537 | wmb(); | 543 | wmb(); |
538 | 544 | ||
539 | /* Enable the descriptors that we configured ... */ | 545 | /* Enable the descriptor chain by enabling the first descriptor */ |
540 | for (i = 0; i < nr_frags + 1; i++) { | 546 | bdp = greth->tx_bd_base + greth->tx_next; |
541 | bdp = greth->tx_bd_base + greth->tx_next; | 547 | greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN); |
542 | greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN); | 548 | greth->tx_next = curr_tx; |
543 | greth->tx_next = NEXT_TX(greth->tx_next); | 549 | greth->tx_free -= nr_frags + 1; |
544 | greth->tx_free--; | ||
545 | } | ||
546 | 550 | ||
551 | wmb(); | ||
552 | |||
553 | spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/ | ||
547 | greth_enable_tx(greth); | 554 | greth_enable_tx(greth); |
555 | spin_unlock_irqrestore(&greth->devlock, flags); | ||
548 | 556 | ||
549 | return NETDEV_TX_OK; | 557 | return NETDEV_TX_OK; |
550 | 558 | ||
551 | frag_map_error: | 559 | frag_map_error: |
552 | /* Unmap SKB mappings that succeeded */ | 560 | /* Unmap SKB mappings that succeeded and disable descriptor */ |
553 | for (i = 0; greth->tx_next + i != curr_tx; i++) { | 561 | for (i = 0; greth->tx_next + i != curr_tx; i++) { |
554 | bdp = greth->tx_bd_base + greth->tx_next + i; | 562 | bdp = greth->tx_bd_base + greth->tx_next + i; |
555 | dma_unmap_single(greth->dev, | 563 | dma_unmap_single(greth->dev, |
556 | greth_read_bd(&bdp->addr), | 564 | greth_read_bd(&bdp->addr), |
557 | greth_read_bd(&bdp->stat) & GRETH_BD_LEN, | 565 | greth_read_bd(&bdp->stat) & GRETH_BD_LEN, |
558 | DMA_TO_DEVICE); | 566 | DMA_TO_DEVICE); |
567 | greth_write_bd(&bdp->stat, 0); | ||
559 | } | 568 | } |
560 | map_error: | 569 | map_error: |
561 | if (net_ratelimit()) | 570 | if (net_ratelimit()) |
@@ -565,12 +574,11 @@ out: | |||
565 | return err; | 574 | return err; |
566 | } | 575 | } |
567 | 576 | ||
568 | |||
569 | static irqreturn_t greth_interrupt(int irq, void *dev_id) | 577 | static irqreturn_t greth_interrupt(int irq, void *dev_id) |
570 | { | 578 | { |
571 | struct net_device *dev = dev_id; | 579 | struct net_device *dev = dev_id; |
572 | struct greth_private *greth; | 580 | struct greth_private *greth; |
573 | u32 status; | 581 | u32 status, ctrl; |
574 | irqreturn_t retval = IRQ_NONE; | 582 | irqreturn_t retval = IRQ_NONE; |
575 | 583 | ||
576 | greth = netdev_priv(dev); | 584 | greth = netdev_priv(dev); |
@@ -580,13 +588,15 @@ static irqreturn_t greth_interrupt(int irq, void *dev_id) | |||
580 | /* Get the interrupt events that caused us to be here. */ | 588 | /* Get the interrupt events that caused us to be here. */ |
581 | status = GRETH_REGLOAD(greth->regs->status); | 589 | status = GRETH_REGLOAD(greth->regs->status); |
582 | 590 | ||
583 | /* Handle rx and tx interrupts through poll */ | 591 | /* Must see if interrupts are enabled also, INT_TX|INT_RX flags may be |
584 | if (status & (GRETH_INT_RX | GRETH_INT_TX)) { | 592 | * set regardless of whether IRQ is enabled or not. Especially |
585 | 593 | * important when shared IRQ. | |
586 | /* Clear interrupt status */ | 594 | */ |
587 | GRETH_REGORIN(greth->regs->status, | 595 | ctrl = GRETH_REGLOAD(greth->regs->control); |
588 | status & (GRETH_INT_RX | GRETH_INT_TX)); | ||
589 | 596 | ||
597 | /* Handle rx and tx interrupts through poll */ | ||
598 | if (((status & (GRETH_INT_RE | GRETH_INT_RX)) && (ctrl & GRETH_RXI)) || | ||
599 | ((status & (GRETH_INT_TE | GRETH_INT_TX)) && (ctrl & GRETH_TXI))) { | ||
590 | retval = IRQ_HANDLED; | 600 | retval = IRQ_HANDLED; |
591 | 601 | ||
592 | /* Disable interrupts and schedule poll() */ | 602 | /* Disable interrupts and schedule poll() */ |
@@ -610,6 +620,8 @@ static void greth_clean_tx(struct net_device *dev) | |||
610 | 620 | ||
611 | while (1) { | 621 | while (1) { |
612 | bdp = greth->tx_bd_base + greth->tx_last; | 622 | bdp = greth->tx_bd_base + greth->tx_last; |
623 | GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX); | ||
624 | mb(); | ||
613 | stat = greth_read_bd(&bdp->stat); | 625 | stat = greth_read_bd(&bdp->stat); |
614 | 626 | ||
615 | if (unlikely(stat & GRETH_BD_EN)) | 627 | if (unlikely(stat & GRETH_BD_EN)) |
@@ -670,7 +682,10 @@ static void greth_clean_tx_gbit(struct net_device *dev) | |||
670 | 682 | ||
671 | /* We only clean fully completed SKBs */ | 683 | /* We only clean fully completed SKBs */ |
672 | bdp_last_frag = greth->tx_bd_base + SKIP_TX(greth->tx_last, nr_frags); | 684 | bdp_last_frag = greth->tx_bd_base + SKIP_TX(greth->tx_last, nr_frags); |
673 | stat = bdp_last_frag->stat; | 685 | |
686 | GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX); | ||
687 | mb(); | ||
688 | stat = greth_read_bd(&bdp_last_frag->stat); | ||
674 | 689 | ||
675 | if (stat & GRETH_BD_EN) | 690 | if (stat & GRETH_BD_EN) |
676 | break; | 691 | break; |
@@ -702,21 +717,9 @@ static void greth_clean_tx_gbit(struct net_device *dev) | |||
702 | greth->tx_free += nr_frags+1; | 717 | greth->tx_free += nr_frags+1; |
703 | dev_kfree_skb(skb); | 718 | dev_kfree_skb(skb); |
704 | } | 719 | } |
705 | if (greth->tx_free > (MAX_SKB_FRAGS + 1)) { | ||
706 | netif_wake_queue(dev); | ||
707 | } | ||
708 | } | ||
709 | 720 | ||
710 | static int greth_pending_packets(struct greth_private *greth) | 721 | if (netif_queue_stopped(dev) && (greth->tx_free > (MAX_SKB_FRAGS+1))) |
711 | { | 722 | netif_wake_queue(dev); |
712 | struct greth_bd *bdp; | ||
713 | u32 status; | ||
714 | bdp = greth->rx_bd_base + greth->rx_cur; | ||
715 | status = greth_read_bd(&bdp->stat); | ||
716 | if (status & GRETH_BD_EN) | ||
717 | return 0; | ||
718 | else | ||
719 | return 1; | ||
720 | } | 723 | } |
721 | 724 | ||
722 | static int greth_rx(struct net_device *dev, int limit) | 725 | static int greth_rx(struct net_device *dev, int limit) |
@@ -727,20 +730,24 @@ static int greth_rx(struct net_device *dev, int limit) | |||
727 | int pkt_len; | 730 | int pkt_len; |
728 | int bad, count; | 731 | int bad, count; |
729 | u32 status, dma_addr; | 732 | u32 status, dma_addr; |
733 | unsigned long flags; | ||
730 | 734 | ||
731 | greth = netdev_priv(dev); | 735 | greth = netdev_priv(dev); |
732 | 736 | ||
733 | for (count = 0; count < limit; ++count) { | 737 | for (count = 0; count < limit; ++count) { |
734 | 738 | ||
735 | bdp = greth->rx_bd_base + greth->rx_cur; | 739 | bdp = greth->rx_bd_base + greth->rx_cur; |
740 | GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX); | ||
741 | mb(); | ||
736 | status = greth_read_bd(&bdp->stat); | 742 | status = greth_read_bd(&bdp->stat); |
737 | dma_addr = greth_read_bd(&bdp->addr); | ||
738 | bad = 0; | ||
739 | 743 | ||
740 | if (unlikely(status & GRETH_BD_EN)) { | 744 | if (unlikely(status & GRETH_BD_EN)) { |
741 | break; | 745 | break; |
742 | } | 746 | } |
743 | 747 | ||
748 | dma_addr = greth_read_bd(&bdp->addr); | ||
749 | bad = 0; | ||
750 | |||
744 | /* Check status for errors. */ | 751 | /* Check status for errors. */ |
745 | if (unlikely(status & GRETH_RXBD_STATUS)) { | 752 | if (unlikely(status & GRETH_RXBD_STATUS)) { |
746 | if (status & GRETH_RXBD_ERR_FT) { | 753 | if (status & GRETH_RXBD_ERR_FT) { |
@@ -802,7 +809,9 @@ static int greth_rx(struct net_device *dev, int limit) | |||
802 | 809 | ||
803 | dma_sync_single_for_device(greth->dev, dma_addr, MAX_FRAME_SIZE, DMA_FROM_DEVICE); | 810 | dma_sync_single_for_device(greth->dev, dma_addr, MAX_FRAME_SIZE, DMA_FROM_DEVICE); |
804 | 811 | ||
812 | spin_lock_irqsave(&greth->devlock, flags); /* save from XMIT */ | ||
805 | greth_enable_rx(greth); | 813 | greth_enable_rx(greth); |
814 | spin_unlock_irqrestore(&greth->devlock, flags); | ||
806 | 815 | ||
807 | greth->rx_cur = NEXT_RX(greth->rx_cur); | 816 | greth->rx_cur = NEXT_RX(greth->rx_cur); |
808 | } | 817 | } |
@@ -836,6 +845,7 @@ static int greth_rx_gbit(struct net_device *dev, int limit) | |||
836 | int pkt_len; | 845 | int pkt_len; |
837 | int bad, count = 0; | 846 | int bad, count = 0; |
838 | u32 status, dma_addr; | 847 | u32 status, dma_addr; |
848 | unsigned long flags; | ||
839 | 849 | ||
840 | greth = netdev_priv(dev); | 850 | greth = netdev_priv(dev); |
841 | 851 | ||
@@ -843,6 +853,8 @@ static int greth_rx_gbit(struct net_device *dev, int limit) | |||
843 | 853 | ||
844 | bdp = greth->rx_bd_base + greth->rx_cur; | 854 | bdp = greth->rx_bd_base + greth->rx_cur; |
845 | skb = greth->rx_skbuff[greth->rx_cur]; | 855 | skb = greth->rx_skbuff[greth->rx_cur]; |
856 | GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX); | ||
857 | mb(); | ||
846 | status = greth_read_bd(&bdp->stat); | 858 | status = greth_read_bd(&bdp->stat); |
847 | bad = 0; | 859 | bad = 0; |
848 | 860 | ||
@@ -865,10 +877,9 @@ static int greth_rx_gbit(struct net_device *dev, int limit) | |||
865 | } | 877 | } |
866 | } | 878 | } |
867 | 879 | ||
868 | /* Allocate new skb to replace current */ | 880 | /* Allocate new skb to replace current, not needed if the |
869 | newskb = netdev_alloc_skb(dev, MAX_FRAME_SIZE + NET_IP_ALIGN); | 881 | * current skb can be reused */ |
870 | 882 | if (!bad && (newskb=netdev_alloc_skb(dev, MAX_FRAME_SIZE + NET_IP_ALIGN))) { | |
871 | if (!bad && newskb) { | ||
872 | skb_reserve(newskb, NET_IP_ALIGN); | 883 | skb_reserve(newskb, NET_IP_ALIGN); |
873 | 884 | ||
874 | dma_addr = dma_map_single(greth->dev, | 885 | dma_addr = dma_map_single(greth->dev, |
@@ -905,11 +916,22 @@ static int greth_rx_gbit(struct net_device *dev, int limit) | |||
905 | if (net_ratelimit()) | 916 | if (net_ratelimit()) |
906 | dev_warn(greth->dev, "Could not create DMA mapping, dropping packet\n"); | 917 | dev_warn(greth->dev, "Could not create DMA mapping, dropping packet\n"); |
907 | dev_kfree_skb(newskb); | 918 | dev_kfree_skb(newskb); |
919 | /* reusing current skb, so it is a drop */ | ||
908 | dev->stats.rx_dropped++; | 920 | dev->stats.rx_dropped++; |
909 | } | 921 | } |
922 | } else if (bad) { | ||
923 | /* Bad Frame transfer, the skb is reused */ | ||
924 | dev->stats.rx_dropped++; | ||
910 | } else { | 925 | } else { |
926 | /* Failed Allocating a new skb. This is rather stupid | ||
927 | * but the current "filled" skb is reused, as if | ||
928 | * transfer failure. One could argue that RX descriptor | ||
929 | * table handling should be divided into cleaning and | ||
930 | * filling as the TX part of the driver | ||
931 | */ | ||
911 | if (net_ratelimit()) | 932 | if (net_ratelimit()) |
912 | dev_warn(greth->dev, "Could not allocate SKB, dropping packet\n"); | 933 | dev_warn(greth->dev, "Could not allocate SKB, dropping packet\n"); |
934 | /* reusing current skb, so it is a drop */ | ||
913 | dev->stats.rx_dropped++; | 935 | dev->stats.rx_dropped++; |
914 | } | 936 | } |
915 | 937 | ||
@@ -920,7 +942,9 @@ static int greth_rx_gbit(struct net_device *dev, int limit) | |||
920 | 942 | ||
921 | wmb(); | 943 | wmb(); |
922 | greth_write_bd(&bdp->stat, status); | 944 | greth_write_bd(&bdp->stat, status); |
945 | spin_lock_irqsave(&greth->devlock, flags); | ||
923 | greth_enable_rx(greth); | 946 | greth_enable_rx(greth); |
947 | spin_unlock_irqrestore(&greth->devlock, flags); | ||
924 | greth->rx_cur = NEXT_RX(greth->rx_cur); | 948 | greth->rx_cur = NEXT_RX(greth->rx_cur); |
925 | } | 949 | } |
926 | 950 | ||
@@ -932,15 +956,18 @@ static int greth_poll(struct napi_struct *napi, int budget) | |||
932 | { | 956 | { |
933 | struct greth_private *greth; | 957 | struct greth_private *greth; |
934 | int work_done = 0; | 958 | int work_done = 0; |
959 | unsigned long flags; | ||
960 | u32 mask, ctrl; | ||
935 | greth = container_of(napi, struct greth_private, napi); | 961 | greth = container_of(napi, struct greth_private, napi); |
936 | 962 | ||
937 | if (greth->gbit_mac) { | 963 | restart_txrx_poll: |
938 | greth_clean_tx_gbit(greth->netdev); | 964 | if (netif_queue_stopped(greth->netdev)) { |
939 | } else { | 965 | if (greth->gbit_mac) |
940 | greth_clean_tx(greth->netdev); | 966 | greth_clean_tx_gbit(greth->netdev); |
967 | else | ||
968 | greth_clean_tx(greth->netdev); | ||
941 | } | 969 | } |
942 | 970 | ||
943 | restart_poll: | ||
944 | if (greth->gbit_mac) { | 971 | if (greth->gbit_mac) { |
945 | work_done += greth_rx_gbit(greth->netdev, budget - work_done); | 972 | work_done += greth_rx_gbit(greth->netdev, budget - work_done); |
946 | } else { | 973 | } else { |
@@ -949,15 +976,29 @@ restart_poll: | |||
949 | 976 | ||
950 | if (work_done < budget) { | 977 | if (work_done < budget) { |
951 | 978 | ||
952 | napi_complete(napi); | 979 | spin_lock_irqsave(&greth->devlock, flags); |
980 | |||
981 | ctrl = GRETH_REGLOAD(greth->regs->control); | ||
982 | if (netif_queue_stopped(greth->netdev)) { | ||
983 | GRETH_REGSAVE(greth->regs->control, | ||
984 | ctrl | GRETH_TXI | GRETH_RXI); | ||
985 | mask = GRETH_INT_RX | GRETH_INT_RE | | ||
986 | GRETH_INT_TX | GRETH_INT_TE; | ||
987 | } else { | ||
988 | GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_RXI); | ||
989 | mask = GRETH_INT_RX | GRETH_INT_RE; | ||
990 | } | ||
953 | 991 | ||
954 | if (greth_pending_packets(greth)) { | 992 | if (GRETH_REGLOAD(greth->regs->status) & mask) { |
955 | napi_reschedule(napi); | 993 | GRETH_REGSAVE(greth->regs->control, ctrl); |
956 | goto restart_poll; | 994 | spin_unlock_irqrestore(&greth->devlock, flags); |
995 | goto restart_txrx_poll; | ||
996 | } else { | ||
997 | __napi_complete(napi); | ||
998 | spin_unlock_irqrestore(&greth->devlock, flags); | ||
957 | } | 999 | } |
958 | } | 1000 | } |
959 | 1001 | ||
960 | greth_enable_irqs(greth); | ||
961 | return work_done; | 1002 | return work_done; |
962 | } | 1003 | } |
963 | 1004 | ||
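The rewritten greth_poll() above drops the old greth_pending_packets() check: before completing NAPI it re-enables interrupts under the lock and re-reads the status register, and if new events raced in it masks interrupts again and goes back to polling. A simplified outline of that completion sequence, reduced to the RX-only case (the real hunk also re-arms TX interrupts when the queue is stopped).

	/* outline of the poll-completion re-check, RX-only; relies on greth types */
	static int example_poll(struct napi_struct *napi, int budget)
	{
		struct greth_private *greth = container_of(napi, struct greth_private, napi);
		unsigned long flags;
		int work_done = 0;
		u32 ctrl;

	restart_poll:
		work_done += greth_rx(greth->netdev, budget - work_done);

		if (work_done < budget) {
			spin_lock_irqsave(&greth->devlock, flags);

			/* re-enable RX interrupts, then look for events that raced in */
			ctrl = GRETH_REGLOAD(greth->regs->control);
			GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_RXI);

			if (GRETH_REGLOAD(greth->regs->status) &
			    (GRETH_INT_RX | GRETH_INT_RE)) {
				GRETH_REGSAVE(greth->regs->control, ctrl); /* mask again */
				spin_unlock_irqrestore(&greth->devlock, flags);
				goto restart_poll;                         /* keep polling */
			}

			__napi_complete(napi);
			spin_unlock_irqrestore(&greth->devlock, flags);
		}
		return work_done;
	}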
@@ -1152,11 +1193,11 @@ static const struct ethtool_ops greth_ethtool_ops = { | |||
1152 | }; | 1193 | }; |
1153 | 1194 | ||
1154 | static struct net_device_ops greth_netdev_ops = { | 1195 | static struct net_device_ops greth_netdev_ops = { |
1155 | .ndo_open = greth_open, | 1196 | .ndo_open = greth_open, |
1156 | .ndo_stop = greth_close, | 1197 | .ndo_stop = greth_close, |
1157 | .ndo_start_xmit = greth_start_xmit, | 1198 | .ndo_start_xmit = greth_start_xmit, |
1158 | .ndo_set_mac_address = greth_set_mac_add, | 1199 | .ndo_set_mac_address = greth_set_mac_add, |
1159 | .ndo_validate_addr = eth_validate_addr, | 1200 | .ndo_validate_addr = eth_validate_addr, |
1160 | }; | 1201 | }; |
1161 | 1202 | ||
1162 | static inline int wait_for_mdio(struct greth_private *greth) | 1203 | static inline int wait_for_mdio(struct greth_private *greth) |
@@ -1217,29 +1258,26 @@ static void greth_link_change(struct net_device *dev) | |||
1217 | struct greth_private *greth = netdev_priv(dev); | 1258 | struct greth_private *greth = netdev_priv(dev); |
1218 | struct phy_device *phydev = greth->phy; | 1259 | struct phy_device *phydev = greth->phy; |
1219 | unsigned long flags; | 1260 | unsigned long flags; |
1220 | |||
1221 | int status_change = 0; | 1261 | int status_change = 0; |
1262 | u32 ctrl; | ||
1222 | 1263 | ||
1223 | spin_lock_irqsave(&greth->devlock, flags); | 1264 | spin_lock_irqsave(&greth->devlock, flags); |
1224 | 1265 | ||
1225 | if (phydev->link) { | 1266 | if (phydev->link) { |
1226 | 1267 | ||
1227 | if ((greth->speed != phydev->speed) || (greth->duplex != phydev->duplex)) { | 1268 | if ((greth->speed != phydev->speed) || (greth->duplex != phydev->duplex)) { |
1228 | 1269 | ctrl = GRETH_REGLOAD(greth->regs->control) & | |
1229 | GRETH_REGANDIN(greth->regs->control, | 1270 | ~(GRETH_CTRL_FD | GRETH_CTRL_SP | GRETH_CTRL_GB); |
1230 | ~(GRETH_CTRL_FD | GRETH_CTRL_SP | GRETH_CTRL_GB)); | ||
1231 | 1271 | ||
1232 | if (phydev->duplex) | 1272 | if (phydev->duplex) |
1233 | GRETH_REGORIN(greth->regs->control, GRETH_CTRL_FD); | 1273 | ctrl |= GRETH_CTRL_FD; |
1234 | |||
1235 | if (phydev->speed == SPEED_100) { | ||
1236 | |||
1237 | GRETH_REGORIN(greth->regs->control, GRETH_CTRL_SP); | ||
1238 | } | ||
1239 | 1274 | ||
1275 | if (phydev->speed == SPEED_100) | ||
1276 | ctrl |= GRETH_CTRL_SP; | ||
1240 | else if (phydev->speed == SPEED_1000) | 1277 | else if (phydev->speed == SPEED_1000) |
1241 | GRETH_REGORIN(greth->regs->control, GRETH_CTRL_GB); | 1278 | ctrl |= GRETH_CTRL_GB; |
1242 | 1279 | ||
1280 | GRETH_REGSAVE(greth->regs->control, ctrl); | ||
1243 | greth->speed = phydev->speed; | 1281 | greth->speed = phydev->speed; |
1244 | greth->duplex = phydev->duplex; | 1282 | greth->duplex = phydev->duplex; |
1245 | status_change = 1; | 1283 | status_change = 1; |
@@ -1600,6 +1638,9 @@ static struct of_device_id greth_of_match[] = { | |||
1600 | { | 1638 | { |
1601 | .name = "GAISLER_ETHMAC", | 1639 | .name = "GAISLER_ETHMAC", |
1602 | }, | 1640 | }, |
1641 | { | ||
1642 | .name = "01_01d", | ||
1643 | }, | ||
1603 | {}, | 1644 | {}, |
1604 | }; | 1645 | }; |
1605 | 1646 | ||
diff --git a/drivers/net/greth.h b/drivers/net/greth.h index 03ad903cd676..be0f2062bd14 100644 --- a/drivers/net/greth.h +++ b/drivers/net/greth.h | |||
@@ -23,6 +23,7 @@ | |||
23 | #define GRETH_BD_LEN 0x7FF | 23 | #define GRETH_BD_LEN 0x7FF |
24 | 24 | ||
25 | #define GRETH_TXEN 0x1 | 25 | #define GRETH_TXEN 0x1 |
26 | #define GRETH_INT_TE 0x2 | ||
26 | #define GRETH_INT_TX 0x8 | 27 | #define GRETH_INT_TX 0x8 |
27 | #define GRETH_TXI 0x4 | 28 | #define GRETH_TXI 0x4 |
28 | #define GRETH_TXBD_STATUS 0x0001C000 | 29 | #define GRETH_TXBD_STATUS 0x0001C000 |
@@ -35,6 +36,7 @@ | |||
35 | #define GRETH_TXBD_ERR_UE 0x4000 | 36 | #define GRETH_TXBD_ERR_UE 0x4000 |
36 | #define GRETH_TXBD_ERR_AL 0x8000 | 37 | #define GRETH_TXBD_ERR_AL 0x8000 |
37 | 38 | ||
39 | #define GRETH_INT_RE 0x1 | ||
38 | #define GRETH_INT_RX 0x4 | 40 | #define GRETH_INT_RX 0x4 |
39 | #define GRETH_RXEN 0x2 | 41 | #define GRETH_RXEN 0x2 |
40 | #define GRETH_RXI 0x8 | 42 | #define GRETH_RXI 0x8 |
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c index 4e7d1d0a2340..7d9ced0738c5 100644 --- a/drivers/net/hamradio/yam.c +++ b/drivers/net/hamradio/yam.c | |||
@@ -396,7 +396,7 @@ static unsigned char *add_mcs(unsigned char *bits, int bitrate, | |||
396 | while (p) { | 396 | while (p) { |
397 | if (p->bitrate == bitrate) { | 397 | if (p->bitrate == bitrate) { |
398 | memcpy(p->bits, bits, YAM_FPGA_SIZE); | 398 | memcpy(p->bits, bits, YAM_FPGA_SIZE); |
399 | return p->bits; | 399 | goto out; |
400 | } | 400 | } |
401 | p = p->next; | 401 | p = p->next; |
402 | } | 402 | } |
@@ -411,7 +411,7 @@ static unsigned char *add_mcs(unsigned char *bits, int bitrate, | |||
411 | p->bitrate = bitrate; | 411 | p->bitrate = bitrate; |
412 | p->next = yam_data; | 412 | p->next = yam_data; |
413 | yam_data = p; | 413 | yam_data = p; |
414 | 414 | out: | |
415 | release_firmware(fw); | 415 | release_firmware(fw); |
416 | return p->bits; | 416 | return p->bits; |
417 | } | 417 | } |
diff --git a/drivers/net/igbvf/vf.c b/drivers/net/igbvf/vf.c index 74486a8b009a..af3822f9ea9a 100644 --- a/drivers/net/igbvf/vf.c +++ b/drivers/net/igbvf/vf.c | |||
@@ -220,7 +220,7 @@ static u32 e1000_hash_mc_addr_vf(struct e1000_hw *hw, u8 *mc_addr) | |||
220 | * The parameter rar_count will usually be hw->mac.rar_entry_count | 220 | * The parameter rar_count will usually be hw->mac.rar_entry_count |
221 | * unless there are workarounds that change this. | 221 | * unless there are workarounds that change this. |
222 | **/ | 222 | **/ |
223 | void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, | 223 | static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, |
224 | u8 *mc_addr_list, u32 mc_addr_count, | 224 | u8 *mc_addr_list, u32 mc_addr_count, |
225 | u32 rar_used_count, u32 rar_count) | 225 | u32 rar_used_count, u32 rar_count) |
226 | { | 226 | { |
diff --git a/drivers/net/irda/bfin_sir.h b/drivers/net/irda/bfin_sir.h index b54a6f08db45..e3b285a67734 100644 --- a/drivers/net/irda/bfin_sir.h +++ b/drivers/net/irda/bfin_sir.h | |||
@@ -26,6 +26,8 @@ | |||
26 | #include <asm/cacheflush.h> | 26 | #include <asm/cacheflush.h> |
27 | #include <asm/dma.h> | 27 | #include <asm/dma.h> |
28 | #include <asm/portmux.h> | 28 | #include <asm/portmux.h> |
29 | #include <mach/bfin_serial_5xx.h> | ||
30 | #undef DRIVER_NAME | ||
29 | 31 | ||
30 | #ifdef CONFIG_SIR_BFIN_DMA | 32 | #ifdef CONFIG_SIR_BFIN_DMA |
31 | struct dma_rx_buf { | 33 | struct dma_rx_buf { |
diff --git a/drivers/net/irda/donauboe.h b/drivers/net/irda/donauboe.h index 4dc39e5f0156..77fcf4459161 100644 --- a/drivers/net/irda/donauboe.h +++ b/drivers/net/irda/donauboe.h | |||
@@ -30,7 +30,7 @@ | |||
30 | * or the type-DO IR port. | 30 | * or the type-DO IR port. |
31 | * | 31 | * |
32 | * IrDA chip set list from Toshiba Computer Engineering Corp. | 32 | * IrDA chip set list from Toshiba Computer Engineering Corp. |
33 | * model method maker controler Version | 33 | * model method maker controller Version |
34 | * Portege 320CT FIR,SIR Toshiba Oboe(Triangle) | 34 | * Portege 320CT FIR,SIR Toshiba Oboe(Triangle) |
35 | * Portege 3010CT FIR,SIR Toshiba Oboe(Sydney) | 35 | * Portege 3010CT FIR,SIR Toshiba Oboe(Sydney) |
36 | * Portege 3015CT FIR,SIR Toshiba Oboe(Sydney) | 36 | * Portege 3015CT FIR,SIR Toshiba Oboe(Sydney) |
diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c index 9e3f4f54281d..4488bd581eca 100644 --- a/drivers/net/irda/sh_irda.c +++ b/drivers/net/irda/sh_irda.c | |||
@@ -635,7 +635,7 @@ static int sh_irda_hard_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
635 | 635 | ||
636 | ret = sh_irda_set_baudrate(self, speed); | 636 | ret = sh_irda_set_baudrate(self, speed); |
637 | if (ret < 0) | 637 | if (ret < 0) |
638 | return ret; | 638 | goto sh_irda_hard_xmit_end; |
639 | 639 | ||
640 | self->tx_buff.len = 0; | 640 | self->tx_buff.len = 0; |
641 | if (skb->len) { | 641 | if (skb->len) { |
@@ -652,11 +652,21 @@ static int sh_irda_hard_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
652 | 652 | ||
653 | sh_irda_write(self, IRTFLR, self->tx_buff.len); | 653 | sh_irda_write(self, IRTFLR, self->tx_buff.len); |
654 | sh_irda_write(self, IRTCTR, ARMOD | TE); | 654 | sh_irda_write(self, IRTCTR, ARMOD | TE); |
655 | } | 655 | } else |
656 | goto sh_irda_hard_xmit_end; | ||
656 | 657 | ||
657 | dev_kfree_skb(skb); | 658 | dev_kfree_skb(skb); |
658 | 659 | ||
659 | return 0; | 660 | return 0; |
661 | |||
662 | sh_irda_hard_xmit_end: | ||
663 | sh_irda_set_baudrate(self, 9600); | ||
664 | netif_wake_queue(self->ndev); | ||
665 | sh_irda_rcv_ctrl(self, 1); | ||
666 | dev_kfree_skb(skb); | ||
667 | |||
668 | return ret; | ||
669 | |||
660 | } | 670 | } |
661 | 671 | ||
662 | static int sh_irda_ioctl(struct net_device *ndev, struct ifreq *ifreq, int cmd) | 672 | static int sh_irda_ioctl(struct net_device *ndev, struct ifreq *ifreq, int cmd) |
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h index 3ae30b8cb7d6..3b8c92463617 100644 --- a/drivers/net/ixgbe/ixgbe.h +++ b/drivers/net/ixgbe/ixgbe.h | |||
@@ -508,6 +508,8 @@ extern void ixgbe_free_rx_resources(struct ixgbe_ring *); | |||
508 | extern void ixgbe_free_tx_resources(struct ixgbe_ring *); | 508 | extern void ixgbe_free_tx_resources(struct ixgbe_ring *); |
509 | extern void ixgbe_configure_rx_ring(struct ixgbe_adapter *,struct ixgbe_ring *); | 509 | extern void ixgbe_configure_rx_ring(struct ixgbe_adapter *,struct ixgbe_ring *); |
510 | extern void ixgbe_configure_tx_ring(struct ixgbe_adapter *,struct ixgbe_ring *); | 510 | extern void ixgbe_configure_tx_ring(struct ixgbe_adapter *,struct ixgbe_ring *); |
511 | extern void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, | ||
512 | struct ixgbe_ring *); | ||
511 | extern void ixgbe_update_stats(struct ixgbe_adapter *adapter); | 513 | extern void ixgbe_update_stats(struct ixgbe_adapter *adapter); |
512 | extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter); | 514 | extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter); |
513 | extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter); | 515 | extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter); |
@@ -524,26 +526,13 @@ extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw); | |||
524 | extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc); | 526 | extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc); |
525 | extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc); | 527 | extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc); |
526 | extern s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, | 528 | extern s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, |
527 | struct ixgbe_atr_input *input, | 529 | union ixgbe_atr_hash_dword input, |
530 | union ixgbe_atr_hash_dword common, | ||
528 | u8 queue); | 531 | u8 queue); |
529 | extern s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw, | 532 | extern s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw, |
530 | struct ixgbe_atr_input *input, | 533 | union ixgbe_atr_input *input, |
531 | struct ixgbe_atr_input_masks *input_masks, | 534 | struct ixgbe_atr_input_masks *input_masks, |
532 | u16 soft_id, u8 queue); | 535 | u16 soft_id, u8 queue); |
533 | extern s32 ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input, | ||
534 | u16 vlan_id); | ||
535 | extern s32 ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input, | ||
536 | u32 src_addr); | ||
537 | extern s32 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input, | ||
538 | u32 dst_addr); | ||
539 | extern s32 ixgbe_atr_set_src_port_82599(struct ixgbe_atr_input *input, | ||
540 | u16 src_port); | ||
541 | extern s32 ixgbe_atr_set_dst_port_82599(struct ixgbe_atr_input *input, | ||
542 | u16 dst_port); | ||
543 | extern s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input, | ||
544 | u16 flex_byte); | ||
545 | extern s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input, | ||
546 | u8 l4type); | ||
547 | extern void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, | 536 | extern void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, |
548 | struct ixgbe_ring *ring); | 537 | struct ixgbe_ring *ring); |
549 | extern void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter, | 538 | extern void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter, |
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c index bfd3c227cd4a..a21f5817685b 100644 --- a/drivers/net/ixgbe/ixgbe_82599.c +++ b/drivers/net/ixgbe/ixgbe_82599.c | |||
@@ -1003,7 +1003,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw) | |||
1003 | udelay(10); | 1003 | udelay(10); |
1004 | } | 1004 | } |
1005 | if (i >= IXGBE_FDIRCMD_CMD_POLL) { | 1005 | if (i >= IXGBE_FDIRCMD_CMD_POLL) { |
1006 | hw_dbg(hw ,"Flow Director previous command isn't complete, " | 1006 | hw_dbg(hw, "Flow Director previous command isn't complete, " |
1007 | "aborting table re-initialization.\n"); | 1007 | "aborting table re-initialization.\n"); |
1008 | return IXGBE_ERR_FDIR_REINIT_FAILED; | 1008 | return IXGBE_ERR_FDIR_REINIT_FAILED; |
1009 | } | 1009 | } |
@@ -1079,7 +1079,7 @@ s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc) | |||
1079 | 1079 | ||
1080 | /* | 1080 | /* |
1081 | * The defaults in the HW for RX PB 1-7 are not zero and so should be | 1081 | * The defaults in the HW for RX PB 1-7 are not zero and so should be |
1082 | * intialized to zero for non DCB mode otherwise actual total RX PB | 1082 | * initialized to zero for non DCB mode otherwise actual total RX PB |
1083 | * would be bigger than programmed and filter space would run into | 1083 | * would be bigger than programmed and filter space would run into |
1084 | * the PB 0 region. | 1084 | * the PB 0 region. |
1085 | */ | 1085 | */ |
@@ -1113,13 +1113,10 @@ s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc) | |||
1113 | /* Move the flexible bytes to use the ethertype - shift 6 words */ | 1113 | /* Move the flexible bytes to use the ethertype - shift 6 words */ |
1114 | fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT); | 1114 | fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT); |
1115 | 1115 | ||
1116 | fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS; | ||
1117 | 1116 | ||
1118 | /* Prime the keys for hashing */ | 1117 | /* Prime the keys for hashing */ |
1119 | IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, | 1118 | IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY); |
1120 | htonl(IXGBE_ATR_BUCKET_HASH_KEY)); | 1119 | IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY); |
1121 | IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, | ||
1122 | htonl(IXGBE_ATR_SIGNATURE_HASH_KEY)); | ||
1123 | 1120 | ||
1124 | /* | 1121 | /* |
1125 | * Poll init-done after we write the register. Estimated times: | 1122 | * Poll init-done after we write the register. Estimated times: |
@@ -1170,7 +1167,7 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc) | |||
1170 | 1167 | ||
1171 | /* | 1168 | /* |
1172 | * The defaults in the HW for RX PB 1-7 are not zero and so should be | 1169 | * The defaults in the HW for RX PB 1-7 are not zero and so should be |
1173 | * intialized to zero for non DCB mode otherwise actual total RX PB | 1170 | * initialized to zero for non DCB mode otherwise actual total RX PB |
1174 | * would be bigger than programmed and filter space would run into | 1171 | * would be bigger than programmed and filter space would run into |
1175 | * the PB 0 region. | 1172 | * the PB 0 region. |
1176 | */ | 1173 | */ |
@@ -1209,10 +1206,8 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc) | |||
1209 | fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT); | 1206 | fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT); |
1210 | 1207 | ||
1211 | /* Prime the keys for hashing */ | 1208 | /* Prime the keys for hashing */ |
1212 | IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, | 1209 | IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY); |
1213 | htonl(IXGBE_ATR_BUCKET_HASH_KEY)); | 1210 | IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY); |
1214 | IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, | ||
1215 | htonl(IXGBE_ATR_SIGNATURE_HASH_KEY)); | ||
1216 | 1211 | ||
1217 | /* | 1212 | /* |
1218 | * Poll init-done after we write the register. Estimated times: | 1213 | * Poll init-done after we write the register. Estimated times: |
@@ -1251,8 +1246,8 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc) | |||
1251 | * @stream: input bitstream to compute the hash on | 1246 | * @stream: input bitstream to compute the hash on |
1252 | * @key: 32-bit hash key | 1247 | * @key: 32-bit hash key |
1253 | **/ | 1248 | **/ |
1254 | static u16 ixgbe_atr_compute_hash_82599(struct ixgbe_atr_input *atr_input, | 1249 | static u32 ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input, |
1255 | u32 key) | 1250 | u32 key) |
1256 | { | 1251 | { |
1257 | /* | 1252 | /* |
1258 | * The algorithm is as follows: | 1253 | * The algorithm is as follows: |
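The following hunk replaces the bit-serial hash loop with the word-wise form spelled out in the rewritten comment. A rough, standalone transcription of that pseudocode is below; it deliberately skips the byte-order handling and the VLAN/VM-pool folding the real ixgbe_atr_compute_hash_82599() performs, and uses arbitrary test data.

	/* standalone transcription of the pseudocode in the rewritten comment;
	 * not the driver function itself */
	#include <stdio.h>
	#include <stdint.h>

	/* 352-bit input stream as eleven 32-bit words, stream[0] = bits 31:0 */
	static uint16_t atr_hash_sketch(const uint32_t stream[11], uint32_t key)
	{
		uint32_t hi = 0, lo = 0;
		uint16_t hash = 0;
		int i;

		/* hi_hash_dword ^= every 32-bit word of the stream */
		for (i = 0; i < 11; i++)
			hi ^= stream[i];

		/* lo_hash_dword mixes the first stream word with both halves of hi */
		lo ^= stream[0] & 0xffff;
		lo ^= hi >> 16;
		lo ^= (hi & 0xffff) << 16;

		/* the top stream word is folded into hi once more */
		hi ^= stream[10];

		if (key & 1)
			hash ^= (uint16_t)(stream[0] & 0xffff);

		/* slide a 16-bit window across lo and hi for every set key bit */
		for (i = 0; i < 16; i++) {
			if (key & (1u << i))
				hash ^= (uint16_t)(lo >> i);
			if (key & (1u << (i + 16)))
				hash ^= (uint16_t)(hi >> i);
		}
		return hash;
	}

	int main(void)
	{
		uint32_t stream[11] = { 0xdeadbeef, 0x01020304, 0, 0, 0, 0,
					0, 0, 0, 0, 0x0a0b0c0d };
		/* arbitrary test key, not one of the hardware key constants */
		printf("hash = 0x%04x\n", atr_hash_sketch(stream, 0x12345678));
		return 0;
	}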
@@ -1272,410 +1267,250 @@ static u16 ixgbe_atr_compute_hash_82599(struct ixgbe_atr_input *atr_input, | |||
1272 | * To simplify for programming, the algorithm is implemented | 1267 | * To simplify for programming, the algorithm is implemented |
1273 | * in software this way: | 1268 | * in software this way: |
1274 | * | 1269 | * |
1275 | * Key[31:0], Stream[335:0] | 1270 | * key[31:0], hi_hash_dword[31:0], lo_hash_dword[31:0], hash[15:0] |
1271 | * | ||
1272 | * for (i = 0; i < 352; i+=32) | ||
1273 | * hi_hash_dword[31:0] ^= Stream[(i+31):i]; | ||
1274 | * | ||
1275 | * lo_hash_dword[15:0] ^= Stream[15:0]; | ||
1276 | * lo_hash_dword[15:0] ^= hi_hash_dword[31:16]; | ||
1277 | * lo_hash_dword[31:16] ^= hi_hash_dword[15:0]; | ||
1276 | * | 1278 | * |
1277 | * tmp_key[11 * 32 - 1:0] = 11{Key[31:0] = key concatenated 11 times | 1279 | * hi_hash_dword[31:0] ^= Stream[351:320]; |
1278 | * int_key[350:0] = tmp_key[351:1] | ||
1279 | * int_stream[365:0] = Stream[14:0] | Stream[335:0] | Stream[335:321] | ||
1280 | * | 1280 | * |
1281 | * hash[15:0] = 0; | 1281 | * if(key[0]) |
1282 | * for (i = 0; i < 351; i++) { | 1282 | * hash[15:0] ^= Stream[15:0]; |
1283 | * if (int_key[i]) | 1283 | * |
1284 | * hash ^= int_stream[(i + 15):i]; | 1284 | * for (i = 0; i < 16; i++) { |
1285 | * if (key[i]) | ||
1286 | * hash[15:0] ^= lo_hash_dword[(i+15):i]; | ||
1287 | * if (key[i + 16]) | ||
1288 | * hash[15:0] ^= hi_hash_dword[(i+15):i]; | ||
1285 | * } | 1289 | * } |
1290 | * | ||
1286 | */ | 1291 | */ |
1292 | __be32 common_hash_dword = 0; | ||
1293 | u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan; | ||
1294 | u32 hash_result = 0; | ||
1295 | u8 i; | ||
1287 | 1296 | ||
1288 | union { | 1297 | /* record the flow_vm_vlan bits as they are a key part to the hash */ |
1289 | u64 fill[6]; | 1298 | flow_vm_vlan = ntohl(atr_input->dword_stream[0]); |
1290 | u32 key[11]; | ||
1291 | u8 key_stream[44]; | ||
1292 | } tmp_key; | ||
1293 | 1299 | ||
1294 | u8 *stream = (u8 *)atr_input; | 1300 | /* generate common hash dword */ |
1295 | u8 int_key[44]; /* upper-most bit unused */ | 1301 | for (i = 10; i; i -= 2) |
1296 | u8 hash_str[46]; /* upper-most 2 bits unused */ | 1302 | common_hash_dword ^= atr_input->dword_stream[i] ^ |
1297 | u16 hash_result = 0; | 1303 | atr_input->dword_stream[i - 1]; |
1298 | int i, j, k, h; | ||
1299 | 1304 | ||
1300 | /* | 1305 | hi_hash_dword = ntohl(common_hash_dword); |
1301 | * Initialize the fill member to prevent warnings | ||
1302 | * on some compilers | ||
1303 | */ | ||
1304 | tmp_key.fill[0] = 0; | ||
1305 | 1306 | ||
1306 | /* First load the temporary key stream */ | 1307 | /* low dword is word swapped version of common */ |
1307 | for (i = 0; i < 6; i++) { | 1308 | lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16); |
1308 | u64 fillkey = ((u64)key << 32) | key; | ||
1309 | tmp_key.fill[i] = fillkey; | ||
1310 | } | ||
1311 | 1309 | ||
1312 | /* | 1310 | /* apply flow ID/VM pool/VLAN ID bits to hash words */ |
1313 | * Set the interim key for the hashing. Bit 352 is unused, so we must | 1311 | hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16); |
1314 | * shift and compensate when building the key. | ||
1315 | */ | ||
1316 | 1312 | ||
1317 | int_key[0] = tmp_key.key_stream[0] >> 1; | 1313 | /* Process bits 0 and 16 */ |
1318 | for (i = 1, j = 0; i < 44; i++) { | 1314 | if (key & 0x0001) hash_result ^= lo_hash_dword; |
1319 | unsigned int this_key = tmp_key.key_stream[j] << 7; | 1315 | if (key & 0x00010000) hash_result ^= hi_hash_dword; |
1320 | j++; | ||
1321 | int_key[i] = (u8)(this_key | (tmp_key.key_stream[j] >> 1)); | ||
1322 | } | ||
1323 | |||
1324 | /* | ||
1325 | * Set the interim bit string for the hashing. Bits 368 and 367 are | ||
1326 | * unused, so shift and compensate when building the string. | ||
1327 | */ | ||
1328 | hash_str[0] = (stream[40] & 0x7f) >> 1; | ||
1329 | for (i = 1, j = 40; i < 46; i++) { | ||
1330 | unsigned int this_str = stream[j] << 7; | ||
1331 | j++; | ||
1332 | if (j > 41) | ||
1333 | j = 0; | ||
1334 | hash_str[i] = (u8)(this_str | (stream[j] >> 1)); | ||
1335 | } | ||
1336 | 1316 | ||
1337 | /* | 1317 | /* |
1338 | * Now compute the hash. i is the index into hash_str, j is into our | 1318 | * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to |
1339 | * key stream, k is counting the number of bits, and h iterates within | 1319 | * delay this because bit 0 of the stream should not be processed |
1340 | * each byte. | 1320 | * so we do not add the vlan until after bit 0 was processed |
1341 | */ | 1321 | */ |
1342 | for (i = 45, j = 43, k = 0; k < 351 && i >= 2 && j >= 0; i--, j--) { | 1322 | lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16); |
1343 | for (h = 0; h < 8 && k < 351; h++, k++) { | ||
1344 | if (int_key[j] & (1 << h)) { | ||
1345 | /* | ||
1346 | * Key bit is set, XOR in the current 16-bit | ||
1347 | * string. Example of processing: | ||
1348 | * h = 0, | ||
1349 | * tmp = (hash_str[i - 2] & 0 << 16) | | ||
1350 | * (hash_str[i - 1] & 0xff << 8) | | ||
1351 | * (hash_str[i] & 0xff >> 0) | ||
1352 | * So tmp = hash_str[15 + k:k], since the | ||
1353 | * i + 2 clause rolls off the 16-bit value | ||
1354 | * h = 7, | ||
1355 | * tmp = (hash_str[i - 2] & 0x7f << 9) | | ||
1356 | * (hash_str[i - 1] & 0xff << 1) | | ||
1357 | * (hash_str[i] & 0x80 >> 7) | ||
1358 | */ | ||
1359 | int tmp = (hash_str[i] >> h); | ||
1360 | tmp |= (hash_str[i - 1] << (8 - h)); | ||
1361 | tmp |= (int)(hash_str[i - 2] & ((1 << h) - 1)) | ||
1362 | << (16 - h); | ||
1363 | hash_result ^= (u16)tmp; | ||
1364 | } | ||
1365 | } | ||
1366 | } | ||
1367 | |||
1368 | return hash_result; | ||
1369 | } | ||
1370 | |||
1371 | /** | ||
1372 | * ixgbe_atr_set_vlan_id_82599 - Sets the VLAN id in the ATR input stream | ||
1373 | * @input: input stream to modify | ||
1374 | * @vlan: the VLAN id to load | ||
1375 | **/ | ||
1376 | s32 ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input, u16 vlan) | ||
1377 | { | ||
1378 | input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] = vlan >> 8; | ||
1379 | input->byte_stream[IXGBE_ATR_VLAN_OFFSET] = vlan & 0xff; | ||
1380 | |||
1381 | return 0; | ||
1382 | } | ||
1383 | |||
1384 | /** | ||
1385 | * ixgbe_atr_set_src_ipv4_82599 - Sets the source IPv4 address | ||
1386 | * @input: input stream to modify | ||
1387 | * @src_addr: the IP address to load | ||
1388 | **/ | ||
1389 | s32 ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input, u32 src_addr) | ||
1390 | { | ||
1391 | input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] = src_addr >> 24; | ||
1392 | input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] = | ||
1393 | (src_addr >> 16) & 0xff; | ||
1394 | input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] = | ||
1395 | (src_addr >> 8) & 0xff; | ||
1396 | input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET] = src_addr & 0xff; | ||
1397 | |||
1398 | return 0; | ||
1399 | } | ||
1400 | |||
1401 | /** | ||
1402 | * ixgbe_atr_set_dst_ipv4_82599 - Sets the destination IPv4 address | ||
1403 | * @input: input stream to modify | ||
1404 | * @dst_addr: the IP address to load | ||
1405 | **/ | ||
1406 | s32 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 dst_addr) | ||
1407 | { | ||
1408 | input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] = dst_addr >> 24; | ||
1409 | input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] = | ||
1410 | (dst_addr >> 16) & 0xff; | ||
1411 | input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] = | ||
1412 | (dst_addr >> 8) & 0xff; | ||
1413 | input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET] = dst_addr & 0xff; | ||
1414 | |||
1415 | return 0; | ||
1416 | } | ||
1417 | 1323 | ||
1418 | /** | ||
1419 | * ixgbe_atr_set_src_port_82599 - Sets the source port | ||
1420 | * @input: input stream to modify | ||
1421 | * @src_port: the source port to load | ||
1422 | **/ | ||
1423 | s32 ixgbe_atr_set_src_port_82599(struct ixgbe_atr_input *input, u16 src_port) | ||
1424 | { | ||
1425 | input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1] = src_port >> 8; | ||
1426 | input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] = src_port & 0xff; | ||
1427 | |||
1428 | return 0; | ||
1429 | } | ||
1430 | |||
1431 | /** | ||
1432 | * ixgbe_atr_set_dst_port_82599 - Sets the destination port | ||
1433 | * @input: input stream to modify | ||
1434 | * @dst_port: the destination port to load | ||
1435 | **/ | ||
1436 | s32 ixgbe_atr_set_dst_port_82599(struct ixgbe_atr_input *input, u16 dst_port) | ||
1437 | { | ||
1438 | input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1] = dst_port >> 8; | ||
1439 | input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] = dst_port & 0xff; | ||
1440 | |||
1441 | return 0; | ||
1442 | } | ||
1443 | |||
1444 | /** | ||
1445 | * ixgbe_atr_set_flex_byte_82599 - Sets the flexible bytes | ||
1446 | * @input: input stream to modify | ||
1447 | * @flex_bytes: the flexible bytes to load | ||
1448 | **/ | ||
1449 | s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input, u16 flex_byte) | ||
1450 | { | ||
1451 | input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] = flex_byte >> 8; | ||
1452 | input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET] = flex_byte & 0xff; | ||
1453 | |||
1454 | return 0; | ||
1455 | } | ||
1456 | |||
1457 | /** | ||
1458 | * ixgbe_atr_set_l4type_82599 - Sets the layer 4 packet type | ||
1459 | * @input: input stream to modify | ||
1460 | * @l4type: the layer 4 type value to load | ||
1461 | **/ | ||
1462 | s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input, u8 l4type) | ||
1463 | { | ||
1464 | input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET] = l4type; | ||
1465 | |||
1466 | return 0; | ||
1467 | } | ||
1468 | |||
1469 | /** | ||
1470 | * ixgbe_atr_get_vlan_id_82599 - Gets the VLAN id from the ATR input stream | ||
1471 | * @input: input stream to search | ||
1472 | * @vlan: the VLAN id to load | ||
1473 | **/ | ||
1474 | static s32 ixgbe_atr_get_vlan_id_82599(struct ixgbe_atr_input *input, u16 *vlan) | ||
1475 | { | ||
1476 | *vlan = input->byte_stream[IXGBE_ATR_VLAN_OFFSET]; | ||
1477 | *vlan |= input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] << 8; | ||
1478 | |||
1479 | return 0; | ||
1480 | } | ||
1481 | |||
1482 | /** | ||
1483 | * ixgbe_atr_get_src_ipv4_82599 - Gets the source IPv4 address | ||
1484 | * @input: input stream to search | ||
1485 | * @src_addr: the IP address to load | ||
1486 | **/ | ||
1487 | static s32 ixgbe_atr_get_src_ipv4_82599(struct ixgbe_atr_input *input, | ||
1488 | u32 *src_addr) | ||
1489 | { | ||
1490 | *src_addr = input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET]; | ||
1491 | *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] << 8; | ||
1492 | *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] << 16; | ||
1493 | *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] << 24; | ||
1494 | |||
1495 | return 0; | ||
1496 | } | ||
1497 | 1324 | ||
1498 | /** | 1325 | /* process the remaining 30 bits in the key 2 bits at a time */ |
1499 | * ixgbe_atr_get_dst_ipv4_82599 - Gets the destination IPv4 address | 1326 | for (i = 15; i; i-- ) { |
1500 | * @input: input stream to search | 1327 | if (key & (0x0001 << i)) hash_result ^= lo_hash_dword >> i; |
1501 | * @dst_addr: the IP address to load | 1328 | if (key & (0x00010000 << i)) hash_result ^= hi_hash_dword >> i; |
1502 | **/ | 1329 | } |
1503 | static s32 ixgbe_atr_get_dst_ipv4_82599(struct ixgbe_atr_input *input, | ||
1504 | u32 *dst_addr) | ||
1505 | { | ||
1506 | *dst_addr = input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET]; | ||
1507 | *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] << 8; | ||
1508 | *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] << 16; | ||
1509 | *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] << 24; | ||
1510 | 1330 | ||
1511 | return 0; | 1331 | return hash_result & IXGBE_ATR_HASH_MASK; |
1512 | } | 1332 | } |
1513 | 1333 | ||
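For readers following the pseudo-code comment above, here is a minimal user-space sketch of the simplified hash loop. It assumes the eleven stream dwords have already been converted to CPU byte order and takes the 15-bit hash mask as a parameter, since the value of IXGBE_ATR_HASH_MASK is not part of this hunk; the function name and standalone form are illustrative only, not driver code.

#include <stdint.h>

static uint32_t atr_hash_sketch(const uint32_t stream[11], uint32_t key,
				uint32_t hash_mask)
{
	uint32_t flow_vm_vlan = stream[0];
	uint32_t common = 0, hi, lo, hash = 0;
	int i;

	/* fold dwords 1..10 into a single common dword */
	for (i = 10; i; i -= 2)
		common ^= stream[i] ^ stream[i - 1];

	hi = common;
	lo = (hi >> 16) | (hi << 16);	/* word-swapped copy of the common dword */

	/* flow ID/VM pool/VLAN bits are folded into the hi dword up front */
	hi ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);

	/* bit 0 of the stream must be hashed before the VLAN reaches lo */
	if (key & 0x0001)
		hash ^= lo;
	if (key & 0x00010000)
		hash ^= hi;

	lo ^= flow_vm_vlan ^ (flow_vm_vlan << 16);

	/* remaining 30 key bits, two per iteration as in the driver loop */
	for (i = 15; i; i--) {
		if (key & (0x0001u << i))
			hash ^= lo >> i;
		if (key & (0x00010000u << i))
			hash ^= hi >> i;
	}

	return hash & hash_mask;
}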
1514 | /** | 1334 | /* |
1515 | * ixgbe_atr_get_src_ipv6_82599 - Gets the source IPv6 address | 1335 | * These defines allow us to quickly generate all of the necessary instructions |
1516 | * @input: input stream to search | 1336 | * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION |
1517 | * @src_addr_1: the first 4 bytes of the IP address to load | 1337 | * for values 0 through 15 |
1518 | * @src_addr_2: the second 4 bytes of the IP address to load | 1338 | */ |
1519 | * @src_addr_3: the third 4 bytes of the IP address to load | 1339 | #define IXGBE_ATR_COMMON_HASH_KEY \ |
1520 | * @src_addr_4: the fourth 4 bytes of the IP address to load | 1340 | (IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY) |
1521 | **/ | 1341 | #define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \ |
1522 | static s32 ixgbe_atr_get_src_ipv6_82599(struct ixgbe_atr_input *input, | 1342 | do { \ |
1523 | u32 *src_addr_1, u32 *src_addr_2, | 1343 | u32 n = (_n); \ |
1524 | u32 *src_addr_3, u32 *src_addr_4) | 1344 | if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \ |
1525 | { | 1345 | common_hash ^= lo_hash_dword >> n; \ |
1526 | *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 12]; | 1346 | else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \ |
1527 | *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 13] << 8; | 1347 | bucket_hash ^= lo_hash_dword >> n; \ |
1528 | *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 14] << 16; | 1348 | else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \ |
1529 | *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 15] << 24; | 1349 | sig_hash ^= lo_hash_dword << (16 - n); \ |
1530 | 1350 | if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \ | |
1531 | *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 8]; | 1351 | common_hash ^= hi_hash_dword >> n; \ |
1532 | *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 9] << 8; | 1352 | else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \ |
1533 | *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 10] << 16; | 1353 | bucket_hash ^= hi_hash_dword >> n; \ |
1534 | *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 11] << 24; | 1354 | else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \ |
1535 | 1355 | sig_hash ^= hi_hash_dword << (16 - n); \ | |
1536 | *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 4]; | 1356 | } while (0); |
1537 | *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 5] << 8; | ||
1538 | *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 6] << 16; | ||
1539 | *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 7] << 24; | ||
1540 | |||
1541 | *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET]; | ||
1542 | *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 1] << 8; | ||
1543 | *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 2] << 16; | ||
1544 | *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 3] << 24; | ||
1545 | |||
1546 | return 0; | ||
1547 | } | ||
1548 | 1357 | ||
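Because IXGBE_ATR_COMMON_HASH_KEY is defined above as the bitwise AND of the bucket and signature keys, every set key bit feeds exactly one of the three partial hashes, which is why the else-if chain in the macro cannot double-count a contribution. A small user-space sketch of that classification follows; the two key values are stand-ins chosen for illustration, not a statement of the real key constants.

#include <stdint.h>
#include <stdio.h>

#define BUCKET_KEY_SKETCH	0x3DAD14E2u	/* illustrative value only */
#define SIG_KEY_SKETCH		0x174D3614u	/* illustrative value only */
#define COMMON_KEY_SKETCH	(BUCKET_KEY_SKETCH & SIG_KEY_SKETCH)

int main(void)
{
	int n;

	for (n = 0; n < 32; n++) {
		uint32_t bit = 1u << n;

		if (COMMON_KEY_SKETCH & bit)
			printf("key bit %2d -> common (used by both hashes)\n", n);
		else if (BUCKET_KEY_SKETCH & bit)
			printf("key bit %2d -> bucket hash only\n", n);
		else if (SIG_KEY_SKETCH & bit)
			printf("key bit %2d -> signature hash only\n", n);
	}
	return 0;
}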
1549 | /** | 1358 | /** |
1550 | * ixgbe_atr_get_src_port_82599 - Gets the source port | 1359 | * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash |
1551 | * @input: input stream to modify | 1360 | * @stream: input bitstream to compute the hash on |
1552 | * @src_port: the source port to load | ||
1553 | * | 1361 | * |
1554 | * Even though the input is given in big-endian, the FDIRPORT registers | 1362 | * This function is almost identical to the function above but contains |
1555 | * expect the ports to be programmed in little-endian. Hence the need to swap | 1363 | * several optimizations such as unwinding all of the loops, letting the |
1556 | * endianness when retrieving the data. This can be confusing since the | 1364 | * compiler work out all of the conditional ifs since the keys are static |
1557 | * internal hash engine expects it to be big-endian. | 1365 | * defines, and computing two keys at once since the hashed dword stream |
1366 | * will be the same for both keys. | ||
1558 | **/ | 1367 | **/ |
1559 | static s32 ixgbe_atr_get_src_port_82599(struct ixgbe_atr_input *input, | 1368 | static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input, |
1560 | u16 *src_port) | 1369 | union ixgbe_atr_hash_dword common) |
1561 | { | 1370 | { |
1562 | *src_port = input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] << 8; | 1371 | u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan; |
1563 | *src_port |= input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1]; | 1372 | u32 sig_hash = 0, bucket_hash = 0, common_hash = 0; |
1564 | 1373 | ||
1565 | return 0; | 1374 | /* record the flow_vm_vlan bits as they are a key part to the hash */ |
1566 | } | 1375 | flow_vm_vlan = ntohl(input.dword); |
1567 | 1376 | ||
1568 | /** | 1377 | /* generate common hash dword */ |
1569 | * ixgbe_atr_get_dst_port_82599 - Gets the destination port | 1378 | hi_hash_dword = ntohl(common.dword); |
1570 | * @input: input stream to modify | ||
1571 | * @dst_port: the destination port to load | ||
1572 | * | ||
1573 | * Even though the input is given in big-endian, the FDIRPORT registers | ||
1574 | * expect the ports to be programmed in little-endian. Hence the need to swap | ||
1575 | * endianness when retrieving the data. This can be confusing since the | ||
1576 | * internal hash engine expects it to be big-endian. | ||
1577 | **/ | ||
1578 | static s32 ixgbe_atr_get_dst_port_82599(struct ixgbe_atr_input *input, | ||
1579 | u16 *dst_port) | ||
1580 | { | ||
1581 | *dst_port = input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] << 8; | ||
1582 | *dst_port |= input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1]; | ||
1583 | 1379 | ||
1584 | return 0; | 1380 | /* low dword is word swapped version of common */ |
1585 | } | 1381 | lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16); |
1586 | 1382 | ||
1587 | /** | 1383 | /* apply flow ID/VM pool/VLAN ID bits to hash words */ |
1588 | * ixgbe_atr_get_flex_byte_82599 - Gets the flexible bytes | 1384 | hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16); |
1589 | * @input: input stream to modify | ||
1590 | * @flex_bytes: the flexible bytes to load | ||
1591 | **/ | ||
1592 | static s32 ixgbe_atr_get_flex_byte_82599(struct ixgbe_atr_input *input, | ||
1593 | u16 *flex_byte) | ||
1594 | { | ||
1595 | *flex_byte = input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET]; | ||
1596 | *flex_byte |= input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] << 8; | ||
1597 | 1385 | ||
1598 | return 0; | 1386 | /* Process bits 0 and 16 */ |
1599 | } | 1387 | IXGBE_COMPUTE_SIG_HASH_ITERATION(0); |
1600 | 1388 | ||
1601 | /** | 1389 | /* |
1602 | * ixgbe_atr_get_l4type_82599 - Gets the layer 4 packet type | 1390 | * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to |
1603 | * @input: input stream to modify | 1391 | * delay this because bit 0 of the stream should not be processed |
1604 | * @l4type: the layer 4 type value to load | 1392 | * so we do not add the vlan until after bit 0 was processed |
1605 | **/ | 1393 | */ |
1606 | static s32 ixgbe_atr_get_l4type_82599(struct ixgbe_atr_input *input, | 1394 | lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16); |
1607 | u8 *l4type) | 1395 | |
1608 | { | 1396 | /* Process remaining 30 bit of the key */ |
1609 | *l4type = input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET]; | 1397 | IXGBE_COMPUTE_SIG_HASH_ITERATION(1); |
1398 | IXGBE_COMPUTE_SIG_HASH_ITERATION(2); | ||
1399 | IXGBE_COMPUTE_SIG_HASH_ITERATION(3); | ||
1400 | IXGBE_COMPUTE_SIG_HASH_ITERATION(4); | ||
1401 | IXGBE_COMPUTE_SIG_HASH_ITERATION(5); | ||
1402 | IXGBE_COMPUTE_SIG_HASH_ITERATION(6); | ||
1403 | IXGBE_COMPUTE_SIG_HASH_ITERATION(7); | ||
1404 | IXGBE_COMPUTE_SIG_HASH_ITERATION(8); | ||
1405 | IXGBE_COMPUTE_SIG_HASH_ITERATION(9); | ||
1406 | IXGBE_COMPUTE_SIG_HASH_ITERATION(10); | ||
1407 | IXGBE_COMPUTE_SIG_HASH_ITERATION(11); | ||
1408 | IXGBE_COMPUTE_SIG_HASH_ITERATION(12); | ||
1409 | IXGBE_COMPUTE_SIG_HASH_ITERATION(13); | ||
1410 | IXGBE_COMPUTE_SIG_HASH_ITERATION(14); | ||
1411 | IXGBE_COMPUTE_SIG_HASH_ITERATION(15); | ||
1412 | |||
1413 | /* combine common_hash result with signature and bucket hashes */ | ||
1414 | bucket_hash ^= common_hash; | ||
1415 | bucket_hash &= IXGBE_ATR_HASH_MASK; | ||
1610 | 1416 | ||
1611 | return 0; | 1417 | sig_hash ^= common_hash << 16; |
1418 | sig_hash &= IXGBE_ATR_HASH_MASK << 16; | ||
1419 | |||
1420 | /* return completed signature hash */ | ||
1421 | return sig_hash ^ bucket_hash; | ||
1612 | } | 1422 | } |
1613 | 1423 | ||
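The unrolled function packs both results into one dword: the bucket hash sits in the low bits and the signature hash in the upper half. A caller that needs the halves separately could split them as sketched below; the 0x7fff mask is an assumption based on the "bucket_hash is only 15 bits" comment in the removed code.

#include <stdint.h>

/* assumed 15-bit mask, per the "bucket_hash is only 15 bits" comment */
#define ATR_HASH_MASK_SKETCH	0x7fff

static void split_sig_hash_sketch(uint32_t combined, uint16_t *bucket,
				  uint16_t *sig)
{
	*bucket = combined & ATR_HASH_MASK_SKETCH;		/* bucket index half */
	*sig = (combined >> 16) & ATR_HASH_MASK_SKETCH;		/* signature half */
}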
1614 | /** | 1424 | /** |
1615 | * ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter | 1425 | * ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter |
1616 | * @hw: pointer to hardware structure | 1426 | * @hw: pointer to hardware structure |
1617 | * @stream: input bitstream | 1427 | * @input: unique input dword |
1428 | * @common: compressed common input dword | ||
1618 | * @queue: queue index to direct traffic to | 1429 | * @queue: queue index to direct traffic to |
1619 | **/ | 1430 | **/ |
1620 | s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, | 1431 | s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, |
1621 | struct ixgbe_atr_input *input, | 1432 | union ixgbe_atr_hash_dword input, |
1433 | union ixgbe_atr_hash_dword common, | ||
1622 | u8 queue) | 1434 | u8 queue) |
1623 | { | 1435 | { |
1624 | u64 fdirhashcmd; | 1436 | u64 fdirhashcmd; |
1625 | u64 fdircmd; | 1437 | u32 fdircmd; |
1626 | u32 fdirhash; | ||
1627 | u16 bucket_hash, sig_hash; | ||
1628 | u8 l4type; | ||
1629 | |||
1630 | bucket_hash = ixgbe_atr_compute_hash_82599(input, | ||
1631 | IXGBE_ATR_BUCKET_HASH_KEY); | ||
1632 | |||
1633 | /* bucket_hash is only 15 bits */ | ||
1634 | bucket_hash &= IXGBE_ATR_HASH_MASK; | ||
1635 | |||
1636 | sig_hash = ixgbe_atr_compute_hash_82599(input, | ||
1637 | IXGBE_ATR_SIGNATURE_HASH_KEY); | ||
1638 | |||
1639 | /* Get the l4type in order to program FDIRCMD properly */ | ||
1640 | /* lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6 */ | ||
1641 | ixgbe_atr_get_l4type_82599(input, &l4type); | ||
1642 | 1438 | ||
1643 | /* | 1439 | /* |
1644 | * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits | 1440 | * Get the flow_type in order to program FDIRCMD properly |
1645 | * is for FDIRCMD. Then do a 64-bit register write from FDIRHASH. | 1441 | * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6 |
1646 | */ | 1442 | */ |
1647 | fdirhash = sig_hash << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash; | 1443 | switch (input.formatted.flow_type) { |
1648 | 1444 | case IXGBE_ATR_FLOW_TYPE_TCPV4: | |
1649 | fdircmd = (IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE | | 1445 | case IXGBE_ATR_FLOW_TYPE_UDPV4: |
1650 | IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN); | 1446 | case IXGBE_ATR_FLOW_TYPE_SCTPV4: |
1651 | 1447 | case IXGBE_ATR_FLOW_TYPE_TCPV6: | |
1652 | switch (l4type & IXGBE_ATR_L4TYPE_MASK) { | 1448 | case IXGBE_ATR_FLOW_TYPE_UDPV6: |
1653 | case IXGBE_ATR_L4TYPE_TCP: | 1449 | case IXGBE_ATR_FLOW_TYPE_SCTPV6: |
1654 | fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP; | ||
1655 | break; | ||
1656 | case IXGBE_ATR_L4TYPE_UDP: | ||
1657 | fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP; | ||
1658 | break; | ||
1659 | case IXGBE_ATR_L4TYPE_SCTP: | ||
1660 | fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP; | ||
1661 | break; | 1450 | break; |
1662 | default: | 1451 | default: |
1663 | hw_dbg(hw, "Error on l4type input\n"); | 1452 | hw_dbg(hw, " Error on flow type input\n"); |
1664 | return IXGBE_ERR_CONFIG; | 1453 | return IXGBE_ERR_CONFIG; |
1665 | } | 1454 | } |
1666 | 1455 | ||
1667 | if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK) | 1456 | /* configure FDIRCMD register */ |
1668 | fdircmd |= IXGBE_FDIRCMD_IPV6; | 1457 | fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE | |
1458 | IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN; | ||
1459 | fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT; | ||
1460 | fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT; | ||
1669 | 1461 | ||
1670 | fdircmd |= ((u64)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT); | 1462 | /* |
1671 | fdirhashcmd = ((fdircmd << 32) | fdirhash); | 1463 | * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits |
1464 | * is for FDIRCMD. Then do a 64-bit register write from FDIRHASH. | ||
1465 | */ | ||
1466 | fdirhashcmd = (u64)fdircmd << 32; | ||
1467 | fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common); | ||
1672 | 1468 | ||
1673 | IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd); | 1469 | IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd); |
1674 | 1470 | ||
1471 | hw_dbg(hw, "Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd); | ||
1472 | |||
1675 | return 0; | 1473 | return 0; |
1676 | } | 1474 | } |
1677 | 1475 | ||
1678 | /** | 1476 | /** |
1477 | * ixgbe_get_fdirtcpm_82599 - generate a tcp port from atr_input_masks | ||
1478 | * @input_mask: mask to be bit swapped | ||
1479 | * | ||
1480 | * The source and destination port masks for flow director are bit swapped | ||
1481 | * in that bit 15 affects bit 0, 14 affects 1, 13 affects 2, etc. In order to | ||
1482 | * generate a correctly swapped value we need to bit swap the mask and that | ||
1483 | * is what is accomplished by this function. | ||
1484 | **/ | ||
1485 | static u32 ixgbe_get_fdirtcpm_82599(struct ixgbe_atr_input_masks *input_masks) | ||
1486 | { | ||
1487 | u32 mask = ntohs(input_masks->dst_port_mask); | ||
1488 | mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT; | ||
1489 | mask |= ntohs(input_masks->src_port_mask); | ||
1490 | mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1); | ||
1491 | mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2); | ||
1492 | mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4); | ||
1493 | return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8); | ||
1494 | } | ||
1495 | |||
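A quick way to sanity-check the swap is a user-space copy of the same shift/mask stages working on a single 16-bit mask (the driver version above handles both port masks of FDIRTCPM in one 32-bit pass). Reversing 0x8000 yields 0x0001, so bit 15 does land on bit 0 as the comment describes; the standalone form and name are illustrative only.

#include <stdint.h>

static uint16_t reverse_port_mask_sketch(uint16_t mask)
{
	mask = ((mask & 0x5555) << 1) | ((mask & 0xAAAA) >> 1);	/* swap adjacent bits */
	mask = ((mask & 0x3333) << 2) | ((mask & 0xCCCC) >> 2);	/* swap bit pairs */
	mask = ((mask & 0x0F0F) << 4) | ((mask & 0xF0F0) >> 4);	/* swap nibbles */
	return (uint16_t)((mask << 8) | (mask >> 8));		/* swap the two bytes */
}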
1496 | /* | ||
1497 | * These two macros are meant to address the fact that we have registers | ||
1498 | * that are either all or in part big-endian. As a result on big-endian | ||
1499 | * systems we will end up byte swapping the value to little-endian before | ||
1500 | * it is byte swapped again and written to the hardware in the original | ||
1501 | * big-endian format. | ||
1502 | */ | ||
1503 | #define IXGBE_STORE_AS_BE32(_value) \ | ||
1504 | (((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \ | ||
1505 | (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24)) | ||
1506 | |||
1507 | #define IXGBE_WRITE_REG_BE32(a, reg, value) \ | ||
1508 | IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(ntohl(value))) | ||
1509 | |||
1510 | #define IXGBE_STORE_AS_BE16(_value) \ | ||
1511 | (((u16)(_value) >> 8) | ((u16)(_value) << 8)) | ||
1512 | |||
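On a little-endian host the ntohl() inside IXGBE_WRITE_REG_BE32 and the byte swap done by IXGBE_STORE_AS_BE32 cancel out, so the register write is handed the original network-order value. The user-space sketch below only illustrates that macro arithmetic under the little-endian assumption, not the MMIO path itself.

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

#define STORE_AS_BE32_SKETCH(v) \
	(((uint32_t)(v) >> 24) | (((uint32_t)(v) & 0x00FF0000) >> 8) | \
	 (((uint32_t)(v) & 0x0000FF00) << 8) | ((uint32_t)(v) << 24))

int main(void)
{
	uint32_t ip_be = htonl(0xC0A80001);	/* 192.168.0.1 in network byte order */

	/* on a little-endian CPU the two swaps cancel and both prints match */
	printf("as received : 0x%08x\n", ip_be);
	printf("double swap : 0x%08x\n", STORE_AS_BE32_SKETCH(ntohl(ip_be)));
	return 0;
}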
1513 | /** | ||
1679 | * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter | 1514 | * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter |
1680 | * @hw: pointer to hardware structure | 1515 | * @hw: pointer to hardware structure |
1681 | * @input: input bitstream | 1516 | * @input: input bitstream |
@@ -1687,135 +1522,139 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, | |||
1687 | * hardware writes must be protected from one another. | 1522 | * hardware writes must be protected from one another. |
1688 | **/ | 1523 | **/ |
1689 | s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw, | 1524 | s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw, |
1690 | struct ixgbe_atr_input *input, | 1525 | union ixgbe_atr_input *input, |
1691 | struct ixgbe_atr_input_masks *input_masks, | 1526 | struct ixgbe_atr_input_masks *input_masks, |
1692 | u16 soft_id, u8 queue) | 1527 | u16 soft_id, u8 queue) |
1693 | { | 1528 | { |
1694 | u32 fdircmd = 0; | ||
1695 | u32 fdirhash; | 1529 | u32 fdirhash; |
1696 | u32 src_ipv4 = 0, dst_ipv4 = 0; | 1530 | u32 fdircmd; |
1697 | u32 src_ipv6_1, src_ipv6_2, src_ipv6_3, src_ipv6_4; | 1531 | u32 fdirport, fdirtcpm; |
1698 | u16 src_port, dst_port, vlan_id, flex_bytes; | 1532 | u32 fdirvlan; |
1699 | u16 bucket_hash; | 1533 | /* start with VLAN, flex bytes, VM pool, and IPv6 destination masked */ |
1700 | u8 l4type; | 1534 | u32 fdirm = IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP | IXGBE_FDIRM_FLEX | |
1701 | u8 fdirm = 0; | 1535 | IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6; |
1702 | |||
1703 | /* Get our input values */ | ||
1704 | ixgbe_atr_get_l4type_82599(input, &l4type); | ||
1705 | 1536 | ||
1706 | /* | 1537 | /* |
1707 | * Check l4type formatting, and bail out before we touch the hardware | 1538 | * Check flow_type formatting, and bail out before we touch the hardware |
1708 | * if there's a configuration issue | 1539 | * if there's a configuration issue |
1709 | */ | 1540 | */ |
1710 | switch (l4type & IXGBE_ATR_L4TYPE_MASK) { | 1541 | switch (input->formatted.flow_type) { |
1711 | case IXGBE_ATR_L4TYPE_TCP: | 1542 | case IXGBE_ATR_FLOW_TYPE_IPV4: |
1712 | fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP; | 1543 | /* use the L4 protocol mask for raw IPv4/IPv6 traffic */ |
1713 | break; | 1544 | fdirm |= IXGBE_FDIRM_L4P; |
1714 | case IXGBE_ATR_L4TYPE_UDP: | 1545 | case IXGBE_ATR_FLOW_TYPE_SCTPV4: |
1715 | fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP; | 1546 | if (input_masks->dst_port_mask || input_masks->src_port_mask) { |
1716 | break; | 1547 | hw_dbg(hw, " Error on src/dst port mask\n"); |
1717 | case IXGBE_ATR_L4TYPE_SCTP: | 1548 | return IXGBE_ERR_CONFIG; |
1718 | fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP; | 1549 | } |
1550 | case IXGBE_ATR_FLOW_TYPE_TCPV4: | ||
1551 | case IXGBE_ATR_FLOW_TYPE_UDPV4: | ||
1719 | break; | 1552 | break; |
1720 | default: | 1553 | default: |
1721 | hw_dbg(hw, "Error on l4type input\n"); | 1554 | hw_dbg(hw, " Error on flow type input\n"); |
1722 | return IXGBE_ERR_CONFIG; | 1555 | return IXGBE_ERR_CONFIG; |
1723 | } | 1556 | } |
1724 | 1557 | ||
1725 | bucket_hash = ixgbe_atr_compute_hash_82599(input, | ||
1726 | IXGBE_ATR_BUCKET_HASH_KEY); | ||
1727 | |||
1728 | /* bucket_hash is only 15 bits */ | ||
1729 | bucket_hash &= IXGBE_ATR_HASH_MASK; | ||
1730 | |||
1731 | ixgbe_atr_get_vlan_id_82599(input, &vlan_id); | ||
1732 | ixgbe_atr_get_src_port_82599(input, &src_port); | ||
1733 | ixgbe_atr_get_dst_port_82599(input, &dst_port); | ||
1734 | ixgbe_atr_get_flex_byte_82599(input, &flex_bytes); | ||
1735 | |||
1736 | fdirhash = soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash; | ||
1737 | |||
1738 | /* Now figure out if we're IPv4 or IPv6 */ | ||
1739 | if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK) { | ||
1740 | /* IPv6 */ | ||
1741 | ixgbe_atr_get_src_ipv6_82599(input, &src_ipv6_1, &src_ipv6_2, | ||
1742 | &src_ipv6_3, &src_ipv6_4); | ||
1743 | |||
1744 | IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), src_ipv6_1); | ||
1745 | IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), src_ipv6_2); | ||
1746 | IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), src_ipv6_3); | ||
1747 | /* The last 4 bytes is the same register as IPv4 */ | ||
1748 | IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv6_4); | ||
1749 | |||
1750 | fdircmd |= IXGBE_FDIRCMD_IPV6; | ||
1751 | fdircmd |= IXGBE_FDIRCMD_IPv6DMATCH; | ||
1752 | } else { | ||
1753 | /* IPv4 */ | ||
1754 | ixgbe_atr_get_src_ipv4_82599(input, &src_ipv4); | ||
1755 | IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv4); | ||
1756 | } | ||
1757 | |||
1758 | ixgbe_atr_get_dst_ipv4_82599(input, &dst_ipv4); | ||
1759 | IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA, dst_ipv4); | ||
1760 | |||
1761 | IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, (vlan_id | | ||
1762 | (flex_bytes << IXGBE_FDIRVLAN_FLEX_SHIFT))); | ||
1763 | IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, (src_port | | ||
1764 | (dst_port << IXGBE_FDIRPORT_DESTINATION_SHIFT))); | ||
1765 | |||
1766 | /* | 1558 | /* |
1767 | * Program the relevant mask registers. L4type cannot be | 1559 | * Program the relevant mask registers. If src/dst_port or src/dst_addr |
1768 | * masked out in this implementation. | 1560 | * are zero, then assume a full mask for that field. Also assume that |
1561 | * a VLAN of 0 is unspecified, so mask that out as well. L4type | ||
1562 | * cannot be masked out in this implementation. | ||
1769 | * | 1563 | * |
1770 | * This also assumes IPv4 only. IPv6 masking isn't supported at this | 1564 | * This also assumes IPv4 only. IPv6 masking isn't supported at this |
1771 | * point in time. | 1565 | * point in time. |
1772 | */ | 1566 | */ |
1773 | IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, input_masks->src_ip_mask); | 1567 | |
1774 | IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, input_masks->dst_ip_mask); | 1568 | /* Program FDIRM */ |
1775 | 1569 | switch (ntohs(input_masks->vlan_id_mask) & 0xEFFF) { | |
1776 | switch (l4type & IXGBE_ATR_L4TYPE_MASK) { | 1570 | case 0xEFFF: |
1777 | case IXGBE_ATR_L4TYPE_TCP: | 1571 | /* Unmask VLAN ID - bit 0 and fall through to unmask prio */ |
1778 | IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, input_masks->src_port_mask); | 1572 | fdirm &= ~IXGBE_FDIRM_VLANID; |
1779 | IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, | 1573 | case 0xE000: |
1780 | (IXGBE_READ_REG(hw, IXGBE_FDIRTCPM) | | 1574 | /* Unmask VLAN prio - bit 1 */ |
1781 | (input_masks->dst_port_mask << 16))); | 1575 | fdirm &= ~IXGBE_FDIRM_VLANP; |
1782 | break; | 1576 | break; |
1783 | case IXGBE_ATR_L4TYPE_UDP: | 1577 | case 0x0FFF: |
1784 | IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, input_masks->src_port_mask); | 1578 | /* Unmask VLAN ID - bit 0 */ |
1785 | IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, | 1579 | fdirm &= ~IXGBE_FDIRM_VLANID; |
1786 | (IXGBE_READ_REG(hw, IXGBE_FDIRUDPM) | | ||
1787 | (input_masks->src_port_mask << 16))); | ||
1788 | break; | 1580 | break; |
1789 | default: | 1581 | case 0x0000: |
1790 | /* this already would have failed above */ | 1582 | /* do nothing, vlans already masked */ |
1791 | break; | 1583 | break; |
1584 | default: | ||
1585 | hw_dbg(hw, " Error on VLAN mask\n"); | ||
1586 | return IXGBE_ERR_CONFIG; | ||
1792 | } | 1587 | } |
1793 | 1588 | ||
1794 | /* Program the last mask register, FDIRM */ | 1589 | if (input_masks->flex_mask & 0xFFFF) { |
1795 | if (input_masks->vlan_id_mask) | 1590 | if ((input_masks->flex_mask & 0xFFFF) != 0xFFFF) { |
1796 | /* Mask both VLAN and VLANP - bits 0 and 1 */ | 1591 | hw_dbg(hw, " Error on flexible byte mask\n"); |
1797 | fdirm |= 0x3; | 1592 | return IXGBE_ERR_CONFIG; |
1798 | 1593 | } | |
1799 | if (input_masks->data_mask) | 1594 | /* Unmask Flex Bytes - bit 4 */ |
1800 | /* Flex bytes need masking, so mask the whole thing - bit 4 */ | 1595 | fdirm &= ~IXGBE_FDIRM_FLEX; |
1801 | fdirm |= 0x10; | 1596 | } |
1802 | 1597 | ||
1803 | /* Now mask VM pool and destination IPv6 - bits 5 and 2 */ | 1598 | /* Now mask VM pool and destination IPv6 - bits 5 and 2 */ |
1804 | fdirm |= 0x24; | ||
1805 | |||
1806 | IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm); | 1599 | IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm); |
1807 | 1600 | ||
1808 | fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW; | 1601 | /* store the TCP/UDP port masks, bit reversed from port layout */ |
1809 | fdircmd |= IXGBE_FDIRCMD_FILTER_UPDATE; | 1602 | fdirtcpm = ixgbe_get_fdirtcpm_82599(input_masks); |
1810 | fdircmd |= IXGBE_FDIRCMD_LAST; | 1603 | |
1811 | fdircmd |= IXGBE_FDIRCMD_QUEUE_EN; | 1604 | /* write both the same so that UDP and TCP use the same mask */ |
1812 | fdircmd |= queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT; | 1605 | IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm); |
1606 | IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm); | ||
1607 | |||
1608 | /* store source and destination IP masks (big-endian) */ ||
1609 | IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M, | ||
1610 | ~input_masks->src_ip_mask[0]); | ||
1611 | IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M, | ||
1612 | ~input_masks->dst_ip_mask[0]); | ||
1613 | |||
1614 | /* Apply masks to input data */ | ||
1615 | input->formatted.vlan_id &= input_masks->vlan_id_mask; | ||
1616 | input->formatted.flex_bytes &= input_masks->flex_mask; | ||
1617 | input->formatted.src_port &= input_masks->src_port_mask; | ||
1618 | input->formatted.dst_port &= input_masks->dst_port_mask; | ||
1619 | input->formatted.src_ip[0] &= input_masks->src_ip_mask[0]; | ||
1620 | input->formatted.dst_ip[0] &= input_masks->dst_ip_mask[0]; | ||
1621 | |||
1622 | /* record vlan (little-endian) and flex_bytes (big-endian) */ ||
1623 | fdirvlan = | ||
1624 | IXGBE_STORE_AS_BE16(ntohs(input->formatted.flex_bytes)); | ||
1625 | fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT; | ||
1626 | fdirvlan |= ntohs(input->formatted.vlan_id); | ||
1627 | IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan); | ||
1628 | |||
1629 | /* record source and destination port (little-endian) */ ||
1630 | fdirport = ntohs(input->formatted.dst_port); | ||
1631 | fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT; | ||
1632 | fdirport |= ntohs(input->formatted.src_port); | ||
1633 | IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport); | ||
1634 | |||
1635 | /* record the first 32 bits of the destination address (big-endian) */ | ||
1636 | IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]); | ||
1637 | |||
1638 | /* record the source address (big-endian) */ | ||
1639 | IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]); | ||
1640 | |||
1641 | /* configure FDIRCMD register */ | ||
1642 | fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE | | ||
1643 | IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN; | ||
1644 | fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT; | ||
1645 | fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT; | ||
1646 | |||
1647 | /* we only want the bucket hash so drop the upper 16 bits */ | ||
1648 | fdirhash = ixgbe_atr_compute_hash_82599(input, | ||
1649 | IXGBE_ATR_BUCKET_HASH_KEY); | ||
1650 | fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT; | ||
1813 | 1651 | ||
1814 | IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); | 1652 | IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); |
1815 | IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd); | 1653 | IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd); |
1816 | 1654 | ||
1817 | return 0; | 1655 | return 0; |
1818 | } | 1656 | } |
1657 | |||
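The VLAN portion of the FDIRM programming above only accepts four mask shapes once the CFI bit (0x1000) has been stripped: match both VLAN ID and priority, priority only, VLAN ID only, or nothing at all. A small sketch of that validation, taking the mask in host byte order, is shown below; it mirrors the switch statement rather than adding any new behaviour.

#include <stdbool.h>
#include <stdint.h>

static bool vlan_mask_is_supported_sketch(uint16_t vlan_id_mask)
{
	switch (vlan_id_mask & 0xEFFF) {
	case 0xEFFF:	/* match VLAN ID and priority bits */
	case 0xE000:	/* match priority bits only */
	case 0x0FFF:	/* match VLAN ID bits only */
	case 0x0000:	/* VLAN fully masked out */
		return true;
	default:
		return false;	/* partial VLAN masks are rejected */
	}
}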
1819 | /** | 1658 | /** |
1820 | * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register | 1659 | * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register |
1821 | * @hw: pointer to hardware structure | 1660 | * @hw: pointer to hardware structure |
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c index d5ede2df3e42..ebbda7d15254 100644 --- a/drivers/net/ixgbe/ixgbe_common.c +++ b/drivers/net/ixgbe/ixgbe_common.c | |||
@@ -1370,6 +1370,9 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw) | |||
1370 | hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr); | 1370 | hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr); |
1371 | 1371 | ||
1372 | hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); | 1372 | hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); |
1373 | |||
1374 | /* clear VMDq pool/queue selection for RAR 0 */ | ||
1375 | hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL); | ||
1373 | } | 1376 | } |
1374 | hw->addr_ctrl.overflow_promisc = 0; | 1377 | hw->addr_ctrl.overflow_promisc = 0; |
1375 | 1378 | ||
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c index 23ff23e8b393..2002ea88ca2a 100644 --- a/drivers/net/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ixgbe/ixgbe_ethtool.c | |||
@@ -1477,9 +1477,7 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter) | |||
1477 | reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); | 1477 | reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); |
1478 | reg_ctl &= ~IXGBE_RXCTRL_RXEN; | 1478 | reg_ctl &= ~IXGBE_RXCTRL_RXEN; |
1479 | IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl); | 1479 | IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl); |
1480 | reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->reg_idx)); | 1480 | ixgbe_disable_rx_queue(adapter, rx_ring); |
1481 | reg_ctl &= ~IXGBE_RXDCTL_ENABLE; | ||
1482 | IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->reg_idx), reg_ctl); | ||
1483 | 1481 | ||
1484 | /* now Tx */ | 1482 | /* now Tx */ |
1485 | reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx)); | 1483 | reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx)); |
@@ -2279,10 +2277,11 @@ static int ixgbe_set_rx_ntuple(struct net_device *dev, | |||
2279 | struct ethtool_rx_ntuple *cmd) | 2277 | struct ethtool_rx_ntuple *cmd) |
2280 | { | 2278 | { |
2281 | struct ixgbe_adapter *adapter = netdev_priv(dev); | 2279 | struct ixgbe_adapter *adapter = netdev_priv(dev); |
2282 | struct ethtool_rx_ntuple_flow_spec fs = cmd->fs; | 2280 | struct ethtool_rx_ntuple_flow_spec *fs = &cmd->fs; |
2283 | struct ixgbe_atr_input input_struct; | 2281 | union ixgbe_atr_input input_struct; |
2284 | struct ixgbe_atr_input_masks input_masks; | 2282 | struct ixgbe_atr_input_masks input_masks; |
2285 | int target_queue; | 2283 | int target_queue; |
2284 | int err; | ||
2286 | 2285 | ||
2287 | if (adapter->hw.mac.type == ixgbe_mac_82598EB) | 2286 | if (adapter->hw.mac.type == ixgbe_mac_82598EB) |
2288 | return -EOPNOTSUPP; | 2287 | return -EOPNOTSUPP; |
@@ -2291,67 +2290,122 @@ static int ixgbe_set_rx_ntuple(struct net_device *dev, | |||
2291 | * Don't allow programming if the action is a queue greater than | 2290 | * Don't allow programming if the action is a queue greater than |
2292 | * the number of online Tx queues. | 2291 | * the number of online Tx queues. |
2293 | */ | 2292 | */ |
2294 | if ((fs.action >= adapter->num_tx_queues) || | 2293 | if ((fs->action >= adapter->num_tx_queues) || |
2295 | (fs.action < ETHTOOL_RXNTUPLE_ACTION_DROP)) | 2294 | (fs->action < ETHTOOL_RXNTUPLE_ACTION_DROP)) |
2296 | return -EINVAL; | 2295 | return -EINVAL; |
2297 | 2296 | ||
2298 | memset(&input_struct, 0, sizeof(struct ixgbe_atr_input)); | 2297 | memset(&input_struct, 0, sizeof(union ixgbe_atr_input)); |
2299 | memset(&input_masks, 0, sizeof(struct ixgbe_atr_input_masks)); | 2298 | memset(&input_masks, 0, sizeof(struct ixgbe_atr_input_masks)); |
2300 | 2299 | ||
2301 | input_masks.src_ip_mask = fs.m_u.tcp_ip4_spec.ip4src; | 2300 | /* record flow type */ |
2302 | input_masks.dst_ip_mask = fs.m_u.tcp_ip4_spec.ip4dst; | 2301 | switch (fs->flow_type) { |
2303 | input_masks.src_port_mask = fs.m_u.tcp_ip4_spec.psrc; | 2302 | case IPV4_FLOW: |
2304 | input_masks.dst_port_mask = fs.m_u.tcp_ip4_spec.pdst; | 2303 | input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_IPV4; |
2305 | input_masks.vlan_id_mask = fs.vlan_tag_mask; | 2304 | break; |
2306 | /* only use the lowest 2 bytes for flex bytes */ | ||
2307 | input_masks.data_mask = (fs.data_mask & 0xffff); | ||
2308 | |||
2309 | switch (fs.flow_type) { | ||
2310 | case TCP_V4_FLOW: | 2305 | case TCP_V4_FLOW: |
2311 | ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_TCP); | 2306 | input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4; |
2312 | break; | 2307 | break; |
2313 | case UDP_V4_FLOW: | 2308 | case UDP_V4_FLOW: |
2314 | ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_UDP); | 2309 | input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4; |
2315 | break; | 2310 | break; |
2316 | case SCTP_V4_FLOW: | 2311 | case SCTP_V4_FLOW: |
2317 | ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_SCTP); | 2312 | input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4; |
2318 | break; | 2313 | break; |
2319 | default: | 2314 | default: |
2320 | return -1; | 2315 | return -1; |
2321 | } | 2316 | } |
2322 | 2317 | ||
2323 | /* Mask bits from the inputs based on user-supplied mask */ | 2318 | /* copy vlan tag minus the CFI bit */ |
2324 | ixgbe_atr_set_src_ipv4_82599(&input_struct, | 2319 | if ((fs->vlan_tag & 0xEFFF) || (~fs->vlan_tag_mask & 0xEFFF)) { |
2325 | (fs.h_u.tcp_ip4_spec.ip4src & ~fs.m_u.tcp_ip4_spec.ip4src)); | 2320 | input_struct.formatted.vlan_id = htons(fs->vlan_tag & 0xEFFF); |
2326 | ixgbe_atr_set_dst_ipv4_82599(&input_struct, | 2321 | if (!fs->vlan_tag_mask) { |
2327 | (fs.h_u.tcp_ip4_spec.ip4dst & ~fs.m_u.tcp_ip4_spec.ip4dst)); | 2322 | input_masks.vlan_id_mask = htons(0xEFFF); |
2328 | /* 82599 expects these to be byte-swapped for perfect filtering */ | 2323 | } else { |
2329 | ixgbe_atr_set_src_port_82599(&input_struct, | 2324 | switch (~fs->vlan_tag_mask & 0xEFFF) { |
2330 | ((ntohs(fs.h_u.tcp_ip4_spec.psrc)) & ~fs.m_u.tcp_ip4_spec.psrc)); | 2325 | /* all of these are valid vlan-mask values */ |
2331 | ixgbe_atr_set_dst_port_82599(&input_struct, | 2326 | case 0xEFFF: |
2332 | ((ntohs(fs.h_u.tcp_ip4_spec.pdst)) & ~fs.m_u.tcp_ip4_spec.pdst)); | 2327 | case 0xE000: |
2333 | 2328 | case 0x0FFF: | |
2334 | /* VLAN and Flex bytes are either completely masked or not */ | 2329 | case 0x0000: |
2335 | if (!fs.vlan_tag_mask) | 2330 | input_masks.vlan_id_mask = |
2336 | ixgbe_atr_set_vlan_id_82599(&input_struct, fs.vlan_tag); | 2331 | htons(~fs->vlan_tag_mask); |
2337 | 2332 | break; | |
2338 | if (!input_masks.data_mask) | 2333 | /* exit with error if vlan-mask is invalid */ |
2339 | /* make sure we only use the first 2 bytes of user data */ | 2334 | default: |
2340 | ixgbe_atr_set_flex_byte_82599(&input_struct, | 2335 | e_err(drv, "Partial VLAN ID or " |
2341 | (fs.data & 0xffff)); | 2336 | "priority mask in vlan-mask is not " |
2337 | "supported by hardware\n"); | ||
2338 | return -1; | ||
2339 | } | ||
2340 | } | ||
2341 | } | ||
2342 | |||
2343 | /* make sure we only use the first 2 bytes of user data */ | ||
2344 | if ((fs->data & 0xFFFF) || (~fs->data_mask & 0xFFFF)) { | ||
2345 | input_struct.formatted.flex_bytes = htons(fs->data & 0xFFFF); | ||
2346 | if (!(fs->data_mask & 0xFFFF)) { | ||
2347 | input_masks.flex_mask = 0xFFFF; | ||
2348 | } else if (~fs->data_mask & 0xFFFF) { | ||
2349 | e_err(drv, "Partial user-def-mask is not " | ||
2350 | "supported by hardware\n"); | ||
2351 | return -1; | ||
2352 | } | ||
2353 | } | ||
2354 | |||
2355 | /* | ||
2356 | * Copy input into formatted structures | ||
2357 | * | ||
2358 | * These assignments are based on the following logic: | ||
2359 | * If neither the input nor the mask is set, assume the value is masked out. | ||
2360 | * If the input is set but the mask is not, the mask should default to accept all. | ||
2361 | * If the input is not set but the mask is set, the mask likely results in 0. | ||
2362 | * If both the input and the mask are set, assign both. | ||
2363 | */ | ||
2364 | if (fs->h_u.tcp_ip4_spec.ip4src || ~fs->m_u.tcp_ip4_spec.ip4src) { | ||
2365 | input_struct.formatted.src_ip[0] = fs->h_u.tcp_ip4_spec.ip4src; | ||
2366 | if (!fs->m_u.tcp_ip4_spec.ip4src) | ||
2367 | input_masks.src_ip_mask[0] = 0xFFFFFFFF; | ||
2368 | else | ||
2369 | input_masks.src_ip_mask[0] = | ||
2370 | ~fs->m_u.tcp_ip4_spec.ip4src; | ||
2371 | } | ||
2372 | if (fs->h_u.tcp_ip4_spec.ip4dst || ~fs->m_u.tcp_ip4_spec.ip4dst) { | ||
2373 | input_struct.formatted.dst_ip[0] = fs->h_u.tcp_ip4_spec.ip4dst; | ||
2374 | if (!fs->m_u.tcp_ip4_spec.ip4dst) | ||
2375 | input_masks.dst_ip_mask[0] = 0xFFFFFFFF; | ||
2376 | else | ||
2377 | input_masks.dst_ip_mask[0] = | ||
2378 | ~fs->m_u.tcp_ip4_spec.ip4dst; | ||
2379 | } | ||
2380 | if (fs->h_u.tcp_ip4_spec.psrc || ~fs->m_u.tcp_ip4_spec.psrc) { | ||
2381 | input_struct.formatted.src_port = fs->h_u.tcp_ip4_spec.psrc; | ||
2382 | if (!fs->m_u.tcp_ip4_spec.psrc) | ||
2383 | input_masks.src_port_mask = 0xFFFF; | ||
2384 | else | ||
2385 | input_masks.src_port_mask = ~fs->m_u.tcp_ip4_spec.psrc; | ||
2386 | } | ||
2387 | if (fs->h_u.tcp_ip4_spec.pdst || ~fs->m_u.tcp_ip4_spec.pdst) { | ||
2388 | input_struct.formatted.dst_port = fs->h_u.tcp_ip4_spec.pdst; | ||
2389 | if (!fs->m_u.tcp_ip4_spec.pdst) | ||
2390 | input_masks.dst_port_mask = 0xFFFF; | ||
2391 | else | ||
2392 | input_masks.dst_port_mask = ~fs->m_u.tcp_ip4_spec.pdst; | ||
2393 | } | ||
2342 | 2394 | ||
2343 | /* determine if we need to drop or route the packet */ | 2395 | /* determine if we need to drop or route the packet */ |
2344 | if (fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP) | 2396 | if (fs->action == ETHTOOL_RXNTUPLE_ACTION_DROP) |
2345 | target_queue = MAX_RX_QUEUES - 1; | 2397 | target_queue = MAX_RX_QUEUES - 1; |
2346 | else | 2398 | else |
2347 | target_queue = fs.action; | 2399 | target_queue = fs->action; |
2348 | 2400 | ||
2349 | spin_lock(&adapter->fdir_perfect_lock); | 2401 | spin_lock(&adapter->fdir_perfect_lock); |
2350 | ixgbe_fdir_add_perfect_filter_82599(&adapter->hw, &input_struct, | 2402 | err = ixgbe_fdir_add_perfect_filter_82599(&adapter->hw, |
2351 | &input_masks, 0, target_queue); | 2403 | &input_struct, |
2404 | &input_masks, 0, | ||
2405 | target_queue); | ||
2352 | spin_unlock(&adapter->fdir_perfect_lock); | 2406 | spin_unlock(&adapter->fdir_perfect_lock); |
2353 | 2407 | ||
2354 | return 0; | 2408 | return err ? -1 : 0; |
2355 | } | 2409 | } |
2356 | 2410 | ||
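The complements applied above convert between two conventions: in the ethtool flow spec a set m_u bit marks a bit to ignore, while the ixgbe_atr_input_masks fields mark bits that must match, so an all-zero ethtool mask becomes a full driver-side match. The user-space sketch below shows that conversion for a single port field; the struct and function names are illustrative only, not part of either interface.

#include <stdint.h>

struct port_rule_sketch {
	uint16_t value;		/* port value the filter compares against */
	uint16_t match_mask;	/* driver convention: set bit = compare this bit */
};

static struct port_rule_sketch make_port_rule_sketch(uint16_t port,
						     uint16_t ethtool_mask)
{
	struct port_rule_sketch rule;

	rule.value = port;
	/* ethtool convention: set bit = ignore; invert to get bits to compare */
	rule.match_mask = ethtool_mask ? (uint16_t)~ethtool_mask : 0xFFFF;
	return rule;
}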
2357 | static const struct ethtool_ops ixgbe_ethtool_ops = { | 2411 | static const struct ethtool_ops ixgbe_ethtool_ops = { |
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c index 6342d4859790..c54a88274d51 100644 --- a/drivers/net/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ixgbe/ixgbe_fcoe.c | |||
@@ -159,13 +159,13 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, | |||
159 | struct scatterlist *sg; | 159 | struct scatterlist *sg; |
160 | unsigned int i, j, dmacount; | 160 | unsigned int i, j, dmacount; |
161 | unsigned int len; | 161 | unsigned int len; |
162 | static const unsigned int bufflen = 4096; | 162 | static const unsigned int bufflen = IXGBE_FCBUFF_MIN; |
163 | unsigned int firstoff = 0; | 163 | unsigned int firstoff = 0; |
164 | unsigned int lastsize; | 164 | unsigned int lastsize; |
165 | unsigned int thisoff = 0; | 165 | unsigned int thisoff = 0; |
166 | unsigned int thislen = 0; | 166 | unsigned int thislen = 0; |
167 | u32 fcbuff, fcdmarw, fcfltrw; | 167 | u32 fcbuff, fcdmarw, fcfltrw; |
168 | dma_addr_t addr; | 168 | dma_addr_t addr = 0; |
169 | 169 | ||
170 | if (!netdev || !sgl) | 170 | if (!netdev || !sgl) |
171 | return 0; | 171 | return 0; |
@@ -254,6 +254,24 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, | |||
254 | /* only the last buffer may have non-full bufflen */ | 254 | /* only the last buffer may have non-full bufflen */ |
255 | lastsize = thisoff + thislen; | 255 | lastsize = thisoff + thislen; |
256 | 256 | ||
257 | /* | ||
258 | * lastsize cannot be the full buffer length. | ||
259 | * If it is, add another buffer with lastsize = 1. | ||
260 | */ | ||
261 | if (lastsize == bufflen) { | ||
262 | if (j >= IXGBE_BUFFCNT_MAX) { | ||
263 | e_err(drv, "xid=%x:%d,%d,%d:addr=%llx " | ||
264 | "not enough user buffers. We need an extra " | ||
265 | "buffer because lastsize is bufflen.\n", | ||
266 | xid, i, j, dmacount, (u64)addr); | ||
267 | goto out_noddp_free; | ||
268 | } | ||
269 | |||
270 | ddp->udl[j] = (u64)(fcoe->extra_ddp_buffer_dma); | ||
271 | j++; | ||
272 | lastsize = 1; | ||
273 | } | ||
274 | |||
257 | fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT); | 275 | fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT); |
258 | fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT); | 276 | fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT); |
259 | fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT); | 277 | fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT); |
@@ -532,6 +550,24 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) | |||
532 | e_err(drv, "failed to allocated FCoE DDP pool\n"); | 550 | e_err(drv, "failed to allocated FCoE DDP pool\n"); |
533 | 551 | ||
534 | spin_lock_init(&fcoe->lock); | 552 | spin_lock_init(&fcoe->lock); |
553 | |||
554 | /* Extra buffer to be shared by all DDPs for HW workaround */ ||
555 | fcoe->extra_ddp_buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC); | ||
556 | if (fcoe->extra_ddp_buffer == NULL) { | ||
557 | e_err(drv, "failed to allocate extra DDP buffer\n"); ||
558 | goto out_extra_ddp_buffer_alloc; | ||
559 | } | ||
560 | |||
561 | fcoe->extra_ddp_buffer_dma = | ||
562 | dma_map_single(&adapter->pdev->dev, | ||
563 | fcoe->extra_ddp_buffer, | ||
564 | IXGBE_FCBUFF_MIN, | ||
565 | DMA_FROM_DEVICE); | ||
566 | if (dma_mapping_error(&adapter->pdev->dev, | ||
567 | fcoe->extra_ddp_buffer_dma)) { | ||
568 | e_err(drv, "failed to map extra DDP buffer\n"); | ||
569 | goto out_extra_ddp_buffer_dma; | ||
570 | } | ||
535 | } | 571 | } |
536 | 572 | ||
537 | /* Enable L2 eth type filter for FCoE */ | 573 | /* Enable L2 eth type filter for FCoE */ |
@@ -581,6 +617,14 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) | |||
581 | } | 617 | } |
582 | } | 618 | } |
583 | #endif | 619 | #endif |
620 | |||
621 | return; | ||
622 | |||
623 | out_extra_ddp_buffer_dma: | ||
624 | kfree(fcoe->extra_ddp_buffer); | ||
625 | out_extra_ddp_buffer_alloc: | ||
626 | pci_pool_destroy(fcoe->pool); | ||
627 | fcoe->pool = NULL; | ||
584 | } | 628 | } |
585 | 629 | ||
586 | /** | 630 | /** |
@@ -600,6 +644,11 @@ void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter) | |||
600 | if (fcoe->pool) { | 644 | if (fcoe->pool) { |
601 | for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++) | 645 | for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++) |
602 | ixgbe_fcoe_ddp_put(adapter->netdev, i); | 646 | ixgbe_fcoe_ddp_put(adapter->netdev, i); |
647 | dma_unmap_single(&adapter->pdev->dev, | ||
648 | fcoe->extra_ddp_buffer_dma, | ||
649 | IXGBE_FCBUFF_MIN, | ||
650 | DMA_FROM_DEVICE); | ||
651 | kfree(fcoe->extra_ddp_buffer); | ||
603 | pci_pool_destroy(fcoe->pool); | 652 | pci_pool_destroy(fcoe->pool); |
604 | fcoe->pool = NULL; | 653 | fcoe->pool = NULL; |
605 | } | 654 | } |
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.h b/drivers/net/ixgbe/ixgbe_fcoe.h index 4bc2c551c8db..65cc8fb14fe7 100644 --- a/drivers/net/ixgbe/ixgbe_fcoe.h +++ b/drivers/net/ixgbe/ixgbe_fcoe.h | |||
@@ -70,6 +70,8 @@ struct ixgbe_fcoe { | |||
70 | spinlock_t lock; | 70 | spinlock_t lock; |
71 | struct pci_pool *pool; | 71 | struct pci_pool *pool; |
72 | struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX]; | 72 | struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX]; |
73 | unsigned char *extra_ddp_buffer; | ||
74 | dma_addr_t extra_ddp_buffer_dma; | ||
73 | }; | 75 | }; |
74 | 76 | ||
75 | #endif /* _IXGBE_FCOE_H */ | 77 | #endif /* _IXGBE_FCOE_H */ |
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 38ab4f3f8197..30f9ccfb4f87 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c | |||
@@ -52,7 +52,7 @@ char ixgbe_driver_name[] = "ixgbe"; | |||
52 | static const char ixgbe_driver_string[] = | 52 | static const char ixgbe_driver_string[] = |
53 | "Intel(R) 10 Gigabit PCI Express Network Driver"; | 53 | "Intel(R) 10 Gigabit PCI Express Network Driver"; |
54 | 54 | ||
55 | #define DRV_VERSION "3.0.12-k2" | 55 | #define DRV_VERSION "3.2.9-k2" |
56 | const char ixgbe_driver_version[] = DRV_VERSION; | 56 | const char ixgbe_driver_version[] = DRV_VERSION; |
57 | static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation."; | 57 | static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation."; |
58 | 58 | ||
@@ -3024,6 +3024,36 @@ static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter, | |||
3024 | } | 3024 | } |
3025 | } | 3025 | } |
3026 | 3026 | ||
3027 | void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, | ||
3028 | struct ixgbe_ring *ring) | ||
3029 | { | ||
3030 | struct ixgbe_hw *hw = &adapter->hw; | ||
3031 | int wait_loop = IXGBE_MAX_RX_DESC_POLL; | ||
3032 | u32 rxdctl; | ||
3033 | u8 reg_idx = ring->reg_idx; | ||
3034 | |||
3035 | rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); | ||
3036 | rxdctl &= ~IXGBE_RXDCTL_ENABLE; | ||
3037 | |||
3038 | /* write value back with RXDCTL.ENABLE bit cleared */ | ||
3039 | IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); | ||
3040 | |||
3041 | if (hw->mac.type == ixgbe_mac_82598EB && | ||
3042 | !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) | ||
3043 | return; | ||
3044 | |||
3045 | /* the hardware may take up to 100us to really disable the rx queue */ | ||
3046 | do { | ||
3047 | udelay(10); | ||
3048 | rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); | ||
3049 | } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE)); | ||
3050 | |||
3051 | if (!wait_loop) { | ||
3052 | e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within " | ||
3053 | "the polling period\n", reg_idx); | ||
3054 | } | ||
3055 | } | ||
3056 | |||
3027 | void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, | 3057 | void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, |
3028 | struct ixgbe_ring *ring) | 3058 | struct ixgbe_ring *ring) |
3029 | { | 3059 | { |
@@ -3034,9 +3064,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, | |||
3034 | 3064 | ||
3035 | /* disable queue to avoid issues while updating state */ | 3065 | /* disable queue to avoid issues while updating state */ |
3036 | rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); | 3066 | rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); |
3037 | IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), | 3067 | ixgbe_disable_rx_queue(adapter, ring); |
3038 | rxdctl & ~IXGBE_RXDCTL_ENABLE); | ||
3039 | IXGBE_WRITE_FLUSH(hw); | ||
3040 | 3068 | ||
3041 | IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32))); | 3069 | IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32))); |
3042 | IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32)); | 3070 | IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32)); |
@@ -3148,9 +3176,16 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter) | |||
3148 | u32 mhadd, hlreg0; | 3176 | u32 mhadd, hlreg0; |
3149 | 3177 | ||
3150 | /* Decide whether to use packet split mode or not */ | 3178 | /* Decide whether to use packet split mode or not */ |
3179 | /* On by default */ | ||
3180 | adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED; | ||
3181 | |||
3151 | /* Do not use packet split if we're in SR-IOV Mode */ | 3182 | /* Do not use packet split if we're in SR-IOV Mode */ |
3152 | if (!adapter->num_vfs) | 3183 | if (adapter->num_vfs) |
3153 | adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED; | 3184 | adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED; |
3185 | |||
3186 | /* Disable packet split due to 82599 erratum #45 */ | ||
3187 | if (hw->mac.type == ixgbe_mac_82599EB) | ||
3188 | adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED; | ||
3154 | 3189 | ||
3155 | /* Set the RX buffer length according to the mode */ | 3190 | /* Set the RX buffer length according to the mode */ |
3156 | if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { | 3191 | if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { |
@@ -3693,7 +3728,8 @@ static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter) | |||
3693 | * We need to try and force an autonegotiation | 3728 | * We need to try and force an autonegotiation |
3694 | * session, then bring up link. | 3729 | * session, then bring up link. |
3695 | */ | 3730 | */ |
3696 | hw->mac.ops.setup_sfp(hw); | 3731 | if (hw->mac.ops.setup_sfp) |
3732 | hw->mac.ops.setup_sfp(hw); | ||
3697 | if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK)) | 3733 | if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK)) |
3698 | schedule_work(&adapter->multispeed_fiber_task); | 3734 | schedule_work(&adapter->multispeed_fiber_task); |
3699 | } else { | 3735 | } else { |
@@ -4064,7 +4100,11 @@ void ixgbe_down(struct ixgbe_adapter *adapter) | |||
4064 | rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); | 4100 | rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); |
4065 | IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN); | 4101 | IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN); |
4066 | 4102 | ||
4067 | IXGBE_WRITE_FLUSH(hw); | 4103 | /* disable all enabled rx queues */ |
4104 | for (i = 0; i < adapter->num_rx_queues; i++) | ||
4105 | /* this call also flushes the previous write */ | ||
4106 | ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]); | ||
4107 | |||
4068 | msleep(10); | 4108 | msleep(10); |
4069 | 4109 | ||
4070 | netif_tx_stop_all_queues(netdev); | 4110 | netif_tx_stop_all_queues(netdev); |
@@ -4789,6 +4829,12 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) | |||
4789 | 4829 | ||
4790 | adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; | 4830 | adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; |
4791 | adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; | 4831 | adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; |
4832 | if (adapter->flags & (IXGBE_FLAG_FDIR_HASH_CAPABLE | | ||
4833 | IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) { | ||
4834 | e_err(probe, | ||
4835 | "Flow Director is not supported while multiple " | ||
4836 | "queues are disabled. Disabling Flow Director\n"); | ||
4837 | } | ||
4792 | adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; | 4838 | adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; |
4793 | adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; | 4839 | adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; |
4794 | adapter->atr_sample_rate = 0; | 4840 | adapter->atr_sample_rate = 0; |
@@ -4825,16 +4871,13 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter) | |||
4825 | { | 4871 | { |
4826 | int q_idx, num_q_vectors; | 4872 | int q_idx, num_q_vectors; |
4827 | struct ixgbe_q_vector *q_vector; | 4873 | struct ixgbe_q_vector *q_vector; |
4828 | int napi_vectors; | ||
4829 | int (*poll)(struct napi_struct *, int); | 4874 | int (*poll)(struct napi_struct *, int); |
4830 | 4875 | ||
4831 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { | 4876 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { |
4832 | num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; | 4877 | num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; |
4833 | napi_vectors = adapter->num_rx_queues; | ||
4834 | poll = &ixgbe_clean_rxtx_many; | 4878 | poll = &ixgbe_clean_rxtx_many; |
4835 | } else { | 4879 | } else { |
4836 | num_q_vectors = 1; | 4880 | num_q_vectors = 1; |
4837 | napi_vectors = 1; | ||
4838 | poll = &ixgbe_poll; | 4881 | poll = &ixgbe_poll; |
4839 | } | 4882 | } |
4840 | 4883 | ||
@@ -5094,16 +5137,11 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) | |||
5094 | adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; | 5137 | adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; |
5095 | if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) | 5138 | if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) |
5096 | adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; | 5139 | adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; |
5097 | if (dev->features & NETIF_F_NTUPLE) { | 5140 | /* n-tuple support exists, always init our spinlock */ |
5098 | /* Flow Director perfect filter enabled */ | 5141 | spin_lock_init(&adapter->fdir_perfect_lock); |
5099 | adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE; | 5142 | /* Flow Director hash filters enabled */ |
5100 | adapter->atr_sample_rate = 0; | 5143 | adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; |
5101 | spin_lock_init(&adapter->fdir_perfect_lock); | 5144 | adapter->atr_sample_rate = 20; |
5102 | } else { | ||
5103 | /* Flow Director hash filters enabled */ | ||
5104 | adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; | ||
5105 | adapter->atr_sample_rate = 20; | ||
5106 | } | ||
5107 | adapter->ring_feature[RING_F_FDIR].indices = | 5145 | adapter->ring_feature[RING_F_FDIR].indices = |
5108 | IXGBE_MAX_FDIR_INDICES; | 5146 | IXGBE_MAX_FDIR_INDICES; |
5109 | adapter->fdir_pballoc = 0; | 5147 | adapter->fdir_pballoc = 0; |
@@ -5931,7 +5969,8 @@ static void ixgbe_sfp_config_module_task(struct work_struct *work) | |||
5931 | unregister_netdev(adapter->netdev); | 5969 | unregister_netdev(adapter->netdev); |
5932 | return; | 5970 | return; |
5933 | } | 5971 | } |
5934 | hw->mac.ops.setup_sfp(hw); | 5972 | if (hw->mac.ops.setup_sfp) |
5973 | hw->mac.ops.setup_sfp(hw); | ||
5935 | 5974 | ||
5936 | if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK)) | 5975 | if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK)) |
5937 | /* This will also work for DA Twinax connections */ | 5976 | /* This will also work for DA Twinax connections */ |
@@ -6474,38 +6513,92 @@ static void ixgbe_tx_queue(struct ixgbe_ring *tx_ring, | |||
6474 | writel(i, tx_ring->tail); | 6513 | writel(i, tx_ring->tail); |
6475 | } | 6514 | } |
6476 | 6515 | ||
6477 | static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb, | 6516 | static void ixgbe_atr(struct ixgbe_ring *ring, struct sk_buff *skb, |
6478 | u8 queue, u32 tx_flags, __be16 protocol) | 6517 | u32 tx_flags, __be16 protocol) |
6479 | { | 6518 | { |
6480 | struct ixgbe_atr_input atr_input; | 6519 | struct ixgbe_q_vector *q_vector = ring->q_vector; |
6481 | struct iphdr *iph = ip_hdr(skb); | 6520 | union ixgbe_atr_hash_dword input = { .dword = 0 }; |
6482 | struct ethhdr *eth = (struct ethhdr *)skb->data; | 6521 | union ixgbe_atr_hash_dword common = { .dword = 0 }; |
6522 | union { | ||
6523 | unsigned char *network; | ||
6524 | struct iphdr *ipv4; | ||
6525 | struct ipv6hdr *ipv6; | ||
6526 | } hdr; | ||
6483 | struct tcphdr *th; | 6527 | struct tcphdr *th; |
6484 | u16 vlan_id; | 6528 | __be16 vlan_id; |
6529 | |||
6530 | /* if ring doesn't have an interrupt vector, cannot perform ATR */ | ||
6531 | if (!q_vector) | ||
6532 | return; | ||
6485 | 6533 | ||
6486 | /* Right now, we support IPv4 w/ TCP only */ | 6534 | /* do nothing if sampling is disabled */ |
6487 | if (protocol != htons(ETH_P_IP) || | 6535 | if (!ring->atr_sample_rate) |
6488 | iph->protocol != IPPROTO_TCP) | ||
6489 | return; | 6536 | return; |
6490 | 6537 | ||
6491 | memset(&atr_input, 0, sizeof(struct ixgbe_atr_input)); | 6538 | ring->atr_count++; |
6539 | |||
6540 | /* snag network header to get L4 type and address */ | ||
6541 | hdr.network = skb_network_header(skb); | ||
6492 | 6542 | ||
6493 | vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >> | 6543 | /* Currently only IPv4/IPv6 with TCP is supported */ |
6494 | IXGBE_TX_FLAGS_VLAN_SHIFT; | 6544 | if ((protocol != __constant_htons(ETH_P_IPV6) || |
6545 | hdr.ipv6->nexthdr != IPPROTO_TCP) && | ||
6546 | (protocol != __constant_htons(ETH_P_IP) || | ||
6547 | hdr.ipv4->protocol != IPPROTO_TCP)) | ||
6548 | return; | ||
6495 | 6549 | ||
6496 | th = tcp_hdr(skb); | 6550 | th = tcp_hdr(skb); |
6497 | 6551 | ||
6498 | ixgbe_atr_set_vlan_id_82599(&atr_input, vlan_id); | 6552 | /* skip this packet since the socket is closing */ |
6499 | ixgbe_atr_set_src_port_82599(&atr_input, th->dest); | 6553 | if (th->fin) |
6500 | ixgbe_atr_set_dst_port_82599(&atr_input, th->source); | 6554 | return; |
6501 | ixgbe_atr_set_flex_byte_82599(&atr_input, eth->h_proto); | 6555 | |
6502 | ixgbe_atr_set_l4type_82599(&atr_input, IXGBE_ATR_L4TYPE_TCP); | 6556 | /* sample on all syn packets or once every atr sample count */ |
6503 | /* src and dst are inverted, think how the receiver sees them */ | 6557 | if (!th->syn && (ring->atr_count < ring->atr_sample_rate)) |
6504 | ixgbe_atr_set_src_ipv4_82599(&atr_input, iph->daddr); | 6558 | return; |
6505 | ixgbe_atr_set_dst_ipv4_82599(&atr_input, iph->saddr); | 6559 | |
6560 | /* reset sample count */ | ||
6561 | ring->atr_count = 0; | ||
6562 | |||
6563 | vlan_id = htons(tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT); | ||
6564 | |||
6565 | /* | ||
6566 | * src and dst are inverted, think how the receiver sees them | ||
6567 | * | ||
6568 | * The input is broken into two sections, a non-compressed section | ||
6569 | * containing vm_pool, vlan_id, and flow_type. The rest of the data | ||
6570 | * is XORed together and stored in the compressed dword. | ||
6571 | */ | ||
6572 | input.formatted.vlan_id = vlan_id; | ||
6573 | |||
6574 | /* | ||
6575 | * since src port and flex bytes occupy the same word XOR them together | ||
6576 | * and write the value to source port portion of compressed dword | ||
6577 | */ | ||
6578 | if (vlan_id) | ||
6579 | common.port.src ^= th->dest ^ __constant_htons(ETH_P_8021Q); | ||
6580 | else | ||
6581 | common.port.src ^= th->dest ^ protocol; | ||
6582 | common.port.dst ^= th->source; | ||
6583 | |||
6584 | if (protocol == __constant_htons(ETH_P_IP)) { | ||
6585 | input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4; | ||
6586 | common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr; | ||
6587 | } else { | ||
6588 | input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6; | ||
6589 | common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^ | ||
6590 | hdr.ipv6->saddr.s6_addr32[1] ^ | ||
6591 | hdr.ipv6->saddr.s6_addr32[2] ^ | ||
6592 | hdr.ipv6->saddr.s6_addr32[3] ^ | ||
6593 | hdr.ipv6->daddr.s6_addr32[0] ^ | ||
6594 | hdr.ipv6->daddr.s6_addr32[1] ^ | ||
6595 | hdr.ipv6->daddr.s6_addr32[2] ^ | ||
6596 | hdr.ipv6->daddr.s6_addr32[3]; | ||
6597 | } | ||
6506 | 6598 | ||
6507 | /* This assumes the Rx queue and Tx queue are bound to the same CPU */ | 6599 | /* This assumes the Rx queue and Tx queue are bound to the same CPU */ |
6508 | ixgbe_fdir_add_signature_filter_82599(&adapter->hw, &atr_input, queue); | 6600 | ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw, |
6601 | input, common, ring->queue_index); | ||
6509 | } | 6602 | } |
6510 | 6603 | ||
6511 | static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size) | 6604 | static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size) |
@@ -6580,8 +6673,6 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, | |||
6580 | struct ixgbe_adapter *adapter, | 6673 | struct ixgbe_adapter *adapter, |
6581 | struct ixgbe_ring *tx_ring) | 6674 | struct ixgbe_ring *tx_ring) |
6582 | { | 6675 | { |
6583 | struct net_device *netdev = tx_ring->netdev; | ||
6584 | struct netdev_queue *txq; | ||
6585 | unsigned int first; | 6676 | unsigned int first; |
6586 | unsigned int tx_flags = 0; | 6677 | unsigned int tx_flags = 0; |
6587 | u8 hdr_len = 0; | 6678 | u8 hdr_len = 0; |
@@ -6676,19 +6767,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, | |||
6676 | count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first, hdr_len); | 6767 | count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first, hdr_len); |
6677 | if (count) { | 6768 | if (count) { |
6678 | /* add the ATR filter if ATR is on */ | 6769 | /* add the ATR filter if ATR is on */ |
6679 | if (tx_ring->atr_sample_rate) { | 6770 | if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state)) |
6680 | ++tx_ring->atr_count; | 6771 | ixgbe_atr(tx_ring, skb, tx_flags, protocol); |
6681 | if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) && | ||
6682 | test_bit(__IXGBE_TX_FDIR_INIT_DONE, | ||
6683 | &tx_ring->state)) { | ||
6684 | ixgbe_atr(adapter, skb, tx_ring->queue_index, | ||
6685 | tx_flags, protocol); | ||
6686 | tx_ring->atr_count = 0; | ||
6687 | } | ||
6688 | } | ||
6689 | txq = netdev_get_tx_queue(netdev, tx_ring->queue_index); | ||
6690 | txq->tx_bytes += skb->len; | ||
6691 | txq->tx_packets++; | ||
6692 | ixgbe_tx_queue(tx_ring, tx_flags, count, skb->len, hdr_len); | 6772 | ixgbe_tx_queue(tx_ring, tx_flags, count, skb->len, hdr_len); |
6693 | ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED); | 6773 | ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED); |
6694 | 6774 | ||
@@ -6846,8 +6926,6 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev, | |||
6846 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | 6926 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
6847 | int i; | 6927 | int i; |
6848 | 6928 | ||
6849 | /* accurate rx/tx bytes/packets stats */ | ||
6850 | dev_txq_stats_fold(netdev, stats); | ||
6851 | rcu_read_lock(); | 6929 | rcu_read_lock(); |
6852 | for (i = 0; i < adapter->num_rx_queues; i++) { | 6930 | for (i = 0; i < adapter->num_rx_queues; i++) { |
6853 | struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]); | 6931 | struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]); |
@@ -6864,6 +6942,22 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev, | |||
6864 | stats->rx_bytes += bytes; | 6942 | stats->rx_bytes += bytes; |
6865 | } | 6943 | } |
6866 | } | 6944 | } |
6945 | |||
6946 | for (i = 0; i < adapter->num_tx_queues; i++) { | ||
6947 | struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]); | ||
6948 | u64 bytes, packets; | ||
6949 | unsigned int start; | ||
6950 | |||
6951 | if (ring) { | ||
6952 | do { | ||
6953 | start = u64_stats_fetch_begin_bh(&ring->syncp); | ||
6954 | packets = ring->stats.packets; | ||
6955 | bytes = ring->stats.bytes; | ||
6956 | } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); | ||
6957 | stats->tx_packets += packets; | ||
6958 | stats->tx_bytes += bytes; | ||
6959 | } | ||
6960 | } | ||
6867 | rcu_read_unlock(); | 6961 | rcu_read_unlock(); |
6868 | /* following stats updated by ixgbe_watchdog_task() */ | 6962 | /* following stats updated by ixgbe_watchdog_task() */ |
6869 | stats->multicast = netdev->stats.multicast; | 6963 | stats->multicast = netdev->stats.multicast; |
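The ixgbe_get_stats64() hunk above replaces the removed netdev queue counters with per-ring counters read through the u64_stats_sync sequence API. A minimal sketch of the writer/reader pairing that pattern assumes (the struct and helper names here are illustrative, not driver code):

struct ring_stats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};

/* hot path: bump the counters inside the write sequence */
static void ring_stats_add(struct ring_stats *rs, unsigned int len)
{
	u64_stats_update_begin(&rs->syncp);
	rs->packets++;
	rs->bytes += len;
	u64_stats_update_end(&rs->syncp);
}

/* stats path: retry until a consistent 64-bit snapshot is read */
static void ring_stats_read(struct ring_stats *rs, u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_bh(&rs->syncp);
		*packets = rs->packets;
		*bytes = rs->bytes;
	} while (u64_stats_fetch_retry_bh(&rs->syncp, start));
}

On 64-bit kernels the sequence counter compiles away; on 32-bit SMP it protects the reader against torn 64-bit loads.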
diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c index 47b15738b009..187b3a16ec1f 100644 --- a/drivers/net/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ixgbe/ixgbe_sriov.c | |||
@@ -110,12 +110,10 @@ static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, | |||
110 | return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add); | 110 | return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add); |
111 | } | 111 | } |
112 | 112 | ||
113 | |||
114 | static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe) | 113 | static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe) |
115 | { | 114 | { |
116 | u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); | 115 | u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); |
117 | vmolr |= (IXGBE_VMOLR_ROMPE | | 116 | vmolr |= (IXGBE_VMOLR_ROMPE | |
118 | IXGBE_VMOLR_ROPE | | ||
119 | IXGBE_VMOLR_BAM); | 117 | IXGBE_VMOLR_BAM); |
120 | if (aupe) | 118 | if (aupe) |
121 | vmolr |= IXGBE_VMOLR_AUPE; | 119 | vmolr |= IXGBE_VMOLR_AUPE; |
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h index 446f3467d3c7..fd3358f54139 100644 --- a/drivers/net/ixgbe/ixgbe_type.h +++ b/drivers/net/ixgbe/ixgbe_type.h | |||
@@ -1947,10 +1947,9 @@ enum ixgbe_fdir_pballoc_type { | |||
1947 | #define IXGBE_FDIRM_VLANID 0x00000001 | 1947 | #define IXGBE_FDIRM_VLANID 0x00000001 |
1948 | #define IXGBE_FDIRM_VLANP 0x00000002 | 1948 | #define IXGBE_FDIRM_VLANP 0x00000002 |
1949 | #define IXGBE_FDIRM_POOL 0x00000004 | 1949 | #define IXGBE_FDIRM_POOL 0x00000004 |
1950 | #define IXGBE_FDIRM_L3P 0x00000008 | 1950 | #define IXGBE_FDIRM_L4P 0x00000008 |
1951 | #define IXGBE_FDIRM_L4P 0x00000010 | 1951 | #define IXGBE_FDIRM_FLEX 0x00000010 |
1952 | #define IXGBE_FDIRM_FLEX 0x00000020 | 1952 | #define IXGBE_FDIRM_DIPv6 0x00000020 |
1953 | #define IXGBE_FDIRM_DIPv6 0x00000040 | ||
1954 | 1953 | ||
1955 | #define IXGBE_FDIRFREE_FREE_MASK 0xFFFF | 1954 | #define IXGBE_FDIRFREE_FREE_MASK 0xFFFF |
1956 | #define IXGBE_FDIRFREE_FREE_SHIFT 0 | 1955 | #define IXGBE_FDIRFREE_FREE_SHIFT 0 |
@@ -1990,6 +1989,7 @@ enum ixgbe_fdir_pballoc_type { | |||
1990 | #define IXGBE_FDIRCMD_LAST 0x00000800 | 1989 | #define IXGBE_FDIRCMD_LAST 0x00000800 |
1991 | #define IXGBE_FDIRCMD_COLLISION 0x00001000 | 1990 | #define IXGBE_FDIRCMD_COLLISION 0x00001000 |
1992 | #define IXGBE_FDIRCMD_QUEUE_EN 0x00008000 | 1991 | #define IXGBE_FDIRCMD_QUEUE_EN 0x00008000 |
1992 | #define IXGBE_FDIRCMD_FLOW_TYPE_SHIFT 5 | ||
1993 | #define IXGBE_FDIRCMD_RX_QUEUE_SHIFT 16 | 1993 | #define IXGBE_FDIRCMD_RX_QUEUE_SHIFT 16 |
1994 | #define IXGBE_FDIRCMD_VT_POOL_SHIFT 24 | 1994 | #define IXGBE_FDIRCMD_VT_POOL_SHIFT 24 |
1995 | #define IXGBE_FDIR_INIT_DONE_POLL 10 | 1995 | #define IXGBE_FDIR_INIT_DONE_POLL 10 |
@@ -2147,51 +2147,80 @@ typedef u32 ixgbe_physical_layer; | |||
2147 | #define FC_LOW_WATER(MTU) (2 * (2 * PAUSE_MTU(MTU) + PAUSE_RTT)) | 2147 | #define FC_LOW_WATER(MTU) (2 * (2 * PAUSE_MTU(MTU) + PAUSE_RTT)) |
2148 | 2148 | ||
2149 | /* Software ATR hash keys */ | 2149 | /* Software ATR hash keys */ |
2150 | #define IXGBE_ATR_BUCKET_HASH_KEY 0xE214AD3D | 2150 | #define IXGBE_ATR_BUCKET_HASH_KEY 0x3DAD14E2 |
2151 | #define IXGBE_ATR_SIGNATURE_HASH_KEY 0x14364D17 | 2151 | #define IXGBE_ATR_SIGNATURE_HASH_KEY 0x174D3614 |
2152 | |||
2153 | /* Software ATR input stream offsets and masks */ | ||
2154 | #define IXGBE_ATR_VLAN_OFFSET 0 | ||
2155 | #define IXGBE_ATR_SRC_IPV6_OFFSET 2 | ||
2156 | #define IXGBE_ATR_SRC_IPV4_OFFSET 14 | ||
2157 | #define IXGBE_ATR_DST_IPV6_OFFSET 18 | ||
2158 | #define IXGBE_ATR_DST_IPV4_OFFSET 30 | ||
2159 | #define IXGBE_ATR_SRC_PORT_OFFSET 34 | ||
2160 | #define IXGBE_ATR_DST_PORT_OFFSET 36 | ||
2161 | #define IXGBE_ATR_FLEX_BYTE_OFFSET 38 | ||
2162 | #define IXGBE_ATR_VM_POOL_OFFSET 40 | ||
2163 | #define IXGBE_ATR_L4TYPE_OFFSET 41 | ||
2164 | 2152 | ||
2153 | /* Software ATR input stream values and masks */ | ||
2154 | #define IXGBE_ATR_HASH_MASK 0x7fff | ||
2165 | #define IXGBE_ATR_L4TYPE_MASK 0x3 | 2155 | #define IXGBE_ATR_L4TYPE_MASK 0x3 |
2166 | #define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4 | ||
2167 | #define IXGBE_ATR_L4TYPE_UDP 0x1 | 2156 | #define IXGBE_ATR_L4TYPE_UDP 0x1 |
2168 | #define IXGBE_ATR_L4TYPE_TCP 0x2 | 2157 | #define IXGBE_ATR_L4TYPE_TCP 0x2 |
2169 | #define IXGBE_ATR_L4TYPE_SCTP 0x3 | 2158 | #define IXGBE_ATR_L4TYPE_SCTP 0x3 |
2170 | #define IXGBE_ATR_HASH_MASK 0x7fff | 2159 | #define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4 |
2160 | enum ixgbe_atr_flow_type { | ||
2161 | IXGBE_ATR_FLOW_TYPE_IPV4 = 0x0, | ||
2162 | IXGBE_ATR_FLOW_TYPE_UDPV4 = 0x1, | ||
2163 | IXGBE_ATR_FLOW_TYPE_TCPV4 = 0x2, | ||
2164 | IXGBE_ATR_FLOW_TYPE_SCTPV4 = 0x3, | ||
2165 | IXGBE_ATR_FLOW_TYPE_IPV6 = 0x4, | ||
2166 | IXGBE_ATR_FLOW_TYPE_UDPV6 = 0x5, | ||
2167 | IXGBE_ATR_FLOW_TYPE_TCPV6 = 0x6, | ||
2168 | IXGBE_ATR_FLOW_TYPE_SCTPV6 = 0x7, | ||
2169 | }; | ||
2171 | 2170 | ||
2172 | /* Flow Director ATR input struct. */ | 2171 | /* Flow Director ATR input struct. */ |
2173 | struct ixgbe_atr_input { | 2172 | union ixgbe_atr_input { |
2174 | /* Byte layout in order, all values with MSB first: | 2173 | /* |
2174 | * Byte layout in order, all values with MSB first: | ||
2175 | * | 2175 | * |
2176 | * vm_pool - 1 byte | ||
2177 | * flow_type - 1 byte | ||
2176 | * vlan_id - 2 bytes | 2178 | * vlan_id - 2 bytes |
2177 | * src_ip - 16 bytes | 2179 | * src_ip - 16 bytes |
2178 | * dst_ip - 16 bytes | 2180 | * dst_ip - 16 bytes |
2179 | * src_port - 2 bytes | 2181 | * src_port - 2 bytes |
2180 | * dst_port - 2 bytes | 2182 | * dst_port - 2 bytes |
2181 | * flex_bytes - 2 bytes | 2183 | * flex_bytes - 2 bytes |
2182 | * vm_pool - 1 byte | 2184 | * rsvd0 - 2 bytes - space reserved must be 0. |
2183 | * l4type - 1 byte | ||
2184 | */ | 2185 | */ |
2185 | u8 byte_stream[42]; | 2186 | struct { |
2187 | u8 vm_pool; | ||
2188 | u8 flow_type; | ||
2189 | __be16 vlan_id; | ||
2190 | __be32 dst_ip[4]; | ||
2191 | __be32 src_ip[4]; | ||
2192 | __be16 src_port; | ||
2193 | __be16 dst_port; | ||
2194 | __be16 flex_bytes; | ||
2195 | __be16 rsvd0; | ||
2196 | } formatted; | ||
2197 | __be32 dword_stream[11]; | ||
2198 | }; | ||
2199 | |||
2200 | /* Flow Director compressed ATR hash input struct */ | ||
2201 | union ixgbe_atr_hash_dword { | ||
2202 | struct { | ||
2203 | u8 vm_pool; | ||
2204 | u8 flow_type; | ||
2205 | __be16 vlan_id; | ||
2206 | } formatted; | ||
2207 | __be32 ip; | ||
2208 | struct { | ||
2209 | __be16 src; | ||
2210 | __be16 dst; | ||
2211 | } port; | ||
2212 | __be16 flex_bytes; | ||
2213 | __be32 dword; | ||
2186 | }; | 2214 | }; |
2187 | 2215 | ||
2188 | struct ixgbe_atr_input_masks { | 2216 | struct ixgbe_atr_input_masks { |
2189 | u32 src_ip_mask; | 2217 | __be16 rsvd0; |
2190 | u32 dst_ip_mask; | 2218 | __be16 vlan_id_mask; |
2191 | u16 src_port_mask; | 2219 | __be32 dst_ip_mask[4]; |
2192 | u16 dst_port_mask; | 2220 | __be32 src_ip_mask[4]; |
2193 | u16 vlan_id_mask; | 2221 | __be16 src_port_mask; |
2194 | u16 data_mask; | 2222 | __be16 dst_port_mask; |
2223 | __be16 flex_mask; | ||
2195 | }; | 2224 | }; |
2196 | 2225 | ||
2197 | enum ixgbe_eeprom_type { | 2226 | enum ixgbe_eeprom_type { |
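The new union ixgbe_atr_hash_dword aliases all of its members over a single 32-bit word, which is why ixgbe_atr() above XORs the ports, flex bytes and addresses into the "common" dword instead of assigning them. A sketch of how the two dwords are built for a TCPv4 flow, using the union layout from this header (the helper and parameter names are illustrative):

static void atr_pack_tcpv4(__be16 vlan_id, __be16 src_port, __be16 dst_port,
			   __be32 saddr, __be32 daddr, __be16 flex,
			   union ixgbe_atr_hash_dword *input,
			   union ixgbe_atr_hash_dword *common)
{
	/* non-compressed dword: vm_pool, flow type and VLAN travel as-is */
	input->dword = 0;
	input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
	input->formatted.vlan_id = vlan_id;

	/* compressed dword: everything else is XORed into the same 32 bits */
	common->dword = 0;
	common->port.src ^= src_port ^ flex;
	common->port.dst ^= dst_port;
	common->ip ^= saddr ^ daddr;
}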
diff --git a/drivers/net/ixgbe/ixgbe_x540.c b/drivers/net/ixgbe/ixgbe_x540.c index 3a8923993ce3..f2518b01067d 100644 --- a/drivers/net/ixgbe/ixgbe_x540.c +++ b/drivers/net/ixgbe/ixgbe_x540.c | |||
@@ -133,17 +133,17 @@ static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw) | |||
133 | } | 133 | } |
134 | 134 | ||
135 | ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); | 135 | ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); |
136 | IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST)); | 136 | IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | reset_bit)); |
137 | IXGBE_WRITE_FLUSH(hw); | 137 | IXGBE_WRITE_FLUSH(hw); |
138 | 138 | ||
139 | /* Poll for reset bit to self-clear indicating reset is complete */ | 139 | /* Poll for reset bit to self-clear indicating reset is complete */ |
140 | for (i = 0; i < 10; i++) { | 140 | for (i = 0; i < 10; i++) { |
141 | udelay(1); | 141 | udelay(1); |
142 | ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); | 142 | ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); |
143 | if (!(ctrl & IXGBE_CTRL_RST)) | 143 | if (!(ctrl & reset_bit)) |
144 | break; | 144 | break; |
145 | } | 145 | } |
146 | if (ctrl & IXGBE_CTRL_RST) { | 146 | if (ctrl & reset_bit) { |
147 | status = IXGBE_ERR_RESET_FAILED; | 147 | status = IXGBE_ERR_RESET_FAILED; |
148 | hw_dbg(hw, "Reset polling failed to complete.\n"); | 148 | hw_dbg(hw, "Reset polling failed to complete.\n"); |
149 | } | 149 | } |
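The X540 hunk only parameterizes which CTRL bit requests the reset; the surrounding code keeps the usual set, flush, then poll-for-self-clear idiom. Reduced to a standalone sketch (illustrative only, not the driver function):

static s32 reset_and_poll(struct ixgbe_hw *hw, u32 reset_bit)
{
	u32 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
	int i;

	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl | reset_bit);
	IXGBE_WRITE_FLUSH(hw);

	/* the requested bit self-clears once the reset has completed */
	for (i = 0; i < 10; i++) {
		udelay(1);
		if (!(IXGBE_READ_REG(hw, IXGBE_CTRL) & reset_bit))
			return 0;
	}
	return IXGBE_ERR_RESET_FAILED;
}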
diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c index 183765cb7f25..f35554d11441 100644 --- a/drivers/net/ll_temac_main.c +++ b/drivers/net/ll_temac_main.c | |||
@@ -238,7 +238,7 @@ static int temac_dma_bd_init(struct net_device *ndev) | |||
238 | goto out; | 238 | goto out; |
239 | } | 239 | } |
240 | /* allocate the tx and rx ring buffer descriptors. */ | 240 | /* allocate the tx and rx ring buffer descriptors. */ |
241 | /* returns a virtual addres and a physical address. */ | 241 | /* returns a virtual address and a physical address. */ |
242 | lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent, | 242 | lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent, |
243 | sizeof(*lp->tx_bd_v) * TX_BD_NUM, | 243 | sizeof(*lp->tx_bd_v) * TX_BD_NUM, |
244 | &lp->tx_bd_p, GFP_KERNEL); | 244 | &lp->tx_bd_p, GFP_KERNEL); |
diff --git a/drivers/net/macb.c b/drivers/net/macb.c index f69e73e2191e..79ccb54ab00c 100644 --- a/drivers/net/macb.c +++ b/drivers/net/macb.c | |||
@@ -260,7 +260,7 @@ static int macb_mii_init(struct macb *bp) | |||
260 | for (i = 0; i < PHY_MAX_ADDR; i++) | 260 | for (i = 0; i < PHY_MAX_ADDR; i++) |
261 | bp->mii_bus->irq[i] = PHY_POLL; | 261 | bp->mii_bus->irq[i] = PHY_POLL; |
262 | 262 | ||
263 | platform_set_drvdata(bp->dev, bp->mii_bus); | 263 | dev_set_drvdata(&bp->dev->dev, bp->mii_bus); |
264 | 264 | ||
265 | if (mdiobus_register(bp->mii_bus)) | 265 | if (mdiobus_register(bp->mii_bus)) |
266 | goto err_out_free_mdio_irq; | 266 | goto err_out_free_mdio_irq; |
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 21845affea13..5933621ac3ff 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c | |||
@@ -585,7 +585,7 @@ err: | |||
585 | rcu_read_lock_bh(); | 585 | rcu_read_lock_bh(); |
586 | vlan = rcu_dereference(q->vlan); | 586 | vlan = rcu_dereference(q->vlan); |
587 | if (vlan) | 587 | if (vlan) |
588 | netdev_get_tx_queue(vlan->dev, 0)->tx_dropped++; | 588 | vlan->dev->stats.tx_dropped++; |
589 | rcu_read_unlock_bh(); | 589 | rcu_read_unlock_bh(); |
590 | 590 | ||
591 | return err; | 591 | return err; |
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c index 8f4bf1f07c11..3a4277f6fac4 100644 --- a/drivers/net/mlx4/alloc.c +++ b/drivers/net/mlx4/alloc.c | |||
@@ -178,6 +178,7 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, | |||
178 | } else { | 178 | } else { |
179 | int i; | 179 | int i; |
180 | 180 | ||
181 | buf->direct.buf = NULL; | ||
181 | buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE; | 182 | buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE; |
182 | buf->npages = buf->nbufs; | 183 | buf->npages = buf->nbufs; |
183 | buf->page_shift = PAGE_SHIFT; | 184 | buf->page_shift = PAGE_SHIFT; |
@@ -229,7 +230,7 @@ void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf) | |||
229 | dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf, | 230 | dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf, |
230 | buf->direct.map); | 231 | buf->direct.map); |
231 | else { | 232 | else { |
232 | if (BITS_PER_LONG == 64) | 233 | if (BITS_PER_LONG == 64 && buf->direct.buf) |
233 | vunmap(buf->direct.buf); | 234 | vunmap(buf->direct.buf); |
234 | 235 | ||
235 | for (i = 0; i < buf->nbufs; ++i) | 236 | for (i = 0; i < buf->nbufs; ++i) |
diff --git a/drivers/net/mlx4/catas.c b/drivers/net/mlx4/catas.c index 68aaa42d0ced..32f947154c33 100644 --- a/drivers/net/mlx4/catas.c +++ b/drivers/net/mlx4/catas.c | |||
@@ -113,7 +113,7 @@ static void catas_reset(struct work_struct *work) | |||
113 | void mlx4_start_catas_poll(struct mlx4_dev *dev) | 113 | void mlx4_start_catas_poll(struct mlx4_dev *dev) |
114 | { | 114 | { |
115 | struct mlx4_priv *priv = mlx4_priv(dev); | 115 | struct mlx4_priv *priv = mlx4_priv(dev); |
116 | unsigned long addr; | 116 | phys_addr_t addr; |
117 | 117 | ||
118 | INIT_LIST_HEAD(&priv->catas_err.list); | 118 | INIT_LIST_HEAD(&priv->catas_err.list); |
119 | init_timer(&priv->catas_err.timer); | 119 | init_timer(&priv->catas_err.timer); |
@@ -124,8 +124,8 @@ void mlx4_start_catas_poll(struct mlx4_dev *dev) | |||
124 | 124 | ||
125 | priv->catas_err.map = ioremap(addr, priv->fw.catas_size * 4); | 125 | priv->catas_err.map = ioremap(addr, priv->fw.catas_size * 4); |
126 | if (!priv->catas_err.map) { | 126 | if (!priv->catas_err.map) { |
127 | mlx4_warn(dev, "Failed to map internal error buffer at 0x%lx\n", | 127 | mlx4_warn(dev, "Failed to map internal error buffer at 0x%llx\n", |
128 | addr); | 128 | (unsigned long long) addr); |
129 | return; | 129 | return; |
130 | } | 130 | } |
131 | 131 | ||
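The catas.c change widens the error-buffer address from unsigned long to phys_addr_t, which can be 64 bits on a 32-bit kernel (e.g. with PAE), so the warning prints it through an unsigned long long cast. A generic sketch of the same pattern (the helper name is illustrative):

static void __iomem *map_err_buf(struct device *dev, phys_addr_t addr,
				 size_t len)
{
	void __iomem *map = ioremap(addr, len);

	if (!map)
		dev_warn(dev, "failed to map error buffer at 0x%llx\n",
			 (unsigned long long) addr);
	return map;
}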
diff --git a/drivers/net/mlx4/en_main.c b/drivers/net/mlx4/en_main.c index f6e0d40cd876..1ff6ca6466ed 100644 --- a/drivers/net/mlx4/en_main.c +++ b/drivers/net/mlx4/en_main.c | |||
@@ -202,7 +202,8 @@ static void *mlx4_en_add(struct mlx4_dev *dev) | |||
202 | if (mlx4_uar_alloc(dev, &mdev->priv_uar)) | 202 | if (mlx4_uar_alloc(dev, &mdev->priv_uar)) |
203 | goto err_pd; | 203 | goto err_pd; |
204 | 204 | ||
205 | mdev->uar_map = ioremap(mdev->priv_uar.pfn << PAGE_SHIFT, PAGE_SIZE); | 205 | mdev->uar_map = ioremap((phys_addr_t) mdev->priv_uar.pfn << PAGE_SHIFT, |
206 | PAGE_SIZE); | ||
206 | if (!mdev->uar_map) | 207 | if (!mdev->uar_map) |
207 | goto err_uar; | 208 | goto err_uar; |
208 | spin_lock_init(&mdev->uar_lock); | 209 | spin_lock_init(&mdev->uar_lock); |
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c index 6d6806b361e3..897f576b8b17 100644 --- a/drivers/net/mlx4/en_netdev.c +++ b/drivers/net/mlx4/en_netdev.c | |||
@@ -972,7 +972,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, | |||
972 | int i; | 972 | int i; |
973 | int err; | 973 | int err; |
974 | 974 | ||
975 | dev = alloc_etherdev_mq(sizeof(struct mlx4_en_priv), prof->tx_ring_num); | 975 | dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv), |
976 | prof->tx_ring_num, prof->rx_ring_num); | ||
976 | if (dev == NULL) { | 977 | if (dev == NULL) { |
977 | mlx4_err(mdev, "Net device allocation failed\n"); | 978 | mlx4_err(mdev, "Net device allocation failed\n"); |
978 | return -ENOMEM; | 979 | return -ENOMEM; |
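alloc_etherdev_mqs() sizes the TX and RX queue arrays independently, where the older alloc_etherdev_mq() used a single count for both. A minimal usage sketch of the call made above (the wrapper is illustrative):

static struct net_device *alloc_en_netdev(unsigned int txqs, unsigned int rxqs)
{
	/* TX and RX real-queue counts are passed separately */
	return alloc_etherdev_mqs(sizeof(struct mlx4_en_priv), txqs, rxqs);
}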
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c index 7a7e18ba278a..5de1db897835 100644 --- a/drivers/net/mlx4/fw.c +++ b/drivers/net/mlx4/fw.c | |||
@@ -289,10 +289,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) | |||
289 | MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET); | 289 | MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET); |
290 | dev_cap->bf_reg_size = 1 << (field & 0x1f); | 290 | dev_cap->bf_reg_size = 1 << (field & 0x1f); |
291 | MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET); | 291 | MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET); |
292 | if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size)) { | 292 | if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size)) |
293 | mlx4_warn(dev, "firmware bug: log2 # of blue flame regs is invalid (%d), forcing 3\n", field & 0x1f); | ||
294 | field = 3; | 293 | field = 3; |
295 | } | ||
296 | dev_cap->bf_regs_per_page = 1 << (field & 0x3f); | 294 | dev_cap->bf_regs_per_page = 1 << (field & 0x3f); |
297 | mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n", | 295 | mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n", |
298 | dev_cap->bf_reg_size, dev_cap->bf_regs_per_page); | 296 | dev_cap->bf_reg_size, dev_cap->bf_regs_per_page); |
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c index 782f11d8fa71..2765a3ce9c24 100644 --- a/drivers/net/mlx4/main.c +++ b/drivers/net/mlx4/main.c | |||
@@ -829,7 +829,7 @@ static int mlx4_setup_hca(struct mlx4_dev *dev) | |||
829 | goto err_uar_table_free; | 829 | goto err_uar_table_free; |
830 | } | 830 | } |
831 | 831 | ||
832 | priv->kar = ioremap(priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE); | 832 | priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE); |
833 | if (!priv->kar) { | 833 | if (!priv->kar) { |
834 | mlx4_err(dev, "Couldn't map kernel access region, " | 834 | mlx4_err(dev, "Couldn't map kernel access region, " |
835 | "aborting.\n"); | 835 | "aborting.\n"); |
@@ -1286,6 +1286,21 @@ static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = { | |||
1286 | { PCI_VDEVICE(MELLANOX, 0x6764) }, /* MT26468 ConnectX EN 10GigE PCIe gen2*/ | 1286 | { PCI_VDEVICE(MELLANOX, 0x6764) }, /* MT26468 ConnectX EN 10GigE PCIe gen2*/ |
1287 | { PCI_VDEVICE(MELLANOX, 0x6746) }, /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */ | 1287 | { PCI_VDEVICE(MELLANOX, 0x6746) }, /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */ |
1288 | { PCI_VDEVICE(MELLANOX, 0x676e) }, /* MT26478 ConnectX2 40GigE PCIe gen2 */ | 1288 | { PCI_VDEVICE(MELLANOX, 0x676e) }, /* MT26478 ConnectX2 40GigE PCIe gen2 */ |
1289 | { PCI_VDEVICE(MELLANOX, 0x1002) }, /* MT25400 Family [ConnectX-2 Virtual Function] */ | ||
1290 | { PCI_VDEVICE(MELLANOX, 0x1003) }, /* MT27500 Family [ConnectX-3] */ | ||
1291 | { PCI_VDEVICE(MELLANOX, 0x1004) }, /* MT27500 Family [ConnectX-3 Virtual Function] */ | ||
1292 | { PCI_VDEVICE(MELLANOX, 0x1005) }, /* MT27510 Family */ | ||
1293 | { PCI_VDEVICE(MELLANOX, 0x1006) }, /* MT27511 Family */ | ||
1294 | { PCI_VDEVICE(MELLANOX, 0x1007) }, /* MT27520 Family */ | ||
1295 | { PCI_VDEVICE(MELLANOX, 0x1008) }, /* MT27521 Family */ | ||
1296 | { PCI_VDEVICE(MELLANOX, 0x1009) }, /* MT27530 Family */ | ||
1297 | { PCI_VDEVICE(MELLANOX, 0x100a) }, /* MT27531 Family */ | ||
1298 | { PCI_VDEVICE(MELLANOX, 0x100b) }, /* MT27540 Family */ | ||
1299 | { PCI_VDEVICE(MELLANOX, 0x100c) }, /* MT27541 Family */ | ||
1300 | { PCI_VDEVICE(MELLANOX, 0x100d) }, /* MT27550 Family */ | ||
1301 | { PCI_VDEVICE(MELLANOX, 0x100e) }, /* MT27551 Family */ | ||
1302 | { PCI_VDEVICE(MELLANOX, 0x100f) }, /* MT27560 Family */ | ||
1303 | { PCI_VDEVICE(MELLANOX, 0x1010) }, /* MT27561 Family */ | ||
1289 | { 0, } | 1304 | { 0, } |
1290 | }; | 1305 | }; |
1291 | 1306 | ||
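Both ioremap() fixes, here and in en_main.c above, widen the page frame number before shifting: on a 32-bit kernel `pfn << PAGE_SHIFT` overflows unsigned long for memory above 4 GB. The corrected form as a one-line sketch (the helper name is illustrative):

static void __iomem *map_uar_page(unsigned long pfn)
{
	/* widen first, then shift, so the physical address keeps its high bits */
	return ioremap((phys_addr_t) pfn << PAGE_SHIFT, PAGE_SIZE);
}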
diff --git a/drivers/net/mlx4/mcg.c b/drivers/net/mlx4/mcg.c index c4f88b7ef7b6..79cf42db2ea9 100644 --- a/drivers/net/mlx4/mcg.c +++ b/drivers/net/mlx4/mcg.c | |||
@@ -95,7 +95,8 @@ static int mlx4_MGID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox | |||
95 | * entry in hash chain and *mgm holds end of hash chain. | 95 | * entry in hash chain and *mgm holds end of hash chain. |
96 | */ | 96 | */ |
97 | static int find_mgm(struct mlx4_dev *dev, | 97 | static int find_mgm(struct mlx4_dev *dev, |
98 | u8 *gid, struct mlx4_cmd_mailbox *mgm_mailbox, | 98 | u8 *gid, enum mlx4_protocol protocol, |
99 | struct mlx4_cmd_mailbox *mgm_mailbox, | ||
99 | u16 *hash, int *prev, int *index) | 100 | u16 *hash, int *prev, int *index) |
100 | { | 101 | { |
101 | struct mlx4_cmd_mailbox *mailbox; | 102 | struct mlx4_cmd_mailbox *mailbox; |
@@ -134,7 +135,8 @@ static int find_mgm(struct mlx4_dev *dev, | |||
134 | return err; | 135 | return err; |
135 | } | 136 | } |
136 | 137 | ||
137 | if (!memcmp(mgm->gid, gid, 16)) | 138 | if (!memcmp(mgm->gid, gid, 16) && |
139 | be32_to_cpu(mgm->members_count) >> 30 == protocol) | ||
138 | return err; | 140 | return err; |
139 | 141 | ||
140 | *prev = *index; | 142 | *prev = *index; |
@@ -146,7 +148,7 @@ static int find_mgm(struct mlx4_dev *dev, | |||
146 | } | 148 | } |
147 | 149 | ||
148 | int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], | 150 | int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], |
149 | int block_mcast_loopback) | 151 | int block_mcast_loopback, enum mlx4_protocol protocol) |
150 | { | 152 | { |
151 | struct mlx4_priv *priv = mlx4_priv(dev); | 153 | struct mlx4_priv *priv = mlx4_priv(dev); |
152 | struct mlx4_cmd_mailbox *mailbox; | 154 | struct mlx4_cmd_mailbox *mailbox; |
@@ -165,7 +167,7 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], | |||
165 | 167 | ||
166 | mutex_lock(&priv->mcg_table.mutex); | 168 | mutex_lock(&priv->mcg_table.mutex); |
167 | 169 | ||
168 | err = find_mgm(dev, gid, mailbox, &hash, &prev, &index); | 170 | err = find_mgm(dev, gid, protocol, mailbox, &hash, &prev, &index); |
169 | if (err) | 171 | if (err) |
170 | goto out; | 172 | goto out; |
171 | 173 | ||
@@ -187,7 +189,7 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], | |||
187 | memcpy(mgm->gid, gid, 16); | 189 | memcpy(mgm->gid, gid, 16); |
188 | } | 190 | } |
189 | 191 | ||
190 | members_count = be32_to_cpu(mgm->members_count); | 192 | members_count = be32_to_cpu(mgm->members_count) & 0xffffff; |
191 | if (members_count == MLX4_QP_PER_MGM) { | 193 | if (members_count == MLX4_QP_PER_MGM) { |
192 | mlx4_err(dev, "MGM at index %x is full.\n", index); | 194 | mlx4_err(dev, "MGM at index %x is full.\n", index); |
193 | err = -ENOMEM; | 195 | err = -ENOMEM; |
@@ -207,7 +209,7 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], | |||
207 | else | 209 | else |
208 | mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK); | 210 | mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK); |
209 | 211 | ||
210 | mgm->members_count = cpu_to_be32(members_count); | 212 | mgm->members_count = cpu_to_be32(members_count | (u32) protocol << 30); |
211 | 213 | ||
212 | err = mlx4_WRITE_MCG(dev, index, mailbox); | 214 | err = mlx4_WRITE_MCG(dev, index, mailbox); |
213 | if (err) | 215 | if (err) |
@@ -242,7 +244,8 @@ out: | |||
242 | } | 244 | } |
243 | EXPORT_SYMBOL_GPL(mlx4_multicast_attach); | 245 | EXPORT_SYMBOL_GPL(mlx4_multicast_attach); |
244 | 246 | ||
245 | int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16]) | 247 | int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], |
248 | enum mlx4_protocol protocol) | ||
246 | { | 249 | { |
247 | struct mlx4_priv *priv = mlx4_priv(dev); | 250 | struct mlx4_priv *priv = mlx4_priv(dev); |
248 | struct mlx4_cmd_mailbox *mailbox; | 251 | struct mlx4_cmd_mailbox *mailbox; |
@@ -260,7 +263,7 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16]) | |||
260 | 263 | ||
261 | mutex_lock(&priv->mcg_table.mutex); | 264 | mutex_lock(&priv->mcg_table.mutex); |
262 | 265 | ||
263 | err = find_mgm(dev, gid, mailbox, &hash, &prev, &index); | 266 | err = find_mgm(dev, gid, protocol, mailbox, &hash, &prev, &index); |
264 | if (err) | 267 | if (err) |
265 | goto out; | 268 | goto out; |
266 | 269 | ||
@@ -270,7 +273,7 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16]) | |||
270 | goto out; | 273 | goto out; |
271 | } | 274 | } |
272 | 275 | ||
273 | members_count = be32_to_cpu(mgm->members_count); | 276 | members_count = be32_to_cpu(mgm->members_count) & 0xffffff; |
274 | for (loc = -1, i = 0; i < members_count; ++i) | 277 | for (loc = -1, i = 0; i < members_count; ++i) |
275 | if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) | 278 | if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) |
276 | loc = i; | 279 | loc = i; |
@@ -282,7 +285,7 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16]) | |||
282 | } | 285 | } |
283 | 286 | ||
284 | 287 | ||
285 | mgm->members_count = cpu_to_be32(--members_count); | 288 | mgm->members_count = cpu_to_be32(--members_count | (u32) protocol << 30); |
286 | mgm->qp[loc] = mgm->qp[i - 1]; | 289 | mgm->qp[loc] = mgm->qp[i - 1]; |
287 | mgm->qp[i - 1] = 0; | 290 | mgm->qp[i - 1] = 0; |
288 | 291 | ||
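The mcg.c hunks start storing the steering protocol in the top two bits of the big-endian members_count word, keeping the real member count in the low 24 bits. A sketch of the encode/decode this implies (the helper names are illustrative, not part of mlx4):

static __be32 mgm_pack_members(u32 count, enum mlx4_protocol prot)
{
	return cpu_to_be32(count | (u32) prot << 30);
}

static u32 mgm_member_count(__be32 members_count)
{
	return be32_to_cpu(members_count) & 0xffffff;
}

static enum mlx4_protocol mgm_protocol(__be32 members_count)
{
	return be32_to_cpu(members_count) >> 30;
}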
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c index a37fcf11ab36..ea5cfe2c3a04 100644 --- a/drivers/net/myri10ge/myri10ge.c +++ b/drivers/net/myri10ge/myri10ge.c | |||
@@ -3403,9 +3403,7 @@ static int myri10ge_resume(struct pci_dev *pdev) | |||
3403 | return -EIO; | 3403 | return -EIO; |
3404 | } | 3404 | } |
3405 | 3405 | ||
3406 | status = pci_restore_state(pdev); | 3406 | pci_restore_state(pdev); |
3407 | if (status) | ||
3408 | return status; | ||
3409 | 3407 | ||
3410 | status = pci_enable_device(pdev); | 3408 | status = pci_enable_device(pdev); |
3411 | if (status) { | 3409 | if (status) { |
diff --git a/drivers/net/niu.c b/drivers/net/niu.c index 2541321bad82..9fb59d3f9c92 100644 --- a/drivers/net/niu.c +++ b/drivers/net/niu.c | |||
@@ -4489,6 +4489,9 @@ static int niu_alloc_channels(struct niu *np) | |||
4489 | { | 4489 | { |
4490 | struct niu_parent *parent = np->parent; | 4490 | struct niu_parent *parent = np->parent; |
4491 | int first_rx_channel, first_tx_channel; | 4491 | int first_rx_channel, first_tx_channel; |
4492 | int num_rx_rings, num_tx_rings; | ||
4493 | struct rx_ring_info *rx_rings; | ||
4494 | struct tx_ring_info *tx_rings; | ||
4492 | int i, port, err; | 4495 | int i, port, err; |
4493 | 4496 | ||
4494 | port = np->port; | 4497 | port = np->port; |
@@ -4498,18 +4501,21 @@ static int niu_alloc_channels(struct niu *np) | |||
4498 | first_tx_channel += parent->txchan_per_port[i]; | 4501 | first_tx_channel += parent->txchan_per_port[i]; |
4499 | } | 4502 | } |
4500 | 4503 | ||
4501 | np->num_rx_rings = parent->rxchan_per_port[port]; | 4504 | num_rx_rings = parent->rxchan_per_port[port]; |
4502 | np->num_tx_rings = parent->txchan_per_port[port]; | 4505 | num_tx_rings = parent->txchan_per_port[port]; |
4503 | 4506 | ||
4504 | netif_set_real_num_rx_queues(np->dev, np->num_rx_rings); | 4507 | rx_rings = kcalloc(num_rx_rings, sizeof(struct rx_ring_info), |
4505 | netif_set_real_num_tx_queues(np->dev, np->num_tx_rings); | 4508 | GFP_KERNEL); |
4506 | |||
4507 | np->rx_rings = kcalloc(np->num_rx_rings, sizeof(struct rx_ring_info), | ||
4508 | GFP_KERNEL); | ||
4509 | err = -ENOMEM; | 4509 | err = -ENOMEM; |
4510 | if (!np->rx_rings) | 4510 | if (!rx_rings) |
4511 | goto out_err; | 4511 | goto out_err; |
4512 | 4512 | ||
4513 | np->num_rx_rings = num_rx_rings; | ||
4514 | smp_wmb(); | ||
4515 | np->rx_rings = rx_rings; | ||
4516 | |||
4517 | netif_set_real_num_rx_queues(np->dev, num_rx_rings); | ||
4518 | |||
4513 | for (i = 0; i < np->num_rx_rings; i++) { | 4519 | for (i = 0; i < np->num_rx_rings; i++) { |
4514 | struct rx_ring_info *rp = &np->rx_rings[i]; | 4520 | struct rx_ring_info *rp = &np->rx_rings[i]; |
4515 | 4521 | ||
@@ -4538,12 +4544,18 @@ static int niu_alloc_channels(struct niu *np) | |||
4538 | return err; | 4544 | return err; |
4539 | } | 4545 | } |
4540 | 4546 | ||
4541 | np->tx_rings = kcalloc(np->num_tx_rings, sizeof(struct tx_ring_info), | 4547 | tx_rings = kcalloc(num_tx_rings, sizeof(struct tx_ring_info), |
4542 | GFP_KERNEL); | 4548 | GFP_KERNEL); |
4543 | err = -ENOMEM; | 4549 | err = -ENOMEM; |
4544 | if (!np->tx_rings) | 4550 | if (!tx_rings) |
4545 | goto out_err; | 4551 | goto out_err; |
4546 | 4552 | ||
4553 | np->num_tx_rings = num_tx_rings; | ||
4554 | smp_wmb(); | ||
4555 | np->tx_rings = tx_rings; | ||
4556 | |||
4557 | netif_set_real_num_tx_queues(np->dev, num_tx_rings); | ||
4558 | |||
4547 | for (i = 0; i < np->num_tx_rings; i++) { | 4559 | for (i = 0; i < np->num_tx_rings; i++) { |
4548 | struct tx_ring_info *rp = &np->tx_rings[i]; | 4560 | struct tx_ring_info *rp = &np->tx_rings[i]; |
4549 | 4561 | ||
@@ -6246,11 +6258,17 @@ static void niu_sync_mac_stats(struct niu *np) | |||
6246 | static void niu_get_rx_stats(struct niu *np) | 6258 | static void niu_get_rx_stats(struct niu *np) |
6247 | { | 6259 | { |
6248 | unsigned long pkts, dropped, errors, bytes; | 6260 | unsigned long pkts, dropped, errors, bytes; |
6261 | struct rx_ring_info *rx_rings; | ||
6249 | int i; | 6262 | int i; |
6250 | 6263 | ||
6251 | pkts = dropped = errors = bytes = 0; | 6264 | pkts = dropped = errors = bytes = 0; |
6265 | |||
6266 | rx_rings = ACCESS_ONCE(np->rx_rings); | ||
6267 | if (!rx_rings) | ||
6268 | goto no_rings; | ||
6269 | |||
6252 | for (i = 0; i < np->num_rx_rings; i++) { | 6270 | for (i = 0; i < np->num_rx_rings; i++) { |
6253 | struct rx_ring_info *rp = &np->rx_rings[i]; | 6271 | struct rx_ring_info *rp = &rx_rings[i]; |
6254 | 6272 | ||
6255 | niu_sync_rx_discard_stats(np, rp, 0); | 6273 | niu_sync_rx_discard_stats(np, rp, 0); |
6256 | 6274 | ||
@@ -6259,6 +6277,8 @@ static void niu_get_rx_stats(struct niu *np) | |||
6259 | dropped += rp->rx_dropped; | 6277 | dropped += rp->rx_dropped; |
6260 | errors += rp->rx_errors; | 6278 | errors += rp->rx_errors; |
6261 | } | 6279 | } |
6280 | |||
6281 | no_rings: | ||
6262 | np->dev->stats.rx_packets = pkts; | 6282 | np->dev->stats.rx_packets = pkts; |
6263 | np->dev->stats.rx_bytes = bytes; | 6283 | np->dev->stats.rx_bytes = bytes; |
6264 | np->dev->stats.rx_dropped = dropped; | 6284 | np->dev->stats.rx_dropped = dropped; |
@@ -6268,16 +6288,24 @@ static void niu_get_rx_stats(struct niu *np) | |||
6268 | static void niu_get_tx_stats(struct niu *np) | 6288 | static void niu_get_tx_stats(struct niu *np) |
6269 | { | 6289 | { |
6270 | unsigned long pkts, errors, bytes; | 6290 | unsigned long pkts, errors, bytes; |
6291 | struct tx_ring_info *tx_rings; | ||
6271 | int i; | 6292 | int i; |
6272 | 6293 | ||
6273 | pkts = errors = bytes = 0; | 6294 | pkts = errors = bytes = 0; |
6295 | |||
6296 | tx_rings = ACCESS_ONCE(np->tx_rings); | ||
6297 | if (!tx_rings) | ||
6298 | goto no_rings; | ||
6299 | |||
6274 | for (i = 0; i < np->num_tx_rings; i++) { | 6300 | for (i = 0; i < np->num_tx_rings; i++) { |
6275 | struct tx_ring_info *rp = &np->tx_rings[i]; | 6301 | struct tx_ring_info *rp = &tx_rings[i]; |
6276 | 6302 | ||
6277 | pkts += rp->tx_packets; | 6303 | pkts += rp->tx_packets; |
6278 | bytes += rp->tx_bytes; | 6304 | bytes += rp->tx_bytes; |
6279 | errors += rp->tx_errors; | 6305 | errors += rp->tx_errors; |
6280 | } | 6306 | } |
6307 | |||
6308 | no_rings: | ||
6281 | np->dev->stats.tx_packets = pkts; | 6309 | np->dev->stats.tx_packets = pkts; |
6282 | np->dev->stats.tx_bytes = bytes; | 6310 | np->dev->stats.tx_bytes = bytes; |
6283 | np->dev->stats.tx_errors = errors; | 6311 | np->dev->stats.tx_errors = errors; |
@@ -6287,9 +6315,10 @@ static struct net_device_stats *niu_get_stats(struct net_device *dev) | |||
6287 | { | 6315 | { |
6288 | struct niu *np = netdev_priv(dev); | 6316 | struct niu *np = netdev_priv(dev); |
6289 | 6317 | ||
6290 | niu_get_rx_stats(np); | 6318 | if (netif_running(dev)) { |
6291 | niu_get_tx_stats(np); | 6319 | niu_get_rx_stats(np); |
6292 | 6320 | niu_get_tx_stats(np); | |
6321 | } | ||
6293 | return &dev->stats; | 6322 | return &dev->stats; |
6294 | } | 6323 | } |
6295 | 6324 | ||
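The niu changes publish the ring arrays in a fixed order — count first, pointer second, with smp_wmb() in between — so a stats reader that sees a non-NULL pointer through ACCESS_ONCE() also sees a valid count even while the device is still coming up. The two sides, reduced to a sketch (names are illustrative):

/* writer: make the count visible before the pointer */
static void publish_rx_rings(struct niu *np, struct rx_ring_info *rings, int n)
{
	np->num_rx_rings = n;
	smp_wmb();
	np->rx_rings = rings;
}

/* reader: bail out if the rings have not been published yet */
static unsigned long count_rx_packets(struct niu *np)
{
	struct rx_ring_info *rings = ACCESS_ONCE(np->rx_rings);
	unsigned long pkts = 0;
	int i;

	if (!rings)
		return 0;
	for (i = 0; i < np->num_rx_rings; i++)
		pkts += rings[i].rx_packets;
	return pkts;
}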
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c index 84134c766f3a..a41b2cf4d917 100644 --- a/drivers/net/ns83820.c +++ b/drivers/net/ns83820.c | |||
@@ -1988,12 +1988,11 @@ static int __devinit ns83820_init_one(struct pci_dev *pci_dev, | |||
1988 | } | 1988 | } |
1989 | 1989 | ||
1990 | ndev = alloc_etherdev(sizeof(struct ns83820)); | 1990 | ndev = alloc_etherdev(sizeof(struct ns83820)); |
1991 | dev = PRIV(ndev); | ||
1992 | |||
1993 | err = -ENOMEM; | 1991 | err = -ENOMEM; |
1994 | if (!dev) | 1992 | if (!ndev) |
1995 | goto out; | 1993 | goto out; |
1996 | 1994 | ||
1995 | dev = PRIV(ndev); | ||
1997 | dev->ndev = ndev; | 1996 | dev->ndev = ndev; |
1998 | 1997 | ||
1999 | spin_lock_init(&dev->rx_info.lock); | 1998 | spin_lock_init(&dev->rx_info.lock); |
diff --git a/drivers/net/pch_gbe/pch_gbe.h b/drivers/net/pch_gbe/pch_gbe.h index a0c26a99520f..e1e33c80fb25 100644 --- a/drivers/net/pch_gbe/pch_gbe.h +++ b/drivers/net/pch_gbe/pch_gbe.h | |||
@@ -73,7 +73,7 @@ struct pch_gbe_regs { | |||
73 | struct pch_gbe_regs_mac_adr mac_adr[16]; | 73 | struct pch_gbe_regs_mac_adr mac_adr[16]; |
74 | u32 ADDR_MASK; | 74 | u32 ADDR_MASK; |
75 | u32 MIIM; | 75 | u32 MIIM; |
76 | u32 reserve2; | 76 | u32 MAC_ADDR_LOAD; |
77 | u32 RGMII_ST; | 77 | u32 RGMII_ST; |
78 | u32 RGMII_CTRL; | 78 | u32 RGMII_CTRL; |
79 | u32 reserve3[3]; | 79 | u32 reserve3[3]; |
diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c index d7355306a738..b99e90aca37d 100644 --- a/drivers/net/pch_gbe/pch_gbe_main.c +++ b/drivers/net/pch_gbe/pch_gbe_main.c | |||
@@ -29,6 +29,7 @@ const char pch_driver_version[] = DRV_VERSION; | |||
29 | #define PCH_GBE_SHORT_PKT 64 | 29 | #define PCH_GBE_SHORT_PKT 64 |
30 | #define DSC_INIT16 0xC000 | 30 | #define DSC_INIT16 0xC000 |
31 | #define PCH_GBE_DMA_ALIGN 0 | 31 | #define PCH_GBE_DMA_ALIGN 0 |
32 | #define PCH_GBE_DMA_PADDING 2 | ||
32 | #define PCH_GBE_WATCHDOG_PERIOD (1 * HZ) /* watchdog time */ | 33 | #define PCH_GBE_WATCHDOG_PERIOD (1 * HZ) /* watchdog time */ |
33 | #define PCH_GBE_COPYBREAK_DEFAULT 256 | 34 | #define PCH_GBE_COPYBREAK_DEFAULT 256 |
34 | #define PCH_GBE_PCI_BAR 1 | 35 | #define PCH_GBE_PCI_BAR 1 |
@@ -88,6 +89,12 @@ static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT; | |||
88 | static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg); | 89 | static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg); |
89 | static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg, | 90 | static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg, |
90 | int data); | 91 | int data); |
92 | |||
93 | inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw) | ||
94 | { | ||
95 | iowrite32(0x01, &hw->reg->MAC_ADDR_LOAD); | ||
96 | } | ||
97 | |||
91 | /** | 98 | /** |
92 | * pch_gbe_mac_read_mac_addr - Read MAC address | 99 | * pch_gbe_mac_read_mac_addr - Read MAC address |
93 | * @hw: Pointer to the HW structure | 100 | * @hw: Pointer to the HW structure |
@@ -519,7 +526,9 @@ static void pch_gbe_reset_task(struct work_struct *work) | |||
519 | struct pch_gbe_adapter *adapter; | 526 | struct pch_gbe_adapter *adapter; |
520 | adapter = container_of(work, struct pch_gbe_adapter, reset_task); | 527 | adapter = container_of(work, struct pch_gbe_adapter, reset_task); |
521 | 528 | ||
529 | rtnl_lock(); | ||
522 | pch_gbe_reinit_locked(adapter); | 530 | pch_gbe_reinit_locked(adapter); |
531 | rtnl_unlock(); | ||
523 | } | 532 | } |
524 | 533 | ||
525 | /** | 534 | /** |
@@ -528,14 +537,8 @@ static void pch_gbe_reset_task(struct work_struct *work) | |||
528 | */ | 537 | */ |
529 | void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter) | 538 | void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter) |
530 | { | 539 | { |
531 | struct net_device *netdev = adapter->netdev; | 540 | pch_gbe_down(adapter); |
532 | 541 | pch_gbe_up(adapter); | |
533 | rtnl_lock(); | ||
534 | if (netif_running(netdev)) { | ||
535 | pch_gbe_down(adapter); | ||
536 | pch_gbe_up(adapter); | ||
537 | } | ||
538 | rtnl_unlock(); | ||
539 | } | 542 | } |
540 | 543 | ||
541 | /** | 544 | /** |
@@ -1369,16 +1372,13 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter, | |||
1369 | struct pch_gbe_buffer *buffer_info; | 1372 | struct pch_gbe_buffer *buffer_info; |
1370 | struct pch_gbe_rx_desc *rx_desc; | 1373 | struct pch_gbe_rx_desc *rx_desc; |
1371 | u32 length; | 1374 | u32 length; |
1372 | unsigned char tmp_packet[ETH_HLEN]; | ||
1373 | unsigned int i; | 1375 | unsigned int i; |
1374 | unsigned int cleaned_count = 0; | 1376 | unsigned int cleaned_count = 0; |
1375 | bool cleaned = false; | 1377 | bool cleaned = false; |
1376 | struct sk_buff *skb; | 1378 | struct sk_buff *skb, *new_skb; |
1377 | u8 dma_status; | 1379 | u8 dma_status; |
1378 | u16 gbec_status; | 1380 | u16 gbec_status; |
1379 | u32 tcp_ip_status; | 1381 | u32 tcp_ip_status; |
1380 | u8 skb_copy_flag = 0; | ||
1381 | u8 skb_padding_flag = 0; | ||
1382 | 1382 | ||
1383 | i = rx_ring->next_to_clean; | 1383 | i = rx_ring->next_to_clean; |
1384 | 1384 | ||
@@ -1422,55 +1422,70 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter, | |||
1422 | pr_err("Receive CRC Error\n"); | 1422 | pr_err("Receive CRC Error\n"); |
1423 | } else { | 1423 | } else { |
1424 | /* get receive length */ | 1424 | /* get receive length */ |
1425 | /* length convert[-3], padding[-2] */ | 1425 | /* length convert[-3] */ |
1426 | length = (rx_desc->rx_words_eob) - 3 - 2; | 1426 | length = (rx_desc->rx_words_eob) - 3; |
1427 | 1427 | ||
1428 | /* Decide the data conversion method */ | 1428 | /* Decide the data conversion method */ |
1429 | if (!adapter->rx_csum) { | 1429 | if (!adapter->rx_csum) { |
1430 | /* [Header:14][payload] */ | 1430 | /* [Header:14][payload] */ |
1431 | skb_padding_flag = 0; | 1431 | if (NET_IP_ALIGN) { |
1432 | skb_copy_flag = 1; | 1432 | /* Because alignment differs, |
1433 | * the new_skb is newly allocated, | ||
1434 | * and data is copied to new_skb.*/ | ||
1435 | new_skb = netdev_alloc_skb(netdev, | ||
1436 | length + NET_IP_ALIGN); | ||
1437 | if (!new_skb) { | ||
1438 | /* dorrop error */ | ||
1439 | pr_err("New skb allocation " | ||
1440 | "Error\n"); | ||
1441 | goto dorrop; | ||
1442 | } | ||
1443 | skb_reserve(new_skb, NET_IP_ALIGN); | ||
1444 | memcpy(new_skb->data, skb->data, | ||
1445 | length); | ||
1446 | skb = new_skb; | ||
1447 | } else { | ||
1448 | /* DMA buffer is used as SKB as it is.*/ | ||
1449 | buffer_info->skb = NULL; | ||
1450 | } | ||
1433 | } else { | 1451 | } else { |
1434 | /* [Header:14][padding:2][payload] */ | 1452 | /* [Header:14][padding:2][payload] */ |
1435 | skb_padding_flag = 1; | 1453 | /* The length includes padding length */ |
1436 | if (length < copybreak) | 1454 | length = length - PCH_GBE_DMA_PADDING; |
1437 | skb_copy_flag = 1; | 1455 | if ((length < copybreak) || |
1438 | else | 1456 | (NET_IP_ALIGN != PCH_GBE_DMA_PADDING)) { |
1439 | skb_copy_flag = 0; | 1457 | /* Because alignment differs, |
1440 | } | 1458 | * the new_skb is newly allocated, |
1441 | 1459 | * and data is copied to new_skb. | |
1442 | /* Data conversion */ | 1460 | * Padding data is deleted |
1443 | if (skb_copy_flag) { /* recycle skb */ | 1461 | * at the time of a copy.*/ |
1444 | struct sk_buff *new_skb; | 1462 | new_skb = netdev_alloc_skb(netdev, |
1445 | new_skb = | 1463 | length + NET_IP_ALIGN); |
1446 | netdev_alloc_skb(netdev, | 1464 | if (!new_skb) { |
1447 | length + NET_IP_ALIGN); | 1465 | /* dorrop error */ |
1448 | if (new_skb) { | 1466 | pr_err("New skb allocation " |
1449 | if (!skb_padding_flag) { | 1467 | "Error\n"); |
1450 | skb_reserve(new_skb, | 1468 | goto dorrop; |
1451 | NET_IP_ALIGN); | ||
1452 | } | 1469 | } |
1470 | skb_reserve(new_skb, NET_IP_ALIGN); | ||
1453 | memcpy(new_skb->data, skb->data, | 1471 | memcpy(new_skb->data, skb->data, |
1454 | length); | 1472 | ETH_HLEN); |
1455 | /* save the skb | 1473 | memcpy(&new_skb->data[ETH_HLEN], |
1456 | * in buffer_info as good */ | 1474 | &skb->data[ETH_HLEN + |
1475 | PCH_GBE_DMA_PADDING], | ||
1476 | length - ETH_HLEN); | ||
1457 | skb = new_skb; | 1477 | skb = new_skb; |
1458 | } else if (!skb_padding_flag) { | 1478 | } else { |
1459 | /* dorrop error */ | 1479 | /* Padding data is deleted |
1460 | pr_err("New skb allocation Error\n"); | 1480 | * by moving header data.*/ |
1461 | goto dorrop; | 1481 | memmove(&skb->data[PCH_GBE_DMA_PADDING], |
1482 | &skb->data[0], ETH_HLEN); | ||
1483 | skb_reserve(skb, NET_IP_ALIGN); | ||
1484 | buffer_info->skb = NULL; | ||
1462 | } | 1485 | } |
1463 | } else { | ||
1464 | buffer_info->skb = NULL; | ||
1465 | } | 1486 | } |
1466 | if (skb_padding_flag) { | 1487 | /* The length includes FCS length */ |
1467 | memcpy(&tmp_packet[0], &skb->data[0], ETH_HLEN); | 1488 | length = length - ETH_FCS_LEN; |
1468 | memcpy(&skb->data[NET_IP_ALIGN], &tmp_packet[0], | ||
1469 | ETH_HLEN); | ||
1470 | skb_reserve(skb, NET_IP_ALIGN); | ||
1471 | |||
1472 | } | ||
1473 | |||
1474 | /* update status of driver */ | 1489 | /* update status of driver */ |
1475 | adapter->stats.rx_bytes += length; | 1490 | adapter->stats.rx_bytes += length; |
1476 | adapter->stats.rx_packets++; | 1491 | adapter->stats.rx_packets++; |
@@ -2247,7 +2262,7 @@ static void pch_gbe_remove(struct pci_dev *pdev) | |||
2247 | struct net_device *netdev = pci_get_drvdata(pdev); | 2262 | struct net_device *netdev = pci_get_drvdata(pdev); |
2248 | struct pch_gbe_adapter *adapter = netdev_priv(netdev); | 2263 | struct pch_gbe_adapter *adapter = netdev_priv(netdev); |
2249 | 2264 | ||
2250 | flush_scheduled_work(); | 2265 | cancel_work_sync(&adapter->reset_task); |
2251 | unregister_netdev(netdev); | 2266 | unregister_netdev(netdev); |
2252 | 2267 | ||
2253 | pch_gbe_hal_phy_hw_reset(&adapter->hw); | 2268 | pch_gbe_hal_phy_hw_reset(&adapter->hw); |
@@ -2322,6 +2337,7 @@ static int pch_gbe_probe(struct pci_dev *pdev, | |||
2322 | netdev->features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_GRO; | 2337 | netdev->features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_GRO; |
2323 | pch_gbe_set_ethtool_ops(netdev); | 2338 | pch_gbe_set_ethtool_ops(netdev); |
2324 | 2339 | ||
2340 | pch_gbe_mac_load_mac_addr(&adapter->hw); | ||
2325 | pch_gbe_mac_reset_hw(&adapter->hw); | 2341 | pch_gbe_mac_reset_hw(&adapter->hw); |
2326 | 2342 | ||
2327 | /* setup the private structure */ | 2343 | /* setup the private structure */ |
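When NET_IP_ALIGN differs from the controller's DMA padding, the rewritten pch_gbe_clean_rx() copies the frame into a freshly allocated skb whose headroom realigns the IP header. The copy path reduces to roughly this sketch (the helper name is illustrative):

static struct sk_buff *rx_copy_aligned(struct net_device *netdev,
				       const struct sk_buff *skb, u32 length)
{
	struct sk_buff *new_skb = netdev_alloc_skb(netdev, length + NET_IP_ALIGN);

	if (!new_skb)
		return NULL;			/* caller drops the frame */
	skb_reserve(new_skb, NET_IP_ALIGN);	/* IP header on a 4-byte boundary */
	memcpy(new_skb->data, skb->data, length);
	return new_skb;
}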
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c index 1f42f6ac8551..d3cb77205863 100644 --- a/drivers/net/pcmcia/axnet_cs.c +++ b/drivers/net/pcmcia/axnet_cs.c | |||
@@ -1488,12 +1488,10 @@ static void ei_rx_overrun(struct net_device *dev) | |||
1488 | 1488 | ||
1489 | /* | 1489 | /* |
1490 | * Wait a full Tx time (1.2ms) + some guard time, NS says 1.6ms total. | 1490 | * Wait a full Tx time (1.2ms) + some guard time, NS says 1.6ms total. |
1491 | * Early datasheets said to poll the reset bit, but now they say that | 1491 | * We wait at least 2ms. |
1492 | * it "is not a reliable indicator and subsequently should be ignored." | ||
1493 | * We wait at least 10ms. | ||
1494 | */ | 1492 | */ |
1495 | 1493 | ||
1496 | mdelay(10); | 1494 | mdelay(2); |
1497 | 1495 | ||
1498 | /* | 1496 | /* |
1499 | * Reset RBCR[01] back to zero as per magic incantation. | 1497 | * Reset RBCR[01] back to zero as per magic incantation. |
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c index 9226cda4d054..530ab5a10bd3 100644 --- a/drivers/net/pcmcia/fmvj18x_cs.c +++ b/drivers/net/pcmcia/fmvj18x_cs.c | |||
@@ -691,6 +691,7 @@ static struct pcmcia_device_id fmvj18x_ids[] = { | |||
691 | PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0e0a), | 691 | PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0e0a), |
692 | PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0e01), | 692 | PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0e01), |
693 | PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0a05), | 693 | PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0a05), |
694 | PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0b05), | ||
694 | PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x1101), | 695 | PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x1101), |
695 | PCMCIA_DEVICE_NULL, | 696 | PCMCIA_DEVICE_NULL, |
696 | }; | 697 | }; |
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c index 2c158910f7ea..e953793a33ff 100644 --- a/drivers/net/pcmcia/pcnet_cs.c +++ b/drivers/net/pcmcia/pcnet_cs.c | |||
@@ -1536,6 +1536,7 @@ static struct pcmcia_device_id pcnet_ids[] = { | |||
1536 | PCMCIA_DEVICE_PROD_ID12("CONTEC", "C-NET(PC)C-10L", 0x21cab552, 0xf6f90722), | 1536 | PCMCIA_DEVICE_PROD_ID12("CONTEC", "C-NET(PC)C-10L", 0x21cab552, 0xf6f90722), |
1537 | PCMCIA_DEVICE_PROD_ID12("corega", "FEther PCC-TXF", 0x0a21501a, 0xa51564a2), | 1537 | PCMCIA_DEVICE_PROD_ID12("corega", "FEther PCC-TXF", 0x0a21501a, 0xa51564a2), |
1538 | PCMCIA_DEVICE_PROD_ID12("corega", "Ether CF-TD", 0x0a21501a, 0x6589340a), | 1538 | PCMCIA_DEVICE_PROD_ID12("corega", "Ether CF-TD", 0x0a21501a, 0x6589340a), |
1539 | PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega Ether CF-TD LAN Card", 0x5261440f, 0x8797663b), | ||
1539 | PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-T", 0x5261440f, 0xfa9d85bd), | 1540 | PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-T", 0x5261440f, 0xfa9d85bd), |
1540 | PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-TD", 0x5261440f, 0xc49bd73d), | 1541 | PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-TD", 0x5261440f, 0xc49bd73d), |
1541 | PCMCIA_DEVICE_PROD_ID12("Corega K.K.", "corega EtherII PCC-TD", 0xd4fdcbd8, 0xc49bd73d), | 1542 | PCMCIA_DEVICE_PROD_ID12("Corega K.K.", "corega EtherII PCC-TD", 0xd4fdcbd8, 0xc49bd73d), |
diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c index 78d70a6481bf..a1b82c9c67d2 100644 --- a/drivers/net/ppp_async.c +++ b/drivers/net/ppp_async.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <linux/init.h> | 32 | #include <linux/init.h> |
33 | #include <linux/jiffies.h> | 33 | #include <linux/jiffies.h> |
34 | #include <linux/slab.h> | 34 | #include <linux/slab.h> |
35 | #include <asm/unaligned.h> | ||
35 | #include <asm/uaccess.h> | 36 | #include <asm/uaccess.h> |
36 | #include <asm/string.h> | 37 | #include <asm/string.h> |
37 | 38 | ||
@@ -542,7 +543,7 @@ ppp_async_encode(struct asyncppp *ap) | |||
542 | data = ap->tpkt->data; | 543 | data = ap->tpkt->data; |
543 | count = ap->tpkt->len; | 544 | count = ap->tpkt->len; |
544 | fcs = ap->tfcs; | 545 | fcs = ap->tfcs; |
545 | proto = (data[0] << 8) + data[1]; | 546 | proto = get_unaligned_be16(data); |
546 | 547 | ||
547 | /* | 548 | /* |
548 | * LCP packets with code values between 1 (configure-reqest) | 549 | * LCP packets with code values between 1 (configure-reqest) |
@@ -963,7 +964,7 @@ static void async_lcp_peek(struct asyncppp *ap, unsigned char *data, | |||
963 | code = data[0]; | 964 | code = data[0]; |
964 | if (code != CONFACK && code != CONFREQ) | 965 | if (code != CONFACK && code != CONFREQ) |
965 | return; | 966 | return; |
966 | dlen = (data[2] << 8) + data[3]; | 967 | dlen = get_unaligned_be16(data + 2); |
967 | if (len < dlen) | 968 | if (len < dlen) |
968 | return; /* packet got truncated or length is bogus */ | 969 | return; /* packet got truncated or length is bogus */ |
969 | 970 | ||
@@ -997,15 +998,14 @@ static void async_lcp_peek(struct asyncppp *ap, unsigned char *data, | |||
997 | while (dlen >= 2 && dlen >= data[1] && data[1] >= 2) { | 998 | while (dlen >= 2 && dlen >= data[1] && data[1] >= 2) { |
998 | switch (data[0]) { | 999 | switch (data[0]) { |
999 | case LCP_MRU: | 1000 | case LCP_MRU: |
1000 | val = (data[2] << 8) + data[3]; | 1001 | val = get_unaligned_be16(data + 2); |
1001 | if (inbound) | 1002 | if (inbound) |
1002 | ap->mru = val; | 1003 | ap->mru = val; |
1003 | else | 1004 | else |
1004 | ap->chan.mtu = val; | 1005 | ap->chan.mtu = val; |
1005 | break; | 1006 | break; |
1006 | case LCP_ASYNCMAP: | 1007 | case LCP_ASYNCMAP: |
1007 | val = (data[2] << 24) + (data[3] << 16) | 1008 | val = get_unaligned_be32(data + 2); |
1008 | + (data[4] << 8) + data[5]; | ||
1009 | if (inbound) | 1009 | if (inbound) |
1010 | ap->raccm = val; | 1010 | ap->raccm = val; |
1011 | else | 1011 | else |
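The ppp_async.c hunks above replace open-coded big-endian byte assembly with the <asm/unaligned.h> helpers, which also stay safe on strict-alignment CPUs. A minimal standalone sketch of the equivalence (not the driver code itself):

	#include <linux/types.h>
	#include <asm/unaligned.h>

	static u16 proto_from_hdr(const unsigned char *data)
	{
		/* old style: (data[0] << 8) + data[1] */
		return get_unaligned_be16(data);
	}

	static u32 accm_from_opt(const unsigned char *data)
	{
		/* old style: (data[2] << 24) + (data[3] << 16) + (data[4] << 8) + data[5] */
		return get_unaligned_be32(data + 2);
	}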
diff --git a/drivers/net/ppp_deflate.c b/drivers/net/ppp_deflate.c index 695bc83e0cfd..43583309a65d 100644 --- a/drivers/net/ppp_deflate.c +++ b/drivers/net/ppp_deflate.c | |||
@@ -41,6 +41,7 @@ | |||
41 | #include <linux/ppp-comp.h> | 41 | #include <linux/ppp-comp.h> |
42 | 42 | ||
43 | #include <linux/zlib.h> | 43 | #include <linux/zlib.h> |
44 | #include <asm/unaligned.h> | ||
44 | 45 | ||
45 | /* | 46 | /* |
46 | * State for a Deflate (de)compressor. | 47 | * State for a Deflate (de)compressor. |
@@ -232,11 +233,9 @@ static int z_compress(void *arg, unsigned char *rptr, unsigned char *obuf, | |||
232 | */ | 233 | */ |
233 | wptr[0] = PPP_ADDRESS(rptr); | 234 | wptr[0] = PPP_ADDRESS(rptr); |
234 | wptr[1] = PPP_CONTROL(rptr); | 235 | wptr[1] = PPP_CONTROL(rptr); |
235 | wptr[2] = PPP_COMP >> 8; | 236 | put_unaligned_be16(PPP_COMP, wptr + 2); |
236 | wptr[3] = PPP_COMP; | ||
237 | wptr += PPP_HDRLEN; | 237 | wptr += PPP_HDRLEN; |
238 | wptr[0] = state->seqno >> 8; | 238 | put_unaligned_be16(state->seqno, wptr); |
239 | wptr[1] = state->seqno; | ||
240 | wptr += DEFLATE_OVHD; | 239 | wptr += DEFLATE_OVHD; |
241 | olen = PPP_HDRLEN + DEFLATE_OVHD; | 240 | olen = PPP_HDRLEN + DEFLATE_OVHD; |
242 | state->strm.next_out = wptr; | 241 | state->strm.next_out = wptr; |
@@ -451,7 +450,7 @@ static int z_decompress(void *arg, unsigned char *ibuf, int isize, | |||
451 | } | 450 | } |
452 | 451 | ||
453 | /* Check the sequence number. */ | 452 | /* Check the sequence number. */ |
454 | seq = (ibuf[PPP_HDRLEN] << 8) + ibuf[PPP_HDRLEN+1]; | 453 | seq = get_unaligned_be16(ibuf + PPP_HDRLEN); |
455 | if (seq != (state->seqno & 0xffff)) { | 454 | if (seq != (state->seqno & 0xffff)) { |
456 | if (state->debug) | 455 | if (state->debug) |
457 | printk(KERN_DEBUG "z_decompress%d: bad seq # %d, expected %d\n", | 456 | printk(KERN_DEBUG "z_decompress%d: bad seq # %d, expected %d\n", |
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c index 6456484c0299..c7a6c4466978 100644 --- a/drivers/net/ppp_generic.c +++ b/drivers/net/ppp_generic.c | |||
@@ -46,6 +46,7 @@ | |||
46 | #include <linux/device.h> | 46 | #include <linux/device.h> |
47 | #include <linux/mutex.h> | 47 | #include <linux/mutex.h> |
48 | #include <linux/slab.h> | 48 | #include <linux/slab.h> |
49 | #include <asm/unaligned.h> | ||
49 | #include <net/slhc_vj.h> | 50 | #include <net/slhc_vj.h> |
50 | #include <asm/atomic.h> | 51 | #include <asm/atomic.h> |
51 | 52 | ||
@@ -210,7 +211,7 @@ struct ppp_net { | |||
210 | }; | 211 | }; |
211 | 212 | ||
212 | /* Get the PPP protocol number from a skb */ | 213 | /* Get the PPP protocol number from a skb */ |
213 | #define PPP_PROTO(skb) (((skb)->data[0] << 8) + (skb)->data[1]) | 214 | #define PPP_PROTO(skb) get_unaligned_be16((skb)->data) |
214 | 215 | ||
215 | /* We limit the length of ppp->file.rq to this (arbitrary) value */ | 216 | /* We limit the length of ppp->file.rq to this (arbitrary) value */ |
216 | #define PPP_MAX_RQLEN 32 | 217 | #define PPP_MAX_RQLEN 32 |
@@ -964,8 +965,7 @@ ppp_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
964 | 965 | ||
965 | pp = skb_push(skb, 2); | 966 | pp = skb_push(skb, 2); |
966 | proto = npindex_to_proto[npi]; | 967 | proto = npindex_to_proto[npi]; |
967 | pp[0] = proto >> 8; | 968 | put_unaligned_be16(proto, pp); |
968 | pp[1] = proto; | ||
969 | 969 | ||
970 | netif_stop_queue(dev); | 970 | netif_stop_queue(dev); |
971 | skb_queue_tail(&ppp->file.xq, skb); | 971 | skb_queue_tail(&ppp->file.xq, skb); |
@@ -1473,8 +1473,7 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb) | |||
1473 | q = skb_put(frag, flen + hdrlen); | 1473 | q = skb_put(frag, flen + hdrlen); |
1474 | 1474 | ||
1475 | /* make the MP header */ | 1475 | /* make the MP header */ |
1476 | q[0] = PPP_MP >> 8; | 1476 | put_unaligned_be16(PPP_MP, q); |
1477 | q[1] = PPP_MP; | ||
1478 | if (ppp->flags & SC_MP_XSHORTSEQ) { | 1477 | if (ppp->flags & SC_MP_XSHORTSEQ) { |
1479 | q[2] = bits + ((ppp->nxseq >> 8) & 0xf); | 1478 | q[2] = bits + ((ppp->nxseq >> 8) & 0xf); |
1480 | q[3] = ppp->nxseq; | 1479 | q[3] = ppp->nxseq; |
diff --git a/drivers/net/ppp_mppe.c b/drivers/net/ppp_mppe.c index 6d1a1b80cc3e..9a1849a83e2a 100644 --- a/drivers/net/ppp_mppe.c +++ b/drivers/net/ppp_mppe.c | |||
@@ -55,6 +55,7 @@ | |||
55 | #include <linux/ppp_defs.h> | 55 | #include <linux/ppp_defs.h> |
56 | #include <linux/ppp-comp.h> | 56 | #include <linux/ppp-comp.h> |
57 | #include <linux/scatterlist.h> | 57 | #include <linux/scatterlist.h> |
58 | #include <asm/unaligned.h> | ||
58 | 59 | ||
59 | #include "ppp_mppe.h" | 60 | #include "ppp_mppe.h" |
60 | 61 | ||
@@ -395,16 +396,14 @@ mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf, | |||
395 | */ | 396 | */ |
396 | obuf[0] = PPP_ADDRESS(ibuf); | 397 | obuf[0] = PPP_ADDRESS(ibuf); |
397 | obuf[1] = PPP_CONTROL(ibuf); | 398 | obuf[1] = PPP_CONTROL(ibuf); |
398 | obuf[2] = PPP_COMP >> 8; /* isize + MPPE_OVHD + 1 */ | 399 | put_unaligned_be16(PPP_COMP, obuf + 2); |
399 | obuf[3] = PPP_COMP; /* isize + MPPE_OVHD + 2 */ | ||
400 | obuf += PPP_HDRLEN; | 400 | obuf += PPP_HDRLEN; |
401 | 401 | ||
402 | state->ccount = (state->ccount + 1) % MPPE_CCOUNT_SPACE; | 402 | state->ccount = (state->ccount + 1) % MPPE_CCOUNT_SPACE; |
403 | if (state->debug >= 7) | 403 | if (state->debug >= 7) |
404 | printk(KERN_DEBUG "mppe_compress[%d]: ccount %d\n", state->unit, | 404 | printk(KERN_DEBUG "mppe_compress[%d]: ccount %d\n", state->unit, |
405 | state->ccount); | 405 | state->ccount); |
406 | obuf[0] = state->ccount >> 8; | 406 | put_unaligned_be16(state->ccount, obuf); |
407 | obuf[1] = state->ccount & 0xff; | ||
408 | 407 | ||
409 | if (!state->stateful || /* stateless mode */ | 408 | if (!state->stateful || /* stateless mode */ |
410 | ((state->ccount & 0xff) == 0xff) || /* "flag" packet */ | 409 | ((state->ccount & 0xff) == 0xff) || /* "flag" packet */ |
diff --git a/drivers/net/ppp_synctty.c b/drivers/net/ppp_synctty.c index 4c95ec3fb8d4..4e6b72f57de8 100644 --- a/drivers/net/ppp_synctty.c +++ b/drivers/net/ppp_synctty.c | |||
@@ -45,6 +45,7 @@ | |||
45 | #include <linux/completion.h> | 45 | #include <linux/completion.h> |
46 | #include <linux/init.h> | 46 | #include <linux/init.h> |
47 | #include <linux/slab.h> | 47 | #include <linux/slab.h> |
48 | #include <asm/unaligned.h> | ||
48 | #include <asm/uaccess.h> | 49 | #include <asm/uaccess.h> |
49 | 50 | ||
50 | #define PPP_VERSION "2.4.2" | 51 | #define PPP_VERSION "2.4.2" |
@@ -563,7 +564,7 @@ ppp_sync_txmunge(struct syncppp *ap, struct sk_buff *skb) | |||
563 | int islcp; | 564 | int islcp; |
564 | 565 | ||
565 | data = skb->data; | 566 | data = skb->data; |
566 | proto = (data[0] << 8) + data[1]; | 567 | proto = get_unaligned_be16(data); |
567 | 568 | ||
568 | /* LCP packets with codes between 1 (configure-request) | 569 | /* LCP packets with codes between 1 (configure-request) |
569 | * and 7 (code-reject) must be sent as though no options | 570 | * and 7 (code-reject) must be sent as though no options |
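The transmit-side conversions in ppp_deflate.c, ppp_generic.c and ppp_mppe.c use the matching write helper. A small sketch, assuming a caller-provided header buffer:

	#include <linux/types.h>
	#include <asm/unaligned.h>

	static void write_proto(unsigned char *p, u16 proto)
	{
		/* replaces: p[0] = proto >> 8; p[1] = proto; */
		put_unaligned_be16(proto, p);
	}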
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h index 9c2a02d204dc..44e316fd67b8 100644 --- a/drivers/net/qlcnic/qlcnic.h +++ b/drivers/net/qlcnic/qlcnic.h | |||
@@ -34,8 +34,8 @@ | |||
34 | 34 | ||
35 | #define _QLCNIC_LINUX_MAJOR 5 | 35 | #define _QLCNIC_LINUX_MAJOR 5 |
36 | #define _QLCNIC_LINUX_MINOR 0 | 36 | #define _QLCNIC_LINUX_MINOR 0 |
37 | #define _QLCNIC_LINUX_SUBVERSION 14 | 37 | #define _QLCNIC_LINUX_SUBVERSION 15 |
38 | #define QLCNIC_LINUX_VERSIONID "5.0.14" | 38 | #define QLCNIC_LINUX_VERSIONID "5.0.15" |
39 | #define QLCNIC_DRV_IDC_VER 0x01 | 39 | #define QLCNIC_DRV_IDC_VER 0x01 |
40 | #define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ | 40 | #define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ |
41 | (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) | 41 | (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) |
@@ -289,6 +289,26 @@ struct uni_data_desc{ | |||
289 | u32 reserved[5]; | 289 | u32 reserved[5]; |
290 | }; | 290 | }; |
291 | 291 | ||
292 | /* Flash Defines and Structures */ | ||
293 | #define QLCNIC_FLT_LOCATION 0x3F1000 | ||
294 | #define QLCNIC_FW_IMAGE_REGION 0x74 | ||
295 | struct qlcnic_flt_header { | ||
296 | u16 version; | ||
297 | u16 len; | ||
298 | u16 checksum; | ||
299 | u16 reserved; | ||
300 | }; | ||
301 | |||
302 | struct qlcnic_flt_entry { | ||
303 | u8 region; | ||
304 | u8 reserved0; | ||
305 | u8 attrib; | ||
306 | u8 reserved1; | ||
307 | u32 size; | ||
308 | u32 start_addr; | ||
309 | u32 end_add; | ||
310 | }; | ||
311 | |||
292 | /* Magic number to let user know flash is programmed */ | 312 | /* Magic number to let user know flash is programmed */ |
293 | #define QLCNIC_BDINFO_MAGIC 0x12345678 | 313 | #define QLCNIC_BDINFO_MAGIC 0x12345678 |
294 | 314 | ||
diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c index 1e7af709d395..4c14510e2a87 100644 --- a/drivers/net/qlcnic/qlcnic_ethtool.c +++ b/drivers/net/qlcnic/qlcnic_ethtool.c | |||
@@ -672,7 +672,7 @@ qlcnic_diag_test(struct net_device *dev, struct ethtool_test *eth_test, | |||
672 | if (data[1]) | 672 | if (data[1]) |
673 | eth_test->flags |= ETH_TEST_FL_FAILED; | 673 | eth_test->flags |= ETH_TEST_FL_FAILED; |
674 | 674 | ||
675 | if (eth_test->flags == ETH_TEST_FL_OFFLINE) { | 675 | if (eth_test->flags & ETH_TEST_FL_OFFLINE) { |
676 | data[2] = qlcnic_irq_test(dev); | 676 | data[2] = qlcnic_irq_test(dev); |
677 | if (data[2]) | 677 | if (data[2]) |
678 | eth_test->flags |= ETH_TEST_FL_FAILED; | 678 | eth_test->flags |= ETH_TEST_FL_FAILED; |
diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c index 9b9c7c39d3ee..a7f1d5b7e811 100644 --- a/drivers/net/qlcnic/qlcnic_init.c +++ b/drivers/net/qlcnic/qlcnic_init.c | |||
@@ -627,12 +627,73 @@ qlcnic_setup_idc_param(struct qlcnic_adapter *adapter) { | |||
627 | return 0; | 627 | return 0; |
628 | } | 628 | } |
629 | 629 | ||
630 | static int qlcnic_get_flt_entry(struct qlcnic_adapter *adapter, u8 region, | ||
631 | struct qlcnic_flt_entry *region_entry) | ||
632 | { | ||
633 | struct qlcnic_flt_header flt_hdr; | ||
634 | struct qlcnic_flt_entry *flt_entry; | ||
635 | int i = 0, ret; | ||
636 | u32 entry_size; | ||
637 | |||
638 | memset(region_entry, 0, sizeof(struct qlcnic_flt_entry)); | ||
639 | ret = qlcnic_rom_fast_read_words(adapter, QLCNIC_FLT_LOCATION, | ||
640 | (u8 *)&flt_hdr, | ||
641 | sizeof(struct qlcnic_flt_header)); | ||
642 | if (ret) { | ||
643 | dev_warn(&adapter->pdev->dev, | ||
644 | "error reading flash layout header\n"); | ||
645 | return -EIO; | ||
646 | } | ||
647 | |||
648 | entry_size = flt_hdr.len - sizeof(struct qlcnic_flt_header); | ||
649 | flt_entry = (struct qlcnic_flt_entry *)vzalloc(entry_size); | ||
650 | if (flt_entry == NULL) { | ||
651 | dev_warn(&adapter->pdev->dev, "error allocating memory\n"); | ||
652 | return -EIO; | ||
653 | } | ||
654 | |||
655 | ret = qlcnic_rom_fast_read_words(adapter, QLCNIC_FLT_LOCATION + | ||
656 | sizeof(struct qlcnic_flt_header), | ||
657 | (u8 *)flt_entry, entry_size); | ||
658 | if (ret) { | ||
659 | dev_warn(&adapter->pdev->dev, | ||
660 | "error reading flash layout entries\n"); | ||
661 | goto err_out; | ||
662 | } | ||
663 | |||
664 | while (i < (entry_size/sizeof(struct qlcnic_flt_entry))) { | ||
665 | if (flt_entry[i].region == region) | ||
666 | break; | ||
667 | i++; | ||
668 | } | ||
669 | if (i >= (entry_size/sizeof(struct qlcnic_flt_entry))) { | ||
670 | dev_warn(&adapter->pdev->dev, | ||
671 | "region=%x not found in %d regions\n", region, i); | ||
672 | ret = -EIO; | ||
673 | goto err_out; | ||
674 | } | ||
675 | memcpy(region_entry, &flt_entry[i], sizeof(struct qlcnic_flt_entry)); | ||
676 | |||
677 | err_out: | ||
678 | vfree(flt_entry); | ||
679 | return ret; | ||
680 | } | ||
681 | |||
630 | int | 682 | int |
631 | qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter) | 683 | qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter) |
632 | { | 684 | { |
685 | struct qlcnic_flt_entry fw_entry; | ||
633 | u32 ver = -1, min_ver; | 686 | u32 ver = -1, min_ver; |
687 | int ret; | ||
634 | 688 | ||
635 | qlcnic_rom_fast_read(adapter, QLCNIC_FW_VERSION_OFFSET, (int *)&ver); | 689 | ret = qlcnic_get_flt_entry(adapter, QLCNIC_FW_IMAGE_REGION, &fw_entry); |
690 | if (!ret) | ||
691 | /* 0-4:-signature, 4-8:-fw version */ | ||
692 | qlcnic_rom_fast_read(adapter, fw_entry.start_addr + 4, | ||
693 | (int *)&ver); | ||
694 | else | ||
695 | qlcnic_rom_fast_read(adapter, QLCNIC_FW_VERSION_OFFSET, | ||
696 | (int *)&ver); | ||
636 | 697 | ||
637 | ver = QLCNIC_DECODE_VERSION(ver); | 698 | ver = QLCNIC_DECODE_VERSION(ver); |
638 | min_ver = QLCNIC_MIN_FW_VERSION; | 699 | min_ver = QLCNIC_MIN_FW_VERSION; |
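qlcnic_get_flt_entry() walks the flash layout table: a small header whose len field covers header plus entries, followed by fixed-size region entries; the firmware version is then read from the matched region's start_addr + 4. A minimal sketch of the lookup arithmetic, assuming the entry array has already been read out of flash and using the structures added in qlcnic.h above (helper name illustrative, not the driver's):

	#include <linux/types.h>
	#include <linux/string.h>
	#include <linux/errno.h>

	static int flt_find_region(const struct qlcnic_flt_entry *entries,
				   u32 entry_bytes, u8 region,
				   struct qlcnic_flt_entry *out)
	{
		u32 i, nr = entry_bytes / sizeof(*entries);

		for (i = 0; i < nr; i++) {
			if (entries[i].region == region) {
				memcpy(out, &entries[i], sizeof(*out));
				return 0;
			}
		}
		return -ENOENT;	/* region not present in the table */
	}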
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c index 11e3a46c0911..37c04b4fade3 100644 --- a/drivers/net/qlcnic/qlcnic_main.c +++ b/drivers/net/qlcnic/qlcnic_main.c | |||
@@ -31,15 +31,15 @@ static const char qlcnic_driver_string[] = "QLogic 1/10 GbE " | |||
31 | 31 | ||
32 | static struct workqueue_struct *qlcnic_wq; | 32 | static struct workqueue_struct *qlcnic_wq; |
33 | static int qlcnic_mac_learn; | 33 | static int qlcnic_mac_learn; |
34 | module_param(qlcnic_mac_learn, int, 0644); | 34 | module_param(qlcnic_mac_learn, int, 0444); |
35 | MODULE_PARM_DESC(qlcnic_mac_learn, "Mac Filter (0=disabled, 1=enabled)"); | 35 | MODULE_PARM_DESC(qlcnic_mac_learn, "Mac Filter (0=disabled, 1=enabled)"); |
36 | 36 | ||
37 | static int use_msi = 1; | 37 | static int use_msi = 1; |
38 | module_param(use_msi, int, 0644); | 38 | module_param(use_msi, int, 0444); |
39 | MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled"); | 39 | MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled"); |
40 | 40 | ||
41 | static int use_msi_x = 1; | 41 | static int use_msi_x = 1; |
42 | module_param(use_msi_x, int, 0644); | 42 | module_param(use_msi_x, int, 0444); |
43 | MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled"); | 43 | MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled"); |
44 | 44 | ||
45 | static int auto_fw_reset = AUTO_FW_RESET_ENABLED; | 45 | static int auto_fw_reset = AUTO_FW_RESET_ENABLED; |
@@ -47,11 +47,11 @@ module_param(auto_fw_reset, int, 0644); | |||
47 | MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled"); | 47 | MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled"); |
48 | 48 | ||
49 | static int load_fw_file; | 49 | static int load_fw_file; |
50 | module_param(load_fw_file, int, 0644); | 50 | module_param(load_fw_file, int, 0444); |
51 | MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file"); | 51 | MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file"); |
52 | 52 | ||
53 | static int qlcnic_config_npars; | 53 | static int qlcnic_config_npars; |
54 | module_param(qlcnic_config_npars, int, 0644); | 54 | module_param(qlcnic_config_npars, int, 0444); |
55 | MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled"); | 55 | MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled"); |
56 | 56 | ||
57 | static int __devinit qlcnic_probe(struct pci_dev *pdev, | 57 | static int __devinit qlcnic_probe(struct pci_dev *pdev, |
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index 27a7c20f64cd..7ffdb80adf40 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/dma-mapping.h> | 25 | #include <linux/dma-mapping.h> |
26 | #include <linux/pm_runtime.h> | 26 | #include <linux/pm_runtime.h> |
27 | #include <linux/firmware.h> | 27 | #include <linux/firmware.h> |
28 | #include <linux/pci-aspm.h> | ||
28 | 29 | ||
29 | #include <asm/system.h> | 30 | #include <asm/system.h> |
30 | #include <asm/io.h> | 31 | #include <asm/io.h> |
@@ -554,6 +555,8 @@ struct rtl8169_private { | |||
554 | struct mii_if_info mii; | 555 | struct mii_if_info mii; |
555 | struct rtl8169_counters counters; | 556 | struct rtl8169_counters counters; |
556 | u32 saved_wolopts; | 557 | u32 saved_wolopts; |
558 | |||
559 | const struct firmware *fw; | ||
557 | }; | 560 | }; |
558 | 561 | ||
559 | MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>"); | 562 | MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>"); |
@@ -615,8 +618,9 @@ static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data) | |||
615 | } | 618 | } |
616 | } | 619 | } |
617 | 620 | ||
618 | static void rtl8168_oob_notify(void __iomem *ioaddr, u8 cmd) | 621 | static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd) |
619 | { | 622 | { |
623 | void __iomem *ioaddr = tp->mmio_addr; | ||
620 | int i; | 624 | int i; |
621 | 625 | ||
622 | RTL_W8(ERIDR, cmd); | 626 | RTL_W8(ERIDR, cmd); |
@@ -628,7 +632,7 @@ static void rtl8168_oob_notify(void __iomem *ioaddr, u8 cmd) | |||
628 | break; | 632 | break; |
629 | } | 633 | } |
630 | 634 | ||
631 | ocp_write(ioaddr, 0x1, 0x30, 0x00000001); | 635 | ocp_write(tp, 0x1, 0x30, 0x00000001); |
632 | } | 636 | } |
633 | 637 | ||
634 | #define OOB_CMD_RESET 0x00 | 638 | #define OOB_CMD_RESET 0x00 |
@@ -971,7 +975,8 @@ static void __rtl8169_check_link_status(struct net_device *dev, | |||
971 | if (pm) | 975 | if (pm) |
972 | pm_request_resume(&tp->pci_dev->dev); | 976 | pm_request_resume(&tp->pci_dev->dev); |
973 | netif_carrier_on(dev); | 977 | netif_carrier_on(dev); |
974 | netif_info(tp, ifup, dev, "link up\n"); | 978 | if (net_ratelimit()) |
979 | netif_info(tp, ifup, dev, "link up\n"); | ||
975 | } else { | 980 | } else { |
976 | netif_carrier_off(dev); | 981 | netif_carrier_off(dev); |
977 | netif_info(tp, ifdown, dev, "link down\n"); | 982 | netif_info(tp, ifdown, dev, "link down\n"); |
@@ -1632,42 +1637,163 @@ rtl_phy_write_fw(struct rtl8169_private *tp, const struct firmware *fw) | |||
1632 | { | 1637 | { |
1633 | __le32 *phytable = (__le32 *)fw->data; | 1638 | __le32 *phytable = (__le32 *)fw->data; |
1634 | struct net_device *dev = tp->dev; | 1639 | struct net_device *dev = tp->dev; |
1635 | size_t i; | 1640 | size_t index, fw_size = fw->size / sizeof(*phytable); |
1641 | u32 predata, count; | ||
1636 | 1642 | ||
1637 | if (fw->size % sizeof(*phytable)) { | 1643 | if (fw->size % sizeof(*phytable)) { |
1638 | netif_err(tp, probe, dev, "odd sized firmware %zd\n", fw->size); | 1644 | netif_err(tp, probe, dev, "odd sized firmware %zd\n", fw->size); |
1639 | return; | 1645 | return; |
1640 | } | 1646 | } |
1641 | 1647 | ||
1642 | for (i = 0; i < fw->size / sizeof(*phytable); i++) { | 1648 | for (index = 0; index < fw_size; index++) { |
1643 | u32 action = le32_to_cpu(phytable[i]); | 1649 | u32 action = le32_to_cpu(phytable[index]); |
1650 | u32 regno = (action & 0x0fff0000) >> 16; | ||
1644 | 1651 | ||
1645 | if (!action) | 1652 | switch(action & 0xf0000000) { |
1653 | case PHY_READ: | ||
1654 | case PHY_DATA_OR: | ||
1655 | case PHY_DATA_AND: | ||
1656 | case PHY_READ_EFUSE: | ||
1657 | case PHY_CLEAR_READCOUNT: | ||
1658 | case PHY_WRITE: | ||
1659 | case PHY_WRITE_PREVIOUS: | ||
1660 | case PHY_DELAY_MS: | ||
1646 | break; | 1661 | break; |
1647 | 1662 | ||
1648 | if ((action & 0xf0000000) != PHY_WRITE) { | 1663 | case PHY_BJMPN: |
1649 | netif_err(tp, probe, dev, | 1664 | if (regno > index) { |
1650 | "unknown action 0x%08x\n", action); | 1665 | netif_err(tp, probe, tp->dev, |
1666 | "Out of range of firmware\n"); | ||
1667 | return; | ||
1668 | } | ||
1669 | break; | ||
1670 | case PHY_READCOUNT_EQ_SKIP: | ||
1671 | if (index + 2 >= fw_size) { | ||
1672 | netif_err(tp, probe, tp->dev, | ||
1673 | "Out of range of firmware\n"); | ||
1674 | return; | ||
1675 | } | ||
1676 | break; | ||
1677 | case PHY_COMP_EQ_SKIPN: | ||
1678 | case PHY_COMP_NEQ_SKIPN: | ||
1679 | case PHY_SKIPN: | ||
1680 | if (index + 1 + regno >= fw_size) { | ||
1681 | netif_err(tp, probe, tp->dev, | ||
1682 | "Out of range of firmware\n"); | ||
1683 | return; | ||
1684 | } | ||
1685 | break; | ||
1686 | |||
1687 | case PHY_READ_MAC_BYTE: | ||
1688 | case PHY_WRITE_MAC_BYTE: | ||
1689 | case PHY_WRITE_ERI_WORD: | ||
1690 | default: | ||
1691 | netif_err(tp, probe, tp->dev, | ||
1692 | "Invalid action 0x%08x\n", action); | ||
1651 | return; | 1693 | return; |
1652 | } | 1694 | } |
1653 | } | 1695 | } |
1654 | 1696 | ||
1655 | while (i-- != 0) { | 1697 | predata = 0; |
1656 | u32 action = le32_to_cpu(*phytable); | 1698 | count = 0; |
1699 | |||
1700 | for (index = 0; index < fw_size; ) { | ||
1701 | u32 action = le32_to_cpu(phytable[index]); | ||
1657 | u32 data = action & 0x0000ffff; | 1702 | u32 data = action & 0x0000ffff; |
1658 | u32 reg = (action & 0x0fff0000) >> 16; | 1703 | u32 regno = (action & 0x0fff0000) >> 16; |
1704 | |||
1705 | if (!action) | ||
1706 | break; | ||
1659 | 1707 | ||
1660 | switch(action & 0xf0000000) { | 1708 | switch(action & 0xf0000000) { |
1709 | case PHY_READ: | ||
1710 | predata = rtl_readphy(tp, regno); | ||
1711 | count++; | ||
1712 | index++; | ||
1713 | break; | ||
1714 | case PHY_DATA_OR: | ||
1715 | predata |= data; | ||
1716 | index++; | ||
1717 | break; | ||
1718 | case PHY_DATA_AND: | ||
1719 | predata &= data; | ||
1720 | index++; | ||
1721 | break; | ||
1722 | case PHY_BJMPN: | ||
1723 | index -= regno; | ||
1724 | break; | ||
1725 | case PHY_READ_EFUSE: | ||
1726 | predata = rtl8168d_efuse_read(tp->mmio_addr, regno); | ||
1727 | index++; | ||
1728 | break; | ||
1729 | case PHY_CLEAR_READCOUNT: | ||
1730 | count = 0; | ||
1731 | index++; | ||
1732 | break; | ||
1661 | case PHY_WRITE: | 1733 | case PHY_WRITE: |
1662 | rtl_writephy(tp, reg, data); | 1734 | rtl_writephy(tp, regno, data); |
1663 | phytable++; | 1735 | index++; |
1736 | break; | ||
1737 | case PHY_READCOUNT_EQ_SKIP: | ||
1738 | if (count == data) | ||
1739 | index += 2; | ||
1740 | else | ||
1741 | index += 1; | ||
1742 | break; | ||
1743 | case PHY_COMP_EQ_SKIPN: | ||
1744 | if (predata == data) | ||
1745 | index += regno; | ||
1746 | index++; | ||
1747 | break; | ||
1748 | case PHY_COMP_NEQ_SKIPN: | ||
1749 | if (predata != data) | ||
1750 | index += regno; | ||
1751 | index++; | ||
1664 | break; | 1752 | break; |
1753 | case PHY_WRITE_PREVIOUS: | ||
1754 | rtl_writephy(tp, regno, predata); | ||
1755 | index++; | ||
1756 | break; | ||
1757 | case PHY_SKIPN: | ||
1758 | index += regno + 1; | ||
1759 | break; | ||
1760 | case PHY_DELAY_MS: | ||
1761 | mdelay(data); | ||
1762 | index++; | ||
1763 | break; | ||
1764 | |||
1765 | case PHY_READ_MAC_BYTE: | ||
1766 | case PHY_WRITE_MAC_BYTE: | ||
1767 | case PHY_WRITE_ERI_WORD: | ||
1665 | default: | 1768 | default: |
1666 | BUG(); | 1769 | BUG(); |
1667 | } | 1770 | } |
1668 | } | 1771 | } |
1669 | } | 1772 | } |
1670 | 1773 | ||
1774 | static void rtl_release_firmware(struct rtl8169_private *tp) | ||
1775 | { | ||
1776 | release_firmware(tp->fw); | ||
1777 | tp->fw = NULL; | ||
1778 | } | ||
1779 | |||
1780 | static int rtl_apply_firmware(struct rtl8169_private *tp, const char *fw_name) | ||
1781 | { | ||
1782 | const struct firmware **fw = &tp->fw; | ||
1783 | int rc = !*fw; | ||
1784 | |||
1785 | if (rc) { | ||
1786 | rc = request_firmware(fw, fw_name, &tp->pci_dev->dev); | ||
1787 | if (rc < 0) | ||
1788 | goto out; | ||
1789 | } | ||
1790 | |||
1791 | /* TODO: release firmware once rtl_phy_write_fw signals failures. */ | ||
1792 | rtl_phy_write_fw(tp, *fw); | ||
1793 | out: | ||
1794 | return rc; | ||
1795 | } | ||
1796 | |||
1671 | static void rtl8169s_hw_phy_config(struct rtl8169_private *tp) | 1797 | static void rtl8169s_hw_phy_config(struct rtl8169_private *tp) |
1672 | { | 1798 | { |
1673 | static const struct phy_reg phy_reg_init[] = { | 1799 | static const struct phy_reg phy_reg_init[] = { |
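The r8169 hunk above does two things: rtl_phy_write_fw() grows a validate-then-execute pass over the PHY firmware opcodes (bounds-checking jumps before interpreting them), and rtl_apply_firmware() turns the old request/write/release sequence into request-once-and-cache, with tp->fw held until rtl_release_firmware() at remove time. A hedged sketch of the caching pattern, outside the driver (the cache pointer stands in for tp->fw):

	#include <linux/firmware.h>
	#include <linux/device.h>

	static int apply_fw_cached(const struct firmware **cached,
				   const char *name, struct device *dev)
	{
		int rc = 0;

		if (!*cached) {
			rc = request_firmware(cached, name, dev);
			if (rc < 0)
				return rc;	/* nothing cached on failure */
		}
		/* ... write *cached to the PHY here ... */
		return rc;
	}

	/* On teardown: release_firmware(*cached); *cached = NULL; */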
@@ -2041,7 +2167,6 @@ static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp) | |||
2041 | { 0x0d, 0xf880 } | 2167 | { 0x0d, 0xf880 } |
2042 | }; | 2168 | }; |
2043 | void __iomem *ioaddr = tp->mmio_addr; | 2169 | void __iomem *ioaddr = tp->mmio_addr; |
2044 | const struct firmware *fw; | ||
2045 | 2170 | ||
2046 | rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0)); | 2171 | rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0)); |
2047 | 2172 | ||
@@ -2105,11 +2230,8 @@ static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp) | |||
2105 | 2230 | ||
2106 | rtl_writephy(tp, 0x1f, 0x0005); | 2231 | rtl_writephy(tp, 0x1f, 0x0005); |
2107 | rtl_writephy(tp, 0x05, 0x001b); | 2232 | rtl_writephy(tp, 0x05, 0x001b); |
2108 | if (rtl_readphy(tp, 0x06) == 0xbf00 && | 2233 | if ((rtl_readphy(tp, 0x06) != 0xbf00) || |
2109 | request_firmware(&fw, FIRMWARE_8168D_1, &tp->pci_dev->dev) == 0) { | 2234 | (rtl_apply_firmware(tp, FIRMWARE_8168D_1) < 0)) { |
2110 | rtl_phy_write_fw(tp, fw); | ||
2111 | release_firmware(fw); | ||
2112 | } else { | ||
2113 | netif_warn(tp, probe, tp->dev, "unable to apply firmware patch\n"); | 2235 | netif_warn(tp, probe, tp->dev, "unable to apply firmware patch\n"); |
2114 | } | 2236 | } |
2115 | 2237 | ||
@@ -2159,7 +2281,6 @@ static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp) | |||
2159 | { 0x0d, 0xf880 } | 2281 | { 0x0d, 0xf880 } |
2160 | }; | 2282 | }; |
2161 | void __iomem *ioaddr = tp->mmio_addr; | 2283 | void __iomem *ioaddr = tp->mmio_addr; |
2162 | const struct firmware *fw; | ||
2163 | 2284 | ||
2164 | rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0)); | 2285 | rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0)); |
2165 | 2286 | ||
@@ -2214,11 +2335,8 @@ static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp) | |||
2214 | 2335 | ||
2215 | rtl_writephy(tp, 0x1f, 0x0005); | 2336 | rtl_writephy(tp, 0x1f, 0x0005); |
2216 | rtl_writephy(tp, 0x05, 0x001b); | 2337 | rtl_writephy(tp, 0x05, 0x001b); |
2217 | if (rtl_readphy(tp, 0x06) == 0xb300 && | 2338 | if ((rtl_readphy(tp, 0x06) != 0xb300) || |
2218 | request_firmware(&fw, FIRMWARE_8168D_2, &tp->pci_dev->dev) == 0) { | 2339 | (rtl_apply_firmware(tp, FIRMWARE_8168D_2) < 0)) { |
2219 | rtl_phy_write_fw(tp, fw); | ||
2220 | release_firmware(fw); | ||
2221 | } else { | ||
2222 | netif_warn(tp, probe, tp->dev, "unable to apply firmware patch\n"); | 2340 | netif_warn(tp, probe, tp->dev, "unable to apply firmware patch\n"); |
2223 | } | 2341 | } |
2224 | 2342 | ||
@@ -2752,8 +2870,11 @@ static void r8168_pll_power_down(struct rtl8169_private *tp) | |||
2752 | { | 2870 | { |
2753 | void __iomem *ioaddr = tp->mmio_addr; | 2871 | void __iomem *ioaddr = tp->mmio_addr; |
2754 | 2872 | ||
2755 | if (tp->mac_version == RTL_GIGA_MAC_VER_27) | 2873 | if (((tp->mac_version == RTL_GIGA_MAC_VER_27) || |
2874 | (tp->mac_version == RTL_GIGA_MAC_VER_28)) && | ||
2875 | (ocp_read(tp, 0x0f, 0x0010) & 0x00008000)) { | ||
2756 | return; | 2876 | return; |
2877 | } | ||
2757 | 2878 | ||
2758 | if (((tp->mac_version == RTL_GIGA_MAC_VER_23) || | 2879 | if (((tp->mac_version == RTL_GIGA_MAC_VER_23) || |
2759 | (tp->mac_version == RTL_GIGA_MAC_VER_24)) && | 2880 | (tp->mac_version == RTL_GIGA_MAC_VER_24)) && |
@@ -2775,6 +2896,8 @@ static void r8168_pll_power_down(struct rtl8169_private *tp) | |||
2775 | switch (tp->mac_version) { | 2896 | switch (tp->mac_version) { |
2776 | case RTL_GIGA_MAC_VER_25: | 2897 | case RTL_GIGA_MAC_VER_25: |
2777 | case RTL_GIGA_MAC_VER_26: | 2898 | case RTL_GIGA_MAC_VER_26: |
2899 | case RTL_GIGA_MAC_VER_27: | ||
2900 | case RTL_GIGA_MAC_VER_28: | ||
2778 | RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80); | 2901 | RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80); |
2779 | break; | 2902 | break; |
2780 | } | 2903 | } |
@@ -2784,12 +2907,17 @@ static void r8168_pll_power_up(struct rtl8169_private *tp) | |||
2784 | { | 2907 | { |
2785 | void __iomem *ioaddr = tp->mmio_addr; | 2908 | void __iomem *ioaddr = tp->mmio_addr; |
2786 | 2909 | ||
2787 | if (tp->mac_version == RTL_GIGA_MAC_VER_27) | 2910 | if (((tp->mac_version == RTL_GIGA_MAC_VER_27) || |
2911 | (tp->mac_version == RTL_GIGA_MAC_VER_28)) && | ||
2912 | (ocp_read(tp, 0x0f, 0x0010) & 0x00008000)) { | ||
2788 | return; | 2913 | return; |
2914 | } | ||
2789 | 2915 | ||
2790 | switch (tp->mac_version) { | 2916 | switch (tp->mac_version) { |
2791 | case RTL_GIGA_MAC_VER_25: | 2917 | case RTL_GIGA_MAC_VER_25: |
2792 | case RTL_GIGA_MAC_VER_26: | 2918 | case RTL_GIGA_MAC_VER_26: |
2919 | case RTL_GIGA_MAC_VER_27: | ||
2920 | case RTL_GIGA_MAC_VER_28: | ||
2793 | RTL_W8(PMCH, RTL_R8(PMCH) | 0x80); | 2921 | RTL_W8(PMCH, RTL_R8(PMCH) | 0x80); |
2794 | break; | 2922 | break; |
2795 | } | 2923 | } |
@@ -2893,6 +3021,11 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2893 | mii->reg_num_mask = 0x1f; | 3021 | mii->reg_num_mask = 0x1f; |
2894 | mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII); | 3022 | mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII); |
2895 | 3023 | ||
3024 | /* disable ASPM completely as that cause random device stop working | ||
3025 | * problems as well as full system hangs for some PCIe devices users */ | ||
3026 | pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | | ||
3027 | PCIE_LINK_STATE_CLKPM); | ||
3028 | |||
2896 | /* enable device (incl. PCI PM wakeup and hotplug setup) */ | 3029 | /* enable device (incl. PCI PM wakeup and hotplug setup) */ |
2897 | rc = pci_enable_device(pdev); | 3030 | rc = pci_enable_device(pdev); |
2898 | if (rc < 0) { | 3031 | if (rc < 0) { |
@@ -2926,7 +3059,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2926 | goto err_out_mwi_2; | 3059 | goto err_out_mwi_2; |
2927 | } | 3060 | } |
2928 | 3061 | ||
2929 | tp->cp_cmd = PCIMulRW | RxChkSum; | 3062 | tp->cp_cmd = RxChkSum; |
2930 | 3063 | ||
2931 | if ((sizeof(dma_addr_t) > 4) && | 3064 | if ((sizeof(dma_addr_t) > 4) && |
2932 | !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) { | 3065 | !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) { |
@@ -3069,20 +3202,13 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
3069 | rtl8168_driver_start(tp); | 3202 | rtl8168_driver_start(tp); |
3070 | } | 3203 | } |
3071 | 3204 | ||
3072 | rtl8169_init_phy(dev, tp); | ||
3073 | |||
3074 | /* | ||
3075 | * Pretend we are using VLANs; This bypasses a nasty bug where | ||
3076 | * Interrupts stop flowing on high load on 8110SCd controllers. | ||
3077 | */ | ||
3078 | if (tp->mac_version == RTL_GIGA_MAC_VER_05) | ||
3079 | RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | RxVlan); | ||
3080 | |||
3081 | device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL); | 3205 | device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL); |
3082 | 3206 | ||
3083 | if (pci_dev_run_wake(pdev)) | 3207 | if (pci_dev_run_wake(pdev)) |
3084 | pm_runtime_put_noidle(&pdev->dev); | 3208 | pm_runtime_put_noidle(&pdev->dev); |
3085 | 3209 | ||
3210 | netif_carrier_off(dev); | ||
3211 | |||
3086 | out: | 3212 | out: |
3087 | return rc; | 3213 | return rc; |
3088 | 3214 | ||
@@ -3111,6 +3237,8 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev) | |||
3111 | 3237 | ||
3112 | cancel_delayed_work_sync(&tp->task); | 3238 | cancel_delayed_work_sync(&tp->task); |
3113 | 3239 | ||
3240 | rtl_release_firmware(tp); | ||
3241 | |||
3114 | unregister_netdev(dev); | 3242 | unregister_netdev(dev); |
3115 | 3243 | ||
3116 | if (pci_dev_run_wake(pdev)) | 3244 | if (pci_dev_run_wake(pdev)) |
@@ -3127,6 +3255,7 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev) | |||
3127 | static int rtl8169_open(struct net_device *dev) | 3255 | static int rtl8169_open(struct net_device *dev) |
3128 | { | 3256 | { |
3129 | struct rtl8169_private *tp = netdev_priv(dev); | 3257 | struct rtl8169_private *tp = netdev_priv(dev); |
3258 | void __iomem *ioaddr = tp->mmio_addr; | ||
3130 | struct pci_dev *pdev = tp->pci_dev; | 3259 | struct pci_dev *pdev = tp->pci_dev; |
3131 | int retval = -ENOMEM; | 3260 | int retval = -ENOMEM; |
3132 | 3261 | ||
@@ -3162,6 +3291,15 @@ static int rtl8169_open(struct net_device *dev) | |||
3162 | 3291 | ||
3163 | napi_enable(&tp->napi); | 3292 | napi_enable(&tp->napi); |
3164 | 3293 | ||
3294 | rtl8169_init_phy(dev, tp); | ||
3295 | |||
3296 | /* | ||
3297 | * Pretend we are using VLANs; This bypasses a nasty bug where | ||
3298 | * Interrupts stop flowing on high load on 8110SCd controllers. | ||
3299 | */ | ||
3300 | if (tp->mac_version == RTL_GIGA_MAC_VER_05) | ||
3301 | RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | RxVlan); | ||
3302 | |||
3165 | rtl_pll_power_up(tp); | 3303 | rtl_pll_power_up(tp); |
3166 | 3304 | ||
3167 | rtl_hw_start(dev); | 3305 | rtl_hw_start(dev); |
@@ -3171,7 +3309,7 @@ static int rtl8169_open(struct net_device *dev) | |||
3171 | tp->saved_wolopts = 0; | 3309 | tp->saved_wolopts = 0; |
3172 | pm_runtime_put_noidle(&pdev->dev); | 3310 | pm_runtime_put_noidle(&pdev->dev); |
3173 | 3311 | ||
3174 | rtl8169_check_link_status(dev, tp, tp->mmio_addr); | 3312 | rtl8169_check_link_status(dev, tp, ioaddr); |
3175 | out: | 3313 | out: |
3176 | return retval; | 3314 | return retval; |
3177 | 3315 | ||
@@ -3197,7 +3335,8 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp) | |||
3197 | /* Disable interrupts */ | 3335 | /* Disable interrupts */ |
3198 | rtl8169_irq_mask_and_ack(ioaddr); | 3336 | rtl8169_irq_mask_and_ack(ioaddr); |
3199 | 3337 | ||
3200 | if (tp->mac_version == RTL_GIGA_MAC_VER_28) { | 3338 | if (tp->mac_version == RTL_GIGA_MAC_VER_27 || |
3339 | tp->mac_version == RTL_GIGA_MAC_VER_28) { | ||
3201 | while (RTL_R8(TxPoll) & NPQ) | 3340 | while (RTL_R8(TxPoll) & NPQ) |
3202 | udelay(20); | 3341 | udelay(20); |
3203 | 3342 | ||
@@ -3639,7 +3778,8 @@ static void rtl_hw_start_8168(struct net_device *dev) | |||
3639 | RTL_W16(IntrMitigate, 0x5151); | 3778 | RTL_W16(IntrMitigate, 0x5151); |
3640 | 3779 | ||
3641 | /* Work around for RxFIFO overflow. */ | 3780 | /* Work around for RxFIFO overflow. */ |
3642 | if (tp->mac_version == RTL_GIGA_MAC_VER_11) { | 3781 | if (tp->mac_version == RTL_GIGA_MAC_VER_11 || |
3782 | tp->mac_version == RTL_GIGA_MAC_VER_22) { | ||
3643 | tp->intr_event |= RxFIFOOver | PCSTimeout; | 3783 | tp->intr_event |= RxFIFOOver | PCSTimeout; |
3644 | tp->intr_event &= ~RxOverflow; | 3784 | tp->intr_event &= ~RxOverflow; |
3645 | } | 3785 | } |
@@ -3725,8 +3865,7 @@ static void rtl_hw_start_8168(struct net_device *dev) | |||
3725 | Cxpl_dbg_sel | \ | 3865 | Cxpl_dbg_sel | \ |
3726 | ASF | \ | 3866 | ASF | \ |
3727 | PktCntrDisable | \ | 3867 | PktCntrDisable | \ |
3728 | PCIDAC | \ | 3868 | Mac_dbgo_sel) |
3729 | PCIMulRW) | ||
3730 | 3869 | ||
3731 | static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev) | 3870 | static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev) |
3732 | { | 3871 | { |
@@ -3756,8 +3895,6 @@ static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev) | |||
3756 | if ((cfg1 & LEDS0) && (cfg1 & LEDS1)) | 3895 | if ((cfg1 & LEDS0) && (cfg1 & LEDS1)) |
3757 | RTL_W8(Config1, cfg1 & ~LEDS0); | 3896 | RTL_W8(Config1, cfg1 & ~LEDS0); |
3758 | 3897 | ||
3759 | RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R810X_CPCMD_QUIRK_MASK); | ||
3760 | |||
3761 | rtl_ephy_init(ioaddr, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1)); | 3898 | rtl_ephy_init(ioaddr, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1)); |
3762 | } | 3899 | } |
3763 | 3900 | ||
@@ -3769,8 +3906,6 @@ static void rtl_hw_start_8102e_2(void __iomem *ioaddr, struct pci_dev *pdev) | |||
3769 | 3906 | ||
3770 | RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable); | 3907 | RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable); |
3771 | RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); | 3908 | RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); |
3772 | |||
3773 | RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R810X_CPCMD_QUIRK_MASK); | ||
3774 | } | 3909 | } |
3775 | 3910 | ||
3776 | static void rtl_hw_start_8102e_3(void __iomem *ioaddr, struct pci_dev *pdev) | 3911 | static void rtl_hw_start_8102e_3(void __iomem *ioaddr, struct pci_dev *pdev) |
@@ -3796,6 +3931,8 @@ static void rtl_hw_start_8101(struct net_device *dev) | |||
3796 | } | 3931 | } |
3797 | } | 3932 | } |
3798 | 3933 | ||
3934 | RTL_W8(Cfg9346, Cfg9346_Unlock); | ||
3935 | |||
3799 | switch (tp->mac_version) { | 3936 | switch (tp->mac_version) { |
3800 | case RTL_GIGA_MAC_VER_07: | 3937 | case RTL_GIGA_MAC_VER_07: |
3801 | rtl_hw_start_8102e_1(ioaddr, pdev); | 3938 | rtl_hw_start_8102e_1(ioaddr, pdev); |
@@ -3810,14 +3947,13 @@ static void rtl_hw_start_8101(struct net_device *dev) | |||
3810 | break; | 3947 | break; |
3811 | } | 3948 | } |
3812 | 3949 | ||
3813 | RTL_W8(Cfg9346, Cfg9346_Unlock); | 3950 | RTL_W8(Cfg9346, Cfg9346_Lock); |
3814 | 3951 | ||
3815 | RTL_W8(MaxTxPacketSize, TxPacketMax); | 3952 | RTL_W8(MaxTxPacketSize, TxPacketMax); |
3816 | 3953 | ||
3817 | rtl_set_rx_max_size(ioaddr, rx_buf_sz); | 3954 | rtl_set_rx_max_size(ioaddr, rx_buf_sz); |
3818 | 3955 | ||
3819 | tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW; | 3956 | tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK; |
3820 | |||
3821 | RTL_W16(CPlusCmd, tp->cp_cmd); | 3957 | RTL_W16(CPlusCmd, tp->cp_cmd); |
3822 | 3958 | ||
3823 | RTL_W16(IntrMitigate, 0x0000); | 3959 | RTL_W16(IntrMitigate, 0x0000); |
@@ -3827,14 +3963,10 @@ static void rtl_hw_start_8101(struct net_device *dev) | |||
3827 | RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); | 3963 | RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); |
3828 | rtl_set_rx_tx_config_registers(tp); | 3964 | rtl_set_rx_tx_config_registers(tp); |
3829 | 3965 | ||
3830 | RTL_W8(Cfg9346, Cfg9346_Lock); | ||
3831 | |||
3832 | RTL_R8(IntrMask); | 3966 | RTL_R8(IntrMask); |
3833 | 3967 | ||
3834 | rtl_set_rx_mode(dev); | 3968 | rtl_set_rx_mode(dev); |
3835 | 3969 | ||
3836 | RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); | ||
3837 | |||
3838 | RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000); | 3970 | RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000); |
3839 | 3971 | ||
3840 | RTL_W16(IntrMask, tp->intr_event); | 3972 | RTL_W16(IntrMask, tp->intr_event); |
@@ -4521,12 +4653,33 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) | |||
4521 | break; | 4653 | break; |
4522 | } | 4654 | } |
4523 | 4655 | ||
4524 | /* Work around for rx fifo overflow */ | 4656 | if (unlikely(status & RxFIFOOver)) { |
4525 | if (unlikely(status & RxFIFOOver) && | 4657 | switch (tp->mac_version) { |
4526 | (tp->mac_version == RTL_GIGA_MAC_VER_11)) { | 4658 | /* Work around for rx fifo overflow */ |
4527 | netif_stop_queue(dev); | 4659 | case RTL_GIGA_MAC_VER_11: |
4528 | rtl8169_tx_timeout(dev); | 4660 | case RTL_GIGA_MAC_VER_22: |
4529 | break; | 4661 | case RTL_GIGA_MAC_VER_26: |
4662 | netif_stop_queue(dev); | ||
4663 | rtl8169_tx_timeout(dev); | ||
4664 | goto done; | ||
4665 | /* Testers needed. */ | ||
4666 | case RTL_GIGA_MAC_VER_17: | ||
4667 | case RTL_GIGA_MAC_VER_19: | ||
4668 | case RTL_GIGA_MAC_VER_20: | ||
4669 | case RTL_GIGA_MAC_VER_21: | ||
4670 | case RTL_GIGA_MAC_VER_23: | ||
4671 | case RTL_GIGA_MAC_VER_24: | ||
4672 | case RTL_GIGA_MAC_VER_27: | ||
4673 | case RTL_GIGA_MAC_VER_28: | ||
4674 | /* Experimental science. Pktgen proof. */ | ||
4675 | case RTL_GIGA_MAC_VER_12: | ||
4676 | case RTL_GIGA_MAC_VER_25: | ||
4677 | if (status == RxFIFOOver) | ||
4678 | goto done; | ||
4679 | break; | ||
4680 | default: | ||
4681 | break; | ||
4682 | } | ||
4530 | } | 4683 | } |
4531 | 4684 | ||
4532 | if (unlikely(status & SYSErr)) { | 4685 | if (unlikely(status & SYSErr)) { |
@@ -4562,7 +4715,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) | |||
4562 | (status & RxFIFOOver) ? (status | RxOverflow) : status); | 4715 | (status & RxFIFOOver) ? (status | RxOverflow) : status); |
4563 | status = RTL_R16(IntrStatus); | 4716 | status = RTL_R16(IntrStatus); |
4564 | } | 4717 | } |
4565 | 4718 | done: | |
4566 | return IRQ_RETVAL(handled); | 4719 | return IRQ_RETVAL(handled); |
4567 | } | 4720 | } |
4568 | 4721 | ||
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c index 711449c6e675..002bac743843 100644 --- a/drivers/net/sfc/efx.c +++ b/drivers/net/sfc/efx.c | |||
@@ -1153,6 +1153,9 @@ static int efx_wanted_channels(void) | |||
1153 | int count; | 1153 | int count; |
1154 | int cpu; | 1154 | int cpu; |
1155 | 1155 | ||
1156 | if (rss_cpus) | ||
1157 | return rss_cpus; | ||
1158 | |||
1156 | if (unlikely(!zalloc_cpumask_var(&core_mask, GFP_KERNEL))) { | 1159 | if (unlikely(!zalloc_cpumask_var(&core_mask, GFP_KERNEL))) { |
1157 | printk(KERN_WARNING | 1160 | printk(KERN_WARNING |
1158 | "sfc: RSS disabled due to allocation failure\n"); | 1161 | "sfc: RSS disabled due to allocation failure\n"); |
@@ -1266,27 +1269,18 @@ static void efx_remove_interrupts(struct efx_nic *efx) | |||
1266 | efx->legacy_irq = 0; | 1269 | efx->legacy_irq = 0; |
1267 | } | 1270 | } |
1268 | 1271 | ||
1269 | struct efx_tx_queue * | ||
1270 | efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type) | ||
1271 | { | ||
1272 | unsigned tx_channel_offset = | ||
1273 | separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0; | ||
1274 | EFX_BUG_ON_PARANOID(index >= efx->n_tx_channels || | ||
1275 | type >= EFX_TXQ_TYPES); | ||
1276 | return &efx->channel[tx_channel_offset + index]->tx_queue[type]; | ||
1277 | } | ||
1278 | |||
1279 | static void efx_set_channels(struct efx_nic *efx) | 1272 | static void efx_set_channels(struct efx_nic *efx) |
1280 | { | 1273 | { |
1281 | struct efx_channel *channel; | 1274 | struct efx_channel *channel; |
1282 | struct efx_tx_queue *tx_queue; | 1275 | struct efx_tx_queue *tx_queue; |
1283 | unsigned tx_channel_offset = | 1276 | |
1277 | efx->tx_channel_offset = | ||
1284 | separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0; | 1278 | separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0; |
1285 | 1279 | ||
1286 | /* Channel pointers were set in efx_init_struct() but we now | 1280 | /* Channel pointers were set in efx_init_struct() but we now |
1287 | * need to clear them for TX queues in any RX-only channels. */ | 1281 | * need to clear them for TX queues in any RX-only channels. */ |
1288 | efx_for_each_channel(channel, efx) { | 1282 | efx_for_each_channel(channel, efx) { |
1289 | if (channel->channel - tx_channel_offset >= | 1283 | if (channel->channel - efx->tx_channel_offset >= |
1290 | efx->n_tx_channels) { | 1284 | efx->n_tx_channels) { |
1291 | efx_for_each_channel_tx_queue(tx_queue, channel) | 1285 | efx_for_each_channel_tx_queue(tx_queue, channel) |
1292 | tx_queue->channel = NULL; | 1286 | tx_queue->channel = NULL; |
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c index 0e8bb19ed60d..ca886d98bdc7 100644 --- a/drivers/net/sfc/ethtool.c +++ b/drivers/net/sfc/ethtool.c | |||
@@ -569,9 +569,14 @@ static void efx_ethtool_self_test(struct net_device *net_dev, | |||
569 | struct ethtool_test *test, u64 *data) | 569 | struct ethtool_test *test, u64 *data) |
570 | { | 570 | { |
571 | struct efx_nic *efx = netdev_priv(net_dev); | 571 | struct efx_nic *efx = netdev_priv(net_dev); |
572 | struct efx_self_tests efx_tests; | 572 | struct efx_self_tests *efx_tests; |
573 | int already_up; | 573 | int already_up; |
574 | int rc; | 574 | int rc = -ENOMEM; |
575 | |||
576 | efx_tests = kzalloc(sizeof(*efx_tests), GFP_KERNEL); | ||
577 | if (!efx_tests) | ||
578 | goto fail; | ||
579 | |||
575 | 580 | ||
576 | ASSERT_RTNL(); | 581 | ASSERT_RTNL(); |
577 | if (efx->state != STATE_RUNNING) { | 582 | if (efx->state != STATE_RUNNING) { |
@@ -589,13 +594,11 @@ static void efx_ethtool_self_test(struct net_device *net_dev, | |||
589 | if (rc) { | 594 | if (rc) { |
590 | netif_err(efx, drv, efx->net_dev, | 595 | netif_err(efx, drv, efx->net_dev, |
591 | "failed opening device.\n"); | 596 | "failed opening device.\n"); |
592 | goto fail2; | 597 | goto fail1; |
593 | } | 598 | } |
594 | } | 599 | } |
595 | 600 | ||
596 | memset(&efx_tests, 0, sizeof(efx_tests)); | 601 | rc = efx_selftest(efx, efx_tests, test->flags); |
597 | |||
598 | rc = efx_selftest(efx, &efx_tests, test->flags); | ||
599 | 602 | ||
600 | if (!already_up) | 603 | if (!already_up) |
601 | dev_close(efx->net_dev); | 604 | dev_close(efx->net_dev); |
@@ -604,10 +607,11 @@ static void efx_ethtool_self_test(struct net_device *net_dev, | |||
604 | rc == 0 ? "passed" : "failed", | 607 | rc == 0 ? "passed" : "failed", |
605 | (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on"); | 608 | (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on"); |
606 | 609 | ||
607 | fail2: | 610 | fail1: |
608 | fail1: | ||
609 | /* Fill ethtool results structures */ | 611 | /* Fill ethtool results structures */ |
610 | efx_ethtool_fill_self_tests(efx, &efx_tests, NULL, data); | 612 | efx_ethtool_fill_self_tests(efx, efx_tests, NULL, data); |
613 | kfree(efx_tests); | ||
614 | fail: | ||
611 | if (rc) | 615 | if (rc) |
612 | test->flags |= ETH_TEST_FL_FAILED; | 616 | test->flags |= ETH_TEST_FL_FAILED; |
613 | } | 617 | } |
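The sfc ethtool change moves struct efx_self_tests off the kernel stack (it is large enough to risk stack overflow in the ioctl path) to a kzalloc()'d buffer, with kfree() on every exit path. A minimal sketch of the pattern with a placeholder structure:

	#include <linux/types.h>
	#include <linux/slab.h>
	#include <linux/errno.h>

	struct big_results { u64 data[128]; };	/* stand-in for efx_self_tests */

	static int run_selftest(void)
	{
		struct big_results *res;
		int rc = -ENOMEM;

		res = kzalloc(sizeof(*res), GFP_KERNEL);	/* zeroed, off-stack */
		if (!res)
			return rc;

		/* ... run the tests, filling res ... */
		rc = 0;

		kfree(res);
		return rc;
	}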
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c index 70e4f7dcce81..61ddd2c6e750 100644 --- a/drivers/net/sfc/falcon.c +++ b/drivers/net/sfc/falcon.c | |||
@@ -1107,22 +1107,9 @@ static int __falcon_reset_hw(struct efx_nic *efx, enum reset_type method) | |||
1107 | 1107 | ||
1108 | /* Restore PCI configuration if needed */ | 1108 | /* Restore PCI configuration if needed */ |
1109 | if (method == RESET_TYPE_WORLD) { | 1109 | if (method == RESET_TYPE_WORLD) { |
1110 | if (efx_nic_is_dual_func(efx)) { | 1110 | if (efx_nic_is_dual_func(efx)) |
1111 | rc = pci_restore_state(nic_data->pci_dev2); | 1111 | pci_restore_state(nic_data->pci_dev2); |
1112 | if (rc) { | 1112 | pci_restore_state(efx->pci_dev); |
1113 | netif_err(efx, drv, efx->net_dev, | ||
1114 | "failed to restore PCI config for " | ||
1115 | "the secondary function\n"); | ||
1116 | goto fail3; | ||
1117 | } | ||
1118 | } | ||
1119 | rc = pci_restore_state(efx->pci_dev); | ||
1120 | if (rc) { | ||
1121 | netif_err(efx, drv, efx->net_dev, | ||
1122 | "failed to restore PCI config for the " | ||
1123 | "primary function\n"); | ||
1124 | goto fail4; | ||
1125 | } | ||
1126 | netif_dbg(efx, drv, efx->net_dev, | 1113 | netif_dbg(efx, drv, efx->net_dev, |
1127 | "successfully restored PCI config\n"); | 1114 | "successfully restored PCI config\n"); |
1128 | } | 1115 | } |
@@ -1133,7 +1120,7 @@ static int __falcon_reset_hw(struct efx_nic *efx, enum reset_type method) | |||
1133 | rc = -ETIMEDOUT; | 1120 | rc = -ETIMEDOUT; |
1134 | netif_err(efx, hw, efx->net_dev, | 1121 | netif_err(efx, hw, efx->net_dev, |
1135 | "timed out waiting for hardware reset\n"); | 1122 | "timed out waiting for hardware reset\n"); |
1136 | goto fail5; | 1123 | goto fail3; |
1137 | } | 1124 | } |
1138 | netif_dbg(efx, hw, efx->net_dev, "hardware reset complete\n"); | 1125 | netif_dbg(efx, hw, efx->net_dev, "hardware reset complete\n"); |
1139 | 1126 | ||
@@ -1141,11 +1128,9 @@ static int __falcon_reset_hw(struct efx_nic *efx, enum reset_type method) | |||
1141 | 1128 | ||
1142 | /* pci_save_state() and pci_restore_state() MUST be called in pairs */ | 1129 | /* pci_save_state() and pci_restore_state() MUST be called in pairs */ |
1143 | fail2: | 1130 | fail2: |
1144 | fail3: | ||
1145 | pci_restore_state(efx->pci_dev); | 1131 | pci_restore_state(efx->pci_dev); |
1146 | fail1: | 1132 | fail1: |
1147 | fail4: | 1133 | fail3: |
1148 | fail5: | ||
1149 | return rc; | 1134 | return rc; |
1150 | } | 1135 | } |
1151 | 1136 | ||
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h index bdce66ddf93a..28df8665256a 100644 --- a/drivers/net/sfc/net_driver.h +++ b/drivers/net/sfc/net_driver.h | |||
@@ -735,6 +735,7 @@ struct efx_nic { | |||
735 | unsigned next_buffer_table; | 735 | unsigned next_buffer_table; |
736 | unsigned n_channels; | 736 | unsigned n_channels; |
737 | unsigned n_rx_channels; | 737 | unsigned n_rx_channels; |
738 | unsigned tx_channel_offset; | ||
738 | unsigned n_tx_channels; | 739 | unsigned n_tx_channels; |
739 | unsigned int rx_buffer_len; | 740 | unsigned int rx_buffer_len; |
740 | unsigned int rx_buffer_order; | 741 | unsigned int rx_buffer_order; |
@@ -929,8 +930,13 @@ efx_get_channel(struct efx_nic *efx, unsigned index) | |||
929 | _channel = (_channel->channel + 1 < (_efx)->n_channels) ? \ | 930 | _channel = (_channel->channel + 1 < (_efx)->n_channels) ? \ |
930 | (_efx)->channel[_channel->channel + 1] : NULL) | 931 | (_efx)->channel[_channel->channel + 1] : NULL) |
931 | 932 | ||
932 | extern struct efx_tx_queue * | 933 | static inline struct efx_tx_queue * |
933 | efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type); | 934 | efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type) |
935 | { | ||
936 | EFX_BUG_ON_PARANOID(index >= efx->n_tx_channels || | ||
937 | type >= EFX_TXQ_TYPES); | ||
938 | return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type]; | ||
939 | } | ||
934 | 940 | ||
935 | static inline struct efx_tx_queue * | 941 | static inline struct efx_tx_queue * |
936 | efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type) | 942 | efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type) |
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c index 581836867098..640e368ebeee 100644 --- a/drivers/net/sis900.c +++ b/drivers/net/sis900.c | |||
@@ -36,7 +36,7 @@ | |||
36 | Rev 1.07.06 Nov. 7 2000 Jeff Garzik <jgarzik@pobox.com> some bug fix and cleaning | 36 | Rev 1.07.06 Nov. 7 2000 Jeff Garzik <jgarzik@pobox.com> some bug fix and cleaning |
37 | Rev 1.07.05 Nov. 6 2000 metapirat<metapirat@gmx.de> contribute media type select by ifconfig | 37 | Rev 1.07.05 Nov. 6 2000 metapirat<metapirat@gmx.de> contribute media type select by ifconfig |
38 | Rev 1.07.04 Sep. 6 2000 Lei-Chun Chang added ICS1893 PHY support | 38 | Rev 1.07.04 Sep. 6 2000 Lei-Chun Chang added ICS1893 PHY support |
39 | Rev 1.07.03 Aug. 24 2000 Lei-Chun Chang (lcchang@sis.com.tw) modified 630E eqaulizer workaround rule | 39 | Rev 1.07.03 Aug. 24 2000 Lei-Chun Chang (lcchang@sis.com.tw) modified 630E equalizer workaround rule |
40 | Rev 1.07.01 Aug. 08 2000 Ollie Lho minor update for SiS 630E and SiS 630E A1 | 40 | Rev 1.07.01 Aug. 08 2000 Ollie Lho minor update for SiS 630E and SiS 630E A1 |
41 | Rev 1.07 Mar. 07 2000 Ollie Lho bug fix in Rx buffer ring | 41 | Rev 1.07 Mar. 07 2000 Ollie Lho bug fix in Rx buffer ring |
42 | Rev 1.06.04 Feb. 11 2000 Jeff Garzik <jgarzik@pobox.com> softnet and init for kernel 2.4 | 42 | Rev 1.06.04 Feb. 11 2000 Jeff Garzik <jgarzik@pobox.com> softnet and init for kernel 2.4 |
@@ -1777,6 +1777,7 @@ static int sis900_rx(struct net_device *net_dev) | |||
1777 | "cur_rx:%4.4d, dirty_rx:%4.4d\n", | 1777 | "cur_rx:%4.4d, dirty_rx:%4.4d\n", |
1778 | net_dev->name, sis_priv->cur_rx, | 1778 | net_dev->name, sis_priv->cur_rx, |
1779 | sis_priv->dirty_rx); | 1779 | sis_priv->dirty_rx); |
1780 | dev_kfree_skb(skb); | ||
1780 | break; | 1781 | break; |
1781 | } | 1782 | } |
1782 | 1783 | ||
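The single added line in sis900_rx() plugs a leak: when the refill loop bails out early, the freshly allocated skb was abandoned without being freed. A generic sketch of that error path (condition and sizes are illustrative only):

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>
	#include <linux/errno.h>

	static int refill_one(struct net_device *dev, bool ring_full)
	{
		struct sk_buff *skb = netdev_alloc_skb(dev, 1536);

		if (!skb)
			return -ENOMEM;
		if (ring_full) {
			dev_kfree_skb(skb);	/* the line the patch adds */
			return -EBUSY;
		}
		/* ... map the skb and place it on the RX ring ... */
		return 0;
	}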
diff --git a/drivers/net/skge.c b/drivers/net/skge.c index 42daf98ba736..35b28f42d208 100644 --- a/drivers/net/skge.c +++ b/drivers/net/skge.c | |||
@@ -3856,9 +3856,6 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port, | |||
3856 | memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN); | 3856 | memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN); |
3857 | memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); | 3857 | memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); |
3858 | 3858 | ||
3859 | /* device is off until link detection */ | ||
3860 | netif_carrier_off(dev); | ||
3861 | |||
3862 | return dev; | 3859 | return dev; |
3863 | } | 3860 | } |
3864 | 3861 | ||
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index 39996bf3b247..7d85a38377a1 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c | |||
@@ -46,10 +46,6 @@ | |||
46 | 46 | ||
47 | #include <asm/irq.h> | 47 | #include <asm/irq.h> |
48 | 48 | ||
49 | #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) | ||
50 | #define SKY2_VLAN_TAG_USED 1 | ||
51 | #endif | ||
52 | |||
53 | #include "sky2.h" | 49 | #include "sky2.h" |
54 | 50 | ||
55 | #define DRV_NAME "sky2" | 51 | #define DRV_NAME "sky2" |
@@ -1326,40 +1322,35 @@ static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
1326 | return err; | 1322 | return err; |
1327 | } | 1323 | } |
1328 | 1324 | ||
1329 | #ifdef SKY2_VLAN_TAG_USED | 1325 | #define NETIF_F_ALL_VLAN (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX) |
1330 | static void sky2_set_vlan_mode(struct sky2_hw *hw, u16 port, bool onoff) | 1326 | |
1327 | static void sky2_vlan_mode(struct net_device *dev) | ||
1331 | { | 1328 | { |
1332 | if (onoff) { | 1329 | struct sky2_port *sky2 = netdev_priv(dev); |
1330 | struct sky2_hw *hw = sky2->hw; | ||
1331 | u16 port = sky2->port; | ||
1332 | |||
1333 | if (dev->features & NETIF_F_HW_VLAN_RX) | ||
1333 | sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), | 1334 | sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), |
1334 | RX_VLAN_STRIP_ON); | 1335 | RX_VLAN_STRIP_ON); |
1335 | sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), | 1336 | else |
1336 | TX_VLAN_TAG_ON); | ||
1337 | } else { | ||
1338 | sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), | 1337 | sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), |
1339 | RX_VLAN_STRIP_OFF); | 1338 | RX_VLAN_STRIP_OFF); |
1339 | |||
1340 | dev->vlan_features = dev->features &~ NETIF_F_ALL_VLAN; | ||
1341 | if (dev->features & NETIF_F_HW_VLAN_TX) | ||
1342 | sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), | ||
1343 | TX_VLAN_TAG_ON); | ||
1344 | else { | ||
1340 | sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), | 1345 | sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), |
1341 | TX_VLAN_TAG_OFF); | 1346 | TX_VLAN_TAG_OFF); |
1347 | |||
1348 | /* Can't do transmit offload of vlan without hw vlan */ | ||
1349 | dev->vlan_features &= ~(NETIF_F_TSO | NETIF_F_SG | ||
1350 | | NETIF_F_ALL_CSUM); | ||
1342 | } | 1351 | } |
1343 | } | 1352 | } |
1344 | 1353 | ||
1345 | static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) | ||
1346 | { | ||
1347 | struct sky2_port *sky2 = netdev_priv(dev); | ||
1348 | struct sky2_hw *hw = sky2->hw; | ||
1349 | u16 port = sky2->port; | ||
1350 | |||
1351 | netif_tx_lock_bh(dev); | ||
1352 | napi_disable(&hw->napi); | ||
1353 | |||
1354 | sky2->vlgrp = grp; | ||
1355 | sky2_set_vlan_mode(hw, port, grp != NULL); | ||
1356 | |||
1357 | sky2_read32(hw, B0_Y2_SP_LISR); | ||
1358 | napi_enable(&hw->napi); | ||
1359 | netif_tx_unlock_bh(dev); | ||
1360 | } | ||
1361 | #endif | ||
1362 | |||
1363 | /* Amount of required worst case padding in rx buffer */ | 1354 | /* Amount of required worst case padding in rx buffer */ |
1364 | static inline unsigned sky2_rx_pad(const struct sky2_hw *hw) | 1355 | static inline unsigned sky2_rx_pad(const struct sky2_hw *hw) |
1365 | { | 1356 | { |
@@ -1635,9 +1626,7 @@ static void sky2_hw_up(struct sky2_port *sky2) | |||
1635 | sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map, | 1626 | sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map, |
1636 | sky2->tx_ring_size - 1); | 1627 | sky2->tx_ring_size - 1); |
1637 | 1628 | ||
1638 | #ifdef SKY2_VLAN_TAG_USED | 1629 | sky2_vlan_mode(sky2->netdev); |
1639 | sky2_set_vlan_mode(hw, port, sky2->vlgrp != NULL); | ||
1640 | #endif | ||
1641 | 1630 | ||
1642 | sky2_rx_start(sky2); | 1631 | sky2_rx_start(sky2); |
1643 | } | 1632 | } |
@@ -1780,7 +1769,7 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb, | |||
1780 | } | 1769 | } |
1781 | 1770 | ||
1782 | ctrl = 0; | 1771 | ctrl = 0; |
1783 | #ifdef SKY2_VLAN_TAG_USED | 1772 | |
1784 | /* Add VLAN tag, can piggyback on LRGLEN or ADDR64 */ | 1773 | /* Add VLAN tag, can piggyback on LRGLEN or ADDR64 */ |
1785 | if (vlan_tx_tag_present(skb)) { | 1774 | if (vlan_tx_tag_present(skb)) { |
1786 | if (!le) { | 1775 | if (!le) { |
@@ -1792,7 +1781,6 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb, | |||
1792 | le->length = cpu_to_be16(vlan_tx_tag_get(skb)); | 1781 | le->length = cpu_to_be16(vlan_tx_tag_get(skb)); |
1793 | ctrl |= INS_VLAN; | 1782 | ctrl |= INS_VLAN; |
1794 | } | 1783 | } |
1795 | #endif | ||
1796 | 1784 | ||
1797 | /* Handle TCP checksum offload */ | 1785 | /* Handle TCP checksum offload */ |
1798 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 1786 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
@@ -2432,11 +2420,8 @@ static struct sk_buff *sky2_receive(struct net_device *dev, | |||
2432 | struct sk_buff *skb = NULL; | 2420 | struct sk_buff *skb = NULL; |
2433 | u16 count = (status & GMR_FS_LEN) >> 16; | 2421 | u16 count = (status & GMR_FS_LEN) >> 16; |
2434 | 2422 | ||
2435 | #ifdef SKY2_VLAN_TAG_USED | 2423 | if (status & GMR_FS_VLAN) |
2436 | /* Account for vlan tag */ | 2424 | count -= VLAN_HLEN; /* Account for vlan tag */ |
2437 | if (sky2->vlgrp && (status & GMR_FS_VLAN)) | ||
2438 | count -= VLAN_HLEN; | ||
2439 | #endif | ||
2440 | 2425 | ||
2441 | netif_printk(sky2, rx_status, KERN_DEBUG, dev, | 2426 | netif_printk(sky2, rx_status, KERN_DEBUG, dev, |
2442 | "rx slot %u status 0x%x len %d\n", | 2427 | "rx slot %u status 0x%x len %d\n", |
@@ -2504,17 +2489,9 @@ static inline void sky2_tx_done(struct net_device *dev, u16 last) | |||
2504 | static inline void sky2_skb_rx(const struct sky2_port *sky2, | 2489 | static inline void sky2_skb_rx(const struct sky2_port *sky2, |
2505 | u32 status, struct sk_buff *skb) | 2490 | u32 status, struct sk_buff *skb) |
2506 | { | 2491 | { |
2507 | #ifdef SKY2_VLAN_TAG_USED | 2492 | if (status & GMR_FS_VLAN) |
2508 | u16 vlan_tag = be16_to_cpu(sky2->rx_tag); | 2493 | __vlan_hwaccel_put_tag(skb, be16_to_cpu(sky2->rx_tag)); |
2509 | if (sky2->vlgrp && (status & GMR_FS_VLAN)) { | 2494 | |
2510 | if (skb->ip_summed == CHECKSUM_NONE) | ||
2511 | vlan_hwaccel_receive_skb(skb, sky2->vlgrp, vlan_tag); | ||
2512 | else | ||
2513 | vlan_gro_receive(&sky2->hw->napi, sky2->vlgrp, | ||
2514 | vlan_tag, skb); | ||
2515 | return; | ||
2516 | } | ||
2517 | #endif | ||
2518 | if (skb->ip_summed == CHECKSUM_NONE) | 2495 | if (skb->ip_summed == CHECKSUM_NONE) |
2519 | netif_receive_skb(skb); | 2496 | netif_receive_skb(skb); |
2520 | else | 2497 | else |
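For reference, a sketch of the receive-side pattern the hunk above converges on: with vlan_group gone, the hardware-stripped tag is simply recorded on the skb with __vlan_hwaccel_put_tag() and delivery goes through the ordinary receive calls. The helper below is hypothetical glue, not sky2's exact code.

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>

static void rx_deliver(struct napi_struct *napi, struct sk_buff *skb,
		       bool hw_tag_valid, u16 tci)
{
	if (hw_tag_valid)
		__vlan_hwaccel_put_tag(skb, tci);	/* record the stripped tag on the skb */

	if (skb->ip_summed == CHECKSUM_NONE)
		netif_receive_skb(skb);
	else
		napi_gro_receive(napi, skb);
}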
@@ -2631,7 +2608,6 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx) | |||
2631 | goto exit_loop; | 2608 | goto exit_loop; |
2632 | break; | 2609 | break; |
2633 | 2610 | ||
2634 | #ifdef SKY2_VLAN_TAG_USED | ||
2635 | case OP_RXVLAN: | 2611 | case OP_RXVLAN: |
2636 | sky2->rx_tag = length; | 2612 | sky2->rx_tag = length; |
2637 | break; | 2613 | break; |
@@ -2639,7 +2615,6 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx) | |||
2639 | case OP_RXCHKSVLAN: | 2615 | case OP_RXCHKSVLAN: |
2640 | sky2->rx_tag = length; | 2616 | sky2->rx_tag = length; |
2641 | /* fall through */ | 2617 | /* fall through */ |
2642 | #endif | ||
2643 | case OP_RXCHKS: | 2618 | case OP_RXCHKS: |
2644 | if (likely(sky2->flags & SKY2_FLAG_RX_CHECKSUM)) | 2619 | if (likely(sky2->flags & SKY2_FLAG_RX_CHECKSUM)) |
2645 | sky2_rx_checksum(sky2, status); | 2620 | sky2_rx_checksum(sky2, status); |
@@ -3042,6 +3017,10 @@ static int __devinit sky2_init(struct sky2_hw *hw) | |||
3042 | | SKY2_HW_NEW_LE | 3017 | | SKY2_HW_NEW_LE |
3043 | | SKY2_HW_AUTO_TX_SUM | 3018 | | SKY2_HW_AUTO_TX_SUM |
3044 | | SKY2_HW_ADV_POWER_CTL; | 3019 | | SKY2_HW_ADV_POWER_CTL; |
3020 | |||
3021 | /* The workaround for status conflicts with VLAN tag detection. */ | ||
3022 | if (hw->chip_rev == CHIP_REV_YU_FE2_A0) | ||
3023 | hw->flags |= SKY2_HW_VLAN_BROKEN; | ||
3045 | break; | 3024 | break; |
3046 | 3025 | ||
3047 | case CHIP_ID_YUKON_SUPR: | 3026 | case CHIP_ID_YUKON_SUPR: |
@@ -3411,18 +3390,15 @@ static u32 sky2_supported_modes(const struct sky2_hw *hw) | |||
3411 | u32 modes = SUPPORTED_10baseT_Half | 3390 | u32 modes = SUPPORTED_10baseT_Half |
3412 | | SUPPORTED_10baseT_Full | 3391 | | SUPPORTED_10baseT_Full |
3413 | | SUPPORTED_100baseT_Half | 3392 | | SUPPORTED_100baseT_Half |
3414 | | SUPPORTED_100baseT_Full | 3393 | | SUPPORTED_100baseT_Full; |
3415 | | SUPPORTED_Autoneg | SUPPORTED_TP; | ||
3416 | 3394 | ||
3417 | if (hw->flags & SKY2_HW_GIGABIT) | 3395 | if (hw->flags & SKY2_HW_GIGABIT) |
3418 | modes |= SUPPORTED_1000baseT_Half | 3396 | modes |= SUPPORTED_1000baseT_Half |
3419 | | SUPPORTED_1000baseT_Full; | 3397 | | SUPPORTED_1000baseT_Full; |
3420 | return modes; | 3398 | return modes; |
3421 | } else | 3399 | } else |
3422 | return SUPPORTED_1000baseT_Half | 3400 | return SUPPORTED_1000baseT_Half |
3423 | | SUPPORTED_1000baseT_Full | 3401 | | SUPPORTED_1000baseT_Full; |
3424 | | SUPPORTED_Autoneg | ||
3425 | | SUPPORTED_FIBRE; | ||
3426 | } | 3402 | } |
3427 | 3403 | ||
3428 | static int sky2_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) | 3404 | static int sky2_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) |
@@ -3436,9 +3412,11 @@ static int sky2_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) | |||
3436 | if (sky2_is_copper(hw)) { | 3412 | if (sky2_is_copper(hw)) { |
3437 | ecmd->port = PORT_TP; | 3413 | ecmd->port = PORT_TP; |
3438 | ecmd->speed = sky2->speed; | 3414 | ecmd->speed = sky2->speed; |
3415 | ecmd->supported |= SUPPORTED_Autoneg | SUPPORTED_TP; | ||
3439 | } else { | 3416 | } else { |
3440 | ecmd->speed = SPEED_1000; | 3417 | ecmd->speed = SPEED_1000; |
3441 | ecmd->port = PORT_FIBRE; | 3418 | ecmd->port = PORT_FIBRE; |
3419 | ecmd->supported |= SUPPORTED_Autoneg | SUPPORTED_FIBRE; | ||
3442 | } | 3420 | } |
3443 | 3421 | ||
3444 | ecmd->advertising = sky2->advertising; | 3422 | ecmd->advertising = sky2->advertising; |
@@ -3455,8 +3433,19 @@ static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) | |||
3455 | u32 supported = sky2_supported_modes(hw); | 3433 | u32 supported = sky2_supported_modes(hw); |
3456 | 3434 | ||
3457 | if (ecmd->autoneg == AUTONEG_ENABLE) { | 3435 | if (ecmd->autoneg == AUTONEG_ENABLE) { |
3436 | if (ecmd->advertising & ~supported) | ||
3437 | return -EINVAL; | ||
3438 | |||
3439 | if (sky2_is_copper(hw)) | ||
3440 | sky2->advertising = ecmd->advertising | | ||
3441 | ADVERTISED_TP | | ||
3442 | ADVERTISED_Autoneg; | ||
3443 | else | ||
3444 | sky2->advertising = ecmd->advertising | | ||
3445 | ADVERTISED_FIBRE | | ||
3446 | ADVERTISED_Autoneg; | ||
3447 | |||
3458 | sky2->flags |= SKY2_FLAG_AUTO_SPEED; | 3448 | sky2->flags |= SKY2_FLAG_AUTO_SPEED; |
3459 | ecmd->advertising = supported; | ||
3460 | sky2->duplex = -1; | 3449 | sky2->duplex = -1; |
3461 | sky2->speed = -1; | 3450 | sky2->speed = -1; |
3462 | } else { | 3451 | } else { |
@@ -3500,8 +3489,6 @@ static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) | |||
3500 | sky2->flags &= ~SKY2_FLAG_AUTO_SPEED; | 3489 | sky2->flags &= ~SKY2_FLAG_AUTO_SPEED; |
3501 | } | 3490 | } |
3502 | 3491 | ||
3503 | sky2->advertising = ecmd->advertising; | ||
3504 | |||
3505 | if (netif_running(dev)) { | 3492 | if (netif_running(dev)) { |
3506 | sky2_phy_reinit(sky2); | 3493 | sky2_phy_reinit(sky2); |
3507 | sky2_set_multicast(dev); | 3494 | sky2_set_multicast(dev); |
@@ -4229,15 +4216,28 @@ static int sky2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom | |||
4229 | static int sky2_set_flags(struct net_device *dev, u32 data) | 4216 | static int sky2_set_flags(struct net_device *dev, u32 data) |
4230 | { | 4217 | { |
4231 | struct sky2_port *sky2 = netdev_priv(dev); | 4218 | struct sky2_port *sky2 = netdev_priv(dev); |
4232 | u32 supported = | 4219 | unsigned long old_feat = dev->features; |
4233 | (sky2->hw->flags & SKY2_HW_RSS_BROKEN) ? 0 : ETH_FLAG_RXHASH; | 4220 | u32 supported = 0; |
4234 | int rc; | 4221 | int rc; |
4235 | 4222 | ||
4223 | if (!(sky2->hw->flags & SKY2_HW_RSS_BROKEN)) | ||
4224 | supported |= ETH_FLAG_RXHASH; | ||
4225 | |||
4226 | if (!(sky2->hw->flags & SKY2_HW_VLAN_BROKEN)) | ||
4227 | supported |= ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN; | ||
4228 | |||
4229 | printk(KERN_DEBUG "sky2 set_flags: supported %x data %x\n", | ||
4230 | supported, data); | ||
4231 | |||
4236 | rc = ethtool_op_set_flags(dev, data, supported); | 4232 | rc = ethtool_op_set_flags(dev, data, supported); |
4237 | if (rc) | 4233 | if (rc) |
4238 | return rc; | 4234 | return rc; |
4239 | 4235 | ||
4240 | rx_set_rss(dev); | 4236 | if ((old_feat ^ dev->features) & NETIF_F_RXHASH) |
4237 | rx_set_rss(dev); | ||
4238 | |||
4239 | if ((old_feat ^ dev->features) & NETIF_F_ALL_VLAN) | ||
4240 | sky2_vlan_mode(dev); | ||
4241 | 4241 | ||
4242 | return 0; | 4242 | return 0; |
4243 | } | 4243 | } |
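A sketch of the ethtool set_flags pattern in the hunk above, assuming a hypothetical reprogram_vlan_offload() hook: only the flag bits the hardware supports are accepted, and the device is touched only when the XOR of old and new features shows a real change.

#include <linux/netdevice.h>
#include <linux/ethtool.h>

static void reprogram_vlan_offload(struct net_device *dev)
{
	/* hypothetical: push dev->features into the MAC's VLAN registers */
}

static int example_set_flags(struct net_device *dev, u32 data)
{
	unsigned long old_feat = dev->features;
	u32 supported = ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN;
	int rc;

	rc = ethtool_op_set_flags(dev, data, supported);
	if (rc)
		return rc;

	if ((old_feat ^ dev->features) & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX))
		reprogram_vlan_offload(dev);	/* reprogram only on a real change */

	return 0;
}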
@@ -4273,6 +4273,7 @@ static const struct ethtool_ops sky2_ethtool_ops = { | |||
4273 | .get_sset_count = sky2_get_sset_count, | 4273 | .get_sset_count = sky2_get_sset_count, |
4274 | .get_ethtool_stats = sky2_get_ethtool_stats, | 4274 | .get_ethtool_stats = sky2_get_ethtool_stats, |
4275 | .set_flags = sky2_set_flags, | 4275 | .set_flags = sky2_set_flags, |
4276 | .get_flags = ethtool_op_get_flags, | ||
4276 | }; | 4277 | }; |
4277 | 4278 | ||
4278 | #ifdef CONFIG_SKY2_DEBUG | 4279 | #ifdef CONFIG_SKY2_DEBUG |
@@ -4554,9 +4555,6 @@ static const struct net_device_ops sky2_netdev_ops[2] = { | |||
4554 | .ndo_change_mtu = sky2_change_mtu, | 4555 | .ndo_change_mtu = sky2_change_mtu, |
4555 | .ndo_tx_timeout = sky2_tx_timeout, | 4556 | .ndo_tx_timeout = sky2_tx_timeout, |
4556 | .ndo_get_stats64 = sky2_get_stats, | 4557 | .ndo_get_stats64 = sky2_get_stats, |
4557 | #ifdef SKY2_VLAN_TAG_USED | ||
4558 | .ndo_vlan_rx_register = sky2_vlan_rx_register, | ||
4559 | #endif | ||
4560 | #ifdef CONFIG_NET_POLL_CONTROLLER | 4558 | #ifdef CONFIG_NET_POLL_CONTROLLER |
4561 | .ndo_poll_controller = sky2_netpoll, | 4559 | .ndo_poll_controller = sky2_netpoll, |
4562 | #endif | 4560 | #endif |
@@ -4572,9 +4570,6 @@ static const struct net_device_ops sky2_netdev_ops[2] = { | |||
4572 | .ndo_change_mtu = sky2_change_mtu, | 4570 | .ndo_change_mtu = sky2_change_mtu, |
4573 | .ndo_tx_timeout = sky2_tx_timeout, | 4571 | .ndo_tx_timeout = sky2_tx_timeout, |
4574 | .ndo_get_stats64 = sky2_get_stats, | 4572 | .ndo_get_stats64 = sky2_get_stats, |
4575 | #ifdef SKY2_VLAN_TAG_USED | ||
4576 | .ndo_vlan_rx_register = sky2_vlan_rx_register, | ||
4577 | #endif | ||
4578 | }, | 4573 | }, |
4579 | }; | 4574 | }; |
4580 | 4575 | ||
@@ -4625,7 +4620,8 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw, | |||
4625 | sky2->port = port; | 4620 | sky2->port = port; |
4626 | 4621 | ||
4627 | dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | 4622 | dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
4628 | | NETIF_F_TSO | NETIF_F_GRO; | 4623 | | NETIF_F_TSO | NETIF_F_GRO; |
4624 | |||
4629 | if (highmem) | 4625 | if (highmem) |
4630 | dev->features |= NETIF_F_HIGHDMA; | 4626 | dev->features |= NETIF_F_HIGHDMA; |
4631 | 4627 | ||
@@ -4633,13 +4629,8 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw, | |||
4633 | if (!(hw->flags & SKY2_HW_RSS_BROKEN)) | 4629 | if (!(hw->flags & SKY2_HW_RSS_BROKEN)) |
4634 | dev->features |= NETIF_F_RXHASH; | 4630 | dev->features |= NETIF_F_RXHASH; |
4635 | 4631 | ||
4636 | #ifdef SKY2_VLAN_TAG_USED | 4632 | if (!(hw->flags & SKY2_HW_VLAN_BROKEN)) |
4637 | /* The workaround for FE+ status conflicts with VLAN tag detection. */ | ||
4638 | if (!(sky2->hw->chip_id == CHIP_ID_YUKON_FE_P && | ||
4639 | sky2->hw->chip_rev == CHIP_REV_YU_FE2_A0)) { | ||
4640 | dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; | 4633 | dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; |
4641 | } | ||
4642 | #endif | ||
4643 | 4634 | ||
4644 | /* read the mac address */ | 4635 | /* read the mac address */ |
4645 | memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8, ETH_ALEN); | 4636 | memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8, ETH_ALEN); |
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h index 80bdc404f1ea..6861b0e8db9a 100644 --- a/drivers/net/sky2.h +++ b/drivers/net/sky2.h | |||
@@ -2236,11 +2236,8 @@ struct sky2_port { | |||
2236 | u16 rx_pending; | 2236 | u16 rx_pending; |
2237 | u16 rx_data_size; | 2237 | u16 rx_data_size; |
2238 | u16 rx_nfrags; | 2238 | u16 rx_nfrags; |
2239 | |||
2240 | #ifdef SKY2_VLAN_TAG_USED | ||
2241 | u16 rx_tag; | 2239 | u16 rx_tag; |
2242 | struct vlan_group *vlgrp; | 2240 | |
2243 | #endif | ||
2244 | struct { | 2241 | struct { |
2245 | unsigned long last; | 2242 | unsigned long last; |
2246 | u32 mac_rp; | 2243 | u32 mac_rp; |
@@ -2284,6 +2281,7 @@ struct sky2_hw { | |||
2284 | #define SKY2_HW_AUTO_TX_SUM 0x00000040 /* new IP decode for Tx */ | 2281 | #define SKY2_HW_AUTO_TX_SUM 0x00000040 /* new IP decode for Tx */ |
2285 | #define SKY2_HW_ADV_POWER_CTL 0x00000080 /* additional PHY power regs */ | 2282 | #define SKY2_HW_ADV_POWER_CTL 0x00000080 /* additional PHY power regs */ |
2286 | #define SKY2_HW_RSS_BROKEN 0x00000100 | 2283 | #define SKY2_HW_RSS_BROKEN 0x00000100 |
2284 | #define SKY2_HW_VLAN_BROKEN 0x00000200 | ||
2287 | 2285 | ||
2288 | u8 chip_id; | 2286 | u8 chip_id; |
2289 | u8 chip_rev; | 2287 | u8 chip_rev; |
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c index 34a0af3837f9..0e5f03135b50 100644 --- a/drivers/net/stmmac/stmmac_main.c +++ b/drivers/net/stmmac/stmmac_main.c | |||
@@ -1560,8 +1560,10 @@ static int stmmac_mac_device_setup(struct net_device *dev) | |||
1560 | 1560 | ||
1561 | priv->hw = device; | 1561 | priv->hw = device; |
1562 | 1562 | ||
1563 | if (device_can_wakeup(priv->device)) | 1563 | if (device_can_wakeup(priv->device)) { |
1564 | priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */ | 1564 | priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */ |
1565 | enable_irq_wake(dev->irq); | ||
1566 | } | ||
1565 | 1567 | ||
1566 | return 0; | 1568 | return 0; |
1567 | } | 1569 | } |
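A small sketch of the wake-on-LAN arming pattern used in the stmmac hunk above (generic, not stmmac specific): when the platform reports wake capability, a default WoL option is chosen and the interrupt line is armed as a wakeup source.

#include <linux/device.h>
#include <linux/pm_wakeup.h>
#include <linux/interrupt.h>
#include <linux/ethtool.h>

static void setup_wol(struct device *dev, int irq, u32 *wolopts)
{
	if (device_can_wakeup(dev)) {
		*wolopts = WAKE_MAGIC;	/* Magic Packet as the default */
		enable_irq_wake(irq);	/* let this IRQ wake the system */
	}
}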
diff --git a/drivers/net/tehuti.c b/drivers/net/tehuti.c index 296000bf5a25..3397618d4d96 100644 --- a/drivers/net/tehuti.c +++ b/drivers/net/tehuti.c | |||
@@ -12,7 +12,7 @@ | |||
12 | /* | 12 | /* |
13 | * RX HW/SW interaction overview | 13 | * RX HW/SW interaction overview |
14 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | 14 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
15 | * There are 2 types of RX communication channels betwean driver and NIC. | 15 | * There are 2 types of RX communication channels between driver and NIC. |
16 | * 1) RX Free Fifo - RXF - holds descriptors of empty buffers to accept incoming | 16 | * 1) RX Free Fifo - RXF - holds descriptors of empty buffers to accept incoming |
17 | * traffic. This Fifo is filled by SW and is readen by HW. Each descriptor holds | 17 | * traffic. This Fifo is filled by SW and is readen by HW. Each descriptor holds |
18 | * info about buffer's location, size and ID. An ID field is used to identify a | 18 | * info about buffer's location, size and ID. An ID field is used to identify a |
@@ -821,7 +821,7 @@ static void bdx_setmulti(struct net_device *ndev) | |||
821 | } | 821 | } |
822 | 822 | ||
823 | /* use PMF to accept first MAC_MCST_NUM (15) addresses */ | 823 | /* use PMF to accept first MAC_MCST_NUM (15) addresses */ |
824 | /* TBD: sort addreses and write them in ascending order | 824 | /* TBD: sort addresses and write them in ascending order |
825 | * into RX_MAC_MCST regs. we skip this phase now and accept ALL | 825 | * into RX_MAC_MCST regs. we skip this phase now and accept ALL |
826 | * multicast frames throu IMF */ | 826 | * multicast frames throu IMF */ |
827 | /* accept the rest of addresses throu IMF */ | 827 | /* accept the rest of addresses throu IMF */ |
@@ -1346,7 +1346,7 @@ static void print_rxfd(struct rxf_desc *rxfd) | |||
1346 | /* | 1346 | /* |
1347 | * TX HW/SW interaction overview | 1347 | * TX HW/SW interaction overview |
1348 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | 1348 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
1349 | * There are 2 types of TX communication channels betwean driver and NIC. | 1349 | * There are 2 types of TX communication channels between driver and NIC. |
1350 | * 1) TX Free Fifo - TXF - holds ack descriptors for sent packets | 1350 | * 1) TX Free Fifo - TXF - holds ack descriptors for sent packets |
1351 | * 2) TX Data Fifo - TXD - holds descriptors of full buffers. | 1351 | * 2) TX Data Fifo - TXD - holds descriptors of full buffers. |
1352 | * | 1352 | * |
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index 7841a8f69998..06c0e5033656 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c | |||
@@ -60,12 +60,6 @@ | |||
60 | #define BAR_0 0 | 60 | #define BAR_0 0 |
61 | #define BAR_2 2 | 61 | #define BAR_2 2 |
62 | 62 | ||
63 | #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) | ||
64 | #define TG3_VLAN_TAG_USED 1 | ||
65 | #else | ||
66 | #define TG3_VLAN_TAG_USED 0 | ||
67 | #endif | ||
68 | |||
69 | #include "tg3.h" | 63 | #include "tg3.h" |
70 | 64 | ||
71 | #define DRV_MODULE_NAME "tg3" | 65 | #define DRV_MODULE_NAME "tg3" |
@@ -134,9 +128,6 @@ | |||
134 | TG3_TX_RING_SIZE) | 128 | TG3_TX_RING_SIZE) |
135 | #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1)) | 129 | #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1)) |
136 | 130 | ||
137 | #define TG3_RX_DMA_ALIGN 16 | ||
138 | #define TG3_RX_HEADROOM ALIGN(VLAN_HLEN, TG3_RX_DMA_ALIGN) | ||
139 | |||
140 | #define TG3_DMA_BYTE_ENAB 64 | 131 | #define TG3_DMA_BYTE_ENAB 64 |
141 | 132 | ||
142 | #define TG3_RX_STD_DMA_SZ 1536 | 133 | #define TG3_RX_STD_DMA_SZ 1536 |
@@ -4722,8 +4713,6 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) | |||
4722 | struct sk_buff *skb; | 4713 | struct sk_buff *skb; |
4723 | dma_addr_t dma_addr; | 4714 | dma_addr_t dma_addr; |
4724 | u32 opaque_key, desc_idx, *post_ptr; | 4715 | u32 opaque_key, desc_idx, *post_ptr; |
4725 | bool hw_vlan __maybe_unused = false; | ||
4726 | u16 vtag __maybe_unused = 0; | ||
4727 | 4716 | ||
4728 | desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; | 4717 | desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; |
4729 | opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; | 4718 | opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; |
@@ -4782,12 +4771,12 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) | |||
4782 | tg3_recycle_rx(tnapi, tpr, opaque_key, | 4771 | tg3_recycle_rx(tnapi, tpr, opaque_key, |
4783 | desc_idx, *post_ptr); | 4772 | desc_idx, *post_ptr); |
4784 | 4773 | ||
4785 | copy_skb = netdev_alloc_skb(tp->dev, len + VLAN_HLEN + | 4774 | copy_skb = netdev_alloc_skb(tp->dev, len + |
4786 | TG3_RAW_IP_ALIGN); | 4775 | TG3_RAW_IP_ALIGN); |
4787 | if (copy_skb == NULL) | 4776 | if (copy_skb == NULL) |
4788 | goto drop_it_no_recycle; | 4777 | goto drop_it_no_recycle; |
4789 | 4778 | ||
4790 | skb_reserve(copy_skb, TG3_RAW_IP_ALIGN + VLAN_HLEN); | 4779 | skb_reserve(copy_skb, TG3_RAW_IP_ALIGN); |
4791 | skb_put(copy_skb, len); | 4780 | skb_put(copy_skb, len); |
4792 | pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); | 4781 | pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); |
4793 | skb_copy_from_linear_data(skb, copy_skb->data, len); | 4782 | skb_copy_from_linear_data(skb, copy_skb->data, len); |
@@ -4814,30 +4803,11 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) | |||
4814 | } | 4803 | } |
4815 | 4804 | ||
4816 | if (desc->type_flags & RXD_FLAG_VLAN && | 4805 | if (desc->type_flags & RXD_FLAG_VLAN && |
4817 | !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG)) { | 4806 | !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG)) |
4818 | vtag = desc->err_vlan & RXD_VLAN_MASK; | 4807 | __vlan_hwaccel_put_tag(skb, |
4819 | #if TG3_VLAN_TAG_USED | 4808 | desc->err_vlan & RXD_VLAN_MASK); |
4820 | if (tp->vlgrp) | ||
4821 | hw_vlan = true; | ||
4822 | else | ||
4823 | #endif | ||
4824 | { | ||
4825 | struct vlan_ethhdr *ve = (struct vlan_ethhdr *) | ||
4826 | __skb_push(skb, VLAN_HLEN); | ||
4827 | |||
4828 | memmove(ve, skb->data + VLAN_HLEN, | ||
4829 | ETH_ALEN * 2); | ||
4830 | ve->h_vlan_proto = htons(ETH_P_8021Q); | ||
4831 | ve->h_vlan_TCI = htons(vtag); | ||
4832 | } | ||
4833 | } | ||
4834 | 4809 | ||
4835 | #if TG3_VLAN_TAG_USED | 4810 | napi_gro_receive(&tnapi->napi, skb); |
4836 | if (hw_vlan) | ||
4837 | vlan_gro_receive(&tnapi->napi, tp->vlgrp, vtag, skb); | ||
4838 | else | ||
4839 | #endif | ||
4840 | napi_gro_receive(&tnapi->napi, skb); | ||
4841 | 4811 | ||
4842 | received++; | 4812 | received++; |
4843 | budget--; | 4813 | budget--; |
@@ -5740,11 +5710,9 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, | |||
5740 | base_flags |= TXD_FLAG_TCPUDP_CSUM; | 5710 | base_flags |= TXD_FLAG_TCPUDP_CSUM; |
5741 | } | 5711 | } |
5742 | 5712 | ||
5743 | #if TG3_VLAN_TAG_USED | ||
5744 | if (vlan_tx_tag_present(skb)) | 5713 | if (vlan_tx_tag_present(skb)) |
5745 | base_flags |= (TXD_FLAG_VLAN | | 5714 | base_flags |= (TXD_FLAG_VLAN | |
5746 | (vlan_tx_tag_get(skb) << 16)); | 5715 | (vlan_tx_tag_get(skb) << 16)); |
5747 | #endif | ||
5748 | 5716 | ||
5749 | len = skb_headlen(skb); | 5717 | len = skb_headlen(skb); |
5750 | 5718 | ||
@@ -5986,11 +5954,10 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb, | |||
5986 | } | 5954 | } |
5987 | } | 5955 | } |
5988 | } | 5956 | } |
5989 | #if TG3_VLAN_TAG_USED | 5957 | |
5990 | if (vlan_tx_tag_present(skb)) | 5958 | if (vlan_tx_tag_present(skb)) |
5991 | base_flags |= (TXD_FLAG_VLAN | | 5959 | base_flags |= (TXD_FLAG_VLAN | |
5992 | (vlan_tx_tag_get(skb) << 16)); | 5960 | (vlan_tx_tag_get(skb) << 16)); |
5993 | #endif | ||
5994 | 5961 | ||
5995 | if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) && | 5962 | if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) && |
5996 | !mss && skb->len > VLAN_ETH_FRAME_LEN) | 5963 | !mss && skb->len > VLAN_ETH_FRAME_LEN) |
@@ -9532,17 +9499,10 @@ static void __tg3_set_rx_mode(struct net_device *dev) | |||
9532 | rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC | | 9499 | rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC | |
9533 | RX_MODE_KEEP_VLAN_TAG); | 9500 | RX_MODE_KEEP_VLAN_TAG); |
9534 | 9501 | ||
9502 | #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE) | ||
9535 | /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG | 9503 | /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG |
9536 | * flag clear. | 9504 | * flag clear. |
9537 | */ | 9505 | */ |
9538 | #if TG3_VLAN_TAG_USED | ||
9539 | if (!tp->vlgrp && | ||
9540 | !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) | ||
9541 | rx_mode |= RX_MODE_KEEP_VLAN_TAG; | ||
9542 | #else | ||
9543 | /* By definition, VLAN is disabled always in this | ||
9544 | * case. | ||
9545 | */ | ||
9546 | if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) | 9506 | if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) |
9547 | rx_mode |= RX_MODE_KEEP_VLAN_TAG; | 9507 | rx_mode |= RX_MODE_KEEP_VLAN_TAG; |
9548 | #endif | 9508 | #endif |
@@ -11198,7 +11158,9 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
11198 | if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) | 11158 | if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) |
11199 | break; /* We have no PHY */ | 11159 | break; /* We have no PHY */ |
11200 | 11160 | ||
11201 | if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) | 11161 | if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) || |
11162 | ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) && | ||
11163 | !netif_running(dev))) | ||
11202 | return -EAGAIN; | 11164 | return -EAGAIN; |
11203 | 11165 | ||
11204 | spin_lock_bh(&tp->lock); | 11166 | spin_lock_bh(&tp->lock); |
@@ -11214,7 +11176,9 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
11214 | if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) | 11176 | if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) |
11215 | break; /* We have no PHY */ | 11177 | break; /* We have no PHY */ |
11216 | 11178 | ||
11217 | if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) | 11179 | if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) || |
11180 | ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) && | ||
11181 | !netif_running(dev))) | ||
11218 | return -EAGAIN; | 11182 | return -EAGAIN; |
11219 | 11183 | ||
11220 | spin_lock_bh(&tp->lock); | 11184 | spin_lock_bh(&tp->lock); |
@@ -11230,31 +11194,6 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
11230 | return -EOPNOTSUPP; | 11194 | return -EOPNOTSUPP; |
11231 | } | 11195 | } |
11232 | 11196 | ||
11233 | #if TG3_VLAN_TAG_USED | ||
11234 | static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) | ||
11235 | { | ||
11236 | struct tg3 *tp = netdev_priv(dev); | ||
11237 | |||
11238 | if (!netif_running(dev)) { | ||
11239 | tp->vlgrp = grp; | ||
11240 | return; | ||
11241 | } | ||
11242 | |||
11243 | tg3_netif_stop(tp); | ||
11244 | |||
11245 | tg3_full_lock(tp, 0); | ||
11246 | |||
11247 | tp->vlgrp = grp; | ||
11248 | |||
11249 | /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */ | ||
11250 | __tg3_set_rx_mode(dev); | ||
11251 | |||
11252 | tg3_netif_start(tp); | ||
11253 | |||
11254 | tg3_full_unlock(tp); | ||
11255 | } | ||
11256 | #endif | ||
11257 | |||
11258 | static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) | 11197 | static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) |
11259 | { | 11198 | { |
11260 | struct tg3 *tp = netdev_priv(dev); | 11199 | struct tg3 *tp = netdev_priv(dev); |
@@ -13066,9 +13005,7 @@ static struct pci_dev * __devinit tg3_find_peer(struct tg3 *); | |||
13066 | 13005 | ||
13067 | static void inline vlan_features_add(struct net_device *dev, unsigned long flags) | 13006 | static void inline vlan_features_add(struct net_device *dev, unsigned long flags) |
13068 | { | 13007 | { |
13069 | #if TG3_VLAN_TAG_USED | ||
13070 | dev->vlan_features |= flags; | 13008 | dev->vlan_features |= flags; |
13071 | #endif | ||
13072 | } | 13009 | } |
13073 | 13010 | ||
13074 | static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp) | 13011 | static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp) |
@@ -13861,11 +13798,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
13861 | else | 13798 | else |
13862 | tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES; | 13799 | tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES; |
13863 | 13800 | ||
13864 | tp->rx_offset = NET_IP_ALIGN + TG3_RX_HEADROOM; | 13801 | tp->rx_offset = NET_IP_ALIGN; |
13865 | tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD; | 13802 | tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD; |
13866 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 && | 13803 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 && |
13867 | (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) { | 13804 | (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) { |
13868 | tp->rx_offset -= NET_IP_ALIGN; | 13805 | tp->rx_offset = 0; |
13869 | #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS | 13806 | #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS |
13870 | tp->rx_copy_thresh = ~(u16)0; | 13807 | tp->rx_copy_thresh = ~(u16)0; |
13871 | #endif | 13808 | #endif |
@@ -14629,9 +14566,6 @@ static const struct net_device_ops tg3_netdev_ops = { | |||
14629 | .ndo_do_ioctl = tg3_ioctl, | 14566 | .ndo_do_ioctl = tg3_ioctl, |
14630 | .ndo_tx_timeout = tg3_tx_timeout, | 14567 | .ndo_tx_timeout = tg3_tx_timeout, |
14631 | .ndo_change_mtu = tg3_change_mtu, | 14568 | .ndo_change_mtu = tg3_change_mtu, |
14632 | #if TG3_VLAN_TAG_USED | ||
14633 | .ndo_vlan_rx_register = tg3_vlan_rx_register, | ||
14634 | #endif | ||
14635 | #ifdef CONFIG_NET_POLL_CONTROLLER | 14569 | #ifdef CONFIG_NET_POLL_CONTROLLER |
14636 | .ndo_poll_controller = tg3_poll_controller, | 14570 | .ndo_poll_controller = tg3_poll_controller, |
14637 | #endif | 14571 | #endif |
@@ -14648,9 +14582,6 @@ static const struct net_device_ops tg3_netdev_ops_dma_bug = { | |||
14648 | .ndo_do_ioctl = tg3_ioctl, | 14582 | .ndo_do_ioctl = tg3_ioctl, |
14649 | .ndo_tx_timeout = tg3_tx_timeout, | 14583 | .ndo_tx_timeout = tg3_tx_timeout, |
14650 | .ndo_change_mtu = tg3_change_mtu, | 14584 | .ndo_change_mtu = tg3_change_mtu, |
14651 | #if TG3_VLAN_TAG_USED | ||
14652 | .ndo_vlan_rx_register = tg3_vlan_rx_register, | ||
14653 | #endif | ||
14654 | #ifdef CONFIG_NET_POLL_CONTROLLER | 14585 | #ifdef CONFIG_NET_POLL_CONTROLLER |
14655 | .ndo_poll_controller = tg3_poll_controller, | 14586 | .ndo_poll_controller = tg3_poll_controller, |
14656 | #endif | 14587 | #endif |
@@ -14700,9 +14631,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, | |||
14700 | 14631 | ||
14701 | SET_NETDEV_DEV(dev, &pdev->dev); | 14632 | SET_NETDEV_DEV(dev, &pdev->dev); |
14702 | 14633 | ||
14703 | #if TG3_VLAN_TAG_USED | ||
14704 | dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; | 14634 | dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; |
14705 | #endif | ||
14706 | 14635 | ||
14707 | tp = netdev_priv(dev); | 14636 | tp = netdev_priv(dev); |
14708 | tp->pdev = pdev; | 14637 | tp->pdev = pdev; |
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h index d62c8d937c82..f528243e1a4f 100644 --- a/drivers/net/tg3.h +++ b/drivers/net/tg3.h | |||
@@ -2808,9 +2808,6 @@ struct tg3 { | |||
2808 | u32 rx_std_max_post; | 2808 | u32 rx_std_max_post; |
2809 | u32 rx_offset; | 2809 | u32 rx_offset; |
2810 | u32 rx_pkt_map_sz; | 2810 | u32 rx_pkt_map_sz; |
2811 | #if TG3_VLAN_TAG_USED | ||
2812 | struct vlan_group *vlgrp; | ||
2813 | #endif | ||
2814 | 2811 | ||
2815 | 2812 | ||
2816 | /* begin "everything else" cacheline(s) section */ | 2813 | /* begin "everything else" cacheline(s) section */ |
diff --git a/drivers/net/tile/tilepro.c b/drivers/net/tile/tilepro.c index 0e6bac5ec65b..7cb301da7474 100644 --- a/drivers/net/tile/tilepro.c +++ b/drivers/net/tile/tilepro.c | |||
@@ -142,14 +142,6 @@ | |||
142 | MODULE_AUTHOR("Tilera"); | 142 | MODULE_AUTHOR("Tilera"); |
143 | MODULE_LICENSE("GPL"); | 143 | MODULE_LICENSE("GPL"); |
144 | 144 | ||
145 | |||
146 | #define IS_MULTICAST(mac_addr) \ | ||
147 | (((u8 *)(mac_addr))[0] & 0x01) | ||
148 | |||
149 | #define IS_BROADCAST(mac_addr) \ | ||
150 | (((u16 *)(mac_addr))[0] == 0xffff) | ||
151 | |||
152 | |||
153 | /* | 145 | /* |
154 | * Queue of incoming packets for a specific cpu and device. | 146 | * Queue of incoming packets for a specific cpu and device. |
155 | * | 147 | * |
@@ -795,7 +787,7 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index) | |||
795 | /* | 787 | /* |
796 | * FIXME: Implement HW multicast filter. | 788 | * FIXME: Implement HW multicast filter. |
797 | */ | 789 | */ |
798 | if (!IS_MULTICAST(buf) && !IS_BROADCAST(buf)) { | 790 | if (is_unicast_ether_addr(buf)) { |
799 | /* Filter packets not for our address. */ | 791 | /* Filter packets not for our address. */ |
800 | const u8 *mine = dev->dev_addr; | 792 | const u8 *mine = dev->dev_addr; |
801 | filter = compare_ether_addr(mine, buf); | 793 | filter = compare_ether_addr(mine, buf); |
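A sketch of the address-filtering idiom the tilepro hunk above switches to: is_unicast_ether_addr() from <linux/etherdevice.h> replaces the hand-rolled multicast/broadcast macros, and compare_ether_addr() returns 0 on a match. The helper name is an assumption.

#include <linux/types.h>
#include <linux/etherdevice.h>

static bool frame_is_for_us(const u8 *dst, const u8 *our_addr)
{
	/* multicast/broadcast frames are accepted here; HW filtering is still TODO */
	if (!is_unicast_ether_addr(dst))
		return true;

	return compare_ether_addr(our_addr, dst) == 0;
}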
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 7599c457abd1..b100bd50a0d7 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
@@ -1309,7 +1309,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd, | |||
1309 | break; | 1309 | break; |
1310 | 1310 | ||
1311 | case SIOCGIFHWADDR: | 1311 | case SIOCGIFHWADDR: |
1312 | /* Get hw addres */ | 1312 | /* Get hw address */ |
1313 | memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN); | 1313 | memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN); |
1314 | ifr.ifr_hwaddr.sa_family = tun->dev->type; | 1314 | ifr.ifr_hwaddr.sa_family = tun->dev->type; |
1315 | if (copy_to_user(argp, &ifr, ifreq_len)) | 1315 | if (copy_to_user(argp, &ifr, ifreq_len)) |
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c index acbdab3d66ca..715e7b47e7e9 100644 --- a/drivers/net/ucc_geth.c +++ b/drivers/net/ucc_geth.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/phy.h> | 28 | #include <linux/phy.h> |
29 | #include <linux/workqueue.h> | 29 | #include <linux/workqueue.h> |
30 | #include <linux/of_mdio.h> | 30 | #include <linux/of_mdio.h> |
31 | #include <linux/of_net.h> | ||
31 | #include <linux/of_platform.h> | 32 | #include <linux/of_platform.h> |
32 | 33 | ||
33 | #include <asm/uaccess.h> | 34 | #include <asm/uaccess.h> |
@@ -2031,7 +2032,7 @@ static void ucc_geth_set_multi(struct net_device *dev) | |||
2031 | netdev_for_each_mc_addr(ha, dev) { | 2032 | netdev_for_each_mc_addr(ha, dev) { |
2032 | /* Only support group multicast for now. | 2033 | /* Only support group multicast for now. |
2033 | */ | 2034 | */ |
2034 | if (!(ha->addr[0] & 1)) | 2035 | if (!is_multicast_ether_addr(ha->addr)) |
2035 | continue; | 2036 | continue; |
2036 | 2037 | ||
2037 | /* Ask CPM to run CRC and set bit in | 2038 | /* Ask CPM to run CRC and set bit in |
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 593c104ab199..7113168473cf 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * cdc_ncm.c | 2 | * cdc_ncm.c |
3 | * | 3 | * |
4 | * Copyright (C) ST-Ericsson 2010 | 4 | * Copyright (C) ST-Ericsson 2010-2011 |
5 | * Contact: Alexey Orishko <alexey.orishko@stericsson.com> | 5 | * Contact: Alexey Orishko <alexey.orishko@stericsson.com> |
6 | * Original author: Hans Petter Selasky <hans.petter.selasky@stericsson.com> | 6 | * Original author: Hans Petter Selasky <hans.petter.selasky@stericsson.com> |
7 | * | 7 | * |
@@ -54,7 +54,7 @@ | |||
54 | #include <linux/usb/usbnet.h> | 54 | #include <linux/usb/usbnet.h> |
55 | #include <linux/usb/cdc.h> | 55 | #include <linux/usb/cdc.h> |
56 | 56 | ||
57 | #define DRIVER_VERSION "30-Nov-2010" | 57 | #define DRIVER_VERSION "7-Feb-2011" |
58 | 58 | ||
59 | /* CDC NCM subclass 3.2.1 */ | 59 | /* CDC NCM subclass 3.2.1 */ |
60 | #define USB_CDC_NCM_NDP16_LENGTH_MIN 0x10 | 60 | #define USB_CDC_NCM_NDP16_LENGTH_MIN 0x10 |
@@ -77,6 +77,9 @@ | |||
77 | */ | 77 | */ |
78 | #define CDC_NCM_DPT_DATAGRAMS_MAX 32 | 78 | #define CDC_NCM_DPT_DATAGRAMS_MAX 32 |
79 | 79 | ||
80 | /* Maximum amount of IN datagrams in NTB */ | ||
81 | #define CDC_NCM_DPT_DATAGRAMS_IN_MAX 0 /* unlimited */ | ||
82 | |||
80 | /* Restart the timer, if amount of datagrams is less than given value */ | 83 | /* Restart the timer, if amount of datagrams is less than given value */ |
81 | #define CDC_NCM_RESTART_TIMER_DATAGRAM_CNT 3 | 84 | #define CDC_NCM_RESTART_TIMER_DATAGRAM_CNT 3 |
82 | 85 | ||
@@ -85,11 +88,6 @@ | |||
85 | (sizeof(struct usb_cdc_ncm_nth16) + sizeof(struct usb_cdc_ncm_ndp16) + \ | 88 | (sizeof(struct usb_cdc_ncm_nth16) + sizeof(struct usb_cdc_ncm_ndp16) + \ |
86 | (CDC_NCM_DPT_DATAGRAMS_MAX + 1) * sizeof(struct usb_cdc_ncm_dpe16)) | 89 | (CDC_NCM_DPT_DATAGRAMS_MAX + 1) * sizeof(struct usb_cdc_ncm_dpe16)) |
87 | 90 | ||
88 | struct connection_speed_change { | ||
89 | __le32 USBitRate; /* holds 3GPP downlink value, bits per second */ | ||
90 | __le32 DSBitRate; /* holds 3GPP uplink value, bits per second */ | ||
91 | } __attribute__ ((packed)); | ||
92 | |||
93 | struct cdc_ncm_data { | 91 | struct cdc_ncm_data { |
94 | struct usb_cdc_ncm_nth16 nth16; | 92 | struct usb_cdc_ncm_nth16 nth16; |
95 | struct usb_cdc_ncm_ndp16 ndp16; | 93 | struct usb_cdc_ncm_ndp16 ndp16; |
@@ -198,10 +196,10 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx) | |||
198 | { | 196 | { |
199 | struct usb_cdc_notification req; | 197 | struct usb_cdc_notification req; |
200 | u32 val; | 198 | u32 val; |
201 | __le16 max_datagram_size; | ||
202 | u8 flags; | 199 | u8 flags; |
203 | u8 iface_no; | 200 | u8 iface_no; |
204 | int err; | 201 | int err; |
202 | u16 ntb_fmt_supported; | ||
205 | 203 | ||
206 | iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber; | 204 | iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber; |
207 | 205 | ||
@@ -223,6 +221,9 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx) | |||
223 | ctx->tx_remainder = le16_to_cpu(ctx->ncm_parm.wNdpOutPayloadRemainder); | 221 | ctx->tx_remainder = le16_to_cpu(ctx->ncm_parm.wNdpOutPayloadRemainder); |
224 | ctx->tx_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutDivisor); | 222 | ctx->tx_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutDivisor); |
225 | ctx->tx_ndp_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutAlignment); | 223 | ctx->tx_ndp_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutAlignment); |
224 | /* devices prior to NCM Errata shall set this field to zero */ | ||
225 | ctx->tx_max_datagrams = le16_to_cpu(ctx->ncm_parm.wNtbOutMaxDatagrams); | ||
226 | ntb_fmt_supported = le16_to_cpu(ctx->ncm_parm.bmNtbFormatsSupported); | ||
226 | 227 | ||
227 | if (ctx->func_desc != NULL) | 228 | if (ctx->func_desc != NULL) |
228 | flags = ctx->func_desc->bmNetworkCapabilities; | 229 | flags = ctx->func_desc->bmNetworkCapabilities; |
@@ -231,22 +232,58 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx) | |||
231 | 232 | ||
232 | pr_debug("dwNtbInMaxSize=%u dwNtbOutMaxSize=%u " | 233 | pr_debug("dwNtbInMaxSize=%u dwNtbOutMaxSize=%u " |
233 | "wNdpOutPayloadRemainder=%u wNdpOutDivisor=%u " | 234 | "wNdpOutPayloadRemainder=%u wNdpOutDivisor=%u " |
234 | "wNdpOutAlignment=%u flags=0x%x\n", | 235 | "wNdpOutAlignment=%u wNtbOutMaxDatagrams=%u flags=0x%x\n", |
235 | ctx->rx_max, ctx->tx_max, ctx->tx_remainder, ctx->tx_modulus, | 236 | ctx->rx_max, ctx->tx_max, ctx->tx_remainder, ctx->tx_modulus, |
236 | ctx->tx_ndp_modulus, flags); | 237 | ctx->tx_ndp_modulus, ctx->tx_max_datagrams, flags); |
237 | 238 | ||
238 | /* max count of tx datagrams without terminating NULL entry */ | 239 | /* max count of tx datagrams */ |
239 | ctx->tx_max_datagrams = CDC_NCM_DPT_DATAGRAMS_MAX; | 240 | if ((ctx->tx_max_datagrams == 0) || |
241 | (ctx->tx_max_datagrams > CDC_NCM_DPT_DATAGRAMS_MAX)) | ||
242 | ctx->tx_max_datagrams = CDC_NCM_DPT_DATAGRAMS_MAX; | ||
240 | 243 | ||
241 | /* verify maximum size of received NTB in bytes */ | 244 | /* verify maximum size of received NTB in bytes */ |
242 | if ((ctx->rx_max < | 245 | if (ctx->rx_max < USB_CDC_NCM_NTB_MIN_IN_SIZE) { |
243 | (CDC_NCM_MIN_HDR_SIZE + CDC_NCM_MIN_DATAGRAM_SIZE)) || | 246 | pr_debug("Using min receive length=%d\n", |
244 | (ctx->rx_max > CDC_NCM_NTB_MAX_SIZE_RX)) { | 247 | USB_CDC_NCM_NTB_MIN_IN_SIZE); |
248 | ctx->rx_max = USB_CDC_NCM_NTB_MIN_IN_SIZE; | ||
249 | } | ||
250 | |||
251 | if (ctx->rx_max > CDC_NCM_NTB_MAX_SIZE_RX) { | ||
245 | pr_debug("Using default maximum receive length=%d\n", | 252 | pr_debug("Using default maximum receive length=%d\n", |
246 | CDC_NCM_NTB_MAX_SIZE_RX); | 253 | CDC_NCM_NTB_MAX_SIZE_RX); |
247 | ctx->rx_max = CDC_NCM_NTB_MAX_SIZE_RX; | 254 | ctx->rx_max = CDC_NCM_NTB_MAX_SIZE_RX; |
248 | } | 255 | } |
249 | 256 | ||
257 | /* inform device about NTB input size changes */ | ||
258 | if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) { | ||
259 | req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | | ||
260 | USB_RECIP_INTERFACE; | ||
261 | req.bNotificationType = USB_CDC_SET_NTB_INPUT_SIZE; | ||
262 | req.wValue = 0; | ||
263 | req.wIndex = cpu_to_le16(iface_no); | ||
264 | |||
265 | if (flags & USB_CDC_NCM_NCAP_NTB_INPUT_SIZE) { | ||
266 | struct usb_cdc_ncm_ndp_input_size ndp_in_sz; | ||
267 | |||
268 | req.wLength = 8; | ||
269 | ndp_in_sz.dwNtbInMaxSize = cpu_to_le32(ctx->rx_max); | ||
270 | ndp_in_sz.wNtbInMaxDatagrams = | ||
271 | cpu_to_le16(CDC_NCM_DPT_DATAGRAMS_MAX); | ||
272 | ndp_in_sz.wReserved = 0; | ||
273 | err = cdc_ncm_do_request(ctx, &req, &ndp_in_sz, 0, NULL, | ||
274 | 1000); | ||
275 | } else { | ||
276 | __le32 dwNtbInMaxSize = cpu_to_le32(ctx->rx_max); | ||
277 | |||
278 | req.wLength = 4; | ||
279 | err = cdc_ncm_do_request(ctx, &req, &dwNtbInMaxSize, 0, | ||
280 | NULL, 1000); | ||
281 | } | ||
282 | |||
283 | if (err) | ||
284 | pr_debug("Setting NTB Input Size failed\n"); | ||
285 | } | ||
286 | |||
250 | /* verify maximum size of transmitted NTB in bytes */ | 287 | /* verify maximum size of transmitted NTB in bytes */ |
251 | if ((ctx->tx_max < | 288 | if ((ctx->tx_max < |
252 | (CDC_NCM_MIN_HDR_SIZE + CDC_NCM_MIN_DATAGRAM_SIZE)) || | 289 | (CDC_NCM_MIN_HDR_SIZE + CDC_NCM_MIN_DATAGRAM_SIZE)) || |
@@ -297,47 +334,84 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx) | |||
297 | /* additional configuration */ | 334 | /* additional configuration */ |
298 | 335 | ||
299 | /* set CRC Mode */ | 336 | /* set CRC Mode */ |
300 | req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE; | 337 | if (flags & USB_CDC_NCM_NCAP_CRC_MODE) { |
301 | req.bNotificationType = USB_CDC_SET_CRC_MODE; | 338 | req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | |
302 | req.wValue = cpu_to_le16(USB_CDC_NCM_CRC_NOT_APPENDED); | 339 | USB_RECIP_INTERFACE; |
303 | req.wIndex = cpu_to_le16(iface_no); | 340 | req.bNotificationType = USB_CDC_SET_CRC_MODE; |
304 | req.wLength = 0; | 341 | req.wValue = cpu_to_le16(USB_CDC_NCM_CRC_NOT_APPENDED); |
305 | 342 | req.wIndex = cpu_to_le16(iface_no); | |
306 | err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000); | 343 | req.wLength = 0; |
307 | if (err) | 344 | |
308 | pr_debug("Setting CRC mode off failed\n"); | 345 | err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000); |
346 | if (err) | ||
347 | pr_debug("Setting CRC mode off failed\n"); | ||
348 | } | ||
309 | 349 | ||
310 | /* set NTB format */ | 350 | /* set NTB format, if both formats are supported */ |
311 | req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE; | 351 | if (ntb_fmt_supported & USB_CDC_NCM_NTH32_SIGN) { |
312 | req.bNotificationType = USB_CDC_SET_NTB_FORMAT; | 352 | req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | |
313 | req.wValue = cpu_to_le16(USB_CDC_NCM_NTB16_FORMAT); | 353 | USB_RECIP_INTERFACE; |
314 | req.wIndex = cpu_to_le16(iface_no); | 354 | req.bNotificationType = USB_CDC_SET_NTB_FORMAT; |
315 | req.wLength = 0; | 355 | req.wValue = cpu_to_le16(USB_CDC_NCM_NTB16_FORMAT); |
356 | req.wIndex = cpu_to_le16(iface_no); | ||
357 | req.wLength = 0; | ||
358 | |||
359 | err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000); | ||
360 | if (err) | ||
361 | pr_debug("Setting NTB format to 16-bit failed\n"); | ||
362 | } | ||
316 | 363 | ||
317 | err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000); | 364 | ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE; |
318 | if (err) | ||
319 | pr_debug("Setting NTB format to 16-bit failed\n"); | ||
320 | 365 | ||
321 | /* set Max Datagram Size (MTU) */ | 366 | /* set Max Datagram Size (MTU) */ |
322 | req.bmRequestType = USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE; | 367 | if (flags & USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE) { |
323 | req.bNotificationType = USB_CDC_GET_MAX_DATAGRAM_SIZE; | 368 | __le16 max_datagram_size; |
324 | req.wValue = 0; | 369 | u16 eth_max_sz = le16_to_cpu(ctx->ether_desc->wMaxSegmentSize); |
325 | req.wIndex = cpu_to_le16(iface_no); | 370 | |
326 | req.wLength = cpu_to_le16(2); | 371 | req.bmRequestType = USB_TYPE_CLASS | USB_DIR_IN | |
372 | USB_RECIP_INTERFACE; | ||
373 | req.bNotificationType = USB_CDC_GET_MAX_DATAGRAM_SIZE; | ||
374 | req.wValue = 0; | ||
375 | req.wIndex = cpu_to_le16(iface_no); | ||
376 | req.wLength = cpu_to_le16(2); | ||
377 | |||
378 | err = cdc_ncm_do_request(ctx, &req, &max_datagram_size, 0, NULL, | ||
379 | 1000); | ||
380 | if (err) { | ||
381 | pr_debug("GET_MAX_DATAGRAM_SIZE failed, use size=%u\n", | ||
382 | CDC_NCM_MIN_DATAGRAM_SIZE); | ||
383 | } else { | ||
384 | ctx->max_datagram_size = le16_to_cpu(max_datagram_size); | ||
385 | /* Check Eth descriptor value */ | ||
386 | if (eth_max_sz < CDC_NCM_MAX_DATAGRAM_SIZE) { | ||
387 | if (ctx->max_datagram_size > eth_max_sz) | ||
388 | ctx->max_datagram_size = eth_max_sz; | ||
389 | } else { | ||
390 | if (ctx->max_datagram_size > | ||
391 | CDC_NCM_MAX_DATAGRAM_SIZE) | ||
392 | ctx->max_datagram_size = | ||
393 | CDC_NCM_MAX_DATAGRAM_SIZE; | ||
394 | } | ||
327 | 395 | ||
328 | err = cdc_ncm_do_request(ctx, &req, &max_datagram_size, 0, NULL, 1000); | 396 | if (ctx->max_datagram_size < CDC_NCM_MIN_DATAGRAM_SIZE) |
329 | if (err) { | 397 | ctx->max_datagram_size = |
330 | pr_debug(" GET_MAX_DATAGRAM_SIZE failed, using size=%u\n", | 398 | CDC_NCM_MIN_DATAGRAM_SIZE; |
331 | CDC_NCM_MIN_DATAGRAM_SIZE); | 399 | |
332 | /* use default */ | 400 | /* if value changed, update device */ |
333 | ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE; | 401 | req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | |
334 | } else { | 402 | USB_RECIP_INTERFACE; |
335 | ctx->max_datagram_size = le16_to_cpu(max_datagram_size); | 403 | req.bNotificationType = USB_CDC_SET_MAX_DATAGRAM_SIZE; |
404 | req.wValue = 0; | ||
405 | req.wIndex = cpu_to_le16(iface_no); | ||
406 | req.wLength = 2; | ||
407 | max_datagram_size = cpu_to_le16(ctx->max_datagram_size); | ||
408 | |||
409 | err = cdc_ncm_do_request(ctx, &req, &max_datagram_size, | ||
410 | 0, NULL, 1000); | ||
411 | if (err) | ||
412 | pr_debug("SET_MAX_DATAGRAM_SIZE failed\n"); | ||
413 | } | ||
336 | 414 | ||
337 | if (ctx->max_datagram_size < CDC_NCM_MIN_DATAGRAM_SIZE) | ||
338 | ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE; | ||
339 | else if (ctx->max_datagram_size > CDC_NCM_MAX_DATAGRAM_SIZE) | ||
340 | ctx->max_datagram_size = CDC_NCM_MAX_DATAGRAM_SIZE; | ||
341 | } | 415 | } |
342 | 416 | ||
343 | if (ctx->netdev->mtu != (ctx->max_datagram_size - ETH_HLEN)) | 417 | if (ctx->netdev->mtu != (ctx->max_datagram_size - ETH_HLEN)) |
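A sketch of the datagram-size clamping logic in the hunk above (the CDC_NCM_* limits are the driver's own defines; the helper itself is hypothetical): the value returned by GET_MAX_DATAGRAM_SIZE is bounded by the Ethernet descriptor's wMaxSegmentSize and by the driver's floor and ceiling.

#include <linux/types.h>

static u16 clamp_max_datagram_size(u16 requested, u16 eth_max_sz)
{
	u16 ceiling = (eth_max_sz < CDC_NCM_MAX_DATAGRAM_SIZE) ?
			eth_max_sz : CDC_NCM_MAX_DATAGRAM_SIZE;
	u16 val = requested;

	if (val > ceiling)
		val = ceiling;
	if (val < CDC_NCM_MIN_DATAGRAM_SIZE)
		val = CDC_NCM_MIN_DATAGRAM_SIZE;

	return val;
}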
@@ -466,19 +540,13 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf) | |||
466 | 540 | ||
467 | ctx->ether_desc = | 541 | ctx->ether_desc = |
468 | (const struct usb_cdc_ether_desc *)buf; | 542 | (const struct usb_cdc_ether_desc *)buf; |
469 | |||
470 | dev->hard_mtu = | 543 | dev->hard_mtu = |
471 | le16_to_cpu(ctx->ether_desc->wMaxSegmentSize); | 544 | le16_to_cpu(ctx->ether_desc->wMaxSegmentSize); |
472 | 545 | ||
473 | if (dev->hard_mtu < | 546 | if (dev->hard_mtu < CDC_NCM_MIN_DATAGRAM_SIZE) |
474 | (CDC_NCM_MIN_DATAGRAM_SIZE - ETH_HLEN)) | 547 | dev->hard_mtu = CDC_NCM_MIN_DATAGRAM_SIZE; |
475 | dev->hard_mtu = | 548 | else if (dev->hard_mtu > CDC_NCM_MAX_DATAGRAM_SIZE) |
476 | CDC_NCM_MIN_DATAGRAM_SIZE - ETH_HLEN; | 549 | dev->hard_mtu = CDC_NCM_MAX_DATAGRAM_SIZE; |
477 | |||
478 | else if (dev->hard_mtu > | ||
479 | (CDC_NCM_MAX_DATAGRAM_SIZE - ETH_HLEN)) | ||
480 | dev->hard_mtu = | ||
481 | CDC_NCM_MAX_DATAGRAM_SIZE - ETH_HLEN; | ||
482 | break; | 550 | break; |
483 | 551 | ||
484 | case USB_CDC_NCM_TYPE: | 552 | case USB_CDC_NCM_TYPE: |
@@ -628,13 +696,13 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb) | |||
628 | u32 offset; | 696 | u32 offset; |
629 | u32 last_offset; | 697 | u32 last_offset; |
630 | u16 n = 0; | 698 | u16 n = 0; |
631 | u8 timeout = 0; | 699 | u8 ready2send = 0; |
632 | 700 | ||
633 | /* if there is a remaining skb, it gets priority */ | 701 | /* if there is a remaining skb, it gets priority */ |
634 | if (skb != NULL) | 702 | if (skb != NULL) |
635 | swap(skb, ctx->tx_rem_skb); | 703 | swap(skb, ctx->tx_rem_skb); |
636 | else | 704 | else |
637 | timeout = 1; | 705 | ready2send = 1; |
638 | 706 | ||
639 | /* | 707 | /* |
640 | * +----------------+ | 708 | * +----------------+ |
@@ -682,9 +750,10 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb) | |||
682 | 750 | ||
683 | for (; n < ctx->tx_max_datagrams; n++) { | 751 | for (; n < ctx->tx_max_datagrams; n++) { |
684 | /* check if end of transmit buffer is reached */ | 752 | /* check if end of transmit buffer is reached */ |
685 | if (offset >= ctx->tx_max) | 753 | if (offset >= ctx->tx_max) { |
754 | ready2send = 1; | ||
686 | break; | 755 | break; |
687 | 756 | } | |
688 | /* compute maximum buffer size */ | 757 | /* compute maximum buffer size */ |
689 | rem = ctx->tx_max - offset; | 758 | rem = ctx->tx_max - offset; |
690 | 759 | ||
@@ -711,9 +780,7 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb) | |||
711 | } | 780 | } |
712 | ctx->tx_rem_skb = skb; | 781 | ctx->tx_rem_skb = skb; |
713 | skb = NULL; | 782 | skb = NULL; |
714 | 783 | ready2send = 1; | |
715 | /* loop one more time */ | ||
716 | timeout = 1; | ||
717 | } | 784 | } |
718 | break; | 785 | break; |
719 | } | 786 | } |
@@ -756,7 +823,7 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb) | |||
756 | ctx->tx_curr_last_offset = last_offset; | 823 | ctx->tx_curr_last_offset = last_offset; |
757 | goto exit_no_skb; | 824 | goto exit_no_skb; |
758 | 825 | ||
759 | } else if ((n < ctx->tx_max_datagrams) && (timeout == 0)) { | 826 | } else if ((n < ctx->tx_max_datagrams) && (ready2send == 0)) { |
760 | /* wait for more frames */ | 827 | /* wait for more frames */ |
761 | /* push variables */ | 828 | /* push variables */ |
762 | ctx->tx_curr_skb = skb_out; | 829 | ctx->tx_curr_skb = skb_out; |
@@ -813,7 +880,7 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb) | |||
813 | cpu_to_le16(sizeof(ctx->tx_ncm.nth16)); | 880 | cpu_to_le16(sizeof(ctx->tx_ncm.nth16)); |
814 | ctx->tx_ncm.nth16.wSequence = cpu_to_le16(ctx->tx_seq); | 881 | ctx->tx_ncm.nth16.wSequence = cpu_to_le16(ctx->tx_seq); |
815 | ctx->tx_ncm.nth16.wBlockLength = cpu_to_le16(last_offset); | 882 | ctx->tx_ncm.nth16.wBlockLength = cpu_to_le16(last_offset); |
816 | ctx->tx_ncm.nth16.wFpIndex = ALIGN(sizeof(struct usb_cdc_ncm_nth16), | 883 | ctx->tx_ncm.nth16.wNdpIndex = ALIGN(sizeof(struct usb_cdc_ncm_nth16), |
817 | ctx->tx_ndp_modulus); | 884 | ctx->tx_ndp_modulus); |
818 | 885 | ||
819 | memcpy(skb_out->data, &(ctx->tx_ncm.nth16), sizeof(ctx->tx_ncm.nth16)); | 886 | memcpy(skb_out->data, &(ctx->tx_ncm.nth16), sizeof(ctx->tx_ncm.nth16)); |
@@ -825,13 +892,13 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb) | |||
825 | rem = sizeof(ctx->tx_ncm.ndp16) + ((ctx->tx_curr_frame_num + 1) * | 892 | rem = sizeof(ctx->tx_ncm.ndp16) + ((ctx->tx_curr_frame_num + 1) * |
826 | sizeof(struct usb_cdc_ncm_dpe16)); | 893 | sizeof(struct usb_cdc_ncm_dpe16)); |
827 | ctx->tx_ncm.ndp16.wLength = cpu_to_le16(rem); | 894 | ctx->tx_ncm.ndp16.wLength = cpu_to_le16(rem); |
828 | ctx->tx_ncm.ndp16.wNextFpIndex = 0; /* reserved */ | 895 | ctx->tx_ncm.ndp16.wNextNdpIndex = 0; /* reserved */ |
829 | 896 | ||
830 | memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wFpIndex, | 897 | memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wNdpIndex, |
831 | &(ctx->tx_ncm.ndp16), | 898 | &(ctx->tx_ncm.ndp16), |
832 | sizeof(ctx->tx_ncm.ndp16)); | 899 | sizeof(ctx->tx_ncm.ndp16)); |
833 | 900 | ||
834 | memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wFpIndex + | 901 | memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wNdpIndex + |
835 | sizeof(ctx->tx_ncm.ndp16), | 902 | sizeof(ctx->tx_ncm.ndp16), |
836 | &(ctx->tx_ncm.dpe16), | 903 | &(ctx->tx_ncm.dpe16), |
837 | (ctx->tx_curr_frame_num + 1) * | 904 | (ctx->tx_curr_frame_num + 1) * |
@@ -868,15 +935,19 @@ static void cdc_ncm_tx_timeout(unsigned long arg) | |||
868 | if (ctx->tx_timer_pending != 0) { | 935 | if (ctx->tx_timer_pending != 0) { |
869 | ctx->tx_timer_pending--; | 936 | ctx->tx_timer_pending--; |
870 | restart = 1; | 937 | restart = 1; |
871 | } else | 938 | } else { |
872 | restart = 0; | 939 | restart = 0; |
940 | } | ||
873 | 941 | ||
874 | spin_unlock(&ctx->mtx); | 942 | spin_unlock(&ctx->mtx); |
875 | 943 | ||
876 | if (restart) | 944 | if (restart) { |
945 | spin_lock(&ctx->mtx); | ||
877 | cdc_ncm_tx_timeout_start(ctx); | 946 | cdc_ncm_tx_timeout_start(ctx); |
878 | else if (ctx->netdev != NULL) | 947 | spin_unlock(&ctx->mtx); |
948 | } else if (ctx->netdev != NULL) { | ||
879 | usbnet_start_xmit(NULL, ctx->netdev); | 949 | usbnet_start_xmit(NULL, ctx->netdev); |
950 | } | ||
880 | } | 951 | } |
881 | 952 | ||
882 | static struct sk_buff * | 953 | static struct sk_buff * |
@@ -900,7 +971,6 @@ cdc_ncm_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) | |||
900 | skb_out = cdc_ncm_fill_tx_frame(ctx, skb); | 971 | skb_out = cdc_ncm_fill_tx_frame(ctx, skb); |
901 | if (ctx->tx_curr_skb != NULL) | 972 | if (ctx->tx_curr_skb != NULL) |
902 | need_timer = 1; | 973 | need_timer = 1; |
903 | spin_unlock(&ctx->mtx); | ||
904 | 974 | ||
905 | /* Start timer, if there is a remaining skb */ | 975 | /* Start timer, if there is a remaining skb */ |
906 | if (need_timer) | 976 | if (need_timer) |
@@ -908,6 +978,8 @@ cdc_ncm_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) | |||
908 | 978 | ||
909 | if (skb_out) | 979 | if (skb_out) |
910 | dev->net->stats.tx_packets += ctx->tx_curr_frame_num; | 980 | dev->net->stats.tx_packets += ctx->tx_curr_frame_num; |
981 | |||
982 | spin_unlock(&ctx->mtx); | ||
911 | return skb_out; | 983 | return skb_out; |
912 | 984 | ||
913 | error: | 985 | error: |
@@ -956,7 +1028,7 @@ static int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in) | |||
956 | goto error; | 1028 | goto error; |
957 | } | 1029 | } |
958 | 1030 | ||
959 | temp = le16_to_cpu(ctx->rx_ncm.nth16.wFpIndex); | 1031 | temp = le16_to_cpu(ctx->rx_ncm.nth16.wNdpIndex); |
960 | if ((temp + sizeof(ctx->rx_ncm.ndp16)) > actlen) { | 1032 | if ((temp + sizeof(ctx->rx_ncm.ndp16)) > actlen) { |
961 | pr_debug("invalid DPT16 index\n"); | 1033 | pr_debug("invalid DPT16 index\n"); |
962 | goto error; | 1034 | goto error; |
@@ -1020,14 +1092,16 @@ static int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in) | |||
1020 | if (((offset + temp) > actlen) || | 1092 | if (((offset + temp) > actlen) || |
1021 | (temp > CDC_NCM_MAX_DATAGRAM_SIZE) || (temp < ETH_HLEN)) { | 1093 | (temp > CDC_NCM_MAX_DATAGRAM_SIZE) || (temp < ETH_HLEN)) { |
1022 | pr_debug("invalid frame detected (ignored)" | 1094 | pr_debug("invalid frame detected (ignored)" |
1023 | "offset[%u]=%u, length=%u, skb=%p\n", | 1095 | "offset[%u]=%u, length=%u, skb=%p\n", |
1024 | x, offset, temp, skb); | 1096 | x, offset, temp, skb_in); |
1025 | if (!x) | 1097 | if (!x) |
1026 | goto error; | 1098 | goto error; |
1027 | break; | 1099 | break; |
1028 | 1100 | ||
1029 | } else { | 1101 | } else { |
1030 | skb = skb_clone(skb_in, GFP_ATOMIC); | 1102 | skb = skb_clone(skb_in, GFP_ATOMIC); |
1103 | if (!skb) | ||
1104 | goto error; | ||
1031 | skb->len = temp; | 1105 | skb->len = temp; |
1032 | skb->data = ((u8 *)skb_in->data) + offset; | 1106 | skb->data = ((u8 *)skb_in->data) + offset; |
1033 | skb_set_tail_pointer(skb, temp); | 1107 | skb_set_tail_pointer(skb, temp); |
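A sketch of the defensive-clone pattern the hunk above adds: skb_clone() can fail under memory pressure, so its return value must be checked before the clone's length and data pointer are adjusted. The wrapper is hypothetical; the field adjustments mirror the driver's own.

#include <linux/skbuff.h>

static struct sk_buff *clone_datagram(struct sk_buff *skb_in,
				      unsigned int offset, unsigned int len)
{
	struct sk_buff *skb = skb_clone(skb_in, GFP_ATOMIC);

	if (!skb)
		return NULL;	/* caller treats this as a receive error */

	skb->len = len;
	skb->data = ((u8 *)skb_in->data) + offset;
	skb_set_tail_pointer(skb, len);
	return skb;
}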
@@ -1041,10 +1115,10 @@ error: | |||
1041 | 1115 | ||
1042 | static void | 1116 | static void |
1043 | cdc_ncm_speed_change(struct cdc_ncm_ctx *ctx, | 1117 | cdc_ncm_speed_change(struct cdc_ncm_ctx *ctx, |
1044 | struct connection_speed_change *data) | 1118 | struct usb_cdc_speed_change *data) |
1045 | { | 1119 | { |
1046 | uint32_t rx_speed = le32_to_cpu(data->USBitRate); | 1120 | uint32_t rx_speed = le32_to_cpu(data->DLBitRRate); |
1047 | uint32_t tx_speed = le32_to_cpu(data->DSBitRate); | 1121 | uint32_t tx_speed = le32_to_cpu(data->ULBitRate); |
1048 | 1122 | ||
1049 | /* | 1123 | /* |
1050 | * Currently the USB-NET API does not support reporting the actual | 1124 | * Currently the USB-NET API does not support reporting the actual |
@@ -1085,7 +1159,7 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb) | |||
1085 | /* test for split data in 8-byte chunks */ | 1159 | /* test for split data in 8-byte chunks */ |
1086 | if (test_and_clear_bit(EVENT_STS_SPLIT, &dev->flags)) { | 1160 | if (test_and_clear_bit(EVENT_STS_SPLIT, &dev->flags)) { |
1087 | cdc_ncm_speed_change(ctx, | 1161 | cdc_ncm_speed_change(ctx, |
1088 | (struct connection_speed_change *)urb->transfer_buffer); | 1162 | (struct usb_cdc_speed_change *)urb->transfer_buffer); |
1089 | return; | 1163 | return; |
1090 | } | 1164 | } |
1091 | 1165 | ||
@@ -1113,12 +1187,12 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb) | |||
1113 | break; | 1187 | break; |
1114 | 1188 | ||
1115 | case USB_CDC_NOTIFY_SPEED_CHANGE: | 1189 | case USB_CDC_NOTIFY_SPEED_CHANGE: |
1116 | if (urb->actual_length < | 1190 | if (urb->actual_length < (sizeof(*event) + |
1117 | (sizeof(*event) + sizeof(struct connection_speed_change))) | 1191 | sizeof(struct usb_cdc_speed_change))) |
1118 | set_bit(EVENT_STS_SPLIT, &dev->flags); | 1192 | set_bit(EVENT_STS_SPLIT, &dev->flags); |
1119 | else | 1193 | else |
1120 | cdc_ncm_speed_change(ctx, | 1194 | cdc_ncm_speed_change(ctx, |
1121 | (struct connection_speed_change *) &event[1]); | 1195 | (struct usb_cdc_speed_change *) &event[1]); |
1122 | break; | 1196 | break; |
1123 | 1197 | ||
1124 | default: | 1198 | default: |
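The cdc_ncm change above guards the skb_clone() call: GFP_ATOMIC allocations can fail, and the old loop would have dereferenced a NULL clone. A minimal sketch of the pattern, assuming the caller has already validated offset and length against the received buffer, and using the exported usbnet_skb_return() as the hand-off:

    /* Sketch only: hand one datagram out of a larger rx buffer to the stack.
     * Assumes offset/len were validated by the caller. */
    #include <linux/errno.h>
    #include <linux/skbuff.h>
    #include <linux/usb/usbnet.h>

    static int deliver_datagram(struct usbnet *dev, struct sk_buff *skb_in,
                                unsigned int offset, unsigned int len)
    {
            struct sk_buff *skb;

            skb = skb_clone(skb_in, GFP_ATOMIC);     /* can fail in atomic context */
            if (!skb)
                    return -ENOMEM;                  /* the fix: bail out, don't deref NULL */

            skb->data = skb_in->data + offset;       /* point the clone at this datagram */
            skb->len = len;
            skb_set_tail_pointer(skb, len);
            usbnet_skb_return(dev, skb);             /* clone is consumed here */
            return 0;
    }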
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c index 02b622e3b9fb..5002f5be47be 100644 --- a/drivers/net/usb/dm9601.c +++ b/drivers/net/usb/dm9601.c | |||
@@ -651,6 +651,10 @@ static const struct usb_device_id products[] = { | |||
651 | .driver_info = (unsigned long)&dm9601_info, | 651 | .driver_info = (unsigned long)&dm9601_info, |
652 | }, | 652 | }, |
653 | { | 653 | { |
654 | USB_DEVICE(0x0fe6, 0x9700), /* DM9601 USB to Fast Ethernet Adapter */ | ||
655 | .driver_info = (unsigned long)&dm9601_info, | ||
656 | }, | ||
657 | { | ||
654 | USB_DEVICE(0x0a46, 0x9000), /* DM9000E */ | 658 | USB_DEVICE(0x0a46, 0x9000), /* DM9000E */ |
655 | .driver_info = (unsigned long)&dm9601_info, | 659 | .driver_info = (unsigned long)&dm9601_info, |
656 | }, | 660 | }, |
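The dm9601 hunk only appends a new vendor/product pair to the driver's usb_device_id table; the USB core then binds the existing driver to that hardware. A self-contained sketch of such a table entry, where example_info is a stand-in for the driver's real dm9601_info:

    /* Sketch: extending a usb_device_id table; example_info stands in for
     * the driver's dm9601_info. */
    #include <linux/module.h>
    #include <linux/usb.h>
    #include <linux/usb/usbnet.h>

    static const struct driver_info example_info = {
            .description = "DM9601 USB to Fast Ethernet",
    };

    static const struct usb_device_id example_ids[] = {
            {
                    USB_DEVICE(0x0fe6, 0x9700),      /* pair added by the patch */
                    .driver_info = (unsigned long)&example_info,
            },
            { }                                      /* terminating entry */
    };
    MODULE_DEVICE_TABLE(usb, example_ids);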
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index bed8fcedff49..6d83812603b6 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c | |||
@@ -2628,15 +2628,15 @@ exit: | |||
2628 | 2628 | ||
2629 | static void hso_free_tiomget(struct hso_serial *serial) | 2629 | static void hso_free_tiomget(struct hso_serial *serial) |
2630 | { | 2630 | { |
2631 | struct hso_tiocmget *tiocmget = serial->tiocmget; | 2631 | struct hso_tiocmget *tiocmget; |
2632 | if (!serial) | ||
2633 | return; | ||
2634 | tiocmget = serial->tiocmget; | ||
2632 | if (tiocmget) { | 2635 | if (tiocmget) { |
2633 | if (tiocmget->urb) { | 2636 | usb_free_urb(tiocmget->urb); |
2634 | usb_free_urb(tiocmget->urb); | 2637 | tiocmget->urb = NULL; |
2635 | tiocmget->urb = NULL; | ||
2636 | } | ||
2637 | serial->tiocmget = NULL; | 2638 | serial->tiocmget = NULL; |
2638 | kfree(tiocmget); | 2639 | kfree(tiocmget); |
2639 | |||
2640 | } | 2640 | } |
2641 | } | 2641 | } |
2642 | 2642 | ||
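The hso fix reorders the NULL check: the original initializer dereferenced serial before testing it, and the inner urb check was redundant because usb_free_urb() ignores NULL. A sketch of the corrected ordering with stand-in types (the real ones are private to hso.c):

    /* Sketch of the corrected ordering with stand-in types. */
    #include <linux/slab.h>
    #include <linux/usb.h>

    struct tiocmget_ex { struct urb *urb; };
    struct serial_ex   { struct tiocmget_ex *tiocmget; };

    static void free_tiomget_ex(struct serial_ex *serial)
    {
            struct tiocmget_ex *tiocmget;

            if (!serial)                      /* test the pointer first ... */
                    return;
            tiocmget = serial->tiocmget;      /* ... dereference it second */
            if (tiocmget) {
                    usb_free_urb(tiocmget->urb);   /* usb_free_urb(NULL) is a no-op */
                    serial->tiocmget = NULL;
                    kfree(tiocmget);
            }
    }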
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c index 5e98643a4a21..7dc84971f26f 100644 --- a/drivers/net/usb/kaweth.c +++ b/drivers/net/usb/kaweth.c | |||
@@ -406,6 +406,7 @@ static int kaweth_download_firmware(struct kaweth_device *kaweth, | |||
406 | 406 | ||
407 | if (fw->size > KAWETH_FIRMWARE_BUF_SIZE) { | 407 | if (fw->size > KAWETH_FIRMWARE_BUF_SIZE) { |
408 | err("Firmware too big: %zu", fw->size); | 408 | err("Firmware too big: %zu", fw->size); |
409 | release_firmware(fw); | ||
409 | return -ENOSPC; | 410 | return -ENOSPC; |
410 | } | 411 | } |
411 | data_len = fw->size; | 412 | data_len = fw->size; |
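The kaweth hunk plugs a firmware leak: once request_firmware() has succeeded, every exit path, including the "too big" rejection, must call release_firmware(). A hedged sketch of the pattern ("example.fw" and buf_size are placeholders):

    /* Sketch: release the firmware on every path once request_firmware()
     * has succeeded. "example.fw" and buf_size are placeholders. */
    #include <linux/device.h>
    #include <linux/errno.h>
    #include <linux/firmware.h>

    static int load_fw_example(struct device *dev, size_t buf_size)
    {
            const struct firmware *fw;
            int ret;

            ret = request_firmware(&fw, "example.fw", dev);
            if (ret)
                    return ret;

            if (fw->size > buf_size) {
                    release_firmware(fw);     /* the fix: don't leak fw here */
                    return -ENOSPC;
            }

            /* ... copy fw->data (fw->size bytes) into the device buffer ... */
            release_firmware(fw);
            return 0;
    }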
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index ed9a41643ff4..95c41d56631c 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c | |||
@@ -931,8 +931,10 @@ fail_halt: | |||
931 | if (urb != NULL) { | 931 | if (urb != NULL) { |
932 | clear_bit (EVENT_RX_MEMORY, &dev->flags); | 932 | clear_bit (EVENT_RX_MEMORY, &dev->flags); |
933 | status = usb_autopm_get_interface(dev->intf); | 933 | status = usb_autopm_get_interface(dev->intf); |
934 | if (status < 0) | 934 | if (status < 0) { |
935 | usb_free_urb(urb); | ||
935 | goto fail_lowmem; | 936 | goto fail_lowmem; |
937 | } | ||
936 | if (rx_submit (dev, urb, GFP_KERNEL) == -ENOLINK) | 938 | if (rx_submit (dev, urb, GFP_KERNEL) == -ENOLINK) |
937 | resched = 0; | 939 | resched = 0; |
938 | usb_autopm_put_interface(dev->intf); | 940 | usb_autopm_put_interface(dev->intf); |
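The usbnet change frees the URB that was allocated just before usb_autopm_get_interface(); without it the URB leaked whenever the runtime-PM get failed. A sketch of the pattern as a standalone helper (the real code lives in the driver's deferred work handler, and rx_submit() is its own static helper):

    /* Sketch: free the freshly allocated URB when the runtime-PM get fails. */
    #include <linux/errno.h>
    #include <linux/usb.h>
    #include <linux/usb/usbnet.h>

    static int queue_rx_example(struct usbnet *dev)
    {
            struct urb *urb = usb_alloc_urb(0, GFP_KERNEL);
            int status;

            if (!urb)
                    return -ENOMEM;

            status = usb_autopm_get_interface(dev->intf);
            if (status < 0) {
                    usb_free_urb(urb);        /* the fix: release the unused URB */
                    return status;
            }
            /* ... fill and submit the URB (rx_submit() in the real driver) ... */
            usb_autopm_put_interface(dev->intf);
            return 0;
    }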
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c index cab96ad49e60..09cac704fdd7 100644 --- a/drivers/net/via-velocity.c +++ b/drivers/net/via-velocity.c | |||
@@ -898,7 +898,7 @@ static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status) | |||
898 | set_mii_flow_control(vptr); | 898 | set_mii_flow_control(vptr); |
899 | 899 | ||
900 | /* | 900 | /* |
901 | Check if new status is consisent with current status | 901 | Check if new status is consistent with current status |
902 | if (((mii_status & curr_status) & VELOCITY_AUTONEG_ENABLE) || | 902 | if (((mii_status & curr_status) & VELOCITY_AUTONEG_ENABLE) || |
903 | (mii_status==curr_status)) { | 903 | (mii_status==curr_status)) { |
904 | vptr->mii_status=mii_check_media_mode(vptr->mac_regs); | 904 | vptr->mii_status=mii_check_media_mode(vptr->mac_regs); |
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 90a23e410d1b..82dba5aaf423 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
@@ -446,6 +446,20 @@ static void skb_recv_done(struct virtqueue *rvq) | |||
446 | } | 446 | } |
447 | } | 447 | } |
448 | 448 | ||
449 | static void virtnet_napi_enable(struct virtnet_info *vi) | ||
450 | { | ||
451 | napi_enable(&vi->napi); | ||
452 | |||
453 | /* If all buffers were filled by other side before we napi_enabled, we | ||
454 | * won't get another interrupt, so process any outstanding packets | ||
455 | * now. virtnet_poll wants re-enable the queue, so we disable here. | ||
456 | * We synchronize against interrupts via NAPI_STATE_SCHED */ | ||
457 | if (napi_schedule_prep(&vi->napi)) { | ||
458 | virtqueue_disable_cb(vi->rvq); | ||
459 | __napi_schedule(&vi->napi); | ||
460 | } | ||
461 | } | ||
462 | |||
449 | static void refill_work(struct work_struct *work) | 463 | static void refill_work(struct work_struct *work) |
450 | { | 464 | { |
451 | struct virtnet_info *vi; | 465 | struct virtnet_info *vi; |
@@ -454,7 +468,7 @@ static void refill_work(struct work_struct *work) | |||
454 | vi = container_of(work, struct virtnet_info, refill.work); | 468 | vi = container_of(work, struct virtnet_info, refill.work); |
455 | napi_disable(&vi->napi); | 469 | napi_disable(&vi->napi); |
456 | still_empty = !try_fill_recv(vi, GFP_KERNEL); | 470 | still_empty = !try_fill_recv(vi, GFP_KERNEL); |
457 | napi_enable(&vi->napi); | 471 | virtnet_napi_enable(vi); |
458 | 472 | ||
459 | /* In theory, this can happen: if we don't get any buffers in | 473 | /* In theory, this can happen: if we don't get any buffers in |
460 | * we will *never* try to fill again. */ | 474 | * we will *never* try to fill again. */ |
@@ -638,16 +652,7 @@ static int virtnet_open(struct net_device *dev) | |||
638 | { | 652 | { |
639 | struct virtnet_info *vi = netdev_priv(dev); | 653 | struct virtnet_info *vi = netdev_priv(dev); |
640 | 654 | ||
641 | napi_enable(&vi->napi); | 655 | virtnet_napi_enable(vi); |
642 | |||
643 | /* If all buffers were filled by other side before we napi_enabled, we | ||
644 | * won't get another interrupt, so process any outstanding packets | ||
645 | * now. virtnet_poll wants re-enable the queue, so we disable here. | ||
646 | * We synchronize against interrupts via NAPI_STATE_SCHED */ | ||
647 | if (napi_schedule_prep(&vi->napi)) { | ||
648 | virtqueue_disable_cb(vi->rvq); | ||
649 | __napi_schedule(&vi->napi); | ||
650 | } | ||
651 | return 0; | 656 | return 0; |
652 | } | 657 | } |
653 | 658 | ||
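The virtio_net refactor moves the "enable NAPI, then kick it once in case the host filled buffers while NAPI was off" sequence into virtnet_napi_enable(), shared by virtnet_open() and refill_work(). The generic shape of the pattern is sketched below; the driver's version additionally masks virtqueue callbacks with virtqueue_disable_cb() before scheduling:

    /* Generic shape of the helper; NAPI_STATE_SCHED serializes the kick
     * against the interrupt handler. */
    #include <linux/netdevice.h>

    static void napi_enable_and_kick(struct napi_struct *napi)
    {
            napi_enable(napi);
            if (napi_schedule_prep(napi))
                    __napi_schedule(napi);    /* run the poll routine once */
    }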
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index d143e8b72b5b..cc14b4a75048 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c | |||
@@ -48,6 +48,9 @@ static atomic_t devices_found; | |||
48 | static int enable_mq = 1; | 48 | static int enable_mq = 1; |
49 | static int irq_share_mode; | 49 | static int irq_share_mode; |
50 | 50 | ||
51 | static void | ||
52 | vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac); | ||
53 | |||
51 | /* | 54 | /* |
52 | * Enable/Disable the given intr | 55 | * Enable/Disable the given intr |
53 | */ | 56 | */ |
@@ -139,9 +142,13 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue) | |||
139 | { | 142 | { |
140 | u32 ret; | 143 | u32 ret; |
141 | int i; | 144 | int i; |
145 | unsigned long flags; | ||
142 | 146 | ||
147 | spin_lock_irqsave(&adapter->cmd_lock, flags); | ||
143 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK); | 148 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK); |
144 | ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); | 149 | ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); |
150 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); | ||
151 | |||
145 | adapter->link_speed = ret >> 16; | 152 | adapter->link_speed = ret >> 16; |
146 | if (ret & 1) { /* Link is up. */ | 153 | if (ret & 1) { /* Link is up. */ |
147 | printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n", | 154 | printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n", |
@@ -183,8 +190,10 @@ vmxnet3_process_events(struct vmxnet3_adapter *adapter) | |||
183 | 190 | ||
184 | /* Check if there is an error on xmit/recv queues */ | 191 | /* Check if there is an error on xmit/recv queues */ |
185 | if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) { | 192 | if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) { |
193 | spin_lock(&adapter->cmd_lock); | ||
186 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | 194 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, |
187 | VMXNET3_CMD_GET_QUEUE_STATUS); | 195 | VMXNET3_CMD_GET_QUEUE_STATUS); |
196 | spin_unlock(&adapter->cmd_lock); | ||
188 | 197 | ||
189 | for (i = 0; i < adapter->num_tx_queues; i++) | 198 | for (i = 0; i < adapter->num_tx_queues; i++) |
190 | if (adapter->tqd_start[i].status.stopped) | 199 | if (adapter->tqd_start[i].status.stopped) |
@@ -804,30 +813,25 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, | |||
804 | skb_transport_header(skb))->doff * 4; | 813 | skb_transport_header(skb))->doff * 4; |
805 | ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size; | 814 | ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size; |
806 | } else { | 815 | } else { |
807 | unsigned int pull_size; | ||
808 | |||
809 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 816 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
810 | ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb); | 817 | ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb); |
811 | 818 | ||
812 | if (ctx->ipv4) { | 819 | if (ctx->ipv4) { |
813 | struct iphdr *iph = (struct iphdr *) | 820 | struct iphdr *iph = (struct iphdr *) |
814 | skb_network_header(skb); | 821 | skb_network_header(skb); |
815 | if (iph->protocol == IPPROTO_TCP) { | 822 | if (iph->protocol == IPPROTO_TCP) |
816 | pull_size = ctx->eth_ip_hdr_size + | ||
817 | sizeof(struct tcphdr); | ||
818 | |||
819 | if (unlikely(!pskb_may_pull(skb, | ||
820 | pull_size))) { | ||
821 | goto err; | ||
822 | } | ||
823 | ctx->l4_hdr_size = ((struct tcphdr *) | 823 | ctx->l4_hdr_size = ((struct tcphdr *) |
824 | skb_transport_header(skb))->doff * 4; | 824 | skb_transport_header(skb))->doff * 4; |
825 | } else if (iph->protocol == IPPROTO_UDP) { | 825 | else if (iph->protocol == IPPROTO_UDP) |
826 | /* | ||
827 | * Use tcp header size so that bytes to | ||
828 | * be copied are more than required by | ||
829 | * the device. | ||
830 | */ | ||
826 | ctx->l4_hdr_size = | 831 | ctx->l4_hdr_size = |
827 | sizeof(struct udphdr); | 832 | sizeof(struct tcphdr); |
828 | } else { | 833 | else |
829 | ctx->l4_hdr_size = 0; | 834 | ctx->l4_hdr_size = 0; |
830 | } | ||
831 | } else { | 835 | } else { |
832 | /* for simplicity, don't copy L4 headers */ | 836 | /* for simplicity, don't copy L4 headers */ |
833 | ctx->l4_hdr_size = 0; | 837 | ctx->l4_hdr_size = 0; |
@@ -1859,18 +1863,14 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) | |||
1859 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | 1863 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); |
1860 | struct Vmxnet3_DriverShared *shared = adapter->shared; | 1864 | struct Vmxnet3_DriverShared *shared = adapter->shared; |
1861 | u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; | 1865 | u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; |
1866 | unsigned long flags; | ||
1862 | 1867 | ||
1863 | if (grp) { | 1868 | if (grp) { |
1864 | /* add vlan rx stripping. */ | 1869 | /* add vlan rx stripping. */ |
1865 | if (adapter->netdev->features & NETIF_F_HW_VLAN_RX) { | 1870 | if (adapter->netdev->features & NETIF_F_HW_VLAN_RX) { |
1866 | int i; | 1871 | int i; |
1867 | struct Vmxnet3_DSDevRead *devRead = &shared->devRead; | ||
1868 | adapter->vlan_grp = grp; | 1872 | adapter->vlan_grp = grp; |
1869 | 1873 | ||
1870 | /* update FEATURES to device */ | ||
1871 | devRead->misc.uptFeatures |= UPT1_F_RXVLAN; | ||
1872 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | ||
1873 | VMXNET3_CMD_UPDATE_FEATURE); | ||
1874 | /* | 1874 | /* |
1875 | * Clear entire vfTable; then enable untagged pkts. | 1875 | * Clear entire vfTable; then enable untagged pkts. |
1876 | * Note: setting one entry in vfTable to non-zero turns | 1876 | * Note: setting one entry in vfTable to non-zero turns |
@@ -1880,8 +1880,10 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) | |||
1880 | vfTable[i] = 0; | 1880 | vfTable[i] = 0; |
1881 | 1881 | ||
1882 | VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0); | 1882 | VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0); |
1883 | spin_lock_irqsave(&adapter->cmd_lock, flags); | ||
1883 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | 1884 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, |
1884 | VMXNET3_CMD_UPDATE_VLAN_FILTERS); | 1885 | VMXNET3_CMD_UPDATE_VLAN_FILTERS); |
1886 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); | ||
1885 | } else { | 1887 | } else { |
1886 | printk(KERN_ERR "%s: vlan_rx_register when device has " | 1888 | printk(KERN_ERR "%s: vlan_rx_register when device has " |
1887 | "no NETIF_F_HW_VLAN_RX\n", netdev->name); | 1889 | "no NETIF_F_HW_VLAN_RX\n", netdev->name); |
@@ -1900,13 +1902,10 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) | |||
1900 | */ | 1902 | */ |
1901 | vfTable[i] = 0; | 1903 | vfTable[i] = 0; |
1902 | } | 1904 | } |
1905 | spin_lock_irqsave(&adapter->cmd_lock, flags); | ||
1903 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | 1906 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, |
1904 | VMXNET3_CMD_UPDATE_VLAN_FILTERS); | 1907 | VMXNET3_CMD_UPDATE_VLAN_FILTERS); |
1905 | 1908 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); | |
1906 | /* update FEATURES to device */ | ||
1907 | devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN; | ||
1908 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | ||
1909 | VMXNET3_CMD_UPDATE_FEATURE); | ||
1910 | } | 1909 | } |
1911 | } | 1910 | } |
1912 | } | 1911 | } |
@@ -1939,10 +1938,13 @@ vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid) | |||
1939 | { | 1938 | { |
1940 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | 1939 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); |
1941 | u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; | 1940 | u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; |
1941 | unsigned long flags; | ||
1942 | 1942 | ||
1943 | VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid); | 1943 | VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid); |
1944 | spin_lock_irqsave(&adapter->cmd_lock, flags); | ||
1944 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | 1945 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, |
1945 | VMXNET3_CMD_UPDATE_VLAN_FILTERS); | 1946 | VMXNET3_CMD_UPDATE_VLAN_FILTERS); |
1947 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); | ||
1946 | } | 1948 | } |
1947 | 1949 | ||
1948 | 1950 | ||
@@ -1951,10 +1953,13 @@ vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) | |||
1951 | { | 1953 | { |
1952 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | 1954 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); |
1953 | u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; | 1955 | u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; |
1956 | unsigned long flags; | ||
1954 | 1957 | ||
1955 | VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid); | 1958 | VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid); |
1959 | spin_lock_irqsave(&adapter->cmd_lock, flags); | ||
1956 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | 1960 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, |
1957 | VMXNET3_CMD_UPDATE_VLAN_FILTERS); | 1961 | VMXNET3_CMD_UPDATE_VLAN_FILTERS); |
1962 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); | ||
1958 | } | 1963 | } |
1959 | 1964 | ||
1960 | 1965 | ||
@@ -1985,6 +1990,7 @@ static void | |||
1985 | vmxnet3_set_mc(struct net_device *netdev) | 1990 | vmxnet3_set_mc(struct net_device *netdev) |
1986 | { | 1991 | { |
1987 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | 1992 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); |
1993 | unsigned long flags; | ||
1988 | struct Vmxnet3_RxFilterConf *rxConf = | 1994 | struct Vmxnet3_RxFilterConf *rxConf = |
1989 | &adapter->shared->devRead.rxFilterConf; | 1995 | &adapter->shared->devRead.rxFilterConf; |
1990 | u8 *new_table = NULL; | 1996 | u8 *new_table = NULL; |
@@ -2020,6 +2026,7 @@ vmxnet3_set_mc(struct net_device *netdev) | |||
2020 | rxConf->mfTablePA = 0; | 2026 | rxConf->mfTablePA = 0; |
2021 | } | 2027 | } |
2022 | 2028 | ||
2029 | spin_lock_irqsave(&adapter->cmd_lock, flags); | ||
2023 | if (new_mode != rxConf->rxMode) { | 2030 | if (new_mode != rxConf->rxMode) { |
2024 | rxConf->rxMode = cpu_to_le32(new_mode); | 2031 | rxConf->rxMode = cpu_to_le32(new_mode); |
2025 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | 2032 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, |
@@ -2028,6 +2035,7 @@ vmxnet3_set_mc(struct net_device *netdev) | |||
2028 | 2035 | ||
2029 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | 2036 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, |
2030 | VMXNET3_CMD_UPDATE_MAC_FILTERS); | 2037 | VMXNET3_CMD_UPDATE_MAC_FILTERS); |
2038 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); | ||
2031 | 2039 | ||
2032 | kfree(new_table); | 2040 | kfree(new_table); |
2033 | } | 2041 | } |
@@ -2080,10 +2088,8 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter) | |||
2080 | devRead->misc.uptFeatures |= UPT1_F_LRO; | 2088 | devRead->misc.uptFeatures |= UPT1_F_LRO; |
2081 | devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS); | 2089 | devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS); |
2082 | } | 2090 | } |
2083 | if ((adapter->netdev->features & NETIF_F_HW_VLAN_RX) && | 2091 | if (adapter->netdev->features & NETIF_F_HW_VLAN_RX) |
2084 | adapter->vlan_grp) { | ||
2085 | devRead->misc.uptFeatures |= UPT1_F_RXVLAN; | 2092 | devRead->misc.uptFeatures |= UPT1_F_RXVLAN; |
2086 | } | ||
2087 | 2093 | ||
2088 | devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu); | 2094 | devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu); |
2089 | devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa); | 2095 | devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa); |
@@ -2168,6 +2174,8 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter) | |||
2168 | /* rx filter settings */ | 2174 | /* rx filter settings */ |
2169 | devRead->rxFilterConf.rxMode = 0; | 2175 | devRead->rxFilterConf.rxMode = 0; |
2170 | vmxnet3_restore_vlan(adapter); | 2176 | vmxnet3_restore_vlan(adapter); |
2177 | vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr); | ||
2178 | |||
2171 | /* the rest are already zeroed */ | 2179 | /* the rest are already zeroed */ |
2172 | } | 2180 | } |
2173 | 2181 | ||
@@ -2177,6 +2185,7 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter) | |||
2177 | { | 2185 | { |
2178 | int err, i; | 2186 | int err, i; |
2179 | u32 ret; | 2187 | u32 ret; |
2188 | unsigned long flags; | ||
2180 | 2189 | ||
2181 | dev_dbg(&adapter->netdev->dev, "%s: skb_buf_size %d, rx_buf_per_pkt %d," | 2190 | dev_dbg(&adapter->netdev->dev, "%s: skb_buf_size %d, rx_buf_per_pkt %d," |
2182 | " ring sizes %u %u %u\n", adapter->netdev->name, | 2191 | " ring sizes %u %u %u\n", adapter->netdev->name, |
@@ -2206,9 +2215,11 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter) | |||
2206 | adapter->shared_pa)); | 2215 | adapter->shared_pa)); |
2207 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI( | 2216 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI( |
2208 | adapter->shared_pa)); | 2217 | adapter->shared_pa)); |
2218 | spin_lock_irqsave(&adapter->cmd_lock, flags); | ||
2209 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | 2219 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, |
2210 | VMXNET3_CMD_ACTIVATE_DEV); | 2220 | VMXNET3_CMD_ACTIVATE_DEV); |
2211 | ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); | 2221 | ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); |
2222 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); | ||
2212 | 2223 | ||
2213 | if (ret != 0) { | 2224 | if (ret != 0) { |
2214 | printk(KERN_ERR "Failed to activate dev %s: error %u\n", | 2225 | printk(KERN_ERR "Failed to activate dev %s: error %u\n", |
@@ -2255,7 +2266,10 @@ rq_err: | |||
2255 | void | 2266 | void |
2256 | vmxnet3_reset_dev(struct vmxnet3_adapter *adapter) | 2267 | vmxnet3_reset_dev(struct vmxnet3_adapter *adapter) |
2257 | { | 2268 | { |
2269 | unsigned long flags; | ||
2270 | spin_lock_irqsave(&adapter->cmd_lock, flags); | ||
2258 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV); | 2271 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV); |
2272 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); | ||
2259 | } | 2273 | } |
2260 | 2274 | ||
2261 | 2275 | ||
@@ -2263,12 +2277,15 @@ int | |||
2263 | vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter) | 2277 | vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter) |
2264 | { | 2278 | { |
2265 | int i; | 2279 | int i; |
2280 | unsigned long flags; | ||
2266 | if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state)) | 2281 | if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state)) |
2267 | return 0; | 2282 | return 0; |
2268 | 2283 | ||
2269 | 2284 | ||
2285 | spin_lock_irqsave(&adapter->cmd_lock, flags); | ||
2270 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | 2286 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, |
2271 | VMXNET3_CMD_QUIESCE_DEV); | 2287 | VMXNET3_CMD_QUIESCE_DEV); |
2288 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); | ||
2272 | vmxnet3_disable_all_intrs(adapter); | 2289 | vmxnet3_disable_all_intrs(adapter); |
2273 | 2290 | ||
2274 | for (i = 0; i < adapter->num_rx_queues; i++) | 2291 | for (i = 0; i < adapter->num_rx_queues; i++) |
@@ -2426,7 +2443,7 @@ vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter) | |||
2426 | sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN; | 2443 | sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN; |
2427 | ring0_size = adapter->rx_queue[0].rx_ring[0].size; | 2444 | ring0_size = adapter->rx_queue[0].rx_ring[0].size; |
2428 | ring0_size = (ring0_size + sz - 1) / sz * sz; | 2445 | ring0_size = (ring0_size + sz - 1) / sz * sz; |
2429 | ring0_size = min_t(u32, rq->rx_ring[0].size, VMXNET3_RX_RING_MAX_SIZE / | 2446 | ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE / |
2430 | sz * sz); | 2447 | sz * sz); |
2431 | ring1_size = adapter->rx_queue[0].rx_ring[1].size; | 2448 | ring1_size = adapter->rx_queue[0].rx_ring[1].size; |
2432 | comp_size = ring0_size + ring1_size; | 2449 | comp_size = ring0_size + ring1_size; |
@@ -2695,7 +2712,7 @@ vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter, | |||
2695 | break; | 2712 | break; |
2696 | } else { | 2713 | } else { |
2697 | /* If fails to enable required number of MSI-x vectors | 2714 | /* If fails to enable required number of MSI-x vectors |
2698 | * try enabling 3 of them. One each for rx, tx and event | 2715 | * try enabling minimum number of vectors required. |
2699 | */ | 2716 | */ |
2700 | vectors = vector_threshold; | 2717 | vectors = vector_threshold; |
2701 | printk(KERN_ERR "Failed to enable %d MSI-X for %s, try" | 2718 | printk(KERN_ERR "Failed to enable %d MSI-X for %s, try" |
@@ -2718,9 +2735,11 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter) | |||
2718 | u32 cfg; | 2735 | u32 cfg; |
2719 | 2736 | ||
2720 | /* intr settings */ | 2737 | /* intr settings */ |
2738 | spin_lock(&adapter->cmd_lock); | ||
2721 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | 2739 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, |
2722 | VMXNET3_CMD_GET_CONF_INTR); | 2740 | VMXNET3_CMD_GET_CONF_INTR); |
2723 | cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); | 2741 | cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); |
2742 | spin_unlock(&adapter->cmd_lock); | ||
2724 | adapter->intr.type = cfg & 0x3; | 2743 | adapter->intr.type = cfg & 0x3; |
2725 | adapter->intr.mask_mode = (cfg >> 2) & 0x3; | 2744 | adapter->intr.mask_mode = (cfg >> 2) & 0x3; |
2726 | 2745 | ||
@@ -2755,7 +2774,7 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter) | |||
2755 | */ | 2774 | */ |
2756 | if (err == VMXNET3_LINUX_MIN_MSIX_VECT) { | 2775 | if (err == VMXNET3_LINUX_MIN_MSIX_VECT) { |
2757 | if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE | 2776 | if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE |
2758 | || adapter->num_rx_queues != 2) { | 2777 | || adapter->num_rx_queues != 1) { |
2759 | adapter->share_intr = VMXNET3_INTR_TXSHARE; | 2778 | adapter->share_intr = VMXNET3_INTR_TXSHARE; |
2760 | printk(KERN_ERR "Number of rx queues : 1\n"); | 2779 | printk(KERN_ERR "Number of rx queues : 1\n"); |
2761 | adapter->num_rx_queues = 1; | 2780 | adapter->num_rx_queues = 1; |
@@ -2905,6 +2924,7 @@ vmxnet3_probe_device(struct pci_dev *pdev, | |||
2905 | adapter->netdev = netdev; | 2924 | adapter->netdev = netdev; |
2906 | adapter->pdev = pdev; | 2925 | adapter->pdev = pdev; |
2907 | 2926 | ||
2927 | spin_lock_init(&adapter->cmd_lock); | ||
2908 | adapter->shared = pci_alloc_consistent(adapter->pdev, | 2928 | adapter->shared = pci_alloc_consistent(adapter->pdev, |
2909 | sizeof(struct Vmxnet3_DriverShared), | 2929 | sizeof(struct Vmxnet3_DriverShared), |
2910 | &adapter->shared_pa); | 2930 | &adapter->shared_pa); |
@@ -3108,11 +3128,15 @@ vmxnet3_suspend(struct device *device) | |||
3108 | u8 *arpreq; | 3128 | u8 *arpreq; |
3109 | struct in_device *in_dev; | 3129 | struct in_device *in_dev; |
3110 | struct in_ifaddr *ifa; | 3130 | struct in_ifaddr *ifa; |
3131 | unsigned long flags; | ||
3111 | int i = 0; | 3132 | int i = 0; |
3112 | 3133 | ||
3113 | if (!netif_running(netdev)) | 3134 | if (!netif_running(netdev)) |
3114 | return 0; | 3135 | return 0; |
3115 | 3136 | ||
3137 | for (i = 0; i < adapter->num_rx_queues; i++) | ||
3138 | napi_disable(&adapter->rx_queue[i].napi); | ||
3139 | |||
3116 | vmxnet3_disable_all_intrs(adapter); | 3140 | vmxnet3_disable_all_intrs(adapter); |
3117 | vmxnet3_free_irqs(adapter); | 3141 | vmxnet3_free_irqs(adapter); |
3118 | vmxnet3_free_intr_resources(adapter); | 3142 | vmxnet3_free_intr_resources(adapter); |
@@ -3188,8 +3212,10 @@ skip_arp: | |||
3188 | adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys( | 3212 | adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys( |
3189 | pmConf)); | 3213 | pmConf)); |
3190 | 3214 | ||
3215 | spin_lock_irqsave(&adapter->cmd_lock, flags); | ||
3191 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | 3216 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, |
3192 | VMXNET3_CMD_UPDATE_PMCFG); | 3217 | VMXNET3_CMD_UPDATE_PMCFG); |
3218 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); | ||
3193 | 3219 | ||
3194 | pci_save_state(pdev); | 3220 | pci_save_state(pdev); |
3195 | pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND), | 3221 | pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND), |
@@ -3204,7 +3230,8 @@ skip_arp: | |||
3204 | static int | 3230 | static int |
3205 | vmxnet3_resume(struct device *device) | 3231 | vmxnet3_resume(struct device *device) |
3206 | { | 3232 | { |
3207 | int err; | 3233 | int err, i = 0; |
3234 | unsigned long flags; | ||
3208 | struct pci_dev *pdev = to_pci_dev(device); | 3235 | struct pci_dev *pdev = to_pci_dev(device); |
3209 | struct net_device *netdev = pci_get_drvdata(pdev); | 3236 | struct net_device *netdev = pci_get_drvdata(pdev); |
3210 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | 3237 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); |
@@ -3232,10 +3259,14 @@ vmxnet3_resume(struct device *device) | |||
3232 | 3259 | ||
3233 | pci_enable_wake(pdev, PCI_D0, 0); | 3260 | pci_enable_wake(pdev, PCI_D0, 0); |
3234 | 3261 | ||
3262 | spin_lock_irqsave(&adapter->cmd_lock, flags); | ||
3235 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | 3263 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, |
3236 | VMXNET3_CMD_UPDATE_PMCFG); | 3264 | VMXNET3_CMD_UPDATE_PMCFG); |
3265 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); | ||
3237 | vmxnet3_alloc_intr_resources(adapter); | 3266 | vmxnet3_alloc_intr_resources(adapter); |
3238 | vmxnet3_request_irqs(adapter); | 3267 | vmxnet3_request_irqs(adapter); |
3268 | for (i = 0; i < adapter->num_rx_queues; i++) | ||
3269 | napi_enable(&adapter->rx_queue[i].napi); | ||
3239 | vmxnet3_enable_all_intrs(adapter); | 3270 | vmxnet3_enable_all_intrs(adapter); |
3240 | 3271 | ||
3241 | return 0; | 3272 | return 0; |
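Most of the vmxnet3_drv.c hunks above wrap the same two-step sequence, a write to VMXNET3_REG_CMD followed by a read of the result, in the new adapter->cmd_lock, which probe now initializes with spin_lock_init(). A sketch of the idea with generic MMIO helpers (the struct, offset, and helper names are stand-ins, not the driver's):

    /* Sketch with generic MMIO helpers; stand-ins for the driver's adapter,
     * VMXNET3_REG_CMD and BAR1 register macros. */
    #include <linux/io.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct cmd_regs_ex {
            void __iomem *bar1;
            spinlock_t    cmd_lock;           /* spin_lock_init() at probe time */
    };

    #define CMD_REG_OFF_EX 0x20               /* placeholder offset */

    static u32 issue_cmd_ex(struct cmd_regs_ex *r, u32 cmd)
    {
            unsigned long flags;
            u32 ret;

            spin_lock_irqsave(&r->cmd_lock, flags);   /* write+read must not interleave */
            iowrite32(cmd, r->bar1 + CMD_REG_OFF_EX);
            ret = ioread32(r->bar1 + CMD_REG_OFF_EX);
            spin_unlock_irqrestore(&r->cmd_lock, flags);
            return ret;
    }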
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c index 8e17fc8a7fe7..81254be85b92 100644 --- a/drivers/net/vmxnet3/vmxnet3_ethtool.c +++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c | |||
@@ -45,6 +45,7 @@ static int | |||
45 | vmxnet3_set_rx_csum(struct net_device *netdev, u32 val) | 45 | vmxnet3_set_rx_csum(struct net_device *netdev, u32 val) |
46 | { | 46 | { |
47 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | 47 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); |
48 | unsigned long flags; | ||
48 | 49 | ||
49 | if (adapter->rxcsum != val) { | 50 | if (adapter->rxcsum != val) { |
50 | adapter->rxcsum = val; | 51 | adapter->rxcsum = val; |
@@ -56,8 +57,10 @@ vmxnet3_set_rx_csum(struct net_device *netdev, u32 val) | |||
56 | adapter->shared->devRead.misc.uptFeatures &= | 57 | adapter->shared->devRead.misc.uptFeatures &= |
57 | ~UPT1_F_RXCSUM; | 58 | ~UPT1_F_RXCSUM; |
58 | 59 | ||
60 | spin_lock_irqsave(&adapter->cmd_lock, flags); | ||
59 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | 61 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, |
60 | VMXNET3_CMD_UPDATE_FEATURE); | 62 | VMXNET3_CMD_UPDATE_FEATURE); |
63 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); | ||
61 | } | 64 | } |
62 | } | 65 | } |
63 | return 0; | 66 | return 0; |
@@ -68,76 +71,78 @@ vmxnet3_set_rx_csum(struct net_device *netdev, u32 val) | |||
68 | static const struct vmxnet3_stat_desc | 71 | static const struct vmxnet3_stat_desc |
69 | vmxnet3_tq_dev_stats[] = { | 72 | vmxnet3_tq_dev_stats[] = { |
70 | /* description, offset */ | 73 | /* description, offset */ |
71 | { "TSO pkts tx", offsetof(struct UPT1_TxStats, TSOPktsTxOK) }, | 74 | { "Tx Queue#", 0 }, |
72 | { "TSO bytes tx", offsetof(struct UPT1_TxStats, TSOBytesTxOK) }, | 75 | { " TSO pkts tx", offsetof(struct UPT1_TxStats, TSOPktsTxOK) }, |
73 | { "ucast pkts tx", offsetof(struct UPT1_TxStats, ucastPktsTxOK) }, | 76 | { " TSO bytes tx", offsetof(struct UPT1_TxStats, TSOBytesTxOK) }, |
74 | { "ucast bytes tx", offsetof(struct UPT1_TxStats, ucastBytesTxOK) }, | 77 | { " ucast pkts tx", offsetof(struct UPT1_TxStats, ucastPktsTxOK) }, |
75 | { "mcast pkts tx", offsetof(struct UPT1_TxStats, mcastPktsTxOK) }, | 78 | { " ucast bytes tx", offsetof(struct UPT1_TxStats, ucastBytesTxOK) }, |
76 | { "mcast bytes tx", offsetof(struct UPT1_TxStats, mcastBytesTxOK) }, | 79 | { " mcast pkts tx", offsetof(struct UPT1_TxStats, mcastPktsTxOK) }, |
77 | { "bcast pkts tx", offsetof(struct UPT1_TxStats, bcastPktsTxOK) }, | 80 | { " mcast bytes tx", offsetof(struct UPT1_TxStats, mcastBytesTxOK) }, |
78 | { "bcast bytes tx", offsetof(struct UPT1_TxStats, bcastBytesTxOK) }, | 81 | { " bcast pkts tx", offsetof(struct UPT1_TxStats, bcastPktsTxOK) }, |
79 | { "pkts tx err", offsetof(struct UPT1_TxStats, pktsTxError) }, | 82 | { " bcast bytes tx", offsetof(struct UPT1_TxStats, bcastBytesTxOK) }, |
80 | { "pkts tx discard", offsetof(struct UPT1_TxStats, pktsTxDiscard) }, | 83 | { " pkts tx err", offsetof(struct UPT1_TxStats, pktsTxError) }, |
84 | { " pkts tx discard", offsetof(struct UPT1_TxStats, pktsTxDiscard) }, | ||
81 | }; | 85 | }; |
82 | 86 | ||
83 | /* per tq stats maintained by the driver */ | 87 | /* per tq stats maintained by the driver */ |
84 | static const struct vmxnet3_stat_desc | 88 | static const struct vmxnet3_stat_desc |
85 | vmxnet3_tq_driver_stats[] = { | 89 | vmxnet3_tq_driver_stats[] = { |
86 | /* description, offset */ | 90 | /* description, offset */ |
87 | {"drv dropped tx total", offsetof(struct vmxnet3_tq_driver_stats, | 91 | {" drv dropped tx total", offsetof(struct vmxnet3_tq_driver_stats, |
88 | drop_total) }, | 92 | drop_total) }, |
89 | { " too many frags", offsetof(struct vmxnet3_tq_driver_stats, | 93 | { " too many frags", offsetof(struct vmxnet3_tq_driver_stats, |
90 | drop_too_many_frags) }, | 94 | drop_too_many_frags) }, |
91 | { " giant hdr", offsetof(struct vmxnet3_tq_driver_stats, | 95 | { " giant hdr", offsetof(struct vmxnet3_tq_driver_stats, |
92 | drop_oversized_hdr) }, | 96 | drop_oversized_hdr) }, |
93 | { " hdr err", offsetof(struct vmxnet3_tq_driver_stats, | 97 | { " hdr err", offsetof(struct vmxnet3_tq_driver_stats, |
94 | drop_hdr_inspect_err) }, | 98 | drop_hdr_inspect_err) }, |
95 | { " tso", offsetof(struct vmxnet3_tq_driver_stats, | 99 | { " tso", offsetof(struct vmxnet3_tq_driver_stats, |
96 | drop_tso) }, | 100 | drop_tso) }, |
97 | { "ring full", offsetof(struct vmxnet3_tq_driver_stats, | 101 | { " ring full", offsetof(struct vmxnet3_tq_driver_stats, |
98 | tx_ring_full) }, | 102 | tx_ring_full) }, |
99 | { "pkts linearized", offsetof(struct vmxnet3_tq_driver_stats, | 103 | { " pkts linearized", offsetof(struct vmxnet3_tq_driver_stats, |
100 | linearized) }, | 104 | linearized) }, |
101 | { "hdr cloned", offsetof(struct vmxnet3_tq_driver_stats, | 105 | { " hdr cloned", offsetof(struct vmxnet3_tq_driver_stats, |
102 | copy_skb_header) }, | 106 | copy_skb_header) }, |
103 | { "giant hdr", offsetof(struct vmxnet3_tq_driver_stats, | 107 | { " giant hdr", offsetof(struct vmxnet3_tq_driver_stats, |
104 | oversized_hdr) }, | 108 | oversized_hdr) }, |
105 | }; | 109 | }; |
106 | 110 | ||
107 | /* per rq stats maintained by the device */ | 111 | /* per rq stats maintained by the device */ |
108 | static const struct vmxnet3_stat_desc | 112 | static const struct vmxnet3_stat_desc |
109 | vmxnet3_rq_dev_stats[] = { | 113 | vmxnet3_rq_dev_stats[] = { |
110 | { "LRO pkts rx", offsetof(struct UPT1_RxStats, LROPktsRxOK) }, | 114 | { "Rx Queue#", 0 }, |
111 | { "LRO byte rx", offsetof(struct UPT1_RxStats, LROBytesRxOK) }, | 115 | { " LRO pkts rx", offsetof(struct UPT1_RxStats, LROPktsRxOK) }, |
112 | { "ucast pkts rx", offsetof(struct UPT1_RxStats, ucastPktsRxOK) }, | 116 | { " LRO byte rx", offsetof(struct UPT1_RxStats, LROBytesRxOK) }, |
113 | { "ucast bytes rx", offsetof(struct UPT1_RxStats, ucastBytesRxOK) }, | 117 | { " ucast pkts rx", offsetof(struct UPT1_RxStats, ucastPktsRxOK) }, |
114 | { "mcast pkts rx", offsetof(struct UPT1_RxStats, mcastPktsRxOK) }, | 118 | { " ucast bytes rx", offsetof(struct UPT1_RxStats, ucastBytesRxOK) }, |
115 | { "mcast bytes rx", offsetof(struct UPT1_RxStats, mcastBytesRxOK) }, | 119 | { " mcast pkts rx", offsetof(struct UPT1_RxStats, mcastPktsRxOK) }, |
116 | { "bcast pkts rx", offsetof(struct UPT1_RxStats, bcastPktsRxOK) }, | 120 | { " mcast bytes rx", offsetof(struct UPT1_RxStats, mcastBytesRxOK) }, |
117 | { "bcast bytes rx", offsetof(struct UPT1_RxStats, bcastBytesRxOK) }, | 121 | { " bcast pkts rx", offsetof(struct UPT1_RxStats, bcastPktsRxOK) }, |
118 | { "pkts rx out of buf", offsetof(struct UPT1_RxStats, pktsRxOutOfBuf) }, | 122 | { " bcast bytes rx", offsetof(struct UPT1_RxStats, bcastBytesRxOK) }, |
119 | { "pkts rx err", offsetof(struct UPT1_RxStats, pktsRxError) }, | 123 | { " pkts rx OOB", offsetof(struct UPT1_RxStats, pktsRxOutOfBuf) }, |
124 | { " pkts rx err", offsetof(struct UPT1_RxStats, pktsRxError) }, | ||
120 | }; | 125 | }; |
121 | 126 | ||
122 | /* per rq stats maintained by the driver */ | 127 | /* per rq stats maintained by the driver */ |
123 | static const struct vmxnet3_stat_desc | 128 | static const struct vmxnet3_stat_desc |
124 | vmxnet3_rq_driver_stats[] = { | 129 | vmxnet3_rq_driver_stats[] = { |
125 | /* description, offset */ | 130 | /* description, offset */ |
126 | { "drv dropped rx total", offsetof(struct vmxnet3_rq_driver_stats, | 131 | { " drv dropped rx total", offsetof(struct vmxnet3_rq_driver_stats, |
127 | drop_total) }, | 132 | drop_total) }, |
128 | { " err", offsetof(struct vmxnet3_rq_driver_stats, | 133 | { " err", offsetof(struct vmxnet3_rq_driver_stats, |
129 | drop_err) }, | 134 | drop_err) }, |
130 | { " fcs", offsetof(struct vmxnet3_rq_driver_stats, | 135 | { " fcs", offsetof(struct vmxnet3_rq_driver_stats, |
131 | drop_fcs) }, | 136 | drop_fcs) }, |
132 | { "rx buf alloc fail", offsetof(struct vmxnet3_rq_driver_stats, | 137 | { " rx buf alloc fail", offsetof(struct vmxnet3_rq_driver_stats, |
133 | rx_buf_alloc_failure) }, | 138 | rx_buf_alloc_failure) }, |
134 | }; | 139 | }; |
135 | 140 | ||
136 | /* gloabl stats maintained by the driver */ | 141 | /* gloabl stats maintained by the driver */ |
137 | static const struct vmxnet3_stat_desc | 142 | static const struct vmxnet3_stat_desc |
138 | vmxnet3_global_stats[] = { | 143 | vmxnet3_global_stats[] = { |
139 | /* description, offset */ | 144 | /* description, offset */ |
140 | { "tx timeout count", offsetof(struct vmxnet3_adapter, | 145 | { "tx timeout count", offsetof(struct vmxnet3_adapter, |
141 | tx_timeout_count) } | 146 | tx_timeout_count) } |
142 | }; | 147 | }; |
143 | 148 | ||
@@ -151,12 +156,15 @@ vmxnet3_get_stats(struct net_device *netdev) | |||
151 | struct UPT1_TxStats *devTxStats; | 156 | struct UPT1_TxStats *devTxStats; |
152 | struct UPT1_RxStats *devRxStats; | 157 | struct UPT1_RxStats *devRxStats; |
153 | struct net_device_stats *net_stats = &netdev->stats; | 158 | struct net_device_stats *net_stats = &netdev->stats; |
159 | unsigned long flags; | ||
154 | int i; | 160 | int i; |
155 | 161 | ||
156 | adapter = netdev_priv(netdev); | 162 | adapter = netdev_priv(netdev); |
157 | 163 | ||
158 | /* Collect the dev stats into the shared area */ | 164 | /* Collect the dev stats into the shared area */ |
165 | spin_lock_irqsave(&adapter->cmd_lock, flags); | ||
159 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS); | 166 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS); |
167 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); | ||
160 | 168 | ||
161 | memset(net_stats, 0, sizeof(*net_stats)); | 169 | memset(net_stats, 0, sizeof(*net_stats)); |
162 | for (i = 0; i < adapter->num_tx_queues; i++) { | 170 | for (i = 0; i < adapter->num_tx_queues; i++) { |
@@ -193,12 +201,15 @@ vmxnet3_get_stats(struct net_device *netdev) | |||
193 | static int | 201 | static int |
194 | vmxnet3_get_sset_count(struct net_device *netdev, int sset) | 202 | vmxnet3_get_sset_count(struct net_device *netdev, int sset) |
195 | { | 203 | { |
204 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | ||
196 | switch (sset) { | 205 | switch (sset) { |
197 | case ETH_SS_STATS: | 206 | case ETH_SS_STATS: |
198 | return ARRAY_SIZE(vmxnet3_tq_dev_stats) + | 207 | return (ARRAY_SIZE(vmxnet3_tq_dev_stats) + |
199 | ARRAY_SIZE(vmxnet3_tq_driver_stats) + | 208 | ARRAY_SIZE(vmxnet3_tq_driver_stats)) * |
200 | ARRAY_SIZE(vmxnet3_rq_dev_stats) + | 209 | adapter->num_tx_queues + |
201 | ARRAY_SIZE(vmxnet3_rq_driver_stats) + | 210 | (ARRAY_SIZE(vmxnet3_rq_dev_stats) + |
211 | ARRAY_SIZE(vmxnet3_rq_driver_stats)) * | ||
212 | adapter->num_rx_queues + | ||
202 | ARRAY_SIZE(vmxnet3_global_stats); | 213 | ARRAY_SIZE(vmxnet3_global_stats); |
203 | default: | 214 | default: |
204 | return -EOPNOTSUPP; | 215 | return -EOPNOTSUPP; |
@@ -206,10 +217,16 @@ vmxnet3_get_sset_count(struct net_device *netdev, int sset) | |||
206 | } | 217 | } |
207 | 218 | ||
208 | 219 | ||
220 | /* Should be multiple of 4 */ | ||
221 | #define NUM_TX_REGS 8 | ||
222 | #define NUM_RX_REGS 12 | ||
223 | |||
209 | static int | 224 | static int |
210 | vmxnet3_get_regs_len(struct net_device *netdev) | 225 | vmxnet3_get_regs_len(struct net_device *netdev) |
211 | { | 226 | { |
212 | return 20 * sizeof(u32); | 227 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); |
228 | return (adapter->num_tx_queues * NUM_TX_REGS * sizeof(u32) + | ||
229 | adapter->num_rx_queues * NUM_RX_REGS * sizeof(u32)); | ||
213 | } | 230 | } |
214 | 231 | ||
215 | 232 | ||
@@ -240,29 +257,37 @@ vmxnet3_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) | |||
240 | static void | 257 | static void |
241 | vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf) | 258 | vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf) |
242 | { | 259 | { |
260 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | ||
243 | if (stringset == ETH_SS_STATS) { | 261 | if (stringset == ETH_SS_STATS) { |
244 | int i; | 262 | int i, j; |
245 | 263 | for (j = 0; j < adapter->num_tx_queues; j++) { | |
246 | for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) { | 264 | for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) { |
247 | memcpy(buf, vmxnet3_tq_dev_stats[i].desc, | 265 | memcpy(buf, vmxnet3_tq_dev_stats[i].desc, |
248 | ETH_GSTRING_LEN); | 266 | ETH_GSTRING_LEN); |
249 | buf += ETH_GSTRING_LEN; | 267 | buf += ETH_GSTRING_LEN; |
250 | } | 268 | } |
251 | for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++) { | 269 | for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); |
252 | memcpy(buf, vmxnet3_tq_driver_stats[i].desc, | 270 | i++) { |
253 | ETH_GSTRING_LEN); | 271 | memcpy(buf, vmxnet3_tq_driver_stats[i].desc, |
254 | buf += ETH_GSTRING_LEN; | 272 | ETH_GSTRING_LEN); |
255 | } | 273 | buf += ETH_GSTRING_LEN; |
256 | for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) { | 274 | } |
257 | memcpy(buf, vmxnet3_rq_dev_stats[i].desc, | ||
258 | ETH_GSTRING_LEN); | ||
259 | buf += ETH_GSTRING_LEN; | ||
260 | } | 275 | } |
261 | for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++) { | 276 | |
262 | memcpy(buf, vmxnet3_rq_driver_stats[i].desc, | 277 | for (j = 0; j < adapter->num_rx_queues; j++) { |
263 | ETH_GSTRING_LEN); | 278 | for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) { |
264 | buf += ETH_GSTRING_LEN; | 279 | memcpy(buf, vmxnet3_rq_dev_stats[i].desc, |
280 | ETH_GSTRING_LEN); | ||
281 | buf += ETH_GSTRING_LEN; | ||
282 | } | ||
283 | for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); | ||
284 | i++) { | ||
285 | memcpy(buf, vmxnet3_rq_driver_stats[i].desc, | ||
286 | ETH_GSTRING_LEN); | ||
287 | buf += ETH_GSTRING_LEN; | ||
288 | } | ||
265 | } | 289 | } |
290 | |||
266 | for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++) { | 291 | for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++) { |
267 | memcpy(buf, vmxnet3_global_stats[i].desc, | 292 | memcpy(buf, vmxnet3_global_stats[i].desc, |
268 | ETH_GSTRING_LEN); | 293 | ETH_GSTRING_LEN); |
@@ -277,6 +302,7 @@ vmxnet3_set_flags(struct net_device *netdev, u32 data) | |||
277 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | 302 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); |
278 | u8 lro_requested = (data & ETH_FLAG_LRO) == 0 ? 0 : 1; | 303 | u8 lro_requested = (data & ETH_FLAG_LRO) == 0 ? 0 : 1; |
279 | u8 lro_present = (netdev->features & NETIF_F_LRO) == 0 ? 0 : 1; | 304 | u8 lro_present = (netdev->features & NETIF_F_LRO) == 0 ? 0 : 1; |
305 | unsigned long flags; | ||
280 | 306 | ||
281 | if (data & ~ETH_FLAG_LRO) | 307 | if (data & ~ETH_FLAG_LRO) |
282 | return -EOPNOTSUPP; | 308 | return -EOPNOTSUPP; |
@@ -292,8 +318,10 @@ vmxnet3_set_flags(struct net_device *netdev, u32 data) | |||
292 | else | 318 | else |
293 | adapter->shared->devRead.misc.uptFeatures &= | 319 | adapter->shared->devRead.misc.uptFeatures &= |
294 | ~UPT1_F_LRO; | 320 | ~UPT1_F_LRO; |
321 | spin_lock_irqsave(&adapter->cmd_lock, flags); | ||
295 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | 322 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, |
296 | VMXNET3_CMD_UPDATE_FEATURE); | 323 | VMXNET3_CMD_UPDATE_FEATURE); |
324 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); | ||
297 | } | 325 | } |
298 | return 0; | 326 | return 0; |
299 | } | 327 | } |
@@ -303,30 +331,41 @@ vmxnet3_get_ethtool_stats(struct net_device *netdev, | |||
303 | struct ethtool_stats *stats, u64 *buf) | 331 | struct ethtool_stats *stats, u64 *buf) |
304 | { | 332 | { |
305 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | 333 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); |
334 | unsigned long flags; | ||
306 | u8 *base; | 335 | u8 *base; |
307 | int i; | 336 | int i; |
308 | int j = 0; | 337 | int j = 0; |
309 | 338 | ||
339 | spin_lock_irqsave(&adapter->cmd_lock, flags); | ||
310 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS); | 340 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS); |
341 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); | ||
311 | 342 | ||
312 | /* this does assume each counter is 64-bit wide */ | 343 | /* this does assume each counter is 64-bit wide */ |
313 | /* TODO change this for multiple queues */ | 344 | for (j = 0; j < adapter->num_tx_queues; j++) { |
314 | 345 | base = (u8 *)&adapter->tqd_start[j].stats; | |
315 | base = (u8 *)&adapter->tqd_start[j].stats; | 346 | *buf++ = (u64)j; |
316 | for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) | 347 | for (i = 1; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) |
317 | *buf++ = *(u64 *)(base + vmxnet3_tq_dev_stats[i].offset); | 348 | *buf++ = *(u64 *)(base + |
318 | 349 | vmxnet3_tq_dev_stats[i].offset); | |
319 | base = (u8 *)&adapter->tx_queue[j].stats; | 350 | |
320 | for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++) | 351 | base = (u8 *)&adapter->tx_queue[j].stats; |
321 | *buf++ = *(u64 *)(base + vmxnet3_tq_driver_stats[i].offset); | 352 | for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++) |
322 | 353 | *buf++ = *(u64 *)(base + | |
323 | base = (u8 *)&adapter->rqd_start[j].stats; | 354 | vmxnet3_tq_driver_stats[i].offset); |
324 | for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) | 355 | } |
325 | *buf++ = *(u64 *)(base + vmxnet3_rq_dev_stats[i].offset); | ||
326 | 356 | ||
327 | base = (u8 *)&adapter->rx_queue[j].stats; | 357 | for (j = 0; j < adapter->num_tx_queues; j++) { |
328 | for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++) | 358 | base = (u8 *)&adapter->rqd_start[j].stats; |
329 | *buf++ = *(u64 *)(base + vmxnet3_rq_driver_stats[i].offset); | 359 | *buf++ = (u64) j; |
360 | for (i = 1; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) | ||
361 | *buf++ = *(u64 *)(base + | ||
362 | vmxnet3_rq_dev_stats[i].offset); | ||
363 | |||
364 | base = (u8 *)&adapter->rx_queue[j].stats; | ||
365 | for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++) | ||
366 | *buf++ = *(u64 *)(base + | ||
367 | vmxnet3_rq_driver_stats[i].offset); | ||
368 | } | ||
330 | 369 | ||
331 | base = (u8 *)adapter; | 370 | base = (u8 *)adapter; |
332 | for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++) | 371 | for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++) |
@@ -339,7 +378,7 @@ vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) | |||
339 | { | 378 | { |
340 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | 379 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); |
341 | u32 *buf = p; | 380 | u32 *buf = p; |
342 | int i = 0; | 381 | int i = 0, j = 0; |
343 | 382 | ||
344 | memset(p, 0, vmxnet3_get_regs_len(netdev)); | 383 | memset(p, 0, vmxnet3_get_regs_len(netdev)); |
345 | 384 | ||
@@ -348,31 +387,35 @@ vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) | |||
348 | /* Update vmxnet3_get_regs_len if we want to dump more registers */ | 387 | /* Update vmxnet3_get_regs_len if we want to dump more registers */ |
349 | 388 | ||
350 | /* make each ring use multiple of 16 bytes */ | 389 | /* make each ring use multiple of 16 bytes */ |
351 | /* TODO change this for multiple queues */ | 390 | for (i = 0; i < adapter->num_tx_queues; i++) { |
352 | buf[0] = adapter->tx_queue[i].tx_ring.next2fill; | 391 | buf[j++] = adapter->tx_queue[i].tx_ring.next2fill; |
353 | buf[1] = adapter->tx_queue[i].tx_ring.next2comp; | 392 | buf[j++] = adapter->tx_queue[i].tx_ring.next2comp; |
354 | buf[2] = adapter->tx_queue[i].tx_ring.gen; | 393 | buf[j++] = adapter->tx_queue[i].tx_ring.gen; |
355 | buf[3] = 0; | 394 | buf[j++] = 0; |
356 | 395 | ||
357 | buf[4] = adapter->tx_queue[i].comp_ring.next2proc; | 396 | buf[j++] = adapter->tx_queue[i].comp_ring.next2proc; |
358 | buf[5] = adapter->tx_queue[i].comp_ring.gen; | 397 | buf[j++] = adapter->tx_queue[i].comp_ring.gen; |
359 | buf[6] = adapter->tx_queue[i].stopped; | 398 | buf[j++] = adapter->tx_queue[i].stopped; |
360 | buf[7] = 0; | 399 | buf[j++] = 0; |
361 | 400 | } | |
362 | buf[8] = adapter->rx_queue[i].rx_ring[0].next2fill; | 401 | |
363 | buf[9] = adapter->rx_queue[i].rx_ring[0].next2comp; | 402 | for (i = 0; i < adapter->num_rx_queues; i++) { |
364 | buf[10] = adapter->rx_queue[i].rx_ring[0].gen; | 403 | buf[j++] = adapter->rx_queue[i].rx_ring[0].next2fill; |
365 | buf[11] = 0; | 404 | buf[j++] = adapter->rx_queue[i].rx_ring[0].next2comp; |
366 | 405 | buf[j++] = adapter->rx_queue[i].rx_ring[0].gen; | |
367 | buf[12] = adapter->rx_queue[i].rx_ring[1].next2fill; | 406 | buf[j++] = 0; |
368 | buf[13] = adapter->rx_queue[i].rx_ring[1].next2comp; | 407 | |
369 | buf[14] = adapter->rx_queue[i].rx_ring[1].gen; | 408 | buf[j++] = adapter->rx_queue[i].rx_ring[1].next2fill; |
370 | buf[15] = 0; | 409 | buf[j++] = adapter->rx_queue[i].rx_ring[1].next2comp; |
371 | 410 | buf[j++] = adapter->rx_queue[i].rx_ring[1].gen; | |
372 | buf[16] = adapter->rx_queue[i].comp_ring.next2proc; | 411 | buf[j++] = 0; |
373 | buf[17] = adapter->rx_queue[i].comp_ring.gen; | 412 | |
374 | buf[18] = 0; | 413 | buf[j++] = adapter->rx_queue[i].comp_ring.next2proc; |
375 | buf[19] = 0; | 414 | buf[j++] = adapter->rx_queue[i].comp_ring.gen; |
415 | buf[j++] = 0; | ||
416 | buf[j++] = 0; | ||
417 | } | ||
418 | |||
376 | } | 419 | } |
377 | 420 | ||
378 | 421 | ||
@@ -574,6 +617,7 @@ vmxnet3_set_rss_indir(struct net_device *netdev, | |||
574 | const struct ethtool_rxfh_indir *p) | 617 | const struct ethtool_rxfh_indir *p) |
575 | { | 618 | { |
576 | unsigned int i; | 619 | unsigned int i; |
620 | unsigned long flags; | ||
577 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | 621 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); |
578 | struct UPT1_RSSConf *rssConf = adapter->rss_conf; | 622 | struct UPT1_RSSConf *rssConf = adapter->rss_conf; |
579 | 623 | ||
@@ -592,8 +636,10 @@ vmxnet3_set_rss_indir(struct net_device *netdev, | |||
592 | for (i = 0; i < rssConf->indTableSize; i++) | 636 | for (i = 0; i < rssConf->indTableSize; i++) |
593 | rssConf->indTable[i] = p->ring_index[i]; | 637 | rssConf->indTable[i] = p->ring_index[i]; |
594 | 638 | ||
639 | spin_lock_irqsave(&adapter->cmd_lock, flags); | ||
595 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | 640 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, |
596 | VMXNET3_CMD_UPDATE_RSSIDT); | 641 | VMXNET3_CMD_UPDATE_RSSIDT); |
642 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); | ||
597 | 643 | ||
598 | return 0; | 644 | return 0; |
599 | 645 | ||
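The ethtool changes make the statistics arrays per-queue, so get_sset_count() must scale the per-queue table sizes by the live queue counts, and get_strings()/get_ethtool_stats() must walk the queues in the same order. (Note that the rx-side loop in vmxnet3_get_ethtool_stats() above still uses num_tx_queues as its bound, which only lines up when the two counts are equal.) A sketch of the counting with stand-in tables:

    /* Sketch with stand-in tables; the driver applies the same arithmetic
     * to its real vmxnet3_*_stats arrays. */
    #include <linux/kernel.h>

    static const char * const tq_stats_ex[]     = { "Tx Queue#", "ucast pkts tx" };
    static const char * const rq_stats_ex[]     = { "Rx Queue#", "ucast pkts rx" };
    static const char * const global_stats_ex[] = { "tx timeout count" };

    static int sset_count_ex(int num_tx_queues, int num_rx_queues)
    {
            return ARRAY_SIZE(tq_stats_ex) * num_tx_queues +
                   ARRAY_SIZE(rq_stats_ex) * num_rx_queues +
                   ARRAY_SIZE(global_stats_ex);
    }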
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h index 7fadeed37f03..fb5d245ac878 100644 --- a/drivers/net/vmxnet3/vmxnet3_int.h +++ b/drivers/net/vmxnet3/vmxnet3_int.h | |||
@@ -68,10 +68,10 @@ | |||
68 | /* | 68 | /* |
69 | * Version numbers | 69 | * Version numbers |
70 | */ | 70 | */ |
71 | #define VMXNET3_DRIVER_VERSION_STRING "1.0.16.0-k" | 71 | #define VMXNET3_DRIVER_VERSION_STRING "1.0.25.0-k" |
72 | 72 | ||
73 | /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ | 73 | /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ |
74 | #define VMXNET3_DRIVER_VERSION_NUM 0x01001000 | 74 | #define VMXNET3_DRIVER_VERSION_NUM 0x01001900 |
75 | 75 | ||
76 | #if defined(CONFIG_PCI_MSI) | 76 | #if defined(CONFIG_PCI_MSI) |
77 | /* RSS only makes sense if MSI-X is supported. */ | 77 | /* RSS only makes sense if MSI-X is supported. */ |
@@ -289,7 +289,7 @@ struct vmxnet3_rx_queue { | |||
289 | 289 | ||
290 | #define VMXNET3_LINUX_MAX_MSIX_VECT (VMXNET3_DEVICE_MAX_TX_QUEUES + \ | 290 | #define VMXNET3_LINUX_MAX_MSIX_VECT (VMXNET3_DEVICE_MAX_TX_QUEUES + \ |
291 | VMXNET3_DEVICE_MAX_RX_QUEUES + 1) | 291 | VMXNET3_DEVICE_MAX_RX_QUEUES + 1) |
292 | #define VMXNET3_LINUX_MIN_MSIX_VECT 3 /* 1 for each : tx, rx and event */ | 292 | #define VMXNET3_LINUX_MIN_MSIX_VECT 2 /* 1 for tx-rx pair and 1 for event */ |
293 | 293 | ||
294 | 294 | ||
295 | struct vmxnet3_intr { | 295 | struct vmxnet3_intr { |
@@ -317,6 +317,7 @@ struct vmxnet3_adapter { | |||
317 | struct vmxnet3_rx_queue rx_queue[VMXNET3_DEVICE_MAX_RX_QUEUES]; | 317 | struct vmxnet3_rx_queue rx_queue[VMXNET3_DEVICE_MAX_RX_QUEUES]; |
318 | struct vlan_group *vlan_grp; | 318 | struct vlan_group *vlan_grp; |
319 | struct vmxnet3_intr intr; | 319 | struct vmxnet3_intr intr; |
320 | spinlock_t cmd_lock; | ||
320 | struct Vmxnet3_DriverShared *shared; | 321 | struct Vmxnet3_DriverShared *shared; |
321 | struct Vmxnet3_PMConf *pm_conf; | 322 | struct Vmxnet3_PMConf *pm_conf; |
322 | struct Vmxnet3_TxQueueDesc *tqd_start; /* all tx queue desc */ | 323 | struct Vmxnet3_TxQueueDesc *tqd_start; /* all tx queue desc */ |
diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c index 01c05f53e2f9..228d4f7a58af 100644 --- a/drivers/net/vxge/vxge-config.c +++ b/drivers/net/vxge/vxge-config.c | |||
@@ -3690,7 +3690,7 @@ __vxge_hw_vpath_rts_table_get(struct __vxge_hw_vpath_handle *vp, | |||
3690 | if (status != VXGE_HW_OK) | 3690 | if (status != VXGE_HW_OK) |
3691 | goto exit; | 3691 | goto exit; |
3692 | 3692 | ||
3693 | if ((rts_table != VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) || | 3693 | if ((rts_table != VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) && |
3694 | (rts_table != | 3694 | (rts_table != |
3695 | VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) | 3695 | VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) |
3696 | *data1 = 0; | 3696 | *data1 = 0; |
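The vxge-config fix is pure boolean logic: for distinct constants, "x != A || x != B" holds for every x, so *data1 was always cleared; "&&" expresses the intended "neither table" case. In isolation:

    /* The logic error in isolation: with distinct a and b, the "||" form is
     * always true; "&&" is the test for "x is neither a nor b". */
    #include <linux/types.h>

    static bool is_neither_wrong(int x, int a, int b) { return x != a || x != b; }
    static bool is_neither_right(int x, int a, int b) { return x != a && x != b; }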
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c index 1ac9b568f1b0..c81a6512c683 100644 --- a/drivers/net/vxge/vxge-main.c +++ b/drivers/net/vxge/vxge-main.c | |||
@@ -4120,6 +4120,7 @@ int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override) | |||
4120 | "hotplug event.\n"); | 4120 | "hotplug event.\n"); |
4121 | 4121 | ||
4122 | out: | 4122 | out: |
4123 | release_firmware(fw); | ||
4123 | return ret; | 4124 | return ret; |
4124 | } | 4125 | } |
4125 | 4126 | ||
diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h index 8c3103fb6442..d48486d6afa1 100644 --- a/drivers/net/vxge/vxge-traffic.h +++ b/drivers/net/vxge/vxge-traffic.h | |||
@@ -1695,7 +1695,7 @@ struct vxge_hw_device_stats_sw_err { | |||
1695 | * struct vxge_hw_device_stats - Contains HW per-device statistics, | 1695 | * struct vxge_hw_device_stats - Contains HW per-device statistics, |
1696 | * including hw. | 1696 | * including hw. |
1697 | * @devh: HW device handle. | 1697 | * @devh: HW device handle. |
1698 | * @dma_addr: DMA addres of the %hw_info. Given to device to fill-in the stats. | 1698 | * @dma_addr: DMA address of the %hw_info. Given to device to fill-in the stats. |
1699 | * @hw_info_dmah: DMA handle used to map hw statistics onto the device memory | 1699 | * @hw_info_dmah: DMA handle used to map hw statistics onto the device memory |
1700 | * space. | 1700 | * space. |
1701 | * @hw_info_dma_acch: One more DMA handle used subsequently to free the | 1701 | * @hw_info_dma_acch: One more DMA handle used subsequently to free the |
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c index 34cff6ce6d27..4578e5b4b411 100644 --- a/drivers/net/wan/dscc4.c +++ b/drivers/net/wan/dscc4.c | |||
@@ -125,7 +125,7 @@ static u32 dscc4_pci_config_store[16]; | |||
125 | /* Module parameters */ | 125 | /* Module parameters */ |
126 | 126 | ||
127 | MODULE_AUTHOR("Maintainer: Francois Romieu <romieu@cogenit.fr>"); | 127 | MODULE_AUTHOR("Maintainer: Francois Romieu <romieu@cogenit.fr>"); |
128 | MODULE_DESCRIPTION("Siemens PEB20534 PCI Controler"); | 128 | MODULE_DESCRIPTION("Siemens PEB20534 PCI Controller"); |
129 | MODULE_LICENSE("GPL"); | 129 | MODULE_LICENSE("GPL"); |
130 | module_param(debug, int, 0); | 130 | module_param(debug, int, 0); |
131 | MODULE_PARM_DESC(debug,"Enable/disable extra messages"); | 131 | MODULE_PARM_DESC(debug,"Enable/disable extra messages"); |
diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/net/wimax/i2400m/driver.c index f0603327aafa..65bc334ed57b 100644 --- a/drivers/net/wimax/i2400m/driver.c +++ b/drivers/net/wimax/i2400m/driver.c | |||
@@ -232,7 +232,7 @@ int i2400m_check_mac_addr(struct i2400m *i2400m) | |||
232 | result); | 232 | result); |
233 | goto error; | 233 | goto error; |
234 | } | 234 | } |
235 | /* Extract MAC addresss */ | 235 | /* Extract MAC address */ |
236 | ddi = (void *) skb->data; | 236 | ddi = (void *) skb->data; |
237 | BUILD_BUG_ON(ETH_ALEN != sizeof(ddi->mac_address)); | 237 | BUILD_BUG_ON(ETH_ALEN != sizeof(ddi->mac_address)); |
238 | d_printf(2, dev, "GET DEVICE INFO: mac addr %pM\n", | 238 | d_printf(2, dev, "GET DEVICE INFO: mac addr %pM\n", |
diff --git a/drivers/net/wimax/i2400m/i2400m.h b/drivers/net/wimax/i2400m/i2400m.h index 17ecaa41a807..030cbfd31704 100644 --- a/drivers/net/wimax/i2400m/i2400m.h +++ b/drivers/net/wimax/i2400m/i2400m.h | |||
@@ -186,7 +186,7 @@ enum { | |||
186 | * struct i2400m_poke_table - Hardware poke table for the Intel 2400m | 186 | * struct i2400m_poke_table - Hardware poke table for the Intel 2400m |
187 | * | 187 | * |
188 | * This structure will be used to create a device specific poke table | 188 | * This structure will be used to create a device specific poke table |
189 | * to put the device in a consistant state at boot time. | 189 | * to put the device in a consistent state at boot time. |
190 | * | 190 | * |
191 | * @address: The device address to poke | 191 | * @address: The device address to poke |
192 | * | 192 | * |
@@ -703,7 +703,7 @@ enum i2400m_bm_cmd_flags { | |||
703 | * @I2400M_BRI_MAC_REINIT: We need to reinitialize the boot | 703 | * @I2400M_BRI_MAC_REINIT: We need to reinitialize the boot |
704 | * rom after reading the MAC address. This is quite a dirty hack, | 704 | * rom after reading the MAC address. This is quite a dirty hack, |
705 | * if you ask me -- the device requires the bootrom to be | 705 | * if you ask me -- the device requires the bootrom to be |
706 | * intialized after reading the MAC address. | 706 | * initialized after reading the MAC address. |
707 | */ | 707 | */ |
708 | enum i2400m_bri { | 708 | enum i2400m_bri { |
709 | I2400M_BRI_SOFT = 1 << 1, | 709 | I2400M_BRI_SOFT = 1 << 1, |
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c index 019a74d533a6..09ae4ef0fd51 100644 --- a/drivers/net/wireless/ath/ath5k/base.c +++ b/drivers/net/wireless/ath/ath5k/base.c | |||
@@ -2294,6 +2294,8 @@ ath5k_tx_complete_poll_work(struct work_struct *work) | |||
2294 | int i; | 2294 | int i; |
2295 | bool needreset = false; | 2295 | bool needreset = false; |
2296 | 2296 | ||
2297 | mutex_lock(&sc->lock); | ||
2298 | |||
2297 | for (i = 0; i < ARRAY_SIZE(sc->txqs); i++) { | 2299 | for (i = 0; i < ARRAY_SIZE(sc->txqs); i++) { |
2298 | if (sc->txqs[i].setup) { | 2300 | if (sc->txqs[i].setup) { |
2299 | txq = &sc->txqs[i]; | 2301 | txq = &sc->txqs[i]; |
@@ -2321,6 +2323,8 @@ ath5k_tx_complete_poll_work(struct work_struct *work) | |||
2321 | ath5k_reset(sc, NULL, true); | 2323 | ath5k_reset(sc, NULL, true); |
2322 | } | 2324 | } |
2323 | 2325 | ||
2326 | mutex_unlock(&sc->lock); | ||
2327 | |||
2324 | ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, | 2328 | ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, |
2325 | msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT)); | 2329 | msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT)); |
2326 | } | 2330 | } |
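The ath5k/base.c change wraps the tx-hang poll worker in sc->lock so the reset it may trigger cannot race with concurrent configuration paths. A hedged sketch of a delayed work handler that takes a mutex around its check-and-reset step and re-arms itself afterwards (structure only, names hypothetical):

#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct example_softc {
	struct mutex lock;
	struct delayed_work poll_work;
	bool hung;
};

static void example_poll_work(struct work_struct *work)
{
	struct example_softc *sc =
		container_of(work, struct example_softc, poll_work.work);

	mutex_lock(&sc->lock);		/* serialize with configuration paths */
	if (sc->hung) {
		/* ... reset the hardware while holding the lock ... */
		sc->hung = false;
	}
	mutex_unlock(&sc->lock);

	/* re-arm only after the lock has been dropped */
	schedule_delayed_work(&sc->poll_work, msecs_to_jiffies(1000));
}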
diff --git a/drivers/net/wireless/ath/ath5k/dma.c b/drivers/net/wireless/ath/ath5k/dma.c index 0064be7ce5c9..21091c26a9a5 100644 --- a/drivers/net/wireless/ath/ath5k/dma.c +++ b/drivers/net/wireless/ath/ath5k/dma.c | |||
@@ -838,9 +838,9 @@ int ath5k_hw_dma_stop(struct ath5k_hw *ah) | |||
838 | for (i = 0; i < qmax; i++) { | 838 | for (i = 0; i < qmax; i++) { |
839 | err = ath5k_hw_stop_tx_dma(ah, i); | 839 | err = ath5k_hw_stop_tx_dma(ah, i); |
840 | /* -EINVAL -> queue inactive */ | 840 | /* -EINVAL -> queue inactive */ |
841 | if (err != -EINVAL) | 841 | if (err && err != -EINVAL) |
842 | return err; | 842 | return err; |
843 | } | 843 | } |
844 | 844 | ||
845 | return err; | 845 | return 0; |
846 | } | 846 | } |
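The dma.c fix changes the stop loop so that an inactive queue (-EINVAL) is simply skipped, any real error aborts immediately, and full success returns 0; previously the loop returned after the first queue no matter what. A small sketch of that error-propagation pattern, with the per-queue stop routine abstracted into a callback:

#include <linux/errno.h>

/* stop_one() stands in for the per-queue stop routine: 0 on success,
 * -EINVAL if the queue is not active, other negatives on real failure. */
static int stop_all_queues(int qmax, int (*stop_one)(int q))
{
	int i, err;

	for (i = 0; i < qmax; i++) {
		err = stop_one(i);
		if (err && err != -EINVAL)	/* -EINVAL just means "inactive" */
			return err;		/* real failure: bail out */
	}
	return 0;				/* all queues stopped or inactive */
}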
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c index e5f2b96a4c63..a702817daf72 100644 --- a/drivers/net/wireless/ath/ath5k/pcu.c +++ b/drivers/net/wireless/ath/ath5k/pcu.c | |||
@@ -86,7 +86,7 @@ int ath5k_hw_get_frame_duration(struct ath5k_hw *ah, | |||
86 | if (!ah->ah_bwmode) { | 86 | if (!ah->ah_bwmode) { |
87 | dur = ieee80211_generic_frame_duration(sc->hw, | 87 | dur = ieee80211_generic_frame_duration(sc->hw, |
88 | NULL, len, rate); | 88 | NULL, len, rate); |
89 | return dur; | 89 | return le16_to_cpu(dur); |
90 | } | 90 | } |
91 | 91 | ||
92 | bitrate = rate->bitrate; | 92 | bitrate = rate->bitrate; |
@@ -265,8 +265,6 @@ static inline void ath5k_hw_write_rate_duration(struct ath5k_hw *ah) | |||
265 | * what rate we should choose to TX ACKs. */ | 265 | * what rate we should choose to TX ACKs. */ |
266 | tx_time = ath5k_hw_get_frame_duration(ah, 10, rate); | 266 | tx_time = ath5k_hw_get_frame_duration(ah, 10, rate); |
267 | 267 | ||
268 | tx_time = le16_to_cpu(tx_time); | ||
269 | |||
270 | ath5k_hw_reg_write(ah, tx_time, reg); | 268 | ath5k_hw_reg_write(ah, tx_time, reg); |
271 | 269 | ||
272 | if (!(rate->flags & IEEE80211_RATE_SHORT_PREAMBLE)) | 270 | if (!(rate->flags & IEEE80211_RATE_SHORT_PREAMBLE)) |
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c index 78c26fdccad1..62ce2f4e8605 100644 --- a/drivers/net/wireless/ath/ath5k/phy.c +++ b/drivers/net/wireless/ath/ath5k/phy.c | |||
@@ -282,6 +282,34 @@ int ath5k_hw_phy_disable(struct ath5k_hw *ah) | |||
282 | return 0; | 282 | return 0; |
283 | } | 283 | } |
284 | 284 | ||
285 | /* | ||
286 | * Wait for synth to settle | ||
287 | */ | ||
288 | static void ath5k_hw_wait_for_synth(struct ath5k_hw *ah, | ||
289 | struct ieee80211_channel *channel) | ||
290 | { | ||
291 | /* | ||
292 | * On 5211+ read activation -> rx delay | ||
293 | * and use it (100ns steps). | ||
294 | */ | ||
295 | if (ah->ah_version != AR5K_AR5210) { | ||
296 | u32 delay; | ||
297 | delay = ath5k_hw_reg_read(ah, AR5K_PHY_RX_DELAY) & | ||
298 | AR5K_PHY_RX_DELAY_M; | ||
299 | delay = (channel->hw_value & CHANNEL_CCK) ? | ||
300 | ((delay << 2) / 22) : (delay / 10); | ||
301 | if (ah->ah_bwmode == AR5K_BWMODE_10MHZ) | ||
302 | delay = delay << 1; | ||
303 | if (ah->ah_bwmode == AR5K_BWMODE_5MHZ) | ||
304 | delay = delay << 2; | ||
305 | /* XXX: /2 on turbo ? Let's be safe | ||
306 | * for now */ | ||
307 | udelay(100 + delay); | ||
308 | } else { | ||
309 | mdelay(1); | ||
310 | } | ||
311 | } | ||
312 | |||
285 | 313 | ||
286 | /**********************\ | 314 | /**********************\ |
287 | * RF Gain optimization * | 315 | * RF Gain optimization * |
@@ -1253,6 +1281,7 @@ static int ath5k_hw_channel(struct ath5k_hw *ah, | |||
1253 | case AR5K_RF5111: | 1281 | case AR5K_RF5111: |
1254 | ret = ath5k_hw_rf5111_channel(ah, channel); | 1282 | ret = ath5k_hw_rf5111_channel(ah, channel); |
1255 | break; | 1283 | break; |
1284 | case AR5K_RF2317: | ||
1256 | case AR5K_RF2425: | 1285 | case AR5K_RF2425: |
1257 | ret = ath5k_hw_rf2425_channel(ah, channel); | 1286 | ret = ath5k_hw_rf2425_channel(ah, channel); |
1258 | break; | 1287 | break; |
@@ -3237,6 +3266,13 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel, | |||
3237 | /* Failed */ | 3266 | /* Failed */ |
3238 | if (i >= 100) | 3267 | if (i >= 100) |
3239 | return -EIO; | 3268 | return -EIO; |
3269 | |||
3270 | /* Set channel and wait for synth */ | ||
3271 | ret = ath5k_hw_channel(ah, channel); | ||
3272 | if (ret) | ||
3273 | return ret; | ||
3274 | |||
3275 | ath5k_hw_wait_for_synth(ah, channel); | ||
3240 | } | 3276 | } |
3241 | 3277 | ||
3242 | /* | 3278 | /* |
@@ -3251,13 +3287,53 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel, | |||
3251 | if (ret) | 3287 | if (ret) |
3252 | return ret; | 3288 | return ret; |
3253 | 3289 | ||
3290 | /* Write OFDM timings on 5212*/ | ||
3291 | if (ah->ah_version == AR5K_AR5212 && | ||
3292 | channel->hw_value & CHANNEL_OFDM) { | ||
3293 | |||
3294 | ret = ath5k_hw_write_ofdm_timings(ah, channel); | ||
3295 | if (ret) | ||
3296 | return ret; | ||
3297 | |||
3298 | /* Spur info is available only from EEPROM versions | ||
3299 | * greater than 5.3, but the EEPROM routines will use | ||
3300 | * static values for older versions */ | ||
3301 | if (ah->ah_mac_srev >= AR5K_SREV_AR5424) | ||
3302 | ath5k_hw_set_spur_mitigation_filter(ah, | ||
3303 | channel); | ||
3304 | } | ||
3305 | |||
3306 | /* If we used fast channel switching | ||
3307 | * we are done, release RF bus and | ||
3308 | * fire up NF calibration. | ||
3309 | * | ||
3310 | * Note: Only NF calibration due to | ||
3311 | * channel change, not AGC calibration | ||
3312 | * since AGC is still running ! | ||
3313 | */ | ||
3314 | if (fast) { | ||
3315 | /* | ||
3316 | * Release RF Bus grant | ||
3317 | */ | ||
3318 | AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_RFBUS_REQ, | ||
3319 | AR5K_PHY_RFBUS_REQ_REQUEST); | ||
3320 | |||
3321 | /* | ||
3322 | * Start NF calibration | ||
3323 | */ | ||
3324 | AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL, | ||
3325 | AR5K_PHY_AGCCTL_NF); | ||
3326 | |||
3327 | return ret; | ||
3328 | } | ||
3329 | |||
3254 | /* | 3330 | /* |
3255 | * For 5210 we do all initialization using | 3331 | * For 5210 we do all initialization using |
3256 | * initvals, so we don't have to modify | 3332 | * initvals, so we don't have to modify |
3257 | * any settings (5210 also only supports | 3333 | * any settings (5210 also only supports |
3258 | * a/aturbo modes) | 3334 | * a/aturbo modes) |
3259 | */ | 3335 | */ |
3260 | if ((ah->ah_version != AR5K_AR5210) && !fast) { | 3336 | if (ah->ah_version != AR5K_AR5210) { |
3261 | 3337 | ||
3262 | /* | 3338 | /* |
3263 | * Write initial RF gain settings | 3339 | * Write initial RF gain settings |
@@ -3276,22 +3352,6 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel, | |||
3276 | if (ret) | 3352 | if (ret) |
3277 | return ret; | 3353 | return ret; |
3278 | 3354 | ||
3279 | /* Write OFDM timings on 5212*/ | ||
3280 | if (ah->ah_version == AR5K_AR5212 && | ||
3281 | channel->hw_value & CHANNEL_OFDM) { | ||
3282 | |||
3283 | ret = ath5k_hw_write_ofdm_timings(ah, channel); | ||
3284 | if (ret) | ||
3285 | return ret; | ||
3286 | |||
3287 | /* Spur info is available only from EEPROM versions | ||
3288 | * greater than 5.3, but the EEPROM routines will use | ||
3289 | * static values for older versions */ | ||
3290 | if (ah->ah_mac_srev >= AR5K_SREV_AR5424) | ||
3291 | ath5k_hw_set_spur_mitigation_filter(ah, | ||
3292 | channel); | ||
3293 | } | ||
3294 | |||
3295 | /*Enable/disable 802.11b mode on 5111 | 3355 | /*Enable/disable 802.11b mode on 5111 |
3296 | (enable 2111 frequency converter + CCK)*/ | 3356 | (enable 2111 frequency converter + CCK)*/ |
3297 | if (ah->ah_radio == AR5K_RF5111) { | 3357 | if (ah->ah_radio == AR5K_RF5111) { |
@@ -3322,47 +3382,20 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel, | |||
3322 | */ | 3382 | */ |
3323 | ath5k_hw_reg_write(ah, AR5K_PHY_ACT_ENABLE, AR5K_PHY_ACT); | 3383 | ath5k_hw_reg_write(ah, AR5K_PHY_ACT_ENABLE, AR5K_PHY_ACT); |
3324 | 3384 | ||
3385 | ath5k_hw_wait_for_synth(ah, channel); | ||
3386 | |||
3325 | /* | 3387 | /* |
3326 | * On 5211+ read activation -> rx delay | 3388 | * Perform ADC test to see if baseband is ready |
3327 | * and use it. | 3389 | * Set tx hold and check adc test register |
3328 | */ | 3390 | */ |
3329 | if (ah->ah_version != AR5K_AR5210) { | 3391 | phy_tst1 = ath5k_hw_reg_read(ah, AR5K_PHY_TST1); |
3330 | u32 delay; | 3392 | ath5k_hw_reg_write(ah, AR5K_PHY_TST1_TXHOLD, AR5K_PHY_TST1); |
3331 | delay = ath5k_hw_reg_read(ah, AR5K_PHY_RX_DELAY) & | 3393 | for (i = 0; i <= 20; i++) { |
3332 | AR5K_PHY_RX_DELAY_M; | 3394 | if (!(ath5k_hw_reg_read(ah, AR5K_PHY_ADC_TEST) & 0x10)) |
3333 | delay = (channel->hw_value & CHANNEL_CCK) ? | 3395 | break; |
3334 | ((delay << 2) / 22) : (delay / 10); | 3396 | udelay(200); |
3335 | if (ah->ah_bwmode == AR5K_BWMODE_10MHZ) | ||
3336 | delay = delay << 1; | ||
3337 | if (ah->ah_bwmode == AR5K_BWMODE_5MHZ) | ||
3338 | delay = delay << 2; | ||
3339 | /* XXX: /2 on turbo ? Let's be safe | ||
3340 | * for now */ | ||
3341 | udelay(100 + delay); | ||
3342 | } else { | ||
3343 | mdelay(1); | ||
3344 | } | ||
3345 | |||
3346 | if (fast) | ||
3347 | /* | ||
3348 | * Release RF Bus grant | ||
3349 | */ | ||
3350 | AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_RFBUS_REQ, | ||
3351 | AR5K_PHY_RFBUS_REQ_REQUEST); | ||
3352 | else { | ||
3353 | /* | ||
3354 | * Perform ADC test to see if baseband is ready | ||
3355 | * Set tx hold and check adc test register | ||
3356 | */ | ||
3357 | phy_tst1 = ath5k_hw_reg_read(ah, AR5K_PHY_TST1); | ||
3358 | ath5k_hw_reg_write(ah, AR5K_PHY_TST1_TXHOLD, AR5K_PHY_TST1); | ||
3359 | for (i = 0; i <= 20; i++) { | ||
3360 | if (!(ath5k_hw_reg_read(ah, AR5K_PHY_ADC_TEST) & 0x10)) | ||
3361 | break; | ||
3362 | udelay(200); | ||
3363 | } | ||
3364 | ath5k_hw_reg_write(ah, phy_tst1, AR5K_PHY_TST1); | ||
3365 | } | 3397 | } |
3398 | ath5k_hw_reg_write(ah, phy_tst1, AR5K_PHY_TST1); | ||
3366 | 3399 | ||
3367 | /* | 3400 | /* |
3368 | * Start automatic gain control calibration | 3401 | * Start automatic gain control calibration |
diff --git a/drivers/net/wireless/ath/ath5k/reg.h b/drivers/net/wireless/ath/ath5k/reg.h index 7ad05d401ab5..fd14b9103951 100644 --- a/drivers/net/wireless/ath/ath5k/reg.h +++ b/drivers/net/wireless/ath/ath5k/reg.h | |||
@@ -1064,7 +1064,7 @@ | |||
1064 | /* | 1064 | /* |
1065 | * EEPROM command register | 1065 | * EEPROM command register |
1066 | */ | 1066 | */ |
1067 | #define AR5K_EEPROM_CMD 0x6008 /* Register Addres */ | 1067 | #define AR5K_EEPROM_CMD 0x6008 /* Register Address */ |
1068 | #define AR5K_EEPROM_CMD_READ 0x00000001 /* EEPROM read */ | 1068 | #define AR5K_EEPROM_CMD_READ 0x00000001 /* EEPROM read */ |
1069 | #define AR5K_EEPROM_CMD_WRITE 0x00000002 /* EEPROM write */ | 1069 | #define AR5K_EEPROM_CMD_WRITE 0x00000002 /* EEPROM write */ |
1070 | #define AR5K_EEPROM_CMD_RESET 0x00000004 /* EEPROM reset */ | 1070 | #define AR5K_EEPROM_CMD_RESET 0x00000004 /* EEPROM reset */ |
@@ -1084,7 +1084,7 @@ | |||
1084 | /* | 1084 | /* |
1085 | * EEPROM config register | 1085 | * EEPROM config register |
1086 | */ | 1086 | */ |
1087 | #define AR5K_EEPROM_CFG 0x6010 /* Register Addres */ | 1087 | #define AR5K_EEPROM_CFG 0x6010 /* Register Address */ |
1088 | #define AR5K_EEPROM_CFG_SIZE 0x00000003 /* Size determination override */ | 1088 | #define AR5K_EEPROM_CFG_SIZE 0x00000003 /* Size determination override */ |
1089 | #define AR5K_EEPROM_CFG_SIZE_AUTO 0 | 1089 | #define AR5K_EEPROM_CFG_SIZE_AUTO 0 |
1090 | #define AR5K_EEPROM_CFG_SIZE_4KBIT 1 | 1090 | #define AR5K_EEPROM_CFG_SIZE_4KBIT 1 |
@@ -1126,7 +1126,7 @@ | |||
1126 | * Second station id register (Upper 16 bits of MAC address + PCU settings) | 1126 | * Second station id register (Upper 16 bits of MAC address + PCU settings) |
1127 | */ | 1127 | */ |
1128 | #define AR5K_STA_ID1 0x8004 /* Register Address */ | 1128 | #define AR5K_STA_ID1 0x8004 /* Register Address */ |
1129 | #define AR5K_STA_ID1_ADDR_U16 0x0000ffff /* Upper 16 bits of MAC addres */ | 1129 | #define AR5K_STA_ID1_ADDR_U16 0x0000ffff /* Upper 16 bits of MAC address */ |
1130 | #define AR5K_STA_ID1_AP 0x00010000 /* Set AP mode */ | 1130 | #define AR5K_STA_ID1_AP 0x00010000 /* Set AP mode */ |
1131 | #define AR5K_STA_ID1_ADHOC 0x00020000 /* Set Ad-Hoc mode */ | 1131 | #define AR5K_STA_ID1_ADHOC 0x00020000 /* Set Ad-Hoc mode */ |
1132 | #define AR5K_STA_ID1_PWR_SV 0x00040000 /* Power save reporting */ | 1132 | #define AR5K_STA_ID1_PWR_SV 0x00040000 /* Power save reporting */ |
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c index 01880aa13e36..5e300bd3d264 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c | |||
@@ -679,10 +679,6 @@ static bool ar9002_hw_calibrate(struct ath_hw *ah, | |||
679 | 679 | ||
680 | /* Do NF cal only at longer intervals */ | 680 | /* Do NF cal only at longer intervals */ |
681 | if (longcal || nfcal_pending) { | 681 | if (longcal || nfcal_pending) { |
682 | /* Do periodic PAOffset Cal */ | ||
683 | ar9002_hw_pa_cal(ah, false); | ||
684 | ar9002_hw_olc_temp_compensation(ah); | ||
685 | |||
686 | /* | 682 | /* |
687 | * Get the value from the previous NF cal and update | 683 | * Get the value from the previous NF cal and update |
688 | * history buffer. | 684 | * history buffer. |
@@ -697,8 +693,12 @@ static bool ar9002_hw_calibrate(struct ath_hw *ah, | |||
697 | ath9k_hw_loadnf(ah, ah->curchan); | 693 | ath9k_hw_loadnf(ah, ah->curchan); |
698 | } | 694 | } |
699 | 695 | ||
700 | if (longcal) | 696 | if (longcal) { |
701 | ath9k_hw_start_nfcal(ah, false); | 697 | ath9k_hw_start_nfcal(ah, false); |
698 | /* Do periodic PAOffset Cal */ | ||
699 | ar9002_hw_pa_cal(ah, false); | ||
700 | ar9002_hw_olc_temp_compensation(ah); | ||
701 | } | ||
702 | } | 702 | } |
703 | 703 | ||
704 | return iscaldone; | 704 | return iscaldone; |
@@ -954,6 +954,9 @@ static void ar9002_hw_init_cal_settings(struct ath_hw *ah) | |||
954 | &adc_dc_cal_multi_sample; | 954 | &adc_dc_cal_multi_sample; |
955 | } | 955 | } |
956 | ah->supp_cals = ADC_GAIN_CAL | ADC_DC_CAL | IQ_MISMATCH_CAL; | 956 | ah->supp_cals = ADC_GAIN_CAL | ADC_DC_CAL | IQ_MISMATCH_CAL; |
957 | |||
958 | if (AR_SREV_9287(ah)) | ||
959 | ah->supp_cals &= ~ADC_GAIN_CAL; | ||
957 | } | 960 | } |
958 | } | 961 | } |
959 | 962 | ||
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c index f8a7771faee2..f44c84ab5dce 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c | |||
@@ -426,9 +426,8 @@ static void ar9002_hw_configpcipowersave(struct ath_hw *ah, | |||
426 | } | 426 | } |
427 | 427 | ||
428 | /* WAR for ASPM system hang */ | 428 | /* WAR for ASPM system hang */ |
429 | if (AR_SREV_9280(ah) || AR_SREV_9285(ah) || AR_SREV_9287(ah)) { | 429 | if (AR_SREV_9285(ah) || AR_SREV_9287(ah)) |
430 | val |= (AR_WA_BIT6 | AR_WA_BIT7); | 430 | val |= (AR_WA_BIT6 | AR_WA_BIT7); |
431 | } | ||
432 | 431 | ||
433 | if (AR_SREV_9285E_20(ah)) | 432 | if (AR_SREV_9285E_20(ah)) |
434 | val |= AR_WA_BIT23; | 433 | val |= AR_WA_BIT23; |
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h index 81f9cf294dec..9ecca93392e8 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h | |||
@@ -1842,7 +1842,7 @@ static const u32 ar9300_2p2_soc_preamble[][2] = { | |||
1842 | 1842 | ||
1843 | static const u32 ar9300PciePhy_pll_on_clkreq_disable_L1_2p2[][2] = { | 1843 | static const u32 ar9300PciePhy_pll_on_clkreq_disable_L1_2p2[][2] = { |
1844 | /* Addr allmodes */ | 1844 | /* Addr allmodes */ |
1845 | {0x00004040, 0x08212e5e}, | 1845 | {0x00004040, 0x0821265e}, |
1846 | {0x00004040, 0x0008003b}, | 1846 | {0x00004040, 0x0008003b}, |
1847 | {0x00004044, 0x00000000}, | 1847 | {0x00004044, 0x00000000}, |
1848 | }; | 1848 | }; |
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c index 6137634e46ca..06fb2c850535 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c | |||
@@ -146,8 +146,8 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah) | |||
146 | /* Sleep Setting */ | 146 | /* Sleep Setting */ |
147 | 147 | ||
148 | INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower, | 148 | INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower, |
149 | ar9300PciePhy_clkreq_enable_L1_2p2, | 149 | ar9300PciePhy_pll_on_clkreq_disable_L1_2p2, |
150 | ARRAY_SIZE(ar9300PciePhy_clkreq_enable_L1_2p2), | 150 | ARRAY_SIZE(ar9300PciePhy_pll_on_clkreq_disable_L1_2p2), |
151 | 2); | 151 | 2); |
152 | 152 | ||
153 | /* Fast clock modal settings */ | 153 | /* Fast clock modal settings */ |
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index 3681caf54282..1a7fa6ea4cf5 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h | |||
@@ -21,7 +21,6 @@ | |||
21 | #include <linux/device.h> | 21 | #include <linux/device.h> |
22 | #include <linux/leds.h> | 22 | #include <linux/leds.h> |
23 | #include <linux/completion.h> | 23 | #include <linux/completion.h> |
24 | #include <linux/pm_qos_params.h> | ||
25 | 24 | ||
26 | #include "debug.h" | 25 | #include "debug.h" |
27 | #include "common.h" | 26 | #include "common.h" |
@@ -57,8 +56,6 @@ struct ath_node; | |||
57 | 56 | ||
58 | #define A_MAX(a, b) ((a) > (b) ? (a) : (b)) | 57 | #define A_MAX(a, b) ((a) > (b) ? (a) : (b)) |
59 | 58 | ||
60 | #define ATH9K_PM_QOS_DEFAULT_VALUE 55 | ||
61 | |||
62 | #define TSF_TO_TU(_h,_l) \ | 59 | #define TSF_TO_TU(_h,_l) \ |
63 | ((((u32)(_h)) << 22) | (((u32)(_l)) >> 10)) | 60 | ((((u32)(_h)) << 22) | (((u32)(_l)) >> 10)) |
64 | 61 | ||
@@ -218,6 +215,7 @@ struct ath_frame_info { | |||
218 | struct ath_buf_state { | 215 | struct ath_buf_state { |
219 | u8 bf_type; | 216 | u8 bf_type; |
220 | u8 bfs_paprd; | 217 | u8 bfs_paprd; |
218 | unsigned long bfs_paprd_timestamp; | ||
221 | enum ath9k_internal_frame_type bfs_ftype; | 219 | enum ath9k_internal_frame_type bfs_ftype; |
222 | }; | 220 | }; |
223 | 221 | ||
@@ -593,7 +591,6 @@ struct ath_softc { | |||
593 | struct work_struct paprd_work; | 591 | struct work_struct paprd_work; |
594 | struct work_struct hw_check_work; | 592 | struct work_struct hw_check_work; |
595 | struct completion paprd_complete; | 593 | struct completion paprd_complete; |
596 | bool paprd_pending; | ||
597 | 594 | ||
598 | u32 intrstatus; | 595 | u32 intrstatus; |
599 | u32 sc_flags; /* SC_OP_* */ | 596 | u32 sc_flags; /* SC_OP_* */ |
@@ -633,8 +630,6 @@ struct ath_softc { | |||
633 | struct ath_descdma txsdma; | 630 | struct ath_descdma txsdma; |
634 | 631 | ||
635 | struct ath_ant_comb ant_comb; | 632 | struct ath_ant_comb ant_comb; |
636 | |||
637 | struct pm_qos_request_list pm_qos_req; | ||
638 | }; | 633 | }; |
639 | 634 | ||
640 | struct ath_wiphy { | 635 | struct ath_wiphy { |
@@ -666,7 +661,6 @@ static inline void ath_read_cachesize(struct ath_common *common, int *csz) | |||
666 | extern struct ieee80211_ops ath9k_ops; | 661 | extern struct ieee80211_ops ath9k_ops; |
667 | extern int ath9k_modparam_nohwcrypt; | 662 | extern int ath9k_modparam_nohwcrypt; |
668 | extern int led_blink; | 663 | extern int led_blink; |
669 | extern int ath9k_pm_qos_value; | ||
670 | extern bool is_ath9k_unloaded; | 664 | extern bool is_ath9k_unloaded; |
671 | 665 | ||
672 | irqreturn_t ath_isr(int irq, void *dev); | 666 | irqreturn_t ath_isr(int irq, void *dev); |
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c index 088f141f2006..749a93608664 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom_def.c +++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c | |||
@@ -226,6 +226,10 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah) | |||
226 | eep->baseEepHeader.pwdclkind == 0) | 226 | eep->baseEepHeader.pwdclkind == 0) |
227 | ah->need_an_top2_fixup = 1; | 227 | ah->need_an_top2_fixup = 1; |
228 | 228 | ||
229 | if ((common->bus_ops->ath_bus_type == ATH_USB) && | ||
230 | (AR_SREV_9280(ah))) | ||
231 | eep->modalHeader[0].xpaBiasLvl = 0; | ||
232 | |||
229 | return 0; | 233 | return 0; |
230 | } | 234 | } |
231 | 235 | ||
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c index 5ab3084eb9cb..07b1633b7f3f 100644 --- a/drivers/net/wireless/ath/ath9k/hif_usb.c +++ b/drivers/net/wireless/ath/ath9k/hif_usb.c | |||
@@ -219,8 +219,9 @@ static int __hif_usb_tx(struct hif_device_usb *hif_dev) | |||
219 | struct tx_buf *tx_buf = NULL; | 219 | struct tx_buf *tx_buf = NULL; |
220 | struct sk_buff *nskb = NULL; | 220 | struct sk_buff *nskb = NULL; |
221 | int ret = 0, i; | 221 | int ret = 0, i; |
222 | u16 *hdr, tx_skb_cnt = 0; | 222 | u16 tx_skb_cnt = 0; |
223 | u8 *buf; | 223 | u8 *buf; |
224 | __le16 *hdr; | ||
224 | 225 | ||
225 | if (hif_dev->tx.tx_skb_cnt == 0) | 226 | if (hif_dev->tx.tx_skb_cnt == 0) |
226 | return 0; | 227 | return 0; |
@@ -245,9 +246,9 @@ static int __hif_usb_tx(struct hif_device_usb *hif_dev) | |||
245 | 246 | ||
246 | buf = tx_buf->buf; | 247 | buf = tx_buf->buf; |
247 | buf += tx_buf->offset; | 248 | buf += tx_buf->offset; |
248 | hdr = (u16 *)buf; | 249 | hdr = (__le16 *)buf; |
249 | *hdr++ = nskb->len; | 250 | *hdr++ = cpu_to_le16(nskb->len); |
250 | *hdr++ = ATH_USB_TX_STREAM_MODE_TAG; | 251 | *hdr++ = cpu_to_le16(ATH_USB_TX_STREAM_MODE_TAG); |
251 | buf += 4; | 252 | buf += 4; |
252 | memcpy(buf, nskb->data, nskb->len); | 253 | memcpy(buf, nskb->data, nskb->len); |
253 | tx_buf->len = nskb->len + 4; | 254 | tx_buf->len = nskb->len + 4; |
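The hif_usb.c hunk makes the 4-byte stream header explicitly little-endian: length and tag are built with cpu_to_le16() and stored through an __le16 pointer, so the on-wire layout is identical on big-endian hosts. A sketch of the same framing step (buffer layout and tag value assumed, not the driver's exact ones):

#include <linux/types.h>
#include <linux/string.h>
#include <asm/byteorder.h>

#define EXAMPLE_STREAM_TAG 0x697e	/* illustrative tag value */

/* Write [len:le16][tag:le16][payload] into buf; returns bytes used. */
static size_t example_pack_frame(u8 *buf, const u8 *payload, u16 len)
{
	__le16 *hdr = (__le16 *)buf;

	*hdr++ = cpu_to_le16(len);		/* always little-endian on the wire */
	*hdr++ = cpu_to_le16(EXAMPLE_STREAM_TAG);
	memcpy(buf + 4, payload, len);
	return 4 + len;
}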
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h index a099b3e87ed3..780ac5eac501 100644 --- a/drivers/net/wireless/ath/ath9k/htc.h +++ b/drivers/net/wireless/ath/ath9k/htc.h | |||
@@ -78,7 +78,7 @@ struct tx_frame_hdr { | |||
78 | u8 node_idx; | 78 | u8 node_idx; |
79 | u8 vif_idx; | 79 | u8 vif_idx; |
80 | u8 tidno; | 80 | u8 tidno; |
81 | u32 flags; /* ATH9K_HTC_TX_* */ | 81 | __be32 flags; /* ATH9K_HTC_TX_* */ |
82 | u8 key_type; | 82 | u8 key_type; |
83 | u8 keyix; | 83 | u8 keyix; |
84 | u8 reserved[26]; | 84 | u8 reserved[26]; |
@@ -433,6 +433,7 @@ void ath9k_htc_txep(void *priv, struct sk_buff *skb, enum htc_endpoint_id ep_id, | |||
433 | void ath9k_htc_beaconep(void *drv_priv, struct sk_buff *skb, | 433 | void ath9k_htc_beaconep(void *drv_priv, struct sk_buff *skb, |
434 | enum htc_endpoint_id ep_id, bool txok); | 434 | enum htc_endpoint_id ep_id, bool txok); |
435 | 435 | ||
436 | int ath9k_htc_update_cap_target(struct ath9k_htc_priv *priv); | ||
436 | void ath9k_htc_station_work(struct work_struct *work); | 437 | void ath9k_htc_station_work(struct work_struct *work); |
437 | void ath9k_htc_aggr_work(struct work_struct *work); | 438 | void ath9k_htc_aggr_work(struct work_struct *work); |
438 | void ath9k_ani_work(struct work_struct *work);; | 439 | void ath9k_ani_work(struct work_struct *work);; |
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c index 38433f9bfe59..0352f0994caa 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c | |||
@@ -142,9 +142,6 @@ static void ath9k_deinit_priv(struct ath9k_htc_priv *priv) | |||
142 | { | 142 | { |
143 | ath9k_htc_exit_debug(priv->ah); | 143 | ath9k_htc_exit_debug(priv->ah); |
144 | ath9k_hw_deinit(priv->ah); | 144 | ath9k_hw_deinit(priv->ah); |
145 | tasklet_kill(&priv->swba_tasklet); | ||
146 | tasklet_kill(&priv->rx_tasklet); | ||
147 | tasklet_kill(&priv->tx_tasklet); | ||
148 | kfree(priv->ah); | 145 | kfree(priv->ah); |
149 | priv->ah = NULL; | 146 | priv->ah = NULL; |
150 | } | 147 | } |
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c index 845b4c938d16..6bb59958f71e 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c | |||
@@ -301,6 +301,16 @@ static int ath9k_htc_add_monitor_interface(struct ath9k_htc_priv *priv) | |||
301 | 301 | ||
302 | priv->nstations++; | 302 | priv->nstations++; |
303 | 303 | ||
304 | /* | ||
305 | * Set chainmask etc. on the target. | ||
306 | */ | ||
307 | ret = ath9k_htc_update_cap_target(priv); | ||
308 | if (ret) | ||
309 | ath_dbg(common, ATH_DBG_CONFIG, | ||
310 | "Failed to update capability in target\n"); | ||
311 | |||
312 | priv->ah->is_monitoring = true; | ||
313 | |||
304 | return 0; | 314 | return 0; |
305 | 315 | ||
306 | err_vif: | 316 | err_vif: |
@@ -328,6 +338,7 @@ static int ath9k_htc_remove_monitor_interface(struct ath9k_htc_priv *priv) | |||
328 | } | 338 | } |
329 | 339 | ||
330 | priv->nstations--; | 340 | priv->nstations--; |
341 | priv->ah->is_monitoring = false; | ||
331 | 342 | ||
332 | return 0; | 343 | return 0; |
333 | } | 344 | } |
@@ -419,7 +430,7 @@ static int ath9k_htc_remove_station(struct ath9k_htc_priv *priv, | |||
419 | return 0; | 430 | return 0; |
420 | } | 431 | } |
421 | 432 | ||
422 | static int ath9k_htc_update_cap_target(struct ath9k_htc_priv *priv) | 433 | int ath9k_htc_update_cap_target(struct ath9k_htc_priv *priv) |
423 | { | 434 | { |
424 | struct ath9k_htc_cap_target tcap; | 435 | struct ath9k_htc_cap_target tcap; |
425 | int ret; | 436 | int ret; |
@@ -1014,12 +1025,6 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw) | |||
1014 | int ret = 0; | 1025 | int ret = 0; |
1015 | u8 cmd_rsp; | 1026 | u8 cmd_rsp; |
1016 | 1027 | ||
1017 | /* Cancel all the running timers/work .. */ | ||
1018 | cancel_work_sync(&priv->fatal_work); | ||
1019 | cancel_work_sync(&priv->ps_work); | ||
1020 | cancel_delayed_work_sync(&priv->ath9k_led_blink_work); | ||
1021 | ath9k_led_stop_brightness(priv); | ||
1022 | |||
1023 | mutex_lock(&priv->mutex); | 1028 | mutex_lock(&priv->mutex); |
1024 | 1029 | ||
1025 | if (priv->op_flags & OP_INVALID) { | 1030 | if (priv->op_flags & OP_INVALID) { |
@@ -1033,8 +1038,23 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw) | |||
1033 | WMI_CMD(WMI_DISABLE_INTR_CMDID); | 1038 | WMI_CMD(WMI_DISABLE_INTR_CMDID); |
1034 | WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID); | 1039 | WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID); |
1035 | WMI_CMD(WMI_STOP_RECV_CMDID); | 1040 | WMI_CMD(WMI_STOP_RECV_CMDID); |
1041 | |||
1042 | tasklet_kill(&priv->swba_tasklet); | ||
1043 | tasklet_kill(&priv->rx_tasklet); | ||
1044 | tasklet_kill(&priv->tx_tasklet); | ||
1045 | |||
1036 | skb_queue_purge(&priv->tx_queue); | 1046 | skb_queue_purge(&priv->tx_queue); |
1037 | 1047 | ||
1048 | mutex_unlock(&priv->mutex); | ||
1049 | |||
1050 | /* Cancel all the running timers/work .. */ | ||
1051 | cancel_work_sync(&priv->fatal_work); | ||
1052 | cancel_work_sync(&priv->ps_work); | ||
1053 | cancel_delayed_work_sync(&priv->ath9k_led_blink_work); | ||
1054 | ath9k_led_stop_brightness(priv); | ||
1055 | |||
1056 | mutex_lock(&priv->mutex); | ||
1057 | |||
1038 | /* Remove monitor interface here */ | 1058 | /* Remove monitor interface here */ |
1039 | if (ah->opmode == NL80211_IFTYPE_MONITOR) { | 1059 | if (ah->opmode == NL80211_IFTYPE_MONITOR) { |
1040 | if (ath9k_htc_remove_monitor_interface(priv)) | 1060 | if (ath9k_htc_remove_monitor_interface(priv)) |
@@ -1186,6 +1206,20 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed) | |||
1186 | } | 1206 | } |
1187 | } | 1207 | } |
1188 | 1208 | ||
1209 | /* | ||
1210 | * Monitor interface should be added before | ||
1211 | * IEEE80211_CONF_CHANGE_CHANNEL is handled. | ||
1212 | */ | ||
1213 | if (changed & IEEE80211_CONF_CHANGE_MONITOR) { | ||
1214 | if (conf->flags & IEEE80211_CONF_MONITOR) { | ||
1215 | if (ath9k_htc_add_monitor_interface(priv)) | ||
1216 | ath_err(common, "Failed to set monitor mode\n"); | ||
1217 | else | ||
1218 | ath_dbg(common, ATH_DBG_CONFIG, | ||
1219 | "HW opmode set to Monitor mode\n"); | ||
1220 | } | ||
1221 | } | ||
1222 | |||
1189 | if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { | 1223 | if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { |
1190 | struct ieee80211_channel *curchan = hw->conf.channel; | 1224 | struct ieee80211_channel *curchan = hw->conf.channel; |
1191 | int pos = curchan->hw_value; | 1225 | int pos = curchan->hw_value; |
@@ -1221,16 +1255,6 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed) | |||
1221 | ath_update_txpow(priv); | 1255 | ath_update_txpow(priv); |
1222 | } | 1256 | } |
1223 | 1257 | ||
1224 | if (changed & IEEE80211_CONF_CHANGE_MONITOR) { | ||
1225 | if (conf->flags & IEEE80211_CONF_MONITOR) { | ||
1226 | if (ath9k_htc_add_monitor_interface(priv)) | ||
1227 | ath_err(common, "Failed to set monitor mode\n"); | ||
1228 | else | ||
1229 | ath_dbg(common, ATH_DBG_CONFIG, | ||
1230 | "HW opmode set to Monitor mode\n"); | ||
1231 | } | ||
1232 | } | ||
1233 | |||
1234 | if (changed & IEEE80211_CONF_CHANGE_IDLE) { | 1258 | if (changed & IEEE80211_CONF_CHANGE_IDLE) { |
1235 | mutex_lock(&priv->htc_pm_lock); | 1259 | mutex_lock(&priv->htc_pm_lock); |
1236 | if (!priv->ps_idle) { | 1260 | if (!priv->ps_idle) { |
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c index 33f36029fa4f..7a5ffca21958 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c | |||
@@ -113,6 +113,7 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb) | |||
113 | 113 | ||
114 | if (ieee80211_is_data(fc)) { | 114 | if (ieee80211_is_data(fc)) { |
115 | struct tx_frame_hdr tx_hdr; | 115 | struct tx_frame_hdr tx_hdr; |
116 | u32 flags = 0; | ||
116 | u8 *qc; | 117 | u8 *qc; |
117 | 118 | ||
118 | memset(&tx_hdr, 0, sizeof(struct tx_frame_hdr)); | 119 | memset(&tx_hdr, 0, sizeof(struct tx_frame_hdr)); |
@@ -136,13 +137,14 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb) | |||
136 | /* Check for RTS protection */ | 137 | /* Check for RTS protection */ |
137 | if (priv->hw->wiphy->rts_threshold != (u32) -1) | 138 | if (priv->hw->wiphy->rts_threshold != (u32) -1) |
138 | if (skb->len > priv->hw->wiphy->rts_threshold) | 139 | if (skb->len > priv->hw->wiphy->rts_threshold) |
139 | tx_hdr.flags |= ATH9K_HTC_TX_RTSCTS; | 140 | flags |= ATH9K_HTC_TX_RTSCTS; |
140 | 141 | ||
141 | /* CTS-to-self */ | 142 | /* CTS-to-self */ |
142 | if (!(tx_hdr.flags & ATH9K_HTC_TX_RTSCTS) && | 143 | if (!(flags & ATH9K_HTC_TX_RTSCTS) && |
143 | (priv->op_flags & OP_PROTECT_ENABLE)) | 144 | (priv->op_flags & OP_PROTECT_ENABLE)) |
144 | tx_hdr.flags |= ATH9K_HTC_TX_CTSONLY; | 145 | flags |= ATH9K_HTC_TX_CTSONLY; |
145 | 146 | ||
147 | tx_hdr.flags = cpu_to_be32(flags); | ||
146 | tx_hdr.key_type = ath9k_cmn_get_hw_crypto_keytype(skb); | 148 | tx_hdr.key_type = ath9k_cmn_get_hw_crypto_keytype(skb); |
147 | if (tx_hdr.key_type == ATH9K_KEY_TYPE_CLEAR) | 149 | if (tx_hdr.key_type == ATH9K_KEY_TYPE_CLEAR) |
148 | tx_hdr.keyix = (u8) ATH9K_TXKEYIX_INVALID; | 150 | tx_hdr.keyix = (u8) ATH9K_TXKEYIX_INVALID; |
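The htc_drv_txrx.c change accumulates the RTS/CTS flags in a host-order u32 and converts once with cpu_to_be32() when filling the __be32 header field, instead of OR-ing host-order bits into a big-endian field. A compact sketch of that pattern (field and bit names hypothetical):

#include <linux/types.h>
#include <asm/byteorder.h>

#define EX_TX_RTSCTS  0x1
#define EX_TX_CTSONLY 0x2

struct example_tx_hdr {
	__be32 flags;			/* big-endian, as the firmware expects */
};

static void example_fill_hdr(struct example_tx_hdr *hdr,
			     bool want_rts, bool want_cts)
{
	u32 flags = 0;			/* accumulate in CPU byte order */

	if (want_rts)
		flags |= EX_TX_RTSCTS;
	else if (want_cts)
		flags |= EX_TX_CTSONLY;

	hdr->flags = cpu_to_be32(flags);	/* single conversion at the end */
}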
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index fde978665e07..9f01e50d5cda 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c | |||
@@ -369,6 +369,9 @@ static void ath9k_hw_init_config(struct ath_hw *ah) | |||
369 | else | 369 | else |
370 | ah->config.ht_enable = 0; | 370 | ah->config.ht_enable = 0; |
371 | 371 | ||
372 | /* PAPRD needs some more work to be enabled */ | ||
373 | ah->config.paprd_disable = 1; | ||
374 | |||
372 | ah->config.rx_intr_mitigation = true; | 375 | ah->config.rx_intr_mitigation = true; |
373 | ah->config.pcieSerDesWrite = true; | 376 | ah->config.pcieSerDesWrite = true; |
374 | 377 | ||
@@ -436,9 +439,10 @@ static int ath9k_hw_init_macaddr(struct ath_hw *ah) | |||
436 | 439 | ||
437 | static int ath9k_hw_post_init(struct ath_hw *ah) | 440 | static int ath9k_hw_post_init(struct ath_hw *ah) |
438 | { | 441 | { |
442 | struct ath_common *common = ath9k_hw_common(ah); | ||
439 | int ecode; | 443 | int ecode; |
440 | 444 | ||
441 | if (!AR_SREV_9271(ah)) { | 445 | if (common->bus_ops->ath_bus_type != ATH_USB) { |
442 | if (!ath9k_hw_chip_test(ah)) | 446 | if (!ath9k_hw_chip_test(ah)) |
443 | return -ENODEV; | 447 | return -ENODEV; |
444 | } | 448 | } |
@@ -1213,7 +1217,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, | |||
1213 | ah->txchainmask = common->tx_chainmask; | 1217 | ah->txchainmask = common->tx_chainmask; |
1214 | ah->rxchainmask = common->rx_chainmask; | 1218 | ah->rxchainmask = common->rx_chainmask; |
1215 | 1219 | ||
1216 | if (!ah->chip_fullsleep) { | 1220 | if ((common->bus_ops->ath_bus_type != ATH_USB) && !ah->chip_fullsleep) { |
1217 | ath9k_hw_abortpcurecv(ah); | 1221 | ath9k_hw_abortpcurecv(ah); |
1218 | if (!ath9k_hw_stopdmarecv(ah)) { | 1222 | if (!ath9k_hw_stopdmarecv(ah)) { |
1219 | ath_dbg(common, ATH_DBG_XMIT, | 1223 | ath_dbg(common, ATH_DBG_XMIT, |
@@ -1932,7 +1936,8 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah) | |||
1932 | pCap->rx_status_len = sizeof(struct ar9003_rxs); | 1936 | pCap->rx_status_len = sizeof(struct ar9003_rxs); |
1933 | pCap->tx_desc_len = sizeof(struct ar9003_txc); | 1937 | pCap->tx_desc_len = sizeof(struct ar9003_txc); |
1934 | pCap->txs_len = sizeof(struct ar9003_txs); | 1938 | pCap->txs_len = sizeof(struct ar9003_txs); |
1935 | if (ah->eep_ops->get_eeprom(ah, EEP_PAPRD)) | 1939 | if (!ah->config.paprd_disable && |
1940 | ah->eep_ops->get_eeprom(ah, EEP_PAPRD)) | ||
1936 | pCap->hw_caps |= ATH9K_HW_CAP_PAPRD; | 1941 | pCap->hw_caps |= ATH9K_HW_CAP_PAPRD; |
1937 | } else { | 1942 | } else { |
1938 | pCap->tx_desc_len = sizeof(struct ath_desc); | 1943 | pCap->tx_desc_len = sizeof(struct ath_desc); |
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h index 5a3dfec45e96..ea9fde670646 100644 --- a/drivers/net/wireless/ath/ath9k/hw.h +++ b/drivers/net/wireless/ath/ath9k/hw.h | |||
@@ -225,6 +225,7 @@ struct ath9k_ops_config { | |||
225 | u32 pcie_waen; | 225 | u32 pcie_waen; |
226 | u8 analog_shiftreg; | 226 | u8 analog_shiftreg; |
227 | u8 ht_enable; | 227 | u8 ht_enable; |
228 | u8 paprd_disable; | ||
228 | u32 ofdm_trig_low; | 229 | u32 ofdm_trig_low; |
229 | u32 ofdm_trig_high; | 230 | u32 ofdm_trig_high; |
230 | u32 cck_trig_high; | 231 | u32 cck_trig_high; |
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index 767d8b86f1e1..a033d01bf8a0 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c | |||
@@ -41,10 +41,6 @@ static int ath9k_btcoex_enable; | |||
41 | module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444); | 41 | module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444); |
42 | MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence"); | 42 | MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence"); |
43 | 43 | ||
44 | int ath9k_pm_qos_value = ATH9K_PM_QOS_DEFAULT_VALUE; | ||
45 | module_param_named(pmqos, ath9k_pm_qos_value, int, S_IRUSR | S_IRGRP | S_IROTH); | ||
46 | MODULE_PARM_DESC(pmqos, "User specified PM-QOS value"); | ||
47 | |||
48 | bool is_ath9k_unloaded; | 44 | bool is_ath9k_unloaded; |
49 | /* We use the hw_value as an index into our private channel structure */ | 45 | /* We use the hw_value as an index into our private channel structure */ |
50 | 46 | ||
@@ -598,8 +594,6 @@ err_btcoex: | |||
598 | err_queues: | 594 | err_queues: |
599 | ath9k_hw_deinit(ah); | 595 | ath9k_hw_deinit(ah); |
600 | err_hw: | 596 | err_hw: |
601 | tasklet_kill(&sc->intr_tq); | ||
602 | tasklet_kill(&sc->bcon_tasklet); | ||
603 | 597 | ||
604 | kfree(ah); | 598 | kfree(ah); |
605 | sc->sc_ah = NULL; | 599 | sc->sc_ah = NULL; |
@@ -764,9 +758,6 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid, | |||
764 | ath_init_leds(sc); | 758 | ath_init_leds(sc); |
765 | ath_start_rfkill_poll(sc); | 759 | ath_start_rfkill_poll(sc); |
766 | 760 | ||
767 | pm_qos_add_request(&sc->pm_qos_req, PM_QOS_CPU_DMA_LATENCY, | ||
768 | PM_QOS_DEFAULT_VALUE); | ||
769 | |||
770 | return 0; | 761 | return 0; |
771 | 762 | ||
772 | error_world: | 763 | error_world: |
@@ -807,9 +798,6 @@ static void ath9k_deinit_softc(struct ath_softc *sc) | |||
807 | 798 | ||
808 | ath9k_hw_deinit(sc->sc_ah); | 799 | ath9k_hw_deinit(sc->sc_ah); |
809 | 800 | ||
810 | tasklet_kill(&sc->intr_tq); | ||
811 | tasklet_kill(&sc->bcon_tasklet); | ||
812 | |||
813 | kfree(sc->sc_ah); | 801 | kfree(sc->sc_ah); |
814 | sc->sc_ah = NULL; | 802 | sc->sc_ah = NULL; |
815 | } | 803 | } |
@@ -824,6 +812,8 @@ void ath9k_deinit_device(struct ath_softc *sc) | |||
824 | wiphy_rfkill_stop_polling(sc->hw->wiphy); | 812 | wiphy_rfkill_stop_polling(sc->hw->wiphy); |
825 | ath_deinit_leds(sc); | 813 | ath_deinit_leds(sc); |
826 | 814 | ||
815 | ath9k_ps_restore(sc); | ||
816 | |||
827 | for (i = 0; i < sc->num_sec_wiphy; i++) { | 817 | for (i = 0; i < sc->num_sec_wiphy; i++) { |
828 | struct ath_wiphy *aphy = sc->sec_wiphy[i]; | 818 | struct ath_wiphy *aphy = sc->sec_wiphy[i]; |
829 | if (aphy == NULL) | 819 | if (aphy == NULL) |
@@ -834,7 +824,6 @@ void ath9k_deinit_device(struct ath_softc *sc) | |||
834 | } | 824 | } |
835 | 825 | ||
836 | ieee80211_unregister_hw(hw); | 826 | ieee80211_unregister_hw(hw); |
837 | pm_qos_remove_request(&sc->pm_qos_req); | ||
838 | ath_rx_cleanup(sc); | 827 | ath_rx_cleanup(sc); |
839 | ath_tx_cleanup(sc); | 828 | ath_tx_cleanup(sc); |
840 | ath9k_deinit_softc(sc); | 829 | ath9k_deinit_softc(sc); |
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c index 180170d3ce25..2915b11edefb 100644 --- a/drivers/net/wireless/ath/ath9k/mac.c +++ b/drivers/net/wireless/ath/ath9k/mac.c | |||
@@ -885,7 +885,7 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints) | |||
885 | struct ath_common *common = ath9k_hw_common(ah); | 885 | struct ath_common *common = ath9k_hw_common(ah); |
886 | 886 | ||
887 | if (!(ints & ATH9K_INT_GLOBAL)) | 887 | if (!(ints & ATH9K_INT_GLOBAL)) |
888 | ath9k_hw_enable_interrupts(ah); | 888 | ath9k_hw_disable_interrupts(ah); |
889 | 889 | ||
890 | ath_dbg(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints); | 890 | ath_dbg(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints); |
891 | 891 | ||
@@ -963,7 +963,8 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints) | |||
963 | REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER); | 963 | REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER); |
964 | } | 964 | } |
965 | 965 | ||
966 | ath9k_hw_enable_interrupts(ah); | 966 | if (ints & ATH9K_INT_GLOBAL) |
967 | ath9k_hw_enable_interrupts(ah); | ||
967 | 968 | ||
968 | return; | 969 | return; |
969 | } | 970 | } |
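The mac.c fix corrects two inverted conditions: when the caller clears the GLOBAL bit the routine must disable interrupts (not enable them) before touching the mask, and it may only re-enable them at the end when GLOBAL was actually requested. A schematic of that gating, with the enable/disable primitives abstracted as callbacks:

#include <linux/types.h>

#define EX_INT_GLOBAL 0x80000000u	/* illustrative "master enable" bit */

static void example_set_interrupts(u32 ints,
				   void (*enable)(void), void (*disable)(void))
{
	/* Caller does not want the master enable: make sure it is off
	 * before reprogramming the per-source mask bits. */
	if (!(ints & EX_INT_GLOBAL))
		disable();

	/* ... program the per-source interrupt mask registers here ... */

	/* Only turn the master switch back on when it was requested. */
	if (ints & EX_INT_GLOBAL)
		enable();
}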
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index f90a6ca94a76..a09d15f7aa6e 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c | |||
@@ -325,6 +325,8 @@ static bool ath_paprd_send_frame(struct ath_softc *sc, struct sk_buff *skb, int | |||
325 | { | 325 | { |
326 | struct ieee80211_hw *hw = sc->hw; | 326 | struct ieee80211_hw *hw = sc->hw; |
327 | struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); | 327 | struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); |
328 | struct ath_hw *ah = sc->sc_ah; | ||
329 | struct ath_common *common = ath9k_hw_common(ah); | ||
328 | struct ath_tx_control txctl; | 330 | struct ath_tx_control txctl; |
329 | int time_left; | 331 | int time_left; |
330 | 332 | ||
@@ -340,14 +342,16 @@ static bool ath_paprd_send_frame(struct ath_softc *sc, struct sk_buff *skb, int | |||
340 | tx_info->control.rates[1].idx = -1; | 342 | tx_info->control.rates[1].idx = -1; |
341 | 343 | ||
342 | init_completion(&sc->paprd_complete); | 344 | init_completion(&sc->paprd_complete); |
343 | sc->paprd_pending = true; | ||
344 | txctl.paprd = BIT(chain); | 345 | txctl.paprd = BIT(chain); |
345 | if (ath_tx_start(hw, skb, &txctl) != 0) | 346 | |
347 | if (ath_tx_start(hw, skb, &txctl) != 0) { | ||
348 | ath_dbg(common, ATH_DBG_XMIT, "PAPRD TX failed\n"); | ||
349 | dev_kfree_skb_any(skb); | ||
346 | return false; | 350 | return false; |
351 | } | ||
347 | 352 | ||
348 | time_left = wait_for_completion_timeout(&sc->paprd_complete, | 353 | time_left = wait_for_completion_timeout(&sc->paprd_complete, |
349 | msecs_to_jiffies(ATH_PAPRD_TIMEOUT)); | 354 | msecs_to_jiffies(ATH_PAPRD_TIMEOUT)); |
350 | sc->paprd_pending = false; | ||
351 | 355 | ||
352 | if (!time_left) | 356 | if (!time_left) |
353 | ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_CALIBRATE, | 357 | ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_CALIBRATE, |
@@ -592,14 +596,12 @@ void ath9k_tasklet(unsigned long data) | |||
592 | u32 status = sc->intrstatus; | 596 | u32 status = sc->intrstatus; |
593 | u32 rxmask; | 597 | u32 rxmask; |
594 | 598 | ||
595 | ath9k_ps_wakeup(sc); | ||
596 | |||
597 | if (status & ATH9K_INT_FATAL) { | 599 | if (status & ATH9K_INT_FATAL) { |
598 | ath_reset(sc, true); | 600 | ath_reset(sc, true); |
599 | ath9k_ps_restore(sc); | ||
600 | return; | 601 | return; |
601 | } | 602 | } |
602 | 603 | ||
604 | ath9k_ps_wakeup(sc); | ||
603 | spin_lock(&sc->sc_pcu_lock); | 605 | spin_lock(&sc->sc_pcu_lock); |
604 | 606 | ||
605 | if (!ath9k_hw_check_alive(ah)) | 607 | if (!ath9k_hw_check_alive(ah)) |
@@ -955,8 +957,6 @@ void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw) | |||
955 | 957 | ||
956 | spin_unlock_bh(&sc->sc_pcu_lock); | 958 | spin_unlock_bh(&sc->sc_pcu_lock); |
957 | ath9k_ps_restore(sc); | 959 | ath9k_ps_restore(sc); |
958 | |||
959 | ath9k_setpower(sc, ATH9K_PM_FULL_SLEEP); | ||
960 | } | 960 | } |
961 | 961 | ||
962 | int ath_reset(struct ath_softc *sc, bool retry_tx) | 962 | int ath_reset(struct ath_softc *sc, bool retry_tx) |
@@ -969,6 +969,7 @@ int ath_reset(struct ath_softc *sc, bool retry_tx) | |||
969 | /* Stop ANI */ | 969 | /* Stop ANI */ |
970 | del_timer_sync(&common->ani.timer); | 970 | del_timer_sync(&common->ani.timer); |
971 | 971 | ||
972 | ath9k_ps_wakeup(sc); | ||
972 | spin_lock_bh(&sc->sc_pcu_lock); | 973 | spin_lock_bh(&sc->sc_pcu_lock); |
973 | 974 | ||
974 | ieee80211_stop_queues(hw); | 975 | ieee80211_stop_queues(hw); |
@@ -1015,6 +1016,7 @@ int ath_reset(struct ath_softc *sc, bool retry_tx) | |||
1015 | 1016 | ||
1016 | /* Start ANI */ | 1017 | /* Start ANI */ |
1017 | ath_start_ani(common); | 1018 | ath_start_ani(common); |
1019 | ath9k_ps_restore(sc); | ||
1018 | 1020 | ||
1019 | return r; | 1021 | return r; |
1020 | } | 1022 | } |
@@ -1171,12 +1173,6 @@ static int ath9k_start(struct ieee80211_hw *hw) | |||
1171 | ath9k_btcoex_timer_resume(sc); | 1173 | ath9k_btcoex_timer_resume(sc); |
1172 | } | 1174 | } |
1173 | 1175 | ||
1174 | /* User has the option to provide pm-qos value as a module | ||
1175 | * parameter rather than using the default value of | ||
1176 | * 'ATH9K_PM_QOS_DEFAULT_VALUE'. | ||
1177 | */ | ||
1178 | pm_qos_update_request(&sc->pm_qos_req, ath9k_pm_qos_value); | ||
1179 | |||
1180 | if (ah->caps.pcie_lcr_extsync_en && common->bus_ops->extn_synch_en) | 1176 | if (ah->caps.pcie_lcr_extsync_en && common->bus_ops->extn_synch_en) |
1181 | common->bus_ops->extn_synch_en(common); | 1177 | common->bus_ops->extn_synch_en(common); |
1182 | 1178 | ||
@@ -1309,6 +1305,9 @@ static void ath9k_stop(struct ieee80211_hw *hw) | |||
1309 | 1305 | ||
1310 | spin_lock_bh(&sc->sc_pcu_lock); | 1306 | spin_lock_bh(&sc->sc_pcu_lock); |
1311 | 1307 | ||
1308 | /* prevent tasklets to enable interrupts once we disable them */ | ||
1309 | ah->imask &= ~ATH9K_INT_GLOBAL; | ||
1310 | |||
1312 | /* make sure h/w will not generate any interrupt | 1311 | /* make sure h/w will not generate any interrupt |
1313 | * before setting the invalid flag. */ | 1312 | * before setting the invalid flag. */ |
1314 | ath9k_hw_disable_interrupts(ah); | 1313 | ath9k_hw_disable_interrupts(ah); |
@@ -1326,6 +1325,12 @@ static void ath9k_stop(struct ieee80211_hw *hw) | |||
1326 | 1325 | ||
1327 | spin_unlock_bh(&sc->sc_pcu_lock); | 1326 | spin_unlock_bh(&sc->sc_pcu_lock); |
1328 | 1327 | ||
1328 | /* we can now sync irq and kill any running tasklets, since we already | ||
1329 | * disabled interrupts and not holding a spin lock */ | ||
1330 | synchronize_irq(sc->irq); | ||
1331 | tasklet_kill(&sc->intr_tq); | ||
1332 | tasklet_kill(&sc->bcon_tasklet); | ||
1333 | |||
1329 | ath9k_ps_restore(sc); | 1334 | ath9k_ps_restore(sc); |
1330 | 1335 | ||
1331 | sc->ps_idle = true; | 1336 | sc->ps_idle = true; |
@@ -1334,8 +1339,6 @@ static void ath9k_stop(struct ieee80211_hw *hw) | |||
1334 | 1339 | ||
1335 | sc->sc_flags |= SC_OP_INVALID; | 1340 | sc->sc_flags |= SC_OP_INVALID; |
1336 | 1341 | ||
1337 | pm_qos_update_request(&sc->pm_qos_req, PM_QOS_DEFAULT_VALUE); | ||
1338 | |||
1339 | mutex_unlock(&sc->mutex); | 1342 | mutex_unlock(&sc->mutex); |
1340 | 1343 | ||
1341 | ath_dbg(common, ATH_DBG_CONFIG, "Driver halt\n"); | 1344 | ath_dbg(common, ATH_DBG_CONFIG, "Driver halt\n"); |
@@ -1701,7 +1704,9 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed) | |||
1701 | skip_chan_change: | 1704 | skip_chan_change: |
1702 | if (changed & IEEE80211_CONF_CHANGE_POWER) { | 1705 | if (changed & IEEE80211_CONF_CHANGE_POWER) { |
1703 | sc->config.txpowlimit = 2 * conf->power_level; | 1706 | sc->config.txpowlimit = 2 * conf->power_level; |
1707 | ath9k_ps_wakeup(sc); | ||
1704 | ath_update_txpow(sc); | 1708 | ath_update_txpow(sc); |
1709 | ath9k_ps_restore(sc); | ||
1705 | } | 1710 | } |
1706 | 1711 | ||
1707 | spin_lock_bh(&sc->wiphy_lock); | 1712 | spin_lock_bh(&sc->wiphy_lock); |
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 332d1feb5c18..07b7804aec5b 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c | |||
@@ -1725,6 +1725,9 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf, | |||
1725 | ar9003_hw_set_paprd_txdesc(sc->sc_ah, bf->bf_desc, | 1725 | ar9003_hw_set_paprd_txdesc(sc->sc_ah, bf->bf_desc, |
1726 | bf->bf_state.bfs_paprd); | 1726 | bf->bf_state.bfs_paprd); |
1727 | 1727 | ||
1728 | if (txctl->paprd) | ||
1729 | bf->bf_state.bfs_paprd_timestamp = jiffies; | ||
1730 | |||
1728 | ath_tx_send_normal(sc, txctl->txq, tid, &bf_head); | 1731 | ath_tx_send_normal(sc, txctl->txq, tid, &bf_head); |
1729 | } | 1732 | } |
1730 | 1733 | ||
@@ -1886,7 +1889,9 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf, | |||
1886 | bf->bf_buf_addr = 0; | 1889 | bf->bf_buf_addr = 0; |
1887 | 1890 | ||
1888 | if (bf->bf_state.bfs_paprd) { | 1891 | if (bf->bf_state.bfs_paprd) { |
1889 | if (!sc->paprd_pending) | 1892 | if (time_after(jiffies, |
1893 | bf->bf_state.bfs_paprd_timestamp + | ||
1894 | msecs_to_jiffies(ATH_PAPRD_TIMEOUT))) | ||
1890 | dev_kfree_skb_any(skb); | 1895 | dev_kfree_skb_any(skb); |
1891 | else | 1896 | else |
1892 | complete(&sc->paprd_complete); | 1897 | complete(&sc->paprd_complete); |
@@ -2113,9 +2118,7 @@ static void ath_tx_complete_poll_work(struct work_struct *work) | |||
2113 | if (needreset) { | 2118 | if (needreset) { |
2114 | ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET, | 2119 | ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET, |
2115 | "tx hung, resetting the chip\n"); | 2120 | "tx hung, resetting the chip\n"); |
2116 | ath9k_ps_wakeup(sc); | ||
2117 | ath_reset(sc, true); | 2121 | ath_reset(sc, true); |
2118 | ath9k_ps_restore(sc); | ||
2119 | } | 2122 | } |
2120 | 2123 | ||
2121 | ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, | 2124 | ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, |
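The xmit.c change drops the global paprd_pending boolean in favor of a per-buffer jiffies timestamp: on tx completion, if the frame is older than the calibration timeout the waiter has already given up and the skb is freed, otherwise the completion is signalled. A hedged sketch of that decision (constant and struct are illustrative):

#include <linux/jiffies.h>
#include <linux/completion.h>
#include <linux/skbuff.h>

#define EXAMPLE_PAPRD_TIMEOUT_MS 100	/* illustrative timeout */

struct example_buf {
	unsigned long paprd_timestamp;	/* jiffies when the frame was queued */
};

static void example_paprd_complete(struct example_buf *bf,
				   struct sk_buff *skb,
				   struct completion *paprd_done)
{
	if (time_after(jiffies, bf->paprd_timestamp +
		       msecs_to_jiffies(EXAMPLE_PAPRD_TIMEOUT_MS)))
		dev_kfree_skb_any(skb);	/* waiter timed out; just drop the frame */
	else
		complete(paprd_done);	/* waiter is still blocked; wake it */
}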
diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c index 939a0e96ed1f..84866a4b8350 100644 --- a/drivers/net/wireless/ath/carl9170/rx.c +++ b/drivers/net/wireless/ath/carl9170/rx.c | |||
@@ -564,7 +564,7 @@ static void carl9170_ps_beacon(struct ar9170 *ar, void *data, unsigned int len) | |||
564 | cam = ieee80211_check_tim(tim_ie, tim_len, ar->common.curaid); | 564 | cam = ieee80211_check_tim(tim_ie, tim_len, ar->common.curaid); |
565 | 565 | ||
566 | /* 2. Maybe the AP wants to send multicast/broadcast data? */ | 566 | /* 2. Maybe the AP wants to send multicast/broadcast data? */ |
567 | cam = !!(tim_ie->bitmap_ctrl & 0x01); | 567 | cam |= !!(tim_ie->bitmap_ctrl & 0x01); |
568 | 568 | ||
569 | if (!cam) { | 569 | if (!cam) { |
570 | /* back to low-power land. */ | 570 | /* back to low-power land. */ |
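The one-character carl9170/rx.c fix is the classic "accumulate, don't overwrite" bug: the second condition must OR into cam, otherwise it silently discards the TIM lookup done just above. In miniature, with the two inputs passed in directly:

#include <linux/types.h>

/* Stay awake if either our AID is set in the TIM element, or the AP
 * has buffered multicast/broadcast traffic. */
static bool example_stay_awake(bool aid_in_tim, unsigned char bitmap_ctrl)
{
	bool cam;

	cam = aid_in_tim;			/* condition 1 */
	cam |= !!(bitmap_ctrl & 0x01);		/* condition 2: OR, not assign */

	return cam;
}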
diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c index 537732e5964f..f82c400be288 100644 --- a/drivers/net/wireless/ath/carl9170/usb.c +++ b/drivers/net/wireless/ath/carl9170/usb.c | |||
@@ -118,6 +118,8 @@ static struct usb_device_id carl9170_usb_ids[] = { | |||
118 | { USB_DEVICE(0x057c, 0x8402) }, | 118 | { USB_DEVICE(0x057c, 0x8402) }, |
119 | /* Qwest/Actiontec 802AIN Wireless N USB Network Adapter */ | 119 | /* Qwest/Actiontec 802AIN Wireless N USB Network Adapter */ |
120 | { USB_DEVICE(0x1668, 0x1200) }, | 120 | { USB_DEVICE(0x1668, 0x1200) }, |
121 | /* Airlive X.USB a/b/g/n */ | ||
122 | { USB_DEVICE(0x1b75, 0x9170) }, | ||
121 | 123 | ||
122 | /* terminate */ | 124 | /* terminate */ |
123 | {} | 125 | {} |
diff --git a/drivers/net/wireless/b43/phy_g.c b/drivers/net/wireless/b43/phy_g.c index 0dc33b65e86b..be4828167012 100644 --- a/drivers/net/wireless/b43/phy_g.c +++ b/drivers/net/wireless/b43/phy_g.c | |||
@@ -1919,7 +1919,7 @@ static void b43_hardware_pctl_init_gphy(struct b43_wldev *dev) | |||
1919 | b43_hf_write(dev, b43_hf_read(dev) | B43_HF_HWPCTL); | 1919 | b43_hf_write(dev, b43_hf_read(dev) | B43_HF_HWPCTL); |
1920 | } | 1920 | } |
1921 | 1921 | ||
1922 | /* Intialize B/G PHY power control */ | 1922 | /* Initialize B/G PHY power control */ |
1923 | static void b43_phy_init_pctl(struct b43_wldev *dev) | 1923 | static void b43_phy_init_pctl(struct b43_wldev *dev) |
1924 | { | 1924 | { |
1925 | struct ssb_bus *bus = dev->dev->bus; | 1925 | struct ssb_bus *bus = dev->dev->bus; |
diff --git a/drivers/net/wireless/b43legacy/phy.c b/drivers/net/wireless/b43legacy/phy.c index 35033dd342ce..28e477d01587 100644 --- a/drivers/net/wireless/b43legacy/phy.c +++ b/drivers/net/wireless/b43legacy/phy.c | |||
@@ -153,7 +153,7 @@ void b43legacy_phy_calibrate(struct b43legacy_wldev *dev) | |||
153 | phy->calibrated = 1; | 153 | phy->calibrated = 1; |
154 | } | 154 | } |
155 | 155 | ||
156 | /* intialize B PHY power control | 156 | /* initialize B PHY power control |
157 | * as described in http://bcm-specs.sipsolutions.net/InitPowerControl | 157 | * as described in http://bcm-specs.sipsolutions.net/InitPowerControl |
158 | */ | 158 | */ |
159 | static void b43legacy_phy_init_pctl(struct b43legacy_wldev *dev) | 159 | static void b43legacy_phy_init_pctl(struct b43legacy_wldev *dev) |
diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c index bd8a4134edeb..2176edede39b 100644 --- a/drivers/net/wireless/hostap/hostap_cs.c +++ b/drivers/net/wireless/hostap/hostap_cs.c | |||
@@ -518,22 +518,21 @@ static int prism2_config(struct pcmcia_device *link) | |||
518 | hw_priv->link = link; | 518 | hw_priv->link = link; |
519 | 519 | ||
520 | /* | 520 | /* |
521 | * Make sure the IRQ handler cannot proceed until at least | 521 | * We enable IRQ here, but IRQ handler will not proceed |
522 | * dev->base_addr is initialized. | 522 | * until dev->base_addr is set below. This protect us from |
523 | * receive interrupts when driver is not initialized. | ||
523 | */ | 524 | */ |
524 | spin_lock_irqsave(&local->irq_init_lock, flags); | ||
525 | |||
526 | ret = pcmcia_request_irq(link, prism2_interrupt); | 525 | ret = pcmcia_request_irq(link, prism2_interrupt); |
527 | if (ret) | 526 | if (ret) |
528 | goto failed_unlock; | 527 | goto failed; |
529 | 528 | ||
530 | ret = pcmcia_enable_device(link); | 529 | ret = pcmcia_enable_device(link); |
531 | if (ret) | 530 | if (ret) |
532 | goto failed_unlock; | 531 | goto failed; |
533 | 532 | ||
533 | spin_lock_irqsave(&local->irq_init_lock, flags); | ||
534 | dev->irq = link->irq; | 534 | dev->irq = link->irq; |
535 | dev->base_addr = link->resource[0]->start; | 535 | dev->base_addr = link->resource[0]->start; |
536 | |||
537 | spin_unlock_irqrestore(&local->irq_init_lock, flags); | 536 | spin_unlock_irqrestore(&local->irq_init_lock, flags); |
538 | 537 | ||
539 | local->shutdown = 0; | 538 | local->shutdown = 0; |
@@ -546,8 +545,6 @@ static int prism2_config(struct pcmcia_device *link) | |||
546 | 545 | ||
547 | return ret; | 546 | return ret; |
548 | 547 | ||
549 | failed_unlock: | ||
550 | spin_unlock_irqrestore(&local->irq_init_lock, flags); | ||
551 | failed: | 548 | failed: |
552 | kfree(hw_priv); | 549 | kfree(hw_priv); |
553 | prism2_release((u_long)link); | 550 | prism2_release((u_long)link); |
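The hostap_cs.c change shrinks the irq_init_lock critical section so that pcmcia_request_irq() and pcmcia_enable_device(), which may sleep, are no longer called with a spinlock held; the lock now only covers publishing dev->irq and dev->base_addr to the interrupt handler. A sketch of that ordering with the sleeping PCMCIA calls abstracted away (names hypothetical):

#include <linux/spinlock.h>

struct example_local {
	spinlock_t irq_init_lock;
	int irq;
	unsigned long base_addr;
};

static int example_config(struct example_local *local,
			  int (*request_irq_fn)(void),	/* may sleep */
			  int (*enable_fn)(void),	/* may sleep */
			  int new_irq, unsigned long new_base)
{
	unsigned long flags;
	int ret;

	ret = request_irq_fn();		/* sleeping calls stay outside the lock */
	if (ret)
		return ret;
	ret = enable_fn();
	if (ret)
		return ret;

	spin_lock_irqsave(&local->irq_init_lock, flags);
	local->irq = new_irq;		/* publish atomically w.r.t. the ISR */
	local->base_addr = new_base;
	spin_unlock_irqrestore(&local->irq_init_lock, flags);
	return 0;
}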
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c index 8d6ed5f6f46f..ae438ed80c2f 100644 --- a/drivers/net/wireless/ipw2x00/ipw2200.c +++ b/drivers/net/wireless/ipw2x00/ipw2200.c | |||
@@ -1973,6 +1973,13 @@ static void ipw_irq_tasklet(struct ipw_priv *priv) | |||
1973 | 1973 | ||
1974 | inta = ipw_read32(priv, IPW_INTA_RW); | 1974 | inta = ipw_read32(priv, IPW_INTA_RW); |
1975 | inta_mask = ipw_read32(priv, IPW_INTA_MASK_R); | 1975 | inta_mask = ipw_read32(priv, IPW_INTA_MASK_R); |
1976 | |||
1977 | if (inta == 0xFFFFFFFF) { | ||
1978 | /* Hardware disappeared */ | ||
1979 | IPW_WARNING("TASKLET INTA == 0xFFFFFFFF\n"); | ||
1980 | /* Only handle the cached INTA values */ | ||
1981 | inta = 0; | ||
1982 | } | ||
1976 | inta &= (IPW_INTA_MASK_ALL & inta_mask); | 1983 | inta &= (IPW_INTA_MASK_ALL & inta_mask); |
1977 | 1984 | ||
1978 | /* Add any cached INTA values that need to be handled */ | 1985 | /* Add any cached INTA values that need to be handled */ |
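In the ipw2200 hunk, INTA reading back as 0xFFFFFFFF is taken to mean the PCI device has vanished (a pulled card or a dead bus returns all-ones), so the tasklet falls back to the cached interrupt bits instead of acting on garbage. A tiny stand-alone rendering of that sentinel check (read_reg() is a placeholder, not an ipw2200 function):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t read_reg(void)
    {
        return 0xFFFFFFFFu;              /* simulate a device that disappeared */
    }

    int main(void)
    {
        uint32_t inta = read_reg();

        if (inta == 0xFFFFFFFFu) {       /* all-ones: hardware is gone */
            fprintf(stderr, "hardware disappeared, using cached bits only\n");
            inta = 0;
        }
        printf("inta=0x%08x\n", inta);
        return 0;
    }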
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c index a9b852be4509..39b6f16c87fa 100644 --- a/drivers/net/wireless/iwlwifi/iwl-3945.c +++ b/drivers/net/wireless/iwlwifi/iwl-3945.c | |||
@@ -402,72 +402,6 @@ static void iwl3945_accumulative_statistics(struct iwl_priv *priv, | |||
402 | } | 402 | } |
403 | #endif | 403 | #endif |
404 | 404 | ||
405 | /** | ||
406 | * iwl3945_good_plcp_health - checks for plcp error. | ||
407 | * | ||
408 | * When the plcp error is exceeding the thresholds, reset the radio | ||
409 | * to improve the throughput. | ||
410 | */ | ||
411 | static bool iwl3945_good_plcp_health(struct iwl_priv *priv, | ||
412 | struct iwl_rx_packet *pkt) | ||
413 | { | ||
414 | bool rc = true; | ||
415 | struct iwl3945_notif_statistics current_stat; | ||
416 | int combined_plcp_delta; | ||
417 | unsigned int plcp_msec; | ||
418 | unsigned long plcp_received_jiffies; | ||
419 | |||
420 | if (priv->cfg->base_params->plcp_delta_threshold == | ||
421 | IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) { | ||
422 | IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n"); | ||
423 | return rc; | ||
424 | } | ||
425 | memcpy(&current_stat, pkt->u.raw, sizeof(struct | ||
426 | iwl3945_notif_statistics)); | ||
427 | /* | ||
428 | * check for plcp_err and trigger radio reset if it exceeds | ||
429 | * the plcp error threshold plcp_delta. | ||
430 | */ | ||
431 | plcp_received_jiffies = jiffies; | ||
432 | plcp_msec = jiffies_to_msecs((long) plcp_received_jiffies - | ||
433 | (long) priv->plcp_jiffies); | ||
434 | priv->plcp_jiffies = plcp_received_jiffies; | ||
435 | /* | ||
436 | * check to make sure plcp_msec is not 0 to prevent division | ||
437 | * by zero. | ||
438 | */ | ||
439 | if (plcp_msec) { | ||
440 | combined_plcp_delta = | ||
441 | (le32_to_cpu(current_stat.rx.ofdm.plcp_err) - | ||
442 | le32_to_cpu(priv->_3945.statistics.rx.ofdm.plcp_err)); | ||
443 | |||
444 | if ((combined_plcp_delta > 0) && | ||
445 | ((combined_plcp_delta * 100) / plcp_msec) > | ||
446 | priv->cfg->base_params->plcp_delta_threshold) { | ||
447 | /* | ||
448 | * if plcp_err exceed the threshold, the following | ||
449 | * data is printed in csv format: | ||
450 | * Text: plcp_err exceeded %d, | ||
451 | * Received ofdm.plcp_err, | ||
452 | * Current ofdm.plcp_err, | ||
453 | * combined_plcp_delta, | ||
454 | * plcp_msec | ||
455 | */ | ||
456 | IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, " | ||
457 | "%u, %d, %u mSecs\n", | ||
458 | priv->cfg->base_params->plcp_delta_threshold, | ||
459 | le32_to_cpu(current_stat.rx.ofdm.plcp_err), | ||
460 | combined_plcp_delta, plcp_msec); | ||
461 | /* | ||
462 | * Reset the RF radio due to the high plcp | ||
463 | * error rate | ||
464 | */ | ||
465 | rc = false; | ||
466 | } | ||
467 | } | ||
468 | return rc; | ||
469 | } | ||
470 | |||
471 | void iwl3945_hw_rx_statistics(struct iwl_priv *priv, | 405 | void iwl3945_hw_rx_statistics(struct iwl_priv *priv, |
472 | struct iwl_rx_mem_buffer *rxb) | 406 | struct iwl_rx_mem_buffer *rxb) |
473 | { | 407 | { |
@@ -2734,7 +2668,6 @@ static struct iwl_lib_ops iwl3945_lib = { | |||
2734 | .isr_ops = { | 2668 | .isr_ops = { |
2735 | .isr = iwl_isr_legacy, | 2669 | .isr = iwl_isr_legacy, |
2736 | }, | 2670 | }, |
2737 | .check_plcp_health = iwl3945_good_plcp_health, | ||
2738 | 2671 | ||
2739 | .debugfs_ops = { | 2672 | .debugfs_ops = { |
2740 | .rx_stats_read = iwl3945_ucode_rx_stats_read, | 2673 | .rx_stats_read = iwl3945_ucode_rx_stats_read, |
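The block deleted from iwl-3945.c implemented the PLCP health check: it took the increase in OFDM PLCP errors since the last statistics notification, scaled it to a per-100-ms rate, and flagged the radio for reset when that rate exceeded plcp_delta_threshold. The arithmetic, reduced to a runnable stand-alone form (the threshold and sample numbers below are made up for the example):

    #include <stdbool.h>
    #include <stdio.h>

    /* true = healthy; mirrors the per-100-ms comparison removed above */
    static bool plcp_health(unsigned int delta_err, unsigned int msec,
                            unsigned int threshold)
    {
        if (!msec)                       /* guard against division by zero */
            return true;
        return !(delta_err > 0 && (delta_err * 100) / msec > threshold);
    }

    int main(void)
    {
        /* 60 new PLCP errors over 200 ms is 30 per 100 ms, over a limit of 20 */
        printf("healthy=%d\n", plcp_health(60, 200, 20));
        return 0;
    }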
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c index 3f1e5f1bf847..91a9f5253469 100644 --- a/drivers/net/wireless/iwlwifi/iwl-4965.c +++ b/drivers/net/wireless/iwlwifi/iwl-4965.c | |||
@@ -2624,6 +2624,7 @@ struct iwl_cfg iwl4965_agn_cfg = { | |||
2624 | .fw_name_pre = IWL4965_FW_PRE, | 2624 | .fw_name_pre = IWL4965_FW_PRE, |
2625 | .ucode_api_max = IWL4965_UCODE_API_MAX, | 2625 | .ucode_api_max = IWL4965_UCODE_API_MAX, |
2626 | .ucode_api_min = IWL4965_UCODE_API_MIN, | 2626 | .ucode_api_min = IWL4965_UCODE_API_MIN, |
2627 | .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N, | ||
2627 | .valid_tx_ant = ANT_AB, | 2628 | .valid_tx_ant = ANT_AB, |
2628 | .valid_rx_ant = ANT_ABC, | 2629 | .valid_rx_ant = ANT_ABC, |
2629 | .eeprom_ver = EEPROM_4965_EEPROM_VERSION, | 2630 | .eeprom_ver = EEPROM_4965_EEPROM_VERSION, |
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c index 79ab0a6b1386..537fb8c84e3a 100644 --- a/drivers/net/wireless/iwlwifi/iwl-5000.c +++ b/drivers/net/wireless/iwlwifi/iwl-5000.c | |||
@@ -51,7 +51,7 @@ | |||
51 | #include "iwl-agn-debugfs.h" | 51 | #include "iwl-agn-debugfs.h" |
52 | 52 | ||
53 | /* Highest firmware API version supported */ | 53 | /* Highest firmware API version supported */ |
54 | #define IWL5000_UCODE_API_MAX 2 | 54 | #define IWL5000_UCODE_API_MAX 5 |
55 | #define IWL5150_UCODE_API_MAX 2 | 55 | #define IWL5150_UCODE_API_MAX 2 |
56 | 56 | ||
57 | /* Lowest firmware API version supported */ | 57 | /* Lowest firmware API version supported */ |
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c index af505bcd7ae0..ef36aff1bb43 100644 --- a/drivers/net/wireless/iwlwifi/iwl-6000.c +++ b/drivers/net/wireless/iwlwifi/iwl-6000.c | |||
@@ -681,6 +681,8 @@ struct iwl_cfg iwl6000i_2bg_cfg = { | |||
681 | .fw_name_pre = IWL6050_FW_PRE, \ | 681 | .fw_name_pre = IWL6050_FW_PRE, \ |
682 | .ucode_api_max = IWL6050_UCODE_API_MAX, \ | 682 | .ucode_api_max = IWL6050_UCODE_API_MAX, \ |
683 | .ucode_api_min = IWL6050_UCODE_API_MIN, \ | 683 | .ucode_api_min = IWL6050_UCODE_API_MIN, \ |
684 | .valid_tx_ant = ANT_AB, /* .cfg overwrite */ \ | ||
685 | .valid_rx_ant = ANT_AB, /* .cfg overwrite */ \ | ||
684 | .ops = &iwl6050_ops, \ | 686 | .ops = &iwl6050_ops, \ |
685 | .eeprom_ver = EEPROM_6050_EEPROM_VERSION, \ | 687 | .eeprom_ver = EEPROM_6050_EEPROM_VERSION, \ |
686 | .eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION, \ | 688 | .eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION, \ |
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c index 97906dd442e6..27b5a3eec9dc 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c | |||
@@ -152,11 +152,14 @@ int iwl_eeprom_check_sku(struct iwl_priv *priv) | |||
152 | 152 | ||
153 | eeprom_sku = iwl_eeprom_query16(priv, EEPROM_SKU_CAP); | 153 | eeprom_sku = iwl_eeprom_query16(priv, EEPROM_SKU_CAP); |
154 | 154 | ||
155 | priv->cfg->sku = ((eeprom_sku & EEPROM_SKU_CAP_BAND_SELECTION) >> | 155 | if (!priv->cfg->sku) { |
156 | /* not using sku overwrite */ | ||
157 | priv->cfg->sku = | ||
158 | ((eeprom_sku & EEPROM_SKU_CAP_BAND_SELECTION) >> | ||
156 | EEPROM_SKU_CAP_BAND_POS); | 159 | EEPROM_SKU_CAP_BAND_POS); |
157 | if (eeprom_sku & EEPROM_SKU_CAP_11N_ENABLE) | 160 | if (eeprom_sku & EEPROM_SKU_CAP_11N_ENABLE) |
158 | priv->cfg->sku |= IWL_SKU_N; | 161 | priv->cfg->sku |= IWL_SKU_N; |
159 | 162 | } | |
160 | if (!priv->cfg->sku) { | 163 | if (!priv->cfg->sku) { |
161 | IWL_ERR(priv, "Invalid device sku\n"); | 164 | IWL_ERR(priv, "Invalid device sku\n"); |
162 | return -EINVAL; | 165 | return -EINVAL; |
@@ -168,7 +171,7 @@ int iwl_eeprom_check_sku(struct iwl_priv *priv) | |||
168 | /* not using .cfg overwrite */ | 171 | /* not using .cfg overwrite */ |
169 | radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG); | 172 | radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG); |
170 | priv->cfg->valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg); | 173 | priv->cfg->valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg); |
171 | priv->cfg->valid_rx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg); | 174 | priv->cfg->valid_rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg); |
172 | if (!priv->cfg->valid_tx_ant || !priv->cfg->valid_rx_ant) { | 175 | if (!priv->cfg->valid_tx_ant || !priv->cfg->valid_rx_ant) { |
173 | IWL_ERR(priv, "Invalid chain (0X%x, 0X%x)\n", | 176 | IWL_ERR(priv, "Invalid chain (0X%x, 0X%x)\n", |
174 | priv->cfg->valid_tx_ant, | 177 | priv->cfg->valid_tx_ant, |
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ict.c b/drivers/net/wireless/iwlwifi/iwl-agn-ict.c index a5dbfea1bfad..b5cb3be0eb4b 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-ict.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-ict.c | |||
@@ -197,7 +197,7 @@ static irqreturn_t iwl_isr(int irq, void *data) | |||
197 | 197 | ||
198 | none: | 198 | none: |
199 | /* re-enable interrupts here since we don't have anything to service. */ | 199 | /* re-enable interrupts here since we don't have anything to service. */ |
200 | /* only Re-enable if diabled by irq and no schedules tasklet. */ | 200 | /* only Re-enable if disabled by irq and no schedules tasklet. */ |
201 | if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->_agn.inta) | 201 | if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->_agn.inta) |
202 | iwl_enable_interrupts(priv); | 202 | iwl_enable_interrupts(priv); |
203 | 203 | ||
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c index f13a83a7e62b..c1cfd9952e52 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c | |||
@@ -1154,9 +1154,12 @@ static void iwl_irq_tasklet_legacy(struct iwl_priv *priv) | |||
1154 | } | 1154 | } |
1155 | 1155 | ||
1156 | /* Re-enable all interrupts */ | 1156 | /* Re-enable all interrupts */ |
1157 | /* only Re-enable if diabled by irq */ | 1157 | /* only Re-enable if disabled by irq */ |
1158 | if (test_bit(STATUS_INT_ENABLED, &priv->status)) | 1158 | if (test_bit(STATUS_INT_ENABLED, &priv->status)) |
1159 | iwl_enable_interrupts(priv); | 1159 | iwl_enable_interrupts(priv); |
1160 | /* Re-enable RF_KILL if it occurred */ | ||
1161 | else if (handled & CSR_INT_BIT_RF_KILL) | ||
1162 | iwl_enable_rfkill_int(priv); | ||
1160 | 1163 | ||
1161 | #ifdef CONFIG_IWLWIFI_DEBUG | 1164 | #ifdef CONFIG_IWLWIFI_DEBUG |
1162 | if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) { | 1165 | if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) { |
@@ -1368,9 +1371,12 @@ static void iwl_irq_tasklet(struct iwl_priv *priv) | |||
1368 | } | 1371 | } |
1369 | 1372 | ||
1370 | /* Re-enable all interrupts */ | 1373 | /* Re-enable all interrupts */ |
1371 | /* only Re-enable if diabled by irq */ | 1374 | /* only Re-enable if disabled by irq */ |
1372 | if (test_bit(STATUS_INT_ENABLED, &priv->status)) | 1375 | if (test_bit(STATUS_INT_ENABLED, &priv->status)) |
1373 | iwl_enable_interrupts(priv); | 1376 | iwl_enable_interrupts(priv); |
1377 | /* Re-enable RF_KILL if it occurred */ | ||
1378 | else if (handled & CSR_INT_BIT_RF_KILL) | ||
1379 | iwl_enable_rfkill_int(priv); | ||
1374 | } | 1380 | } |
1375 | 1381 | ||
1376 | /* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */ | 1382 | /* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */ |
diff --git a/drivers/net/wireless/iwlwifi/iwl-legacy.c b/drivers/net/wireless/iwlwifi/iwl-legacy.c index a08b4e56e6b1..bb1a742a98a0 100644 --- a/drivers/net/wireless/iwlwifi/iwl-legacy.c +++ b/drivers/net/wireless/iwlwifi/iwl-legacy.c | |||
@@ -619,7 +619,7 @@ unplugged: | |||
619 | 619 | ||
620 | none: | 620 | none: |
621 | /* re-enable interrupts here since we don't have anything to service. */ | 621 | /* re-enable interrupts here since we don't have anything to service. */ |
622 | /* only Re-enable if diabled by irq */ | 622 | /* only Re-enable if disabled by irq */ |
623 | if (test_bit(STATUS_INT_ENABLED, &priv->status)) | 623 | if (test_bit(STATUS_INT_ENABLED, &priv->status)) |
624 | iwl_enable_interrupts(priv); | 624 | iwl_enable_interrupts(priv); |
625 | spin_unlock_irqrestore(&priv->lock, flags); | 625 | spin_unlock_irqrestore(&priv->lock, flags); |
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c index 4776323b1eba..49493d176515 100644 --- a/drivers/net/wireless/iwlwifi/iwl-sta.c +++ b/drivers/net/wireless/iwlwifi/iwl-sta.c | |||
@@ -107,7 +107,7 @@ static int iwl_process_add_sta_resp(struct iwl_priv *priv, | |||
107 | /* | 107 | /* |
108 | * XXX: The MAC address in the command buffer is often changed from | 108 | * XXX: The MAC address in the command buffer is often changed from |
109 | * the original sent to the device. That is, the MAC address | 109 | * the original sent to the device. That is, the MAC address |
110 | * written to the command buffer often is not the same MAC adress | 110 | * written to the command buffer often is not the same MAC address |
111 | * read from the command buffer when the command returns. This | 111 | * read from the command buffer when the command returns. This |
112 | * issue has not yet been resolved and this debugging is left to | 112 | * issue has not yet been resolved and this debugging is left to |
113 | * observe the problem. | 113 | * observe the problem. |
diff --git a/drivers/net/wireless/iwmc3200wifi/netdev.c b/drivers/net/wireless/iwmc3200wifi/netdev.c index 13a69ebf2a94..5091d77e02ce 100644 --- a/drivers/net/wireless/iwmc3200wifi/netdev.c +++ b/drivers/net/wireless/iwmc3200wifi/netdev.c | |||
@@ -126,6 +126,7 @@ void *iwm_if_alloc(int sizeof_bus, struct device *dev, | |||
126 | ndev = alloc_netdev_mq(0, "wlan%d", ether_setup, IWM_TX_QUEUES); | 126 | ndev = alloc_netdev_mq(0, "wlan%d", ether_setup, IWM_TX_QUEUES); |
127 | if (!ndev) { | 127 | if (!ndev) { |
128 | dev_err(dev, "no memory for network device instance\n"); | 128 | dev_err(dev, "no memory for network device instance\n"); |
129 | ret = -ENOMEM; | ||
129 | goto out_priv; | 130 | goto out_priv; |
130 | } | 131 | } |
131 | 132 | ||
@@ -138,6 +139,7 @@ void *iwm_if_alloc(int sizeof_bus, struct device *dev, | |||
138 | GFP_KERNEL); | 139 | GFP_KERNEL); |
139 | if (!iwm->umac_profile) { | 140 | if (!iwm->umac_profile) { |
140 | dev_err(dev, "Couldn't alloc memory for profile\n"); | 141 | dev_err(dev, "Couldn't alloc memory for profile\n"); |
142 | ret = -ENOMEM; | ||
141 | goto out_profile; | 143 | goto out_profile; |
142 | } | 144 | } |
143 | 145 | ||
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c index 1eacba4daa5b..0494d7b102d4 100644 --- a/drivers/net/wireless/p54/p54pci.c +++ b/drivers/net/wireless/p54/p54pci.c | |||
@@ -199,6 +199,7 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index, | |||
199 | while (i != idx) { | 199 | while (i != idx) { |
200 | u16 len; | 200 | u16 len; |
201 | struct sk_buff *skb; | 201 | struct sk_buff *skb; |
202 | dma_addr_t dma_addr; | ||
202 | desc = &ring[i]; | 203 | desc = &ring[i]; |
203 | len = le16_to_cpu(desc->len); | 204 | len = le16_to_cpu(desc->len); |
204 | skb = rx_buf[i]; | 205 | skb = rx_buf[i]; |
@@ -216,17 +217,20 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index, | |||
216 | 217 | ||
217 | len = priv->common.rx_mtu; | 218 | len = priv->common.rx_mtu; |
218 | } | 219 | } |
220 | dma_addr = le32_to_cpu(desc->host_addr); | ||
221 | pci_dma_sync_single_for_cpu(priv->pdev, dma_addr, | ||
222 | priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE); | ||
219 | skb_put(skb, len); | 223 | skb_put(skb, len); |
220 | 224 | ||
221 | if (p54_rx(dev, skb)) { | 225 | if (p54_rx(dev, skb)) { |
222 | pci_unmap_single(priv->pdev, | 226 | pci_unmap_single(priv->pdev, dma_addr, |
223 | le32_to_cpu(desc->host_addr), | 227 | priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE); |
224 | priv->common.rx_mtu + 32, | ||
225 | PCI_DMA_FROMDEVICE); | ||
226 | rx_buf[i] = NULL; | 228 | rx_buf[i] = NULL; |
227 | desc->host_addr = 0; | 229 | desc->host_addr = cpu_to_le32(0); |
228 | } else { | 230 | } else { |
229 | skb_trim(skb, 0); | 231 | skb_trim(skb, 0); |
232 | pci_dma_sync_single_for_device(priv->pdev, dma_addr, | ||
233 | priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE); | ||
230 | desc->len = cpu_to_le16(priv->common.rx_mtu + 32); | 234 | desc->len = cpu_to_le16(priv->common.rx_mtu + 32); |
231 | } | 235 | } |
232 | 236 | ||
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c index 21713a7638c4..9b344a921e74 100644 --- a/drivers/net/wireless/p54/p54usb.c +++ b/drivers/net/wireless/p54/p54usb.c | |||
@@ -98,6 +98,7 @@ static struct usb_device_id p54u_table[] __devinitdata = { | |||
98 | {USB_DEVICE(0x1413, 0x5400)}, /* Telsey 802.11g USB2.0 Adapter */ | 98 | {USB_DEVICE(0x1413, 0x5400)}, /* Telsey 802.11g USB2.0 Adapter */ |
99 | {USB_DEVICE(0x1435, 0x0427)}, /* Inventel UR054G */ | 99 | {USB_DEVICE(0x1435, 0x0427)}, /* Inventel UR054G */ |
100 | {USB_DEVICE(0x1668, 0x1050)}, /* Actiontec 802UIG-1 */ | 100 | {USB_DEVICE(0x1668, 0x1050)}, /* Actiontec 802UIG-1 */ |
101 | {USB_DEVICE(0x1740, 0x1000)}, /* Senao NUB-350 */ | ||
101 | {USB_DEVICE(0x2001, 0x3704)}, /* DLink DWL-G122 rev A2 */ | 102 | {USB_DEVICE(0x2001, 0x3704)}, /* DLink DWL-G122 rev A2 */ |
102 | {USB_DEVICE(0x2001, 0x3705)}, /* D-Link DWL-G120 rev C1 */ | 103 | {USB_DEVICE(0x2001, 0x3705)}, /* D-Link DWL-G120 rev C1 */ |
103 | {USB_DEVICE(0x413c, 0x5513)}, /* Dell WLA3310 USB Wireless Adapter */ | 104 | {USB_DEVICE(0x413c, 0x5513)}, /* Dell WLA3310 USB Wireless Adapter */ |
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c index 76b2318a7dc7..f618b9623e5a 100644 --- a/drivers/net/wireless/p54/txrx.c +++ b/drivers/net/wireless/p54/txrx.c | |||
@@ -618,7 +618,7 @@ static void p54_tx_80211_header(struct p54_common *priv, struct sk_buff *skb, | |||
618 | else | 618 | else |
619 | *burst_possible = false; | 619 | *burst_possible = false; |
620 | 620 | ||
621 | if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) | 621 | if (!(info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)) |
622 | *flags |= P54_HDR_FLAG_DATA_OUT_SEQNR; | 622 | *flags |= P54_HDR_FLAG_DATA_OUT_SEQNR; |
623 | 623 | ||
624 | if (info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE) | 624 | if (info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE) |
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c index 2c8cc954d1b6..ec2c75d77cea 100644 --- a/drivers/net/wireless/prism54/islpci_dev.c +++ b/drivers/net/wireless/prism54/islpci_dev.c | |||
@@ -630,7 +630,7 @@ islpci_alloc_memory(islpci_private *priv) | |||
630 | printk(KERN_DEBUG "islpci_alloc_memory\n"); | 630 | printk(KERN_DEBUG "islpci_alloc_memory\n"); |
631 | #endif | 631 | #endif |
632 | 632 | ||
633 | /* remap the PCI device base address to accessable */ | 633 | /* remap the PCI device base address to accessible */ |
634 | if (!(priv->device_base = | 634 | if (!(priv->device_base = |
635 | ioremap(pci_resource_start(priv->pdev, 0), | 635 | ioremap(pci_resource_start(priv->pdev, 0), |
636 | ISL38XX_PCI_MEM_SIZE))) { | 636 | ISL38XX_PCI_MEM_SIZE))) { |
@@ -709,7 +709,7 @@ islpci_alloc_memory(islpci_private *priv) | |||
709 | PCI_DMA_FROMDEVICE); | 709 | PCI_DMA_FROMDEVICE); |
710 | if (!priv->pci_map_rx_address[counter]) { | 710 | if (!priv->pci_map_rx_address[counter]) { |
711 | /* error mapping the buffer to device | 711 | /* error mapping the buffer to device |
712 | accessable memory address */ | 712 | accessible memory address */ |
713 | printk(KERN_ERR "failed to map skb DMA'able\n"); | 713 | printk(KERN_ERR "failed to map skb DMA'able\n"); |
714 | goto out_free; | 714 | goto out_free; |
715 | } | 715 | } |
@@ -773,7 +773,7 @@ islpci_free_memory(islpci_private *priv) | |||
773 | priv->data_low_rx[counter] = NULL; | 773 | priv->data_low_rx[counter] = NULL; |
774 | } | 774 | } |
775 | 775 | ||
776 | /* Free the acces control list and the WPA list */ | 776 | /* Free the access control list and the WPA list */ |
777 | prism54_acl_clean(&priv->acl); | 777 | prism54_acl_clean(&priv->acl); |
778 | prism54_wpa_bss_ie_clean(priv); | 778 | prism54_wpa_bss_ie_clean(priv); |
779 | mgt_clean(priv); | 779 | mgt_clean(priv); |
diff --git a/drivers/net/wireless/prism54/islpci_eth.c b/drivers/net/wireless/prism54/islpci_eth.c index 2fc52bc2d7dd..d44f8e20cce0 100644 --- a/drivers/net/wireless/prism54/islpci_eth.c +++ b/drivers/net/wireless/prism54/islpci_eth.c | |||
@@ -450,7 +450,7 @@ islpci_eth_receive(islpci_private *priv) | |||
450 | MAX_FRAGMENT_SIZE_RX + 2, | 450 | MAX_FRAGMENT_SIZE_RX + 2, |
451 | PCI_DMA_FROMDEVICE); | 451 | PCI_DMA_FROMDEVICE); |
452 | if (unlikely(!priv->pci_map_rx_address[index])) { | 452 | if (unlikely(!priv->pci_map_rx_address[index])) { |
453 | /* error mapping the buffer to device accessable memory address */ | 453 | /* error mapping the buffer to device accessible memory address */ |
454 | DEBUG(SHOW_ERROR_MESSAGES, | 454 | DEBUG(SHOW_ERROR_MESSAGES, |
455 | "Error mapping DMA address\n"); | 455 | "Error mapping DMA address\n"); |
456 | 456 | ||
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c index 848cc2cce247..518542b4bf9e 100644 --- a/drivers/net/wireless/rndis_wlan.c +++ b/drivers/net/wireless/rndis_wlan.c | |||
@@ -2597,6 +2597,9 @@ static int rndis_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev, | |||
2597 | __le32 mode; | 2597 | __le32 mode; |
2598 | int ret; | 2598 | int ret; |
2599 | 2599 | ||
2600 | if (priv->device_type != RNDIS_BCM4320B) | ||
2601 | return -ENOTSUPP; | ||
2602 | |||
2600 | netdev_dbg(usbdev->net, "%s(): %s, %d\n", __func__, | 2603 | netdev_dbg(usbdev->net, "%s(): %s, %d\n", __func__, |
2601 | enabled ? "enabled" : "disabled", | 2604 | enabled ? "enabled" : "disabled", |
2602 | timeout); | 2605 | timeout); |
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c index aa97971a38af..3b3f1e45ab3e 100644 --- a/drivers/net/wireless/rt2x00/rt2800pci.c +++ b/drivers/net/wireless/rt2x00/rt2800pci.c | |||
@@ -652,6 +652,12 @@ static void rt2800pci_fill_rxdone(struct queue_entry *entry, | |||
652 | */ | 652 | */ |
653 | rxdesc->flags |= RX_FLAG_IV_STRIPPED; | 653 | rxdesc->flags |= RX_FLAG_IV_STRIPPED; |
654 | 654 | ||
655 | /* | ||
656 | * The hardware has already checked the Michael Mic and has | ||
657 | * stripped it from the frame. Signal this to mac80211. | ||
658 | */ | ||
659 | rxdesc->flags |= RX_FLAG_MMIC_STRIPPED; | ||
660 | |||
655 | if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS) | 661 | if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS) |
656 | rxdesc->flags |= RX_FLAG_DECRYPTED; | 662 | rxdesc->flags |= RX_FLAG_DECRYPTED; |
657 | else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC) | 663 | else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC) |
@@ -1065,6 +1071,8 @@ static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = { | |||
1065 | { PCI_DEVICE(0x1814, 0x3390), PCI_DEVICE_DATA(&rt2800pci_ops) }, | 1071 | { PCI_DEVICE(0x1814, 0x3390), PCI_DEVICE_DATA(&rt2800pci_ops) }, |
1066 | #endif | 1072 | #endif |
1067 | #ifdef CONFIG_RT2800PCI_RT35XX | 1073 | #ifdef CONFIG_RT2800PCI_RT35XX |
1074 | { PCI_DEVICE(0x1432, 0x7711), PCI_DEVICE_DATA(&rt2800pci_ops) }, | ||
1075 | { PCI_DEVICE(0x1432, 0x7722), PCI_DEVICE_DATA(&rt2800pci_ops) }, | ||
1068 | { PCI_DEVICE(0x1814, 0x3060), PCI_DEVICE_DATA(&rt2800pci_ops) }, | 1076 | { PCI_DEVICE(0x1814, 0x3060), PCI_DEVICE_DATA(&rt2800pci_ops) }, |
1069 | { PCI_DEVICE(0x1814, 0x3062), PCI_DEVICE_DATA(&rt2800pci_ops) }, | 1077 | { PCI_DEVICE(0x1814, 0x3062), PCI_DEVICE_DATA(&rt2800pci_ops) }, |
1070 | { PCI_DEVICE(0x1814, 0x3562), PCI_DEVICE_DATA(&rt2800pci_ops) }, | 1078 | { PCI_DEVICE(0x1814, 0x3562), PCI_DEVICE_DATA(&rt2800pci_ops) }, |
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c index b97a4a54ff4c..197a36c05fda 100644 --- a/drivers/net/wireless/rt2x00/rt2800usb.c +++ b/drivers/net/wireless/rt2x00/rt2800usb.c | |||
@@ -486,6 +486,12 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry, | |||
486 | */ | 486 | */ |
487 | rxdesc->flags |= RX_FLAG_IV_STRIPPED; | 487 | rxdesc->flags |= RX_FLAG_IV_STRIPPED; |
488 | 488 | ||
489 | /* | ||
490 | * The hardware has already checked the Michael Mic and has | ||
491 | * stripped it from the frame. Signal this to mac80211. | ||
492 | */ | ||
493 | rxdesc->flags |= RX_FLAG_MMIC_STRIPPED; | ||
494 | |||
489 | if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS) | 495 | if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS) |
490 | rxdesc->flags |= RX_FLAG_DECRYPTED; | 496 | rxdesc->flags |= RX_FLAG_DECRYPTED; |
491 | else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC) | 497 | else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC) |
diff --git a/drivers/net/wireless/rt2x00/rt2x00firmware.c b/drivers/net/wireless/rt2x00/rt2x00firmware.c index f0e1eb72befc..be0ff78c1b16 100644 --- a/drivers/net/wireless/rt2x00/rt2x00firmware.c +++ b/drivers/net/wireless/rt2x00/rt2x00firmware.c | |||
@@ -58,6 +58,7 @@ static int rt2x00lib_request_firmware(struct rt2x00_dev *rt2x00dev) | |||
58 | 58 | ||
59 | if (!fw || !fw->size || !fw->data) { | 59 | if (!fw || !fw->size || !fw->data) { |
60 | ERROR(rt2x00dev, "Failed to read Firmware.\n"); | 60 | ERROR(rt2x00dev, "Failed to read Firmware.\n"); |
61 | release_firmware(fw); | ||
61 | return -ENOENT; | 62 | return -ENOENT; |
62 | } | 63 | } |
63 | 64 | ||
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c index 658542d2efe1..f3da051df39e 100644 --- a/drivers/net/wireless/rt2x00/rt2x00mac.c +++ b/drivers/net/wireless/rt2x00/rt2x00mac.c | |||
@@ -273,7 +273,7 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw, | |||
273 | intf->beacon = entry; | 273 | intf->beacon = entry; |
274 | 274 | ||
275 | /* | 275 | /* |
276 | * The MAC adddress must be configured after the device | 276 | * The MAC address must be configured after the device |
277 | * has been initialized. Otherwise the device can reset | 277 | * has been initialized. Otherwise the device can reset |
278 | * the MAC registers. | 278 | * the MAC registers. |
279 | * The BSSID address must only be configured in AP mode, | 279 | * The BSSID address must only be configured in AP mode, |
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c index 73631c6fbb30..ace0b668c04e 100644 --- a/drivers/net/wireless/rt2x00/rt2x00pci.c +++ b/drivers/net/wireless/rt2x00/rt2x00pci.c | |||
@@ -363,12 +363,12 @@ int rt2x00pci_resume(struct pci_dev *pci_dev) | |||
363 | struct rt2x00_dev *rt2x00dev = hw->priv; | 363 | struct rt2x00_dev *rt2x00dev = hw->priv; |
364 | 364 | ||
365 | if (pci_set_power_state(pci_dev, PCI_D0) || | 365 | if (pci_set_power_state(pci_dev, PCI_D0) || |
366 | pci_enable_device(pci_dev) || | 366 | pci_enable_device(pci_dev)) { |
367 | pci_restore_state(pci_dev)) { | ||
368 | ERROR(rt2x00dev, "Failed to resume device.\n"); | 367 | ERROR(rt2x00dev, "Failed to resume device.\n"); |
369 | return -EIO; | 368 | return -EIO; |
370 | } | 369 | } |
371 | 370 | ||
371 | pci_restore_state(pci_dev); | ||
372 | return rt2x00lib_resume(rt2x00dev); | 372 | return rt2x00lib_resume(rt2x00dev); |
373 | } | 373 | } |
374 | EXPORT_SYMBOL_GPL(rt2x00pci_resume); | 374 | EXPORT_SYMBOL_GPL(rt2x00pci_resume); |
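The rt2x00pci_resume() fix pulls pci_restore_state() out of the `||` chain and calls it unconditionally once the device has been re-enabled; because `||` short-circuits, the restore used to be skipped whenever pci_enable_device() failed. The sketch below only illustrates that evaluation order (placeholder functions, not the PCI API):

    #include <stdio.h>

    static int step(const char *name, int rc)
    {
        printf("%s -> %d\n", name, rc);
        return rc;                       /* non-zero means "failed" */
    }

    int main(void)
    {
        /* once "b" fails, "c" is never evaluated */
        if (step("a", 0) || step("b", 1) || step("c", 0))
            printf("error path taken; note that c never ran\n");
        return 0;
    }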
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c index 0b4e8590cbb7..029be3c6c030 100644 --- a/drivers/net/wireless/rt2x00/rt73usb.c +++ b/drivers/net/wireless/rt2x00/rt73usb.c | |||
@@ -2446,6 +2446,7 @@ static struct usb_device_id rt73usb_device_table[] = { | |||
2446 | { USB_DEVICE(0x04bb, 0x093d), USB_DEVICE_DATA(&rt73usb_ops) }, | 2446 | { USB_DEVICE(0x04bb, 0x093d), USB_DEVICE_DATA(&rt73usb_ops) }, |
2447 | { USB_DEVICE(0x148f, 0x2573), USB_DEVICE_DATA(&rt73usb_ops) }, | 2447 | { USB_DEVICE(0x148f, 0x2573), USB_DEVICE_DATA(&rt73usb_ops) }, |
2448 | { USB_DEVICE(0x148f, 0x2671), USB_DEVICE_DATA(&rt73usb_ops) }, | 2448 | { USB_DEVICE(0x148f, 0x2671), USB_DEVICE_DATA(&rt73usb_ops) }, |
2449 | { USB_DEVICE(0x0812, 0x3101), USB_DEVICE_DATA(&rt73usb_ops) }, | ||
2449 | /* Qcom */ | 2450 | /* Qcom */ |
2450 | { USB_DEVICE(0x18e8, 0x6196), USB_DEVICE_DATA(&rt73usb_ops) }, | 2451 | { USB_DEVICE(0x18e8, 0x6196), USB_DEVICE_DATA(&rt73usb_ops) }, |
2451 | { USB_DEVICE(0x18e8, 0x6229), USB_DEVICE_DATA(&rt73usb_ops) }, | 2452 | { USB_DEVICE(0x18e8, 0x6229), USB_DEVICE_DATA(&rt73usb_ops) }, |
diff --git a/drivers/net/wireless/rtlwifi/efuse.c b/drivers/net/wireless/rtlwifi/efuse.c index b8433f3a9bc2..62876cd5c41a 100644 --- a/drivers/net/wireless/rtlwifi/efuse.c +++ b/drivers/net/wireless/rtlwifi/efuse.c | |||
@@ -726,9 +726,9 @@ static int efuse_pg_packet_read(struct ieee80211_hw *hw, u8 offset, u8 *data) | |||
726 | } | 726 | } |
727 | 727 | ||
728 | static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr, | 728 | static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr, |
729 | u8 efuse_data, u8 offset, int *bcontinual, | 729 | u8 efuse_data, u8 offset, int *bcontinual, |
730 | u8 *write_state, struct pgpkt_struct target_pkt, | 730 | u8 *write_state, struct pgpkt_struct *target_pkt, |
731 | int *repeat_times, int *bresult, u8 word_en) | 731 | int *repeat_times, int *bresult, u8 word_en) |
732 | { | 732 | { |
733 | struct rtl_priv *rtlpriv = rtl_priv(hw); | 733 | struct rtl_priv *rtlpriv = rtl_priv(hw); |
734 | struct pgpkt_struct tmp_pkt; | 734 | struct pgpkt_struct tmp_pkt; |
@@ -744,8 +744,8 @@ static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr, | |||
744 | tmp_pkt.word_en = tmp_header & 0x0F; | 744 | tmp_pkt.word_en = tmp_header & 0x0F; |
745 | tmp_word_cnts = efuse_calculate_word_cnts(tmp_pkt.word_en); | 745 | tmp_word_cnts = efuse_calculate_word_cnts(tmp_pkt.word_en); |
746 | 746 | ||
747 | if (tmp_pkt.offset != target_pkt.offset) { | 747 | if (tmp_pkt.offset != target_pkt->offset) { |
748 | efuse_addr = efuse_addr + (tmp_word_cnts * 2) + 1; | 748 | *efuse_addr = *efuse_addr + (tmp_word_cnts * 2) + 1; |
749 | *write_state = PG_STATE_HEADER; | 749 | *write_state = PG_STATE_HEADER; |
750 | } else { | 750 | } else { |
751 | for (tmpindex = 0; tmpindex < (tmp_word_cnts * 2); tmpindex++) { | 751 | for (tmpindex = 0; tmpindex < (tmp_word_cnts * 2); tmpindex++) { |
@@ -756,23 +756,23 @@ static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr, | |||
756 | } | 756 | } |
757 | 757 | ||
758 | if (bdataempty == false) { | 758 | if (bdataempty == false) { |
759 | efuse_addr = efuse_addr + (tmp_word_cnts * 2) + 1; | 759 | *efuse_addr = *efuse_addr + (tmp_word_cnts * 2) + 1; |
760 | *write_state = PG_STATE_HEADER; | 760 | *write_state = PG_STATE_HEADER; |
761 | } else { | 761 | } else { |
762 | match_word_en = 0x0F; | 762 | match_word_en = 0x0F; |
763 | if (!((target_pkt.word_en & BIT(0)) | | 763 | if (!((target_pkt->word_en & BIT(0)) | |
764 | (tmp_pkt.word_en & BIT(0)))) | 764 | (tmp_pkt.word_en & BIT(0)))) |
765 | match_word_en &= (~BIT(0)); | 765 | match_word_en &= (~BIT(0)); |
766 | 766 | ||
767 | if (!((target_pkt.word_en & BIT(1)) | | 767 | if (!((target_pkt->word_en & BIT(1)) | |
768 | (tmp_pkt.word_en & BIT(1)))) | 768 | (tmp_pkt.word_en & BIT(1)))) |
769 | match_word_en &= (~BIT(1)); | 769 | match_word_en &= (~BIT(1)); |
770 | 770 | ||
771 | if (!((target_pkt.word_en & BIT(2)) | | 771 | if (!((target_pkt->word_en & BIT(2)) | |
772 | (tmp_pkt.word_en & BIT(2)))) | 772 | (tmp_pkt.word_en & BIT(2)))) |
773 | match_word_en &= (~BIT(2)); | 773 | match_word_en &= (~BIT(2)); |
774 | 774 | ||
775 | if (!((target_pkt.word_en & BIT(3)) | | 775 | if (!((target_pkt->word_en & BIT(3)) | |
776 | (tmp_pkt.word_en & BIT(3)))) | 776 | (tmp_pkt.word_en & BIT(3)))) |
777 | match_word_en &= (~BIT(3)); | 777 | match_word_en &= (~BIT(3)); |
778 | 778 | ||
@@ -780,7 +780,7 @@ static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr, | |||
780 | badworden = efuse_word_enable_data_write( | 780 | badworden = efuse_word_enable_data_write( |
781 | hw, *efuse_addr + 1, | 781 | hw, *efuse_addr + 1, |
782 | tmp_pkt.word_en, | 782 | tmp_pkt.word_en, |
783 | target_pkt.data); | 783 | target_pkt->data); |
784 | 784 | ||
785 | if (0x0F != (badworden & 0x0F)) { | 785 | if (0x0F != (badworden & 0x0F)) { |
786 | u8 reorg_offset = offset; | 786 | u8 reorg_offset = offset; |
@@ -791,26 +791,26 @@ static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr, | |||
791 | } | 791 | } |
792 | 792 | ||
793 | tmp_word_en = 0x0F; | 793 | tmp_word_en = 0x0F; |
794 | if ((target_pkt.word_en & BIT(0)) ^ | 794 | if ((target_pkt->word_en & BIT(0)) ^ |
795 | (match_word_en & BIT(0))) | 795 | (match_word_en & BIT(0))) |
796 | tmp_word_en &= (~BIT(0)); | 796 | tmp_word_en &= (~BIT(0)); |
797 | 797 | ||
798 | if ((target_pkt.word_en & BIT(1)) ^ | 798 | if ((target_pkt->word_en & BIT(1)) ^ |
799 | (match_word_en & BIT(1))) | 799 | (match_word_en & BIT(1))) |
800 | tmp_word_en &= (~BIT(1)); | 800 | tmp_word_en &= (~BIT(1)); |
801 | 801 | ||
802 | if ((target_pkt.word_en & BIT(2)) ^ | 802 | if ((target_pkt->word_en & BIT(2)) ^ |
803 | (match_word_en & BIT(2))) | 803 | (match_word_en & BIT(2))) |
804 | tmp_word_en &= (~BIT(2)); | 804 | tmp_word_en &= (~BIT(2)); |
805 | 805 | ||
806 | if ((target_pkt.word_en & BIT(3)) ^ | 806 | if ((target_pkt->word_en & BIT(3)) ^ |
807 | (match_word_en & BIT(3))) | 807 | (match_word_en & BIT(3))) |
808 | tmp_word_en &= (~BIT(3)); | 808 | tmp_word_en &= (~BIT(3)); |
809 | 809 | ||
810 | if ((tmp_word_en & 0x0F) != 0x0F) { | 810 | if ((tmp_word_en & 0x0F) != 0x0F) { |
811 | *efuse_addr = efuse_get_current_size(hw); | 811 | *efuse_addr = efuse_get_current_size(hw); |
812 | target_pkt.offset = offset; | 812 | target_pkt->offset = offset; |
813 | target_pkt.word_en = tmp_word_en; | 813 | target_pkt->word_en = tmp_word_en; |
814 | } else | 814 | } else |
815 | *bcontinual = false; | 815 | *bcontinual = false; |
816 | *write_state = PG_STATE_HEADER; | 816 | *write_state = PG_STATE_HEADER; |
@@ -821,8 +821,8 @@ static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr, | |||
821 | } | 821 | } |
822 | } else { | 822 | } else { |
823 | *efuse_addr += (2 * tmp_word_cnts) + 1; | 823 | *efuse_addr += (2 * tmp_word_cnts) + 1; |
824 | target_pkt.offset = offset; | 824 | target_pkt->offset = offset; |
825 | target_pkt.word_en = word_en; | 825 | target_pkt->word_en = word_en; |
826 | *write_state = PG_STATE_HEADER; | 826 | *write_state = PG_STATE_HEADER; |
827 | } | 827 | } |
828 | } | 828 | } |
@@ -938,7 +938,7 @@ static int efuse_pg_packet_write(struct ieee80211_hw *hw, | |||
938 | efuse_write_data_case1(hw, &efuse_addr, | 938 | efuse_write_data_case1(hw, &efuse_addr, |
939 | efuse_data, offset, | 939 | efuse_data, offset, |
940 | &bcontinual, | 940 | &bcontinual, |
941 | &write_state, target_pkt, | 941 | &write_state, &target_pkt, |
942 | &repeat_times, &bresult, | 942 | &repeat_times, &bresult, |
943 | word_en); | 943 | word_en); |
944 | else | 944 | else |
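The efuse.c change is a pass-by-value bug fix: efuse_write_data_case1() used to receive struct pgpkt_struct target_pkt as a copy, so assignments such as target_pkt.offset = offset (and the bare efuse_addr = ... writes to the pointer parameter) only changed locals and were lost in the caller, efuse_pg_packet_write(). Passing &target_pkt and dereferencing makes the updates stick. The difference in miniature (generic names, not the driver's):

    #include <stdio.h>

    struct pkt { int offset; };

    static void by_value(struct pkt p)    { p.offset = 42; }  /* edits a copy       */
    static void by_pointer(struct pkt *p) { p->offset = 42; } /* edits the caller's */

    int main(void)
    {
        struct pkt a = { 0 }, b = { 0 };

        by_value(a);
        by_pointer(&b);
        printf("by value: %d, by pointer: %d\n", a.offset, b.offset); /* 0, 42 */
        return 0;
    }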
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c index 0fa36aa6701a..1758d4463247 100644 --- a/drivers/net/wireless/rtlwifi/pci.c +++ b/drivers/net/wireless/rtlwifi/pci.c | |||
@@ -619,6 +619,13 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw) | |||
619 | struct sk_buff *uskb = NULL; | 619 | struct sk_buff *uskb = NULL; |
620 | u8 *pdata; | 620 | u8 *pdata; |
621 | uskb = dev_alloc_skb(skb->len + 128); | 621 | uskb = dev_alloc_skb(skb->len + 128); |
622 | if (!uskb) { | ||
623 | RT_TRACE(rtlpriv, | ||
624 | (COMP_INTR | COMP_RECV), | ||
625 | DBG_EMERG, | ||
626 | ("can't alloc rx skb\n")); | ||
627 | goto done; | ||
628 | } | ||
622 | memcpy(IEEE80211_SKB_RXCB(uskb), | 629 | memcpy(IEEE80211_SKB_RXCB(uskb), |
623 | &rx_status, | 630 | &rx_status, |
624 | sizeof(rx_status)); | 631 | sizeof(rx_status)); |
@@ -641,7 +648,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw) | |||
641 | new_skb = dev_alloc_skb(rtlpci->rxbuffersize); | 648 | new_skb = dev_alloc_skb(rtlpci->rxbuffersize); |
642 | if (unlikely(!new_skb)) { | 649 | if (unlikely(!new_skb)) { |
643 | RT_TRACE(rtlpriv, (COMP_INTR | COMP_RECV), | 650 | RT_TRACE(rtlpriv, (COMP_INTR | COMP_RECV), |
644 | DBG_DMESG, | 651 | DBG_EMERG, |
645 | ("can't alloc skb for rx\n")); | 652 | ("can't alloc skb for rx\n")); |
646 | goto done; | 653 | goto done; |
647 | } | 654 | } |
@@ -1066,9 +1073,9 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw) | |||
1066 | struct sk_buff *skb = | 1073 | struct sk_buff *skb = |
1067 | dev_alloc_skb(rtlpci->rxbuffersize); | 1074 | dev_alloc_skb(rtlpci->rxbuffersize); |
1068 | u32 bufferaddress; | 1075 | u32 bufferaddress; |
1069 | entry = &rtlpci->rx_ring[rx_queue_idx].desc[i]; | ||
1070 | if (!skb) | 1076 | if (!skb) |
1071 | return 0; | 1077 | return 0; |
1078 | entry = &rtlpci->rx_ring[rx_queue_idx].desc[i]; | ||
1072 | 1079 | ||
1073 | /*skb->dev = dev; */ | 1080 | /*skb->dev = dev; */ |
1074 | 1081 | ||
diff --git a/drivers/net/wireless/wl1251/acx.h b/drivers/net/wireless/wl1251/acx.h index e54b21a4f8b1..efcc3aaca14f 100644 --- a/drivers/net/wireless/wl1251/acx.h +++ b/drivers/net/wireless/wl1251/acx.h | |||
@@ -1272,10 +1272,10 @@ struct wl1251_acx_tid_cfg { | |||
1272 | /* OBSOLETE */ | 1272 | /* OBSOLETE */ |
1273 | #define WL1251_ACX_INTR_WAKE_ON_HOST BIT(6) | 1273 | #define WL1251_ACX_INTR_WAKE_ON_HOST BIT(6) |
1274 | 1274 | ||
1275 | /* Trace meassge on MBOX #A */ | 1275 | /* Trace message on MBOX #A */ |
1276 | #define WL1251_ACX_INTR_TRACE_A BIT(7) | 1276 | #define WL1251_ACX_INTR_TRACE_A BIT(7) |
1277 | 1277 | ||
1278 | /* Trace meassge on MBOX #B */ | 1278 | /* Trace message on MBOX #B */ |
1279 | #define WL1251_ACX_INTR_TRACE_B BIT(8) | 1279 | #define WL1251_ACX_INTR_TRACE_B BIT(8) |
1280 | 1280 | ||
1281 | /* Command processing completion */ | 1281 | /* Command processing completion */ |
diff --git a/drivers/net/wireless/wl1251/main.c b/drivers/net/wireless/wl1251/main.c index 012e1a4016fe..40372bac9482 100644 --- a/drivers/net/wireless/wl1251/main.c +++ b/drivers/net/wireless/wl1251/main.c | |||
@@ -1039,6 +1039,9 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw, | |||
1039 | 1039 | ||
1040 | if (changed & BSS_CHANGED_BEACON) { | 1040 | if (changed & BSS_CHANGED_BEACON) { |
1041 | beacon = ieee80211_beacon_get(hw, vif); | 1041 | beacon = ieee80211_beacon_get(hw, vif); |
1042 | if (!beacon) | ||
1043 | goto out_sleep; | ||
1044 | |||
1042 | ret = wl1251_cmd_template_set(wl, CMD_BEACON, beacon->data, | 1045 | ret = wl1251_cmd_template_set(wl, CMD_BEACON, beacon->data, |
1043 | beacon->len); | 1046 | beacon->len); |
1044 | 1047 | ||
diff --git a/drivers/net/wireless/wl1251/wl1251.h b/drivers/net/wireless/wl1251/wl1251.h index 13fbeeccf609..c0ce2c8b43b8 100644 --- a/drivers/net/wireless/wl1251/wl1251.h +++ b/drivers/net/wireless/wl1251/wl1251.h | |||
@@ -419,7 +419,7 @@ void wl1251_disable_interrupts(struct wl1251 *wl); | |||
419 | #define WL1251_FW_NAME "wl1251-fw.bin" | 419 | #define WL1251_FW_NAME "wl1251-fw.bin" |
420 | #define WL1251_NVS_NAME "wl1251-nvs.bin" | 420 | #define WL1251_NVS_NAME "wl1251-nvs.bin" |
421 | 421 | ||
422 | #define WL1251_POWER_ON_SLEEP 10 /* in miliseconds */ | 422 | #define WL1251_POWER_ON_SLEEP 10 /* in milliseconds */ |
423 | 423 | ||
424 | #define WL1251_PART_DOWN_MEM_START 0x0 | 424 | #define WL1251_PART_DOWN_MEM_START 0x0 |
425 | #define WL1251_PART_DOWN_MEM_SIZE 0x16800 | 425 | #define WL1251_PART_DOWN_MEM_SIZE 0x16800 |
diff --git a/drivers/net/wireless/wl12xx/acx.h b/drivers/net/wireless/wl12xx/acx.h index 9cbc3f40c8dd..7bd8e4db4a71 100644 --- a/drivers/net/wireless/wl12xx/acx.h +++ b/drivers/net/wireless/wl12xx/acx.h | |||
@@ -47,9 +47,9 @@ | |||
47 | #define WL1271_ACX_INTR_HW_AVAILABLE BIT(5) | 47 | #define WL1271_ACX_INTR_HW_AVAILABLE BIT(5) |
48 | /* The MISC bit is used for aggregation of RX, TxComplete and TX rate update */ | 48 | /* The MISC bit is used for aggregation of RX, TxComplete and TX rate update */ |
49 | #define WL1271_ACX_INTR_DATA BIT(6) | 49 | #define WL1271_ACX_INTR_DATA BIT(6) |
50 | /* Trace meassge on MBOX #A */ | 50 | /* Trace message on MBOX #A */ |
51 | #define WL1271_ACX_INTR_TRACE_A BIT(7) | 51 | #define WL1271_ACX_INTR_TRACE_A BIT(7) |
52 | /* Trace meassge on MBOX #B */ | 52 | /* Trace message on MBOX #B */ |
53 | #define WL1271_ACX_INTR_TRACE_B BIT(8) | 53 | #define WL1271_ACX_INTR_TRACE_B BIT(8) |
54 | 54 | ||
55 | #define WL1271_ACX_INTR_ALL 0xFFFFFFFF | 55 | #define WL1271_ACX_INTR_ALL 0xFFFFFFFF |
diff --git a/drivers/net/wireless/wl12xx/spi.c b/drivers/net/wireless/wl12xx/spi.c index 46714910f98c..7145ea543783 100644 --- a/drivers/net/wireless/wl12xx/spi.c +++ b/drivers/net/wireless/wl12xx/spi.c | |||
@@ -110,9 +110,8 @@ static void wl1271_spi_reset(struct wl1271 *wl) | |||
110 | spi_message_add_tail(&t, &m); | 110 | spi_message_add_tail(&t, &m); |
111 | 111 | ||
112 | spi_sync(wl_to_spi(wl), &m); | 112 | spi_sync(wl_to_spi(wl), &m); |
113 | kfree(cmd); | ||
114 | |||
115 | wl1271_dump(DEBUG_SPI, "spi reset -> ", cmd, WSPI_INIT_CMD_LEN); | 113 | wl1271_dump(DEBUG_SPI, "spi reset -> ", cmd, WSPI_INIT_CMD_LEN); |
114 | kfree(cmd); | ||
116 | } | 115 | } |
117 | 116 | ||
118 | static void wl1271_spi_init(struct wl1271 *wl) | 117 | static void wl1271_spi_init(struct wl1271 *wl) |
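The wl1271_spi_reset() fix is an ordinary use-after-free: the command buffer was freed and then handed to wl1271_dump(), so the dump read freed memory; the patch dumps first and frees afterwards. The rule it restores, in plain C:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
        unsigned char *cmd = malloc(8);
        if (!cmd)
            return 1;
        memset(cmd, 0xff, 8);

        printf("first byte: 0x%02x\n", cmd[0]);  /* use the buffer ... */
        free(cmd);                               /* ... then release it */
        /* touching cmd after free(cmd) would be undefined behaviour */
        return 0;
    }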
diff --git a/drivers/net/wireless/wl12xx/wl12xx.h b/drivers/net/wireless/wl12xx/wl12xx.h index ce3d31f98c55..9050dd9b62d2 100644 --- a/drivers/net/wireless/wl12xx/wl12xx.h +++ b/drivers/net/wireless/wl12xx/wl12xx.h | |||
@@ -416,8 +416,8 @@ int wl1271_plt_stop(struct wl1271 *wl); | |||
416 | 416 | ||
417 | /* WL1271 needs a 200ms sleep after power on, and a 20ms sleep before power | 417 | /* WL1271 needs a 200ms sleep after power on, and a 20ms sleep before power |
418 | on in case is has been shut down shortly before */ | 418 | on in case is has been shut down shortly before */ |
419 | #define WL1271_PRE_POWER_ON_SLEEP 20 /* in miliseconds */ | 419 | #define WL1271_PRE_POWER_ON_SLEEP 20 /* in milliseconds */ |
420 | #define WL1271_POWER_ON_SLEEP 200 /* in miliseconds */ | 420 | #define WL1271_POWER_ON_SLEEP 200 /* in milliseconds */ |
421 | 421 | ||
422 | /* Macros to handle wl1271.sta_rate_set */ | 422 | /* Macros to handle wl1271.sta_rate_set */ |
423 | #define HW_BG_RATES_MASK 0xffff | 423 | #define HW_BG_RATES_MASK 0xffff |
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c index ee82df62e646..3e5befe4d03b 100644 --- a/drivers/net/wireless/wl3501_cs.c +++ b/drivers/net/wireless/wl3501_cs.c | |||
@@ -192,7 +192,7 @@ static inline void wl3501_switch_page(struct wl3501_card *this, u8 page) | |||
192 | } | 192 | } |
193 | 193 | ||
194 | /* | 194 | /* |
195 | * Get Ethernet MAC addresss. | 195 | * Get Ethernet MAC address. |
196 | * | 196 | * |
197 | * WARNING: We switch to FPAGE0 and switc back again. | 197 | * WARNING: We switch to FPAGE0 and switc back again. |
198 | * Making sure there is no other WL function beening called by ISR. | 198 | * Making sure there is no other WL function beening called by ISR. |
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index cdbeec9f83ea..da1f12120346 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c | |||
@@ -120,6 +120,9 @@ struct netfront_info { | |||
120 | unsigned long rx_pfn_array[NET_RX_RING_SIZE]; | 120 | unsigned long rx_pfn_array[NET_RX_RING_SIZE]; |
121 | struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1]; | 121 | struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1]; |
122 | struct mmu_update rx_mmu[NET_RX_RING_SIZE]; | 122 | struct mmu_update rx_mmu[NET_RX_RING_SIZE]; |
123 | |||
124 | /* Statistics */ | ||
125 | int rx_gso_checksum_fixup; | ||
123 | }; | 126 | }; |
124 | 127 | ||
125 | struct netfront_rx_info { | 128 | struct netfront_rx_info { |
@@ -488,7 +491,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
488 | 491 | ||
489 | if (unlikely(!netif_carrier_ok(dev) || | 492 | if (unlikely(!netif_carrier_ok(dev) || |
490 | (frags > 1 && !xennet_can_sg(dev)) || | 493 | (frags > 1 && !xennet_can_sg(dev)) || |
491 | netif_needs_gso(dev, skb))) { | 494 | netif_needs_gso(skb, netif_skb_features(skb)))) { |
492 | spin_unlock_irq(&np->tx_lock); | 495 | spin_unlock_irq(&np->tx_lock); |
493 | goto drop; | 496 | goto drop; |
494 | } | 497 | } |
@@ -770,11 +773,29 @@ static RING_IDX xennet_fill_frags(struct netfront_info *np, | |||
770 | return cons; | 773 | return cons; |
771 | } | 774 | } |
772 | 775 | ||
773 | static int skb_checksum_setup(struct sk_buff *skb) | 776 | static int checksum_setup(struct net_device *dev, struct sk_buff *skb) |
774 | { | 777 | { |
775 | struct iphdr *iph; | 778 | struct iphdr *iph; |
776 | unsigned char *th; | 779 | unsigned char *th; |
777 | int err = -EPROTO; | 780 | int err = -EPROTO; |
781 | int recalculate_partial_csum = 0; | ||
782 | |||
783 | /* | ||
784 | * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy | ||
785 | * peers can fail to set NETRXF_csum_blank when sending a GSO | ||
786 | * frame. In this case force the SKB to CHECKSUM_PARTIAL and | ||
787 | * recalculate the partial checksum. | ||
788 | */ | ||
789 | if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) { | ||
790 | struct netfront_info *np = netdev_priv(dev); | ||
791 | np->rx_gso_checksum_fixup++; | ||
792 | skb->ip_summed = CHECKSUM_PARTIAL; | ||
793 | recalculate_partial_csum = 1; | ||
794 | } | ||
795 | |||
796 | /* A non-CHECKSUM_PARTIAL SKB does not require setup. */ | ||
797 | if (skb->ip_summed != CHECKSUM_PARTIAL) | ||
798 | return 0; | ||
778 | 799 | ||
779 | if (skb->protocol != htons(ETH_P_IP)) | 800 | if (skb->protocol != htons(ETH_P_IP)) |
780 | goto out; | 801 | goto out; |
@@ -788,9 +809,23 @@ static int skb_checksum_setup(struct sk_buff *skb) | |||
788 | switch (iph->protocol) { | 809 | switch (iph->protocol) { |
789 | case IPPROTO_TCP: | 810 | case IPPROTO_TCP: |
790 | skb->csum_offset = offsetof(struct tcphdr, check); | 811 | skb->csum_offset = offsetof(struct tcphdr, check); |
812 | |||
813 | if (recalculate_partial_csum) { | ||
814 | struct tcphdr *tcph = (struct tcphdr *)th; | ||
815 | tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, | ||
816 | skb->len - iph->ihl*4, | ||
817 | IPPROTO_TCP, 0); | ||
818 | } | ||
791 | break; | 819 | break; |
792 | case IPPROTO_UDP: | 820 | case IPPROTO_UDP: |
793 | skb->csum_offset = offsetof(struct udphdr, check); | 821 | skb->csum_offset = offsetof(struct udphdr, check); |
822 | |||
823 | if (recalculate_partial_csum) { | ||
824 | struct udphdr *udph = (struct udphdr *)th; | ||
825 | udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, | ||
826 | skb->len - iph->ihl*4, | ||
827 | IPPROTO_UDP, 0); | ||
828 | } | ||
794 | break; | 829 | break; |
795 | default: | 830 | default: |
796 | if (net_ratelimit()) | 831 | if (net_ratelimit()) |
@@ -829,13 +864,11 @@ static int handle_incoming_queue(struct net_device *dev, | |||
829 | /* Ethernet work: Delayed to here as it peeks the header. */ | 864 | /* Ethernet work: Delayed to here as it peeks the header. */ |
830 | skb->protocol = eth_type_trans(skb, dev); | 865 | skb->protocol = eth_type_trans(skb, dev); |
831 | 866 | ||
832 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 867 | if (checksum_setup(dev, skb)) { |
833 | if (skb_checksum_setup(skb)) { | 868 | kfree_skb(skb); |
834 | kfree_skb(skb); | 869 | packets_dropped++; |
835 | packets_dropped++; | 870 | dev->stats.rx_errors++; |
836 | dev->stats.rx_errors++; | 871 | continue; |
837 | continue; | ||
838 | } | ||
839 | } | 872 | } |
840 | 873 | ||
841 | dev->stats.rx_packets++; | 874 | dev->stats.rx_packets++; |
@@ -1632,12 +1665,59 @@ static void netback_changed(struct xenbus_device *dev, | |||
1632 | } | 1665 | } |
1633 | } | 1666 | } |
1634 | 1667 | ||
1668 | static const struct xennet_stat { | ||
1669 | char name[ETH_GSTRING_LEN]; | ||
1670 | u16 offset; | ||
1671 | } xennet_stats[] = { | ||
1672 | { | ||
1673 | "rx_gso_checksum_fixup", | ||
1674 | offsetof(struct netfront_info, rx_gso_checksum_fixup) | ||
1675 | }, | ||
1676 | }; | ||
1677 | |||
1678 | static int xennet_get_sset_count(struct net_device *dev, int string_set) | ||
1679 | { | ||
1680 | switch (string_set) { | ||
1681 | case ETH_SS_STATS: | ||
1682 | return ARRAY_SIZE(xennet_stats); | ||
1683 | default: | ||
1684 | return -EINVAL; | ||
1685 | } | ||
1686 | } | ||
1687 | |||
1688 | static void xennet_get_ethtool_stats(struct net_device *dev, | ||
1689 | struct ethtool_stats *stats, u64 * data) | ||
1690 | { | ||
1691 | void *np = netdev_priv(dev); | ||
1692 | int i; | ||
1693 | |||
1694 | for (i = 0; i < ARRAY_SIZE(xennet_stats); i++) | ||
1695 | data[i] = *(int *)(np + xennet_stats[i].offset); | ||
1696 | } | ||
1697 | |||
1698 | static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data) | ||
1699 | { | ||
1700 | int i; | ||
1701 | |||
1702 | switch (stringset) { | ||
1703 | case ETH_SS_STATS: | ||
1704 | for (i = 0; i < ARRAY_SIZE(xennet_stats); i++) | ||
1705 | memcpy(data + i * ETH_GSTRING_LEN, | ||
1706 | xennet_stats[i].name, ETH_GSTRING_LEN); | ||
1707 | break; | ||
1708 | } | ||
1709 | } | ||
1710 | |||
1635 | static const struct ethtool_ops xennet_ethtool_ops = | 1711 | static const struct ethtool_ops xennet_ethtool_ops = |
1636 | { | 1712 | { |
1637 | .set_tx_csum = ethtool_op_set_tx_csum, | 1713 | .set_tx_csum = ethtool_op_set_tx_csum, |
1638 | .set_sg = xennet_set_sg, | 1714 | .set_sg = xennet_set_sg, |
1639 | .set_tso = xennet_set_tso, | 1715 | .set_tso = xennet_set_tso, |
1640 | .get_link = ethtool_op_get_link, | 1716 | .get_link = ethtool_op_get_link, |
1717 | |||
1718 | .get_sset_count = xennet_get_sset_count, | ||
1719 | .get_ethtool_stats = xennet_get_ethtool_stats, | ||
1720 | .get_strings = xennet_get_strings, | ||
1641 | }; | 1721 | }; |
1642 | 1722 | ||
1643 | #ifdef CONFIG_SYSFS | 1723 | #ifdef CONFIG_SYSFS |
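The xen-netfront.c changes teach checksum_setup() to tolerate buggy backends that send GSO frames without CHECKSUM_PARTIAL: the frame is forced to CHECKSUM_PARTIAL, the event is counted in the new rx_gso_checksum_fixup statistic (exported through the added ethtool hooks), and the TCP/UDP checksum field is re-seeded with the IPv4 pseudo-header sum (the ~csum_tcpudp_magic() calls), leaving the payload portion to be completed later. A stand-alone sketch of that pseudo-header arithmetic; byte order and the exact kernel helpers are glossed over, and the addresses below are arbitrary:

    #include <stdint.h>
    #include <stdio.h>

    /* Fold a 32-bit one's-complement accumulator down to 16 bits. */
    static uint16_t csum_fold(uint32_t sum)
    {
        while (sum >> 16)
            sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)sum;
    }

    /* Pseudo-header sum for TCP/UDP over IPv4: src, dst, protocol, length. */
    static uint16_t pseudo_hdr_sum(uint32_t saddr, uint32_t daddr,
                                   uint16_t len, uint8_t proto)
    {
        uint32_t sum = 0;

        sum += (saddr >> 16) + (saddr & 0xffff);
        sum += (daddr >> 16) + (daddr & 0xffff);
        sum += proto;
        sum += len;
        return csum_fold(sum);
    }

    int main(void)
    {
        /* 10.0.0.1 -> 10.0.0.2, 1400 bytes of TCP (protocol 6) */
        printf("seed: 0x%04x\n", pseudo_hdr_sum(0x0a000001, 0x0a000002, 1400, 6));
        return 0;
    }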
diff --git a/drivers/net/xilinx_emaclite.c b/drivers/net/xilinx_emaclite.c index de6c3086d232..cad66ce1640b 100644 --- a/drivers/net/xilinx_emaclite.c +++ b/drivers/net/xilinx_emaclite.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/of_device.h> | 24 | #include <linux/of_device.h> |
25 | #include <linux/of_platform.h> | 25 | #include <linux/of_platform.h> |
26 | #include <linux/of_mdio.h> | 26 | #include <linux/of_mdio.h> |
27 | #include <linux/of_net.h> | ||
27 | #include <linux/phy.h> | 28 | #include <linux/phy.h> |
28 | 29 | ||
29 | #define DRIVER_NAME "xilinx_emaclite" | 30 | #define DRIVER_NAME "xilinx_emaclite" |