diff options
115 files changed, 1327 insertions, 1041 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 5780dad6a3cb..ff652c77a0a5 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig | |||
@@ -1950,14 +1950,6 @@ config FEC | |||
1950 | Say Y here if you want to use the built-in 10/100 Fast ethernet | 1950 | Say Y here if you want to use the built-in 10/100 Fast ethernet |
1951 | controller on some Motorola ColdFire and Freescale i.MX processors. | 1951 | controller on some Motorola ColdFire and Freescale i.MX processors. |
1952 | 1952 | ||
1953 | config FEC2 | ||
1954 | bool "Second FEC ethernet controller" | ||
1955 | depends on FEC | ||
1956 | help | ||
1957 | Say Y here if you want to use the second built-in 10/100 Fast | ||
1958 | ethernet controller on some Motorola ColdFire and Freescale | ||
1959 | i.MX processors. | ||
1960 | |||
1961 | config FEC_MPC52xx | 1953 | config FEC_MPC52xx |
1962 | tristate "MPC52xx FEC driver" | 1954 | tristate "MPC52xx FEC driver" |
1963 | depends on PPC_MPC52xx && PPC_BESTCOMM | 1955 | depends on PPC_MPC52xx && PPC_BESTCOMM |
diff --git a/drivers/net/arm/ks8695net.c b/drivers/net/arm/ks8695net.c index 54c6d849cf25..62d6f88cbab5 100644 --- a/drivers/net/arm/ks8695net.c +++ b/drivers/net/arm/ks8695net.c | |||
@@ -854,12 +854,12 @@ ks8695_set_msglevel(struct net_device *ndev, u32 value) | |||
854 | } | 854 | } |
855 | 855 | ||
856 | /** | 856 | /** |
857 | * ks8695_get_settings - Get device-specific settings. | 857 | * ks8695_wan_get_settings - Get device-specific settings. |
858 | * @ndev: The network device to read settings from | 858 | * @ndev: The network device to read settings from |
859 | * @cmd: The ethtool structure to read into | 859 | * @cmd: The ethtool structure to read into |
860 | */ | 860 | */ |
861 | static int | 861 | static int |
862 | ks8695_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd) | 862 | ks8695_wan_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd) |
863 | { | 863 | { |
864 | struct ks8695_priv *ksp = netdev_priv(ndev); | 864 | struct ks8695_priv *ksp = netdev_priv(ndev); |
865 | u32 ctrl; | 865 | u32 ctrl; |
@@ -870,69 +870,50 @@ ks8695_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd) | |||
870 | SUPPORTED_TP | SUPPORTED_MII); | 870 | SUPPORTED_TP | SUPPORTED_MII); |
871 | cmd->transceiver = XCVR_INTERNAL; | 871 | cmd->transceiver = XCVR_INTERNAL; |
872 | 872 | ||
873 | /* Port specific extras */ | 873 | cmd->advertising = ADVERTISED_TP | ADVERTISED_MII; |
874 | switch (ksp->dtype) { | 874 | cmd->port = PORT_MII; |
875 | case KS8695_DTYPE_HPNA: | 875 | cmd->supported |= (SUPPORTED_Autoneg | SUPPORTED_Pause); |
876 | cmd->phy_address = 0; | 876 | cmd->phy_address = 0; |
877 | /* not supported for HPNA */ | ||
878 | cmd->autoneg = AUTONEG_DISABLE; | ||
879 | 877 | ||
880 | /* BUG: Erm, dtype hpna implies no phy regs */ | 878 | ctrl = readl(ksp->phyiface_regs + KS8695_WMC); |
881 | /* | 879 | if ((ctrl & WMC_WAND) == 0) { |
882 | ctrl = readl(KS8695_MISC_VA + KS8695_HMC); | 880 | /* auto-negotiation is enabled */ |
883 | cmd->speed = (ctrl & HMC_HSS) ? SPEED_100 : SPEED_10; | 881 | cmd->advertising |= ADVERTISED_Autoneg; |
884 | cmd->duplex = (ctrl & HMC_HDS) ? DUPLEX_FULL : DUPLEX_HALF; | 882 | if (ctrl & WMC_WANA100F) |
885 | */ | 883 | cmd->advertising |= ADVERTISED_100baseT_Full; |
886 | return -EOPNOTSUPP; | 884 | if (ctrl & WMC_WANA100H) |
887 | case KS8695_DTYPE_WAN: | 885 | cmd->advertising |= ADVERTISED_100baseT_Half; |
888 | cmd->advertising = ADVERTISED_TP | ADVERTISED_MII; | 886 | if (ctrl & WMC_WANA10F) |
889 | cmd->port = PORT_MII; | 887 | cmd->advertising |= ADVERTISED_10baseT_Full; |
890 | cmd->supported |= (SUPPORTED_Autoneg | SUPPORTED_Pause); | 888 | if (ctrl & WMC_WANA10H) |
891 | cmd->phy_address = 0; | 889 | cmd->advertising |= ADVERTISED_10baseT_Half; |
890 | if (ctrl & WMC_WANAP) | ||
891 | cmd->advertising |= ADVERTISED_Pause; | ||
892 | cmd->autoneg = AUTONEG_ENABLE; | ||
893 | |||
894 | cmd->speed = (ctrl & WMC_WSS) ? SPEED_100 : SPEED_10; | ||
895 | cmd->duplex = (ctrl & WMC_WDS) ? | ||
896 | DUPLEX_FULL : DUPLEX_HALF; | ||
897 | } else { | ||
898 | /* auto-negotiation is disabled */ | ||
899 | cmd->autoneg = AUTONEG_DISABLE; | ||
892 | 900 | ||
893 | ctrl = readl(ksp->phyiface_regs + KS8695_WMC); | 901 | cmd->speed = (ctrl & WMC_WANF100) ? |
894 | if ((ctrl & WMC_WAND) == 0) { | 902 | SPEED_100 : SPEED_10; |
895 | /* auto-negotiation is enabled */ | 903 | cmd->duplex = (ctrl & WMC_WANFF) ? |
896 | cmd->advertising |= ADVERTISED_Autoneg; | 904 | DUPLEX_FULL : DUPLEX_HALF; |
897 | if (ctrl & WMC_WANA100F) | ||
898 | cmd->advertising |= ADVERTISED_100baseT_Full; | ||
899 | if (ctrl & WMC_WANA100H) | ||
900 | cmd->advertising |= ADVERTISED_100baseT_Half; | ||
901 | if (ctrl & WMC_WANA10F) | ||
902 | cmd->advertising |= ADVERTISED_10baseT_Full; | ||
903 | if (ctrl & WMC_WANA10H) | ||
904 | cmd->advertising |= ADVERTISED_10baseT_Half; | ||
905 | if (ctrl & WMC_WANAP) | ||
906 | cmd->advertising |= ADVERTISED_Pause; | ||
907 | cmd->autoneg = AUTONEG_ENABLE; | ||
908 | |||
909 | cmd->speed = (ctrl & WMC_WSS) ? SPEED_100 : SPEED_10; | ||
910 | cmd->duplex = (ctrl & WMC_WDS) ? | ||
911 | DUPLEX_FULL : DUPLEX_HALF; | ||
912 | } else { | ||
913 | /* auto-negotiation is disabled */ | ||
914 | cmd->autoneg = AUTONEG_DISABLE; | ||
915 | |||
916 | cmd->speed = (ctrl & WMC_WANF100) ? | ||
917 | SPEED_100 : SPEED_10; | ||
918 | cmd->duplex = (ctrl & WMC_WANFF) ? | ||
919 | DUPLEX_FULL : DUPLEX_HALF; | ||
920 | } | ||
921 | break; | ||
922 | case KS8695_DTYPE_LAN: | ||
923 | return -EOPNOTSUPP; | ||
924 | } | 905 | } |
925 | 906 | ||
926 | return 0; | 907 | return 0; |
927 | } | 908 | } |
928 | 909 | ||
929 | /** | 910 | /** |
930 | * ks8695_set_settings - Set device-specific settings. | 911 | * ks8695_wan_set_settings - Set device-specific settings. |
931 | * @ndev: The network device to configure | 912 | * @ndev: The network device to configure |
932 | * @cmd: The settings to configure | 913 | * @cmd: The settings to configure |
933 | */ | 914 | */ |
934 | static int | 915 | static int |
935 | ks8695_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd) | 916 | ks8695_wan_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd) |
936 | { | 917 | { |
937 | struct ks8695_priv *ksp = netdev_priv(ndev); | 918 | struct ks8695_priv *ksp = netdev_priv(ndev); |
938 | u32 ctrl; | 919 | u32 ctrl; |
@@ -956,171 +937,85 @@ ks8695_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd) | |||
956 | ADVERTISED_100baseT_Full)) == 0) | 937 | ADVERTISED_100baseT_Full)) == 0) |
957 | return -EINVAL; | 938 | return -EINVAL; |
958 | 939 | ||
959 | switch (ksp->dtype) { | 940 | ctrl = readl(ksp->phyiface_regs + KS8695_WMC); |
960 | case KS8695_DTYPE_HPNA: | ||
961 | /* HPNA does not support auto-negotiation. */ | ||
962 | return -EINVAL; | ||
963 | case KS8695_DTYPE_WAN: | ||
964 | ctrl = readl(ksp->phyiface_regs + KS8695_WMC); | ||
965 | |||
966 | ctrl &= ~(WMC_WAND | WMC_WANA100F | WMC_WANA100H | | ||
967 | WMC_WANA10F | WMC_WANA10H); | ||
968 | if (cmd->advertising & ADVERTISED_100baseT_Full) | ||
969 | ctrl |= WMC_WANA100F; | ||
970 | if (cmd->advertising & ADVERTISED_100baseT_Half) | ||
971 | ctrl |= WMC_WANA100H; | ||
972 | if (cmd->advertising & ADVERTISED_10baseT_Full) | ||
973 | ctrl |= WMC_WANA10F; | ||
974 | if (cmd->advertising & ADVERTISED_10baseT_Half) | ||
975 | ctrl |= WMC_WANA10H; | ||
976 | |||
977 | /* force a re-negotiation */ | ||
978 | ctrl |= WMC_WANR; | ||
979 | writel(ctrl, ksp->phyiface_regs + KS8695_WMC); | ||
980 | break; | ||
981 | case KS8695_DTYPE_LAN: | ||
982 | return -EOPNOTSUPP; | ||
983 | } | ||
984 | 941 | ||
942 | ctrl &= ~(WMC_WAND | WMC_WANA100F | WMC_WANA100H | | ||
943 | WMC_WANA10F | WMC_WANA10H); | ||
944 | if (cmd->advertising & ADVERTISED_100baseT_Full) | ||
945 | ctrl |= WMC_WANA100F; | ||
946 | if (cmd->advertising & ADVERTISED_100baseT_Half) | ||
947 | ctrl |= WMC_WANA100H; | ||
948 | if (cmd->advertising & ADVERTISED_10baseT_Full) | ||
949 | ctrl |= WMC_WANA10F; | ||
950 | if (cmd->advertising & ADVERTISED_10baseT_Half) | ||
951 | ctrl |= WMC_WANA10H; | ||
952 | |||
953 | /* force a re-negotiation */ | ||
954 | ctrl |= WMC_WANR; | ||
955 | writel(ctrl, ksp->phyiface_regs + KS8695_WMC); | ||
985 | } else { | 956 | } else { |
986 | switch (ksp->dtype) { | 957 | ctrl = readl(ksp->phyiface_regs + KS8695_WMC); |
987 | case KS8695_DTYPE_HPNA: | 958 | |
988 | /* BUG: dtype_hpna implies no phy registers */ | 959 | /* disable auto-negotiation */ |
989 | /* | 960 | ctrl |= WMC_WAND; |
990 | ctrl = __raw_readl(KS8695_MISC_VA + KS8695_HMC); | 961 | ctrl &= ~(WMC_WANF100 | WMC_WANFF); |
991 | 962 | ||
992 | ctrl &= ~(HMC_HSS | HMC_HDS); | 963 | if (cmd->speed == SPEED_100) |
993 | if (cmd->speed == SPEED_100) | 964 | ctrl |= WMC_WANF100; |
994 | ctrl |= HMC_HSS; | 965 | if (cmd->duplex == DUPLEX_FULL) |
995 | if (cmd->duplex == DUPLEX_FULL) | 966 | ctrl |= WMC_WANFF; |
996 | ctrl |= HMC_HDS; | 967 | |
997 | 968 | writel(ctrl, ksp->phyiface_regs + KS8695_WMC); | |
998 | __raw_writel(ctrl, KS8695_MISC_VA + KS8695_HMC); | ||
999 | */ | ||
1000 | return -EOPNOTSUPP; | ||
1001 | case KS8695_DTYPE_WAN: | ||
1002 | ctrl = readl(ksp->phyiface_regs + KS8695_WMC); | ||
1003 | |||
1004 | /* disable auto-negotiation */ | ||
1005 | ctrl |= WMC_WAND; | ||
1006 | ctrl &= ~(WMC_WANF100 | WMC_WANFF); | ||
1007 | |||
1008 | if (cmd->speed == SPEED_100) | ||
1009 | ctrl |= WMC_WANF100; | ||
1010 | if (cmd->duplex == DUPLEX_FULL) | ||
1011 | ctrl |= WMC_WANFF; | ||
1012 | |||
1013 | writel(ctrl, ksp->phyiface_regs + KS8695_WMC); | ||
1014 | break; | ||
1015 | case KS8695_DTYPE_LAN: | ||
1016 | return -EOPNOTSUPP; | ||
1017 | } | ||
1018 | } | 969 | } |
1019 | 970 | ||
1020 | return 0; | 971 | return 0; |
1021 | } | 972 | } |
1022 | 973 | ||
1023 | /** | 974 | /** |
1024 | * ks8695_nwayreset - Restart the autonegotiation on the port. | 975 | * ks8695_wan_nwayreset - Restart the autonegotiation on the port. |
1025 | * @ndev: The network device to restart autoneotiation on | 976 | * @ndev: The network device to restart autoneotiation on |
1026 | */ | 977 | */ |
1027 | static int | 978 | static int |
1028 | ks8695_nwayreset(struct net_device *ndev) | 979 | ks8695_wan_nwayreset(struct net_device *ndev) |
1029 | { | 980 | { |
1030 | struct ks8695_priv *ksp = netdev_priv(ndev); | 981 | struct ks8695_priv *ksp = netdev_priv(ndev); |
1031 | u32 ctrl; | 982 | u32 ctrl; |
1032 | 983 | ||
1033 | switch (ksp->dtype) { | 984 | ctrl = readl(ksp->phyiface_regs + KS8695_WMC); |
1034 | case KS8695_DTYPE_HPNA: | ||
1035 | /* No phy means no autonegotiation on hpna */ | ||
1036 | return -EINVAL; | ||
1037 | case KS8695_DTYPE_WAN: | ||
1038 | ctrl = readl(ksp->phyiface_regs + KS8695_WMC); | ||
1039 | |||
1040 | if ((ctrl & WMC_WAND) == 0) | ||
1041 | writel(ctrl | WMC_WANR, | ||
1042 | ksp->phyiface_regs + KS8695_WMC); | ||
1043 | else | ||
1044 | /* auto-negotiation not enabled */ | ||
1045 | return -EINVAL; | ||
1046 | break; | ||
1047 | case KS8695_DTYPE_LAN: | ||
1048 | return -EOPNOTSUPP; | ||
1049 | } | ||
1050 | |||
1051 | return 0; | ||
1052 | } | ||
1053 | 985 | ||
1054 | /** | 986 | if ((ctrl & WMC_WAND) == 0) |
1055 | * ks8695_get_link - Retrieve link status of network interface | 987 | writel(ctrl | WMC_WANR, |
1056 | * @ndev: The network interface to retrive the link status of. | 988 | ksp->phyiface_regs + KS8695_WMC); |
1057 | */ | 989 | else |
1058 | static u32 | 990 | /* auto-negotiation not enabled */ |
1059 | ks8695_get_link(struct net_device *ndev) | 991 | return -EINVAL; |
1060 | { | ||
1061 | struct ks8695_priv *ksp = netdev_priv(ndev); | ||
1062 | u32 ctrl; | ||
1063 | 992 | ||
1064 | switch (ksp->dtype) { | ||
1065 | case KS8695_DTYPE_HPNA: | ||
1066 | /* HPNA always has link */ | ||
1067 | return 1; | ||
1068 | case KS8695_DTYPE_WAN: | ||
1069 | /* WAN we can read the PHY for */ | ||
1070 | ctrl = readl(ksp->phyiface_regs + KS8695_WMC); | ||
1071 | return ctrl & WMC_WLS; | ||
1072 | case KS8695_DTYPE_LAN: | ||
1073 | return -EOPNOTSUPP; | ||
1074 | } | ||
1075 | return 0; | 993 | return 0; |
1076 | } | 994 | } |
1077 | 995 | ||
1078 | /** | 996 | /** |
1079 | * ks8695_get_pause - Retrieve network pause/flow-control advertising | 997 | * ks8695_wan_get_pause - Retrieve network pause/flow-control advertising |
1080 | * @ndev: The device to retrieve settings from | 998 | * @ndev: The device to retrieve settings from |
1081 | * @param: The structure to fill out with the information | 999 | * @param: The structure to fill out with the information |
1082 | */ | 1000 | */ |
1083 | static void | 1001 | static void |
1084 | ks8695_get_pause(struct net_device *ndev, struct ethtool_pauseparam *param) | 1002 | ks8695_wan_get_pause(struct net_device *ndev, struct ethtool_pauseparam *param) |
1085 | { | 1003 | { |
1086 | struct ks8695_priv *ksp = netdev_priv(ndev); | 1004 | struct ks8695_priv *ksp = netdev_priv(ndev); |
1087 | u32 ctrl; | 1005 | u32 ctrl; |
1088 | 1006 | ||
1089 | switch (ksp->dtype) { | 1007 | ctrl = readl(ksp->phyiface_regs + KS8695_WMC); |
1090 | case KS8695_DTYPE_HPNA: | ||
1091 | /* No phy link on hpna to configure */ | ||
1092 | return; | ||
1093 | case KS8695_DTYPE_WAN: | ||
1094 | ctrl = readl(ksp->phyiface_regs + KS8695_WMC); | ||
1095 | |||
1096 | /* advertise Pause */ | ||
1097 | param->autoneg = (ctrl & WMC_WANAP); | ||
1098 | 1008 | ||
1099 | /* current Rx Flow-control */ | 1009 | /* advertise Pause */ |
1100 | ctrl = ks8695_readreg(ksp, KS8695_DRXC); | 1010 | param->autoneg = (ctrl & WMC_WANAP); |
1101 | param->rx_pause = (ctrl & DRXC_RFCE); | ||
1102 | 1011 | ||
1103 | /* current Tx Flow-control */ | 1012 | /* current Rx Flow-control */ |
1104 | ctrl = ks8695_readreg(ksp, KS8695_DTXC); | 1013 | ctrl = ks8695_readreg(ksp, KS8695_DRXC); |
1105 | param->tx_pause = (ctrl & DTXC_TFCE); | 1014 | param->rx_pause = (ctrl & DRXC_RFCE); |
1106 | break; | ||
1107 | case KS8695_DTYPE_LAN: | ||
1108 | /* The LAN's "phy" is a direct-attached switch */ | ||
1109 | return; | ||
1110 | } | ||
1111 | } | ||
1112 | 1015 | ||
1113 | /** | 1016 | /* current Tx Flow-control */ |
1114 | * ks8695_set_pause - Configure pause/flow-control | 1017 | ctrl = ks8695_readreg(ksp, KS8695_DTXC); |
1115 | * @ndev: The device to configure | 1018 | param->tx_pause = (ctrl & DTXC_TFCE); |
1116 | * @param: The pause parameters to set | ||
1117 | * | ||
1118 | * TODO: Implement this | ||
1119 | */ | ||
1120 | static int | ||
1121 | ks8695_set_pause(struct net_device *ndev, struct ethtool_pauseparam *param) | ||
1122 | { | ||
1123 | return -EOPNOTSUPP; | ||
1124 | } | 1019 | } |
1125 | 1020 | ||
1126 | /** | 1021 | /** |
@@ -1140,12 +1035,17 @@ ks8695_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info) | |||
1140 | static const struct ethtool_ops ks8695_ethtool_ops = { | 1035 | static const struct ethtool_ops ks8695_ethtool_ops = { |
1141 | .get_msglevel = ks8695_get_msglevel, | 1036 | .get_msglevel = ks8695_get_msglevel, |
1142 | .set_msglevel = ks8695_set_msglevel, | 1037 | .set_msglevel = ks8695_set_msglevel, |
1143 | .get_settings = ks8695_get_settings, | 1038 | .get_drvinfo = ks8695_get_drvinfo, |
1144 | .set_settings = ks8695_set_settings, | 1039 | }; |
1145 | .nway_reset = ks8695_nwayreset, | 1040 | |
1146 | .get_link = ks8695_get_link, | 1041 | static const struct ethtool_ops ks8695_wan_ethtool_ops = { |
1147 | .get_pauseparam = ks8695_get_pause, | 1042 | .get_msglevel = ks8695_get_msglevel, |
1148 | .set_pauseparam = ks8695_set_pause, | 1043 | .set_msglevel = ks8695_set_msglevel, |
1044 | .get_settings = ks8695_wan_get_settings, | ||
1045 | .set_settings = ks8695_wan_set_settings, | ||
1046 | .nway_reset = ks8695_wan_nwayreset, | ||
1047 | .get_link = ethtool_op_get_link, | ||
1048 | .get_pauseparam = ks8695_wan_get_pause, | ||
1149 | .get_drvinfo = ks8695_get_drvinfo, | 1049 | .get_drvinfo = ks8695_get_drvinfo, |
1150 | }; | 1050 | }; |
1151 | 1051 | ||
@@ -1541,7 +1441,6 @@ ks8695_probe(struct platform_device *pdev) | |||
1541 | 1441 | ||
1542 | /* driver system setup */ | 1442 | /* driver system setup */ |
1543 | ndev->netdev_ops = &ks8695_netdev_ops; | 1443 | ndev->netdev_ops = &ks8695_netdev_ops; |
1544 | SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops); | ||
1545 | ndev->watchdog_timeo = msecs_to_jiffies(watchdog); | 1444 | ndev->watchdog_timeo = msecs_to_jiffies(watchdog); |
1546 | 1445 | ||
1547 | netif_napi_add(ndev, &ksp->napi, ks8695_poll, NAPI_WEIGHT); | 1446 | netif_napi_add(ndev, &ksp->napi, ks8695_poll, NAPI_WEIGHT); |
@@ -1608,12 +1507,15 @@ ks8695_probe(struct platform_device *pdev) | |||
1608 | if (ksp->phyiface_regs && ksp->link_irq == -1) { | 1507 | if (ksp->phyiface_regs && ksp->link_irq == -1) { |
1609 | ks8695_init_switch(ksp); | 1508 | ks8695_init_switch(ksp); |
1610 | ksp->dtype = KS8695_DTYPE_LAN; | 1509 | ksp->dtype = KS8695_DTYPE_LAN; |
1510 | SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops); | ||
1611 | } else if (ksp->phyiface_regs && ksp->link_irq != -1) { | 1511 | } else if (ksp->phyiface_regs && ksp->link_irq != -1) { |
1612 | ks8695_init_wan_phy(ksp); | 1512 | ks8695_init_wan_phy(ksp); |
1613 | ksp->dtype = KS8695_DTYPE_WAN; | 1513 | ksp->dtype = KS8695_DTYPE_WAN; |
1514 | SET_ETHTOOL_OPS(ndev, &ks8695_wan_ethtool_ops); | ||
1614 | } else { | 1515 | } else { |
1615 | /* No initialisation since HPNA does not have a PHY */ | 1516 | /* No initialisation since HPNA does not have a PHY */ |
1616 | ksp->dtype = KS8695_DTYPE_HPNA; | 1517 | ksp->dtype = KS8695_DTYPE_HPNA; |
1518 | SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops); | ||
1617 | } | 1519 | } |
1618 | 1520 | ||
1619 | /* And bring up the net_device with the net core */ | 1521 | /* And bring up the net_device with the net core */ |
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c index ce1e5e9d06f6..22abfb39d813 100644 --- a/drivers/net/bfin_mac.c +++ b/drivers/net/bfin_mac.c | |||
@@ -8,6 +8,11 @@ | |||
8 | * Licensed under the GPL-2 or later. | 8 | * Licensed under the GPL-2 or later. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #define DRV_VERSION "1.1" | ||
12 | #define DRV_DESC "Blackfin on-chip Ethernet MAC driver" | ||
13 | |||
14 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
15 | |||
11 | #include <linux/init.h> | 16 | #include <linux/init.h> |
12 | #include <linux/module.h> | 17 | #include <linux/module.h> |
13 | #include <linux/kernel.h> | 18 | #include <linux/kernel.h> |
@@ -41,12 +46,7 @@ | |||
41 | 46 | ||
42 | #include "bfin_mac.h" | 47 | #include "bfin_mac.h" |
43 | 48 | ||
44 | #define DRV_NAME "bfin_mac" | 49 | MODULE_AUTHOR("Bryan Wu, Luke Yang"); |
45 | #define DRV_VERSION "1.1" | ||
46 | #define DRV_AUTHOR "Bryan Wu, Luke Yang" | ||
47 | #define DRV_DESC "Blackfin on-chip Ethernet MAC driver" | ||
48 | |||
49 | MODULE_AUTHOR(DRV_AUTHOR); | ||
50 | MODULE_LICENSE("GPL"); | 50 | MODULE_LICENSE("GPL"); |
51 | MODULE_DESCRIPTION(DRV_DESC); | 51 | MODULE_DESCRIPTION(DRV_DESC); |
52 | MODULE_ALIAS("platform:bfin_mac"); | 52 | MODULE_ALIAS("platform:bfin_mac"); |
@@ -189,8 +189,7 @@ static int desc_list_init(void) | |||
189 | /* allocate a new skb for next time receive */ | 189 | /* allocate a new skb for next time receive */ |
190 | new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN); | 190 | new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN); |
191 | if (!new_skb) { | 191 | if (!new_skb) { |
192 | printk(KERN_NOTICE DRV_NAME | 192 | pr_notice("init: low on mem - packet dropped\n"); |
193 | ": init: low on mem - packet dropped\n"); | ||
194 | goto init_error; | 193 | goto init_error; |
195 | } | 194 | } |
196 | skb_reserve(new_skb, NET_IP_ALIGN); | 195 | skb_reserve(new_skb, NET_IP_ALIGN); |
@@ -240,7 +239,7 @@ static int desc_list_init(void) | |||
240 | 239 | ||
241 | init_error: | 240 | init_error: |
242 | desc_list_free(); | 241 | desc_list_free(); |
243 | printk(KERN_ERR DRV_NAME ": kmalloc failed\n"); | 242 | pr_err("kmalloc failed\n"); |
244 | return -ENOMEM; | 243 | return -ENOMEM; |
245 | } | 244 | } |
246 | 245 | ||
@@ -259,8 +258,7 @@ static int bfin_mdio_poll(void) | |||
259 | while ((bfin_read_EMAC_STAADD()) & STABUSY) { | 258 | while ((bfin_read_EMAC_STAADD()) & STABUSY) { |
260 | udelay(1); | 259 | udelay(1); |
261 | if (timeout_cnt-- < 0) { | 260 | if (timeout_cnt-- < 0) { |
262 | printk(KERN_ERR DRV_NAME | 261 | pr_err("wait MDC/MDIO transaction to complete timeout\n"); |
263 | ": wait MDC/MDIO transaction to complete timeout\n"); | ||
264 | return -ETIMEDOUT; | 262 | return -ETIMEDOUT; |
265 | } | 263 | } |
266 | } | 264 | } |
@@ -350,9 +348,9 @@ static void bfin_mac_adjust_link(struct net_device *dev) | |||
350 | opmode &= ~RMII_10; | 348 | opmode &= ~RMII_10; |
351 | break; | 349 | break; |
352 | default: | 350 | default: |
353 | printk(KERN_WARNING | 351 | netdev_warn(dev, |
354 | "%s: Ack! Speed (%d) is not 10/100!\n", | 352 | "Ack! Speed (%d) is not 10/100!\n", |
355 | DRV_NAME, phydev->speed); | 353 | phydev->speed); |
356 | break; | 354 | break; |
357 | } | 355 | } |
358 | bfin_write_EMAC_OPMODE(opmode); | 356 | bfin_write_EMAC_OPMODE(opmode); |
@@ -417,14 +415,13 @@ static int mii_probe(struct net_device *dev, int phy_mode) | |||
417 | 415 | ||
418 | /* now we are supposed to have a proper phydev, to attach to... */ | 416 | /* now we are supposed to have a proper phydev, to attach to... */ |
419 | if (!phydev) { | 417 | if (!phydev) { |
420 | printk(KERN_INFO "%s: Don't found any phy device at all\n", | 418 | netdev_err(dev, "no phy device found\n"); |
421 | dev->name); | ||
422 | return -ENODEV; | 419 | return -ENODEV; |
423 | } | 420 | } |
424 | 421 | ||
425 | if (phy_mode != PHY_INTERFACE_MODE_RMII && | 422 | if (phy_mode != PHY_INTERFACE_MODE_RMII && |
426 | phy_mode != PHY_INTERFACE_MODE_MII) { | 423 | phy_mode != PHY_INTERFACE_MODE_MII) { |
427 | printk(KERN_INFO "%s: Invalid phy interface mode\n", dev->name); | 424 | netdev_err(dev, "invalid phy interface mode\n"); |
428 | return -EINVAL; | 425 | return -EINVAL; |
429 | } | 426 | } |
430 | 427 | ||
@@ -432,7 +429,7 @@ static int mii_probe(struct net_device *dev, int phy_mode) | |||
432 | 0, phy_mode); | 429 | 0, phy_mode); |
433 | 430 | ||
434 | if (IS_ERR(phydev)) { | 431 | if (IS_ERR(phydev)) { |
435 | printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); | 432 | netdev_err(dev, "could not attach PHY\n"); |
436 | return PTR_ERR(phydev); | 433 | return PTR_ERR(phydev); |
437 | } | 434 | } |
438 | 435 | ||
@@ -453,11 +450,10 @@ static int mii_probe(struct net_device *dev, int phy_mode) | |||
453 | lp->old_duplex = -1; | 450 | lp->old_duplex = -1; |
454 | lp->phydev = phydev; | 451 | lp->phydev = phydev; |
455 | 452 | ||
456 | printk(KERN_INFO "%s: attached PHY driver [%s] " | 453 | pr_info("attached PHY driver [%s] " |
457 | "(mii_bus:phy_addr=%s, irq=%d, mdc_clk=%dHz(mdc_div=%d)" | 454 | "(mii_bus:phy_addr=%s, irq=%d, mdc_clk=%dHz(mdc_div=%d)@sclk=%dMHz)\n", |
458 | "@sclk=%dMHz)\n", | 455 | phydev->drv->name, dev_name(&phydev->dev), phydev->irq, |
459 | DRV_NAME, phydev->drv->name, dev_name(&phydev->dev), phydev->irq, | 456 | MDC_CLK, mdc_div, sclk/1000000); |
460 | MDC_CLK, mdc_div, sclk/1000000); | ||
461 | 457 | ||
462 | return 0; | 458 | return 0; |
463 | } | 459 | } |
@@ -502,7 +498,7 @@ bfin_mac_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
502 | static void bfin_mac_ethtool_getdrvinfo(struct net_device *dev, | 498 | static void bfin_mac_ethtool_getdrvinfo(struct net_device *dev, |
503 | struct ethtool_drvinfo *info) | 499 | struct ethtool_drvinfo *info) |
504 | { | 500 | { |
505 | strcpy(info->driver, DRV_NAME); | 501 | strcpy(info->driver, KBUILD_MODNAME); |
506 | strcpy(info->version, DRV_VERSION); | 502 | strcpy(info->version, DRV_VERSION); |
507 | strcpy(info->fw_version, "N/A"); | 503 | strcpy(info->fw_version, "N/A"); |
508 | strcpy(info->bus_info, dev_name(&dev->dev)); | 504 | strcpy(info->bus_info, dev_name(&dev->dev)); |
@@ -562,7 +558,7 @@ static const struct ethtool_ops bfin_mac_ethtool_ops = { | |||
562 | }; | 558 | }; |
563 | 559 | ||
564 | /**************************************************************************/ | 560 | /**************************************************************************/ |
565 | void setup_system_regs(struct net_device *dev) | 561 | static void setup_system_regs(struct net_device *dev) |
566 | { | 562 | { |
567 | struct bfin_mac_local *lp = netdev_priv(dev); | 563 | struct bfin_mac_local *lp = netdev_priv(dev); |
568 | int i; | 564 | int i; |
@@ -592,6 +588,10 @@ void setup_system_regs(struct net_device *dev) | |||
592 | 588 | ||
593 | bfin_write_EMAC_MMC_CTL(RSTC | CROLL); | 589 | bfin_write_EMAC_MMC_CTL(RSTC | CROLL); |
594 | 590 | ||
591 | /* Set vlan regs to let 1522 bytes long packets pass through */ | ||
592 | bfin_write_EMAC_VLAN1(lp->vlan1_mask); | ||
593 | bfin_write_EMAC_VLAN2(lp->vlan2_mask); | ||
594 | |||
595 | /* Initialize the TX DMA channel registers */ | 595 | /* Initialize the TX DMA channel registers */ |
596 | bfin_write_DMA2_X_COUNT(0); | 596 | bfin_write_DMA2_X_COUNT(0); |
597 | bfin_write_DMA2_X_MODIFY(4); | 597 | bfin_write_DMA2_X_MODIFY(4); |
@@ -827,8 +827,7 @@ static void bfin_tx_hwtstamp(struct net_device *netdev, struct sk_buff *skb) | |||
827 | while ((!(bfin_read_EMAC_PTP_ISTAT() & TXTL)) && (--timeout_cnt)) | 827 | while ((!(bfin_read_EMAC_PTP_ISTAT() & TXTL)) && (--timeout_cnt)) |
828 | udelay(1); | 828 | udelay(1); |
829 | if (timeout_cnt == 0) | 829 | if (timeout_cnt == 0) |
830 | printk(KERN_ERR DRV_NAME | 830 | netdev_err(netdev, "timestamp the TX packet failed\n"); |
831 | ": fails to timestamp the TX packet\n"); | ||
832 | else { | 831 | else { |
833 | struct skb_shared_hwtstamps shhwtstamps; | 832 | struct skb_shared_hwtstamps shhwtstamps; |
834 | u64 ns; | 833 | u64 ns; |
@@ -1083,8 +1082,7 @@ static void bfin_mac_rx(struct net_device *dev) | |||
1083 | * we which case we simply drop the packet | 1082 | * we which case we simply drop the packet |
1084 | */ | 1083 | */ |
1085 | if (current_rx_ptr->status.status_word & RX_ERROR_MASK) { | 1084 | if (current_rx_ptr->status.status_word & RX_ERROR_MASK) { |
1086 | printk(KERN_NOTICE DRV_NAME | 1085 | netdev_notice(dev, "rx: receive error - packet dropped\n"); |
1087 | ": rx: receive error - packet dropped\n"); | ||
1088 | dev->stats.rx_dropped++; | 1086 | dev->stats.rx_dropped++; |
1089 | goto out; | 1087 | goto out; |
1090 | } | 1088 | } |
@@ -1094,8 +1092,7 @@ static void bfin_mac_rx(struct net_device *dev) | |||
1094 | 1092 | ||
1095 | new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN); | 1093 | new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN); |
1096 | if (!new_skb) { | 1094 | if (!new_skb) { |
1097 | printk(KERN_NOTICE DRV_NAME | 1095 | netdev_notice(dev, "rx: low on mem - packet dropped\n"); |
1098 | ": rx: low on mem - packet dropped\n"); | ||
1099 | dev->stats.rx_dropped++; | 1096 | dev->stats.rx_dropped++; |
1100 | goto out; | 1097 | goto out; |
1101 | } | 1098 | } |
@@ -1213,7 +1210,7 @@ static int bfin_mac_enable(struct phy_device *phydev) | |||
1213 | int ret; | 1210 | int ret; |
1214 | u32 opmode; | 1211 | u32 opmode; |
1215 | 1212 | ||
1216 | pr_debug("%s: %s\n", DRV_NAME, __func__); | 1213 | pr_debug("%s\n", __func__); |
1217 | 1214 | ||
1218 | /* Set RX DMA */ | 1215 | /* Set RX DMA */ |
1219 | bfin_write_DMA1_NEXT_DESC_PTR(&(rx_list_head->desc_a)); | 1216 | bfin_write_DMA1_NEXT_DESC_PTR(&(rx_list_head->desc_a)); |
@@ -1287,19 +1284,12 @@ static void bfin_mac_multicast_hash(struct net_device *dev) | |||
1287 | { | 1284 | { |
1288 | u32 emac_hashhi, emac_hashlo; | 1285 | u32 emac_hashhi, emac_hashlo; |
1289 | struct netdev_hw_addr *ha; | 1286 | struct netdev_hw_addr *ha; |
1290 | char *addrs; | ||
1291 | u32 crc; | 1287 | u32 crc; |
1292 | 1288 | ||
1293 | emac_hashhi = emac_hashlo = 0; | 1289 | emac_hashhi = emac_hashlo = 0; |
1294 | 1290 | ||
1295 | netdev_for_each_mc_addr(ha, dev) { | 1291 | netdev_for_each_mc_addr(ha, dev) { |
1296 | addrs = ha->addr; | 1292 | crc = ether_crc(ETH_ALEN, ha->addr); |
1297 | |||
1298 | /* skip non-multicast addresses */ | ||
1299 | if (!(*addrs & 1)) | ||
1300 | continue; | ||
1301 | |||
1302 | crc = ether_crc(ETH_ALEN, addrs); | ||
1303 | crc >>= 26; | 1293 | crc >>= 26; |
1304 | 1294 | ||
1305 | if (crc & 0x20) | 1295 | if (crc & 0x20) |
@@ -1323,7 +1313,7 @@ static void bfin_mac_set_multicast_list(struct net_device *dev) | |||
1323 | u32 sysctl; | 1313 | u32 sysctl; |
1324 | 1314 | ||
1325 | if (dev->flags & IFF_PROMISC) { | 1315 | if (dev->flags & IFF_PROMISC) { |
1326 | printk(KERN_INFO "%s: set to promisc mode\n", dev->name); | 1316 | netdev_info(dev, "set promisc mode\n"); |
1327 | sysctl = bfin_read_EMAC_OPMODE(); | 1317 | sysctl = bfin_read_EMAC_OPMODE(); |
1328 | sysctl |= PR; | 1318 | sysctl |= PR; |
1329 | bfin_write_EMAC_OPMODE(sysctl); | 1319 | bfin_write_EMAC_OPMODE(sysctl); |
@@ -1393,7 +1383,7 @@ static int bfin_mac_open(struct net_device *dev) | |||
1393 | * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx | 1383 | * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx |
1394 | */ | 1384 | */ |
1395 | if (!is_valid_ether_addr(dev->dev_addr)) { | 1385 | if (!is_valid_ether_addr(dev->dev_addr)) { |
1396 | printk(KERN_WARNING DRV_NAME ": no valid ethernet hw addr\n"); | 1386 | netdev_warn(dev, "no valid ethernet hw addr\n"); |
1397 | return -EINVAL; | 1387 | return -EINVAL; |
1398 | } | 1388 | } |
1399 | 1389 | ||
@@ -1527,6 +1517,9 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev) | |||
1527 | goto out_err_mii_probe; | 1517 | goto out_err_mii_probe; |
1528 | } | 1518 | } |
1529 | 1519 | ||
1520 | lp->vlan1_mask = ETH_P_8021Q | mii_bus_data->vlan1_mask; | ||
1521 | lp->vlan2_mask = ETH_P_8021Q | mii_bus_data->vlan2_mask; | ||
1522 | |||
1530 | /* Fill in the fields of the device structure with ethernet values. */ | 1523 | /* Fill in the fields of the device structure with ethernet values. */ |
1531 | ether_setup(ndev); | 1524 | ether_setup(ndev); |
1532 | 1525 | ||
@@ -1558,7 +1551,7 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev) | |||
1558 | bfin_mac_hwtstamp_init(ndev); | 1551 | bfin_mac_hwtstamp_init(ndev); |
1559 | 1552 | ||
1560 | /* now, print out the card info, in a short format.. */ | 1553 | /* now, print out the card info, in a short format.. */ |
1561 | dev_info(&pdev->dev, "%s, Version %s\n", DRV_DESC, DRV_VERSION); | 1554 | netdev_info(ndev, "%s, Version %s\n", DRV_DESC, DRV_VERSION); |
1562 | 1555 | ||
1563 | return 0; | 1556 | return 0; |
1564 | 1557 | ||
@@ -1650,7 +1643,7 @@ static int __devinit bfin_mii_bus_probe(struct platform_device *pdev) | |||
1650 | * so set the GPIO pins to Ethernet mode | 1643 | * so set the GPIO pins to Ethernet mode |
1651 | */ | 1644 | */ |
1652 | pin_req = mii_bus_pd->mac_peripherals; | 1645 | pin_req = mii_bus_pd->mac_peripherals; |
1653 | rc = peripheral_request_list(pin_req, DRV_NAME); | 1646 | rc = peripheral_request_list(pin_req, KBUILD_MODNAME); |
1654 | if (rc) { | 1647 | if (rc) { |
1655 | dev_err(&pdev->dev, "Requesting peripherals failed!\n"); | 1648 | dev_err(&pdev->dev, "Requesting peripherals failed!\n"); |
1656 | return rc; | 1649 | return rc; |
@@ -1739,7 +1732,7 @@ static struct platform_driver bfin_mac_driver = { | |||
1739 | .resume = bfin_mac_resume, | 1732 | .resume = bfin_mac_resume, |
1740 | .suspend = bfin_mac_suspend, | 1733 | .suspend = bfin_mac_suspend, |
1741 | .driver = { | 1734 | .driver = { |
1742 | .name = DRV_NAME, | 1735 | .name = KBUILD_MODNAME, |
1743 | .owner = THIS_MODULE, | 1736 | .owner = THIS_MODULE, |
1744 | }, | 1737 | }, |
1745 | }; | 1738 | }; |
diff --git a/drivers/net/bfin_mac.h b/drivers/net/bfin_mac.h index aed68bed2365..f8559ac9a403 100644 --- a/drivers/net/bfin_mac.h +++ b/drivers/net/bfin_mac.h | |||
@@ -17,7 +17,14 @@ | |||
17 | #include <linux/etherdevice.h> | 17 | #include <linux/etherdevice.h> |
18 | #include <linux/bfin_mac.h> | 18 | #include <linux/bfin_mac.h> |
19 | 19 | ||
20 | /* | ||
21 | * Disable hardware checksum for bug #5600 if writeback cache is | ||
22 | * enabled. Otherwize, corrupted RX packet will be sent up stack | ||
23 | * without error mark. | ||
24 | */ | ||
25 | #ifndef CONFIG_BFIN_EXTMEM_WRITEBACK | ||
20 | #define BFIN_MAC_CSUM_OFFLOAD | 26 | #define BFIN_MAC_CSUM_OFFLOAD |
27 | #endif | ||
21 | 28 | ||
22 | #define TX_RECLAIM_JIFFIES (HZ / 5) | 29 | #define TX_RECLAIM_JIFFIES (HZ / 5) |
23 | 30 | ||
@@ -68,7 +75,6 @@ struct bfin_mac_local { | |||
68 | */ | 75 | */ |
69 | struct net_device_stats stats; | 76 | struct net_device_stats stats; |
70 | 77 | ||
71 | unsigned char Mac[6]; /* MAC address of the board */ | ||
72 | spinlock_t lock; | 78 | spinlock_t lock; |
73 | 79 | ||
74 | int wol; /* Wake On Lan */ | 80 | int wol; /* Wake On Lan */ |
@@ -76,6 +82,9 @@ struct bfin_mac_local { | |||
76 | struct timer_list tx_reclaim_timer; | 82 | struct timer_list tx_reclaim_timer; |
77 | struct net_device *ndev; | 83 | struct net_device *ndev; |
78 | 84 | ||
85 | /* Data for EMAC_VLAN1 regs */ | ||
86 | u16 vlan1_mask, vlan2_mask; | ||
87 | |||
79 | /* MII and PHY stuffs */ | 88 | /* MII and PHY stuffs */ |
80 | int old_link; /* used by bf537_adjust_link */ | 89 | int old_link; /* used by bf537_adjust_link */ |
81 | int old_speed; | 90 | int old_speed; |
diff --git a/drivers/net/bna/bnad_ethtool.c b/drivers/net/bna/bnad_ethtool.c index 99be5ae91991..142d6047da27 100644 --- a/drivers/net/bna/bnad_ethtool.c +++ b/drivers/net/bna/bnad_ethtool.c | |||
@@ -275,7 +275,6 @@ bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) | |||
275 | 275 | ||
276 | ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL); | 276 | ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL); |
277 | if (ioc_attr) { | 277 | if (ioc_attr) { |
278 | memset(ioc_attr, 0, sizeof(*ioc_attr)); | ||
279 | spin_lock_irqsave(&bnad->bna_lock, flags); | 278 | spin_lock_irqsave(&bnad->bna_lock, flags); |
280 | bfa_nw_ioc_get_attr(&bnad->bna.device.ioc, ioc_attr); | 279 | bfa_nw_ioc_get_attr(&bnad->bna.device.ioc, ioc_attr); |
281 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | 280 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c index 7206ab2cbbf8..3437613f0454 100644 --- a/drivers/net/cassini.c +++ b/drivers/net/cassini.c | |||
@@ -3203,7 +3203,7 @@ static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr, | |||
3203 | int phy_type = CAS_PHY_MII_MDIO0; /* default phy type */ | 3203 | int phy_type = CAS_PHY_MII_MDIO0; /* default phy type */ |
3204 | int mac_off = 0; | 3204 | int mac_off = 0; |
3205 | 3205 | ||
3206 | #if defined(CONFIG_OF) | 3206 | #if defined(CONFIG_SPARC) |
3207 | const unsigned char *addr; | 3207 | const unsigned char *addr; |
3208 | #endif | 3208 | #endif |
3209 | 3209 | ||
@@ -3354,7 +3354,7 @@ use_random_mac_addr: | |||
3354 | if (found & VPD_FOUND_MAC) | 3354 | if (found & VPD_FOUND_MAC) |
3355 | goto done; | 3355 | goto done; |
3356 | 3356 | ||
3357 | #if defined(CONFIG_OF) | 3357 | #if defined(CONFIG_SPARC) |
3358 | addr = of_get_property(cp->of_node, "local-mac-address", NULL); | 3358 | addr = of_get_property(cp->of_node, "local-mac-address", NULL); |
3359 | if (addr != NULL) { | 3359 | if (addr != NULL) { |
3360 | memcpy(dev_addr, addr, 6); | 3360 | memcpy(dev_addr, addr, 6); |
@@ -5031,7 +5031,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev, | |||
5031 | cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE : | 5031 | cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE : |
5032 | cassini_debug; | 5032 | cassini_debug; |
5033 | 5033 | ||
5034 | #if defined(CONFIG_OF) | 5034 | #if defined(CONFIG_SPARC) |
5035 | cp->of_node = pci_device_to_OF_node(pdev); | 5035 | cp->of_node = pci_device_to_OF_node(pdev); |
5036 | #endif | 5036 | #endif |
5037 | 5037 | ||
diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/cxgb4vf/cxgb4vf_main.c index 3c403f895750..56166ae2059f 100644 --- a/drivers/net/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/cxgb4vf/cxgb4vf_main.c | |||
@@ -749,13 +749,19 @@ static int cxgb4vf_open(struct net_device *dev) | |||
749 | netif_set_real_num_tx_queues(dev, pi->nqsets); | 749 | netif_set_real_num_tx_queues(dev, pi->nqsets); |
750 | err = netif_set_real_num_rx_queues(dev, pi->nqsets); | 750 | err = netif_set_real_num_rx_queues(dev, pi->nqsets); |
751 | if (err) | 751 | if (err) |
752 | return err; | 752 | goto err_unwind; |
753 | set_bit(pi->port_id, &adapter->open_device_map); | ||
754 | err = link_start(dev); | 753 | err = link_start(dev); |
755 | if (err) | 754 | if (err) |
756 | return err; | 755 | goto err_unwind; |
756 | |||
757 | netif_tx_start_all_queues(dev); | 757 | netif_tx_start_all_queues(dev); |
758 | set_bit(pi->port_id, &adapter->open_device_map); | ||
758 | return 0; | 759 | return 0; |
760 | |||
761 | err_unwind: | ||
762 | if (adapter->open_device_map == 0) | ||
763 | adapter_down(adapter); | ||
764 | return err; | ||
759 | } | 765 | } |
760 | 766 | ||
761 | /* | 767 | /* |
@@ -764,13 +770,12 @@ static int cxgb4vf_open(struct net_device *dev) | |||
764 | */ | 770 | */ |
765 | static int cxgb4vf_stop(struct net_device *dev) | 771 | static int cxgb4vf_stop(struct net_device *dev) |
766 | { | 772 | { |
767 | int ret; | ||
768 | struct port_info *pi = netdev_priv(dev); | 773 | struct port_info *pi = netdev_priv(dev); |
769 | struct adapter *adapter = pi->adapter; | 774 | struct adapter *adapter = pi->adapter; |
770 | 775 | ||
771 | netif_tx_stop_all_queues(dev); | 776 | netif_tx_stop_all_queues(dev); |
772 | netif_carrier_off(dev); | 777 | netif_carrier_off(dev); |
773 | ret = t4vf_enable_vi(adapter, pi->viid, false, false); | 778 | t4vf_enable_vi(adapter, pi->viid, false, false); |
774 | pi->link_cfg.link_ok = 0; | 779 | pi->link_cfg.link_ok = 0; |
775 | 780 | ||
776 | clear_bit(pi->port_id, &adapter->open_device_map); | 781 | clear_bit(pi->port_id, &adapter->open_device_map); |
diff --git a/drivers/net/cxgb4vf/t4vf_hw.c b/drivers/net/cxgb4vf/t4vf_hw.c index e4bec78c8e3f..0f51c80475ce 100644 --- a/drivers/net/cxgb4vf/t4vf_hw.c +++ b/drivers/net/cxgb4vf/t4vf_hw.c | |||
@@ -147,9 +147,20 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size, | |||
147 | /* | 147 | /* |
148 | * Write the command array into the Mailbox Data register array and | 148 | * Write the command array into the Mailbox Data register array and |
149 | * transfer ownership of the mailbox to the firmware. | 149 | * transfer ownership of the mailbox to the firmware. |
150 | * | ||
151 | * For the VFs, the Mailbox Data "registers" are actually backed by | ||
152 | * T4's "MA" interface rather than PL Registers (as is the case for | ||
153 | * the PFs). Because these are in different coherency domains, the | ||
154 | * write to the VF's PL-register-backed Mailbox Control can race in | ||
155 | * front of the writes to the MA-backed VF Mailbox Data "registers". | ||
156 | * So we need to do a read-back on at least one byte of the VF Mailbox | ||
157 | * Data registers before doing the write to the VF Mailbox Control | ||
158 | * register. | ||
150 | */ | 159 | */ |
151 | for (i = 0, p = cmd; i < size; i += 8) | 160 | for (i = 0, p = cmd; i < size; i += 8) |
152 | t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++)); | 161 | t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++)); |
162 | t4_read_reg(adapter, mbox_data); /* flush write */ | ||
163 | |||
153 | t4_write_reg(adapter, mbox_ctl, | 164 | t4_write_reg(adapter, mbox_ctl, |
154 | MBMSGVALID | MBOWNER(MBOX_OWNER_FW)); | 165 | MBMSGVALID | MBOWNER(MBOX_OWNER_FW)); |
155 | t4_read_reg(adapter, mbox_ctl); /* flush write */ | 166 | t4_read_reg(adapter, mbox_ctl); /* flush write */ |
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index 4ff88a683f61..e332aee386f6 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c | |||
@@ -3478,9 +3478,17 @@ static irqreturn_t e1000_intr(int irq, void *data) | |||
3478 | struct e1000_hw *hw = &adapter->hw; | 3478 | struct e1000_hw *hw = &adapter->hw; |
3479 | u32 icr = er32(ICR); | 3479 | u32 icr = er32(ICR); |
3480 | 3480 | ||
3481 | if (unlikely((!icr) || test_bit(__E1000_DOWN, &adapter->flags))) | 3481 | if (unlikely((!icr))) |
3482 | return IRQ_NONE; /* Not our interrupt */ | 3482 | return IRQ_NONE; /* Not our interrupt */ |
3483 | 3483 | ||
3484 | /* | ||
3485 | * we might have caused the interrupt, but the above | ||
3486 | * read cleared it, and just in case the driver is | ||
3487 | * down there is nothing to do so return handled | ||
3488 | */ | ||
3489 | if (unlikely(test_bit(__E1000_DOWN, &adapter->flags))) | ||
3490 | return IRQ_HANDLED; | ||
3491 | |||
3484 | if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { | 3492 | if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { |
3485 | hw->get_link_status = 1; | 3493 | hw->get_link_status = 1; |
3486 | /* guard against interrupt when we're going down */ | 3494 | /* guard against interrupt when we're going down */ |
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c index cb6c7b1c1fb8..7bdec0b0c562 100644 --- a/drivers/net/e1000e/82571.c +++ b/drivers/net/e1000e/82571.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel PRO/1000 Linux driver | 3 | Intel PRO/1000 Linux driver |
4 | Copyright(c) 1999 - 2010 Intel Corporation. | 4 | Copyright(c) 1999 - 2011 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
@@ -1310,7 +1310,7 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw) | |||
1310 | * apply workaround for hardware errata documented in errata | 1310 | * apply workaround for hardware errata documented in errata |
1311 | * docs Fixes issue where some error prone or unreliable PCIe | 1311 | * docs Fixes issue where some error prone or unreliable PCIe |
1312 | * completions are occurring, particularly with ASPM enabled. | 1312 | * completions are occurring, particularly with ASPM enabled. |
1313 | * Without fix, issue can cause tx timeouts. | 1313 | * Without fix, issue can cause Tx timeouts. |
1314 | */ | 1314 | */ |
1315 | reg = er32(GCR2); | 1315 | reg = er32(GCR2); |
1316 | reg |= 1; | 1316 | reg |= 1; |
diff --git a/drivers/net/e1000e/Makefile b/drivers/net/e1000e/Makefile index 360c91369f35..28519acacd2d 100644 --- a/drivers/net/e1000e/Makefile +++ b/drivers/net/e1000e/Makefile | |||
@@ -1,7 +1,7 @@ | |||
1 | ################################################################################ | 1 | ################################################################################ |
2 | # | 2 | # |
3 | # Intel PRO/1000 Linux driver | 3 | # Intel PRO/1000 Linux driver |
4 | # Copyright(c) 1999 - 2008 Intel Corporation. | 4 | # Copyright(c) 1999 - 2011 Intel Corporation. |
5 | # | 5 | # |
6 | # This program is free software; you can redistribute it and/or modify it | 6 | # This program is free software; you can redistribute it and/or modify it |
7 | # under the terms and conditions of the GNU General Public License, | 7 | # under the terms and conditions of the GNU General Public License, |
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h index 7245dc2e0b7c..13149983d07e 100644 --- a/drivers/net/e1000e/defines.h +++ b/drivers/net/e1000e/defines.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel PRO/1000 Linux driver | 3 | Intel PRO/1000 Linux driver |
4 | Copyright(c) 1999 - 2010 Intel Corporation. | 4 | Copyright(c) 1999 - 2011 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h index 5255be753746..e610e1369053 100644 --- a/drivers/net/e1000e/e1000.h +++ b/drivers/net/e1000e/e1000.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel PRO/1000 Linux driver | 3 | Intel PRO/1000 Linux driver |
4 | Copyright(c) 1999 - 2010 Intel Corporation. | 4 | Copyright(c) 1999 - 2011 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c index e45a61c8930a..2fefa820302b 100644 --- a/drivers/net/e1000e/es2lan.c +++ b/drivers/net/e1000e/es2lan.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel PRO/1000 Linux driver | 3 | Intel PRO/1000 Linux driver |
4 | Copyright(c) 1999 - 2010 Intel Corporation. | 4 | Copyright(c) 1999 - 2011 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c index f8ed03dab9b1..fa08b6336cfb 100644 --- a/drivers/net/e1000e/ethtool.c +++ b/drivers/net/e1000e/ethtool.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel PRO/1000 Linux driver | 3 | Intel PRO/1000 Linux driver |
4 | Copyright(c) 1999 - 2010 Intel Corporation. | 4 | Copyright(c) 1999 - 2011 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h index e774380c7cec..bc0860a598c9 100644 --- a/drivers/net/e1000e/hw.h +++ b/drivers/net/e1000e/hw.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel PRO/1000 Linux driver | 3 | Intel PRO/1000 Linux driver |
4 | Copyright(c) 1999 - 2010 Intel Corporation. | 4 | Copyright(c) 1999 - 2011 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
@@ -102,7 +102,7 @@ enum e1e_registers { | |||
102 | E1000_RDTR = 0x02820, /* Rx Delay Timer - RW */ | 102 | E1000_RDTR = 0x02820, /* Rx Delay Timer - RW */ |
103 | E1000_RXDCTL_BASE = 0x02828, /* Rx Descriptor Control - RW */ | 103 | E1000_RXDCTL_BASE = 0x02828, /* Rx Descriptor Control - RW */ |
104 | #define E1000_RXDCTL(_n) (E1000_RXDCTL_BASE + (_n << 8)) | 104 | #define E1000_RXDCTL(_n) (E1000_RXDCTL_BASE + (_n << 8)) |
105 | E1000_RADV = 0x0282C, /* RX Interrupt Absolute Delay Timer - RW */ | 105 | E1000_RADV = 0x0282C, /* Rx Interrupt Absolute Delay Timer - RW */ |
106 | 106 | ||
107 | /* Convenience macros | 107 | /* Convenience macros |
108 | * | 108 | * |
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c index 5328a2927731..b43fc7fb1ee4 100644 --- a/drivers/net/e1000e/ich8lan.c +++ b/drivers/net/e1000e/ich8lan.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel PRO/1000 Linux driver | 3 | Intel PRO/1000 Linux driver |
4 | Copyright(c) 1999 - 2010 Intel Corporation. | 4 | Copyright(c) 1999 - 2011 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c index ff2872153b21..68aa1749bf66 100644 --- a/drivers/net/e1000e/lib.c +++ b/drivers/net/e1000e/lib.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel PRO/1000 Linux driver | 3 | Intel PRO/1000 Linux driver |
4 | Copyright(c) 1999 - 2010 Intel Corporation. | 4 | Copyright(c) 1999 - 2011 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
@@ -533,7 +533,7 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw) | |||
533 | mac->autoneg_failed = 1; | 533 | mac->autoneg_failed = 1; |
534 | return 0; | 534 | return 0; |
535 | } | 535 | } |
536 | e_dbg("NOT RXing /C/, disable AutoNeg and force link.\n"); | 536 | e_dbg("NOT Rx'ing /C/, disable AutoNeg and force link.\n"); |
537 | 537 | ||
538 | /* Disable auto-negotiation in the TXCW register */ | 538 | /* Disable auto-negotiation in the TXCW register */ |
539 | ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); | 539 | ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); |
@@ -556,7 +556,7 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw) | |||
556 | * and disable forced link in the Device Control register | 556 | * and disable forced link in the Device Control register |
557 | * in an attempt to auto-negotiate with our link partner. | 557 | * in an attempt to auto-negotiate with our link partner. |
558 | */ | 558 | */ |
559 | e_dbg("RXing /C/, enable AutoNeg and stop forcing link.\n"); | 559 | e_dbg("Rx'ing /C/, enable AutoNeg and stop forcing link.\n"); |
560 | ew32(TXCW, mac->txcw); | 560 | ew32(TXCW, mac->txcw); |
561 | ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); | 561 | ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); |
562 | 562 | ||
@@ -598,7 +598,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw) | |||
598 | mac->autoneg_failed = 1; | 598 | mac->autoneg_failed = 1; |
599 | return 0; | 599 | return 0; |
600 | } | 600 | } |
601 | e_dbg("NOT RXing /C/, disable AutoNeg and force link.\n"); | 601 | e_dbg("NOT Rx'ing /C/, disable AutoNeg and force link.\n"); |
602 | 602 | ||
603 | /* Disable auto-negotiation in the TXCW register */ | 603 | /* Disable auto-negotiation in the TXCW register */ |
604 | ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); | 604 | ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); |
@@ -621,7 +621,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw) | |||
621 | * and disable forced link in the Device Control register | 621 | * and disable forced link in the Device Control register |
622 | * in an attempt to auto-negotiate with our link partner. | 622 | * in an attempt to auto-negotiate with our link partner. |
623 | */ | 623 | */ |
624 | e_dbg("RXing /C/, enable AutoNeg and stop forcing link.\n"); | 624 | e_dbg("Rx'ing /C/, enable AutoNeg and stop forcing link.\n"); |
625 | ew32(TXCW, mac->txcw); | 625 | ew32(TXCW, mac->txcw); |
626 | ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); | 626 | ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); |
627 | 627 | ||
@@ -800,9 +800,9 @@ static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw) | |||
800 | * The possible values of the "fc" parameter are: | 800 | * The possible values of the "fc" parameter are: |
801 | * 0: Flow control is completely disabled | 801 | * 0: Flow control is completely disabled |
802 | * 1: Rx flow control is enabled (we can receive pause frames, | 802 | * 1: Rx flow control is enabled (we can receive pause frames, |
803 | * but not send pause frames). | 803 | * but not send pause frames). |
804 | * 2: Tx flow control is enabled (we can send pause frames but we | 804 | * 2: Tx flow control is enabled (we can send pause frames but we |
805 | * do not support receiving pause frames). | 805 | * do not support receiving pause frames). |
806 | * 3: Both Rx and Tx flow control (symmetric) are enabled. | 806 | * 3: Both Rx and Tx flow control (symmetric) are enabled. |
807 | */ | 807 | */ |
808 | switch (hw->fc.current_mode) { | 808 | switch (hw->fc.current_mode) { |
@@ -1031,9 +1031,9 @@ s32 e1000e_force_mac_fc(struct e1000_hw *hw) | |||
1031 | * The possible values of the "fc" parameter are: | 1031 | * The possible values of the "fc" parameter are: |
1032 | * 0: Flow control is completely disabled | 1032 | * 0: Flow control is completely disabled |
1033 | * 1: Rx flow control is enabled (we can receive pause | 1033 | * 1: Rx flow control is enabled (we can receive pause |
1034 | * frames but not send pause frames). | 1034 | * frames but not send pause frames). |
1035 | * 2: Tx flow control is enabled (we can send pause frames | 1035 | * 2: Tx flow control is enabled (we can send pause frames |
1036 | * frames but we do not receive pause frames). | 1036 | * frames but we do not receive pause frames). |
1037 | * 3: Both Rx and Tx flow control (symmetric) is enabled. | 1037 | * 3: Both Rx and Tx flow control (symmetric) is enabled. |
1038 | * other: No other values should be possible at this point. | 1038 | * other: No other values should be possible at this point. |
1039 | */ | 1039 | */ |
@@ -1189,7 +1189,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) | |||
1189 | } else { | 1189 | } else { |
1190 | hw->fc.current_mode = e1000_fc_rx_pause; | 1190 | hw->fc.current_mode = e1000_fc_rx_pause; |
1191 | e_dbg("Flow Control = " | 1191 | e_dbg("Flow Control = " |
1192 | "RX PAUSE frames only.\r\n"); | 1192 | "Rx PAUSE frames only.\r\n"); |
1193 | } | 1193 | } |
1194 | } | 1194 | } |
1195 | /* | 1195 | /* |
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index fa5b60452547..1c18f26b0812 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel PRO/1000 Linux driver | 3 | Intel PRO/1000 Linux driver |
4 | Copyright(c) 1999 - 2010 Intel Corporation. | 4 | Copyright(c) 1999 - 2011 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
@@ -77,17 +77,17 @@ struct e1000_reg_info { | |||
77 | char *name; | 77 | char *name; |
78 | }; | 78 | }; |
79 | 79 | ||
80 | #define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */ | 80 | #define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */ |
81 | #define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */ | 81 | #define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */ |
82 | #define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */ | 82 | #define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */ |
83 | #define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */ | 83 | #define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */ |
84 | #define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */ | 84 | #define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */ |
85 | 85 | ||
86 | #define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */ | 86 | #define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */ |
87 | #define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */ | 87 | #define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */ |
88 | #define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */ | 88 | #define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */ |
89 | #define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */ | 89 | #define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */ |
90 | #define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */ | 90 | #define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */ |
91 | 91 | ||
92 | static const struct e1000_reg_info e1000_reg_info_tbl[] = { | 92 | static const struct e1000_reg_info e1000_reg_info_tbl[] = { |
93 | 93 | ||
@@ -99,7 +99,7 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = { | |||
99 | /* Interrupt Registers */ | 99 | /* Interrupt Registers */ |
100 | {E1000_ICR, "ICR"}, | 100 | {E1000_ICR, "ICR"}, |
101 | 101 | ||
102 | /* RX Registers */ | 102 | /* Rx Registers */ |
103 | {E1000_RCTL, "RCTL"}, | 103 | {E1000_RCTL, "RCTL"}, |
104 | {E1000_RDLEN, "RDLEN"}, | 104 | {E1000_RDLEN, "RDLEN"}, |
105 | {E1000_RDH, "RDH"}, | 105 | {E1000_RDH, "RDH"}, |
@@ -115,7 +115,7 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = { | |||
115 | {E1000_RDFTS, "RDFTS"}, | 115 | {E1000_RDFTS, "RDFTS"}, |
116 | {E1000_RDFPC, "RDFPC"}, | 116 | {E1000_RDFPC, "RDFPC"}, |
117 | 117 | ||
118 | /* TX Registers */ | 118 | /* Tx Registers */ |
119 | {E1000_TCTL, "TCTL"}, | 119 | {E1000_TCTL, "TCTL"}, |
120 | {E1000_TDBAL, "TDBAL"}, | 120 | {E1000_TDBAL, "TDBAL"}, |
121 | {E1000_TDBAH, "TDBAH"}, | 121 | {E1000_TDBAH, "TDBAH"}, |
@@ -160,7 +160,7 @@ static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo) | |||
160 | break; | 160 | break; |
161 | default: | 161 | default: |
162 | printk(KERN_INFO "%-15s %08x\n", | 162 | printk(KERN_INFO "%-15s %08x\n", |
163 | reginfo->name, __er32(hw, reginfo->ofs)); | 163 | reginfo->name, __er32(hw, reginfo->ofs)); |
164 | return; | 164 | return; |
165 | } | 165 | } |
166 | 166 | ||
@@ -171,9 +171,8 @@ static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo) | |||
171 | printk(KERN_CONT "\n"); | 171 | printk(KERN_CONT "\n"); |
172 | } | 172 | } |
173 | 173 | ||
174 | |||
175 | /* | 174 | /* |
176 | * e1000e_dump - Print registers, tx-ring and rx-ring | 175 | * e1000e_dump - Print registers, Tx-ring and Rx-ring |
177 | */ | 176 | */ |
178 | static void e1000e_dump(struct e1000_adapter *adapter) | 177 | static void e1000e_dump(struct e1000_adapter *adapter) |
179 | { | 178 | { |
@@ -182,12 +181,20 @@ static void e1000e_dump(struct e1000_adapter *adapter) | |||
182 | struct e1000_reg_info *reginfo; | 181 | struct e1000_reg_info *reginfo; |
183 | struct e1000_ring *tx_ring = adapter->tx_ring; | 182 | struct e1000_ring *tx_ring = adapter->tx_ring; |
184 | struct e1000_tx_desc *tx_desc; | 183 | struct e1000_tx_desc *tx_desc; |
185 | struct my_u0 { u64 a; u64 b; } *u0; | 184 | struct my_u0 { |
185 | u64 a; | ||
186 | u64 b; | ||
187 | } *u0; | ||
186 | struct e1000_buffer *buffer_info; | 188 | struct e1000_buffer *buffer_info; |
187 | struct e1000_ring *rx_ring = adapter->rx_ring; | 189 | struct e1000_ring *rx_ring = adapter->rx_ring; |
188 | union e1000_rx_desc_packet_split *rx_desc_ps; | 190 | union e1000_rx_desc_packet_split *rx_desc_ps; |
189 | struct e1000_rx_desc *rx_desc; | 191 | struct e1000_rx_desc *rx_desc; |
190 | struct my_u1 { u64 a; u64 b; u64 c; u64 d; } *u1; | 192 | struct my_u1 { |
193 | u64 a; | ||
194 | u64 b; | ||
195 | u64 c; | ||
196 | u64 d; | ||
197 | } *u1; | ||
191 | u32 staterr; | 198 | u32 staterr; |
192 | int i = 0; | 199 | int i = 0; |
193 | 200 | ||
@@ -198,12 +205,10 @@ static void e1000e_dump(struct e1000_adapter *adapter) | |||
198 | if (netdev) { | 205 | if (netdev) { |
199 | dev_info(&adapter->pdev->dev, "Net device Info\n"); | 206 | dev_info(&adapter->pdev->dev, "Net device Info\n"); |
200 | printk(KERN_INFO "Device Name state " | 207 | printk(KERN_INFO "Device Name state " |
201 | "trans_start last_rx\n"); | 208 | "trans_start last_rx\n"); |
202 | printk(KERN_INFO "%-15s %016lX %016lX %016lX\n", | 209 | printk(KERN_INFO "%-15s %016lX %016lX %016lX\n", |
203 | netdev->name, | 210 | netdev->name, netdev->state, netdev->trans_start, |
204 | netdev->state, | 211 | netdev->last_rx); |
205 | netdev->trans_start, | ||
206 | netdev->last_rx); | ||
207 | } | 212 | } |
208 | 213 | ||
209 | /* Print Registers */ | 214 | /* Print Registers */ |
@@ -214,26 +219,26 @@ static void e1000e_dump(struct e1000_adapter *adapter) | |||
214 | e1000_regdump(hw, reginfo); | 219 | e1000_regdump(hw, reginfo); |
215 | } | 220 | } |
216 | 221 | ||
217 | /* Print TX Ring Summary */ | 222 | /* Print Tx Ring Summary */ |
218 | if (!netdev || !netif_running(netdev)) | 223 | if (!netdev || !netif_running(netdev)) |
219 | goto exit; | 224 | goto exit; |
220 | 225 | ||
221 | dev_info(&adapter->pdev->dev, "TX Rings Summary\n"); | 226 | dev_info(&adapter->pdev->dev, "Tx Ring Summary\n"); |
222 | printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ]" | 227 | printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ]" |
223 | " leng ntw timestamp\n"); | 228 | " leng ntw timestamp\n"); |
224 | buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean]; | 229 | buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean]; |
225 | printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n", | 230 | printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n", |
226 | 0, tx_ring->next_to_use, tx_ring->next_to_clean, | 231 | 0, tx_ring->next_to_use, tx_ring->next_to_clean, |
227 | (unsigned long long)buffer_info->dma, | 232 | (unsigned long long)buffer_info->dma, |
228 | buffer_info->length, | 233 | buffer_info->length, |
229 | buffer_info->next_to_watch, | 234 | buffer_info->next_to_watch, |
230 | (unsigned long long)buffer_info->time_stamp); | 235 | (unsigned long long)buffer_info->time_stamp); |
231 | 236 | ||
232 | /* Print TX Rings */ | 237 | /* Print Tx Ring */ |
233 | if (!netif_msg_tx_done(adapter)) | 238 | if (!netif_msg_tx_done(adapter)) |
234 | goto rx_ring_summary; | 239 | goto rx_ring_summary; |
235 | 240 | ||
236 | dev_info(&adapter->pdev->dev, "TX Rings Dump\n"); | 241 | dev_info(&adapter->pdev->dev, "Tx Ring Dump\n"); |
237 | 242 | ||
238 | /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended) | 243 | /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended) |
239 | * | 244 | * |
@@ -263,22 +268,22 @@ static void e1000e_dump(struct e1000_adapter *adapter) | |||
263 | * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 | 268 | * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 |
264 | */ | 269 | */ |
265 | printk(KERN_INFO "Tl[desc] [address 63:0 ] [SpeCssSCmCsLen]" | 270 | printk(KERN_INFO "Tl[desc] [address 63:0 ] [SpeCssSCmCsLen]" |
266 | " [bi->dma ] leng ntw timestamp bi->skb " | 271 | " [bi->dma ] leng ntw timestamp bi->skb " |
267 | "<-- Legacy format\n"); | 272 | "<-- Legacy format\n"); |
268 | printk(KERN_INFO "Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen]" | 273 | printk(KERN_INFO "Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen]" |
269 | " [bi->dma ] leng ntw timestamp bi->skb " | 274 | " [bi->dma ] leng ntw timestamp bi->skb " |
270 | "<-- Ext Context format\n"); | 275 | "<-- Ext Context format\n"); |
271 | printk(KERN_INFO "Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen]" | 276 | printk(KERN_INFO "Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen]" |
272 | " [bi->dma ] leng ntw timestamp bi->skb " | 277 | " [bi->dma ] leng ntw timestamp bi->skb " |
273 | "<-- Ext Data format\n"); | 278 | "<-- Ext Data format\n"); |
274 | for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { | 279 | for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { |
275 | tx_desc = E1000_TX_DESC(*tx_ring, i); | 280 | tx_desc = E1000_TX_DESC(*tx_ring, i); |
276 | buffer_info = &tx_ring->buffer_info[i]; | 281 | buffer_info = &tx_ring->buffer_info[i]; |
277 | u0 = (struct my_u0 *)tx_desc; | 282 | u0 = (struct my_u0 *)tx_desc; |
278 | printk(KERN_INFO "T%c[0x%03X] %016llX %016llX %016llX " | 283 | printk(KERN_INFO "T%c[0x%03X] %016llX %016llX %016llX " |
279 | "%04X %3X %016llX %p", | 284 | "%04X %3X %016llX %p", |
280 | (!(le64_to_cpu(u0->b) & (1<<29)) ? 'l' : | 285 | (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' : |
281 | ((le64_to_cpu(u0->b) & (1<<20)) ? 'd' : 'c')), i, | 286 | ((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')), i, |
282 | (unsigned long long)le64_to_cpu(u0->a), | 287 | (unsigned long long)le64_to_cpu(u0->a), |
283 | (unsigned long long)le64_to_cpu(u0->b), | 288 | (unsigned long long)le64_to_cpu(u0->b), |
284 | (unsigned long long)buffer_info->dma, | 289 | (unsigned long long)buffer_info->dma, |
@@ -296,22 +301,22 @@ static void e1000e_dump(struct e1000_adapter *adapter) | |||
296 | 301 | ||
297 | if (netif_msg_pktdata(adapter) && buffer_info->dma != 0) | 302 | if (netif_msg_pktdata(adapter) && buffer_info->dma != 0) |
298 | print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, | 303 | print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, |
299 | 16, 1, phys_to_virt(buffer_info->dma), | 304 | 16, 1, phys_to_virt(buffer_info->dma), |
300 | buffer_info->length, true); | 305 | buffer_info->length, true); |
301 | } | 306 | } |
302 | 307 | ||
303 | /* Print RX Rings Summary */ | 308 | /* Print Rx Ring Summary */ |
304 | rx_ring_summary: | 309 | rx_ring_summary: |
305 | dev_info(&adapter->pdev->dev, "RX Rings Summary\n"); | 310 | dev_info(&adapter->pdev->dev, "Rx Ring Summary\n"); |
306 | printk(KERN_INFO "Queue [NTU] [NTC]\n"); | 311 | printk(KERN_INFO "Queue [NTU] [NTC]\n"); |
307 | printk(KERN_INFO " %5d %5X %5X\n", 0, | 312 | printk(KERN_INFO " %5d %5X %5X\n", 0, |
308 | rx_ring->next_to_use, rx_ring->next_to_clean); | 313 | rx_ring->next_to_use, rx_ring->next_to_clean); |
309 | 314 | ||
310 | /* Print RX Rings */ | 315 | /* Print Rx Ring */ |
311 | if (!netif_msg_rx_status(adapter)) | 316 | if (!netif_msg_rx_status(adapter)) |
312 | goto exit; | 317 | goto exit; |
313 | 318 | ||
314 | dev_info(&adapter->pdev->dev, "RX Rings Dump\n"); | 319 | dev_info(&adapter->pdev->dev, "Rx Ring Dump\n"); |
315 | switch (adapter->rx_ps_pages) { | 320 | switch (adapter->rx_ps_pages) { |
316 | case 1: | 321 | case 1: |
317 | case 2: | 322 | case 2: |
@@ -329,7 +334,7 @@ rx_ring_summary: | |||
329 | * +-----------------------------------------------------+ | 334 | * +-----------------------------------------------------+ |
330 | */ | 335 | */ |
331 | printk(KERN_INFO "R [desc] [buffer 0 63:0 ] " | 336 | printk(KERN_INFO "R [desc] [buffer 0 63:0 ] " |
332 | "[buffer 1 63:0 ] " | 337 | "[buffer 1 63:0 ] " |
333 | "[buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] " | 338 | "[buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] " |
334 | "[bi->skb] <-- Ext Pkt Split format\n"); | 339 | "[bi->skb] <-- Ext Pkt Split format\n"); |
335 | /* [Extended] Receive Descriptor (Write-Back) Format | 340 | /* [Extended] Receive Descriptor (Write-Back) Format |
@@ -344,7 +349,7 @@ rx_ring_summary: | |||
344 | * 63 48 47 32 31 20 19 0 | 349 | * 63 48 47 32 31 20 19 0 |
345 | */ | 350 | */ |
346 | printk(KERN_INFO "RWB[desc] [ck ipid mrqhsh] " | 351 | printk(KERN_INFO "RWB[desc] [ck ipid mrqhsh] " |
347 | "[vl l0 ee es] " | 352 | "[vl l0 ee es] " |
348 | "[ l3 l2 l1 hs] [reserved ] ---------------- " | 353 | "[ l3 l2 l1 hs] [reserved ] ---------------- " |
349 | "[bi->skb] <-- Ext Rx Write-Back format\n"); | 354 | "[bi->skb] <-- Ext Rx Write-Back format\n"); |
350 | for (i = 0; i < rx_ring->count; i++) { | 355 | for (i = 0; i < rx_ring->count; i++) { |
@@ -352,26 +357,26 @@ rx_ring_summary: | |||
352 | rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i); | 357 | rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i); |
353 | u1 = (struct my_u1 *)rx_desc_ps; | 358 | u1 = (struct my_u1 *)rx_desc_ps; |
354 | staterr = | 359 | staterr = |
355 | le32_to_cpu(rx_desc_ps->wb.middle.status_error); | 360 | le32_to_cpu(rx_desc_ps->wb.middle.status_error); |
356 | if (staterr & E1000_RXD_STAT_DD) { | 361 | if (staterr & E1000_RXD_STAT_DD) { |
357 | /* Descriptor Done */ | 362 | /* Descriptor Done */ |
358 | printk(KERN_INFO "RWB[0x%03X] %016llX " | 363 | printk(KERN_INFO "RWB[0x%03X] %016llX " |
359 | "%016llX %016llX %016llX " | 364 | "%016llX %016llX %016llX " |
360 | "---------------- %p", i, | 365 | "---------------- %p", i, |
361 | (unsigned long long)le64_to_cpu(u1->a), | 366 | (unsigned long long)le64_to_cpu(u1->a), |
362 | (unsigned long long)le64_to_cpu(u1->b), | 367 | (unsigned long long)le64_to_cpu(u1->b), |
363 | (unsigned long long)le64_to_cpu(u1->c), | 368 | (unsigned long long)le64_to_cpu(u1->c), |
364 | (unsigned long long)le64_to_cpu(u1->d), | 369 | (unsigned long long)le64_to_cpu(u1->d), |
365 | buffer_info->skb); | 370 | buffer_info->skb); |
366 | } else { | 371 | } else { |
367 | printk(KERN_INFO "R [0x%03X] %016llX " | 372 | printk(KERN_INFO "R [0x%03X] %016llX " |
368 | "%016llX %016llX %016llX %016llX %p", i, | 373 | "%016llX %016llX %016llX %016llX %p", i, |
369 | (unsigned long long)le64_to_cpu(u1->a), | 374 | (unsigned long long)le64_to_cpu(u1->a), |
370 | (unsigned long long)le64_to_cpu(u1->b), | 375 | (unsigned long long)le64_to_cpu(u1->b), |
371 | (unsigned long long)le64_to_cpu(u1->c), | 376 | (unsigned long long)le64_to_cpu(u1->c), |
372 | (unsigned long long)le64_to_cpu(u1->d), | 377 | (unsigned long long)le64_to_cpu(u1->d), |
373 | (unsigned long long)buffer_info->dma, | 378 | (unsigned long long)buffer_info->dma, |
374 | buffer_info->skb); | 379 | buffer_info->skb); |
375 | 380 | ||
376 | if (netif_msg_pktdata(adapter)) | 381 | if (netif_msg_pktdata(adapter)) |
377 | print_hex_dump(KERN_INFO, "", | 382 | print_hex_dump(KERN_INFO, "", |
@@ -400,18 +405,18 @@ rx_ring_summary: | |||
400 | * 63 48 47 40 39 32 31 16 15 0 | 405 | * 63 48 47 40 39 32 31 16 15 0 |
401 | */ | 406 | */ |
402 | printk(KERN_INFO "Rl[desc] [address 63:0 ] " | 407 | printk(KERN_INFO "Rl[desc] [address 63:0 ] " |
403 | "[vl er S cks ln] [bi->dma ] [bi->skb] " | 408 | "[vl er S cks ln] [bi->dma ] [bi->skb] " |
404 | "<-- Legacy format\n"); | 409 | "<-- Legacy format\n"); |
405 | for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) { | 410 | for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) { |
406 | rx_desc = E1000_RX_DESC(*rx_ring, i); | 411 | rx_desc = E1000_RX_DESC(*rx_ring, i); |
407 | buffer_info = &rx_ring->buffer_info[i]; | 412 | buffer_info = &rx_ring->buffer_info[i]; |
408 | u0 = (struct my_u0 *)rx_desc; | 413 | u0 = (struct my_u0 *)rx_desc; |
409 | printk(KERN_INFO "Rl[0x%03X] %016llX %016llX " | 414 | printk(KERN_INFO "Rl[0x%03X] %016llX %016llX " |
410 | "%016llX %p", i, | 415 | "%016llX %p", i, |
411 | (unsigned long long)le64_to_cpu(u0->a), | 416 | (unsigned long long)le64_to_cpu(u0->a), |
412 | (unsigned long long)le64_to_cpu(u0->b), | 417 | (unsigned long long)le64_to_cpu(u0->b), |
413 | (unsigned long long)buffer_info->dma, | 418 | (unsigned long long)buffer_info->dma, |
414 | buffer_info->skb); | 419 | buffer_info->skb); |
415 | if (i == rx_ring->next_to_use) | 420 | if (i == rx_ring->next_to_use) |
416 | printk(KERN_CONT " NTU\n"); | 421 | printk(KERN_CONT " NTU\n"); |
417 | else if (i == rx_ring->next_to_clean) | 422 | else if (i == rx_ring->next_to_clean) |
@@ -421,9 +426,10 @@ rx_ring_summary: | |||
421 | 426 | ||
422 | if (netif_msg_pktdata(adapter)) | 427 | if (netif_msg_pktdata(adapter)) |
423 | print_hex_dump(KERN_INFO, "", | 428 | print_hex_dump(KERN_INFO, "", |
424 | DUMP_PREFIX_ADDRESS, | 429 | DUMP_PREFIX_ADDRESS, |
425 | 16, 1, phys_to_virt(buffer_info->dma), | 430 | 16, 1, |
426 | adapter->rx_buffer_len, true); | 431 | phys_to_virt(buffer_info->dma), |
432 | adapter->rx_buffer_len, true); | ||
427 | } | 433 | } |
428 | } | 434 | } |
429 | 435 | ||
@@ -450,8 +456,7 @@ static int e1000_desc_unused(struct e1000_ring *ring) | |||
450 | * @skb: pointer to sk_buff to be indicated to stack | 456 | * @skb: pointer to sk_buff to be indicated to stack |
451 | **/ | 457 | **/ |
452 | static void e1000_receive_skb(struct e1000_adapter *adapter, | 458 | static void e1000_receive_skb(struct e1000_adapter *adapter, |
453 | struct net_device *netdev, | 459 | struct net_device *netdev, struct sk_buff *skb, |
454 | struct sk_buff *skb, | ||
455 | u8 status, __le16 vlan) | 460 | u8 status, __le16 vlan) |
456 | { | 461 | { |
457 | skb->protocol = eth_type_trans(skb, netdev); | 462 | skb->protocol = eth_type_trans(skb, netdev); |
@@ -464,7 +469,7 @@ static void e1000_receive_skb(struct e1000_adapter *adapter, | |||
464 | } | 469 | } |
465 | 470 | ||
466 | /** | 471 | /** |
467 | * e1000_rx_checksum - Receive Checksum Offload for 82543 | 472 | * e1000_rx_checksum - Receive Checksum Offload |
468 | * @adapter: board private structure | 473 | * @adapter: board private structure |
469 | * @status_err: receive descriptor status and error fields | 474 | * @status_err: receive descriptor status and error fields |
470 | * @csum: receive descriptor csum field | 475 | * @csum: receive descriptor csum field |
@@ -548,7 +553,7 @@ map_skb: | |||
548 | adapter->rx_buffer_len, | 553 | adapter->rx_buffer_len, |
549 | DMA_FROM_DEVICE); | 554 | DMA_FROM_DEVICE); |
550 | if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { | 555 | if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { |
551 | dev_err(&pdev->dev, "RX DMA map failed\n"); | 556 | dev_err(&pdev->dev, "Rx DMA map failed\n"); |
552 | adapter->rx_dma_failed++; | 557 | adapter->rx_dma_failed++; |
553 | break; | 558 | break; |
554 | } | 559 | } |
@@ -601,7 +606,8 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, | |||
601 | ps_page = &buffer_info->ps_pages[j]; | 606 | ps_page = &buffer_info->ps_pages[j]; |
602 | if (j >= adapter->rx_ps_pages) { | 607 | if (j >= adapter->rx_ps_pages) { |
603 | /* all unused desc entries get hw null ptr */ | 608 | /* all unused desc entries get hw null ptr */ |
604 | rx_desc->read.buffer_addr[j+1] = ~cpu_to_le64(0); | 609 | rx_desc->read.buffer_addr[j + 1] = |
610 | ~cpu_to_le64(0); | ||
605 | continue; | 611 | continue; |
606 | } | 612 | } |
607 | if (!ps_page->page) { | 613 | if (!ps_page->page) { |
@@ -617,7 +623,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, | |||
617 | if (dma_mapping_error(&pdev->dev, | 623 | if (dma_mapping_error(&pdev->dev, |
618 | ps_page->dma)) { | 624 | ps_page->dma)) { |
619 | dev_err(&adapter->pdev->dev, | 625 | dev_err(&adapter->pdev->dev, |
620 | "RX DMA page map failed\n"); | 626 | "Rx DMA page map failed\n"); |
621 | adapter->rx_dma_failed++; | 627 | adapter->rx_dma_failed++; |
622 | goto no_buffers; | 628 | goto no_buffers; |
623 | } | 629 | } |
@@ -627,8 +633,8 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, | |||
627 | * didn't change because each write-back | 633 | * didn't change because each write-back |
628 | * erases this info. | 634 | * erases this info. |
629 | */ | 635 | */ |
630 | rx_desc->read.buffer_addr[j+1] = | 636 | rx_desc->read.buffer_addr[j + 1] = |
631 | cpu_to_le64(ps_page->dma); | 637 | cpu_to_le64(ps_page->dma); |
632 | } | 638 | } |
633 | 639 | ||
634 | skb = netdev_alloc_skb_ip_align(netdev, | 640 | skb = netdev_alloc_skb_ip_align(netdev, |
@@ -644,7 +650,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, | |||
644 | adapter->rx_ps_bsize0, | 650 | adapter->rx_ps_bsize0, |
645 | DMA_FROM_DEVICE); | 651 | DMA_FROM_DEVICE); |
646 | if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { | 652 | if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { |
647 | dev_err(&pdev->dev, "RX DMA map failed\n"); | 653 | dev_err(&pdev->dev, "Rx DMA map failed\n"); |
648 | adapter->rx_dma_failed++; | 654 | adapter->rx_dma_failed++; |
649 | /* cleanup skb */ | 655 | /* cleanup skb */ |
650 | dev_kfree_skb_any(skb); | 656 | dev_kfree_skb_any(skb); |
@@ -662,7 +668,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, | |||
662 | * such as IA-64). | 668 | * such as IA-64). |
663 | */ | 669 | */ |
664 | wmb(); | 670 | wmb(); |
665 | writel(i<<1, adapter->hw.hw_addr + rx_ring->tail); | 671 | writel(i << 1, adapter->hw.hw_addr + rx_ring->tail); |
666 | } | 672 | } |
667 | 673 | ||
668 | i++; | 674 | i++; |
@@ -1106,11 +1112,10 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | |||
1106 | cleaned = 1; | 1112 | cleaned = 1; |
1107 | cleaned_count++; | 1113 | cleaned_count++; |
1108 | dma_unmap_single(&pdev->dev, buffer_info->dma, | 1114 | dma_unmap_single(&pdev->dev, buffer_info->dma, |
1109 | adapter->rx_ps_bsize0, | 1115 | adapter->rx_ps_bsize0, DMA_FROM_DEVICE); |
1110 | DMA_FROM_DEVICE); | ||
1111 | buffer_info->dma = 0; | 1116 | buffer_info->dma = 0; |
1112 | 1117 | ||
1113 | /* see !EOP comment in other rx routine */ | 1118 | /* see !EOP comment in other Rx routine */ |
1114 | if (!(staterr & E1000_RXD_STAT_EOP)) | 1119 | if (!(staterr & E1000_RXD_STAT_EOP)) |
1115 | adapter->flags2 |= FLAG2_IS_DISCARDING; | 1120 | adapter->flags2 |= FLAG2_IS_DISCARDING; |
1116 | 1121 | ||
@@ -2610,7 +2615,7 @@ static void e1000_init_manageability_pt(struct e1000_adapter *adapter) | |||
2610 | } | 2615 | } |
2611 | 2616 | ||
2612 | /** | 2617 | /** |
2613 | * e1000_configure_tx - Configure 8254x Transmit Unit after Reset | 2618 | * e1000_configure_tx - Configure Transmit Unit after Reset |
2614 | * @adapter: board private structure | 2619 | * @adapter: board private structure |
2615 | * | 2620 | * |
2616 | * Configure the Tx unit of the MAC after a reset. | 2621 | * Configure the Tx unit of the MAC after a reset. |
@@ -2663,7 +2668,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) | |||
2663 | * hthresh = 1 ==> prefetch when one or more available | 2668 | * hthresh = 1 ==> prefetch when one or more available |
2664 | * pthresh = 0x1f ==> prefetch if internal cache 31 or less | 2669 | * pthresh = 0x1f ==> prefetch if internal cache 31 or less |
2665 | * BEWARE: this seems to work but should be considered first if | 2670 | * BEWARE: this seems to work but should be considered first if |
2666 | * there are tx hangs or other tx related bugs | 2671 | * there are Tx hangs or other Tx related bugs |
2667 | */ | 2672 | */ |
2668 | txdctl |= E1000_TXDCTL_DMA_BURST_ENABLE; | 2673 | txdctl |= E1000_TXDCTL_DMA_BURST_ENABLE; |
2669 | ew32(TXDCTL(0), txdctl); | 2674 | ew32(TXDCTL(0), txdctl); |
@@ -2877,7 +2882,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) | |||
2877 | if (adapter->rx_ps_pages) { | 2882 | if (adapter->rx_ps_pages) { |
2878 | /* this is a 32 byte descriptor */ | 2883 | /* this is a 32 byte descriptor */ |
2879 | rdlen = rx_ring->count * | 2884 | rdlen = rx_ring->count * |
2880 | sizeof(union e1000_rx_desc_packet_split); | 2885 | sizeof(union e1000_rx_desc_packet_split); |
2881 | adapter->clean_rx = e1000_clean_rx_irq_ps; | 2886 | adapter->clean_rx = e1000_clean_rx_irq_ps; |
2882 | adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps; | 2887 | adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps; |
2883 | } else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) { | 2888 | } else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) { |
@@ -2900,7 +2905,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) | |||
2900 | /* | 2905 | /* |
2901 | * set the writeback threshold (only takes effect if the RDTR | 2906 | * set the writeback threshold (only takes effect if the RDTR |
2902 | * is set). set GRAN=1 and write back up to 0x4 worth, and | 2907 | * is set). set GRAN=1 and write back up to 0x4 worth, and |
2903 | * enable prefetching of 0x20 rx descriptors | 2908 | * enable prefetching of 0x20 Rx descriptors |
2904 | * granularity = 01 | 2909 | * granularity = 01 |
2905 | * wthresh = 04, | 2910 | * wthresh = 04, |
2906 | * hthresh = 04, | 2911 | * hthresh = 04, |
@@ -2981,12 +2986,10 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) | |||
2981 | * excessive C-state transition latencies result in | 2986 | * excessive C-state transition latencies result in |
2982 | * dropped transactions. | 2987 | * dropped transactions. |
2983 | */ | 2988 | */ |
2984 | pm_qos_update_request( | 2989 | pm_qos_update_request(&adapter->netdev->pm_qos_req, 55); |
2985 | &adapter->netdev->pm_qos_req, 55); | ||
2986 | } else { | 2990 | } else { |
2987 | pm_qos_update_request( | 2991 | pm_qos_update_request(&adapter->netdev->pm_qos_req, |
2988 | &adapter->netdev->pm_qos_req, | 2992 | PM_QOS_DEFAULT_VALUE); |
2989 | PM_QOS_DEFAULT_VALUE); | ||
2990 | } | 2993 | } |
2991 | } | 2994 | } |
2992 | 2995 | ||
@@ -3152,7 +3155,7 @@ void e1000e_reset(struct e1000_adapter *adapter) | |||
3152 | /* lower 16 bits has Rx packet buffer allocation size in KB */ | 3155 | /* lower 16 bits has Rx packet buffer allocation size in KB */ |
3153 | pba &= 0xffff; | 3156 | pba &= 0xffff; |
3154 | /* | 3157 | /* |
3155 | * the Tx fifo also stores 16 bytes of information about the tx | 3158 | * the Tx fifo also stores 16 bytes of information about the Tx |
3156 | * but don't include ethernet FCS because hardware appends it | 3159 | * but don't include ethernet FCS because hardware appends it |
3157 | */ | 3160 | */ |
3158 | min_tx_space = (adapter->max_frame_size + | 3161 | min_tx_space = (adapter->max_frame_size + |
@@ -3175,7 +3178,7 @@ void e1000e_reset(struct e1000_adapter *adapter) | |||
3175 | pba -= min_tx_space - tx_space; | 3178 | pba -= min_tx_space - tx_space; |
3176 | 3179 | ||
3177 | /* | 3180 | /* |
3178 | * if short on Rx space, Rx wins and must trump tx | 3181 | * if short on Rx space, Rx wins and must trump Tx |
3179 | * adjustment or use Early Receive if available | 3182 | * adjustment or use Early Receive if available |
3180 | */ | 3183 | */ |
3181 | if ((pba < min_rx_space) && | 3184 | if ((pba < min_rx_space) && |
@@ -4039,11 +4042,11 @@ static void e1000_print_link_info(struct e1000_adapter *adapter) | |||
4039 | adapter->netdev->name, | 4042 | adapter->netdev->name, |
4040 | adapter->link_speed, | 4043 | adapter->link_speed, |
4041 | (adapter->link_duplex == FULL_DUPLEX) ? | 4044 | (adapter->link_duplex == FULL_DUPLEX) ? |
4042 | "Full Duplex" : "Half Duplex", | 4045 | "Full Duplex" : "Half Duplex", |
4043 | ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ? | 4046 | ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ? |
4044 | "RX/TX" : | 4047 | "Rx/Tx" : |
4045 | ((ctrl & E1000_CTRL_RFCE) ? "RX" : | 4048 | ((ctrl & E1000_CTRL_RFCE) ? "Rx" : |
4046 | ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None" ))); | 4049 | ((ctrl & E1000_CTRL_TFCE) ? "Tx" : "None"))); |
4047 | } | 4050 | } |
4048 | 4051 | ||
4049 | static bool e1000e_has_link(struct e1000_adapter *adapter) | 4052 | static bool e1000e_has_link(struct e1000_adapter *adapter) |
@@ -4338,7 +4341,7 @@ link_up: | |||
4338 | /* Force detection of hung controller every watchdog period */ | 4341 | /* Force detection of hung controller every watchdog period */ |
4339 | adapter->detect_tx_hung = 1; | 4342 | adapter->detect_tx_hung = 1; |
4340 | 4343 | ||
4341 | /* flush partial descriptors to memory before detecting tx hang */ | 4344 | /* flush partial descriptors to memory before detecting Tx hang */ |
4342 | if (adapter->flags2 & FLAG2_DMA_BURST) { | 4345 | if (adapter->flags2 & FLAG2_DMA_BURST) { |
4343 | ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); | 4346 | ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); |
4344 | ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD); | 4347 | ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD); |
@@ -4529,7 +4532,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter, | |||
4529 | buffer_info->next_to_watch = i; | 4532 | buffer_info->next_to_watch = i; |
4530 | buffer_info->dma = dma_map_single(&pdev->dev, | 4533 | buffer_info->dma = dma_map_single(&pdev->dev, |
4531 | skb->data + offset, | 4534 | skb->data + offset, |
4532 | size, DMA_TO_DEVICE); | 4535 | size, DMA_TO_DEVICE); |
4533 | buffer_info->mapped_as_page = false; | 4536 | buffer_info->mapped_as_page = false; |
4534 | if (dma_mapping_error(&pdev->dev, buffer_info->dma)) | 4537 | if (dma_mapping_error(&pdev->dev, buffer_info->dma)) |
4535 | goto dma_error; | 4538 | goto dma_error; |
@@ -4576,7 +4579,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter, | |||
4576 | } | 4579 | } |
4577 | } | 4580 | } |
4578 | 4581 | ||
4579 | segs = skb_shinfo(skb)->gso_segs ?: 1; | 4582 | segs = skb_shinfo(skb)->gso_segs ? : 1; |
4580 | /* multiply data chunks by size of headers */ | 4583 | /* multiply data chunks by size of headers */ |
4581 | bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len; | 4584 | bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len; |
4582 | 4585 | ||
@@ -4588,13 +4591,13 @@ static int e1000_tx_map(struct e1000_adapter *adapter, | |||
4588 | return count; | 4591 | return count; |
4589 | 4592 | ||
4590 | dma_error: | 4593 | dma_error: |
4591 | dev_err(&pdev->dev, "TX DMA map failed\n"); | 4594 | dev_err(&pdev->dev, "Tx DMA map failed\n"); |
4592 | buffer_info->dma = 0; | 4595 | buffer_info->dma = 0; |
4593 | if (count) | 4596 | if (count) |
4594 | count--; | 4597 | count--; |
4595 | 4598 | ||
4596 | while (count--) { | 4599 | while (count--) { |
4597 | if (i==0) | 4600 | if (i == 0) |
4598 | i += tx_ring->count; | 4601 | i += tx_ring->count; |
4599 | i--; | 4602 | i--; |
4600 | buffer_info = &tx_ring->buffer_info[i]; | 4603 | buffer_info = &tx_ring->buffer_info[i]; |
@@ -6193,7 +6196,7 @@ static int __init e1000_init_module(void) | |||
6193 | int ret; | 6196 | int ret; |
6194 | pr_info("Intel(R) PRO/1000 Network Driver - %s\n", | 6197 | pr_info("Intel(R) PRO/1000 Network Driver - %s\n", |
6195 | e1000e_driver_version); | 6198 | e1000e_driver_version); |
6196 | pr_info("Copyright (c) 1999 - 2010 Intel Corporation.\n"); | 6199 | pr_info("Copyright(c) 1999 - 2011 Intel Corporation.\n"); |
6197 | ret = pci_register_driver(&e1000_driver); | 6200 | ret = pci_register_driver(&e1000_driver); |
6198 | 6201 | ||
6199 | return ret; | 6202 | return ret; |
diff --git a/drivers/net/e1000e/param.c b/drivers/net/e1000e/param.c index a9612b0e4bca..4dd9b63273f6 100644 --- a/drivers/net/e1000e/param.c +++ b/drivers/net/e1000e/param.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel PRO/1000 Linux driver | 3 | Intel PRO/1000 Linux driver |
4 | Copyright(c) 1999 - 2010 Intel Corporation. | 4 | Copyright(c) 1999 - 2011 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
@@ -62,10 +62,9 @@ MODULE_PARM_DESC(copybreak, | |||
62 | module_param_array_named(X, X, int, &num_##X, 0); \ | 62 | module_param_array_named(X, X, int, &num_##X, 0); \ |
63 | MODULE_PARM_DESC(X, desc); | 63 | MODULE_PARM_DESC(X, desc); |
64 | 64 | ||
65 | |||
66 | /* | 65 | /* |
67 | * Transmit Interrupt Delay in units of 1.024 microseconds | 66 | * Transmit Interrupt Delay in units of 1.024 microseconds |
68 | * Tx interrupt delay needs to typically be set to something non zero | 67 | * Tx interrupt delay needs to typically be set to something non-zero |
69 | * | 68 | * |
70 | * Valid Range: 0-65535 | 69 | * Valid Range: 0-65535 |
71 | */ | 70 | */ |
@@ -112,6 +111,7 @@ E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate"); | |||
112 | #define DEFAULT_ITR 3 | 111 | #define DEFAULT_ITR 3 |
113 | #define MAX_ITR 100000 | 112 | #define MAX_ITR 100000 |
114 | #define MIN_ITR 100 | 113 | #define MIN_ITR 100 |
114 | |||
115 | /* IntMode (Interrupt Mode) | 115 | /* IntMode (Interrupt Mode) |
116 | * | 116 | * |
117 | * Valid Range: 0 - 2 | 117 | * Valid Range: 0 - 2 |
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c index a640f1c369ae..326788eab2f7 100644 --- a/drivers/net/e1000e/phy.c +++ b/drivers/net/e1000e/phy.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel PRO/1000 Linux driver | 3 | Intel PRO/1000 Linux driver |
4 | Copyright(c) 1999 - 2010 Intel Corporation. | 4 | Copyright(c) 1999 - 2011 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
@@ -640,7 +640,7 @@ s32 e1000_copper_link_setup_82577(struct e1000_hw *hw) | |||
640 | s32 ret_val; | 640 | s32 ret_val; |
641 | u16 phy_data; | 641 | u16 phy_data; |
642 | 642 | ||
643 | /* Enable CRS on TX. This must be set for half-duplex operation. */ | 643 | /* Enable CRS on Tx. This must be set for half-duplex operation. */ |
644 | ret_val = e1e_rphy(hw, I82577_CFG_REG, &phy_data); | 644 | ret_val = e1e_rphy(hw, I82577_CFG_REG, &phy_data); |
645 | if (ret_val) | 645 | if (ret_val) |
646 | goto out; | 646 | goto out; |
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h index a724a2d14506..6c7257bd73fc 100644 --- a/drivers/net/ehea/ehea.h +++ b/drivers/net/ehea/ehea.h | |||
@@ -40,7 +40,7 @@ | |||
40 | #include <asm/io.h> | 40 | #include <asm/io.h> |
41 | 41 | ||
42 | #define DRV_NAME "ehea" | 42 | #define DRV_NAME "ehea" |
43 | #define DRV_VERSION "EHEA_0106" | 43 | #define DRV_VERSION "EHEA_0107" |
44 | 44 | ||
45 | /* eHEA capability flags */ | 45 | /* eHEA capability flags */ |
46 | #define DLPAR_PORT_ADD_REM 1 | 46 | #define DLPAR_PORT_ADD_REM 1 |
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c index 1032b5bbe238..f75d3144b8a5 100644 --- a/drivers/net/ehea/ehea_main.c +++ b/drivers/net/ehea/ehea_main.c | |||
@@ -437,7 +437,7 @@ static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a) | |||
437 | } | 437 | } |
438 | } | 438 | } |
439 | /* Ring doorbell */ | 439 | /* Ring doorbell */ |
440 | ehea_update_rq1a(pr->qp, i); | 440 | ehea_update_rq1a(pr->qp, i - 1); |
441 | } | 441 | } |
442 | 442 | ||
443 | static int ehea_refill_rq_def(struct ehea_port_res *pr, | 443 | static int ehea_refill_rq_def(struct ehea_port_res *pr, |
@@ -1329,9 +1329,7 @@ static int ehea_fill_port_res(struct ehea_port_res *pr) | |||
1329 | int ret; | 1329 | int ret; |
1330 | struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr; | 1330 | struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr; |
1331 | 1331 | ||
1332 | ehea_init_fill_rq1(pr, init_attr->act_nr_rwqes_rq1 | 1332 | ehea_init_fill_rq1(pr, pr->rq1_skba.len); |
1333 | - init_attr->act_nr_rwqes_rq2 | ||
1334 | - init_attr->act_nr_rwqes_rq3 - 1); | ||
1335 | 1333 | ||
1336 | ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1); | 1334 | ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1); |
1337 | 1335 | ||
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index 45c4b7bfcf39..f1d4b450e797 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c | |||
@@ -433,7 +433,6 @@ static void gfar_init_mac(struct net_device *ndev) | |||
433 | static struct net_device_stats *gfar_get_stats(struct net_device *dev) | 433 | static struct net_device_stats *gfar_get_stats(struct net_device *dev) |
434 | { | 434 | { |
435 | struct gfar_private *priv = netdev_priv(dev); | 435 | struct gfar_private *priv = netdev_priv(dev); |
436 | struct netdev_queue *txq; | ||
437 | unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0; | 436 | unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0; |
438 | unsigned long tx_packets = 0, tx_bytes = 0; | 437 | unsigned long tx_packets = 0, tx_bytes = 0; |
439 | int i = 0; | 438 | int i = 0; |
@@ -449,9 +448,8 @@ static struct net_device_stats *gfar_get_stats(struct net_device *dev) | |||
449 | dev->stats.rx_dropped = rx_dropped; | 448 | dev->stats.rx_dropped = rx_dropped; |
450 | 449 | ||
451 | for (i = 0; i < priv->num_tx_queues; i++) { | 450 | for (i = 0; i < priv->num_tx_queues; i++) { |
452 | txq = netdev_get_tx_queue(dev, i); | 451 | tx_bytes += priv->tx_queue[i]->stats.tx_bytes; |
453 | tx_bytes += txq->tx_bytes; | 452 | tx_packets += priv->tx_queue[i]->stats.tx_packets; |
454 | tx_packets += txq->tx_packets; | ||
455 | } | 453 | } |
456 | 454 | ||
457 | dev->stats.tx_bytes = tx_bytes; | 455 | dev->stats.tx_bytes = tx_bytes; |
@@ -2108,8 +2106,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2108 | } | 2106 | } |
2109 | 2107 | ||
2110 | /* Update transmit stats */ | 2108 | /* Update transmit stats */ |
2111 | txq->tx_bytes += skb->len; | 2109 | tx_queue->stats.tx_bytes += skb->len; |
2112 | txq->tx_packets ++; | 2110 | tx_queue->stats.tx_packets++; |
2113 | 2111 | ||
2114 | txbdp = txbdp_start = tx_queue->cur_tx; | 2112 | txbdp = txbdp_start = tx_queue->cur_tx; |
2115 | lstatus = txbdp->lstatus; | 2113 | lstatus = txbdp->lstatus; |
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h index 68984eb88ae0..54de4135e932 100644 --- a/drivers/net/gianfar.h +++ b/drivers/net/gianfar.h | |||
@@ -907,12 +907,21 @@ enum { | |||
907 | MQ_MG_MODE | 907 | MQ_MG_MODE |
908 | }; | 908 | }; |
909 | 909 | ||
910 | /* | ||
911 | * Per TX queue stats | ||
912 | */ | ||
913 | struct tx_q_stats { | ||
914 | unsigned long tx_packets; | ||
915 | unsigned long tx_bytes; | ||
916 | }; | ||
917 | |||
910 | /** | 918 | /** |
911 | * struct gfar_priv_tx_q - per tx queue structure | 919 | * struct gfar_priv_tx_q - per tx queue structure |
912 | * @txlock: per queue tx spin lock | 920 | * @txlock: per queue tx spin lock |
913 | * @tx_skbuff:skb pointers | 921 | * @tx_skbuff:skb pointers |
914 | * @skb_curtx: to be used skb pointer | 922 | * @skb_curtx: to be used skb pointer |
915 | * @skb_dirtytx:the last used skb pointer | 923 | * @skb_dirtytx:the last used skb pointer |
924 | * @stats: bytes/packets stats | ||
916 | * @qindex: index of this queue | 925 | * @qindex: index of this queue |
917 | * @dev: back pointer to the dev structure | 926 | * @dev: back pointer to the dev structure |
918 | * @grp: back pointer to the group to which this queue belongs | 927 | * @grp: back pointer to the group to which this queue belongs |
@@ -934,6 +943,7 @@ struct gfar_priv_tx_q { | |||
934 | struct txbd8 *tx_bd_base; | 943 | struct txbd8 *tx_bd_base; |
935 | struct txbd8 *cur_tx; | 944 | struct txbd8 *cur_tx; |
936 | struct txbd8 *dirty_tx; | 945 | struct txbd8 *dirty_tx; |
946 | struct tx_q_stats stats; | ||
937 | struct net_device *dev; | 947 | struct net_device *dev; |
938 | struct gfar_priv_grp *grp; | 948 | struct gfar_priv_grp *grp; |
939 | u16 skb_curtx; | 949 | u16 skb_curtx; |
diff --git a/drivers/net/greth.c b/drivers/net/greth.c index 27d6960ce09e..fdb0333f5cb6 100644 --- a/drivers/net/greth.c +++ b/drivers/net/greth.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * Aeroflex Gaisler GRETH 10/100/1G Ethernet MAC. | 2 | * Aeroflex Gaisler GRETH 10/100/1G Ethernet MAC. |
3 | * | 3 | * |
4 | * 2005-2009 (c) Aeroflex Gaisler AB | 4 | * 2005-2010 (c) Aeroflex Gaisler AB |
5 | * | 5 | * |
6 | * This driver supports GRETH 10/100 and GRETH 10/100/1G Ethernet MACs | 6 | * This driver supports GRETH 10/100 and GRETH 10/100/1G Ethernet MACs |
7 | * available in the GRLIB VHDL IP core library. | 7 | * available in the GRLIB VHDL IP core library. |
@@ -356,6 +356,8 @@ static int greth_open(struct net_device *dev) | |||
356 | dev_dbg(&dev->dev, " starting queue\n"); | 356 | dev_dbg(&dev->dev, " starting queue\n"); |
357 | netif_start_queue(dev); | 357 | netif_start_queue(dev); |
358 | 358 | ||
359 | GRETH_REGSAVE(greth->regs->status, 0xFF); | ||
360 | |||
359 | napi_enable(&greth->napi); | 361 | napi_enable(&greth->napi); |
360 | 362 | ||
361 | greth_enable_irqs(greth); | 363 | greth_enable_irqs(greth); |
@@ -371,7 +373,9 @@ static int greth_close(struct net_device *dev) | |||
371 | 373 | ||
372 | napi_disable(&greth->napi); | 374 | napi_disable(&greth->napi); |
373 | 375 | ||
376 | greth_disable_irqs(greth); | ||
374 | greth_disable_tx(greth); | 377 | greth_disable_tx(greth); |
378 | greth_disable_rx(greth); | ||
375 | 379 | ||
376 | netif_stop_queue(dev); | 380 | netif_stop_queue(dev); |
377 | 381 | ||
@@ -388,12 +392,20 @@ greth_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
388 | struct greth_private *greth = netdev_priv(dev); | 392 | struct greth_private *greth = netdev_priv(dev); |
389 | struct greth_bd *bdp; | 393 | struct greth_bd *bdp; |
390 | int err = NETDEV_TX_OK; | 394 | int err = NETDEV_TX_OK; |
391 | u32 status, dma_addr; | 395 | u32 status, dma_addr, ctrl; |
396 | unsigned long flags; | ||
392 | 397 | ||
393 | bdp = greth->tx_bd_base + greth->tx_next; | 398 | /* Clean TX Ring */ |
399 | greth_clean_tx(greth->netdev); | ||
394 | 400 | ||
395 | if (unlikely(greth->tx_free <= 0)) { | 401 | if (unlikely(greth->tx_free <= 0)) { |
402 | spin_lock_irqsave(&greth->devlock, flags);/*save from poll/irq*/ | ||
403 | ctrl = GRETH_REGLOAD(greth->regs->control); | ||
404 | /* Enable TX IRQ only if not already in poll() routine */ | ||
405 | if (ctrl & GRETH_RXI) | ||
406 | GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI); | ||
396 | netif_stop_queue(dev); | 407 | netif_stop_queue(dev); |
408 | spin_unlock_irqrestore(&greth->devlock, flags); | ||
397 | return NETDEV_TX_BUSY; | 409 | return NETDEV_TX_BUSY; |
398 | } | 410 | } |
399 | 411 | ||
@@ -406,13 +418,14 @@ greth_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
406 | goto out; | 418 | goto out; |
407 | } | 419 | } |
408 | 420 | ||
421 | bdp = greth->tx_bd_base + greth->tx_next; | ||
409 | dma_addr = greth_read_bd(&bdp->addr); | 422 | dma_addr = greth_read_bd(&bdp->addr); |
410 | 423 | ||
411 | memcpy((unsigned char *) phys_to_virt(dma_addr), skb->data, skb->len); | 424 | memcpy((unsigned char *) phys_to_virt(dma_addr), skb->data, skb->len); |
412 | 425 | ||
413 | dma_sync_single_for_device(greth->dev, dma_addr, skb->len, DMA_TO_DEVICE); | 426 | dma_sync_single_for_device(greth->dev, dma_addr, skb->len, DMA_TO_DEVICE); |
414 | 427 | ||
415 | status = GRETH_BD_EN | (skb->len & GRETH_BD_LEN); | 428 | status = GRETH_BD_EN | GRETH_BD_IE | (skb->len & GRETH_BD_LEN); |
416 | 429 | ||
417 | /* Wrap around descriptor ring */ | 430 | /* Wrap around descriptor ring */ |
418 | if (greth->tx_next == GRETH_TXBD_NUM_MASK) { | 431 | if (greth->tx_next == GRETH_TXBD_NUM_MASK) { |
@@ -422,22 +435,11 @@ greth_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
422 | greth->tx_next = NEXT_TX(greth->tx_next); | 435 | greth->tx_next = NEXT_TX(greth->tx_next); |
423 | greth->tx_free--; | 436 | greth->tx_free--; |
424 | 437 | ||
425 | /* No more descriptors */ | ||
426 | if (unlikely(greth->tx_free == 0)) { | ||
427 | |||
428 | /* Free transmitted descriptors */ | ||
429 | greth_clean_tx(dev); | ||
430 | |||
431 | /* If nothing was cleaned, stop queue & wait for irq */ | ||
432 | if (unlikely(greth->tx_free == 0)) { | ||
433 | status |= GRETH_BD_IE; | ||
434 | netif_stop_queue(dev); | ||
435 | } | ||
436 | } | ||
437 | |||
438 | /* Write descriptor control word and enable transmission */ | 438 | /* Write descriptor control word and enable transmission */ |
439 | greth_write_bd(&bdp->stat, status); | 439 | greth_write_bd(&bdp->stat, status); |
440 | spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/ | ||
440 | greth_enable_tx(greth); | 441 | greth_enable_tx(greth); |
442 | spin_unlock_irqrestore(&greth->devlock, flags); | ||
441 | 443 | ||
442 | out: | 444 | out: |
443 | dev_kfree_skb(skb); | 445 | dev_kfree_skb(skb); |
@@ -450,13 +452,23 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev) | |||
450 | { | 452 | { |
451 | struct greth_private *greth = netdev_priv(dev); | 453 | struct greth_private *greth = netdev_priv(dev); |
452 | struct greth_bd *bdp; | 454 | struct greth_bd *bdp; |
453 | u32 status = 0, dma_addr; | 455 | u32 status = 0, dma_addr, ctrl; |
454 | int curr_tx, nr_frags, i, err = NETDEV_TX_OK; | 456 | int curr_tx, nr_frags, i, err = NETDEV_TX_OK; |
457 | unsigned long flags; | ||
455 | 458 | ||
456 | nr_frags = skb_shinfo(skb)->nr_frags; | 459 | nr_frags = skb_shinfo(skb)->nr_frags; |
457 | 460 | ||
461 | /* Clean TX Ring */ | ||
462 | greth_clean_tx_gbit(dev); | ||
463 | |||
458 | if (greth->tx_free < nr_frags + 1) { | 464 | if (greth->tx_free < nr_frags + 1) { |
465 | spin_lock_irqsave(&greth->devlock, flags);/*save from poll/irq*/ | ||
466 | ctrl = GRETH_REGLOAD(greth->regs->control); | ||
467 | /* Enable TX IRQ only if not already in poll() routine */ | ||
468 | if (ctrl & GRETH_RXI) | ||
469 | GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI); | ||
459 | netif_stop_queue(dev); | 470 | netif_stop_queue(dev); |
471 | spin_unlock_irqrestore(&greth->devlock, flags); | ||
460 | err = NETDEV_TX_BUSY; | 472 | err = NETDEV_TX_BUSY; |
461 | goto out; | 473 | goto out; |
462 | } | 474 | } |
@@ -499,7 +511,7 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev) | |||
499 | greth->tx_skbuff[curr_tx] = NULL; | 511 | greth->tx_skbuff[curr_tx] = NULL; |
500 | bdp = greth->tx_bd_base + curr_tx; | 512 | bdp = greth->tx_bd_base + curr_tx; |
501 | 513 | ||
502 | status = GRETH_TXBD_CSALL; | 514 | status = GRETH_TXBD_CSALL | GRETH_BD_EN; |
503 | status |= frag->size & GRETH_BD_LEN; | 515 | status |= frag->size & GRETH_BD_LEN; |
504 | 516 | ||
505 | /* Wrap around descriptor ring */ | 517 | /* Wrap around descriptor ring */ |
@@ -509,14 +521,8 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev) | |||
509 | /* More fragments left */ | 521 | /* More fragments left */ |
510 | if (i < nr_frags - 1) | 522 | if (i < nr_frags - 1) |
511 | status |= GRETH_TXBD_MORE; | 523 | status |= GRETH_TXBD_MORE; |
512 | 524 | else | |
513 | /* ... last fragment, check if out of descriptors */ | 525 | status |= GRETH_BD_IE; /* enable IRQ on last fragment */ |
514 | else if (greth->tx_free - nr_frags - 1 < (MAX_SKB_FRAGS + 1)) { | ||
515 | |||
516 | /* Enable interrupts and stop queue */ | ||
517 | status |= GRETH_BD_IE; | ||
518 | netif_stop_queue(dev); | ||
519 | } | ||
520 | 526 | ||
521 | greth_write_bd(&bdp->stat, status); | 527 | greth_write_bd(&bdp->stat, status); |
522 | 528 | ||
@@ -536,26 +542,29 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev) | |||
536 | 542 | ||
537 | wmb(); | 543 | wmb(); |
538 | 544 | ||
539 | /* Enable the descriptors that we configured ... */ | 545 | /* Enable the descriptor chain by enabling the first descriptor */ |
540 | for (i = 0; i < nr_frags + 1; i++) { | 546 | bdp = greth->tx_bd_base + greth->tx_next; |
541 | bdp = greth->tx_bd_base + greth->tx_next; | 547 | greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN); |
542 | greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN); | 548 | greth->tx_next = curr_tx; |
543 | greth->tx_next = NEXT_TX(greth->tx_next); | 549 | greth->tx_free -= nr_frags + 1; |
544 | greth->tx_free--; | ||
545 | } | ||
546 | 550 | ||
551 | wmb(); | ||
552 | |||
553 | spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/ | ||
547 | greth_enable_tx(greth); | 554 | greth_enable_tx(greth); |
555 | spin_unlock_irqrestore(&greth->devlock, flags); | ||
548 | 556 | ||
549 | return NETDEV_TX_OK; | 557 | return NETDEV_TX_OK; |
550 | 558 | ||
551 | frag_map_error: | 559 | frag_map_error: |
552 | /* Unmap SKB mappings that succeeded */ | 560 | /* Unmap SKB mappings that succeeded and disable descriptor */ |
553 | for (i = 0; greth->tx_next + i != curr_tx; i++) { | 561 | for (i = 0; greth->tx_next + i != curr_tx; i++) { |
554 | bdp = greth->tx_bd_base + greth->tx_next + i; | 562 | bdp = greth->tx_bd_base + greth->tx_next + i; |
555 | dma_unmap_single(greth->dev, | 563 | dma_unmap_single(greth->dev, |
556 | greth_read_bd(&bdp->addr), | 564 | greth_read_bd(&bdp->addr), |
557 | greth_read_bd(&bdp->stat) & GRETH_BD_LEN, | 565 | greth_read_bd(&bdp->stat) & GRETH_BD_LEN, |
558 | DMA_TO_DEVICE); | 566 | DMA_TO_DEVICE); |
567 | greth_write_bd(&bdp->stat, 0); | ||
559 | } | 568 | } |
560 | map_error: | 569 | map_error: |
561 | if (net_ratelimit()) | 570 | if (net_ratelimit()) |
@@ -565,12 +574,11 @@ out: | |||
565 | return err; | 574 | return err; |
566 | } | 575 | } |
567 | 576 | ||
568 | |||
569 | static irqreturn_t greth_interrupt(int irq, void *dev_id) | 577 | static irqreturn_t greth_interrupt(int irq, void *dev_id) |
570 | { | 578 | { |
571 | struct net_device *dev = dev_id; | 579 | struct net_device *dev = dev_id; |
572 | struct greth_private *greth; | 580 | struct greth_private *greth; |
573 | u32 status; | 581 | u32 status, ctrl; |
574 | irqreturn_t retval = IRQ_NONE; | 582 | irqreturn_t retval = IRQ_NONE; |
575 | 583 | ||
576 | greth = netdev_priv(dev); | 584 | greth = netdev_priv(dev); |
@@ -580,13 +588,15 @@ static irqreturn_t greth_interrupt(int irq, void *dev_id) | |||
580 | /* Get the interrupt events that caused us to be here. */ | 588 | /* Get the interrupt events that caused us to be here. */ |
581 | status = GRETH_REGLOAD(greth->regs->status); | 589 | status = GRETH_REGLOAD(greth->regs->status); |
582 | 590 | ||
583 | /* Handle rx and tx interrupts through poll */ | 591 | /* Must see if interrupts are enabled also, INT_TX|INT_RX flags may be |
584 | if (status & (GRETH_INT_RX | GRETH_INT_TX)) { | 592 | * set regardless of whether IRQ is enabled or not. Especially |
585 | 593 | * important when shared IRQ. | |
586 | /* Clear interrupt status */ | 594 | */ |
587 | GRETH_REGORIN(greth->regs->status, | 595 | ctrl = GRETH_REGLOAD(greth->regs->control); |
588 | status & (GRETH_INT_RX | GRETH_INT_TX)); | ||
589 | 596 | ||
597 | /* Handle rx and tx interrupts through poll */ | ||
598 | if (((status & (GRETH_INT_RE | GRETH_INT_RX)) && (ctrl & GRETH_RXI)) || | ||
599 | ((status & (GRETH_INT_TE | GRETH_INT_TX)) && (ctrl & GRETH_TXI))) { | ||
590 | retval = IRQ_HANDLED; | 600 | retval = IRQ_HANDLED; |
591 | 601 | ||
592 | /* Disable interrupts and schedule poll() */ | 602 | /* Disable interrupts and schedule poll() */ |
@@ -610,6 +620,8 @@ static void greth_clean_tx(struct net_device *dev) | |||
610 | 620 | ||
611 | while (1) { | 621 | while (1) { |
612 | bdp = greth->tx_bd_base + greth->tx_last; | 622 | bdp = greth->tx_bd_base + greth->tx_last; |
623 | GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX); | ||
624 | mb(); | ||
613 | stat = greth_read_bd(&bdp->stat); | 625 | stat = greth_read_bd(&bdp->stat); |
614 | 626 | ||
615 | if (unlikely(stat & GRETH_BD_EN)) | 627 | if (unlikely(stat & GRETH_BD_EN)) |
@@ -670,7 +682,10 @@ static void greth_clean_tx_gbit(struct net_device *dev) | |||
670 | 682 | ||
671 | /* We only clean fully completed SKBs */ | 683 | /* We only clean fully completed SKBs */ |
672 | bdp_last_frag = greth->tx_bd_base + SKIP_TX(greth->tx_last, nr_frags); | 684 | bdp_last_frag = greth->tx_bd_base + SKIP_TX(greth->tx_last, nr_frags); |
673 | stat = bdp_last_frag->stat; | 685 | |
686 | GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX); | ||
687 | mb(); | ||
688 | stat = greth_read_bd(&bdp_last_frag->stat); | ||
674 | 689 | ||
675 | if (stat & GRETH_BD_EN) | 690 | if (stat & GRETH_BD_EN) |
676 | break; | 691 | break; |
@@ -702,21 +717,9 @@ static void greth_clean_tx_gbit(struct net_device *dev) | |||
702 | greth->tx_free += nr_frags+1; | 717 | greth->tx_free += nr_frags+1; |
703 | dev_kfree_skb(skb); | 718 | dev_kfree_skb(skb); |
704 | } | 719 | } |
705 | if (greth->tx_free > (MAX_SKB_FRAGS + 1)) { | ||
706 | netif_wake_queue(dev); | ||
707 | } | ||
708 | } | ||
709 | 720 | ||
710 | static int greth_pending_packets(struct greth_private *greth) | 721 | if (netif_queue_stopped(dev) && (greth->tx_free > (MAX_SKB_FRAGS+1))) |
711 | { | 722 | netif_wake_queue(dev); |
712 | struct greth_bd *bdp; | ||
713 | u32 status; | ||
714 | bdp = greth->rx_bd_base + greth->rx_cur; | ||
715 | status = greth_read_bd(&bdp->stat); | ||
716 | if (status & GRETH_BD_EN) | ||
717 | return 0; | ||
718 | else | ||
719 | return 1; | ||
720 | } | 723 | } |
721 | 724 | ||
722 | static int greth_rx(struct net_device *dev, int limit) | 725 | static int greth_rx(struct net_device *dev, int limit) |
@@ -727,20 +730,24 @@ static int greth_rx(struct net_device *dev, int limit) | |||
727 | int pkt_len; | 730 | int pkt_len; |
728 | int bad, count; | 731 | int bad, count; |
729 | u32 status, dma_addr; | 732 | u32 status, dma_addr; |
733 | unsigned long flags; | ||
730 | 734 | ||
731 | greth = netdev_priv(dev); | 735 | greth = netdev_priv(dev); |
732 | 736 | ||
733 | for (count = 0; count < limit; ++count) { | 737 | for (count = 0; count < limit; ++count) { |
734 | 738 | ||
735 | bdp = greth->rx_bd_base + greth->rx_cur; | 739 | bdp = greth->rx_bd_base + greth->rx_cur; |
740 | GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX); | ||
741 | mb(); | ||
736 | status = greth_read_bd(&bdp->stat); | 742 | status = greth_read_bd(&bdp->stat); |
737 | dma_addr = greth_read_bd(&bdp->addr); | ||
738 | bad = 0; | ||
739 | 743 | ||
740 | if (unlikely(status & GRETH_BD_EN)) { | 744 | if (unlikely(status & GRETH_BD_EN)) { |
741 | break; | 745 | break; |
742 | } | 746 | } |
743 | 747 | ||
748 | dma_addr = greth_read_bd(&bdp->addr); | ||
749 | bad = 0; | ||
750 | |||
744 | /* Check status for errors. */ | 751 | /* Check status for errors. */ |
745 | if (unlikely(status & GRETH_RXBD_STATUS)) { | 752 | if (unlikely(status & GRETH_RXBD_STATUS)) { |
746 | if (status & GRETH_RXBD_ERR_FT) { | 753 | if (status & GRETH_RXBD_ERR_FT) { |
@@ -802,7 +809,9 @@ static int greth_rx(struct net_device *dev, int limit) | |||
802 | 809 | ||
803 | dma_sync_single_for_device(greth->dev, dma_addr, MAX_FRAME_SIZE, DMA_FROM_DEVICE); | 810 | dma_sync_single_for_device(greth->dev, dma_addr, MAX_FRAME_SIZE, DMA_FROM_DEVICE); |
804 | 811 | ||
812 | spin_lock_irqsave(&greth->devlock, flags); /* save from XMIT */ | ||
805 | greth_enable_rx(greth); | 813 | greth_enable_rx(greth); |
814 | spin_unlock_irqrestore(&greth->devlock, flags); | ||
806 | 815 | ||
807 | greth->rx_cur = NEXT_RX(greth->rx_cur); | 816 | greth->rx_cur = NEXT_RX(greth->rx_cur); |
808 | } | 817 | } |
@@ -836,6 +845,7 @@ static int greth_rx_gbit(struct net_device *dev, int limit) | |||
836 | int pkt_len; | 845 | int pkt_len; |
837 | int bad, count = 0; | 846 | int bad, count = 0; |
838 | u32 status, dma_addr; | 847 | u32 status, dma_addr; |
848 | unsigned long flags; | ||
839 | 849 | ||
840 | greth = netdev_priv(dev); | 850 | greth = netdev_priv(dev); |
841 | 851 | ||
@@ -843,6 +853,8 @@ static int greth_rx_gbit(struct net_device *dev, int limit) | |||
843 | 853 | ||
844 | bdp = greth->rx_bd_base + greth->rx_cur; | 854 | bdp = greth->rx_bd_base + greth->rx_cur; |
845 | skb = greth->rx_skbuff[greth->rx_cur]; | 855 | skb = greth->rx_skbuff[greth->rx_cur]; |
856 | GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX); | ||
857 | mb(); | ||
846 | status = greth_read_bd(&bdp->stat); | 858 | status = greth_read_bd(&bdp->stat); |
847 | bad = 0; | 859 | bad = 0; |
848 | 860 | ||
@@ -865,10 +877,9 @@ static int greth_rx_gbit(struct net_device *dev, int limit) | |||
865 | } | 877 | } |
866 | } | 878 | } |
867 | 879 | ||
868 | /* Allocate new skb to replace current */ | 880 | /* Allocate new skb to replace current, not needed if the |
869 | newskb = netdev_alloc_skb(dev, MAX_FRAME_SIZE + NET_IP_ALIGN); | 881 | * current skb can be reused */ |
870 | 882 | if (!bad && (newskb=netdev_alloc_skb(dev, MAX_FRAME_SIZE + NET_IP_ALIGN))) { | |
871 | if (!bad && newskb) { | ||
872 | skb_reserve(newskb, NET_IP_ALIGN); | 883 | skb_reserve(newskb, NET_IP_ALIGN); |
873 | 884 | ||
874 | dma_addr = dma_map_single(greth->dev, | 885 | dma_addr = dma_map_single(greth->dev, |
@@ -905,11 +916,22 @@ static int greth_rx_gbit(struct net_device *dev, int limit) | |||
905 | if (net_ratelimit()) | 916 | if (net_ratelimit()) |
906 | dev_warn(greth->dev, "Could not create DMA mapping, dropping packet\n"); | 917 | dev_warn(greth->dev, "Could not create DMA mapping, dropping packet\n"); |
907 | dev_kfree_skb(newskb); | 918 | dev_kfree_skb(newskb); |
919 | /* reusing current skb, so it is a drop */ | ||
908 | dev->stats.rx_dropped++; | 920 | dev->stats.rx_dropped++; |
909 | } | 921 | } |
922 | } else if (bad) { | ||
923 | /* Bad Frame transfer, the skb is reused */ | ||
924 | dev->stats.rx_dropped++; | ||
910 | } else { | 925 | } else { |
926 | /* Failed Allocating a new skb. This is rather stupid | ||
927 | * but the current "filled" skb is reused, as if | ||
928 | * transfer failure. One could argue that RX descriptor | ||
929 | * table handling should be divided into cleaning and | ||
930 | * filling as the TX part of the driver | ||
931 | */ | ||
911 | if (net_ratelimit()) | 932 | if (net_ratelimit()) |
912 | dev_warn(greth->dev, "Could not allocate SKB, dropping packet\n"); | 933 | dev_warn(greth->dev, "Could not allocate SKB, dropping packet\n"); |
934 | /* reusing current skb, so it is a drop */ | ||
913 | dev->stats.rx_dropped++; | 935 | dev->stats.rx_dropped++; |
914 | } | 936 | } |
915 | 937 | ||
@@ -920,7 +942,9 @@ static int greth_rx_gbit(struct net_device *dev, int limit) | |||
920 | 942 | ||
921 | wmb(); | 943 | wmb(); |
922 | greth_write_bd(&bdp->stat, status); | 944 | greth_write_bd(&bdp->stat, status); |
945 | spin_lock_irqsave(&greth->devlock, flags); | ||
923 | greth_enable_rx(greth); | 946 | greth_enable_rx(greth); |
947 | spin_unlock_irqrestore(&greth->devlock, flags); | ||
924 | greth->rx_cur = NEXT_RX(greth->rx_cur); | 948 | greth->rx_cur = NEXT_RX(greth->rx_cur); |
925 | } | 949 | } |
926 | 950 | ||
@@ -932,15 +956,18 @@ static int greth_poll(struct napi_struct *napi, int budget) | |||
932 | { | 956 | { |
933 | struct greth_private *greth; | 957 | struct greth_private *greth; |
934 | int work_done = 0; | 958 | int work_done = 0; |
959 | unsigned long flags; | ||
960 | u32 mask, ctrl; | ||
935 | greth = container_of(napi, struct greth_private, napi); | 961 | greth = container_of(napi, struct greth_private, napi); |
936 | 962 | ||
937 | if (greth->gbit_mac) { | 963 | restart_txrx_poll: |
938 | greth_clean_tx_gbit(greth->netdev); | 964 | if (netif_queue_stopped(greth->netdev)) { |
939 | } else { | 965 | if (greth->gbit_mac) |
940 | greth_clean_tx(greth->netdev); | 966 | greth_clean_tx_gbit(greth->netdev); |
967 | else | ||
968 | greth_clean_tx(greth->netdev); | ||
941 | } | 969 | } |
942 | 970 | ||
943 | restart_poll: | ||
944 | if (greth->gbit_mac) { | 971 | if (greth->gbit_mac) { |
945 | work_done += greth_rx_gbit(greth->netdev, budget - work_done); | 972 | work_done += greth_rx_gbit(greth->netdev, budget - work_done); |
946 | } else { | 973 | } else { |
@@ -949,15 +976,29 @@ restart_poll: | |||
949 | 976 | ||
950 | if (work_done < budget) { | 977 | if (work_done < budget) { |
951 | 978 | ||
952 | napi_complete(napi); | 979 | spin_lock_irqsave(&greth->devlock, flags); |
980 | |||
981 | ctrl = GRETH_REGLOAD(greth->regs->control); | ||
982 | if (netif_queue_stopped(greth->netdev)) { | ||
983 | GRETH_REGSAVE(greth->regs->control, | ||
984 | ctrl | GRETH_TXI | GRETH_RXI); | ||
985 | mask = GRETH_INT_RX | GRETH_INT_RE | | ||
986 | GRETH_INT_TX | GRETH_INT_TE; | ||
987 | } else { | ||
988 | GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_RXI); | ||
989 | mask = GRETH_INT_RX | GRETH_INT_RE; | ||
990 | } | ||
953 | 991 | ||
954 | if (greth_pending_packets(greth)) { | 992 | if (GRETH_REGLOAD(greth->regs->status) & mask) { |
955 | napi_reschedule(napi); | 993 | GRETH_REGSAVE(greth->regs->control, ctrl); |
956 | goto restart_poll; | 994 | spin_unlock_irqrestore(&greth->devlock, flags); |
995 | goto restart_txrx_poll; | ||
996 | } else { | ||
997 | __napi_complete(napi); | ||
998 | spin_unlock_irqrestore(&greth->devlock, flags); | ||
957 | } | 999 | } |
958 | } | 1000 | } |
959 | 1001 | ||
960 | greth_enable_irqs(greth); | ||
961 | return work_done; | 1002 | return work_done; |
962 | } | 1003 | } |
963 | 1004 | ||
@@ -1152,11 +1193,11 @@ static const struct ethtool_ops greth_ethtool_ops = { | |||
1152 | }; | 1193 | }; |
1153 | 1194 | ||
1154 | static struct net_device_ops greth_netdev_ops = { | 1195 | static struct net_device_ops greth_netdev_ops = { |
1155 | .ndo_open = greth_open, | 1196 | .ndo_open = greth_open, |
1156 | .ndo_stop = greth_close, | 1197 | .ndo_stop = greth_close, |
1157 | .ndo_start_xmit = greth_start_xmit, | 1198 | .ndo_start_xmit = greth_start_xmit, |
1158 | .ndo_set_mac_address = greth_set_mac_add, | 1199 | .ndo_set_mac_address = greth_set_mac_add, |
1159 | .ndo_validate_addr = eth_validate_addr, | 1200 | .ndo_validate_addr = eth_validate_addr, |
1160 | }; | 1201 | }; |
1161 | 1202 | ||
1162 | static inline int wait_for_mdio(struct greth_private *greth) | 1203 | static inline int wait_for_mdio(struct greth_private *greth) |
@@ -1217,29 +1258,26 @@ static void greth_link_change(struct net_device *dev) | |||
1217 | struct greth_private *greth = netdev_priv(dev); | 1258 | struct greth_private *greth = netdev_priv(dev); |
1218 | struct phy_device *phydev = greth->phy; | 1259 | struct phy_device *phydev = greth->phy; |
1219 | unsigned long flags; | 1260 | unsigned long flags; |
1220 | |||
1221 | int status_change = 0; | 1261 | int status_change = 0; |
1262 | u32 ctrl; | ||
1222 | 1263 | ||
1223 | spin_lock_irqsave(&greth->devlock, flags); | 1264 | spin_lock_irqsave(&greth->devlock, flags); |
1224 | 1265 | ||
1225 | if (phydev->link) { | 1266 | if (phydev->link) { |
1226 | 1267 | ||
1227 | if ((greth->speed != phydev->speed) || (greth->duplex != phydev->duplex)) { | 1268 | if ((greth->speed != phydev->speed) || (greth->duplex != phydev->duplex)) { |
1228 | 1269 | ctrl = GRETH_REGLOAD(greth->regs->control) & | |
1229 | GRETH_REGANDIN(greth->regs->control, | 1270 | ~(GRETH_CTRL_FD | GRETH_CTRL_SP | GRETH_CTRL_GB); |
1230 | ~(GRETH_CTRL_FD | GRETH_CTRL_SP | GRETH_CTRL_GB)); | ||
1231 | 1271 | ||
1232 | if (phydev->duplex) | 1272 | if (phydev->duplex) |
1233 | GRETH_REGORIN(greth->regs->control, GRETH_CTRL_FD); | 1273 | ctrl |= GRETH_CTRL_FD; |
1234 | |||
1235 | if (phydev->speed == SPEED_100) { | ||
1236 | |||
1237 | GRETH_REGORIN(greth->regs->control, GRETH_CTRL_SP); | ||
1238 | } | ||
1239 | 1274 | ||
1275 | if (phydev->speed == SPEED_100) | ||
1276 | ctrl |= GRETH_CTRL_SP; | ||
1240 | else if (phydev->speed == SPEED_1000) | 1277 | else if (phydev->speed == SPEED_1000) |
1241 | GRETH_REGORIN(greth->regs->control, GRETH_CTRL_GB); | 1278 | ctrl |= GRETH_CTRL_GB; |
1242 | 1279 | ||
1280 | GRETH_REGSAVE(greth->regs->control, ctrl); | ||
1243 | greth->speed = phydev->speed; | 1281 | greth->speed = phydev->speed; |
1244 | greth->duplex = phydev->duplex; | 1282 | greth->duplex = phydev->duplex; |
1245 | status_change = 1; | 1283 | status_change = 1; |
@@ -1600,6 +1638,9 @@ static struct of_device_id greth_of_match[] = { | |||
1600 | { | 1638 | { |
1601 | .name = "GAISLER_ETHMAC", | 1639 | .name = "GAISLER_ETHMAC", |
1602 | }, | 1640 | }, |
1641 | { | ||
1642 | .name = "01_01d", | ||
1643 | }, | ||
1603 | {}, | 1644 | {}, |
1604 | }; | 1645 | }; |
1605 | 1646 | ||
diff --git a/drivers/net/greth.h b/drivers/net/greth.h index 03ad903cd676..be0f2062bd14 100644 --- a/drivers/net/greth.h +++ b/drivers/net/greth.h | |||
@@ -23,6 +23,7 @@ | |||
23 | #define GRETH_BD_LEN 0x7FF | 23 | #define GRETH_BD_LEN 0x7FF |
24 | 24 | ||
25 | #define GRETH_TXEN 0x1 | 25 | #define GRETH_TXEN 0x1 |
26 | #define GRETH_INT_TE 0x2 | ||
26 | #define GRETH_INT_TX 0x8 | 27 | #define GRETH_INT_TX 0x8 |
27 | #define GRETH_TXI 0x4 | 28 | #define GRETH_TXI 0x4 |
28 | #define GRETH_TXBD_STATUS 0x0001C000 | 29 | #define GRETH_TXBD_STATUS 0x0001C000 |
@@ -35,6 +36,7 @@ | |||
35 | #define GRETH_TXBD_ERR_UE 0x4000 | 36 | #define GRETH_TXBD_ERR_UE 0x4000 |
36 | #define GRETH_TXBD_ERR_AL 0x8000 | 37 | #define GRETH_TXBD_ERR_AL 0x8000 |
37 | 38 | ||
39 | #define GRETH_INT_RE 0x1 | ||
38 | #define GRETH_INT_RX 0x4 | 40 | #define GRETH_INT_RX 0x4 |
39 | #define GRETH_RXEN 0x2 | 41 | #define GRETH_RXEN 0x2 |
40 | #define GRETH_RXI 0x8 | 42 | #define GRETH_RXI 0x8 |
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index a060610a42db..602078b84892 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c | |||
@@ -6667,8 +6667,6 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, | |||
6667 | struct ixgbe_adapter *adapter, | 6667 | struct ixgbe_adapter *adapter, |
6668 | struct ixgbe_ring *tx_ring) | 6668 | struct ixgbe_ring *tx_ring) |
6669 | { | 6669 | { |
6670 | struct net_device *netdev = tx_ring->netdev; | ||
6671 | struct netdev_queue *txq; | ||
6672 | unsigned int first; | 6670 | unsigned int first; |
6673 | unsigned int tx_flags = 0; | 6671 | unsigned int tx_flags = 0; |
6674 | u8 hdr_len = 0; | 6672 | u8 hdr_len = 0; |
@@ -6765,9 +6763,6 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, | |||
6765 | /* add the ATR filter if ATR is on */ | 6763 | /* add the ATR filter if ATR is on */ |
6766 | if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state)) | 6764 | if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state)) |
6767 | ixgbe_atr(tx_ring, skb, tx_flags, protocol); | 6765 | ixgbe_atr(tx_ring, skb, tx_flags, protocol); |
6768 | txq = netdev_get_tx_queue(netdev, tx_ring->queue_index); | ||
6769 | txq->tx_bytes += skb->len; | ||
6770 | txq->tx_packets++; | ||
6771 | ixgbe_tx_queue(tx_ring, tx_flags, count, skb->len, hdr_len); | 6766 | ixgbe_tx_queue(tx_ring, tx_flags, count, skb->len, hdr_len); |
6772 | ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED); | 6767 | ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED); |
6773 | 6768 | ||
@@ -6925,8 +6920,6 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev, | |||
6925 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | 6920 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
6926 | int i; | 6921 | int i; |
6927 | 6922 | ||
6928 | /* accurate rx/tx bytes/packets stats */ | ||
6929 | dev_txq_stats_fold(netdev, stats); | ||
6930 | rcu_read_lock(); | 6923 | rcu_read_lock(); |
6931 | for (i = 0; i < adapter->num_rx_queues; i++) { | 6924 | for (i = 0; i < adapter->num_rx_queues; i++) { |
6932 | struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]); | 6925 | struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]); |
@@ -6943,6 +6936,22 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev, | |||
6943 | stats->rx_bytes += bytes; | 6936 | stats->rx_bytes += bytes; |
6944 | } | 6937 | } |
6945 | } | 6938 | } |
6939 | |||
6940 | for (i = 0; i < adapter->num_tx_queues; i++) { | ||
6941 | struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]); | ||
6942 | u64 bytes, packets; | ||
6943 | unsigned int start; | ||
6944 | |||
6945 | if (ring) { | ||
6946 | do { | ||
6947 | start = u64_stats_fetch_begin_bh(&ring->syncp); | ||
6948 | packets = ring->stats.packets; | ||
6949 | bytes = ring->stats.bytes; | ||
6950 | } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); | ||
6951 | stats->tx_packets += packets; | ||
6952 | stats->tx_bytes += bytes; | ||
6953 | } | ||
6954 | } | ||
6946 | rcu_read_unlock(); | 6955 | rcu_read_unlock(); |
6947 | /* following stats updated by ixgbe_watchdog_task() */ | 6956 | /* following stats updated by ixgbe_watchdog_task() */ |
6948 | stats->multicast = netdev->stats.multicast; | 6957 | stats->multicast = netdev->stats.multicast; |
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 21845affea13..5933621ac3ff 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c | |||
@@ -585,7 +585,7 @@ err: | |||
585 | rcu_read_lock_bh(); | 585 | rcu_read_lock_bh(); |
586 | vlan = rcu_dereference(q->vlan); | 586 | vlan = rcu_dereference(q->vlan); |
587 | if (vlan) | 587 | if (vlan) |
588 | netdev_get_tx_queue(vlan->dev, 0)->tx_dropped++; | 588 | vlan->dev->stats.tx_dropped++; |
589 | rcu_read_unlock_bh(); | 589 | rcu_read_unlock_bh(); |
590 | 590 | ||
591 | return err; | 591 | return err; |
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c index 6d6806b361e3..897f576b8b17 100644 --- a/drivers/net/mlx4/en_netdev.c +++ b/drivers/net/mlx4/en_netdev.c | |||
@@ -972,7 +972,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, | |||
972 | int i; | 972 | int i; |
973 | int err; | 973 | int err; |
974 | 974 | ||
975 | dev = alloc_etherdev_mq(sizeof(struct mlx4_en_priv), prof->tx_ring_num); | 975 | dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv), |
976 | prof->tx_ring_num, prof->rx_ring_num); | ||
976 | if (dev == NULL) { | 977 | if (dev == NULL) { |
977 | mlx4_err(mdev, "Net device allocation failed\n"); | 978 | mlx4_err(mdev, "Net device allocation failed\n"); |
978 | return -ENOMEM; | 979 | return -ENOMEM; |
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c index 2c158910f7ea..e953793a33ff 100644 --- a/drivers/net/pcmcia/pcnet_cs.c +++ b/drivers/net/pcmcia/pcnet_cs.c | |||
@@ -1536,6 +1536,7 @@ static struct pcmcia_device_id pcnet_ids[] = { | |||
1536 | PCMCIA_DEVICE_PROD_ID12("CONTEC", "C-NET(PC)C-10L", 0x21cab552, 0xf6f90722), | 1536 | PCMCIA_DEVICE_PROD_ID12("CONTEC", "C-NET(PC)C-10L", 0x21cab552, 0xf6f90722), |
1537 | PCMCIA_DEVICE_PROD_ID12("corega", "FEther PCC-TXF", 0x0a21501a, 0xa51564a2), | 1537 | PCMCIA_DEVICE_PROD_ID12("corega", "FEther PCC-TXF", 0x0a21501a, 0xa51564a2), |
1538 | PCMCIA_DEVICE_PROD_ID12("corega", "Ether CF-TD", 0x0a21501a, 0x6589340a), | 1538 | PCMCIA_DEVICE_PROD_ID12("corega", "Ether CF-TD", 0x0a21501a, 0x6589340a), |
1539 | PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega Ether CF-TD LAN Card", 0x5261440f, 0x8797663b), | ||
1539 | PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-T", 0x5261440f, 0xfa9d85bd), | 1540 | PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-T", 0x5261440f, 0xfa9d85bd), |
1540 | PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-TD", 0x5261440f, 0xc49bd73d), | 1541 | PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-TD", 0x5261440f, 0xc49bd73d), |
1541 | PCMCIA_DEVICE_PROD_ID12("Corega K.K.", "corega EtherII PCC-TD", 0xd4fdcbd8, 0xc49bd73d), | 1542 | PCMCIA_DEVICE_PROD_ID12("Corega K.K.", "corega EtherII PCC-TD", 0xd4fdcbd8, 0xc49bd73d), |
diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c index 78d70a6481bf..a1b82c9c67d2 100644 --- a/drivers/net/ppp_async.c +++ b/drivers/net/ppp_async.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <linux/init.h> | 32 | #include <linux/init.h> |
33 | #include <linux/jiffies.h> | 33 | #include <linux/jiffies.h> |
34 | #include <linux/slab.h> | 34 | #include <linux/slab.h> |
35 | #include <asm/unaligned.h> | ||
35 | #include <asm/uaccess.h> | 36 | #include <asm/uaccess.h> |
36 | #include <asm/string.h> | 37 | #include <asm/string.h> |
37 | 38 | ||
@@ -542,7 +543,7 @@ ppp_async_encode(struct asyncppp *ap) | |||
542 | data = ap->tpkt->data; | 543 | data = ap->tpkt->data; |
543 | count = ap->tpkt->len; | 544 | count = ap->tpkt->len; |
544 | fcs = ap->tfcs; | 545 | fcs = ap->tfcs; |
545 | proto = (data[0] << 8) + data[1]; | 546 | proto = get_unaligned_be16(data); |
546 | 547 | ||
547 | /* | 548 | /* |
548 | * LCP packets with code values between 1 (configure-reqest) | 549 | * LCP packets with code values between 1 (configure-reqest) |
@@ -963,7 +964,7 @@ static void async_lcp_peek(struct asyncppp *ap, unsigned char *data, | |||
963 | code = data[0]; | 964 | code = data[0]; |
964 | if (code != CONFACK && code != CONFREQ) | 965 | if (code != CONFACK && code != CONFREQ) |
965 | return; | 966 | return; |
966 | dlen = (data[2] << 8) + data[3]; | 967 | dlen = get_unaligned_be16(data + 2); |
967 | if (len < dlen) | 968 | if (len < dlen) |
968 | return; /* packet got truncated or length is bogus */ | 969 | return; /* packet got truncated or length is bogus */ |
969 | 970 | ||
@@ -997,15 +998,14 @@ static void async_lcp_peek(struct asyncppp *ap, unsigned char *data, | |||
997 | while (dlen >= 2 && dlen >= data[1] && data[1] >= 2) { | 998 | while (dlen >= 2 && dlen >= data[1] && data[1] >= 2) { |
998 | switch (data[0]) { | 999 | switch (data[0]) { |
999 | case LCP_MRU: | 1000 | case LCP_MRU: |
1000 | val = (data[2] << 8) + data[3]; | 1001 | val = get_unaligned_be16(data + 2); |
1001 | if (inbound) | 1002 | if (inbound) |
1002 | ap->mru = val; | 1003 | ap->mru = val; |
1003 | else | 1004 | else |
1004 | ap->chan.mtu = val; | 1005 | ap->chan.mtu = val; |
1005 | break; | 1006 | break; |
1006 | case LCP_ASYNCMAP: | 1007 | case LCP_ASYNCMAP: |
1007 | val = (data[2] << 24) + (data[3] << 16) | 1008 | val = get_unaligned_be32(data + 2); |
1008 | + (data[4] << 8) + data[5]; | ||
1009 | if (inbound) | 1009 | if (inbound) |
1010 | ap->raccm = val; | 1010 | ap->raccm = val; |
1011 | else | 1011 | else |
diff --git a/drivers/net/ppp_deflate.c b/drivers/net/ppp_deflate.c index 695bc83e0cfd..43583309a65d 100644 --- a/drivers/net/ppp_deflate.c +++ b/drivers/net/ppp_deflate.c | |||
@@ -41,6 +41,7 @@ | |||
41 | #include <linux/ppp-comp.h> | 41 | #include <linux/ppp-comp.h> |
42 | 42 | ||
43 | #include <linux/zlib.h> | 43 | #include <linux/zlib.h> |
44 | #include <asm/unaligned.h> | ||
44 | 45 | ||
45 | /* | 46 | /* |
46 | * State for a Deflate (de)compressor. | 47 | * State for a Deflate (de)compressor. |
@@ -232,11 +233,9 @@ static int z_compress(void *arg, unsigned char *rptr, unsigned char *obuf, | |||
232 | */ | 233 | */ |
233 | wptr[0] = PPP_ADDRESS(rptr); | 234 | wptr[0] = PPP_ADDRESS(rptr); |
234 | wptr[1] = PPP_CONTROL(rptr); | 235 | wptr[1] = PPP_CONTROL(rptr); |
235 | wptr[2] = PPP_COMP >> 8; | 236 | put_unaligned_be16(PPP_COMP, wptr + 2); |
236 | wptr[3] = PPP_COMP; | ||
237 | wptr += PPP_HDRLEN; | 237 | wptr += PPP_HDRLEN; |
238 | wptr[0] = state->seqno >> 8; | 238 | put_unaligned_be16(state->seqno, wptr); |
239 | wptr[1] = state->seqno; | ||
240 | wptr += DEFLATE_OVHD; | 239 | wptr += DEFLATE_OVHD; |
241 | olen = PPP_HDRLEN + DEFLATE_OVHD; | 240 | olen = PPP_HDRLEN + DEFLATE_OVHD; |
242 | state->strm.next_out = wptr; | 241 | state->strm.next_out = wptr; |
@@ -451,7 +450,7 @@ static int z_decompress(void *arg, unsigned char *ibuf, int isize, | |||
451 | } | 450 | } |
452 | 451 | ||
453 | /* Check the sequence number. */ | 452 | /* Check the sequence number. */ |
454 | seq = (ibuf[PPP_HDRLEN] << 8) + ibuf[PPP_HDRLEN+1]; | 453 | seq = get_unaligned_be16(ibuf + PPP_HDRLEN); |
455 | if (seq != (state->seqno & 0xffff)) { | 454 | if (seq != (state->seqno & 0xffff)) { |
456 | if (state->debug) | 455 | if (state->debug) |
457 | printk(KERN_DEBUG "z_decompress%d: bad seq # %d, expected %d\n", | 456 | printk(KERN_DEBUG "z_decompress%d: bad seq # %d, expected %d\n", |
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c index 6456484c0299..c7a6c4466978 100644 --- a/drivers/net/ppp_generic.c +++ b/drivers/net/ppp_generic.c | |||
@@ -46,6 +46,7 @@ | |||
46 | #include <linux/device.h> | 46 | #include <linux/device.h> |
47 | #include <linux/mutex.h> | 47 | #include <linux/mutex.h> |
48 | #include <linux/slab.h> | 48 | #include <linux/slab.h> |
49 | #include <asm/unaligned.h> | ||
49 | #include <net/slhc_vj.h> | 50 | #include <net/slhc_vj.h> |
50 | #include <asm/atomic.h> | 51 | #include <asm/atomic.h> |
51 | 52 | ||
@@ -210,7 +211,7 @@ struct ppp_net { | |||
210 | }; | 211 | }; |
211 | 212 | ||
212 | /* Get the PPP protocol number from a skb */ | 213 | /* Get the PPP protocol number from a skb */ |
213 | #define PPP_PROTO(skb) (((skb)->data[0] << 8) + (skb)->data[1]) | 214 | #define PPP_PROTO(skb) get_unaligned_be16((skb)->data) |
214 | 215 | ||
215 | /* We limit the length of ppp->file.rq to this (arbitrary) value */ | 216 | /* We limit the length of ppp->file.rq to this (arbitrary) value */ |
216 | #define PPP_MAX_RQLEN 32 | 217 | #define PPP_MAX_RQLEN 32 |
@@ -964,8 +965,7 @@ ppp_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
964 | 965 | ||
965 | pp = skb_push(skb, 2); | 966 | pp = skb_push(skb, 2); |
966 | proto = npindex_to_proto[npi]; | 967 | proto = npindex_to_proto[npi]; |
967 | pp[0] = proto >> 8; | 968 | put_unaligned_be16(proto, pp); |
968 | pp[1] = proto; | ||
969 | 969 | ||
970 | netif_stop_queue(dev); | 970 | netif_stop_queue(dev); |
971 | skb_queue_tail(&ppp->file.xq, skb); | 971 | skb_queue_tail(&ppp->file.xq, skb); |
@@ -1473,8 +1473,7 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb) | |||
1473 | q = skb_put(frag, flen + hdrlen); | 1473 | q = skb_put(frag, flen + hdrlen); |
1474 | 1474 | ||
1475 | /* make the MP header */ | 1475 | /* make the MP header */ |
1476 | q[0] = PPP_MP >> 8; | 1476 | put_unaligned_be16(PPP_MP, q); |
1477 | q[1] = PPP_MP; | ||
1478 | if (ppp->flags & SC_MP_XSHORTSEQ) { | 1477 | if (ppp->flags & SC_MP_XSHORTSEQ) { |
1479 | q[2] = bits + ((ppp->nxseq >> 8) & 0xf); | 1478 | q[2] = bits + ((ppp->nxseq >> 8) & 0xf); |
1480 | q[3] = ppp->nxseq; | 1479 | q[3] = ppp->nxseq; |
diff --git a/drivers/net/ppp_mppe.c b/drivers/net/ppp_mppe.c index 6d1a1b80cc3e..9a1849a83e2a 100644 --- a/drivers/net/ppp_mppe.c +++ b/drivers/net/ppp_mppe.c | |||
@@ -55,6 +55,7 @@ | |||
55 | #include <linux/ppp_defs.h> | 55 | #include <linux/ppp_defs.h> |
56 | #include <linux/ppp-comp.h> | 56 | #include <linux/ppp-comp.h> |
57 | #include <linux/scatterlist.h> | 57 | #include <linux/scatterlist.h> |
58 | #include <asm/unaligned.h> | ||
58 | 59 | ||
59 | #include "ppp_mppe.h" | 60 | #include "ppp_mppe.h" |
60 | 61 | ||
@@ -395,16 +396,14 @@ mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf, | |||
395 | */ | 396 | */ |
396 | obuf[0] = PPP_ADDRESS(ibuf); | 397 | obuf[0] = PPP_ADDRESS(ibuf); |
397 | obuf[1] = PPP_CONTROL(ibuf); | 398 | obuf[1] = PPP_CONTROL(ibuf); |
398 | obuf[2] = PPP_COMP >> 8; /* isize + MPPE_OVHD + 1 */ | 399 | put_unaligned_be16(PPP_COMP, obuf + 2); |
399 | obuf[3] = PPP_COMP; /* isize + MPPE_OVHD + 2 */ | ||
400 | obuf += PPP_HDRLEN; | 400 | obuf += PPP_HDRLEN; |
401 | 401 | ||
402 | state->ccount = (state->ccount + 1) % MPPE_CCOUNT_SPACE; | 402 | state->ccount = (state->ccount + 1) % MPPE_CCOUNT_SPACE; |
403 | if (state->debug >= 7) | 403 | if (state->debug >= 7) |
404 | printk(KERN_DEBUG "mppe_compress[%d]: ccount %d\n", state->unit, | 404 | printk(KERN_DEBUG "mppe_compress[%d]: ccount %d\n", state->unit, |
405 | state->ccount); | 405 | state->ccount); |
406 | obuf[0] = state->ccount >> 8; | 406 | put_unaligned_be16(state->ccount, obuf); |
407 | obuf[1] = state->ccount & 0xff; | ||
408 | 407 | ||
409 | if (!state->stateful || /* stateless mode */ | 408 | if (!state->stateful || /* stateless mode */ |
410 | ((state->ccount & 0xff) == 0xff) || /* "flag" packet */ | 409 | ((state->ccount & 0xff) == 0xff) || /* "flag" packet */ |
diff --git a/drivers/net/ppp_synctty.c b/drivers/net/ppp_synctty.c index 4c95ec3fb8d4..4e6b72f57de8 100644 --- a/drivers/net/ppp_synctty.c +++ b/drivers/net/ppp_synctty.c | |||
@@ -45,6 +45,7 @@ | |||
45 | #include <linux/completion.h> | 45 | #include <linux/completion.h> |
46 | #include <linux/init.h> | 46 | #include <linux/init.h> |
47 | #include <linux/slab.h> | 47 | #include <linux/slab.h> |
48 | #include <asm/unaligned.h> | ||
48 | #include <asm/uaccess.h> | 49 | #include <asm/uaccess.h> |
49 | 50 | ||
50 | #define PPP_VERSION "2.4.2" | 51 | #define PPP_VERSION "2.4.2" |
@@ -563,7 +564,7 @@ ppp_sync_txmunge(struct syncppp *ap, struct sk_buff *skb) | |||
563 | int islcp; | 564 | int islcp; |
564 | 565 | ||
565 | data = skb->data; | 566 | data = skb->data; |
566 | proto = (data[0] << 8) + data[1]; | 567 | proto = get_unaligned_be16(data); |
567 | 568 | ||
568 | /* LCP packets with codes between 1 (configure-request) | 569 | /* LCP packets with codes between 1 (configure-request) |
569 | * and 7 (code-reject) must be sent as though no options | 570 | * and 7 (code-reject) must be sent as though no options |
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h index 9c2a02d204dc..44e316fd67b8 100644 --- a/drivers/net/qlcnic/qlcnic.h +++ b/drivers/net/qlcnic/qlcnic.h | |||
@@ -34,8 +34,8 @@ | |||
34 | 34 | ||
35 | #define _QLCNIC_LINUX_MAJOR 5 | 35 | #define _QLCNIC_LINUX_MAJOR 5 |
36 | #define _QLCNIC_LINUX_MINOR 0 | 36 | #define _QLCNIC_LINUX_MINOR 0 |
37 | #define _QLCNIC_LINUX_SUBVERSION 14 | 37 | #define _QLCNIC_LINUX_SUBVERSION 15 |
38 | #define QLCNIC_LINUX_VERSIONID "5.0.14" | 38 | #define QLCNIC_LINUX_VERSIONID "5.0.15" |
39 | #define QLCNIC_DRV_IDC_VER 0x01 | 39 | #define QLCNIC_DRV_IDC_VER 0x01 |
40 | #define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ | 40 | #define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ |
41 | (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) | 41 | (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) |
@@ -289,6 +289,26 @@ struct uni_data_desc{ | |||
289 | u32 reserved[5]; | 289 | u32 reserved[5]; |
290 | }; | 290 | }; |
291 | 291 | ||
292 | /* Flash Defines and Structures */ | ||
293 | #define QLCNIC_FLT_LOCATION 0x3F1000 | ||
294 | #define QLCNIC_FW_IMAGE_REGION 0x74 | ||
295 | struct qlcnic_flt_header { | ||
296 | u16 version; | ||
297 | u16 len; | ||
298 | u16 checksum; | ||
299 | u16 reserved; | ||
300 | }; | ||
301 | |||
302 | struct qlcnic_flt_entry { | ||
303 | u8 region; | ||
304 | u8 reserved0; | ||
305 | u8 attrib; | ||
306 | u8 reserved1; | ||
307 | u32 size; | ||
308 | u32 start_addr; | ||
309 | u32 end_add; | ||
310 | }; | ||
311 | |||
292 | /* Magic number to let user know flash is programmed */ | 312 | /* Magic number to let user know flash is programmed */ |
293 | #define QLCNIC_BDINFO_MAGIC 0x12345678 | 313 | #define QLCNIC_BDINFO_MAGIC 0x12345678 |
294 | 314 | ||
diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c index 1e7af709d395..4c14510e2a87 100644 --- a/drivers/net/qlcnic/qlcnic_ethtool.c +++ b/drivers/net/qlcnic/qlcnic_ethtool.c | |||
@@ -672,7 +672,7 @@ qlcnic_diag_test(struct net_device *dev, struct ethtool_test *eth_test, | |||
672 | if (data[1]) | 672 | if (data[1]) |
673 | eth_test->flags |= ETH_TEST_FL_FAILED; | 673 | eth_test->flags |= ETH_TEST_FL_FAILED; |
674 | 674 | ||
675 | if (eth_test->flags == ETH_TEST_FL_OFFLINE) { | 675 | if (eth_test->flags & ETH_TEST_FL_OFFLINE) { |
676 | data[2] = qlcnic_irq_test(dev); | 676 | data[2] = qlcnic_irq_test(dev); |
677 | if (data[2]) | 677 | if (data[2]) |
678 | eth_test->flags |= ETH_TEST_FL_FAILED; | 678 | eth_test->flags |= ETH_TEST_FL_FAILED; |
diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c index 9b9c7c39d3ee..a7f1d5b7e811 100644 --- a/drivers/net/qlcnic/qlcnic_init.c +++ b/drivers/net/qlcnic/qlcnic_init.c | |||
@@ -627,12 +627,73 @@ qlcnic_setup_idc_param(struct qlcnic_adapter *adapter) { | |||
627 | return 0; | 627 | return 0; |
628 | } | 628 | } |
629 | 629 | ||
630 | static int qlcnic_get_flt_entry(struct qlcnic_adapter *adapter, u8 region, | ||
631 | struct qlcnic_flt_entry *region_entry) | ||
632 | { | ||
633 | struct qlcnic_flt_header flt_hdr; | ||
634 | struct qlcnic_flt_entry *flt_entry; | ||
635 | int i = 0, ret; | ||
636 | u32 entry_size; | ||
637 | |||
638 | memset(region_entry, 0, sizeof(struct qlcnic_flt_entry)); | ||
639 | ret = qlcnic_rom_fast_read_words(adapter, QLCNIC_FLT_LOCATION, | ||
640 | (u8 *)&flt_hdr, | ||
641 | sizeof(struct qlcnic_flt_header)); | ||
642 | if (ret) { | ||
643 | dev_warn(&adapter->pdev->dev, | ||
644 | "error reading flash layout header\n"); | ||
645 | return -EIO; | ||
646 | } | ||
647 | |||
648 | entry_size = flt_hdr.len - sizeof(struct qlcnic_flt_header); | ||
649 | flt_entry = (struct qlcnic_flt_entry *)vzalloc(entry_size); | ||
650 | if (flt_entry == NULL) { | ||
651 | dev_warn(&adapter->pdev->dev, "error allocating memory\n"); | ||
652 | return -EIO; | ||
653 | } | ||
654 | |||
655 | ret = qlcnic_rom_fast_read_words(adapter, QLCNIC_FLT_LOCATION + | ||
656 | sizeof(struct qlcnic_flt_header), | ||
657 | (u8 *)flt_entry, entry_size); | ||
658 | if (ret) { | ||
659 | dev_warn(&adapter->pdev->dev, | ||
660 | "error reading flash layout entries\n"); | ||
661 | goto err_out; | ||
662 | } | ||
663 | |||
664 | while (i < (entry_size/sizeof(struct qlcnic_flt_entry))) { | ||
665 | if (flt_entry[i].region == region) | ||
666 | break; | ||
667 | i++; | ||
668 | } | ||
669 | if (i >= (entry_size/sizeof(struct qlcnic_flt_entry))) { | ||
670 | dev_warn(&adapter->pdev->dev, | ||
671 | "region=%x not found in %d regions\n", region, i); | ||
672 | ret = -EIO; | ||
673 | goto err_out; | ||
674 | } | ||
675 | memcpy(region_entry, &flt_entry[i], sizeof(struct qlcnic_flt_entry)); | ||
676 | |||
677 | err_out: | ||
678 | vfree(flt_entry); | ||
679 | return ret; | ||
680 | } | ||
681 | |||
630 | int | 682 | int |
631 | qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter) | 683 | qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter) |
632 | { | 684 | { |
685 | struct qlcnic_flt_entry fw_entry; | ||
633 | u32 ver = -1, min_ver; | 686 | u32 ver = -1, min_ver; |
687 | int ret; | ||
634 | 688 | ||
635 | qlcnic_rom_fast_read(adapter, QLCNIC_FW_VERSION_OFFSET, (int *)&ver); | 689 | ret = qlcnic_get_flt_entry(adapter, QLCNIC_FW_IMAGE_REGION, &fw_entry); |
690 | if (!ret) | ||
691 | /* 0-4:-signature, 4-8:-fw version */ | ||
692 | qlcnic_rom_fast_read(adapter, fw_entry.start_addr + 4, | ||
693 | (int *)&ver); | ||
694 | else | ||
695 | qlcnic_rom_fast_read(adapter, QLCNIC_FW_VERSION_OFFSET, | ||
696 | (int *)&ver); | ||
636 | 697 | ||
637 | ver = QLCNIC_DECODE_VERSION(ver); | 698 | ver = QLCNIC_DECODE_VERSION(ver); |
638 | min_ver = QLCNIC_MIN_FW_VERSION; | 699 | min_ver = QLCNIC_MIN_FW_VERSION; |
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c index 11e3a46c0911..37c04b4fade3 100644 --- a/drivers/net/qlcnic/qlcnic_main.c +++ b/drivers/net/qlcnic/qlcnic_main.c | |||
@@ -31,15 +31,15 @@ static const char qlcnic_driver_string[] = "QLogic 1/10 GbE " | |||
31 | 31 | ||
32 | static struct workqueue_struct *qlcnic_wq; | 32 | static struct workqueue_struct *qlcnic_wq; |
33 | static int qlcnic_mac_learn; | 33 | static int qlcnic_mac_learn; |
34 | module_param(qlcnic_mac_learn, int, 0644); | 34 | module_param(qlcnic_mac_learn, int, 0444); |
35 | MODULE_PARM_DESC(qlcnic_mac_learn, "Mac Filter (0=disabled, 1=enabled)"); | 35 | MODULE_PARM_DESC(qlcnic_mac_learn, "Mac Filter (0=disabled, 1=enabled)"); |
36 | 36 | ||
37 | static int use_msi = 1; | 37 | static int use_msi = 1; |
38 | module_param(use_msi, int, 0644); | 38 | module_param(use_msi, int, 0444); |
39 | MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled"); | 39 | MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled"); |
40 | 40 | ||
41 | static int use_msi_x = 1; | 41 | static int use_msi_x = 1; |
42 | module_param(use_msi_x, int, 0644); | 42 | module_param(use_msi_x, int, 0444); |
43 | MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled"); | 43 | MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled"); |
44 | 44 | ||
45 | static int auto_fw_reset = AUTO_FW_RESET_ENABLED; | 45 | static int auto_fw_reset = AUTO_FW_RESET_ENABLED; |
@@ -47,11 +47,11 @@ module_param(auto_fw_reset, int, 0644); | |||
47 | MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled"); | 47 | MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled"); |
48 | 48 | ||
49 | static int load_fw_file; | 49 | static int load_fw_file; |
50 | module_param(load_fw_file, int, 0644); | 50 | module_param(load_fw_file, int, 0444); |
51 | MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file"); | 51 | MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file"); |
52 | 52 | ||
53 | static int qlcnic_config_npars; | 53 | static int qlcnic_config_npars; |
54 | module_param(qlcnic_config_npars, int, 0644); | 54 | module_param(qlcnic_config_npars, int, 0444); |
55 | MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled"); | 55 | MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled"); |
56 | 56 | ||
57 | static int __devinit qlcnic_probe(struct pci_dev *pdev, | 57 | static int __devinit qlcnic_probe(struct pci_dev *pdev, |
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index dd758cdb55c4..bde7d61f1930 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c | |||
@@ -554,6 +554,8 @@ struct rtl8169_private { | |||
554 | struct mii_if_info mii; | 554 | struct mii_if_info mii; |
555 | struct rtl8169_counters counters; | 555 | struct rtl8169_counters counters; |
556 | u32 saved_wolopts; | 556 | u32 saved_wolopts; |
557 | |||
558 | const struct firmware *fw; | ||
557 | }; | 559 | }; |
558 | 560 | ||
559 | MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>"); | 561 | MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>"); |
@@ -1632,42 +1634,163 @@ rtl_phy_write_fw(struct rtl8169_private *tp, const struct firmware *fw) | |||
1632 | { | 1634 | { |
1633 | __le32 *phytable = (__le32 *)fw->data; | 1635 | __le32 *phytable = (__le32 *)fw->data; |
1634 | struct net_device *dev = tp->dev; | 1636 | struct net_device *dev = tp->dev; |
1635 | size_t i; | 1637 | size_t index, fw_size = fw->size / sizeof(*phytable); |
1638 | u32 predata, count; | ||
1636 | 1639 | ||
1637 | if (fw->size % sizeof(*phytable)) { | 1640 | if (fw->size % sizeof(*phytable)) { |
1638 | netif_err(tp, probe, dev, "odd sized firmware %zd\n", fw->size); | 1641 | netif_err(tp, probe, dev, "odd sized firmware %zd\n", fw->size); |
1639 | return; | 1642 | return; |
1640 | } | 1643 | } |
1641 | 1644 | ||
1642 | for (i = 0; i < fw->size / sizeof(*phytable); i++) { | 1645 | for (index = 0; index < fw_size; index++) { |
1643 | u32 action = le32_to_cpu(phytable[i]); | 1646 | u32 action = le32_to_cpu(phytable[index]); |
1647 | u32 regno = (action & 0x0fff0000) >> 16; | ||
1644 | 1648 | ||
1645 | if (!action) | 1649 | switch(action & 0xf0000000) { |
1650 | case PHY_READ: | ||
1651 | case PHY_DATA_OR: | ||
1652 | case PHY_DATA_AND: | ||
1653 | case PHY_READ_EFUSE: | ||
1654 | case PHY_CLEAR_READCOUNT: | ||
1655 | case PHY_WRITE: | ||
1656 | case PHY_WRITE_PREVIOUS: | ||
1657 | case PHY_DELAY_MS: | ||
1658 | break; | ||
1659 | |||
1660 | case PHY_BJMPN: | ||
1661 | if (regno > index) { | ||
1662 | netif_err(tp, probe, tp->dev, | ||
1663 | "Out of range of firmware\n"); | ||
1664 | return; | ||
1665 | } | ||
1666 | break; | ||
1667 | case PHY_READCOUNT_EQ_SKIP: | ||
1668 | if (index + 2 >= fw_size) { | ||
1669 | netif_err(tp, probe, tp->dev, | ||
1670 | "Out of range of firmware\n"); | ||
1671 | return; | ||
1672 | } | ||
1673 | break; | ||
1674 | case PHY_COMP_EQ_SKIPN: | ||
1675 | case PHY_COMP_NEQ_SKIPN: | ||
1676 | case PHY_SKIPN: | ||
1677 | if (index + 1 + regno >= fw_size) { | ||
1678 | netif_err(tp, probe, tp->dev, | ||
1679 | "Out of range of firmware\n"); | ||
1680 | return; | ||
1681 | } | ||
1646 | break; | 1682 | break; |
1647 | 1683 | ||
1648 | if ((action & 0xf0000000) != PHY_WRITE) { | 1684 | case PHY_READ_MAC_BYTE: |
1649 | netif_err(tp, probe, dev, | 1685 | case PHY_WRITE_MAC_BYTE: |
1650 | "unknown action 0x%08x\n", action); | 1686 | case PHY_WRITE_ERI_WORD: |
1687 | default: | ||
1688 | netif_err(tp, probe, tp->dev, | ||
1689 | "Invalid action 0x%08x\n", action); | ||
1651 | return; | 1690 | return; |
1652 | } | 1691 | } |
1653 | } | 1692 | } |
1654 | 1693 | ||
1655 | while (i-- != 0) { | 1694 | predata = 0; |
1656 | u32 action = le32_to_cpu(*phytable); | 1695 | count = 0; |
1696 | |||
1697 | for (index = 0; index < fw_size; ) { | ||
1698 | u32 action = le32_to_cpu(phytable[index]); | ||
1657 | u32 data = action & 0x0000ffff; | 1699 | u32 data = action & 0x0000ffff; |
1658 | u32 reg = (action & 0x0fff0000) >> 16; | 1700 | u32 regno = (action & 0x0fff0000) >> 16; |
1701 | |||
1702 | if (!action) | ||
1703 | break; | ||
1659 | 1704 | ||
1660 | switch(action & 0xf0000000) { | 1705 | switch(action & 0xf0000000) { |
1706 | case PHY_READ: | ||
1707 | predata = rtl_readphy(tp, regno); | ||
1708 | count++; | ||
1709 | index++; | ||
1710 | break; | ||
1711 | case PHY_DATA_OR: | ||
1712 | predata |= data; | ||
1713 | index++; | ||
1714 | break; | ||
1715 | case PHY_DATA_AND: | ||
1716 | predata &= data; | ||
1717 | index++; | ||
1718 | break; | ||
1719 | case PHY_BJMPN: | ||
1720 | index -= regno; | ||
1721 | break; | ||
1722 | case PHY_READ_EFUSE: | ||
1723 | predata = rtl8168d_efuse_read(tp->mmio_addr, regno); | ||
1724 | index++; | ||
1725 | break; | ||
1726 | case PHY_CLEAR_READCOUNT: | ||
1727 | count = 0; | ||
1728 | index++; | ||
1729 | break; | ||
1661 | case PHY_WRITE: | 1730 | case PHY_WRITE: |
1662 | rtl_writephy(tp, reg, data); | 1731 | rtl_writephy(tp, regno, data); |
1663 | phytable++; | 1732 | index++; |
1664 | break; | 1733 | break; |
1734 | case PHY_READCOUNT_EQ_SKIP: | ||
1735 | if (count == data) | ||
1736 | index += 2; | ||
1737 | else | ||
1738 | index += 1; | ||
1739 | break; | ||
1740 | case PHY_COMP_EQ_SKIPN: | ||
1741 | if (predata == data) | ||
1742 | index += regno; | ||
1743 | index++; | ||
1744 | break; | ||
1745 | case PHY_COMP_NEQ_SKIPN: | ||
1746 | if (predata != data) | ||
1747 | index += regno; | ||
1748 | index++; | ||
1749 | break; | ||
1750 | case PHY_WRITE_PREVIOUS: | ||
1751 | rtl_writephy(tp, regno, predata); | ||
1752 | index++; | ||
1753 | break; | ||
1754 | case PHY_SKIPN: | ||
1755 | index += regno + 1; | ||
1756 | break; | ||
1757 | case PHY_DELAY_MS: | ||
1758 | mdelay(data); | ||
1759 | index++; | ||
1760 | break; | ||
1761 | |||
1762 | case PHY_READ_MAC_BYTE: | ||
1763 | case PHY_WRITE_MAC_BYTE: | ||
1764 | case PHY_WRITE_ERI_WORD: | ||
1665 | default: | 1765 | default: |
1666 | BUG(); | 1766 | BUG(); |
1667 | } | 1767 | } |
1668 | } | 1768 | } |
1669 | } | 1769 | } |
1670 | 1770 | ||
1771 | static void rtl_release_firmware(struct rtl8169_private *tp) | ||
1772 | { | ||
1773 | release_firmware(tp->fw); | ||
1774 | tp->fw = NULL; | ||
1775 | } | ||
1776 | |||
1777 | static int rtl_apply_firmware(struct rtl8169_private *tp, const char *fw_name) | ||
1778 | { | ||
1779 | const struct firmware **fw = &tp->fw; | ||
1780 | int rc = !*fw; | ||
1781 | |||
1782 | if (rc) { | ||
1783 | rc = request_firmware(fw, fw_name, &tp->pci_dev->dev); | ||
1784 | if (rc < 0) | ||
1785 | goto out; | ||
1786 | } | ||
1787 | |||
1788 | /* TODO: release firmware once rtl_phy_write_fw signals failures. */ | ||
1789 | rtl_phy_write_fw(tp, *fw); | ||
1790 | out: | ||
1791 | return rc; | ||
1792 | } | ||
1793 | |||
1671 | static void rtl8169s_hw_phy_config(struct rtl8169_private *tp) | 1794 | static void rtl8169s_hw_phy_config(struct rtl8169_private *tp) |
1672 | { | 1795 | { |
1673 | static const struct phy_reg phy_reg_init[] = { | 1796 | static const struct phy_reg phy_reg_init[] = { |
@@ -2041,7 +2164,6 @@ static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp) | |||
2041 | { 0x0d, 0xf880 } | 2164 | { 0x0d, 0xf880 } |
2042 | }; | 2165 | }; |
2043 | void __iomem *ioaddr = tp->mmio_addr; | 2166 | void __iomem *ioaddr = tp->mmio_addr; |
2044 | const struct firmware *fw; | ||
2045 | 2167 | ||
2046 | rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0)); | 2168 | rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0)); |
2047 | 2169 | ||
@@ -2105,11 +2227,8 @@ static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp) | |||
2105 | 2227 | ||
2106 | rtl_writephy(tp, 0x1f, 0x0005); | 2228 | rtl_writephy(tp, 0x1f, 0x0005); |
2107 | rtl_writephy(tp, 0x05, 0x001b); | 2229 | rtl_writephy(tp, 0x05, 0x001b); |
2108 | if (rtl_readphy(tp, 0x06) == 0xbf00 && | 2230 | if ((rtl_readphy(tp, 0x06) != 0xbf00) || |
2109 | request_firmware(&fw, FIRMWARE_8168D_1, &tp->pci_dev->dev) == 0) { | 2231 | (rtl_apply_firmware(tp, FIRMWARE_8168D_1) < 0)) { |
2110 | rtl_phy_write_fw(tp, fw); | ||
2111 | release_firmware(fw); | ||
2112 | } else { | ||
2113 | netif_warn(tp, probe, tp->dev, "unable to apply firmware patch\n"); | 2232 | netif_warn(tp, probe, tp->dev, "unable to apply firmware patch\n"); |
2114 | } | 2233 | } |
2115 | 2234 | ||
@@ -2159,7 +2278,6 @@ static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp) | |||
2159 | { 0x0d, 0xf880 } | 2278 | { 0x0d, 0xf880 } |
2160 | }; | 2279 | }; |
2161 | void __iomem *ioaddr = tp->mmio_addr; | 2280 | void __iomem *ioaddr = tp->mmio_addr; |
2162 | const struct firmware *fw; | ||
2163 | 2281 | ||
2164 | rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0)); | 2282 | rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0)); |
2165 | 2283 | ||
@@ -2214,11 +2332,8 @@ static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp) | |||
2214 | 2332 | ||
2215 | rtl_writephy(tp, 0x1f, 0x0005); | 2333 | rtl_writephy(tp, 0x1f, 0x0005); |
2216 | rtl_writephy(tp, 0x05, 0x001b); | 2334 | rtl_writephy(tp, 0x05, 0x001b); |
2217 | if (rtl_readphy(tp, 0x06) == 0xb300 && | 2335 | if ((rtl_readphy(tp, 0x06) != 0xb300) || |
2218 | request_firmware(&fw, FIRMWARE_8168D_2, &tp->pci_dev->dev) == 0) { | 2336 | (rtl_apply_firmware(tp, FIRMWARE_8168D_2) < 0)) { |
2219 | rtl_phy_write_fw(tp, fw); | ||
2220 | release_firmware(fw); | ||
2221 | } else { | ||
2222 | netif_warn(tp, probe, tp->dev, "unable to apply firmware patch\n"); | 2337 | netif_warn(tp, probe, tp->dev, "unable to apply firmware patch\n"); |
2223 | } | 2338 | } |
2224 | 2339 | ||
@@ -3102,6 +3217,8 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev) | |||
3102 | 3217 | ||
3103 | cancel_delayed_work_sync(&tp->task); | 3218 | cancel_delayed_work_sync(&tp->task); |
3104 | 3219 | ||
3220 | rtl_release_firmware(tp); | ||
3221 | |||
3105 | unregister_netdev(dev); | 3222 | unregister_netdev(dev); |
3106 | 3223 | ||
3107 | if (pci_dev_run_wake(pdev)) | 3224 | if (pci_dev_run_wake(pdev)) |
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c index 711449c6e675..002bac743843 100644 --- a/drivers/net/sfc/efx.c +++ b/drivers/net/sfc/efx.c | |||
@@ -1153,6 +1153,9 @@ static int efx_wanted_channels(void) | |||
1153 | int count; | 1153 | int count; |
1154 | int cpu; | 1154 | int cpu; |
1155 | 1155 | ||
1156 | if (rss_cpus) | ||
1157 | return rss_cpus; | ||
1158 | |||
1156 | if (unlikely(!zalloc_cpumask_var(&core_mask, GFP_KERNEL))) { | 1159 | if (unlikely(!zalloc_cpumask_var(&core_mask, GFP_KERNEL))) { |
1157 | printk(KERN_WARNING | 1160 | printk(KERN_WARNING |
1158 | "sfc: RSS disabled due to allocation failure\n"); | 1161 | "sfc: RSS disabled due to allocation failure\n"); |
@@ -1266,27 +1269,18 @@ static void efx_remove_interrupts(struct efx_nic *efx) | |||
1266 | efx->legacy_irq = 0; | 1269 | efx->legacy_irq = 0; |
1267 | } | 1270 | } |
1268 | 1271 | ||
1269 | struct efx_tx_queue * | ||
1270 | efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type) | ||
1271 | { | ||
1272 | unsigned tx_channel_offset = | ||
1273 | separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0; | ||
1274 | EFX_BUG_ON_PARANOID(index >= efx->n_tx_channels || | ||
1275 | type >= EFX_TXQ_TYPES); | ||
1276 | return &efx->channel[tx_channel_offset + index]->tx_queue[type]; | ||
1277 | } | ||
1278 | |||
1279 | static void efx_set_channels(struct efx_nic *efx) | 1272 | static void efx_set_channels(struct efx_nic *efx) |
1280 | { | 1273 | { |
1281 | struct efx_channel *channel; | 1274 | struct efx_channel *channel; |
1282 | struct efx_tx_queue *tx_queue; | 1275 | struct efx_tx_queue *tx_queue; |
1283 | unsigned tx_channel_offset = | 1276 | |
1277 | efx->tx_channel_offset = | ||
1284 | separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0; | 1278 | separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0; |
1285 | 1279 | ||
1286 | /* Channel pointers were set in efx_init_struct() but we now | 1280 | /* Channel pointers were set in efx_init_struct() but we now |
1287 | * need to clear them for TX queues in any RX-only channels. */ | 1281 | * need to clear them for TX queues in any RX-only channels. */ |
1288 | efx_for_each_channel(channel, efx) { | 1282 | efx_for_each_channel(channel, efx) { |
1289 | if (channel->channel - tx_channel_offset >= | 1283 | if (channel->channel - efx->tx_channel_offset >= |
1290 | efx->n_tx_channels) { | 1284 | efx->n_tx_channels) { |
1291 | efx_for_each_channel_tx_queue(tx_queue, channel) | 1285 | efx_for_each_channel_tx_queue(tx_queue, channel) |
1292 | tx_queue->channel = NULL; | 1286 | tx_queue->channel = NULL; |
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h index bdce66ddf93a..28df8665256a 100644 --- a/drivers/net/sfc/net_driver.h +++ b/drivers/net/sfc/net_driver.h | |||
@@ -735,6 +735,7 @@ struct efx_nic { | |||
735 | unsigned next_buffer_table; | 735 | unsigned next_buffer_table; |
736 | unsigned n_channels; | 736 | unsigned n_channels; |
737 | unsigned n_rx_channels; | 737 | unsigned n_rx_channels; |
738 | unsigned tx_channel_offset; | ||
738 | unsigned n_tx_channels; | 739 | unsigned n_tx_channels; |
739 | unsigned int rx_buffer_len; | 740 | unsigned int rx_buffer_len; |
740 | unsigned int rx_buffer_order; | 741 | unsigned int rx_buffer_order; |
@@ -929,8 +930,13 @@ efx_get_channel(struct efx_nic *efx, unsigned index) | |||
929 | _channel = (_channel->channel + 1 < (_efx)->n_channels) ? \ | 930 | _channel = (_channel->channel + 1 < (_efx)->n_channels) ? \ |
930 | (_efx)->channel[_channel->channel + 1] : NULL) | 931 | (_efx)->channel[_channel->channel + 1] : NULL) |
931 | 932 | ||
932 | extern struct efx_tx_queue * | 933 | static inline struct efx_tx_queue * |
933 | efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type); | 934 | efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type) |
935 | { | ||
936 | EFX_BUG_ON_PARANOID(index >= efx->n_tx_channels || | ||
937 | type >= EFX_TXQ_TYPES); | ||
938 | return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type]; | ||
939 | } | ||
934 | 940 | ||
935 | static inline struct efx_tx_queue * | 941 | static inline struct efx_tx_queue * |
936 | efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type) | 942 | efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type) |
diff --git a/drivers/net/tile/tilepro.c b/drivers/net/tile/tilepro.c index 0e6bac5ec65b..7cb301da7474 100644 --- a/drivers/net/tile/tilepro.c +++ b/drivers/net/tile/tilepro.c | |||
@@ -142,14 +142,6 @@ | |||
142 | MODULE_AUTHOR("Tilera"); | 142 | MODULE_AUTHOR("Tilera"); |
143 | MODULE_LICENSE("GPL"); | 143 | MODULE_LICENSE("GPL"); |
144 | 144 | ||
145 | |||
146 | #define IS_MULTICAST(mac_addr) \ | ||
147 | (((u8 *)(mac_addr))[0] & 0x01) | ||
148 | |||
149 | #define IS_BROADCAST(mac_addr) \ | ||
150 | (((u16 *)(mac_addr))[0] == 0xffff) | ||
151 | |||
152 | |||
153 | /* | 145 | /* |
154 | * Queue of incoming packets for a specific cpu and device. | 146 | * Queue of incoming packets for a specific cpu and device. |
155 | * | 147 | * |
@@ -795,7 +787,7 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index) | |||
795 | /* | 787 | /* |
796 | * FIXME: Implement HW multicast filter. | 788 | * FIXME: Implement HW multicast filter. |
797 | */ | 789 | */ |
798 | if (!IS_MULTICAST(buf) && !IS_BROADCAST(buf)) { | 790 | if (is_unicast_ether_addr(buf)) { |
799 | /* Filter packets not for our address. */ | 791 | /* Filter packets not for our address. */ |
800 | const u8 *mine = dev->dev_addr; | 792 | const u8 *mine = dev->dev_addr; |
801 | filter = compare_ether_addr(mine, buf); | 793 | filter = compare_ether_addr(mine, buf); |
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c index acbdab3d66ca..dc6cb974f25d 100644 --- a/drivers/net/ucc_geth.c +++ b/drivers/net/ucc_geth.c | |||
@@ -2031,7 +2031,7 @@ static void ucc_geth_set_multi(struct net_device *dev) | |||
2031 | netdev_for_each_mc_addr(ha, dev) { | 2031 | netdev_for_each_mc_addr(ha, dev) { |
2032 | /* Only support group multicast for now. | 2032 | /* Only support group multicast for now. |
2033 | */ | 2033 | */ |
2034 | if (!(ha->addr[0] & 1)) | 2034 | if (!is_multicast_ether_addr(ha->addr)) |
2035 | continue; | 2035 | continue; |
2036 | 2036 | ||
2037 | /* Ask CPM to run CRC and set bit in | 2037 | /* Ask CPM to run CRC and set bit in |
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 593c104ab199..d776c4a8d3c1 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c | |||
@@ -1021,13 +1021,15 @@ static int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in) | |||
1021 | (temp > CDC_NCM_MAX_DATAGRAM_SIZE) || (temp < ETH_HLEN)) { | 1021 | (temp > CDC_NCM_MAX_DATAGRAM_SIZE) || (temp < ETH_HLEN)) { |
1022 | pr_debug("invalid frame detected (ignored)" | 1022 | pr_debug("invalid frame detected (ignored)" |
1023 | "offset[%u]=%u, length=%u, skb=%p\n", | 1023 | "offset[%u]=%u, length=%u, skb=%p\n", |
1024 | x, offset, temp, skb); | 1024 | x, offset, temp, skb_in); |
1025 | if (!x) | 1025 | if (!x) |
1026 | goto error; | 1026 | goto error; |
1027 | break; | 1027 | break; |
1028 | 1028 | ||
1029 | } else { | 1029 | } else { |
1030 | skb = skb_clone(skb_in, GFP_ATOMIC); | 1030 | skb = skb_clone(skb_in, GFP_ATOMIC); |
1031 | if (!skb) | ||
1032 | goto error; | ||
1031 | skb->len = temp; | 1033 | skb->len = temp; |
1032 | skb->data = ((u8 *)skb_in->data) + offset; | 1034 | skb->data = ((u8 *)skb_in->data) + offset; |
1033 | skb_set_tail_pointer(skb, temp); | 1035 | skb_set_tail_pointer(skb, temp); |
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index d143e8b72b5b..cc14b4a75048 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c | |||
@@ -48,6 +48,9 @@ static atomic_t devices_found; | |||
48 | static int enable_mq = 1; | 48 | static int enable_mq = 1; |
49 | static int irq_share_mode; | 49 | static int irq_share_mode; |
50 | 50 | ||
51 | static void | ||
52 | vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac); | ||
53 | |||
51 | /* | 54 | /* |
52 | * Enable/Disable the given intr | 55 | * Enable/Disable the given intr |
53 | */ | 56 | */ |
@@ -139,9 +142,13 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue) | |||
139 | { | 142 | { |
140 | u32 ret; | 143 | u32 ret; |
141 | int i; | 144 | int i; |
145 | unsigned long flags; | ||
142 | 146 | ||
147 | spin_lock_irqsave(&adapter->cmd_lock, flags); | ||
143 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK); | 148 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK); |
144 | ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); | 149 | ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); |
150 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); | ||
151 | |||
145 | adapter->link_speed = ret >> 16; | 152 | adapter->link_speed = ret >> 16; |
146 | if (ret & 1) { /* Link is up. */ | 153 | if (ret & 1) { /* Link is up. */ |
147 | printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n", | 154 | printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n", |
@@ -183,8 +190,10 @@ vmxnet3_process_events(struct vmxnet3_adapter *adapter) | |||
183 | 190 | ||
184 | /* Check if there is an error on xmit/recv queues */ | 191 | /* Check if there is an error on xmit/recv queues */ |
185 | if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) { | 192 | if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) { |
193 | spin_lock(&adapter->cmd_lock); | ||
186 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | 194 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, |
187 | VMXNET3_CMD_GET_QUEUE_STATUS); | 195 | VMXNET3_CMD_GET_QUEUE_STATUS); |
196 | spin_unlock(&adapter->cmd_lock); | ||
188 | 197 | ||
189 | for (i = 0; i < adapter->num_tx_queues; i++) | 198 | for (i = 0; i < adapter->num_tx_queues; i++) |
190 | if (adapter->tqd_start[i].status.stopped) | 199 | if (adapter->tqd_start[i].status.stopped) |
@@ -804,30 +813,25 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, | |||
804 | skb_transport_header(skb))->doff * 4; | 813 | skb_transport_header(skb))->doff * 4; |
805 | ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size; | 814 | ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size; |
806 | } else { | 815 | } else { |
807 | unsigned int pull_size; | ||
808 | |||
809 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 816 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
810 | ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb); | 817 | ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb); |
811 | 818 | ||
812 | if (ctx->ipv4) { | 819 | if (ctx->ipv4) { |
813 | struct iphdr *iph = (struct iphdr *) | 820 | struct iphdr *iph = (struct iphdr *) |
814 | skb_network_header(skb); | 821 | skb_network_header(skb); |
815 | if (iph->protocol == IPPROTO_TCP) { | 822 | if (iph->protocol == IPPROTO_TCP) |
816 | pull_size = ctx->eth_ip_hdr_size + | ||
817 | sizeof(struct tcphdr); | ||
818 | |||
819 | if (unlikely(!pskb_may_pull(skb, | ||
820 | pull_size))) { | ||
821 | goto err; | ||
822 | } | ||
823 | ctx->l4_hdr_size = ((struct tcphdr *) | 823 | ctx->l4_hdr_size = ((struct tcphdr *) |
824 | skb_transport_header(skb))->doff * 4; | 824 | skb_transport_header(skb))->doff * 4; |
825 | } else if (iph->protocol == IPPROTO_UDP) { | 825 | else if (iph->protocol == IPPROTO_UDP) |
826 | /* | ||
827 | * Use tcp header size so that bytes to | ||
828 | * be copied are more than required by | ||
829 | * the device. | ||
830 | */ | ||
826 | ctx->l4_hdr_size = | 831 | ctx->l4_hdr_size = |
827 | sizeof(struct udphdr); | 832 | sizeof(struct tcphdr); |
828 | } else { | 833 | else |
829 | ctx->l4_hdr_size = 0; | 834 | ctx->l4_hdr_size = 0; |
830 | } | ||
831 | } else { | 835 | } else { |
832 | /* for simplicity, don't copy L4 headers */ | 836 | /* for simplicity, don't copy L4 headers */ |
833 | ctx->l4_hdr_size = 0; | 837 | ctx->l4_hdr_size = 0; |
@@ -1859,18 +1863,14 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) | |||
1859 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | 1863 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); |
1860 | struct Vmxnet3_DriverShared *shared = adapter->shared; | 1864 | struct Vmxnet3_DriverShared *shared = adapter->shared; |
1861 | u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; | 1865 | u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; |
1866 | unsigned long flags; | ||
1862 | 1867 | ||
1863 | if (grp) { | 1868 | if (grp) { |
1864 | /* add vlan rx stripping. */ | 1869 | /* add vlan rx stripping. */ |
1865 | if (adapter->netdev->features & NETIF_F_HW_VLAN_RX) { | 1870 | if (adapter->netdev->features & NETIF_F_HW_VLAN_RX) { |
1866 | int i; | 1871 | int i; |
1867 | struct Vmxnet3_DSDevRead *devRead = &shared->devRead; | ||
1868 | adapter->vlan_grp = grp; | 1872 | adapter->vlan_grp = grp; |
1869 | 1873 | ||
1870 | /* update FEATURES to device */ | ||
1871 | devRead->misc.uptFeatures |= UPT1_F_RXVLAN; | ||
1872 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | ||
1873 | VMXNET3_CMD_UPDATE_FEATURE); | ||
1874 | /* | 1874 | /* |
1875 | * Clear entire vfTable; then enable untagged pkts. | 1875 | * Clear entire vfTable; then enable untagged pkts. |
1876 | * Note: setting one entry in vfTable to non-zero turns | 1876 | * Note: setting one entry in vfTable to non-zero turns |
@@ -1880,8 +1880,10 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) | |||
1880 | vfTable[i] = 0; | 1880 | vfTable[i] = 0; |
1881 | 1881 | ||
1882 | VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0); | 1882 | VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0); |
1883 | spin_lock_irqsave(&adapter->cmd_lock, flags); | ||
1883 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | 1884 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, |
1884 | VMXNET3_CMD_UPDATE_VLAN_FILTERS); | 1885 | VMXNET3_CMD_UPDATE_VLAN_FILTERS); |
1886 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); | ||
1885 | } else { | 1887 | } else { |
1886 | printk(KERN_ERR "%s: vlan_rx_register when device has " | 1888 | printk(KERN_ERR "%s: vlan_rx_register when device has " |
1887 | "no NETIF_F_HW_VLAN_RX\n", netdev->name); | 1889 | "no NETIF_F_HW_VLAN_RX\n", netdev->name); |
@@ -1900,13 +1902,10 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) | |||
1900 | */ | 1902 | */ |
1901 | vfTable[i] = 0; | 1903 | vfTable[i] = 0; |
1902 | } | 1904 | } |
1905 | spin_lock_irqsave(&adapter->cmd_lock, flags); | ||
1903 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | 1906 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, |
1904 | VMXNET3_CMD_UPDATE_VLAN_FILTERS); | 1907 | VMXNET3_CMD_UPDATE_VLAN_FILTERS); |
1905 | 1908 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); | |
1906 | /* update FEATURES to device */ | ||
1907 | devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN; | ||
1908 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | ||
1909 | VMXNET3_CMD_UPDATE_FEATURE); | ||
1910 | } | 1909 | } |
1911 | } | 1910 | } |
1912 | } | 1911 | } |
@@ -1939,10 +1938,13 @@ vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid) | |||
1939 | { | 1938 | { |
1940 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | 1939 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); |
1941 | u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; | 1940 | u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; |
1941 | unsigned long flags; | ||
1942 | 1942 | ||
1943 | VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid); | 1943 | VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid); |
1944 | spin_lock_irqsave(&adapter->cmd_lock, flags); | ||
1944 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | 1945 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, |
1945 | VMXNET3_CMD_UPDATE_VLAN_FILTERS); | 1946 | VMXNET3_CMD_UPDATE_VLAN_FILTERS); |
1947 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); | ||
1946 | } | 1948 | } |
1947 | 1949 | ||
1948 | 1950 | ||
@@ -1951,10 +1953,13 @@ vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) | |||
1951 | { | 1953 | { |
1952 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | 1954 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); |
1953 | u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; | 1955 | u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; |
1956 | unsigned long flags; | ||
1954 | 1957 | ||
1955 | VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid); | 1958 | VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid); |
1959 | spin_lock_irqsave(&adapter->cmd_lock, flags); | ||
1956 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | 1960 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, |
1957 | VMXNET3_CMD_UPDATE_VLAN_FILTERS); | 1961 | VMXNET3_CMD_UPDATE_VLAN_FILTERS); |
1962 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); | ||
1958 | } | 1963 | } |
1959 | 1964 | ||
1960 | 1965 | ||
@@ -1985,6 +1990,7 @@ static void | |||
1985 | vmxnet3_set_mc(struct net_device *netdev) | 1990 | vmxnet3_set_mc(struct net_device *netdev) |
1986 | { | 1991 | { |
1987 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | 1992 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); |
1993 | unsigned long flags; | ||
1988 | struct Vmxnet3_RxFilterConf *rxConf = | 1994 | struct Vmxnet3_RxFilterConf *rxConf = |
1989 | &adapter->shared->devRead.rxFilterConf; | 1995 | &adapter->shared->devRead.rxFilterConf; |
1990 | u8 *new_table = NULL; | 1996 | u8 *new_table = NULL; |
@@ -2020,6 +2026,7 @@ vmxnet3_set_mc(struct net_device *netdev) | |||
2020 | rxConf->mfTablePA = 0; | 2026 | rxConf->mfTablePA = 0; |
2021 | } | 2027 | } |
2022 | 2028 | ||
2029 | spin_lock_irqsave(&adapter->cmd_lock, flags); | ||
2023 | if (new_mode != rxConf->rxMode) { | 2030 | if (new_mode != rxConf->rxMode) { |
2024 | rxConf->rxMode = cpu_to_le32(new_mode); | 2031 | rxConf->rxMode = cpu_to_le32(new_mode); |
2025 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | 2032 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, |
@@ -2028,6 +2035,7 @@ vmxnet3_set_mc(struct net_device *netdev) | |||
2028 | 2035 | ||
2029 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | 2036 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, |
2030 | VMXNET3_CMD_UPDATE_MAC_FILTERS); | 2037 | VMXNET3_CMD_UPDATE_MAC_FILTERS); |
2038 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); | ||
2031 | 2039 | ||
2032 | kfree(new_table); | 2040 | kfree(new_table); |
2033 | } | 2041 | } |
@@ -2080,10 +2088,8 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter) | |||
2080 | devRead->misc.uptFeatures |= UPT1_F_LRO; | 2088 | devRead->misc.uptFeatures |= UPT1_F_LRO; |
2081 | devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS); | 2089 | devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS); |
2082 | } | 2090 | } |
2083 | if ((adapter->netdev->features & NETIF_F_HW_VLAN_RX) && | 2091 | if (adapter->netdev->features & NETIF_F_HW_VLAN_RX) |
2084 | adapter->vlan_grp) { | ||
2085 | devRead->misc.uptFeatures |= UPT1_F_RXVLAN; | 2092 | devRead->misc.uptFeatures |= UPT1_F_RXVLAN; |
2086 | } | ||
2087 | 2093 | ||
2088 | devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu); | 2094 | devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu); |
2089 | devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa); | 2095 | devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa); |
@@ -2168,6 +2174,8 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter) | |||
2168 | /* rx filter settings */ | 2174 | /* rx filter settings */ |
2169 | devRead->rxFilterConf.rxMode = 0; | 2175 | devRead->rxFilterConf.rxMode = 0; |
2170 | vmxnet3_restore_vlan(adapter); | 2176 | vmxnet3_restore_vlan(adapter); |
2177 | vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr); | ||
2178 | |||
2171 | /* the rest are already zeroed */ | 2179 | /* the rest are already zeroed */ |
2172 | } | 2180 | } |
2173 | 2181 | ||
@@ -2177,6 +2185,7 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter) | |||
2177 | { | 2185 | { |
2178 | int err, i; | 2186 | int err, i; |
2179 | u32 ret; | 2187 | u32 ret; |
2188 | unsigned long flags; | ||
2180 | 2189 | ||
2181 | dev_dbg(&adapter->netdev->dev, "%s: skb_buf_size %d, rx_buf_per_pkt %d," | 2190 | dev_dbg(&adapter->netdev->dev, "%s: skb_buf_size %d, rx_buf_per_pkt %d," |
2182 | " ring sizes %u %u %u\n", adapter->netdev->name, | 2191 | " ring sizes %u %u %u\n", adapter->netdev->name, |
@@ -2206,9 +2215,11 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter) | |||
2206 | adapter->shared_pa)); | 2215 | adapter->shared_pa)); |
2207 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI( | 2216 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI( |
2208 | adapter->shared_pa)); | 2217 | adapter->shared_pa)); |
2218 | spin_lock_irqsave(&adapter->cmd_lock, flags); | ||
2209 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | 2219 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, |
2210 | VMXNET3_CMD_ACTIVATE_DEV); | 2220 | VMXNET3_CMD_ACTIVATE_DEV); |
2211 | ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); | 2221 | ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); |
2222 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); | ||
2212 | 2223 | ||
2213 | if (ret != 0) { | 2224 | if (ret != 0) { |
2214 | printk(KERN_ERR "Failed to activate dev %s: error %u\n", | 2225 | printk(KERN_ERR "Failed to activate dev %s: error %u\n", |
@@ -2255,7 +2266,10 @@ rq_err: | |||
2255 | void | 2266 | void |
2256 | vmxnet3_reset_dev(struct vmxnet3_adapter *adapter) | 2267 | vmxnet3_reset_dev(struct vmxnet3_adapter *adapter) |
2257 | { | 2268 | { |
2269 | unsigned long flags; | ||
2270 | spin_lock_irqsave(&adapter->cmd_lock, flags); | ||
2258 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV); | 2271 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV); |
2272 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); | ||
2259 | } | 2273 | } |
2260 | 2274 | ||
2261 | 2275 | ||
@@ -2263,12 +2277,15 @@ int | |||
2263 | vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter) | 2277 | vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter) |
2264 | { | 2278 | { |
2265 | int i; | 2279 | int i; |
2280 | unsigned long flags; | ||
2266 | if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state)) | 2281 | if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state)) |
2267 | return 0; | 2282 | return 0; |
2268 | 2283 | ||
2269 | 2284 | ||
2285 | spin_lock_irqsave(&adapter->cmd_lock, flags); | ||
2270 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | 2286 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, |
2271 | VMXNET3_CMD_QUIESCE_DEV); | 2287 | VMXNET3_CMD_QUIESCE_DEV); |
2288 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); | ||
2272 | vmxnet3_disable_all_intrs(adapter); | 2289 | vmxnet3_disable_all_intrs(adapter); |
2273 | 2290 | ||
2274 | for (i = 0; i < adapter->num_rx_queues; i++) | 2291 | for (i = 0; i < adapter->num_rx_queues; i++) |
@@ -2426,7 +2443,7 @@ vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter) | |||
2426 | sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN; | 2443 | sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN; |
2427 | ring0_size = adapter->rx_queue[0].rx_ring[0].size; | 2444 | ring0_size = adapter->rx_queue[0].rx_ring[0].size; |
2428 | ring0_size = (ring0_size + sz - 1) / sz * sz; | 2445 | ring0_size = (ring0_size + sz - 1) / sz * sz; |
2429 | ring0_size = min_t(u32, rq->rx_ring[0].size, VMXNET3_RX_RING_MAX_SIZE / | 2446 | ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE / |
2430 | sz * sz); | 2447 | sz * sz); |
2431 | ring1_size = adapter->rx_queue[0].rx_ring[1].size; | 2448 | ring1_size = adapter->rx_queue[0].rx_ring[1].size; |
2432 | comp_size = ring0_size + ring1_size; | 2449 | comp_size = ring0_size + ring1_size; |
@@ -2695,7 +2712,7 @@ vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter, | |||
2695 | break; | 2712 | break; |
2696 | } else { | 2713 | } else { |
2697 | /* If fails to enable required number of MSI-x vectors | 2714 | /* If fails to enable required number of MSI-x vectors |
2698 | * try enabling 3 of them. One each for rx, tx and event | 2715 | * try enabling minimum number of vectors required. |
2699 | */ | 2716 | */ |
2700 | vectors = vector_threshold; | 2717 | vectors = vector_threshold; |
2701 | printk(KERN_ERR "Failed to enable %d MSI-X for %s, try" | 2718 | printk(KERN_ERR "Failed to enable %d MSI-X for %s, try" |
@@ -2718,9 +2735,11 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter) | |||
2718 | u32 cfg; | 2735 | u32 cfg; |
2719 | 2736 | ||
2720 | /* intr settings */ | 2737 | /* intr settings */ |
2738 | spin_lock(&adapter->cmd_lock); | ||
2721 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | 2739 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, |
2722 | VMXNET3_CMD_GET_CONF_INTR); | 2740 | VMXNET3_CMD_GET_CONF_INTR); |
2723 | cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); | 2741 | cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); |
2742 | spin_unlock(&adapter->cmd_lock); | ||
2724 | adapter->intr.type = cfg & 0x3; | 2743 | adapter->intr.type = cfg & 0x3; |
2725 | adapter->intr.mask_mode = (cfg >> 2) & 0x3; | 2744 | adapter->intr.mask_mode = (cfg >> 2) & 0x3; |
2726 | 2745 | ||
@@ -2755,7 +2774,7 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter) | |||
2755 | */ | 2774 | */ |
2756 | if (err == VMXNET3_LINUX_MIN_MSIX_VECT) { | 2775 | if (err == VMXNET3_LINUX_MIN_MSIX_VECT) { |
2757 | if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE | 2776 | if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE |
2758 | || adapter->num_rx_queues != 2) { | 2777 | || adapter->num_rx_queues != 1) { |
2759 | adapter->share_intr = VMXNET3_INTR_TXSHARE; | 2778 | adapter->share_intr = VMXNET3_INTR_TXSHARE; |
2760 | printk(KERN_ERR "Number of rx queues : 1\n"); | 2779 | printk(KERN_ERR "Number of rx queues : 1\n"); |
2761 | adapter->num_rx_queues = 1; | 2780 | adapter->num_rx_queues = 1; |
@@ -2905,6 +2924,7 @@ vmxnet3_probe_device(struct pci_dev *pdev, | |||
2905 | adapter->netdev = netdev; | 2924 | adapter->netdev = netdev; |
2906 | adapter->pdev = pdev; | 2925 | adapter->pdev = pdev; |
2907 | 2926 | ||
2927 | spin_lock_init(&adapter->cmd_lock); | ||
2908 | adapter->shared = pci_alloc_consistent(adapter->pdev, | 2928 | adapter->shared = pci_alloc_consistent(adapter->pdev, |
2909 | sizeof(struct Vmxnet3_DriverShared), | 2929 | sizeof(struct Vmxnet3_DriverShared), |
2910 | &adapter->shared_pa); | 2930 | &adapter->shared_pa); |
@@ -3108,11 +3128,15 @@ vmxnet3_suspend(struct device *device) | |||
3108 | u8 *arpreq; | 3128 | u8 *arpreq; |
3109 | struct in_device *in_dev; | 3129 | struct in_device *in_dev; |
3110 | struct in_ifaddr *ifa; | 3130 | struct in_ifaddr *ifa; |
3131 | unsigned long flags; | ||
3111 | int i = 0; | 3132 | int i = 0; |
3112 | 3133 | ||
3113 | if (!netif_running(netdev)) | 3134 | if (!netif_running(netdev)) |
3114 | return 0; | 3135 | return 0; |
3115 | 3136 | ||
3137 | for (i = 0; i < adapter->num_rx_queues; i++) | ||
3138 | napi_disable(&adapter->rx_queue[i].napi); | ||
3139 | |||
3116 | vmxnet3_disable_all_intrs(adapter); | 3140 | vmxnet3_disable_all_intrs(adapter); |
3117 | vmxnet3_free_irqs(adapter); | 3141 | vmxnet3_free_irqs(adapter); |
3118 | vmxnet3_free_intr_resources(adapter); | 3142 | vmxnet3_free_intr_resources(adapter); |
@@ -3188,8 +3212,10 @@ skip_arp: | |||
3188 | adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys( | 3212 | adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys( |
3189 | pmConf)); | 3213 | pmConf)); |
3190 | 3214 | ||
3215 | spin_lock_irqsave(&adapter->cmd_lock, flags); | ||
3191 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | 3216 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, |
3192 | VMXNET3_CMD_UPDATE_PMCFG); | 3217 | VMXNET3_CMD_UPDATE_PMCFG); |
3218 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); | ||
3193 | 3219 | ||
3194 | pci_save_state(pdev); | 3220 | pci_save_state(pdev); |
3195 | pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND), | 3221 | pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND), |
@@ -3204,7 +3230,8 @@ skip_arp: | |||
3204 | static int | 3230 | static int |
3205 | vmxnet3_resume(struct device *device) | 3231 | vmxnet3_resume(struct device *device) |
3206 | { | 3232 | { |
3207 | int err; | 3233 | int err, i = 0; |
3234 | unsigned long flags; | ||
3208 | struct pci_dev *pdev = to_pci_dev(device); | 3235 | struct pci_dev *pdev = to_pci_dev(device); |
3209 | struct net_device *netdev = pci_get_drvdata(pdev); | 3236 | struct net_device *netdev = pci_get_drvdata(pdev); |
3210 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | 3237 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); |
@@ -3232,10 +3259,14 @@ vmxnet3_resume(struct device *device) | |||
3232 | 3259 | ||
3233 | pci_enable_wake(pdev, PCI_D0, 0); | 3260 | pci_enable_wake(pdev, PCI_D0, 0); |
3234 | 3261 | ||
3262 | spin_lock_irqsave(&adapter->cmd_lock, flags); | ||
3235 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | 3263 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, |
3236 | VMXNET3_CMD_UPDATE_PMCFG); | 3264 | VMXNET3_CMD_UPDATE_PMCFG); |
3265 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); | ||
3237 | vmxnet3_alloc_intr_resources(adapter); | 3266 | vmxnet3_alloc_intr_resources(adapter); |
3238 | vmxnet3_request_irqs(adapter); | 3267 | vmxnet3_request_irqs(adapter); |
3268 | for (i = 0; i < adapter->num_rx_queues; i++) | ||
3269 | napi_enable(&adapter->rx_queue[i].napi); | ||
3239 | vmxnet3_enable_all_intrs(adapter); | 3270 | vmxnet3_enable_all_intrs(adapter); |
3240 | 3271 | ||
3241 | return 0; | 3272 | return 0; |
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c index 8e17fc8a7fe7..81254be85b92 100644 --- a/drivers/net/vmxnet3/vmxnet3_ethtool.c +++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c | |||
@@ -45,6 +45,7 @@ static int | |||
45 | vmxnet3_set_rx_csum(struct net_device *netdev, u32 val) | 45 | vmxnet3_set_rx_csum(struct net_device *netdev, u32 val) |
46 | { | 46 | { |
47 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | 47 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); |
48 | unsigned long flags; | ||
48 | 49 | ||
49 | if (adapter->rxcsum != val) { | 50 | if (adapter->rxcsum != val) { |
50 | adapter->rxcsum = val; | 51 | adapter->rxcsum = val; |
@@ -56,8 +57,10 @@ vmxnet3_set_rx_csum(struct net_device *netdev, u32 val) | |||
56 | adapter->shared->devRead.misc.uptFeatures &= | 57 | adapter->shared->devRead.misc.uptFeatures &= |
57 | ~UPT1_F_RXCSUM; | 58 | ~UPT1_F_RXCSUM; |
58 | 59 | ||
60 | spin_lock_irqsave(&adapter->cmd_lock, flags); | ||
59 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | 61 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, |
60 | VMXNET3_CMD_UPDATE_FEATURE); | 62 | VMXNET3_CMD_UPDATE_FEATURE); |
63 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); | ||
61 | } | 64 | } |
62 | } | 65 | } |
63 | return 0; | 66 | return 0; |
@@ -68,76 +71,78 @@ vmxnet3_set_rx_csum(struct net_device *netdev, u32 val) | |||
68 | static const struct vmxnet3_stat_desc | 71 | static const struct vmxnet3_stat_desc |
69 | vmxnet3_tq_dev_stats[] = { | 72 | vmxnet3_tq_dev_stats[] = { |
70 | /* description, offset */ | 73 | /* description, offset */ |
71 | { "TSO pkts tx", offsetof(struct UPT1_TxStats, TSOPktsTxOK) }, | 74 | { "Tx Queue#", 0 }, |
72 | { "TSO bytes tx", offsetof(struct UPT1_TxStats, TSOBytesTxOK) }, | 75 | { " TSO pkts tx", offsetof(struct UPT1_TxStats, TSOPktsTxOK) }, |
73 | { "ucast pkts tx", offsetof(struct UPT1_TxStats, ucastPktsTxOK) }, | 76 | { " TSO bytes tx", offsetof(struct UPT1_TxStats, TSOBytesTxOK) }, |
74 | { "ucast bytes tx", offsetof(struct UPT1_TxStats, ucastBytesTxOK) }, | 77 | { " ucast pkts tx", offsetof(struct UPT1_TxStats, ucastPktsTxOK) }, |
75 | { "mcast pkts tx", offsetof(struct UPT1_TxStats, mcastPktsTxOK) }, | 78 | { " ucast bytes tx", offsetof(struct UPT1_TxStats, ucastBytesTxOK) }, |
76 | { "mcast bytes tx", offsetof(struct UPT1_TxStats, mcastBytesTxOK) }, | 79 | { " mcast pkts tx", offsetof(struct UPT1_TxStats, mcastPktsTxOK) }, |
77 | { "bcast pkts tx", offsetof(struct UPT1_TxStats, bcastPktsTxOK) }, | 80 | { " mcast bytes tx", offsetof(struct UPT1_TxStats, mcastBytesTxOK) }, |
78 | { "bcast bytes tx", offsetof(struct UPT1_TxStats, bcastBytesTxOK) }, | 81 | { " bcast pkts tx", offsetof(struct UPT1_TxStats, bcastPktsTxOK) }, |
79 | { "pkts tx err", offsetof(struct UPT1_TxStats, pktsTxError) }, | 82 | { " bcast bytes tx", offsetof(struct UPT1_TxStats, bcastBytesTxOK) }, |
80 | { "pkts tx discard", offsetof(struct UPT1_TxStats, pktsTxDiscard) }, | 83 | { " pkts tx err", offsetof(struct UPT1_TxStats, pktsTxError) }, |
84 | { " pkts tx discard", offsetof(struct UPT1_TxStats, pktsTxDiscard) }, | ||
81 | }; | 85 | }; |
82 | 86 | ||
83 | /* per tq stats maintained by the driver */ | 87 | /* per tq stats maintained by the driver */ |
84 | static const struct vmxnet3_stat_desc | 88 | static const struct vmxnet3_stat_desc |
85 | vmxnet3_tq_driver_stats[] = { | 89 | vmxnet3_tq_driver_stats[] = { |
86 | /* description, offset */ | 90 | /* description, offset */ |
87 | {"drv dropped tx total", offsetof(struct vmxnet3_tq_driver_stats, | 91 | {" drv dropped tx total", offsetof(struct vmxnet3_tq_driver_stats, |
88 | drop_total) }, | 92 | drop_total) }, |
89 | { " too many frags", offsetof(struct vmxnet3_tq_driver_stats, | 93 | { " too many frags", offsetof(struct vmxnet3_tq_driver_stats, |
90 | drop_too_many_frags) }, | 94 | drop_too_many_frags) }, |
91 | { " giant hdr", offsetof(struct vmxnet3_tq_driver_stats, | 95 | { " giant hdr", offsetof(struct vmxnet3_tq_driver_stats, |
92 | drop_oversized_hdr) }, | 96 | drop_oversized_hdr) }, |
93 | { " hdr err", offsetof(struct vmxnet3_tq_driver_stats, | 97 | { " hdr err", offsetof(struct vmxnet3_tq_driver_stats, |
94 | drop_hdr_inspect_err) }, | 98 | drop_hdr_inspect_err) }, |
95 | { " tso", offsetof(struct vmxnet3_tq_driver_stats, | 99 | { " tso", offsetof(struct vmxnet3_tq_driver_stats, |
96 | drop_tso) }, | 100 | drop_tso) }, |
97 | { "ring full", offsetof(struct vmxnet3_tq_driver_stats, | 101 | { " ring full", offsetof(struct vmxnet3_tq_driver_stats, |
98 | tx_ring_full) }, | 102 | tx_ring_full) }, |
99 | { "pkts linearized", offsetof(struct vmxnet3_tq_driver_stats, | 103 | { " pkts linearized", offsetof(struct vmxnet3_tq_driver_stats, |
100 | linearized) }, | 104 | linearized) }, |
101 | { "hdr cloned", offsetof(struct vmxnet3_tq_driver_stats, | 105 | { " hdr cloned", offsetof(struct vmxnet3_tq_driver_stats, |
102 | copy_skb_header) }, | 106 | copy_skb_header) }, |
103 | { "giant hdr", offsetof(struct vmxnet3_tq_driver_stats, | 107 | { " giant hdr", offsetof(struct vmxnet3_tq_driver_stats, |
104 | oversized_hdr) }, | 108 | oversized_hdr) }, |
105 | }; | 109 | }; |
106 | 110 | ||
107 | /* per rq stats maintained by the device */ | 111 | /* per rq stats maintained by the device */ |
108 | static const struct vmxnet3_stat_desc | 112 | static const struct vmxnet3_stat_desc |
109 | vmxnet3_rq_dev_stats[] = { | 113 | vmxnet3_rq_dev_stats[] = { |
110 | { "LRO pkts rx", offsetof(struct UPT1_RxStats, LROPktsRxOK) }, | 114 | { "Rx Queue#", 0 }, |
111 | { "LRO byte rx", offsetof(struct UPT1_RxStats, LROBytesRxOK) }, | 115 | { " LRO pkts rx", offsetof(struct UPT1_RxStats, LROPktsRxOK) }, |
112 | { "ucast pkts rx", offsetof(struct UPT1_RxStats, ucastPktsRxOK) }, | 116 | { " LRO byte rx", offsetof(struct UPT1_RxStats, LROBytesRxOK) }, |
113 | { "ucast bytes rx", offsetof(struct UPT1_RxStats, ucastBytesRxOK) }, | 117 | { " ucast pkts rx", offsetof(struct UPT1_RxStats, ucastPktsRxOK) }, |
114 | { "mcast pkts rx", offsetof(struct UPT1_RxStats, mcastPktsRxOK) }, | 118 | { " ucast bytes rx", offsetof(struct UPT1_RxStats, ucastBytesRxOK) }, |
115 | { "mcast bytes rx", offsetof(struct UPT1_RxStats, mcastBytesRxOK) }, | 119 | { " mcast pkts rx", offsetof(struct UPT1_RxStats, mcastPktsRxOK) }, |
116 | { "bcast pkts rx", offsetof(struct UPT1_RxStats, bcastPktsRxOK) }, | 120 | { " mcast bytes rx", offsetof(struct UPT1_RxStats, mcastBytesRxOK) }, |
117 | { "bcast bytes rx", offsetof(struct UPT1_RxStats, bcastBytesRxOK) }, | 121 | { " bcast pkts rx", offsetof(struct UPT1_RxStats, bcastPktsRxOK) }, |
118 | { "pkts rx out of buf", offsetof(struct UPT1_RxStats, pktsRxOutOfBuf) }, | 122 | { " bcast bytes rx", offsetof(struct UPT1_RxStats, bcastBytesRxOK) }, |
119 | { "pkts rx err", offsetof(struct UPT1_RxStats, pktsRxError) }, | 123 | { " pkts rx OOB", offsetof(struct UPT1_RxStats, pktsRxOutOfBuf) }, |
124 | { " pkts rx err", offsetof(struct UPT1_RxStats, pktsRxError) }, | ||
120 | }; | 125 | }; |
121 | 126 | ||
122 | /* per rq stats maintained by the driver */ | 127 | /* per rq stats maintained by the driver */ |
123 | static const struct vmxnet3_stat_desc | 128 | static const struct vmxnet3_stat_desc |
124 | vmxnet3_rq_driver_stats[] = { | 129 | vmxnet3_rq_driver_stats[] = { |
125 | /* description, offset */ | 130 | /* description, offset */ |
126 | { "drv dropped rx total", offsetof(struct vmxnet3_rq_driver_stats, | 131 | { " drv dropped rx total", offsetof(struct vmxnet3_rq_driver_stats, |
127 | drop_total) }, | 132 | drop_total) }, |
128 | { " err", offsetof(struct vmxnet3_rq_driver_stats, | 133 | { " err", offsetof(struct vmxnet3_rq_driver_stats, |
129 | drop_err) }, | 134 | drop_err) }, |
130 | { " fcs", offsetof(struct vmxnet3_rq_driver_stats, | 135 | { " fcs", offsetof(struct vmxnet3_rq_driver_stats, |
131 | drop_fcs) }, | 136 | drop_fcs) }, |
132 | { "rx buf alloc fail", offsetof(struct vmxnet3_rq_driver_stats, | 137 | { " rx buf alloc fail", offsetof(struct vmxnet3_rq_driver_stats, |
133 | rx_buf_alloc_failure) }, | 138 | rx_buf_alloc_failure) }, |
134 | }; | 139 | }; |
135 | 140 | ||
136 | /* gloabl stats maintained by the driver */ | 141 | /* gloabl stats maintained by the driver */ |
137 | static const struct vmxnet3_stat_desc | 142 | static const struct vmxnet3_stat_desc |
138 | vmxnet3_global_stats[] = { | 143 | vmxnet3_global_stats[] = { |
139 | /* description, offset */ | 144 | /* description, offset */ |
140 | { "tx timeout count", offsetof(struct vmxnet3_adapter, | 145 | { "tx timeout count", offsetof(struct vmxnet3_adapter, |
141 | tx_timeout_count) } | 146 | tx_timeout_count) } |
142 | }; | 147 | }; |
143 | 148 | ||
@@ -151,12 +156,15 @@ vmxnet3_get_stats(struct net_device *netdev) | |||
151 | struct UPT1_TxStats *devTxStats; | 156 | struct UPT1_TxStats *devTxStats; |
152 | struct UPT1_RxStats *devRxStats; | 157 | struct UPT1_RxStats *devRxStats; |
153 | struct net_device_stats *net_stats = &netdev->stats; | 158 | struct net_device_stats *net_stats = &netdev->stats; |
159 | unsigned long flags; | ||
154 | int i; | 160 | int i; |
155 | 161 | ||
156 | adapter = netdev_priv(netdev); | 162 | adapter = netdev_priv(netdev); |
157 | 163 | ||
158 | /* Collect the dev stats into the shared area */ | 164 | /* Collect the dev stats into the shared area */ |
165 | spin_lock_irqsave(&adapter->cmd_lock, flags); | ||
159 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS); | 166 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS); |
167 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); | ||
160 | 168 | ||
161 | memset(net_stats, 0, sizeof(*net_stats)); | 169 | memset(net_stats, 0, sizeof(*net_stats)); |
162 | for (i = 0; i < adapter->num_tx_queues; i++) { | 170 | for (i = 0; i < adapter->num_tx_queues; i++) { |
@@ -193,12 +201,15 @@ vmxnet3_get_stats(struct net_device *netdev) | |||
193 | static int | 201 | static int |
194 | vmxnet3_get_sset_count(struct net_device *netdev, int sset) | 202 | vmxnet3_get_sset_count(struct net_device *netdev, int sset) |
195 | { | 203 | { |
204 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | ||
196 | switch (sset) { | 205 | switch (sset) { |
197 | case ETH_SS_STATS: | 206 | case ETH_SS_STATS: |
198 | return ARRAY_SIZE(vmxnet3_tq_dev_stats) + | 207 | return (ARRAY_SIZE(vmxnet3_tq_dev_stats) + |
199 | ARRAY_SIZE(vmxnet3_tq_driver_stats) + | 208 | ARRAY_SIZE(vmxnet3_tq_driver_stats)) * |
200 | ARRAY_SIZE(vmxnet3_rq_dev_stats) + | 209 | adapter->num_tx_queues + |
201 | ARRAY_SIZE(vmxnet3_rq_driver_stats) + | 210 | (ARRAY_SIZE(vmxnet3_rq_dev_stats) + |
211 | ARRAY_SIZE(vmxnet3_rq_driver_stats)) * | ||
212 | adapter->num_rx_queues + | ||
202 | ARRAY_SIZE(vmxnet3_global_stats); | 213 | ARRAY_SIZE(vmxnet3_global_stats); |
203 | default: | 214 | default: |
204 | return -EOPNOTSUPP; | 215 | return -EOPNOTSUPP; |
@@ -206,10 +217,16 @@ vmxnet3_get_sset_count(struct net_device *netdev, int sset) | |||
206 | } | 217 | } |
207 | 218 | ||
208 | 219 | ||
220 | /* Should be multiple of 4 */ | ||
221 | #define NUM_TX_REGS 8 | ||
222 | #define NUM_RX_REGS 12 | ||
223 | |||
209 | static int | 224 | static int |
210 | vmxnet3_get_regs_len(struct net_device *netdev) | 225 | vmxnet3_get_regs_len(struct net_device *netdev) |
211 | { | 226 | { |
212 | return 20 * sizeof(u32); | 227 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); |
228 | return (adapter->num_tx_queues * NUM_TX_REGS * sizeof(u32) + | ||
229 | adapter->num_rx_queues * NUM_RX_REGS * sizeof(u32)); | ||
213 | } | 230 | } |
214 | 231 | ||
215 | 232 | ||
@@ -240,29 +257,37 @@ vmxnet3_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) | |||
240 | static void | 257 | static void |
241 | vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf) | 258 | vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf) |
242 | { | 259 | { |
260 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | ||
243 | if (stringset == ETH_SS_STATS) { | 261 | if (stringset == ETH_SS_STATS) { |
244 | int i; | 262 | int i, j; |
245 | 263 | for (j = 0; j < adapter->num_tx_queues; j++) { | |
246 | for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) { | 264 | for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) { |
247 | memcpy(buf, vmxnet3_tq_dev_stats[i].desc, | 265 | memcpy(buf, vmxnet3_tq_dev_stats[i].desc, |
248 | ETH_GSTRING_LEN); | 266 | ETH_GSTRING_LEN); |
249 | buf += ETH_GSTRING_LEN; | 267 | buf += ETH_GSTRING_LEN; |
250 | } | 268 | } |
251 | for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++) { | 269 | for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); |
252 | memcpy(buf, vmxnet3_tq_driver_stats[i].desc, | 270 | i++) { |
253 | ETH_GSTRING_LEN); | 271 | memcpy(buf, vmxnet3_tq_driver_stats[i].desc, |
254 | buf += ETH_GSTRING_LEN; | 272 | ETH_GSTRING_LEN); |
255 | } | 273 | buf += ETH_GSTRING_LEN; |
256 | for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) { | 274 | } |
257 | memcpy(buf, vmxnet3_rq_dev_stats[i].desc, | ||
258 | ETH_GSTRING_LEN); | ||
259 | buf += ETH_GSTRING_LEN; | ||
260 | } | 275 | } |
261 | for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++) { | 276 | |
262 | memcpy(buf, vmxnet3_rq_driver_stats[i].desc, | 277 | for (j = 0; j < adapter->num_rx_queues; j++) { |
263 | ETH_GSTRING_LEN); | 278 | for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) { |
264 | buf += ETH_GSTRING_LEN; | 279 | memcpy(buf, vmxnet3_rq_dev_stats[i].desc, |
280 | ETH_GSTRING_LEN); | ||
281 | buf += ETH_GSTRING_LEN; | ||
282 | } | ||
283 | for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); | ||
284 | i++) { | ||
285 | memcpy(buf, vmxnet3_rq_driver_stats[i].desc, | ||
286 | ETH_GSTRING_LEN); | ||
287 | buf += ETH_GSTRING_LEN; | ||
288 | } | ||
265 | } | 289 | } |
290 | |||
266 | for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++) { | 291 | for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++) { |
267 | memcpy(buf, vmxnet3_global_stats[i].desc, | 292 | memcpy(buf, vmxnet3_global_stats[i].desc, |
268 | ETH_GSTRING_LEN); | 293 | ETH_GSTRING_LEN); |
@@ -277,6 +302,7 @@ vmxnet3_set_flags(struct net_device *netdev, u32 data) | |||
277 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | 302 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); |
278 | u8 lro_requested = (data & ETH_FLAG_LRO) == 0 ? 0 : 1; | 303 | u8 lro_requested = (data & ETH_FLAG_LRO) == 0 ? 0 : 1; |
279 | u8 lro_present = (netdev->features & NETIF_F_LRO) == 0 ? 0 : 1; | 304 | u8 lro_present = (netdev->features & NETIF_F_LRO) == 0 ? 0 : 1; |
305 | unsigned long flags; | ||
280 | 306 | ||
281 | if (data & ~ETH_FLAG_LRO) | 307 | if (data & ~ETH_FLAG_LRO) |
282 | return -EOPNOTSUPP; | 308 | return -EOPNOTSUPP; |
@@ -292,8 +318,10 @@ vmxnet3_set_flags(struct net_device *netdev, u32 data) | |||
292 | else | 318 | else |
293 | adapter->shared->devRead.misc.uptFeatures &= | 319 | adapter->shared->devRead.misc.uptFeatures &= |
294 | ~UPT1_F_LRO; | 320 | ~UPT1_F_LRO; |
321 | spin_lock_irqsave(&adapter->cmd_lock, flags); | ||
295 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | 322 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, |
296 | VMXNET3_CMD_UPDATE_FEATURE); | 323 | VMXNET3_CMD_UPDATE_FEATURE); |
324 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); | ||
297 | } | 325 | } |
298 | return 0; | 326 | return 0; |
299 | } | 327 | } |
@@ -303,30 +331,41 @@ vmxnet3_get_ethtool_stats(struct net_device *netdev, | |||
303 | struct ethtool_stats *stats, u64 *buf) | 331 | struct ethtool_stats *stats, u64 *buf) |
304 | { | 332 | { |
305 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | 333 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); |
334 | unsigned long flags; | ||
306 | u8 *base; | 335 | u8 *base; |
307 | int i; | 336 | int i; |
308 | int j = 0; | 337 | int j = 0; |
309 | 338 | ||
339 | spin_lock_irqsave(&adapter->cmd_lock, flags); | ||
310 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS); | 340 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS); |
341 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); | ||
311 | 342 | ||
312 | /* this does assume each counter is 64-bit wide */ | 343 | /* this does assume each counter is 64-bit wide */ |
313 | /* TODO change this for multiple queues */ | 344 | for (j = 0; j < adapter->num_tx_queues; j++) { |
314 | 345 | base = (u8 *)&adapter->tqd_start[j].stats; | |
315 | base = (u8 *)&adapter->tqd_start[j].stats; | 346 | *buf++ = (u64)j; |
316 | for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) | 347 | for (i = 1; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) |
317 | *buf++ = *(u64 *)(base + vmxnet3_tq_dev_stats[i].offset); | 348 | *buf++ = *(u64 *)(base + |
318 | 349 | vmxnet3_tq_dev_stats[i].offset); | |
319 | base = (u8 *)&adapter->tx_queue[j].stats; | 350 | |
320 | for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++) | 351 | base = (u8 *)&adapter->tx_queue[j].stats; |
321 | *buf++ = *(u64 *)(base + vmxnet3_tq_driver_stats[i].offset); | 352 | for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++) |
322 | 353 | *buf++ = *(u64 *)(base + | |
323 | base = (u8 *)&adapter->rqd_start[j].stats; | 354 | vmxnet3_tq_driver_stats[i].offset); |
324 | for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) | 355 | } |
325 | *buf++ = *(u64 *)(base + vmxnet3_rq_dev_stats[i].offset); | ||
326 | 356 | ||
326 | 356 | || 327 | base = (u8 *)&adapter->rx_queue[j].stats; | 357 | for (j = 0; j < adapter->num_rx_queues; j++) { |
328 | for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++) | 358 | base = (u8 *)&adapter->rqd_start[j].stats; |
329 | *buf++ = *(u64 *)(base + vmxnet3_rq_driver_stats[i].offset); | 359 | *buf++ = (u64) j; |
360 | for (i = 1; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) | ||
361 | *buf++ = *(u64 *)(base + | ||
362 | vmxnet3_rq_dev_stats[i].offset); | ||
363 | |||
364 | base = (u8 *)&adapter->rx_queue[j].stats; | ||
365 | for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++) | ||
366 | *buf++ = *(u64 *)(base + | ||
367 | vmxnet3_rq_driver_stats[i].offset); | ||
368 | } | ||
330 | 369 | ||
331 | base = (u8 *)adapter; | 370 | base = (u8 *)adapter; |
332 | for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++) | 371 | for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++) |
@@ -339,7 +378,7 @@ vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) | |||
339 | { | 378 | { |
340 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | 379 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); |
341 | u32 *buf = p; | 380 | u32 *buf = p; |
342 | int i = 0; | 381 | int i = 0, j = 0; |
343 | 382 | ||
344 | memset(p, 0, vmxnet3_get_regs_len(netdev)); | 383 | memset(p, 0, vmxnet3_get_regs_len(netdev)); |
345 | 384 | ||
@@ -348,31 +387,35 @@ vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) | |||
348 | /* Update vmxnet3_get_regs_len if we want to dump more registers */ | 387 | /* Update vmxnet3_get_regs_len if we want to dump more registers */ |
349 | 388 | ||
350 | /* make each ring use multiple of 16 bytes */ | 389 | /* make each ring use multiple of 16 bytes */ |
351 | /* TODO change this for multiple queues */ | 390 | for (i = 0; i < adapter->num_tx_queues; i++) { |
352 | buf[0] = adapter->tx_queue[i].tx_ring.next2fill; | 391 | buf[j++] = adapter->tx_queue[i].tx_ring.next2fill; |
353 | buf[1] = adapter->tx_queue[i].tx_ring.next2comp; | 392 | buf[j++] = adapter->tx_queue[i].tx_ring.next2comp; |
354 | buf[2] = adapter->tx_queue[i].tx_ring.gen; | 393 | buf[j++] = adapter->tx_queue[i].tx_ring.gen; |
355 | buf[3] = 0; | 394 | buf[j++] = 0; |
356 | 395 | ||
357 | buf[4] = adapter->tx_queue[i].comp_ring.next2proc; | 396 | buf[j++] = adapter->tx_queue[i].comp_ring.next2proc; |
358 | buf[5] = adapter->tx_queue[i].comp_ring.gen; | 397 | buf[j++] = adapter->tx_queue[i].comp_ring.gen; |
359 | buf[6] = adapter->tx_queue[i].stopped; | 398 | buf[j++] = adapter->tx_queue[i].stopped; |
360 | buf[7] = 0; | 399 | buf[j++] = 0; |
361 | 400 | } | |
362 | buf[8] = adapter->rx_queue[i].rx_ring[0].next2fill; | 401 | |
363 | buf[9] = adapter->rx_queue[i].rx_ring[0].next2comp; | 402 | for (i = 0; i < adapter->num_rx_queues; i++) { |
364 | buf[10] = adapter->rx_queue[i].rx_ring[0].gen; | 403 | buf[j++] = adapter->rx_queue[i].rx_ring[0].next2fill; |
365 | buf[11] = 0; | 404 | buf[j++] = adapter->rx_queue[i].rx_ring[0].next2comp; |
366 | 405 | buf[j++] = adapter->rx_queue[i].rx_ring[0].gen; | |
367 | buf[12] = adapter->rx_queue[i].rx_ring[1].next2fill; | 406 | buf[j++] = 0; |
368 | buf[13] = adapter->rx_queue[i].rx_ring[1].next2comp; | 407 | |
369 | buf[14] = adapter->rx_queue[i].rx_ring[1].gen; | 408 | buf[j++] = adapter->rx_queue[i].rx_ring[1].next2fill; |
370 | buf[15] = 0; | 409 | buf[j++] = adapter->rx_queue[i].rx_ring[1].next2comp; |
371 | 410 | buf[j++] = adapter->rx_queue[i].rx_ring[1].gen; | |
372 | buf[16] = adapter->rx_queue[i].comp_ring.next2proc; | 411 | buf[j++] = 0; |
373 | buf[17] = adapter->rx_queue[i].comp_ring.gen; | 412 | |
374 | buf[18] = 0; | 413 | buf[j++] = adapter->rx_queue[i].comp_ring.next2proc; |
375 | buf[19] = 0; | 414 | buf[j++] = adapter->rx_queue[i].comp_ring.gen; |
415 | buf[j++] = 0; | ||
416 | buf[j++] = 0; | ||
417 | } | ||
418 | |||
376 | } | 419 | } |
377 | 420 | ||
378 | 421 | ||
@@ -574,6 +617,7 @@ vmxnet3_set_rss_indir(struct net_device *netdev, | |||
574 | const struct ethtool_rxfh_indir *p) | 617 | const struct ethtool_rxfh_indir *p) |
575 | { | 618 | { |
576 | unsigned int i; | 619 | unsigned int i; |
620 | unsigned long flags; | ||
577 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); | 621 | struct vmxnet3_adapter *adapter = netdev_priv(netdev); |
578 | struct UPT1_RSSConf *rssConf = adapter->rss_conf; | 622 | struct UPT1_RSSConf *rssConf = adapter->rss_conf; |
579 | 623 | ||
@@ -592,8 +636,10 @@ vmxnet3_set_rss_indir(struct net_device *netdev, | |||
592 | for (i = 0; i < rssConf->indTableSize; i++) | 636 | for (i = 0; i < rssConf->indTableSize; i++) |
593 | rssConf->indTable[i] = p->ring_index[i]; | 637 | rssConf->indTable[i] = p->ring_index[i]; |
594 | 638 | ||
639 | spin_lock_irqsave(&adapter->cmd_lock, flags); | ||
595 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, | 640 | VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, |
596 | VMXNET3_CMD_UPDATE_RSSIDT); | 641 | VMXNET3_CMD_UPDATE_RSSIDT); |
642 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); | ||
597 | 643 | ||
598 | return 0; | 644 | return 0; |
599 | 645 | ||
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h index 7fadeed37f03..fb5d245ac878 100644 --- a/drivers/net/vmxnet3/vmxnet3_int.h +++ b/drivers/net/vmxnet3/vmxnet3_int.h | |||
@@ -68,10 +68,10 @@ | |||
68 | /* | 68 | /* |
69 | * Version numbers | 69 | * Version numbers |
70 | */ | 70 | */ |
71 | #define VMXNET3_DRIVER_VERSION_STRING "1.0.16.0-k" | 71 | #define VMXNET3_DRIVER_VERSION_STRING "1.0.25.0-k" |
72 | 72 | ||
73 | /* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */ | 73 | /* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */ |
74 | #define VMXNET3_DRIVER_VERSION_NUM 0x01001000 | 74 | #define VMXNET3_DRIVER_VERSION_NUM 0x01001900 |
75 | 75 | ||
76 | #if defined(CONFIG_PCI_MSI) | 76 | #if defined(CONFIG_PCI_MSI) |
77 | /* RSS only makes sense if MSI-X is supported. */ | 77 | /* RSS only makes sense if MSI-X is supported. */ |
@@ -289,7 +289,7 @@ struct vmxnet3_rx_queue { | |||
289 | 289 | ||
290 | #define VMXNET3_LINUX_MAX_MSIX_VECT (VMXNET3_DEVICE_MAX_TX_QUEUES + \ | 290 | #define VMXNET3_LINUX_MAX_MSIX_VECT (VMXNET3_DEVICE_MAX_TX_QUEUES + \ |
291 | VMXNET3_DEVICE_MAX_RX_QUEUES + 1) | 291 | VMXNET3_DEVICE_MAX_RX_QUEUES + 1) |
292 | #define VMXNET3_LINUX_MIN_MSIX_VECT 3 /* 1 for each : tx, rx and event */ | 292 | #define VMXNET3_LINUX_MIN_MSIX_VECT 2 /* 1 for tx-rx pair and 1 for event */ |
293 | 293 | ||
294 | 294 | ||
295 | struct vmxnet3_intr { | 295 | struct vmxnet3_intr { |
@@ -317,6 +317,7 @@ struct vmxnet3_adapter { | |||
317 | struct vmxnet3_rx_queue rx_queue[VMXNET3_DEVICE_MAX_RX_QUEUES]; | 317 | struct vmxnet3_rx_queue rx_queue[VMXNET3_DEVICE_MAX_RX_QUEUES]; |
318 | struct vlan_group *vlan_grp; | 318 | struct vlan_group *vlan_grp; |
319 | struct vmxnet3_intr intr; | 319 | struct vmxnet3_intr intr; |
320 | spinlock_t cmd_lock; | ||
320 | struct Vmxnet3_DriverShared *shared; | 321 | struct Vmxnet3_DriverShared *shared; |
321 | struct Vmxnet3_PMConf *pm_conf; | 322 | struct Vmxnet3_PMConf *pm_conf; |
322 | struct Vmxnet3_TxQueueDesc *tqd_start; /* all tx queue desc */ | 323 | struct Vmxnet3_TxQueueDesc *tqd_start; /* all tx queue desc */ |
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c index 1ac9b568f1b0..c81a6512c683 100644 --- a/drivers/net/vxge/vxge-main.c +++ b/drivers/net/vxge/vxge-main.c | |||
@@ -4120,6 +4120,7 @@ int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override) | |||
4120 | "hotplug event.\n"); | 4120 | "hotplug event.\n"); |
4121 | 4121 | ||
4122 | out: | 4122 | out: |
4123 | release_firmware(fw); | ||
4123 | return ret; | 4124 | return ret; |
4124 | } | 4125 | } |
4125 | 4126 | ||
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 7a7a1b664781..2ac8f6aff5a4 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c | |||
@@ -831,12 +831,14 @@ tx_drop: | |||
831 | return NETDEV_TX_OK; | 831 | return NETDEV_TX_OK; |
832 | } | 832 | } |
833 | 833 | ||
834 | static int qeth_l2_open(struct net_device *dev) | 834 | static int __qeth_l2_open(struct net_device *dev) |
835 | { | 835 | { |
836 | struct qeth_card *card = dev->ml_priv; | 836 | struct qeth_card *card = dev->ml_priv; |
837 | int rc = 0; | 837 | int rc = 0; |
838 | 838 | ||
839 | QETH_CARD_TEXT(card, 4, "qethopen"); | 839 | QETH_CARD_TEXT(card, 4, "qethopen"); |
840 | if (card->state == CARD_STATE_UP) | ||
841 | return rc; | ||
840 | if (card->state != CARD_STATE_SOFTSETUP) | 842 | if (card->state != CARD_STATE_SOFTSETUP) |
841 | return -ENODEV; | 843 | return -ENODEV; |
842 | 844 | ||
@@ -857,6 +859,18 @@ static int qeth_l2_open(struct net_device *dev) | |||
857 | return rc; | 859 | return rc; |
858 | } | 860 | } |
859 | 861 | ||
862 | static int qeth_l2_open(struct net_device *dev) | ||
863 | { | ||
864 | struct qeth_card *card = dev->ml_priv; | ||
865 | |||
866 | QETH_CARD_TEXT(card, 5, "qethope_"); | ||
867 | if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) { | ||
868 | QETH_CARD_TEXT(card, 3, "openREC"); | ||
869 | return -ERESTARTSYS; | ||
870 | } | ||
871 | return __qeth_l2_open(dev); | ||
872 | } | ||
873 | |||
860 | static int qeth_l2_stop(struct net_device *dev) | 874 | static int qeth_l2_stop(struct net_device *dev) |
861 | { | 875 | { |
862 | struct qeth_card *card = dev->ml_priv; | 876 | struct qeth_card *card = dev->ml_priv; |
@@ -1046,7 +1060,7 @@ contin: | |||
1046 | if (recover_flag == CARD_STATE_RECOVER) { | 1060 | if (recover_flag == CARD_STATE_RECOVER) { |
1047 | if (recovery_mode && | 1061 | if (recovery_mode && |
1048 | card->info.type != QETH_CARD_TYPE_OSN) { | 1062 | card->info.type != QETH_CARD_TYPE_OSN) { |
1049 | qeth_l2_open(card->dev); | 1063 | __qeth_l2_open(card->dev); |
1050 | } else { | 1064 | } else { |
1051 | rtnl_lock(); | 1065 | rtnl_lock(); |
1052 | dev_open(card->dev); | 1066 | dev_open(card->dev); |
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index e227e465bfc4..d09b0c44fc3d 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c | |||
@@ -2998,7 +2998,9 @@ static inline void qeth_l3_hdr_csum(struct qeth_card *card, | |||
2998 | */ | 2998 | */ |
2999 | if (iph->protocol == IPPROTO_UDP) | 2999 | if (iph->protocol == IPPROTO_UDP) |
3000 | hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_UDP; | 3000 | hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_UDP; |
3001 | hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_CSUM_TRANSP_REQ; | 3001 | hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_CSUM_TRANSP_REQ | |
3002 | QETH_HDR_EXT_CSUM_HDR_REQ; | ||
3003 | iph->check = 0; | ||
3002 | if (card->options.performance_stats) | 3004 | if (card->options.performance_stats) |
3003 | card->perf_stats.tx_csum++; | 3005 | card->perf_stats.tx_csum++; |
3004 | } | 3006 | } |
@@ -3240,12 +3242,14 @@ tx_drop: | |||
3240 | return NETDEV_TX_OK; | 3242 | return NETDEV_TX_OK; |
3241 | } | 3243 | } |
3242 | 3244 | ||
3243 | static int qeth_l3_open(struct net_device *dev) | 3245 | static int __qeth_l3_open(struct net_device *dev) |
3244 | { | 3246 | { |
3245 | struct qeth_card *card = dev->ml_priv; | 3247 | struct qeth_card *card = dev->ml_priv; |
3246 | int rc = 0; | 3248 | int rc = 0; |
3247 | 3249 | ||
3248 | QETH_CARD_TEXT(card, 4, "qethopen"); | 3250 | QETH_CARD_TEXT(card, 4, "qethopen"); |
3251 | if (card->state == CARD_STATE_UP) | ||
3252 | return rc; | ||
3249 | if (card->state != CARD_STATE_SOFTSETUP) | 3253 | if (card->state != CARD_STATE_SOFTSETUP) |
3250 | return -ENODEV; | 3254 | return -ENODEV; |
3251 | card->data.state = CH_STATE_UP; | 3255 | card->data.state = CH_STATE_UP; |
@@ -3260,6 +3264,18 @@ static int qeth_l3_open(struct net_device *dev) | |||
3260 | return rc; | 3264 | return rc; |
3261 | } | 3265 | } |
3262 | 3266 | ||
3267 | static int qeth_l3_open(struct net_device *dev) | ||
3268 | { | ||
3269 | struct qeth_card *card = dev->ml_priv; | ||
3270 | |||
3271 | QETH_CARD_TEXT(card, 5, "qethope_"); | ||
3272 | if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) { | ||
3273 | QETH_CARD_TEXT(card, 3, "openREC"); | ||
3274 | return -ERESTARTSYS; | ||
3275 | } | ||
3276 | return __qeth_l3_open(dev); | ||
3277 | } | ||
3278 | |||
3263 | static int qeth_l3_stop(struct net_device *dev) | 3279 | static int qeth_l3_stop(struct net_device *dev) |
3264 | { | 3280 | { |
3265 | struct qeth_card *card = dev->ml_priv; | 3281 | struct qeth_card *card = dev->ml_priv; |
@@ -3564,7 +3580,7 @@ contin: | |||
3564 | netif_carrier_off(card->dev); | 3580 | netif_carrier_off(card->dev); |
3565 | if (recover_flag == CARD_STATE_RECOVER) { | 3581 | if (recover_flag == CARD_STATE_RECOVER) { |
3566 | if (recovery_mode) | 3582 | if (recovery_mode) |
3567 | qeth_l3_open(card->dev); | 3583 | __qeth_l3_open(card->dev); |
3568 | else { | 3584 | else { |
3569 | rtnl_lock(); | 3585 | rtnl_lock(); |
3570 | dev_open(card->dev); | 3586 | dev_open(card->dev); |
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 38244f59cdd9..ade0568c07a4 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c | |||
@@ -97,22 +97,26 @@ void vhost_poll_stop(struct vhost_poll *poll) | |||
97 | remove_wait_queue(poll->wqh, &poll->wait); | 97 | remove_wait_queue(poll->wqh, &poll->wait); |
98 | } | 98 | } |
99 | 99 | ||
100 | static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work, | ||
101 | unsigned seq) | ||
102 | { | ||
103 | int left; | ||
104 | spin_lock_irq(&dev->work_lock); | ||
105 | left = seq - work->done_seq; | ||
106 | spin_unlock_irq(&dev->work_lock); | ||
107 | return left <= 0; | ||
108 | } | ||
109 | |||
100 | static void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work) | 110 | static void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work) |
101 | { | 111 | { |
102 | unsigned seq; | 112 | unsigned seq; |
103 | int left; | ||
104 | int flushing; | 113 | int flushing; |
105 | 114 | ||
106 | spin_lock_irq(&dev->work_lock); | 115 | spin_lock_irq(&dev->work_lock); |
107 | seq = work->queue_seq; | 116 | seq = work->queue_seq; |
108 | work->flushing++; | 117 | work->flushing++; |
109 | spin_unlock_irq(&dev->work_lock); | 118 | spin_unlock_irq(&dev->work_lock); |
110 | wait_event(work->done, ({ | 119 | wait_event(work->done, vhost_work_seq_done(dev, work, seq)); |
111 | spin_lock_irq(&dev->work_lock); | ||
112 | left = seq - work->done_seq <= 0; | ||
113 | spin_unlock_irq(&dev->work_lock); | ||
114 | left; | ||
115 | })); | ||
116 | spin_lock_irq(&dev->work_lock); | 120 | spin_lock_irq(&dev->work_lock); |
117 | flushing = --work->flushing; | 121 | flushing = --work->flushing; |
118 | spin_unlock_irq(&dev->work_lock); | 122 | spin_unlock_irq(&dev->work_lock); |
diff --git a/include/linux/bfin_mac.h b/include/linux/bfin_mac.h index 904dec7d03a1..a69554ef8476 100644 --- a/include/linux/bfin_mac.h +++ b/include/linux/bfin_mac.h | |||
@@ -24,6 +24,7 @@ struct bfin_mii_bus_platform_data { | |||
24 | const unsigned short *mac_peripherals; | 24 | const unsigned short *mac_peripherals; |
25 | int phy_mode; | 25 | int phy_mode; |
26 | unsigned int phy_mask; | 26 | unsigned int phy_mask; |
27 | unsigned short vlan1_mask, vlan2_mask; | ||
27 | }; | 28 | }; |
28 | 29 | ||
29 | #endif | 30 | #endif |
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h index f16a01081e15..ab68f785fd19 100644 --- a/include/linux/etherdevice.h +++ b/include/linux/etherdevice.h | |||
@@ -48,8 +48,10 @@ extern int eth_validate_addr(struct net_device *dev); | |||
48 | 48 | ||
49 | 49 | ||
50 | 50 | ||
51 | extern struct net_device *alloc_etherdev_mq(int sizeof_priv, unsigned int queue_count); | 51 | extern struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs, |
52 | unsigned int rxqs); | ||
52 | #define alloc_etherdev(sizeof_priv) alloc_etherdev_mq(sizeof_priv, 1) | 53 | #define alloc_etherdev(sizeof_priv) alloc_etherdev_mq(sizeof_priv, 1) |
54 | #define alloc_etherdev_mq(sizeof_priv, count) alloc_etherdev_mqs(sizeof_priv, count, count) | ||
53 | 55 | ||
54 | /** | 56 | /** |
55 | * is_zero_ether_addr - Determine if given Ethernet address is all zeros. | 57 | * is_zero_ether_addr - Determine if given Ethernet address is all zeros. |
@@ -97,6 +99,17 @@ static inline int is_broadcast_ether_addr(const u8 *addr) | |||
97 | } | 99 | } |
98 | 100 | ||
99 | /** | 101 | /** |
102 | * is_unicast_ether_addr - Determine if the Ethernet address is unicast | ||
103 | * @addr: Pointer to a six-byte array containing the Ethernet address | ||
104 | * | ||
105 | * Return true if the address is a unicast address. | ||
106 | */ | ||
107 | static inline int is_unicast_ether_addr(const u8 *addr) | ||
108 | { | ||
109 | return !is_multicast_ether_addr(addr); | ||
110 | } | ||
111 | |||
112 | /** | ||
100 | * is_valid_ether_addr - Determine if the given Ethernet address is valid | 113 | * is_valid_ether_addr - Determine if the given Ethernet address is valid |
101 | * @addr: Pointer to a six-byte array containing the Ethernet address | 114 | * @addr: Pointer to a six-byte array containing the Ethernet address |
102 | * | 115 | * |
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h index f7e73c338c40..dd3f20139640 100644 --- a/include/linux/if_bridge.h +++ b/include/linux/if_bridge.h | |||
@@ -103,7 +103,7 @@ struct __fdb_entry { | |||
103 | 103 | ||
104 | extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *)); | 104 | extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *)); |
105 | 105 | ||
106 | typedef int (*br_should_route_hook_t)(struct sk_buff *skb); | 106 | typedef int br_should_route_hook_t(struct sk_buff *skb); |
107 | extern br_should_route_hook_t __rcu *br_should_route_hook; | 107 | extern br_should_route_hook_t __rcu *br_should_route_hook; |
108 | 108 | ||
109 | #endif | 109 | #endif |
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index de2bfe6da359..d971346b0340 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -520,9 +520,6 @@ struct netdev_queue { | |||
520 | * please use this field instead of dev->trans_start | 520 | * please use this field instead of dev->trans_start |
521 | */ | 521 | */ |
522 | unsigned long trans_start; | 522 | unsigned long trans_start; |
523 | u64 tx_bytes; | ||
524 | u64 tx_packets; | ||
525 | u64 tx_dropped; | ||
526 | } ____cacheline_aligned_in_smp; | 523 | } ____cacheline_aligned_in_smp; |
527 | 524 | ||
528 | static inline int netdev_queue_numa_node_read(const struct netdev_queue *q) | 525 | static inline int netdev_queue_numa_node_read(const struct netdev_queue *q) |
@@ -2191,11 +2188,15 @@ static inline void netif_addr_unlock_bh(struct net_device *dev) | |||
2191 | extern void ether_setup(struct net_device *dev); | 2188 | extern void ether_setup(struct net_device *dev); |
2192 | 2189 | ||
2193 | /* Support for loadable net-drivers */ | 2190 | /* Support for loadable net-drivers */ |
2194 | extern struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, | 2191 | extern struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, |
2195 | void (*setup)(struct net_device *), | 2192 | void (*setup)(struct net_device *), |
2196 | unsigned int queue_count); | 2193 | unsigned int txqs, unsigned int rxqs); |
2197 | #define alloc_netdev(sizeof_priv, name, setup) \ | 2194 | #define alloc_netdev(sizeof_priv, name, setup) \ |
2198 | alloc_netdev_mq(sizeof_priv, name, setup, 1) | 2195 | alloc_netdev_mqs(sizeof_priv, name, setup, 1, 1) |
2196 | |||
2197 | #define alloc_netdev_mq(sizeof_priv, name, setup, count) \ | ||
2198 | alloc_netdev_mqs(sizeof_priv, name, setup, count, count) | ||
2199 | |||
2199 | extern int register_netdev(struct net_device *dev); | 2200 | extern int register_netdev(struct net_device *dev); |
2200 | extern void unregister_netdev(struct net_device *dev); | 2201 | extern void unregister_netdev(struct net_device *dev); |
2201 | 2202 | ||
@@ -2261,8 +2262,6 @@ extern void dev_load(struct net *net, const char *name); | |||
2261 | extern void dev_mcast_init(void); | 2262 | extern void dev_mcast_init(void); |
2262 | extern struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, | 2263 | extern struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, |
2263 | struct rtnl_link_stats64 *storage); | 2264 | struct rtnl_link_stats64 *storage); |
2264 | extern void dev_txq_stats_fold(const struct net_device *dev, | ||
2265 | struct rtnl_link_stats64 *stats); | ||
2266 | 2265 | ||
2267 | extern int netdev_max_backlog; | 2266 | extern int netdev_max_backlog; |
2268 | extern int netdev_tstamp_prequeue; | 2267 | extern int netdev_tstamp_prequeue; |
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index 742bec051440..6712e713b299 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h | |||
@@ -472,7 +472,7 @@ extern void xt_free_table_info(struct xt_table_info *info); | |||
472 | * necessary for reading the counters. | 472 | * necessary for reading the counters. |
473 | */ | 473 | */ |
474 | struct xt_info_lock { | 474 | struct xt_info_lock { |
475 | spinlock_t lock; | 475 | seqlock_t lock; |
476 | unsigned char readers; | 476 | unsigned char readers; |
477 | }; | 477 | }; |
478 | DECLARE_PER_CPU(struct xt_info_lock, xt_info_locks); | 478 | DECLARE_PER_CPU(struct xt_info_lock, xt_info_locks); |
@@ -497,7 +497,7 @@ static inline void xt_info_rdlock_bh(void) | |||
497 | local_bh_disable(); | 497 | local_bh_disable(); |
498 | lock = &__get_cpu_var(xt_info_locks); | 498 | lock = &__get_cpu_var(xt_info_locks); |
499 | if (likely(!lock->readers++)) | 499 | if (likely(!lock->readers++)) |
500 | spin_lock(&lock->lock); | 500 | write_seqlock(&lock->lock); |
501 | } | 501 | } |
502 | 502 | ||
503 | static inline void xt_info_rdunlock_bh(void) | 503 | static inline void xt_info_rdunlock_bh(void) |
@@ -505,7 +505,7 @@ static inline void xt_info_rdunlock_bh(void) | |||
505 | struct xt_info_lock *lock = &__get_cpu_var(xt_info_locks); | 505 | struct xt_info_lock *lock = &__get_cpu_var(xt_info_locks); |
506 | 506 | ||
507 | if (likely(!--lock->readers)) | 507 | if (likely(!--lock->readers)) |
508 | spin_unlock(&lock->lock); | 508 | write_sequnlock(&lock->lock); |
509 | local_bh_enable(); | 509 | local_bh_enable(); |
510 | } | 510 | } |
511 | 511 | ||
@@ -516,12 +516,12 @@ static inline void xt_info_rdunlock_bh(void) | |||
516 | */ | 516 | */ |
517 | static inline void xt_info_wrlock(unsigned int cpu) | 517 | static inline void xt_info_wrlock(unsigned int cpu) |
518 | { | 518 | { |
519 | spin_lock(&per_cpu(xt_info_locks, cpu).lock); | 519 | write_seqlock(&per_cpu(xt_info_locks, cpu).lock); |
520 | } | 520 | } |
521 | 521 | ||
522 | static inline void xt_info_wrunlock(unsigned int cpu) | 522 | static inline void xt_info_wrunlock(unsigned int cpu) |
523 | { | 523 | { |
524 | spin_unlock(&per_cpu(xt_info_locks, cpu).lock); | 524 | write_sequnlock(&per_cpu(xt_info_locks, cpu).lock); |
525 | } | 525 | } |
526 | 526 | ||
527 | /* | 527 | /* |
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 20ec0a64cb9f..bf221d65d9ad 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h | |||
@@ -255,6 +255,11 @@ typedef unsigned int sk_buff_data_t; | |||
255 | typedef unsigned char *sk_buff_data_t; | 255 | typedef unsigned char *sk_buff_data_t; |
256 | #endif | 256 | #endif |
257 | 257 | ||
258 | #if defined(CONFIG_NF_DEFRAG_IPV4) || defined(CONFIG_NF_DEFRAG_IPV4_MODULE) || \ | ||
259 | defined(CONFIG_NF_DEFRAG_IPV6) || defined(CONFIG_NF_DEFRAG_IPV6_MODULE) | ||
260 | #define NET_SKBUFF_NF_DEFRAG_NEEDED 1 | ||
261 | #endif | ||
262 | |||
258 | /** | 263 | /** |
259 | * struct sk_buff - socket buffer | 264 | * struct sk_buff - socket buffer |
260 | * @next: Next buffer in list | 265 | * @next: Next buffer in list |
@@ -362,6 +367,8 @@ struct sk_buff { | |||
362 | void (*destructor)(struct sk_buff *skb); | 367 | void (*destructor)(struct sk_buff *skb); |
363 | #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) | 368 | #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) |
364 | struct nf_conntrack *nfct; | 369 | struct nf_conntrack *nfct; |
370 | #endif | ||
371 | #ifdef NET_SKBUFF_NF_DEFRAG_NEEDED | ||
365 | struct sk_buff *nfct_reasm; | 372 | struct sk_buff *nfct_reasm; |
366 | #endif | 373 | #endif |
367 | #ifdef CONFIG_BRIDGE_NETFILTER | 374 | #ifdef CONFIG_BRIDGE_NETFILTER |
@@ -2057,6 +2064,8 @@ static inline void nf_conntrack_get(struct nf_conntrack *nfct) | |||
2057 | if (nfct) | 2064 | if (nfct) |
2058 | atomic_inc(&nfct->use); | 2065 | atomic_inc(&nfct->use); |
2059 | } | 2066 | } |
2067 | #endif | ||
2068 | #ifdef NET_SKBUFF_NF_DEFRAG_NEEDED | ||
2060 | static inline void nf_conntrack_get_reasm(struct sk_buff *skb) | 2069 | static inline void nf_conntrack_get_reasm(struct sk_buff *skb) |
2061 | { | 2070 | { |
2062 | if (skb) | 2071 | if (skb) |
@@ -2085,6 +2094,8 @@ static inline void nf_reset(struct sk_buff *skb) | |||
2085 | #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) | 2094 | #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) |
2086 | nf_conntrack_put(skb->nfct); | 2095 | nf_conntrack_put(skb->nfct); |
2087 | skb->nfct = NULL; | 2096 | skb->nfct = NULL; |
2097 | #endif | ||
2098 | #ifdef NET_SKBUFF_NF_DEFRAG_NEEDED | ||
2088 | nf_conntrack_put_reasm(skb->nfct_reasm); | 2099 | nf_conntrack_put_reasm(skb->nfct_reasm); |
2089 | skb->nfct_reasm = NULL; | 2100 | skb->nfct_reasm = NULL; |
2090 | #endif | 2101 | #endif |
@@ -2101,6 +2112,8 @@ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src) | |||
2101 | dst->nfct = src->nfct; | 2112 | dst->nfct = src->nfct; |
2102 | nf_conntrack_get(src->nfct); | 2113 | nf_conntrack_get(src->nfct); |
2103 | dst->nfctinfo = src->nfctinfo; | 2114 | dst->nfctinfo = src->nfctinfo; |
2115 | #endif | ||
2116 | #ifdef NET_SKBUFF_NF_DEFRAG_NEEDED | ||
2104 | dst->nfct_reasm = src->nfct_reasm; | 2117 | dst->nfct_reasm = src->nfct_reasm; |
2105 | nf_conntrack_get_reasm(src->nfct_reasm); | 2118 | nf_conntrack_get_reasm(src->nfct_reasm); |
2106 | #endif | 2119 | #endif |
@@ -2114,6 +2127,8 @@ static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src) | |||
2114 | { | 2127 | { |
2115 | #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) | 2128 | #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) |
2116 | nf_conntrack_put(dst->nfct); | 2129 | nf_conntrack_put(dst->nfct); |
2130 | #endif | ||
2131 | #ifdef NET_SKBUFF_NF_DEFRAG_NEEDED | ||
2117 | nf_conntrack_put_reasm(dst->nfct_reasm); | 2132 | nf_conntrack_put_reasm(dst->nfct_reasm); |
2118 | #endif | 2133 | #endif |
2119 | #ifdef CONFIG_BRIDGE_NETFILTER | 2134 | #ifdef CONFIG_BRIDGE_NETFILTER |
diff --git a/include/net/ah.h b/include/net/ah.h index f0129f79a31a..ca95b98969dd 100644 --- a/include/net/ah.h +++ b/include/net/ah.h | |||
@@ -4,7 +4,7 @@ | |||
4 | #include <linux/skbuff.h> | 4 | #include <linux/skbuff.h> |
5 | 5 | ||
6 | /* This is the maximum truncated ICV length that we know of. */ | 6 | /* This is the maximum truncated ICV length that we know of. */ |
7 | #define MAX_AH_AUTH_LEN 12 | 7 | #define MAX_AH_AUTH_LEN 64 |
8 | 8 | ||
9 | struct crypto_ahash; | 9 | struct crypto_ahash; |
10 | 10 | ||
diff --git a/include/net/arp.h b/include/net/arp.h index f4cf6ce66586..91f0568a04ef 100644 --- a/include/net/arp.h +++ b/include/net/arp.h | |||
@@ -25,5 +25,6 @@ extern struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip, | |||
25 | const unsigned char *src_hw, | 25 | const unsigned char *src_hw, |
26 | const unsigned char *target_hw); | 26 | const unsigned char *target_hw); |
27 | extern void arp_xmit(struct sk_buff *skb); | 27 | extern void arp_xmit(struct sk_buff *skb); |
28 | int arp_invalidate(struct net_device *dev, __be32 ip); | ||
28 | 29 | ||
29 | #endif /* _ARP_H */ | 30 | #endif /* _ARP_H */ |
diff --git a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h index 1ee717eb5b09..a4c993685795 100644 --- a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h +++ b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h | |||
@@ -7,16 +7,6 @@ extern struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6; | |||
7 | extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6; | 7 | extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6; |
8 | extern struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6; | 8 | extern struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6; |
9 | 9 | ||
10 | extern int nf_ct_frag6_init(void); | ||
11 | extern void nf_ct_frag6_cleanup(void); | ||
12 | extern struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user); | ||
13 | extern void nf_ct_frag6_output(unsigned int hooknum, struct sk_buff *skb, | ||
14 | struct net_device *in, | ||
15 | struct net_device *out, | ||
16 | int (*okfn)(struct sk_buff *)); | ||
17 | |||
18 | struct inet_frags_ctl; | ||
19 | |||
20 | #include <linux/sysctl.h> | 10 | #include <linux/sysctl.h> |
21 | extern struct ctl_table nf_ct_ipv6_sysctl_table[]; | 11 | extern struct ctl_table nf_ct_ipv6_sysctl_table[]; |
22 | 12 | ||
diff --git a/include/net/netfilter/ipv6/nf_defrag_ipv6.h b/include/net/netfilter/ipv6/nf_defrag_ipv6.h index 94dd54d76b48..fd79c9a1779d 100644 --- a/include/net/netfilter/ipv6/nf_defrag_ipv6.h +++ b/include/net/netfilter/ipv6/nf_defrag_ipv6.h | |||
@@ -3,4 +3,14 @@ | |||
3 | 3 | ||
4 | extern void nf_defrag_ipv6_enable(void); | 4 | extern void nf_defrag_ipv6_enable(void); |
5 | 5 | ||
6 | extern int nf_ct_frag6_init(void); | ||
7 | extern void nf_ct_frag6_cleanup(void); | ||
8 | extern struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user); | ||
9 | extern void nf_ct_frag6_output(unsigned int hooknum, struct sk_buff *skb, | ||
10 | struct net_device *in, | ||
11 | struct net_device *out, | ||
12 | int (*okfn)(struct sk_buff *)); | ||
13 | |||
14 | struct inet_frags_ctl; | ||
15 | |||
6 | #endif /* _NF_DEFRAG_IPV6_H */ | 16 | #endif /* _NF_DEFRAG_IPV6_H */ |
diff --git a/include/net/phonet/phonet.h b/include/net/phonet/phonet.h index d5df797f9540..5395e09187df 100644 --- a/include/net/phonet/phonet.h +++ b/include/net/phonet/phonet.h | |||
@@ -107,8 +107,8 @@ struct phonet_protocol { | |||
107 | int sock_type; | 107 | int sock_type; |
108 | }; | 108 | }; |
109 | 109 | ||
110 | int phonet_proto_register(int protocol, struct phonet_protocol *pp); | 110 | int phonet_proto_register(unsigned int protocol, struct phonet_protocol *pp); |
111 | void phonet_proto_unregister(int protocol, struct phonet_protocol *pp); | 111 | void phonet_proto_unregister(unsigned int protocol, struct phonet_protocol *pp); |
112 | 112 | ||
113 | int phonet_sysctl_init(void); | 113 | int phonet_sysctl_init(void); |
114 | void phonet_sysctl_exit(void); | 114 | void phonet_sysctl_exit(void); |
diff --git a/include/net/red.h b/include/net/red.h index 995108e54d9f..3319f16b3beb 100644 --- a/include/net/red.h +++ b/include/net/red.h | |||
@@ -97,7 +97,6 @@ struct red_stats { | |||
97 | u32 forced_mark; /* Forced marks, qavg > max_thresh */ | 97 | u32 forced_mark; /* Forced marks, qavg > max_thresh */ |
98 | u32 pdrop; /* Drops due to queue limits */ | 98 | u32 pdrop; /* Drops due to queue limits */ |
99 | u32 other; /* Drops due to drop() calls */ | 99 | u32 other; /* Drops due to drop() calls */ |
100 | u32 backlog; | ||
101 | }; | 100 | }; |
102 | 101 | ||
103 | struct red_parms { | 102 | struct red_parms { |
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 0af57ebae762..e9eee99d8b1f 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h | |||
@@ -207,7 +207,7 @@ static inline int qdisc_qlen(struct Qdisc *q) | |||
207 | return q->q.qlen; | 207 | return q->q.qlen; |
208 | } | 208 | } |
209 | 209 | ||
210 | static inline struct qdisc_skb_cb *qdisc_skb_cb(struct sk_buff *skb) | 210 | static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb) |
211 | { | 211 | { |
212 | return (struct qdisc_skb_cb *)skb->cb; | 212 | return (struct qdisc_skb_cb *)skb->cb; |
213 | } | 213 | } |
@@ -394,7 +394,7 @@ static inline bool qdisc_tx_is_noop(const struct net_device *dev) | |||
394 | return true; | 394 | return true; |
395 | } | 395 | } |
396 | 396 | ||
397 | static inline unsigned int qdisc_pkt_len(struct sk_buff *skb) | 397 | static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb) |
398 | { | 398 | { |
399 | return qdisc_skb_cb(skb)->pkt_len; | 399 | return qdisc_skb_cb(skb)->pkt_len; |
400 | } | 400 | } |
@@ -426,10 +426,18 @@ static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch) | |||
426 | return qdisc_enqueue(skb, sch) & NET_XMIT_MASK; | 426 | return qdisc_enqueue(skb, sch) & NET_XMIT_MASK; |
427 | } | 427 | } |
428 | 428 | ||
429 | static inline void __qdisc_update_bstats(struct Qdisc *sch, unsigned int len) | 429 | |
430 | static inline void bstats_update(struct gnet_stats_basic_packed *bstats, | ||
431 | const struct sk_buff *skb) | ||
432 | { | ||
433 | bstats->bytes += qdisc_pkt_len(skb); | ||
434 | bstats->packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1; | ||
435 | } | ||
436 | |||
437 | static inline void qdisc_bstats_update(struct Qdisc *sch, | ||
438 | const struct sk_buff *skb) | ||
430 | { | 439 | { |
431 | sch->bstats.bytes += len; | 440 | bstats_update(&sch->bstats, skb); |
432 | sch->bstats.packets++; | ||
433 | } | 441 | } |
434 | 442 | ||
435 | static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch, | 443 | static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch, |
@@ -437,7 +445,7 @@ static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch, | |||
437 | { | 445 | { |
438 | __skb_queue_tail(list, skb); | 446 | __skb_queue_tail(list, skb); |
439 | sch->qstats.backlog += qdisc_pkt_len(skb); | 447 | sch->qstats.backlog += qdisc_pkt_len(skb); |
440 | __qdisc_update_bstats(sch, qdisc_pkt_len(skb)); | 448 | qdisc_bstats_update(sch, skb); |
441 | 449 | ||
442 | return NET_XMIT_SUCCESS; | 450 | return NET_XMIT_SUCCESS; |
443 | } | 451 | } |
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index bb86d2932394..6da5daeebab7 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c | |||
@@ -1392,7 +1392,7 @@ static int ax25_getname(struct socket *sock, struct sockaddr *uaddr, | |||
1392 | ax25_cb *ax25; | 1392 | ax25_cb *ax25; |
1393 | int err = 0; | 1393 | int err = 0; |
1394 | 1394 | ||
1395 | memset(fsa, 0, sizeof(fsa)); | 1395 | memset(fsa, 0, sizeof(*fsa)); |
1396 | lock_sock(sk); | 1396 | lock_sock(sk); |
1397 | ax25 = ax25_sk(sk); | 1397 | ax25 = ax25_sk(sk); |
1398 | 1398 | ||
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h index d4d9926c2201..65106fb61b8f 100644 --- a/net/batman-adv/main.h +++ b/net/batman-adv/main.h | |||
@@ -151,9 +151,9 @@ int debug_log(struct bat_priv *bat_priv, char *fmt, ...); | |||
151 | } \ | 151 | } \ |
152 | while (0) | 152 | while (0) |
153 | #else /* !CONFIG_BATMAN_ADV_DEBUG */ | 153 | #else /* !CONFIG_BATMAN_ADV_DEBUG */ |
154 | static inline void bat_dbg(char type __attribute__((unused)), | 154 | static inline void bat_dbg(char type __always_unused, |
155 | struct bat_priv *bat_priv __attribute__((unused)), | 155 | struct bat_priv *bat_priv __always_unused, |
156 | char *fmt __attribute__((unused)), ...) | 156 | char *fmt __always_unused, ...) |
157 | { | 157 | { |
158 | } | 158 | } |
159 | #endif | 159 | #endif |
diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h index b49fdf70a6d5..2284e8129cb2 100644 --- a/net/batman-adv/packet.h +++ b/net/batman-adv/packet.h | |||
@@ -63,7 +63,7 @@ struct batman_packet { | |||
63 | uint8_t num_hna; | 63 | uint8_t num_hna; |
64 | uint8_t gw_flags; /* flags related to gateway class */ | 64 | uint8_t gw_flags; /* flags related to gateway class */ |
65 | uint8_t align; | 65 | uint8_t align; |
66 | } __attribute__((packed)); | 66 | } __packed; |
67 | 67 | ||
68 | #define BAT_PACKET_LEN sizeof(struct batman_packet) | 68 | #define BAT_PACKET_LEN sizeof(struct batman_packet) |
69 | 69 | ||
@@ -76,7 +76,7 @@ struct icmp_packet { | |||
76 | uint8_t orig[6]; | 76 | uint8_t orig[6]; |
77 | uint16_t seqno; | 77 | uint16_t seqno; |
78 | uint8_t uid; | 78 | uint8_t uid; |
79 | } __attribute__((packed)); | 79 | } __packed; |
80 | 80 | ||
81 | #define BAT_RR_LEN 16 | 81 | #define BAT_RR_LEN 16 |
82 | 82 | ||
@@ -93,14 +93,14 @@ struct icmp_packet_rr { | |||
93 | uint8_t uid; | 93 | uint8_t uid; |
94 | uint8_t rr_cur; | 94 | uint8_t rr_cur; |
95 | uint8_t rr[BAT_RR_LEN][ETH_ALEN]; | 95 | uint8_t rr[BAT_RR_LEN][ETH_ALEN]; |
96 | } __attribute__((packed)); | 96 | } __packed; |
97 | 97 | ||
98 | struct unicast_packet { | 98 | struct unicast_packet { |
99 | uint8_t packet_type; | 99 | uint8_t packet_type; |
100 | uint8_t version; /* batman version field */ | 100 | uint8_t version; /* batman version field */ |
101 | uint8_t dest[6]; | 101 | uint8_t dest[6]; |
102 | uint8_t ttl; | 102 | uint8_t ttl; |
103 | } __attribute__((packed)); | 103 | } __packed; |
104 | 104 | ||
105 | struct unicast_frag_packet { | 105 | struct unicast_frag_packet { |
106 | uint8_t packet_type; | 106 | uint8_t packet_type; |
@@ -110,7 +110,7 @@ struct unicast_frag_packet { | |||
110 | uint8_t flags; | 110 | uint8_t flags; |
111 | uint8_t orig[6]; | 111 | uint8_t orig[6]; |
112 | uint16_t seqno; | 112 | uint16_t seqno; |
113 | } __attribute__((packed)); | 113 | } __packed; |
114 | 114 | ||
115 | struct bcast_packet { | 115 | struct bcast_packet { |
116 | uint8_t packet_type; | 116 | uint8_t packet_type; |
@@ -118,7 +118,7 @@ struct bcast_packet { | |||
118 | uint8_t orig[6]; | 118 | uint8_t orig[6]; |
119 | uint8_t ttl; | 119 | uint8_t ttl; |
120 | uint32_t seqno; | 120 | uint32_t seqno; |
121 | } __attribute__((packed)); | 121 | } __packed; |
122 | 122 | ||
123 | struct vis_packet { | 123 | struct vis_packet { |
124 | uint8_t packet_type; | 124 | uint8_t packet_type; |
@@ -131,6 +131,6 @@ struct vis_packet { | |||
131 | * neighbors */ | 131 | * neighbors */ |
132 | uint8_t target_orig[6]; /* who should receive this packet */ | 132 | uint8_t target_orig[6]; /* who should receive this packet */ |
133 | uint8_t sender_orig[6]; /* who sent or rebroadcasted this packet */ | 133 | uint8_t sender_orig[6]; /* who sent or rebroadcasted this packet */ |
134 | } __attribute__((packed)); | 134 | } __packed; |
135 | 135 | ||
136 | #endif /* _NET_BATMAN_ADV_PACKET_H_ */ | 136 | #endif /* _NET_BATMAN_ADV_PACKET_H_ */ |
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h index 97cb23dd3e69..bf3f6f5a12c4 100644 --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h | |||
@@ -246,13 +246,13 @@ struct vis_info { | |||
246 | /* this packet might be part of the vis send queue. */ | 246 | /* this packet might be part of the vis send queue. */ |
247 | struct sk_buff *skb_packet; | 247 | struct sk_buff *skb_packet; |
248 | /* vis_info may follow here*/ | 248 | /* vis_info may follow here*/ |
249 | } __attribute__((packed)); | 249 | } __packed; |
250 | 250 | ||
251 | struct vis_info_entry { | 251 | struct vis_info_entry { |
252 | uint8_t src[ETH_ALEN]; | 252 | uint8_t src[ETH_ALEN]; |
253 | uint8_t dest[ETH_ALEN]; | 253 | uint8_t dest[ETH_ALEN]; |
254 | uint8_t quality; /* quality = 0 means HNA */ | 254 | uint8_t quality; /* quality = 0 means HNA */ |
255 | } __attribute__((packed)); | 255 | } __packed; |
256 | 256 | ||
257 | struct recvlist_node { | 257 | struct recvlist_node { |
258 | struct list_head list; | 258 | struct list_head list; |
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c index dc2e28bed844..ee41fef04b21 100644 --- a/net/batman-adv/unicast.c +++ b/net/batman-adv/unicast.c | |||
@@ -229,10 +229,12 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv, | |||
229 | if (!bat_priv->primary_if) | 229 | if (!bat_priv->primary_if) |
230 | goto dropped; | 230 | goto dropped; |
231 | 231 | ||
232 | unicast_packet = (struct unicast_packet *) skb->data; | 232 | frag_skb = dev_alloc_skb(data_len - (data_len / 2) + ucf_hdr_len); |
233 | if (!frag_skb) | ||
234 | goto dropped; | ||
233 | 235 | ||
236 | unicast_packet = (struct unicast_packet *) skb->data; | ||
234 | memcpy(&tmp_uc, unicast_packet, uc_hdr_len); | 237 | memcpy(&tmp_uc, unicast_packet, uc_hdr_len); |
235 | frag_skb = dev_alloc_skb(data_len - (data_len / 2) + ucf_hdr_len); | ||
236 | skb_split(skb, frag_skb, data_len / 2); | 238 | skb_split(skb, frag_skb, data_len / 2); |
237 | 239 | ||
238 | if (my_skb_head_push(skb, ucf_hdr_len - uc_hdr_len) < 0 || | 240 | if (my_skb_head_push(skb, ucf_hdr_len - uc_hdr_len) < 0 || |
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c index 1bf0cf503796..8184c031d028 100644 --- a/net/caif/caif_socket.c +++ b/net/caif/caif_socket.c | |||
@@ -740,12 +740,12 @@ static int setsockopt(struct socket *sock, | |||
740 | if (cf_sk->sk.sk_protocol != CAIFPROTO_UTIL) | 740 | if (cf_sk->sk.sk_protocol != CAIFPROTO_UTIL) |
741 | return -ENOPROTOOPT; | 741 | return -ENOPROTOOPT; |
742 | lock_sock(&(cf_sk->sk)); | 742 | lock_sock(&(cf_sk->sk)); |
743 | cf_sk->conn_req.param.size = ol; | ||
744 | if (ol > sizeof(cf_sk->conn_req.param.data) || | 743 | if (ol > sizeof(cf_sk->conn_req.param.data) || |
745 | copy_from_user(&cf_sk->conn_req.param.data, ov, ol)) { | 744 | copy_from_user(&cf_sk->conn_req.param.data, ov, ol)) { |
746 | release_sock(&cf_sk->sk); | 745 | release_sock(&cf_sk->sk); |
747 | return -EINVAL; | 746 | return -EINVAL; |
748 | } | 747 | } |
748 | cf_sk->conn_req.param.size = ol; | ||
749 | release_sock(&cf_sk->sk); | 749 | release_sock(&cf_sk->sk); |
750 | return 0; | 750 | return 0; |
751 | 751 | ||
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c index 21ede141018a..c665de778b60 100644 --- a/net/caif/cfcnfg.c +++ b/net/caif/cfcnfg.c | |||
@@ -191,6 +191,7 @@ int cfcnfg_disconn_adapt_layer(struct cfcnfg *cnfg, struct cflayer *adap_layer) | |||
191 | struct cflayer *servl = NULL; | 191 | struct cflayer *servl = NULL; |
192 | struct cfcnfg_phyinfo *phyinfo = NULL; | 192 | struct cfcnfg_phyinfo *phyinfo = NULL; |
193 | u8 phyid = 0; | 193 | u8 phyid = 0; |
194 | |||
194 | caif_assert(adap_layer != NULL); | 195 | caif_assert(adap_layer != NULL); |
195 | channel_id = adap_layer->id; | 196 | channel_id = adap_layer->id; |
196 | if (adap_layer->dn == NULL || channel_id == 0) { | 197 | if (adap_layer->dn == NULL || channel_id == 0) { |
@@ -199,16 +200,16 @@ int cfcnfg_disconn_adapt_layer(struct cfcnfg *cnfg, struct cflayer *adap_layer) | |||
199 | goto end; | 200 | goto end; |
200 | } | 201 | } |
201 | servl = cfmuxl_remove_uplayer(cnfg->mux, channel_id); | 202 | servl = cfmuxl_remove_uplayer(cnfg->mux, channel_id); |
202 | if (servl == NULL) | ||
203 | goto end; | ||
204 | layer_set_up(servl, NULL); | ||
205 | ret = cfctrl_linkdown_req(cnfg->ctrl, channel_id, adap_layer); | ||
206 | if (servl == NULL) { | 203 | if (servl == NULL) { |
207 | pr_err("PROTOCOL ERROR - Error removing service_layer Channel_Id(%d)", | 204 | pr_err("PROTOCOL ERROR - Error removing service_layer Channel_Id(%d)", |
208 | channel_id); | 205 | channel_id); |
209 | ret = -EINVAL; | 206 | ret = -EINVAL; |
210 | goto end; | 207 | goto end; |
211 | } | 208 | } |
209 | layer_set_up(servl, NULL); | ||
210 | ret = cfctrl_linkdown_req(cnfg->ctrl, channel_id, adap_layer); | ||
211 | if (ret) | ||
212 | goto end; | ||
212 | caif_assert(channel_id == servl->id); | 213 | caif_assert(channel_id == servl->id); |
213 | if (adap_layer->dn != NULL) { | 214 | if (adap_layer->dn != NULL) { |
214 | phyid = cfsrvl_getphyid(adap_layer->dn); | 215 | phyid = cfsrvl_getphyid(adap_layer->dn); |
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c index 84a422c98941..fa9dab372b68 100644 --- a/net/caif/chnl_net.c +++ b/net/caif/chnl_net.c | |||
@@ -76,6 +76,8 @@ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt) | |||
76 | struct chnl_net *priv = container_of(layr, struct chnl_net, chnl); | 76 | struct chnl_net *priv = container_of(layr, struct chnl_net, chnl); |
77 | int pktlen; | 77 | int pktlen; |
78 | int err = 0; | 78 | int err = 0; |
79 | const u8 *ip_version; | ||
80 | u8 buf; | ||
79 | 81 | ||
80 | priv = container_of(layr, struct chnl_net, chnl); | 82 | priv = container_of(layr, struct chnl_net, chnl); |
81 | 83 | ||
@@ -90,7 +92,21 @@ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt) | |||
90 | * send the packet to the net stack. | 92 | * send the packet to the net stack. |
91 | */ | 93 | */ |
92 | skb->dev = priv->netdev; | 94 | skb->dev = priv->netdev; |
93 | skb->protocol = htons(ETH_P_IP); | 95 | |
96 | /* check the version of IP */ | ||
97 | ip_version = skb_header_pointer(skb, 0, 1, &buf); | ||
98 | if (!ip_version) | ||
99 | return -EINVAL; | ||
100 | switch (*ip_version >> 4) { | ||
101 | case 4: | ||
102 | skb->protocol = htons(ETH_P_IP); | ||
103 | break; | ||
104 | case 6: | ||
105 | skb->protocol = htons(ETH_P_IPV6); | ||
106 | break; | ||
107 | default: | ||
108 | return -EINVAL; | ||
109 | } | ||
94 | 110 | ||
95 | /* If we change the header in loop mode, the checksum is corrupted. */ | 111 | /* If we change the header in loop mode, the checksum is corrupted. */ |
96 | if (priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP) | 112 | if (priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP) |
diff --git a/net/can/bcm.c b/net/can/bcm.c index 9d5e8accfab1..092dc88a7c64 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c | |||
@@ -1256,6 +1256,9 @@ static int bcm_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
1256 | struct sockaddr_can *addr = | 1256 | struct sockaddr_can *addr = |
1257 | (struct sockaddr_can *)msg->msg_name; | 1257 | (struct sockaddr_can *)msg->msg_name; |
1258 | 1258 | ||
1259 | if (msg->msg_namelen < sizeof(*addr)) | ||
1260 | return -EINVAL; | ||
1261 | |||
1259 | if (addr->can_family != AF_CAN) | 1262 | if (addr->can_family != AF_CAN) |
1260 | return -EINVAL; | 1263 | return -EINVAL; |
1261 | 1264 | ||
diff --git a/net/can/raw.c b/net/can/raw.c index e88f610fdb7b..883e9d74fddf 100644 --- a/net/can/raw.c +++ b/net/can/raw.c | |||
@@ -649,6 +649,9 @@ static int raw_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
649 | struct sockaddr_can *addr = | 649 | struct sockaddr_can *addr = |
650 | (struct sockaddr_can *)msg->msg_name; | 650 | (struct sockaddr_can *)msg->msg_name; |
651 | 651 | ||
652 | if (msg->msg_namelen < sizeof(*addr)) | ||
653 | return -EINVAL; | ||
654 | |||
652 | if (addr->can_family != AF_CAN) | 655 | if (addr->can_family != AF_CAN) |
653 | return -EINVAL; | 656 | return -EINVAL; |
654 | 657 | ||
diff --git a/net/core/dev.c b/net/core/dev.c index 3fe443be4b15..83507c265e48 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -2297,7 +2297,10 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, | |||
2297 | */ | 2297 | */ |
2298 | if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE)) | 2298 | if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE)) |
2299 | skb_dst_force(skb); | 2299 | skb_dst_force(skb); |
2300 | __qdisc_update_bstats(q, skb->len); | 2300 | |
2301 | qdisc_skb_cb(skb)->pkt_len = skb->len; | ||
2302 | qdisc_bstats_update(q, skb); | ||
2303 | |||
2301 | if (sch_direct_xmit(skb, q, dev, txq, root_lock)) { | 2304 | if (sch_direct_xmit(skb, q, dev, txq, root_lock)) { |
2302 | if (unlikely(contended)) { | 2305 | if (unlikely(contended)) { |
2303 | spin_unlock(&q->busylock); | 2306 | spin_unlock(&q->busylock); |
@@ -5520,34 +5523,6 @@ void netdev_run_todo(void) | |||
5520 | } | 5523 | } |
5521 | } | 5524 | } |
5522 | 5525 | ||
5523 | /** | ||
5524 | * dev_txq_stats_fold - fold tx_queues stats | ||
5525 | * @dev: device to get statistics from | ||
5526 | * @stats: struct rtnl_link_stats64 to hold results | ||
5527 | */ | ||
5528 | void dev_txq_stats_fold(const struct net_device *dev, | ||
5529 | struct rtnl_link_stats64 *stats) | ||
5530 | { | ||
5531 | u64 tx_bytes = 0, tx_packets = 0, tx_dropped = 0; | ||
5532 | unsigned int i; | ||
5533 | struct netdev_queue *txq; | ||
5534 | |||
5535 | for (i = 0; i < dev->num_tx_queues; i++) { | ||
5536 | txq = netdev_get_tx_queue(dev, i); | ||
5537 | spin_lock_bh(&txq->_xmit_lock); | ||
5538 | tx_bytes += txq->tx_bytes; | ||
5539 | tx_packets += txq->tx_packets; | ||
5540 | tx_dropped += txq->tx_dropped; | ||
5541 | spin_unlock_bh(&txq->_xmit_lock); | ||
5542 | } | ||
5543 | if (tx_bytes || tx_packets || tx_dropped) { | ||
5544 | stats->tx_bytes = tx_bytes; | ||
5545 | stats->tx_packets = tx_packets; | ||
5546 | stats->tx_dropped = tx_dropped; | ||
5547 | } | ||
5548 | } | ||
5549 | EXPORT_SYMBOL(dev_txq_stats_fold); | ||
5550 | |||
5551 | /* Convert net_device_stats to rtnl_link_stats64. They have the same | 5526 | /* Convert net_device_stats to rtnl_link_stats64. They have the same |
5552 | * fields in the same order, with only the type differing. | 5527 | * fields in the same order, with only the type differing. |
5553 | */ | 5528 | */ |
@@ -5591,7 +5566,6 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, | |||
5591 | netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev)); | 5566 | netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev)); |
5592 | } else { | 5567 | } else { |
5593 | netdev_stats_to_stats64(storage, &dev->stats); | 5568 | netdev_stats_to_stats64(storage, &dev->stats); |
5594 | dev_txq_stats_fold(dev, storage); | ||
5595 | } | 5569 | } |
5596 | storage->rx_dropped += atomic_long_read(&dev->rx_dropped); | 5570 | storage->rx_dropped += atomic_long_read(&dev->rx_dropped); |
5597 | return storage; | 5571 | return storage; |
@@ -5617,18 +5591,20 @@ struct netdev_queue *dev_ingress_queue_create(struct net_device *dev) | |||
5617 | } | 5591 | } |
5618 | 5592 | ||
5619 | /** | 5593 | /** |
5620 | * alloc_netdev_mq - allocate network device | 5594 | * alloc_netdev_mqs - allocate network device |
5621 | * @sizeof_priv: size of private data to allocate space for | 5595 | * @sizeof_priv: size of private data to allocate space for |
5622 | * @name: device name format string | 5596 | * @name: device name format string |
5623 | * @setup: callback to initialize device | 5597 | * @setup: callback to initialize device |
5624 | * @queue_count: the number of subqueues to allocate | 5598 | * @txqs: the number of TX subqueues to allocate |
5599 | * @rxqs: the number of RX subqueues to allocate | ||
5625 | * | 5600 | * |
5626 | * Allocates a struct net_device with private data area for driver use | 5601 | * Allocates a struct net_device with private data area for driver use |
5627 | * and performs basic initialization. Also allocates subquue structs | 5602 | * and performs basic initialization. Also allocates subquue structs |
5628 | * for each queue on the device at the end of the netdevice. | 5603 | * for each queue on the device. |
5629 | */ | 5604 | */ |
5630 | struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, | 5605 | struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, |
5631 | void (*setup)(struct net_device *), unsigned int queue_count) | 5606 | void (*setup)(struct net_device *), |
5607 | unsigned int txqs, unsigned int rxqs) | ||
5632 | { | 5608 | { |
5633 | struct net_device *dev; | 5609 | struct net_device *dev; |
5634 | size_t alloc_size; | 5610 | size_t alloc_size; |
@@ -5636,12 +5612,20 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, | |||
5636 | 5612 | ||
5637 | BUG_ON(strlen(name) >= sizeof(dev->name)); | 5613 | BUG_ON(strlen(name) >= sizeof(dev->name)); |
5638 | 5614 | ||
5639 | if (queue_count < 1) { | 5615 | if (txqs < 1) { |
5640 | pr_err("alloc_netdev: Unable to allocate device " | 5616 | pr_err("alloc_netdev: Unable to allocate device " |
5641 | "with zero queues.\n"); | 5617 | "with zero queues.\n"); |
5642 | return NULL; | 5618 | return NULL; |
5643 | } | 5619 | } |
5644 | 5620 | ||
5621 | #ifdef CONFIG_RPS | ||
5622 | if (rxqs < 1) { | ||
5623 | pr_err("alloc_netdev: Unable to allocate device " | ||
5624 | "with zero RX queues.\n"); | ||
5625 | return NULL; | ||
5626 | } | ||
5627 | #endif | ||
5628 | |||
5645 | alloc_size = sizeof(struct net_device); | 5629 | alloc_size = sizeof(struct net_device); |
5646 | if (sizeof_priv) { | 5630 | if (sizeof_priv) { |
5647 | /* ensure 32-byte alignment of private area */ | 5631 | /* ensure 32-byte alignment of private area */ |
@@ -5672,14 +5656,14 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, | |||
5672 | 5656 | ||
5673 | dev_net_set(dev, &init_net); | 5657 | dev_net_set(dev, &init_net); |
5674 | 5658 | ||
5675 | dev->num_tx_queues = queue_count; | 5659 | dev->num_tx_queues = txqs; |
5676 | dev->real_num_tx_queues = queue_count; | 5660 | dev->real_num_tx_queues = txqs; |
5677 | if (netif_alloc_netdev_queues(dev)) | 5661 | if (netif_alloc_netdev_queues(dev)) |
5678 | goto free_pcpu; | 5662 | goto free_pcpu; |
5679 | 5663 | ||
5680 | #ifdef CONFIG_RPS | 5664 | #ifdef CONFIG_RPS |
5681 | dev->num_rx_queues = queue_count; | 5665 | dev->num_rx_queues = rxqs; |
5682 | dev->real_num_rx_queues = queue_count; | 5666 | dev->real_num_rx_queues = rxqs; |
5683 | if (netif_alloc_rx_queues(dev)) | 5667 | if (netif_alloc_rx_queues(dev)) |
5684 | goto free_pcpu; | 5668 | goto free_pcpu; |
5685 | #endif | 5669 | #endif |
@@ -5707,7 +5691,7 @@ free_p: | |||
5707 | kfree(p); | 5691 | kfree(p); |
5708 | return NULL; | 5692 | return NULL; |
5709 | } | 5693 | } |
5710 | EXPORT_SYMBOL(alloc_netdev_mq); | 5694 | EXPORT_SYMBOL(alloc_netdev_mqs); |
5711 | 5695 | ||
5712 | /** | 5696 | /** |
5713 | * free_netdev - free network device | 5697 | * free_netdev - free network device |
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 19d6c21220fd..d31bb36ae0dc 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
@@ -380,6 +380,8 @@ static void skb_release_head_state(struct sk_buff *skb) | |||
380 | } | 380 | } |
381 | #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) | 381 | #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) |
382 | nf_conntrack_put(skb->nfct); | 382 | nf_conntrack_put(skb->nfct); |
383 | #endif | ||
384 | #ifdef NET_SKBUFF_NF_DEFRAG_NEEDED | ||
383 | nf_conntrack_put_reasm(skb->nfct_reasm); | 385 | nf_conntrack_put_reasm(skb->nfct_reasm); |
384 | #endif | 386 | #endif |
385 | #ifdef CONFIG_BRIDGE_NETFILTER | 387 | #ifdef CONFIG_BRIDGE_NETFILTER |
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c index f00ef2f1d814..44d2b42fda56 100644 --- a/net/ethernet/eth.c +++ b/net/ethernet/eth.c | |||
@@ -347,10 +347,11 @@ void ether_setup(struct net_device *dev) | |||
347 | EXPORT_SYMBOL(ether_setup); | 347 | EXPORT_SYMBOL(ether_setup); |
348 | 348 | ||
349 | /** | 349 | /** |
350 | * alloc_etherdev_mq - Allocates and sets up an Ethernet device | 350 | * alloc_etherdev_mqs - Allocates and sets up an Ethernet device |
351 | * @sizeof_priv: Size of additional driver-private structure to be allocated | 351 | * @sizeof_priv: Size of additional driver-private structure to be allocated |
352 | * for this Ethernet device | 352 | * for this Ethernet device |
353 | * @queue_count: The number of queues this device has. | 353 | * @txqs: The number of TX queues this device has. |
354 | * @rxqs: The number of RX queues this device has. | ||
354 | * | 355 | * |
355 | * Fill in the fields of the device structure with Ethernet-generic | 356 | * Fill in the fields of the device structure with Ethernet-generic |
356 | * values. Basically does everything except registering the device. | 357 | * values. Basically does everything except registering the device. |
@@ -360,11 +361,12 @@ EXPORT_SYMBOL(ether_setup); | |||
360 | * this private data area. | 361 | * this private data area. |
361 | */ | 362 | */ |
362 | 363 | ||
363 | struct net_device *alloc_etherdev_mq(int sizeof_priv, unsigned int queue_count) | 364 | struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs, |
365 | unsigned int rxqs) | ||
364 | { | 366 | { |
365 | return alloc_netdev_mq(sizeof_priv, "eth%d", ether_setup, queue_count); | 367 | return alloc_netdev_mqs(sizeof_priv, "eth%d", ether_setup, txqs, rxqs); |
366 | } | 368 | } |
367 | EXPORT_SYMBOL(alloc_etherdev_mq); | 369 | EXPORT_SYMBOL(alloc_etherdev_mqs); |
368 | 370 | ||
369 | static size_t _format_mac_addr(char *buf, int buflen, | 371 | static size_t _format_mac_addr(char *buf, int buflen, |
370 | const unsigned char *addr, int len) | 372 | const unsigned char *addr, int len) |
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c index 880a5ec6dce0..86961bec70ab 100644 --- a/net/ipv4/ah4.c +++ b/net/ipv4/ah4.c | |||
@@ -314,14 +314,15 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb) | |||
314 | 314 | ||
315 | skb->ip_summed = CHECKSUM_NONE; | 315 | skb->ip_summed = CHECKSUM_NONE; |
316 | 316 | ||
317 | ah = (struct ip_auth_hdr *)skb->data; | ||
318 | iph = ip_hdr(skb); | ||
319 | ihl = ip_hdrlen(skb); | ||
320 | 317 | ||
321 | if ((err = skb_cow_data(skb, 0, &trailer)) < 0) | 318 | if ((err = skb_cow_data(skb, 0, &trailer)) < 0) |
322 | goto out; | 319 | goto out; |
323 | nfrags = err; | 320 | nfrags = err; |
324 | 321 | ||
322 | ah = (struct ip_auth_hdr *)skb->data; | ||
323 | iph = ip_hdr(skb); | ||
324 | ihl = ip_hdrlen(skb); | ||
325 | |||
325 | work_iph = ah_alloc_tmp(ahash, nfrags, ihl + ahp->icv_trunc_len); | 326 | work_iph = ah_alloc_tmp(ahash, nfrags, ihl + ahp->icv_trunc_len); |
326 | if (!work_iph) | 327 | if (!work_iph) |
327 | goto out; | 328 | goto out; |
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index a2fc7b961dbc..04c8b69fd426 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c | |||
@@ -1143,6 +1143,23 @@ static int arp_req_get(struct arpreq *r, struct net_device *dev) | |||
1143 | return err; | 1143 | return err; |
1144 | } | 1144 | } |
1145 | 1145 | ||
1146 | int arp_invalidate(struct net_device *dev, __be32 ip) | ||
1147 | { | ||
1148 | struct neighbour *neigh = neigh_lookup(&arp_tbl, &ip, dev); | ||
1149 | int err = -ENXIO; | ||
1150 | |||
1151 | if (neigh) { | ||
1152 | if (neigh->nud_state & ~NUD_NOARP) | ||
1153 | err = neigh_update(neigh, NULL, NUD_FAILED, | ||
1154 | NEIGH_UPDATE_F_OVERRIDE| | ||
1155 | NEIGH_UPDATE_F_ADMIN); | ||
1156 | neigh_release(neigh); | ||
1157 | } | ||
1158 | |||
1159 | return err; | ||
1160 | } | ||
1161 | EXPORT_SYMBOL(arp_invalidate); | ||
1162 | |||
1146 | static int arp_req_delete_public(struct net *net, struct arpreq *r, | 1163 | static int arp_req_delete_public(struct net *net, struct arpreq *r, |
1147 | struct net_device *dev) | 1164 | struct net_device *dev) |
1148 | { | 1165 | { |
@@ -1163,7 +1180,6 @@ static int arp_req_delete(struct net *net, struct arpreq *r, | |||
1163 | { | 1180 | { |
1164 | int err; | 1181 | int err; |
1165 | __be32 ip; | 1182 | __be32 ip; |
1166 | struct neighbour *neigh; | ||
1167 | 1183 | ||
1168 | if (r->arp_flags & ATF_PUBL) | 1184 | if (r->arp_flags & ATF_PUBL) |
1169 | return arp_req_delete_public(net, r, dev); | 1185 | return arp_req_delete_public(net, r, dev); |
@@ -1181,16 +1197,7 @@ static int arp_req_delete(struct net *net, struct arpreq *r, | |||
1181 | if (!dev) | 1197 | if (!dev) |
1182 | return -EINVAL; | 1198 | return -EINVAL; |
1183 | } | 1199 | } |
1184 | err = -ENXIO; | 1200 | return arp_invalidate(dev, ip); |
1185 | neigh = neigh_lookup(&arp_tbl, &ip, dev); | ||
1186 | if (neigh) { | ||
1187 | if (neigh->nud_state & ~NUD_NOARP) | ||
1188 | err = neigh_update(neigh, NULL, NUD_FAILED, | ||
1189 | NEIGH_UPDATE_F_OVERRIDE| | ||
1190 | NEIGH_UPDATE_F_ADMIN); | ||
1191 | neigh_release(neigh); | ||
1192 | } | ||
1193 | return err; | ||
1194 | } | 1201 | } |
1195 | 1202 | ||
1196 | /* | 1203 | /* |
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 25e318153f14..97e5fb765265 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c | |||
@@ -73,7 +73,7 @@ int inet_csk_bind_conflict(const struct sock *sk, | |||
73 | !sk2->sk_bound_dev_if || | 73 | !sk2->sk_bound_dev_if || |
74 | sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) { | 74 | sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) { |
75 | if (!reuse || !sk2->sk_reuse || | 75 | if (!reuse || !sk2->sk_reuse || |
76 | sk2->sk_state == TCP_LISTEN) { | 76 | ((1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))) { |
77 | const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2); | 77 | const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2); |
78 | if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) || | 78 | if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) || |
79 | sk2_rcv_saddr == sk_rcv_saddr(sk)) | 79 | sk2_rcv_saddr == sk_rcv_saddr(sk)) |
@@ -122,7 +122,8 @@ again: | |||
122 | (tb->num_owners < smallest_size || smallest_size == -1)) { | 122 | (tb->num_owners < smallest_size || smallest_size == -1)) { |
123 | smallest_size = tb->num_owners; | 123 | smallest_size = tb->num_owners; |
124 | smallest_rover = rover; | 124 | smallest_rover = rover; |
125 | if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) { | 125 | if (atomic_read(&hashinfo->bsockets) > (high - low) + 1 && |
126 | !inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) { | ||
126 | spin_unlock(&head->lock); | 127 | spin_unlock(&head->lock); |
127 | snum = smallest_rover; | 128 | snum = smallest_rover; |
128 | goto have_snum; | 129 | goto have_snum; |
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index 3fac340a28d5..e855fffaed95 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c | |||
@@ -710,42 +710,25 @@ static void get_counters(const struct xt_table_info *t, | |||
710 | struct arpt_entry *iter; | 710 | struct arpt_entry *iter; |
711 | unsigned int cpu; | 711 | unsigned int cpu; |
712 | unsigned int i; | 712 | unsigned int i; |
713 | unsigned int curcpu = get_cpu(); | ||
714 | |||
715 | /* Instead of clearing (by a previous call to memset()) | ||
716 | * the counters and using adds, we set the counters | ||
717 | * with data used by 'current' CPU | ||
718 | * | ||
719 | * Bottom half has to be disabled to prevent deadlock | ||
720 | * if new softirq were to run and call ipt_do_table | ||
721 | */ | ||
722 | local_bh_disable(); | ||
723 | i = 0; | ||
724 | xt_entry_foreach(iter, t->entries[curcpu], t->size) { | ||
725 | SET_COUNTER(counters[i], iter->counters.bcnt, | ||
726 | iter->counters.pcnt); | ||
727 | ++i; | ||
728 | } | ||
729 | local_bh_enable(); | ||
730 | /* Processing counters from other cpus, we can let bottom half enabled, | ||
731 | * (preemption is disabled) | ||
732 | */ | ||
733 | 713 | ||
734 | for_each_possible_cpu(cpu) { | 714 | for_each_possible_cpu(cpu) { |
735 | if (cpu == curcpu) | 715 | seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock; |
736 | continue; | 716 | |
737 | i = 0; | 717 | i = 0; |
738 | local_bh_disable(); | ||
739 | xt_info_wrlock(cpu); | ||
740 | xt_entry_foreach(iter, t->entries[cpu], t->size) { | 718 | xt_entry_foreach(iter, t->entries[cpu], t->size) { |
741 | ADD_COUNTER(counters[i], iter->counters.bcnt, | 719 | u64 bcnt, pcnt; |
742 | iter->counters.pcnt); | 720 | unsigned int start; |
721 | |||
722 | do { | ||
723 | start = read_seqbegin(lock); | ||
724 | bcnt = iter->counters.bcnt; | ||
725 | pcnt = iter->counters.pcnt; | ||
726 | } while (read_seqretry(lock, start)); | ||
727 | |||
728 | ADD_COUNTER(counters[i], bcnt, pcnt); | ||
743 | ++i; | 729 | ++i; |
744 | } | 730 | } |
745 | xt_info_wrunlock(cpu); | ||
746 | local_bh_enable(); | ||
747 | } | 731 | } |
748 | put_cpu(); | ||
749 | } | 732 | } |
750 | 733 | ||
751 | static struct xt_counters *alloc_counters(const struct xt_table *table) | 734 | static struct xt_counters *alloc_counters(const struct xt_table *table) |
@@ -759,7 +742,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table) | |||
759 | * about). | 742 | * about). |
760 | */ | 743 | */ |
761 | countersize = sizeof(struct xt_counters) * private->number; | 744 | countersize = sizeof(struct xt_counters) * private->number; |
762 | counters = vmalloc(countersize); | 745 | counters = vzalloc(countersize); |
763 | 746 | ||
764 | if (counters == NULL) | 747 | if (counters == NULL) |
765 | return ERR_PTR(-ENOMEM); | 748 | return ERR_PTR(-ENOMEM); |
@@ -1007,7 +990,7 @@ static int __do_replace(struct net *net, const char *name, | |||
1007 | struct arpt_entry *iter; | 990 | struct arpt_entry *iter; |
1008 | 991 | ||
1009 | ret = 0; | 992 | ret = 0; |
1010 | counters = vmalloc(num_counters * sizeof(struct xt_counters)); | 993 | counters = vzalloc(num_counters * sizeof(struct xt_counters)); |
1011 | if (!counters) { | 994 | if (!counters) { |
1012 | ret = -ENOMEM; | 995 | ret = -ENOMEM; |
1013 | goto out; | 996 | goto out; |
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index a846d633b3b6..652efea013dc 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c | |||
@@ -884,42 +884,25 @@ get_counters(const struct xt_table_info *t, | |||
884 | struct ipt_entry *iter; | 884 | struct ipt_entry *iter; |
885 | unsigned int cpu; | 885 | unsigned int cpu; |
886 | unsigned int i; | 886 | unsigned int i; |
887 | unsigned int curcpu = get_cpu(); | ||
888 | |||
889 | /* Instead of clearing (by a previous call to memset()) | ||
890 | * the counters and using adds, we set the counters | ||
891 | * with data used by 'current' CPU. | ||
892 | * | ||
893 | * Bottom half has to be disabled to prevent deadlock | ||
894 | * if new softirq were to run and call ipt_do_table | ||
895 | */ | ||
896 | local_bh_disable(); | ||
897 | i = 0; | ||
898 | xt_entry_foreach(iter, t->entries[curcpu], t->size) { | ||
899 | SET_COUNTER(counters[i], iter->counters.bcnt, | ||
900 | iter->counters.pcnt); | ||
901 | ++i; | ||
902 | } | ||
903 | local_bh_enable(); | ||
904 | /* Processing counters from other cpus, we can let bottom half enabled, | ||
905 | * (preemption is disabled) | ||
906 | */ | ||
907 | 887 | ||
908 | for_each_possible_cpu(cpu) { | 888 | for_each_possible_cpu(cpu) { |
909 | if (cpu == curcpu) | 889 | seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock; |
910 | continue; | 890 | |
911 | i = 0; | 891 | i = 0; |
912 | local_bh_disable(); | ||
913 | xt_info_wrlock(cpu); | ||
914 | xt_entry_foreach(iter, t->entries[cpu], t->size) { | 892 | xt_entry_foreach(iter, t->entries[cpu], t->size) { |
915 | ADD_COUNTER(counters[i], iter->counters.bcnt, | 893 | u64 bcnt, pcnt; |
916 | iter->counters.pcnt); | 894 | unsigned int start; |
895 | |||
896 | do { | ||
897 | start = read_seqbegin(lock); | ||
898 | bcnt = iter->counters.bcnt; | ||
899 | pcnt = iter->counters.pcnt; | ||
900 | } while (read_seqretry(lock, start)); | ||
901 | |||
902 | ADD_COUNTER(counters[i], bcnt, pcnt); | ||
917 | ++i; /* macro does multi eval of i */ | 903 | ++i; /* macro does multi eval of i */ |
918 | } | 904 | } |
919 | xt_info_wrunlock(cpu); | ||
920 | local_bh_enable(); | ||
921 | } | 905 | } |
922 | put_cpu(); | ||
923 | } | 906 | } |
924 | 907 | ||
925 | static struct xt_counters *alloc_counters(const struct xt_table *table) | 908 | static struct xt_counters *alloc_counters(const struct xt_table *table) |
@@ -932,7 +915,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table) | |||
932 | (other than comefrom, which userspace doesn't care | 915 | (other than comefrom, which userspace doesn't care |
933 | about). */ | 916 | about). */ |
934 | countersize = sizeof(struct xt_counters) * private->number; | 917 | countersize = sizeof(struct xt_counters) * private->number; |
935 | counters = vmalloc(countersize); | 918 | counters = vzalloc(countersize); |
936 | 919 | ||
937 | if (counters == NULL) | 920 | if (counters == NULL) |
938 | return ERR_PTR(-ENOMEM); | 921 | return ERR_PTR(-ENOMEM); |
@@ -1203,7 +1186,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks, | |||
1203 | struct ipt_entry *iter; | 1186 | struct ipt_entry *iter; |
1204 | 1187 | ||
1205 | ret = 0; | 1188 | ret = 0; |
1206 | counters = vmalloc(num_counters * sizeof(struct xt_counters)); | 1189 | counters = vzalloc(num_counters * sizeof(struct xt_counters)); |
1207 | if (!counters) { | 1190 | if (!counters) { |
1208 | ret = -ENOMEM; | 1191 | ret = -ENOMEM; |
1209 | goto out; | 1192 | goto out; |
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c index ee82d4ef26ce..1aba54ae53c4 100644 --- a/net/ipv6/ah6.c +++ b/net/ipv6/ah6.c | |||
@@ -538,14 +538,16 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb) | |||
538 | if (!pskb_may_pull(skb, ah_hlen)) | 538 | if (!pskb_may_pull(skb, ah_hlen)) |
539 | goto out; | 539 | goto out; |
540 | 540 | ||
541 | ip6h = ipv6_hdr(skb); | ||
542 | |||
543 | skb_push(skb, hdr_len); | ||
544 | 541 | ||
545 | if ((err = skb_cow_data(skb, 0, &trailer)) < 0) | 542 | if ((err = skb_cow_data(skb, 0, &trailer)) < 0) |
546 | goto out; | 543 | goto out; |
547 | nfrags = err; | 544 | nfrags = err; |
548 | 545 | ||
546 | ah = (struct ip_auth_hdr *)skb->data; | ||
547 | ip6h = ipv6_hdr(skb); | ||
548 | |||
549 | skb_push(skb, hdr_len); | ||
550 | |||
549 | work_iph = ah_alloc_tmp(ahash, nfrags, hdr_len + ahp->icv_trunc_len); | 551 | work_iph = ah_alloc_tmp(ahash, nfrags, hdr_len + ahp->icv_trunc_len); |
550 | if (!work_iph) | 552 | if (!work_iph) |
551 | goto out; | 553 | goto out; |
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c index e46305d1815a..d144e629d2b4 100644 --- a/net/ipv6/inet6_connection_sock.c +++ b/net/ipv6/inet6_connection_sock.c | |||
@@ -44,7 +44,7 @@ int inet6_csk_bind_conflict(const struct sock *sk, | |||
44 | !sk2->sk_bound_dev_if || | 44 | !sk2->sk_bound_dev_if || |
45 | sk->sk_bound_dev_if == sk2->sk_bound_dev_if) && | 45 | sk->sk_bound_dev_if == sk2->sk_bound_dev_if) && |
46 | (!sk->sk_reuse || !sk2->sk_reuse || | 46 | (!sk->sk_reuse || !sk2->sk_reuse || |
47 | sk2->sk_state == TCP_LISTEN) && | 47 | ((1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))) && |
48 | ipv6_rcv_saddr_equal(sk, sk2)) | 48 | ipv6_rcv_saddr_equal(sk, sk2)) |
49 | break; | 49 | break; |
50 | } | 50 | } |
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 94b5bf132b2e..5f8d242be3f3 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
@@ -401,6 +401,9 @@ int ip6_forward(struct sk_buff *skb) | |||
401 | goto drop; | 401 | goto drop; |
402 | } | 402 | } |
403 | 403 | ||
404 | if (skb->pkt_type != PACKET_HOST) | ||
405 | goto drop; | ||
406 | |||
404 | skb_forward_csum(skb); | 407 | skb_forward_csum(skb); |
405 | 408 | ||
406 | /* | 409 | /* |
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 455582384ece..7d227c644f72 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c | |||
@@ -897,42 +897,25 @@ get_counters(const struct xt_table_info *t, | |||
897 | struct ip6t_entry *iter; | 897 | struct ip6t_entry *iter; |
898 | unsigned int cpu; | 898 | unsigned int cpu; |
899 | unsigned int i; | 899 | unsigned int i; |
900 | unsigned int curcpu = get_cpu(); | ||
901 | |||
902 | /* Instead of clearing (by a previous call to memset()) | ||
903 | * the counters and using adds, we set the counters | ||
904 | * with data used by 'current' CPU | ||
905 | * | ||
906 | * Bottom half has to be disabled to prevent deadlock | ||
907 | * if new softirq were to run and call ipt_do_table | ||
908 | */ | ||
909 | local_bh_disable(); | ||
910 | i = 0; | ||
911 | xt_entry_foreach(iter, t->entries[curcpu], t->size) { | ||
912 | SET_COUNTER(counters[i], iter->counters.bcnt, | ||
913 | iter->counters.pcnt); | ||
914 | ++i; | ||
915 | } | ||
916 | local_bh_enable(); | ||
917 | /* Processing counters from other cpus, we can let bottom half enabled, | ||
918 | * (preemption is disabled) | ||
919 | */ | ||
920 | 900 | ||
921 | for_each_possible_cpu(cpu) { | 901 | for_each_possible_cpu(cpu) { |
922 | if (cpu == curcpu) | 902 | seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock; |
923 | continue; | 903 | |
924 | i = 0; | 904 | i = 0; |
925 | local_bh_disable(); | ||
926 | xt_info_wrlock(cpu); | ||
927 | xt_entry_foreach(iter, t->entries[cpu], t->size) { | 905 | xt_entry_foreach(iter, t->entries[cpu], t->size) { |
928 | ADD_COUNTER(counters[i], iter->counters.bcnt, | 906 | u64 bcnt, pcnt; |
929 | iter->counters.pcnt); | 907 | unsigned int start; |
908 | |||
909 | do { | ||
910 | start = read_seqbegin(lock); | ||
911 | bcnt = iter->counters.bcnt; | ||
912 | pcnt = iter->counters.pcnt; | ||
913 | } while (read_seqretry(lock, start)); | ||
914 | |||
915 | ADD_COUNTER(counters[i], bcnt, pcnt); | ||
930 | ++i; | 916 | ++i; |
931 | } | 917 | } |
932 | xt_info_wrunlock(cpu); | ||
933 | local_bh_enable(); | ||
934 | } | 918 | } |
935 | put_cpu(); | ||
936 | } | 919 | } |
937 | 920 | ||
938 | static struct xt_counters *alloc_counters(const struct xt_table *table) | 921 | static struct xt_counters *alloc_counters(const struct xt_table *table) |
@@ -945,7 +928,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table) | |||
945 | (other than comefrom, which userspace doesn't care | 928 | (other than comefrom, which userspace doesn't care |
946 | about). */ | 929 | about). */ |
947 | countersize = sizeof(struct xt_counters) * private->number; | 930 | countersize = sizeof(struct xt_counters) * private->number; |
948 | counters = vmalloc(countersize); | 931 | counters = vzalloc(countersize); |
949 | 932 | ||
950 | if (counters == NULL) | 933 | if (counters == NULL) |
951 | return ERR_PTR(-ENOMEM); | 934 | return ERR_PTR(-ENOMEM); |
@@ -1216,7 +1199,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks, | |||
1216 | struct ip6t_entry *iter; | 1199 | struct ip6t_entry *iter; |
1217 | 1200 | ||
1218 | ret = 0; | 1201 | ret = 0; |
1219 | counters = vmalloc(num_counters * sizeof(struct xt_counters)); | 1202 | counters = vzalloc(num_counters * sizeof(struct xt_counters)); |
1220 | if (!counters) { | 1203 | if (!counters) { |
1221 | ret = -ENOMEM; | 1204 | ret = -ENOMEM; |
1222 | goto out; | 1205 | goto out; |
diff --git a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c index 99abfb53bab9..97c5b21b9674 100644 --- a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c +++ b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c | |||
@@ -19,13 +19,15 @@ | |||
19 | 19 | ||
20 | #include <linux/netfilter_ipv6.h> | 20 | #include <linux/netfilter_ipv6.h> |
21 | #include <linux/netfilter_bridge.h> | 21 | #include <linux/netfilter_bridge.h> |
22 | #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) | ||
22 | #include <net/netfilter/nf_conntrack.h> | 23 | #include <net/netfilter/nf_conntrack.h> |
23 | #include <net/netfilter/nf_conntrack_helper.h> | 24 | #include <net/netfilter/nf_conntrack_helper.h> |
24 | #include <net/netfilter/nf_conntrack_l4proto.h> | 25 | #include <net/netfilter/nf_conntrack_l4proto.h> |
25 | #include <net/netfilter/nf_conntrack_l3proto.h> | 26 | #include <net/netfilter/nf_conntrack_l3proto.h> |
26 | #include <net/netfilter/nf_conntrack_core.h> | 27 | #include <net/netfilter/nf_conntrack_core.h> |
27 | #include <net/netfilter/nf_conntrack_zones.h> | ||
28 | #include <net/netfilter/ipv6/nf_conntrack_ipv6.h> | 28 | #include <net/netfilter/ipv6/nf_conntrack_ipv6.h> |
29 | #endif | ||
30 | #include <net/netfilter/nf_conntrack_zones.h> | ||
29 | #include <net/netfilter/ipv6/nf_defrag_ipv6.h> | 31 | #include <net/netfilter/ipv6/nf_defrag_ipv6.h> |
30 | 32 | ||
31 | static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum, | 33 | static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum, |
@@ -33,8 +35,10 @@ static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum, | |||
33 | { | 35 | { |
34 | u16 zone = NF_CT_DEFAULT_ZONE; | 36 | u16 zone = NF_CT_DEFAULT_ZONE; |
35 | 37 | ||
38 | #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) | ||
36 | if (skb->nfct) | 39 | if (skb->nfct) |
37 | zone = nf_ct_zone((struct nf_conn *)skb->nfct); | 40 | zone = nf_ct_zone((struct nf_conn *)skb->nfct); |
41 | #endif | ||
38 | 42 | ||
39 | #ifdef CONFIG_BRIDGE_NETFILTER | 43 | #ifdef CONFIG_BRIDGE_NETFILTER |
40 | if (skb->nf_bridge && | 44 | if (skb->nf_bridge && |
@@ -56,9 +60,11 @@ static unsigned int ipv6_defrag(unsigned int hooknum, | |||
56 | { | 60 | { |
57 | struct sk_buff *reasm; | 61 | struct sk_buff *reasm; |
58 | 62 | ||
63 | #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) | ||
59 | /* Previously seen (loopback)? */ | 64 | /* Previously seen (loopback)? */ |
60 | if (skb->nfct && !nf_ct_is_template((struct nf_conn *)skb->nfct)) | 65 | if (skb->nfct && !nf_ct_is_template((struct nf_conn *)skb->nfct)) |
61 | return NF_ACCEPT; | 66 | return NF_ACCEPT; |
67 | #endif | ||
62 | 68 | ||
63 | reasm = nf_ct_frag6_gather(skb, nf_ct6_defrag_user(hooknum, skb)); | 69 | reasm = nf_ct_frag6_gather(skb, nf_ct6_defrag_user(hooknum, skb)); |
64 | /* queued */ | 70 | /* queued */ |
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 746140264b2d..2b7eef37875c 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c | |||
@@ -645,25 +645,23 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb) | |||
645 | struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh); | 645 | struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh); |
646 | u_int8_t l3proto = nfmsg->nfgen_family; | 646 | u_int8_t l3proto = nfmsg->nfgen_family; |
647 | 647 | ||
648 | rcu_read_lock(); | 648 | spin_lock_bh(&nf_conntrack_lock); |
649 | last = (struct nf_conn *)cb->args[1]; | 649 | last = (struct nf_conn *)cb->args[1]; |
650 | for (; cb->args[0] < net->ct.htable_size; cb->args[0]++) { | 650 | for (; cb->args[0] < net->ct.htable_size; cb->args[0]++) { |
651 | restart: | 651 | restart: |
652 | hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[cb->args[0]], | 652 | hlist_nulls_for_each_entry(h, n, &net->ct.hash[cb->args[0]], |
653 | hnnode) { | 653 | hnnode) { |
654 | if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL) | 654 | if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL) |
655 | continue; | 655 | continue; |
656 | ct = nf_ct_tuplehash_to_ctrack(h); | 656 | ct = nf_ct_tuplehash_to_ctrack(h); |
657 | if (!atomic_inc_not_zero(&ct->ct_general.use)) | ||
658 | continue; | ||
659 | /* Dump entries of a given L3 protocol number. | 657 | /* Dump entries of a given L3 protocol number. |
660 | * If it is not specified, ie. l3proto == 0, | 658 | * If it is not specified, ie. l3proto == 0, |
661 | * then dump everything. */ | 659 | * then dump everything. */ |
662 | if (l3proto && nf_ct_l3num(ct) != l3proto) | 660 | if (l3proto && nf_ct_l3num(ct) != l3proto) |
663 | goto releasect; | 661 | continue; |
664 | if (cb->args[1]) { | 662 | if (cb->args[1]) { |
665 | if (ct != last) | 663 | if (ct != last) |
666 | goto releasect; | 664 | continue; |
667 | cb->args[1] = 0; | 665 | cb->args[1] = 0; |
668 | } | 666 | } |
669 | if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid, | 667 | if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid, |
@@ -681,8 +679,6 @@ restart: | |||
681 | if (acct) | 679 | if (acct) |
682 | memset(acct, 0, sizeof(struct nf_conn_counter[IP_CT_DIR_MAX])); | 680 | memset(acct, 0, sizeof(struct nf_conn_counter[IP_CT_DIR_MAX])); |
683 | } | 681 | } |
684 | releasect: | ||
685 | nf_ct_put(ct); | ||
686 | } | 682 | } |
687 | if (cb->args[1]) { | 683 | if (cb->args[1]) { |
688 | cb->args[1] = 0; | 684 | cb->args[1] = 0; |
@@ -690,7 +686,7 @@ releasect: | |||
690 | } | 686 | } |
691 | } | 687 | } |
692 | out: | 688 | out: |
693 | rcu_read_unlock(); | 689 | spin_unlock_bh(&nf_conntrack_lock); |
694 | if (last) | 690 | if (last) |
695 | nf_ct_put(last); | 691 | nf_ct_put(last); |
696 | 692 | ||
@@ -976,7 +972,8 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb, | |||
976 | free: | 972 | free: |
977 | kfree_skb(skb2); | 973 | kfree_skb(skb2); |
978 | out: | 974 | out: |
979 | return err; | 975 | /* this avoids a loop in nfnetlink. */ |
976 | return err == -EAGAIN ? -ENOBUFS : err; | ||
980 | } | 977 | } |
981 | 978 | ||
982 | #ifdef CONFIG_NF_NAT_NEEDED | 979 | #ifdef CONFIG_NF_NAT_NEEDED |
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index 80463507420e..c94237631077 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c | |||
@@ -1325,7 +1325,8 @@ static int __init xt_init(void) | |||
1325 | 1325 | ||
1326 | for_each_possible_cpu(i) { | 1326 | for_each_possible_cpu(i) { |
1327 | struct xt_info_lock *lock = &per_cpu(xt_info_locks, i); | 1327 | struct xt_info_lock *lock = &per_cpu(xt_info_locks, i); |
1328 | spin_lock_init(&lock->lock); | 1328 | |
1329 | seqlock_init(&lock->lock); | ||
1329 | lock->readers = 0; | 1330 | lock->readers = 0; |
1330 | } | 1331 | } |
1331 | 1332 | ||
diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c index fd95beb72f5d..1072b2c19d31 100644 --- a/net/phonet/af_phonet.c +++ b/net/phonet/af_phonet.c | |||
@@ -37,7 +37,7 @@ | |||
37 | /* Transport protocol registration */ | 37 | /* Transport protocol registration */ |
38 | static struct phonet_protocol *proto_tab[PHONET_NPROTO] __read_mostly; | 38 | static struct phonet_protocol *proto_tab[PHONET_NPROTO] __read_mostly; |
39 | 39 | ||
40 | static struct phonet_protocol *phonet_proto_get(int protocol) | 40 | static struct phonet_protocol *phonet_proto_get(unsigned int protocol) |
41 | { | 41 | { |
42 | struct phonet_protocol *pp; | 42 | struct phonet_protocol *pp; |
43 | 43 | ||
@@ -458,7 +458,7 @@ static struct packet_type phonet_packet_type __read_mostly = { | |||
458 | 458 | ||
459 | static DEFINE_MUTEX(proto_tab_lock); | 459 | static DEFINE_MUTEX(proto_tab_lock); |
460 | 460 | ||
461 | int __init_or_module phonet_proto_register(int protocol, | 461 | int __init_or_module phonet_proto_register(unsigned int protocol, |
462 | struct phonet_protocol *pp) | 462 | struct phonet_protocol *pp) |
463 | { | 463 | { |
464 | int err = 0; | 464 | int err = 0; |
@@ -481,7 +481,7 @@ int __init_or_module phonet_proto_register(int protocol, | |||
481 | } | 481 | } |
482 | EXPORT_SYMBOL(phonet_proto_register); | 482 | EXPORT_SYMBOL(phonet_proto_register); |
483 | 483 | ||
484 | void phonet_proto_unregister(int protocol, struct phonet_protocol *pp) | 484 | void phonet_proto_unregister(unsigned int protocol, struct phonet_protocol *pp) |
485 | { | 485 | { |
486 | mutex_lock(&proto_tab_lock); | 486 | mutex_lock(&proto_tab_lock); |
487 | BUG_ON(proto_tab[protocol] != pp); | 487 | BUG_ON(proto_tab[protocol] != pp); |
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c index 67dc7ce9b63a..83ddfc07e45d 100644 --- a/net/sched/act_csum.c +++ b/net/sched/act_csum.c | |||
@@ -508,8 +508,7 @@ static int tcf_csum(struct sk_buff *skb, | |||
508 | 508 | ||
509 | spin_lock(&p->tcf_lock); | 509 | spin_lock(&p->tcf_lock); |
510 | p->tcf_tm.lastuse = jiffies; | 510 | p->tcf_tm.lastuse = jiffies; |
511 | p->tcf_bstats.bytes += qdisc_pkt_len(skb); | 511 | bstats_update(&p->tcf_bstats, skb); |
512 | p->tcf_bstats.packets++; | ||
513 | action = p->tcf_action; | 512 | action = p->tcf_action; |
514 | update_flags = p->update_flags; | 513 | update_flags = p->update_flags; |
515 | spin_unlock(&p->tcf_lock); | 514 | spin_unlock(&p->tcf_lock); |
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c index 8daef9632255..c2a7c20e81c1 100644 --- a/net/sched/act_ipt.c +++ b/net/sched/act_ipt.c | |||
@@ -209,8 +209,7 @@ static int tcf_ipt(struct sk_buff *skb, struct tc_action *a, | |||
209 | spin_lock(&ipt->tcf_lock); | 209 | spin_lock(&ipt->tcf_lock); |
210 | 210 | ||
211 | ipt->tcf_tm.lastuse = jiffies; | 211 | ipt->tcf_tm.lastuse = jiffies; |
212 | ipt->tcf_bstats.bytes += qdisc_pkt_len(skb); | 212 | bstats_update(&ipt->tcf_bstats, skb); |
213 | ipt->tcf_bstats.packets++; | ||
214 | 213 | ||
215 | /* yes, we have to worry about both in and out dev | 214 | /* yes, we have to worry about both in and out dev |
216 | worry later - danger - this API seems to have changed | 215 | worry later - danger - this API seems to have changed |
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index 0c311be92827..d765067e99db 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c | |||
@@ -165,8 +165,7 @@ static int tcf_mirred(struct sk_buff *skb, struct tc_action *a, | |||
165 | 165 | ||
166 | spin_lock(&m->tcf_lock); | 166 | spin_lock(&m->tcf_lock); |
167 | m->tcf_tm.lastuse = jiffies; | 167 | m->tcf_tm.lastuse = jiffies; |
168 | m->tcf_bstats.bytes += qdisc_pkt_len(skb); | 168 | bstats_update(&m->tcf_bstats, skb); |
169 | m->tcf_bstats.packets++; | ||
170 | 169 | ||
171 | dev = m->tcfm_dev; | 170 | dev = m->tcfm_dev; |
172 | if (!dev) { | 171 | if (!dev) { |
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c index 186eb837e600..178a4bd7b7cb 100644 --- a/net/sched/act_nat.c +++ b/net/sched/act_nat.c | |||
@@ -125,8 +125,7 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a, | |||
125 | egress = p->flags & TCA_NAT_FLAG_EGRESS; | 125 | egress = p->flags & TCA_NAT_FLAG_EGRESS; |
126 | action = p->tcf_action; | 126 | action = p->tcf_action; |
127 | 127 | ||
128 | p->tcf_bstats.bytes += qdisc_pkt_len(skb); | 128 | bstats_update(&p->tcf_bstats, skb); |
129 | p->tcf_bstats.packets++; | ||
130 | 129 | ||
131 | spin_unlock(&p->tcf_lock); | 130 | spin_unlock(&p->tcf_lock); |
132 | 131 | ||
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index a0593c9640db..445bef716f77 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c | |||
@@ -187,8 +187,7 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a, | |||
187 | bad: | 187 | bad: |
188 | p->tcf_qstats.overlimits++; | 188 | p->tcf_qstats.overlimits++; |
189 | done: | 189 | done: |
190 | p->tcf_bstats.bytes += qdisc_pkt_len(skb); | 190 | bstats_update(&p->tcf_bstats, skb); |
191 | p->tcf_bstats.packets++; | ||
192 | spin_unlock(&p->tcf_lock); | 191 | spin_unlock(&p->tcf_lock); |
193 | return p->tcf_action; | 192 | return p->tcf_action; |
194 | } | 193 | } |
diff --git a/net/sched/act_police.c b/net/sched/act_police.c index 7ebf7439b478..e2f08b1e2e58 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c | |||
@@ -298,8 +298,7 @@ static int tcf_act_police(struct sk_buff *skb, struct tc_action *a, | |||
298 | 298 | ||
299 | spin_lock(&police->tcf_lock); | 299 | spin_lock(&police->tcf_lock); |
300 | 300 | ||
301 | police->tcf_bstats.bytes += qdisc_pkt_len(skb); | 301 | bstats_update(&police->tcf_bstats, skb); |
302 | police->tcf_bstats.packets++; | ||
303 | 302 | ||
304 | if (police->tcfp_ewma_rate && | 303 | if (police->tcfp_ewma_rate && |
305 | police->tcf_rate_est.bps >= police->tcfp_ewma_rate) { | 304 | police->tcf_rate_est.bps >= police->tcfp_ewma_rate) { |
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c index 97e84f3ee775..7287cff7af3e 100644 --- a/net/sched/act_simple.c +++ b/net/sched/act_simple.c | |||
@@ -42,8 +42,7 @@ static int tcf_simp(struct sk_buff *skb, struct tc_action *a, struct tcf_result | |||
42 | 42 | ||
43 | spin_lock(&d->tcf_lock); | 43 | spin_lock(&d->tcf_lock); |
44 | d->tcf_tm.lastuse = jiffies; | 44 | d->tcf_tm.lastuse = jiffies; |
45 | d->tcf_bstats.bytes += qdisc_pkt_len(skb); | 45 | bstats_update(&d->tcf_bstats, skb); |
46 | d->tcf_bstats.packets++; | ||
47 | 46 | ||
48 | /* print policy string followed by _ then packet count | 47 | /* print policy string followed by _ then packet count |
49 | * Example if this was the 3rd packet and the string was "hello" | 48 | * Example if this was the 3rd packet and the string was "hello" |
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c index 66cbf4eb8855..836f5fee9e58 100644 --- a/net/sched/act_skbedit.c +++ b/net/sched/act_skbedit.c | |||
@@ -46,8 +46,7 @@ static int tcf_skbedit(struct sk_buff *skb, struct tc_action *a, | |||
46 | 46 | ||
47 | spin_lock(&d->tcf_lock); | 47 | spin_lock(&d->tcf_lock); |
48 | d->tcf_tm.lastuse = jiffies; | 48 | d->tcf_tm.lastuse = jiffies; |
49 | d->tcf_bstats.bytes += qdisc_pkt_len(skb); | 49 | bstats_update(&d->tcf_bstats, skb); |
50 | d->tcf_bstats.packets++; | ||
51 | 50 | ||
52 | if (d->flags & SKBEDIT_F_PRIORITY) | 51 | if (d->flags & SKBEDIT_F_PRIORITY) |
53 | skb->priority = d->priority; | 52 | skb->priority = d->priority; |
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c index 282540778aa8..943d733409d0 100644 --- a/net/sched/sch_atm.c +++ b/net/sched/sch_atm.c | |||
@@ -422,10 +422,8 @@ drop: __maybe_unused | |||
422 | } | 422 | } |
423 | return ret; | 423 | return ret; |
424 | } | 424 | } |
425 | sch->bstats.bytes += qdisc_pkt_len(skb); | 425 | qdisc_bstats_update(sch, skb); |
426 | sch->bstats.packets++; | 426 | bstats_update(&flow->bstats, skb); |
427 | flow->bstats.bytes += qdisc_pkt_len(skb); | ||
428 | flow->bstats.packets++; | ||
429 | /* | 427 | /* |
430 | * Okay, this may seem weird. We pretend we've dropped the packet if | 428 | * Okay, this may seem weird. We pretend we've dropped the packet if |
431 | * it goes via ATM. The reason for this is that the outer qdisc | 429 | * it goes via ATM. The reason for this is that the outer qdisc |
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index eb7631590865..c80d1c210c5d 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c | |||
@@ -390,8 +390,7 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
390 | ret = qdisc_enqueue(skb, cl->q); | 390 | ret = qdisc_enqueue(skb, cl->q); |
391 | if (ret == NET_XMIT_SUCCESS) { | 391 | if (ret == NET_XMIT_SUCCESS) { |
392 | sch->q.qlen++; | 392 | sch->q.qlen++; |
393 | sch->bstats.packets++; | 393 | qdisc_bstats_update(sch, skb); |
394 | sch->bstats.bytes += qdisc_pkt_len(skb); | ||
395 | cbq_mark_toplevel(q, cl); | 394 | cbq_mark_toplevel(q, cl); |
396 | if (!cl->next_alive) | 395 | if (!cl->next_alive) |
397 | cbq_activate_class(cl); | 396 | cbq_activate_class(cl); |
@@ -650,8 +649,7 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child) | |||
650 | ret = qdisc_enqueue(skb, cl->q); | 649 | ret = qdisc_enqueue(skb, cl->q); |
651 | if (ret == NET_XMIT_SUCCESS) { | 650 | if (ret == NET_XMIT_SUCCESS) { |
652 | sch->q.qlen++; | 651 | sch->q.qlen++; |
653 | sch->bstats.packets++; | 652 | qdisc_bstats_update(sch, skb); |
654 | sch->bstats.bytes += qdisc_pkt_len(skb); | ||
655 | if (!cl->next_alive) | 653 | if (!cl->next_alive) |
656 | cbq_activate_class(cl); | 654 | cbq_activate_class(cl); |
657 | return 0; | 655 | return 0; |
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c index aa8b5313f8cf..de55e642eafc 100644 --- a/net/sched/sch_drr.c +++ b/net/sched/sch_drr.c | |||
@@ -351,7 +351,6 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
351 | { | 351 | { |
352 | struct drr_sched *q = qdisc_priv(sch); | 352 | struct drr_sched *q = qdisc_priv(sch); |
353 | struct drr_class *cl; | 353 | struct drr_class *cl; |
354 | unsigned int len; | ||
355 | int err; | 354 | int err; |
356 | 355 | ||
357 | cl = drr_classify(skb, sch, &err); | 356 | cl = drr_classify(skb, sch, &err); |
@@ -362,7 +361,6 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
362 | return err; | 361 | return err; |
363 | } | 362 | } |
364 | 363 | ||
365 | len = qdisc_pkt_len(skb); | ||
366 | err = qdisc_enqueue(skb, cl->qdisc); | 364 | err = qdisc_enqueue(skb, cl->qdisc); |
367 | if (unlikely(err != NET_XMIT_SUCCESS)) { | 365 | if (unlikely(err != NET_XMIT_SUCCESS)) { |
368 | if (net_xmit_drop_count(err)) { | 366 | if (net_xmit_drop_count(err)) { |
@@ -377,10 +375,8 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
377 | cl->deficit = cl->quantum; | 375 | cl->deficit = cl->quantum; |
378 | } | 376 | } |
379 | 377 | ||
380 | cl->bstats.packets++; | 378 | bstats_update(&cl->bstats, skb); |
381 | cl->bstats.bytes += len; | 379 | qdisc_bstats_update(sch, skb); |
382 | sch->bstats.packets++; | ||
383 | sch->bstats.bytes += len; | ||
384 | 380 | ||
385 | sch->q.qlen++; | 381 | sch->q.qlen++; |
386 | return err; | 382 | return err; |
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c index 1d295d62bb5c..60f4bdd4408e 100644 --- a/net/sched/sch_dsmark.c +++ b/net/sched/sch_dsmark.c | |||
@@ -260,8 +260,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
260 | return err; | 260 | return err; |
261 | } | 261 | } |
262 | 262 | ||
263 | sch->bstats.bytes += qdisc_pkt_len(skb); | 263 | qdisc_bstats_update(sch, skb); |
264 | sch->bstats.packets++; | ||
265 | sch->q.qlen++; | 264 | sch->q.qlen++; |
266 | 265 | ||
267 | return NET_XMIT_SUCCESS; | 266 | return NET_XMIT_SUCCESS; |
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index 069c62b7bb36..2e45791d4f6c 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c | |||
@@ -1599,10 +1599,8 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
1599 | if (cl->qdisc->q.qlen == 1) | 1599 | if (cl->qdisc->q.qlen == 1) |
1600 | set_active(cl, qdisc_pkt_len(skb)); | 1600 | set_active(cl, qdisc_pkt_len(skb)); |
1601 | 1601 | ||
1602 | cl->bstats.packets++; | 1602 | bstats_update(&cl->bstats, skb); |
1603 | cl->bstats.bytes += qdisc_pkt_len(skb); | 1603 | qdisc_bstats_update(sch, skb); |
1604 | sch->bstats.packets++; | ||
1605 | sch->bstats.bytes += qdisc_pkt_len(skb); | ||
1606 | sch->q.qlen++; | 1604 | sch->q.qlen++; |
1607 | 1605 | ||
1608 | return NET_XMIT_SUCCESS; | 1606 | return NET_XMIT_SUCCESS; |
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 01b519d6c52d..984c1b0c6836 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c | |||
@@ -569,15 +569,12 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
569 | } | 569 | } |
570 | return ret; | 570 | return ret; |
571 | } else { | 571 | } else { |
572 | cl->bstats.packets += | 572 | bstats_update(&cl->bstats, skb); |
573 | skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1; | ||
574 | cl->bstats.bytes += qdisc_pkt_len(skb); | ||
575 | htb_activate(q, cl); | 573 | htb_activate(q, cl); |
576 | } | 574 | } |
577 | 575 | ||
578 | sch->q.qlen++; | 576 | sch->q.qlen++; |
579 | sch->bstats.packets += skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1; | 577 | qdisc_bstats_update(sch, skb); |
580 | sch->bstats.bytes += qdisc_pkt_len(skb); | ||
581 | return NET_XMIT_SUCCESS; | 578 | return NET_XMIT_SUCCESS; |
582 | } | 579 | } |
583 | 580 | ||
@@ -648,12 +645,10 @@ static void htb_charge_class(struct htb_sched *q, struct htb_class *cl, | |||
648 | htb_add_to_wait_tree(q, cl, diff); | 645 | htb_add_to_wait_tree(q, cl, diff); |
649 | } | 646 | } |
650 | 647 | ||
651 | /* update byte stats except for leaves which are already updated */ | 648 | /* update basic stats except for leaves which are already updated */ |
652 | if (cl->level) { | 649 | if (cl->level) |
653 | cl->bstats.bytes += bytes; | 650 | bstats_update(&cl->bstats, skb); |
654 | cl->bstats.packets += skb_is_gso(skb)? | 651 | |
655 | skb_shinfo(skb)->gso_segs:1; | ||
656 | } | ||
657 | cl = cl->parent; | 652 | cl = cl->parent; |
658 | } | 653 | } |
659 | } | 654 | } |
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c index f10e34a68445..bce1665239b8 100644 --- a/net/sched/sch_ingress.c +++ b/net/sched/sch_ingress.c | |||
@@ -63,8 +63,7 @@ static int ingress_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
63 | 63 | ||
64 | result = tc_classify(skb, p->filter_list, &res); | 64 | result = tc_classify(skb, p->filter_list, &res); |
65 | 65 | ||
66 | sch->bstats.packets++; | 66 | qdisc_bstats_update(sch, skb); |
67 | sch->bstats.bytes += qdisc_pkt_len(skb); | ||
68 | switch (result) { | 67 | switch (result) { |
69 | case TC_ACT_SHOT: | 68 | case TC_ACT_SHOT: |
70 | result = TC_ACT_SHOT; | 69 | result = TC_ACT_SHOT; |
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c index 32690deab5d0..21f13da24763 100644 --- a/net/sched/sch_multiq.c +++ b/net/sched/sch_multiq.c | |||
@@ -83,8 +83,7 @@ multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
83 | 83 | ||
84 | ret = qdisc_enqueue(skb, qdisc); | 84 | ret = qdisc_enqueue(skb, qdisc); |
85 | if (ret == NET_XMIT_SUCCESS) { | 85 | if (ret == NET_XMIT_SUCCESS) { |
86 | sch->bstats.bytes += qdisc_pkt_len(skb); | 86 | qdisc_bstats_update(sch, skb); |
87 | sch->bstats.packets++; | ||
88 | sch->q.qlen++; | 87 | sch->q.qlen++; |
89 | return NET_XMIT_SUCCESS; | 88 | return NET_XMIT_SUCCESS; |
90 | } | 89 | } |
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index e5593c083a78..1c4bce863479 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c | |||
@@ -240,8 +240,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
240 | 240 | ||
241 | if (likely(ret == NET_XMIT_SUCCESS)) { | 241 | if (likely(ret == NET_XMIT_SUCCESS)) { |
242 | sch->q.qlen++; | 242 | sch->q.qlen++; |
243 | sch->bstats.bytes += qdisc_pkt_len(skb); | 243 | qdisc_bstats_update(sch, skb); |
244 | sch->bstats.packets++; | ||
245 | } else if (net_xmit_drop_count(ret)) { | 244 | } else if (net_xmit_drop_count(ret)) { |
246 | sch->qstats.drops++; | 245 | sch->qstats.drops++; |
247 | } | 246 | } |
@@ -477,8 +476,7 @@ static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch) | |||
477 | __skb_queue_after(list, skb, nskb); | 476 | __skb_queue_after(list, skb, nskb); |
478 | 477 | ||
479 | sch->qstats.backlog += qdisc_pkt_len(nskb); | 478 | sch->qstats.backlog += qdisc_pkt_len(nskb); |
480 | sch->bstats.bytes += qdisc_pkt_len(nskb); | 479 | qdisc_bstats_update(sch, nskb); |
481 | sch->bstats.packets++; | ||
482 | 480 | ||
483 | return NET_XMIT_SUCCESS; | 481 | return NET_XMIT_SUCCESS; |
484 | } | 482 | } |
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c index b1c95bce33ce..966158d49dd1 100644 --- a/net/sched/sch_prio.c +++ b/net/sched/sch_prio.c | |||
@@ -84,8 +84,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
84 | 84 | ||
85 | ret = qdisc_enqueue(skb, qdisc); | 85 | ret = qdisc_enqueue(skb, qdisc); |
86 | if (ret == NET_XMIT_SUCCESS) { | 86 | if (ret == NET_XMIT_SUCCESS) { |
87 | sch->bstats.bytes += qdisc_pkt_len(skb); | 87 | qdisc_bstats_update(sch, skb); |
88 | sch->bstats.packets++; | ||
89 | sch->q.qlen++; | 88 | sch->q.qlen++; |
90 | return NET_XMIT_SUCCESS; | 89 | return NET_XMIT_SUCCESS; |
91 | } | 90 | } |
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c index a67ba3c5a0cc..a6009c5a2c97 100644 --- a/net/sched/sch_red.c +++ b/net/sched/sch_red.c | |||
@@ -94,8 +94,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch) | |||
94 | 94 | ||
95 | ret = qdisc_enqueue(skb, child); | 95 | ret = qdisc_enqueue(skb, child); |
96 | if (likely(ret == NET_XMIT_SUCCESS)) { | 96 | if (likely(ret == NET_XMIT_SUCCESS)) { |
97 | sch->bstats.bytes += qdisc_pkt_len(skb); | 97 | qdisc_bstats_update(sch, skb); |
98 | sch->bstats.packets++; | ||
99 | sch->q.qlen++; | 98 | sch->q.qlen++; |
100 | } else if (net_xmit_drop_count(ret)) { | 99 | } else if (net_xmit_drop_count(ret)) { |
101 | q->stats.pdrop++; | 100 | q->stats.pdrop++; |
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index d54ac94066c2..239ec53a634d 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c | |||
@@ -403,8 +403,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
403 | slot->allot = q->scaled_quantum; | 403 | slot->allot = q->scaled_quantum; |
404 | } | 404 | } |
405 | if (++sch->q.qlen <= q->limit) { | 405 | if (++sch->q.qlen <= q->limit) { |
406 | sch->bstats.bytes += qdisc_pkt_len(skb); | 406 | qdisc_bstats_update(sch, skb); |
407 | sch->bstats.packets++; | ||
408 | return NET_XMIT_SUCCESS; | 407 | return NET_XMIT_SUCCESS; |
409 | } | 408 | } |
410 | 409 | ||
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c index 641a30d64635..77565e721811 100644 --- a/net/sched/sch_tbf.c +++ b/net/sched/sch_tbf.c | |||
@@ -134,8 +134,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch) | |||
134 | } | 134 | } |
135 | 135 | ||
136 | sch->q.qlen++; | 136 | sch->q.qlen++; |
137 | sch->bstats.bytes += qdisc_pkt_len(skb); | 137 | qdisc_bstats_update(sch, skb); |
138 | sch->bstats.packets++; | ||
139 | return NET_XMIT_SUCCESS; | 138 | return NET_XMIT_SUCCESS; |
140 | } | 139 | } |
141 | 140 | ||
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c index 106479a7c94a..84ce48eadff4 100644 --- a/net/sched/sch_teql.c +++ b/net/sched/sch_teql.c | |||
@@ -59,6 +59,10 @@ struct teql_master | |||
59 | struct net_device *dev; | 59 | struct net_device *dev; |
60 | struct Qdisc *slaves; | 60 | struct Qdisc *slaves; |
61 | struct list_head master_list; | 61 | struct list_head master_list; |
62 | unsigned long tx_bytes; | ||
63 | unsigned long tx_packets; | ||
64 | unsigned long tx_errors; | ||
65 | unsigned long tx_dropped; | ||
62 | }; | 66 | }; |
63 | 67 | ||
64 | struct teql_sched_data | 68 | struct teql_sched_data |
@@ -83,8 +87,7 @@ teql_enqueue(struct sk_buff *skb, struct Qdisc* sch) | |||
83 | 87 | ||
84 | if (q->q.qlen < dev->tx_queue_len) { | 88 | if (q->q.qlen < dev->tx_queue_len) { |
85 | __skb_queue_tail(&q->q, skb); | 89 | __skb_queue_tail(&q->q, skb); |
86 | sch->bstats.bytes += qdisc_pkt_len(skb); | 90 | qdisc_bstats_update(sch, skb); |
87 | sch->bstats.packets++; | ||
88 | return NET_XMIT_SUCCESS; | 91 | return NET_XMIT_SUCCESS; |
89 | } | 92 | } |
90 | 93 | ||
@@ -275,7 +278,6 @@ static inline int teql_resolve(struct sk_buff *skb, | |||
275 | static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev) | 278 | static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev) |
276 | { | 279 | { |
277 | struct teql_master *master = netdev_priv(dev); | 280 | struct teql_master *master = netdev_priv(dev); |
278 | struct netdev_queue *txq = netdev_get_tx_queue(dev, 0); | ||
279 | struct Qdisc *start, *q; | 281 | struct Qdisc *start, *q; |
280 | int busy; | 282 | int busy; |
281 | int nores; | 283 | int nores; |
@@ -315,8 +317,8 @@ restart: | |||
315 | __netif_tx_unlock(slave_txq); | 317 | __netif_tx_unlock(slave_txq); |
316 | master->slaves = NEXT_SLAVE(q); | 318 | master->slaves = NEXT_SLAVE(q); |
317 | netif_wake_queue(dev); | 319 | netif_wake_queue(dev); |
318 | txq->tx_packets++; | 320 | master->tx_packets++; |
319 | txq->tx_bytes += length; | 321 | master->tx_bytes += length; |
320 | return NETDEV_TX_OK; | 322 | return NETDEV_TX_OK; |
321 | } | 323 | } |
322 | __netif_tx_unlock(slave_txq); | 324 | __netif_tx_unlock(slave_txq); |
@@ -343,10 +345,10 @@ restart: | |||
343 | netif_stop_queue(dev); | 345 | netif_stop_queue(dev); |
344 | return NETDEV_TX_BUSY; | 346 | return NETDEV_TX_BUSY; |
345 | } | 347 | } |
346 | dev->stats.tx_errors++; | 348 | master->tx_errors++; |
347 | 349 | ||
348 | drop: | 350 | drop: |
349 | txq->tx_dropped++; | 351 | master->tx_dropped++; |
350 | dev_kfree_skb(skb); | 352 | dev_kfree_skb(skb); |
351 | return NETDEV_TX_OK; | 353 | return NETDEV_TX_OK; |
352 | } | 354 | } |
@@ -399,6 +401,18 @@ static int teql_master_close(struct net_device *dev) | |||
399 | return 0; | 401 | return 0; |
400 | } | 402 | } |
401 | 403 | ||
404 | static struct rtnl_link_stats64 *teql_master_stats64(struct net_device *dev, | ||
405 | struct rtnl_link_stats64 *stats) | ||
406 | { | ||
407 | struct teql_master *m = netdev_priv(dev); | ||
408 | |||
409 | stats->tx_packets = m->tx_packets; | ||
410 | stats->tx_bytes = m->tx_bytes; | ||
411 | stats->tx_errors = m->tx_errors; | ||
412 | stats->tx_dropped = m->tx_dropped; | ||
413 | return stats; | ||
414 | } | ||
415 | |||
402 | static int teql_master_mtu(struct net_device *dev, int new_mtu) | 416 | static int teql_master_mtu(struct net_device *dev, int new_mtu) |
403 | { | 417 | { |
404 | struct teql_master *m = netdev_priv(dev); | 418 | struct teql_master *m = netdev_priv(dev); |
@@ -423,6 +437,7 @@ static const struct net_device_ops teql_netdev_ops = { | |||
423 | .ndo_open = teql_master_open, | 437 | .ndo_open = teql_master_open, |
424 | .ndo_stop = teql_master_close, | 438 | .ndo_stop = teql_master_close, |
425 | .ndo_start_xmit = teql_master_xmit, | 439 | .ndo_start_xmit = teql_master_xmit, |
440 | .ndo_get_stats64 = teql_master_stats64, | ||
426 | .ndo_change_mtu = teql_master_mtu, | 441 | .ndo_change_mtu = teql_master_mtu, |
427 | }; | 442 | }; |
428 | 443 | ||
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 6a8da81ff66f..d5e1e0b08890 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <net/sock.h> | 26 | #include <net/sock.h> |
27 | #include <net/xfrm.h> | 27 | #include <net/xfrm.h> |
28 | #include <net/netlink.h> | 28 | #include <net/netlink.h> |
29 | #include <net/ah.h> | ||
29 | #include <asm/uaccess.h> | 30 | #include <asm/uaccess.h> |
30 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | 31 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) |
31 | #include <linux/in6.h> | 32 | #include <linux/in6.h> |
@@ -302,7 +303,8 @@ static int attach_auth_trunc(struct xfrm_algo_auth **algpp, u8 *props, | |||
302 | algo = xfrm_aalg_get_byname(ualg->alg_name, 1); | 303 | algo = xfrm_aalg_get_byname(ualg->alg_name, 1); |
303 | if (!algo) | 304 | if (!algo) |
304 | return -ENOSYS; | 305 | return -ENOSYS; |
305 | if (ualg->alg_trunc_len > algo->uinfo.auth.icv_fullbits) | 306 | if ((ualg->alg_trunc_len / 8) > MAX_AH_AUTH_LEN || |
307 | ualg->alg_trunc_len > algo->uinfo.auth.icv_fullbits) | ||
306 | return -EINVAL; | 308 | return -EINVAL; |
307 | *props = algo->desc.sadb_alg_id; | 309 | *props = algo->desc.sadb_alg_id; |
308 | 310 | ||