diff options
author | Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com> | 2009-06-04 07:10:35 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2009-06-07 08:20:16 -0400 |
commit | da4dd0f7ca3fa667b7bba5fd34adceaf3fb84a9b (patch) | |
tree | 97c8c08ffe2d6f23b6ea87f539b9326c7b005b5a /drivers/net/ixgbe/ixgbe_ethtool.c | |
parent | 1479ad4fbfbc801898dce1ac2d4d44f0c774ecc5 (diff) |
ixgbe: Add ethtool offline test support
This patch adds support for the ethtool internal test engine.
Signed-off-by: Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ixgbe/ixgbe_ethtool.c')
-rw-r--r-- | drivers/net/ixgbe/ixgbe_ethtool.c | 824 |
1 file changed, 824 insertions, 0 deletions
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c index 35255b8e90b7..583cc5a3c4f9 100644 --- a/drivers/net/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ixgbe/ixgbe_ethtool.c | |||
@@ -118,6 +118,13 @@ static struct ixgbe_stats ixgbe_gstrings_stats[] = { | |||
118 | IXGBE_PB_STATS_LEN + \ | 118 | IXGBE_PB_STATS_LEN + \ |
119 | IXGBE_QUEUE_STATS_LEN) | 119 | IXGBE_QUEUE_STATS_LEN) |
120 | 120 | ||
121 | static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = { | ||
122 | "Register test (offline)", "Eeprom test (offline)", | ||
123 | "Interrupt test (offline)", "Loopback test (offline)", | ||
124 | "Link test (on/offline)" | ||
125 | }; | ||
126 | #define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN | ||
127 | |||
121 | static int ixgbe_get_settings(struct net_device *netdev, | 128 | static int ixgbe_get_settings(struct net_device *netdev, |
122 | struct ethtool_cmd *ecmd) | 129 | struct ethtool_cmd *ecmd) |
123 | { | 130 | { |
@@ -743,6 +750,7 @@ static void ixgbe_get_drvinfo(struct net_device *netdev, | |||
743 | strncpy(drvinfo->fw_version, firmware_version, 32); | 750 | strncpy(drvinfo->fw_version, firmware_version, 32); |
744 | strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); | 751 | strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); |
745 | drvinfo->n_stats = IXGBE_STATS_LEN; | 752 | drvinfo->n_stats = IXGBE_STATS_LEN; |
753 | drvinfo->testinfo_len = IXGBE_TEST_LEN; | ||
746 | drvinfo->regdump_len = ixgbe_get_regs_len(netdev); | 754 | drvinfo->regdump_len = ixgbe_get_regs_len(netdev); |
747 | } | 755 | } |
748 | 756 | ||
@@ -884,6 +892,8 @@ err_setup: | |||
884 | static int ixgbe_get_sset_count(struct net_device *netdev, int sset) | 892 | static int ixgbe_get_sset_count(struct net_device *netdev, int sset) |
885 | { | 893 | { |
886 | switch (sset) { | 894 | switch (sset) { |
895 | case ETH_SS_TEST: | ||
896 | return IXGBE_TEST_LEN; | ||
887 | case ETH_SS_STATS: | 897 | case ETH_SS_STATS: |
888 | return IXGBE_STATS_LEN; | 898 | return IXGBE_STATS_LEN; |
889 | default: | 899 | default: |
@@ -938,6 +948,10 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, | |||
938 | int i; | 948 | int i; |
939 | 949 | ||
940 | switch (stringset) { | 950 | switch (stringset) { |
951 | case ETH_SS_TEST: | ||
952 | memcpy(data, *ixgbe_gstrings_test, | ||
953 | IXGBE_TEST_LEN * ETH_GSTRING_LEN); | ||
954 | break; | ||
941 | case ETH_SS_STATS: | 955 | case ETH_SS_STATS: |
942 | for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { | 956 | for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { |
943 | memcpy(p, ixgbe_gstrings_stats[i].stat_string, | 957 | memcpy(p, ixgbe_gstrings_stats[i].stat_string, |
@@ -975,6 +989,815 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, | |||
975 | } | 989 | } |
976 | } | 990 | } |
977 | 991 | ||
/* Link self-test: query the MAC for current link status.
 * Sets *data to 0 (pass) when link is up, 1 (fail) when it is down,
 * and returns that same value so the caller can test the result
 * directly.
 */
static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data)
{
	struct ixgbe_hw *hw = &adapter->hw;
	bool link_up;
	u32 link_speed = 0;
	*data = 0;

	/* NOTE(review): the final bool presumably asks check_link() to
	 * wait for a pending link-up to complete — confirm against the
	 * ops definition in ixgbe_common/ixgbe_82599. */
	hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
	if (link_up)
		return *data;
	else
		*data = 1;
	return *data;
}
1006 | |||
/* ethtool register test data */
struct ixgbe_reg_test {
	u16 reg;        /* offset of the (first) register under test */
	u8 array_len;   /* number of array elements (1 = single register) */
	u8 test_type;   /* one of the *_TEST constants below */
	u32 mask;       /* bits expected to be readable/writable */
	u32 write;      /* bits the test attempts to write */
};

/* In the hardware, registers are laid out either singly, in arrays
 * spaced 0x40 bytes apart, or in contiguous tables.  We assume
 * most tests take place on arrays or single registers (handled
 * as a single-element array) and special-case the tables.
 * Table tests are always pattern tests.
 *
 * We also make provision for some required setup steps by specifying
 * registers to be written without any read-back testing.
 */

#define PATTERN_TEST	1	/* write patterns, verify read-back under mask */
#define SET_READ_TEST	2	/* write once, verify read-back under mask */
#define WRITE_NO_TEST	3	/* setup write only, no verification */
#define TABLE32_TEST	4	/* pattern test over a 32-bit table (4-byte stride) */
#define TABLE64_TEST_LO	5	/* pattern test, low dword of a 64-bit table */
#define TABLE64_TEST_HI	6	/* pattern test, high dword of a 64-bit table */
/* default 82599 register test */
static struct ixgbe_reg_test reg_test_82599[] = {
	{ IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
	{ IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
	{ IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	/* enable Rx queues before testing RDT, then disable them again */
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
	{ IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
	{ IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF },
	{ IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	/* zero reg terminates the table walk in ixgbe_reg_test() */
	{ 0, 0, 0, 0 }
};
1056 | |||
/* default 82598 register test */
static struct ixgbe_reg_test reg_test_82598[] = {
	{ IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
	{ IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	/* Enable all four RX queues before testing. */
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
	/* RDH is read-only for 82598, only test RDT. */
	{ IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TIPG, 1, PATTERN_TEST, 0x000000FF, 0x000000FF },
	{ IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	{ IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000003, 0x00000003 },
	{ IXGBE_DTXCTL, 1, SET_READ_TEST, 0x00000005, 0x00000005 },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF },
	{ IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	/* zero reg terminates the table walk in ixgbe_reg_test() */
	{ 0, 0, 0, 0 }
};
1084 | |||
/* Write each of four bit patterns (masked by W) to register R, read it
 * back, and verify the masked result.  The original register value is
 * restored after every pattern.
 *
 * NOTE(review): this macro contains a hidden `return 1` and writes to
 * the local `*data` of the enclosing function — it may only be expanded
 * inside a function with an `int` return and a `u64 *data` parameter
 * (ixgbe_reg_test()).
 */
#define REG_PATTERN_TEST(R, M, W)                                             \
{                                                                             \
	u32 pat, val, before;                                                 \
	const u32 _test[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; \
	for (pat = 0; pat < ARRAY_SIZE(_test); pat++) {                       \
		before = readl(adapter->hw.hw_addr + R);                      \
		writel((_test[pat] & W), (adapter->hw.hw_addr + R));          \
		val = readl(adapter->hw.hw_addr + R);                         \
		if (val != (_test[pat] & W & M)) {                            \
			DPRINTK(DRV, ERR, "pattern test reg %04X failed: got "\
			        "0x%08X expected 0x%08X\n",                   \
			        R, val, (_test[pat] & W & M));                \
			*data = R;                                            \
			writel(before, adapter->hw.hw_addr + R);              \
			return 1;                                             \
		}                                                             \
		writel(before, adapter->hw.hw_addr + R);                      \
	}                                                                     \
}
1104 | |||
/* Write value W (masked by M) to register R once and verify the masked
 * read-back matches; the original value is restored afterwards.
 *
 * NOTE(review): like REG_PATTERN_TEST, this macro hides a `return 1`
 * and a `*data` store, so it is only valid inside ixgbe_reg_test().
 */
#define REG_SET_AND_CHECK(R, M, W)                                            \
{                                                                             \
	u32 val, before;                                                      \
	before = readl(adapter->hw.hw_addr + R);                              \
	writel((W & M), (adapter->hw.hw_addr + R));                           \
	val = readl(adapter->hw.hw_addr + R);                                 \
	if ((W & M) != (val & M)) {                                           \
		DPRINTK(DRV, ERR, "set/check reg %04X test failed: got 0x%08X "\
		        "expected 0x%08X\n", R, (val & M), (W & M));           \
		*data = R;                                                    \
		writel(before, (adapter->hw.hw_addr + R));                    \
		return 1;                                                     \
	}                                                                     \
	writel(before, (adapter->hw.hw_addr + R));                            \
}
1120 | |||
/* Register self-test: verify the STATUS register toggles correctly,
 * then walk the MAC-specific register test table, exercising every
 * entry according to its test_type and array stride.
 *
 * Returns 0 and *data = 0 on success.  On failure returns 1 with
 * *data holding the failing register offset (or 1 for the STATUS
 * register test); the failure exits come from the hidden `return 1`
 * inside REG_PATTERN_TEST / REG_SET_AND_CHECK.
 */
static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
{
	struct ixgbe_reg_test *test;
	u32 value, before, after;
	u32 i, toggle;

	/* pick the per-MAC STATUS toggle mask and register table */
	if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
		toggle = 0x7FFFF30F;
		test = reg_test_82599;
	} else {
		toggle = 0x7FFFF3FF;
		test = reg_test_82598;
	}

	/*
	 * Because the status register is such a special case,
	 * we handle it separately from the rest of the register
	 * tests.  Some bits are read-only, some toggle, and some
	 * are writeable on newer MACs.
	 */
	before = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS);
	value = (IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, toggle);
	after = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle;
	if (value != after) {
		DPRINTK(DRV, ERR, "failed STATUS register test got: "
		        "0x%08X expected: 0x%08X\n", after, value);
		*data = 1;
		return 1;
	}
	/* restore previous status */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, before);

	/*
	 * Perform the remainder of the register test, looping through
	 * the test table until we either fail or reach the null entry.
	 */
	while (test->reg) {
		for (i = 0; i < test->array_len; i++) {
			switch (test->test_type) {
			case PATTERN_TEST:
				/* array registers are spaced 0x40 apart */
				REG_PATTERN_TEST(test->reg + (i * 0x40),
				                 test->mask,
				                 test->write);
				break;
			case SET_READ_TEST:
				REG_SET_AND_CHECK(test->reg + (i * 0x40),
				                  test->mask,
				                  test->write);
				break;
			case WRITE_NO_TEST:
				/* setup-only write, no verification */
				writel(test->write,
				       (adapter->hw.hw_addr + test->reg)
				       + (i * 0x40));
				break;
			case TABLE32_TEST:
				/* 32-bit table entries are contiguous */
				REG_PATTERN_TEST(test->reg + (i * 4),
				                 test->mask,
				                 test->write);
				break;
			case TABLE64_TEST_LO:
				/* low dword of each 8-byte table entry */
				REG_PATTERN_TEST(test->reg + (i * 8),
				                 test->mask,
				                 test->write);
				break;
			case TABLE64_TEST_HI:
				/* high dword of each 8-byte table entry */
				REG_PATTERN_TEST((test->reg + 4) + (i * 8),
				                 test->mask,
				                 test->write);
				break;
			}
		}
		test++;
	}

	*data = 0;
	return 0;
}
1199 | |||
1200 | static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data) | ||
1201 | { | ||
1202 | struct ixgbe_hw *hw = &adapter->hw; | ||
1203 | if (hw->eeprom.ops.validate_checksum(hw, NULL)) | ||
1204 | *data = 1; | ||
1205 | else | ||
1206 | *data = 0; | ||
1207 | return *data; | ||
1208 | } | ||
1209 | |||
/* Interrupt handler installed only for the duration of the interrupt
 * self-test.  Accumulates every cause bit seen in adapter->test_icr so
 * ixgbe_intr_test() can check which interrupts actually fired.
 * Reading EICR also clears the asserted cause bits in hardware.
 */
static irqreturn_t ixgbe_test_intr(int irq, void *data)
{
	struct net_device *netdev = (struct net_device *) data;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR);

	return IRQ_HANDLED;
}
1219 | |||
/* Interrupt self-test: temporarily install ixgbe_test_intr() on the
 * device's legacy/MSI vector, then for each of the first ten EICS bits
 * verify that (a) a masked interrupt does NOT fire, (b) an unmasked,
 * forced interrupt DOES fire, and (c) forcing the other bits does not
 * fire spuriously (unshared IRQ only).
 *
 * *data encodes the failure mode: 0 pass, 1 request_irq failed,
 * 3/4/5 the sub-checks above.  Returns *data (or -1 on request_irq
 * failure).  MSI-X is deliberately not tested yet.
 */
static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
{
	struct net_device *netdev = adapter->netdev;
	/* NOTE(review): shared_int is a bool stored in a u32 — works, but
	 * a bool would be clearer */
	u32 mask, i = 0, shared_int = true;
	u32 irq = adapter->pdev->irq;

	*data = 0;

	/* Hook up test interrupt handler just for this test */
	if (adapter->msix_entries) {
		/* NOTE: we don't test MSI-X interrupts here, yet */
		return 0;
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		/* MSI vector is exclusively ours */
		shared_int = false;
		if (request_irq(irq, &ixgbe_test_intr, 0, netdev->name,
		                netdev)) {
			*data = 1;
			return -1;
		}
	} else if (!request_irq(irq, &ixgbe_test_intr, IRQF_PROBE_SHARED,
	                        netdev->name, netdev)) {
		/* probe succeeded: nobody else is on this line */
		shared_int = false;
	} else if (request_irq(irq, &ixgbe_test_intr, IRQF_SHARED,
	                       netdev->name, netdev)) {
		/* could not attach even as a shared handler */
		*data = 1;
		return -1;
	}
	DPRINTK(HW, INFO, "testing %s interrupt\n",
	        (shared_int ? "shared" : "unshared"));

	/* Disable all the interrupts */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
	msleep(10);

	/* Test each interrupt */
	for (; i < 10; i++) {
		/* Interrupt to test */
		mask = 1 << i;

		if (!shared_int) {
			/*
			 * Disable the interrupts to be reported in
			 * the cause register and then force the same
			 * interrupt and see if one gets posted.  If
			 * an interrupt was posted to the bus, the
			 * test failed.
			 */
			adapter->test_icr = 0;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
			                ~mask & 0x00007FFF);
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
			                ~mask & 0x00007FFF);
			msleep(10);

			if (adapter->test_icr & mask) {
				*data = 3;
				break;
			}
		}

		/*
		 * Enable the interrupt to be reported in the cause
		 * register and then force the same interrupt and see
		 * if one gets posted.  If an interrupt was not posted
		 * to the bus, the test failed.
		 */
		adapter->test_icr = 0;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
		msleep(10);

		if (!(adapter->test_icr & mask)) {
			*data = 4;
			break;
		}

		if (!shared_int) {
			/*
			 * Disable the other interrupts to be reported in
			 * the cause register and then force the other
			 * interrupts and see if any get posted.  If
			 * an interrupt was posted to the bus, the
			 * test failed.
			 */
			adapter->test_icr = 0;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
			                ~mask & 0x00007FFF);
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
			                ~mask & 0x00007FFF);
			msleep(10);

			if (adapter->test_icr) {
				*data = 5;
				break;
			}
		}
	}

	/* Disable all the interrupts */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
	msleep(10);

	/* Unhook test interrupt handler */
	free_irq(irq, netdev);

	return *data;
}
1327 | |||
/* Tear down the dedicated loopback-test descriptor rings: stop the
 * Rx/Tx DMA engines, reset the hardware, unmap and free every test
 * buffer, then release the rings themselves.  All frees are guarded,
 * so this is safe on partially constructed rings and doubles as the
 * error-unwind path for ixgbe_setup_desc_rings().
 */
static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
	struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
	struct ixgbe_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	u32 reg_ctl;
	int i;

	/* shut down the DMA engines now so they can be reinitialized later */

	/* first Rx */
	reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	reg_ctl &= ~IXGBE_RXCTRL_RXEN;
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl);
	reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(0));
	reg_ctl &= ~IXGBE_RXDCTL_ENABLE;
	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(0), reg_ctl);

	/* now Tx */
	reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(0));
	reg_ctl &= ~IXGBE_TXDCTL_ENABLE;
	IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(0), reg_ctl);
	if (hw->mac.type == ixgbe_mac_82599EB) {
		/* 82599 has an additional global Tx DMA enable */
		reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		reg_ctl &= ~IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl);
	}

	ixgbe_reset(adapter);

	/* unmap and free the Tx test buffers */
	if (tx_ring->desc && tx_ring->tx_buffer_info) {
		for (i = 0; i < tx_ring->count; i++) {
			struct ixgbe_tx_buffer *buf =
			        &(tx_ring->tx_buffer_info[i]);
			if (buf->dma)
				pci_unmap_single(pdev, buf->dma, buf->length,
				                 PCI_DMA_TODEVICE);
			if (buf->skb)
				dev_kfree_skb(buf->skb);
		}
	}

	/* unmap and free the Rx test buffers */
	if (rx_ring->desc && rx_ring->rx_buffer_info) {
		for (i = 0; i < rx_ring->count; i++) {
			struct ixgbe_rx_buffer *buf =
			        &(rx_ring->rx_buffer_info[i]);
			if (buf->dma)
				pci_unmap_single(pdev, buf->dma,
				                 IXGBE_RXBUFFER_2048,
				                 PCI_DMA_FROMDEVICE);
			if (buf->skb)
				dev_kfree_skb(buf->skb);
		}
	}

	/* release the coherent descriptor ring memory */
	if (tx_ring->desc) {
		pci_free_consistent(pdev, tx_ring->size, tx_ring->desc,
		                    tx_ring->dma);
		tx_ring->desc = NULL;
	}
	if (rx_ring->desc) {
		pci_free_consistent(pdev, rx_ring->size, rx_ring->desc,
		                    rx_ring->dma);
		rx_ring->desc = NULL;
	}

	/* kfree(NULL) is a no-op, so no guards are needed here */
	kfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
	kfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	return;
}
1402 | |||
/* Build the standalone Tx/Rx descriptor rings used by the loopback
 * test: allocate buffer-info arrays and coherent ring memory, program
 * ring base/length/head/tail registers on queue 0, enable the DMA
 * engines, and pre-fill every descriptor with a mapped 1 KB (Tx) or
 * 2 KB (Rx) buffer.
 *
 * Returns 0 on success or a small positive code (1-6) identifying the
 * allocation that failed; on failure everything already built is torn
 * down via ixgbe_free_desc_rings().
 *
 * NOTE(review): pci_map_single() results are not checked for mapping
 * errors — presumably acceptable for a diagnostic path, but confirm.
 */
static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
	struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
	struct pci_dev *pdev = adapter->pdev;
	u32 rctl, reg_data;
	int i, ret_val;

	/* Setup Tx descriptor ring and Tx buffers */

	if (!tx_ring->count)
		tx_ring->count = IXGBE_DEFAULT_TXD;

	tx_ring->tx_buffer_info = kcalloc(tx_ring->count,
	                                  sizeof(struct ixgbe_tx_buffer),
	                                  GFP_KERNEL);
	if (!(tx_ring->tx_buffer_info)) {
		ret_val = 1;
		goto err_nomem;
	}

	/* legacy descriptors; ring must be 4 KB aligned */
	tx_ring->size = tx_ring->count * sizeof(struct ixgbe_legacy_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);
	if (!(tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
	                                           &tx_ring->dma))) {
		ret_val = 2;
		goto err_nomem;
	}
	tx_ring->next_to_use = tx_ring->next_to_clean = 0;

	/* program Tx queue 0 base, length, head and tail */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDBAL(0),
	                ((u64) tx_ring->dma & 0x00000000FFFFFFFF));
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDBAH(0),
	                ((u64) tx_ring->dma >> 32));
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDLEN(0),
	                tx_ring->count * sizeof(struct ixgbe_legacy_tx_desc));
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDH(0), 0);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(0), 0);

	/* let hardware pad short frames */
	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
	reg_data |= IXGBE_HLREG0_TXPADEN;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);

	if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
		/* 82599 needs the global Tx DMA enable as well */
		reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
		reg_data |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
	}
	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_TXDCTL(0));
	reg_data |= IXGBE_TXDCTL_ENABLE;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_TXDCTL(0), reg_data);

	/* pre-build one 1 KB test frame per Tx descriptor */
	for (i = 0; i < tx_ring->count; i++) {
		struct ixgbe_legacy_tx_desc *desc = IXGBE_TX_DESC(*tx_ring, i);
		struct sk_buff *skb;
		unsigned int size = 1024;

		skb = alloc_skb(size, GFP_KERNEL);
		if (!skb) {
			ret_val = 3;
			goto err_nomem;
		}
		skb_put(skb, size);
		tx_ring->tx_buffer_info[i].skb = skb;
		tx_ring->tx_buffer_info[i].length = skb->len;
		tx_ring->tx_buffer_info[i].dma =
		        pci_map_single(pdev, skb->data, skb->len,
		                       PCI_DMA_TODEVICE);
		desc->buffer_addr = cpu_to_le64(tx_ring->tx_buffer_info[i].dma);
		desc->lower.data = cpu_to_le32(skb->len);
		/* end-of-packet, insert FCS, report status on completion */
		desc->lower.data |= cpu_to_le32(IXGBE_TXD_CMD_EOP |
		                                IXGBE_TXD_CMD_IFCS |
		                                IXGBE_TXD_CMD_RS);
		desc->upper.data = 0;
	}

	/* Setup Rx Descriptor ring and Rx buffers */

	if (!rx_ring->count)
		rx_ring->count = IXGBE_DEFAULT_RXD;

	rx_ring->rx_buffer_info = kcalloc(rx_ring->count,
	                                  sizeof(struct ixgbe_rx_buffer),
	                                  GFP_KERNEL);
	if (!(rx_ring->rx_buffer_info)) {
		ret_val = 4;
		goto err_nomem;
	}

	rx_ring->size = rx_ring->count * sizeof(struct ixgbe_legacy_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);
	if (!(rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
	                                           &rx_ring->dma))) {
		ret_val = 5;
		goto err_nomem;
	}
	rx_ring->next_to_use = rx_ring->next_to_clean = 0;

	/* disable Rx while (re)programming the queue registers */
	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl & ~IXGBE_RXCTRL_RXEN);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDBAL(0),
	                ((u64)rx_ring->dma & 0xFFFFFFFF));
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDBAH(0),
	                ((u64) rx_ring->dma >> 32));
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDLEN(0), rx_ring->size);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDH(0), 0);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDT(0), 0);

	/* accept broadcast, bad packets and all multicast for the test */
	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_data);

	/* loopback is enabled later by ixgbe_setup_loopback_test() */
	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
	reg_data &= ~IXGBE_HLREG0_LPBK;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);

	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_RDRXCTL);
#define IXGBE_RDRXCTL_RDMTS_MASK        0x00000003 /* Receive Descriptor Minimum
                                                      Threshold Size mask */
	reg_data &= ~IXGBE_RDRXCTL_RDMTS_MASK;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDRXCTL, reg_data);

	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_MCSTCTRL);
#define IXGBE_MCSTCTRL_MO_MASK          0x00000003 /* Multicast Offset mask */
	reg_data &= ~IXGBE_MCSTCTRL_MO_MASK;
	reg_data |= adapter->hw.mac.mc_filter_type;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_MCSTCTRL, reg_data);

	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(0));
	reg_data |= IXGBE_RXDCTL_ENABLE;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(0), reg_data);
	if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
		/* 82599: poll (up to ~10 ms) for the queue-enable to latch */
		int j = adapter->rx_ring[0].reg_idx;
		u32 k;
		for (k = 0; k < 10; k++) {
			if (IXGBE_READ_REG(&adapter->hw,
			                   IXGBE_RXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
				break;
			else
				msleep(1);
		}
	}

	/* re-enable Rx, bypassing the descriptor monitor */
	rctl |= IXGBE_RXCTRL_RXEN | IXGBE_RXCTRL_DMBYPS;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl);

	/* attach a zeroed 2 KB receive buffer to every Rx descriptor */
	for (i = 0; i < rx_ring->count; i++) {
		struct ixgbe_legacy_rx_desc *rx_desc =
		        IXGBE_RX_DESC(*rx_ring, i);
		struct sk_buff *skb;

		skb = alloc_skb(IXGBE_RXBUFFER_2048 + NET_IP_ALIGN, GFP_KERNEL);
		if (!skb) {
			ret_val = 6;
			goto err_nomem;
		}
		skb_reserve(skb, NET_IP_ALIGN);
		rx_ring->rx_buffer_info[i].skb = skb;
		rx_ring->rx_buffer_info[i].dma =
		        pci_map_single(pdev, skb->data, IXGBE_RXBUFFER_2048,
		                       PCI_DMA_FROMDEVICE);
		rx_desc->buffer_addr =
		        cpu_to_le64(rx_ring->rx_buffer_info[i].dma);
		memset(skb->data, 0x00, skb->len);
	}

	return 0;

err_nomem:
	/* unwind everything built so far */
	ixgbe_free_desc_rings(adapter);
	return ret_val;
}
1575 | |||
/* Put the MAC into loopback for the loopback self-test: set HLREG0
 * loopback, force 10G link with autoneg disabled, and on 82598 power
 * down the Atlas analog Tx lanes so no frames leave the wire (they are
 * re-enabled by the reset path).  Always returns 0.
 */
static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg_data;

	/* right now we only support MAC loopback in the driver */

	/* Setup MAC loopback */
	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
	reg_data |= IXGBE_HLREG0_LPBK;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);

	/* force link: 10G, no autoneg, force link up */
	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_AUTOC);
	reg_data &= ~IXGBE_AUTOC_LMS_MASK;
	reg_data |= IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_AUTOC, reg_data);

	/* Disable Atlas Tx lanes; re-enabled in reset path */
	if (hw->mac.type == ixgbe_mac_82598EB) {
		u8 atlas;

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, atlas);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, atlas);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, atlas);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, atlas);
	}

	return 0;
}
1616 | |||
1617 | static void ixgbe_loopback_cleanup(struct ixgbe_adapter *adapter) | ||
1618 | { | ||
1619 | u32 reg_data; | ||
1620 | |||
1621 | reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0); | ||
1622 | reg_data &= ~IXGBE_HLREG0_LPBK; | ||
1623 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data); | ||
1624 | } | ||
1625 | |||
1626 | static void ixgbe_create_lbtest_frame(struct sk_buff *skb, | ||
1627 | unsigned int frame_size) | ||
1628 | { | ||
1629 | memset(skb->data, 0xFF, frame_size); | ||
1630 | frame_size &= ~1; | ||
1631 | memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1); | ||
1632 | memset(&skb->data[frame_size / 2 + 10], 0xBE, 1); | ||
1633 | memset(&skb->data[frame_size / 2 + 12], 0xAF, 1); | ||
1634 | } | ||
1635 | |||
1636 | static int ixgbe_check_lbtest_frame(struct sk_buff *skb, | ||
1637 | unsigned int frame_size) | ||
1638 | { | ||
1639 | frame_size &= ~1; | ||
1640 | if (*(skb->data + 3) == 0xFF) { | ||
1641 | if ((*(skb->data + frame_size / 2 + 10) == 0xBE) && | ||
1642 | (*(skb->data + frame_size / 2 + 12) == 0xAF)) { | ||
1643 | return 0; | ||
1644 | } | ||
1645 | } | ||
1646 | return 13; | ||
1647 | } | ||
1648 | |||
/* Drive the actual loopback traffic: repeatedly post batches of 64
 * pre-built test frames to Tx queue 0 and verify each comes back on Rx
 * queue 0 with the expected pattern.  The loop count wraps the larger
 * ring about twice.  Returns 0 on success, 13 on pattern mis-compare,
 * 14 on receive timeout.
 */
static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
	struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
	struct pci_dev *pdev = adapter->pdev;
	int i, j, k, l, lc, good_cnt, ret_val = 0;
	unsigned long time;

	/* hand all Rx descriptors to hardware */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDT(0), rx_ring->count - 1);

	/*
	 * Calculate the loop count based on the largest descriptor ring
	 * The idea is to wrap the largest ring a number of times using 64
	 * send/receive pairs during each loop
	 */

	if (rx_ring->count <= tx_ring->count)
		lc = ((tx_ring->count / 64) * 2) + 1;
	else
		lc = ((rx_ring->count / 64) * 2) + 1;

	/* k tracks the Tx descriptor index, l the Rx index */
	k = l = 0;
	for (j = 0; j <= lc; j++) {
		for (i = 0; i < 64; i++) {
			ixgbe_create_lbtest_frame(
			        tx_ring->tx_buffer_info[k].skb,
			        1024);
			/* flush CPU writes before the device DMAs the buffer */
			pci_dma_sync_single_for_device(pdev,
			        tx_ring->tx_buffer_info[k].dma,
			        tx_ring->tx_buffer_info[k].length,
			        PCI_DMA_TODEVICE);
			if (unlikely(++k == tx_ring->count))
				k = 0;
		}
		/* bump the tail pointer to start transmission */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(0), k);
		msleep(200);
		/* set the start time for the receive */
		time = jiffies;
		good_cnt = 0;
		do {
			/* receive the sent packets */
			pci_dma_sync_single_for_cpu(pdev,
			        rx_ring->rx_buffer_info[l].dma,
			        IXGBE_RXBUFFER_2048,
			        PCI_DMA_FROMDEVICE);
			ret_val = ixgbe_check_lbtest_frame(
			        rx_ring->rx_buffer_info[l].skb, 1024);
			if (!ret_val)
				good_cnt++;
			if (++l == rx_ring->count)
				l = 0;
			/*
			 * time + 20 msecs (200 msecs on 2.4) is more than
			 * enough time to complete the receives, if it's
			 * exceeded, break and error off
			 *
			 * NOTE(review): raw jiffies comparisons like this
			 * break on wraparound — time_after()/time_before()
			 * would be the safe idiom; also "+ 20" is 20 jiffies,
			 * not 20 ms, unless HZ == 1000 — confirm intent.
			 */
		} while (good_cnt < 64 && jiffies < (time + 20));
		if (good_cnt != 64) {
			/* ret_val is the same as mis-compare */
			ret_val = 13;
			break;
		}
		if (jiffies >= (time + 20)) {
			/* Error code for time out error */
			ret_val = 14;
			break;
		}
	}

	return ret_val;
}
1720 | |||
1721 | static int ixgbe_loopback_test(struct ixgbe_adapter *adapter, u64 *data) | ||
1722 | { | ||
1723 | *data = ixgbe_setup_desc_rings(adapter); | ||
1724 | if (*data) | ||
1725 | goto out; | ||
1726 | *data = ixgbe_setup_loopback_test(adapter); | ||
1727 | if (*data) | ||
1728 | goto err_loopback; | ||
1729 | *data = ixgbe_run_loopback_test(adapter); | ||
1730 | ixgbe_loopback_cleanup(adapter); | ||
1731 | |||
1732 | err_loopback: | ||
1733 | ixgbe_free_desc_rings(adapter); | ||
1734 | out: | ||
1735 | return *data; | ||
1736 | } | ||
1737 | |||
/* ethtool .self_test callback.  Offline mode: runs the link test first
 * (before any reset, so autoneg cannot disturb it), takes the interface
 * down, then runs register, EEPROM, interrupt and loopback tests with a
 * hardware reset between each, and brings the interface back up.
 * Online mode: only the link test runs; the other four results are
 * reported as pass.  Per-test results land in data[0..4] in the same
 * order as ixgbe_gstrings_test, and any failure sets
 * ETH_TEST_FL_FAILED in eth_test->flags.
 */
static void ixgbe_diag_test(struct net_device *netdev,
                            struct ethtool_test *eth_test, u64 *data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	bool if_running = netif_running(netdev);

	set_bit(__IXGBE_TESTING, &adapter->state);
	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
		/* Offline tests */

		DPRINTK(HW, INFO, "offline testing starting\n");

		/* Link test performed before hardware reset so autoneg doesn't
		 * interfere with test result */
		if (ixgbe_link_test(adapter, &data[4]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		if (if_running)
			/* indicate we're in test mode */
			dev_close(netdev);
		else
			ixgbe_reset(adapter);

		DPRINTK(HW, INFO, "register testing starting\n");
		if (ixgbe_reg_test(adapter, &data[0]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* reset between tests so each starts from a clean state */
		ixgbe_reset(adapter);
		DPRINTK(HW, INFO, "eeprom testing starting\n");
		if (ixgbe_eeprom_test(adapter, &data[1]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		ixgbe_reset(adapter);
		DPRINTK(HW, INFO, "interrupt testing starting\n");
		if (ixgbe_intr_test(adapter, &data[2]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		ixgbe_reset(adapter);
		DPRINTK(HW, INFO, "loopback testing starting\n");
		if (ixgbe_loopback_test(adapter, &data[3]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		ixgbe_reset(adapter);

		clear_bit(__IXGBE_TESTING, &adapter->state);
		if (if_running)
			dev_open(netdev);
	} else {
		DPRINTK(HW, INFO, "online testing starting\n");
		/* Online tests */
		if (ixgbe_link_test(adapter, &data[4]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* Online tests aren't run; pass by default */
		data[0] = 0;
		data[1] = 0;
		data[2] = 0;
		data[3] = 0;

		clear_bit(__IXGBE_TESTING, &adapter->state);
	}
	/* NOTE(review): presumably lets the link settle after the final
	 * reset before returning to userspace — confirm why 4 s. */
	msleep_interruptible(4 * 1000);
}
978 | 1801 | ||
979 | static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter, | 1802 | static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter, |
980 | struct ethtool_wolinfo *wol) | 1803 | struct ethtool_wolinfo *wol) |
@@ -1201,6 +2024,7 @@ static const struct ethtool_ops ixgbe_ethtool_ops = { | |||
1201 | .set_msglevel = ixgbe_set_msglevel, | 2024 | .set_msglevel = ixgbe_set_msglevel, |
1202 | .get_tso = ethtool_op_get_tso, | 2025 | .get_tso = ethtool_op_get_tso, |
1203 | .set_tso = ixgbe_set_tso, | 2026 | .set_tso = ixgbe_set_tso, |
2027 | .self_test = ixgbe_diag_test, | ||
1204 | .get_strings = ixgbe_get_strings, | 2028 | .get_strings = ixgbe_get_strings, |
1205 | .phys_id = ixgbe_phys_id, | 2029 | .phys_id = ixgbe_phys_id, |
1206 | .get_sset_count = ixgbe_get_sset_count, | 2030 | .get_sset_count = ixgbe_get_sset_count, |