diff options
Diffstat (limited to 'drivers')
222 files changed, 5388 insertions, 6474 deletions
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c index de4c8499cbac..288547a3c566 100644 --- a/drivers/bluetooth/ath3k.c +++ b/drivers/bluetooth/ath3k.c | |||
@@ -65,6 +65,7 @@ static const struct usb_device_id ath3k_table[] = { | |||
65 | /* Atheros AR3011 with sflash firmware*/ | 65 | /* Atheros AR3011 with sflash firmware*/ |
66 | { USB_DEVICE(0x0489, 0xE027) }, | 66 | { USB_DEVICE(0x0489, 0xE027) }, |
67 | { USB_DEVICE(0x0489, 0xE03D) }, | 67 | { USB_DEVICE(0x0489, 0xE03D) }, |
68 | { USB_DEVICE(0x04F2, 0xAFF1) }, | ||
68 | { USB_DEVICE(0x0930, 0x0215) }, | 69 | { USB_DEVICE(0x0930, 0x0215) }, |
69 | { USB_DEVICE(0x0CF3, 0x3002) }, | 70 | { USB_DEVICE(0x0CF3, 0x3002) }, |
70 | { USB_DEVICE(0x0CF3, 0xE019) }, | 71 | { USB_DEVICE(0x0CF3, 0xE019) }, |
diff --git a/drivers/bluetooth/btmrvl_drv.h b/drivers/bluetooth/btmrvl_drv.h index e75f8ee2512c..086f0ec89580 100644 --- a/drivers/bluetooth/btmrvl_drv.h +++ b/drivers/bluetooth/btmrvl_drv.h | |||
@@ -111,6 +111,7 @@ struct btmrvl_private { | |||
111 | 111 | ||
112 | /* Vendor specific Bluetooth commands */ | 112 | /* Vendor specific Bluetooth commands */ |
113 | #define BT_CMD_PSCAN_WIN_REPORT_ENABLE 0xFC03 | 113 | #define BT_CMD_PSCAN_WIN_REPORT_ENABLE 0xFC03 |
114 | #define BT_CMD_ROUTE_SCO_TO_HOST 0xFC1D | ||
114 | #define BT_CMD_SET_BDADDR 0xFC22 | 115 | #define BT_CMD_SET_BDADDR 0xFC22 |
115 | #define BT_CMD_AUTO_SLEEP_MODE 0xFC23 | 116 | #define BT_CMD_AUTO_SLEEP_MODE 0xFC23 |
116 | #define BT_CMD_HOST_SLEEP_CONFIG 0xFC59 | 117 | #define BT_CMD_HOST_SLEEP_CONFIG 0xFC59 |
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c index 413597789c61..de05deb444ce 100644 --- a/drivers/bluetooth/btmrvl_main.c +++ b/drivers/bluetooth/btmrvl_main.c | |||
@@ -230,6 +230,18 @@ int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, u8 subcmd) | |||
230 | } | 230 | } |
231 | EXPORT_SYMBOL_GPL(btmrvl_send_module_cfg_cmd); | 231 | EXPORT_SYMBOL_GPL(btmrvl_send_module_cfg_cmd); |
232 | 232 | ||
233 | static int btmrvl_enable_sco_routing_to_host(struct btmrvl_private *priv) | ||
234 | { | ||
235 | int ret; | ||
236 | u8 subcmd = 0; | ||
237 | |||
238 | ret = btmrvl_send_sync_cmd(priv, BT_CMD_ROUTE_SCO_TO_HOST, &subcmd, 1); | ||
239 | if (ret) | ||
240 | BT_ERR("BT_CMD_ROUTE_SCO_TO_HOST command failed: %#x", ret); | ||
241 | |||
242 | return ret; | ||
243 | } | ||
244 | |||
233 | int btmrvl_pscan_window_reporting(struct btmrvl_private *priv, u8 subcmd) | 245 | int btmrvl_pscan_window_reporting(struct btmrvl_private *priv, u8 subcmd) |
234 | { | 246 | { |
235 | struct btmrvl_sdio_card *card = priv->btmrvl_dev.card; | 247 | struct btmrvl_sdio_card *card = priv->btmrvl_dev.card; |
@@ -558,6 +570,8 @@ static int btmrvl_setup(struct hci_dev *hdev) | |||
558 | 570 | ||
559 | btmrvl_check_device_tree(priv); | 571 | btmrvl_check_device_tree(priv); |
560 | 572 | ||
573 | btmrvl_enable_sco_routing_to_host(priv); | ||
574 | |||
561 | btmrvl_pscan_window_reporting(priv, 0x01); | 575 | btmrvl_pscan_window_reporting(priv, 0x01); |
562 | 576 | ||
563 | priv->btmrvl_dev.psmode = 1; | 577 | priv->btmrvl_dev.psmode = 1; |
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 8bfc4c2bba87..8c1bf6190533 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c | |||
@@ -159,6 +159,7 @@ static const struct usb_device_id blacklist_table[] = { | |||
159 | /* Atheros 3011 with sflash firmware */ | 159 | /* Atheros 3011 with sflash firmware */ |
160 | { USB_DEVICE(0x0489, 0xe027), .driver_info = BTUSB_IGNORE }, | 160 | { USB_DEVICE(0x0489, 0xe027), .driver_info = BTUSB_IGNORE }, |
161 | { USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE }, | 161 | { USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE }, |
162 | { USB_DEVICE(0x04f2, 0xaff1), .driver_info = BTUSB_IGNORE }, | ||
162 | { USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE }, | 163 | { USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE }, |
163 | { USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE }, | 164 | { USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE }, |
164 | { USB_DEVICE(0x0cf3, 0xe019), .driver_info = BTUSB_IGNORE }, | 165 | { USB_DEVICE(0x0cf3, 0xe019), .driver_info = BTUSB_IGNORE }, |
@@ -339,16 +340,6 @@ struct btusb_data { | |||
339 | int (*recv_bulk)(struct btusb_data *data, void *buffer, int count); | 340 | int (*recv_bulk)(struct btusb_data *data, void *buffer, int count); |
340 | }; | 341 | }; |
341 | 342 | ||
342 | static int btusb_wait_on_bit_timeout(void *word, int bit, unsigned long timeout, | ||
343 | unsigned mode) | ||
344 | { | ||
345 | might_sleep(); | ||
346 | if (!test_bit(bit, word)) | ||
347 | return 0; | ||
348 | return out_of_line_wait_on_bit_timeout(word, bit, bit_wait_timeout, | ||
349 | mode, timeout); | ||
350 | } | ||
351 | |||
352 | static inline void btusb_free_frags(struct btusb_data *data) | 343 | static inline void btusb_free_frags(struct btusb_data *data) |
353 | { | 344 | { |
354 | unsigned long flags; | 345 | unsigned long flags; |
@@ -2197,9 +2188,9 @@ static int btusb_setup_intel_new(struct hci_dev *hdev) | |||
2197 | * and thus just timeout if that happens and fail the setup | 2188 | * and thus just timeout if that happens and fail the setup |
2198 | * of this device. | 2189 | * of this device. |
2199 | */ | 2190 | */ |
2200 | err = btusb_wait_on_bit_timeout(&data->flags, BTUSB_DOWNLOADING, | 2191 | err = wait_on_bit_timeout(&data->flags, BTUSB_DOWNLOADING, |
2201 | msecs_to_jiffies(5000), | 2192 | TASK_INTERRUPTIBLE, |
2202 | TASK_INTERRUPTIBLE); | 2193 | msecs_to_jiffies(5000)); |
2203 | if (err == 1) { | 2194 | if (err == 1) { |
2204 | BT_ERR("%s: Firmware loading interrupted", hdev->name); | 2195 | BT_ERR("%s: Firmware loading interrupted", hdev->name); |
2205 | err = -EINTR; | 2196 | err = -EINTR; |
@@ -2250,9 +2241,9 @@ done: | |||
2250 | */ | 2241 | */ |
2251 | BT_INFO("%s: Waiting for device to boot", hdev->name); | 2242 | BT_INFO("%s: Waiting for device to boot", hdev->name); |
2252 | 2243 | ||
2253 | err = btusb_wait_on_bit_timeout(&data->flags, BTUSB_BOOTING, | 2244 | err = wait_on_bit_timeout(&data->flags, BTUSB_BOOTING, |
2254 | msecs_to_jiffies(1000), | 2245 | TASK_INTERRUPTIBLE, |
2255 | TASK_INTERRUPTIBLE); | 2246 | msecs_to_jiffies(1000)); |
2256 | 2247 | ||
2257 | if (err == 1) { | 2248 | if (err == 1) { |
2258 | BT_ERR("%s: Device boot interrupted", hdev->name); | 2249 | BT_ERR("%s: Device boot interrupted", hdev->name); |
@@ -2332,6 +2323,27 @@ static int btusb_set_bdaddr_intel(struct hci_dev *hdev, const bdaddr_t *bdaddr) | |||
2332 | return 0; | 2323 | return 0; |
2333 | } | 2324 | } |
2334 | 2325 | ||
2326 | static int btusb_shutdown_intel(struct hci_dev *hdev) | ||
2327 | { | ||
2328 | struct sk_buff *skb; | ||
2329 | long ret; | ||
2330 | |||
2331 | /* Some platforms have an issue with BT LED when the interface is | ||
2332 | * down or BT radio is turned off, which takes 5 seconds to BT LED | ||
2333 | * goes off. This command turns off the BT LED immediately. | ||
2334 | */ | ||
2335 | skb = __hci_cmd_sync(hdev, 0xfc3f, 0, NULL, HCI_INIT_TIMEOUT); | ||
2336 | if (IS_ERR(skb)) { | ||
2337 | ret = PTR_ERR(skb); | ||
2338 | BT_ERR("%s: turning off Intel device LED failed (%ld)", | ||
2339 | hdev->name, ret); | ||
2340 | return ret; | ||
2341 | } | ||
2342 | kfree_skb(skb); | ||
2343 | |||
2344 | return 0; | ||
2345 | } | ||
2346 | |||
2335 | static int btusb_set_bdaddr_marvell(struct hci_dev *hdev, | 2347 | static int btusb_set_bdaddr_marvell(struct hci_dev *hdev, |
2336 | const bdaddr_t *bdaddr) | 2348 | const bdaddr_t *bdaddr) |
2337 | { | 2349 | { |
@@ -2355,6 +2367,23 @@ static int btusb_set_bdaddr_marvell(struct hci_dev *hdev, | |||
2355 | return 0; | 2367 | return 0; |
2356 | } | 2368 | } |
2357 | 2369 | ||
2370 | static const struct { | ||
2371 | u16 subver; | ||
2372 | const char *name; | ||
2373 | } bcm_subver_table[] = { | ||
2374 | { 0x210b, "BCM43142A0" }, /* 001.001.011 */ | ||
2375 | { 0x2112, "BCM4314A0" }, /* 001.001.018 */ | ||
2376 | { 0x2118, "BCM20702A0" }, /* 001.001.024 */ | ||
2377 | { 0x2126, "BCM4335A0" }, /* 001.001.038 */ | ||
2378 | { 0x220e, "BCM20702A1" }, /* 001.002.014 */ | ||
2379 | { 0x230f, "BCM4354A2" }, /* 001.003.015 */ | ||
2380 | { 0x4106, "BCM4335B0" }, /* 002.001.006 */ | ||
2381 | { 0x410e, "BCM20702B0" }, /* 002.001.014 */ | ||
2382 | { 0x6109, "BCM4335C0" }, /* 003.001.009 */ | ||
2383 | { 0x610c, "BCM4354" }, /* 003.001.012 */ | ||
2384 | { } | ||
2385 | }; | ||
2386 | |||
2358 | #define BDADDR_BCM20702A0 (&(bdaddr_t) {{0x00, 0xa0, 0x02, 0x70, 0x20, 0x00}}) | 2387 | #define BDADDR_BCM20702A0 (&(bdaddr_t) {{0x00, 0xa0, 0x02, 0x70, 0x20, 0x00}}) |
2359 | 2388 | ||
2360 | static int btusb_setup_bcm_patchram(struct hci_dev *hdev) | 2389 | static int btusb_setup_bcm_patchram(struct hci_dev *hdev) |
@@ -2367,29 +2396,20 @@ static int btusb_setup_bcm_patchram(struct hci_dev *hdev) | |||
2367 | size_t fw_size; | 2396 | size_t fw_size; |
2368 | const struct hci_command_hdr *cmd; | 2397 | const struct hci_command_hdr *cmd; |
2369 | const u8 *cmd_param; | 2398 | const u8 *cmd_param; |
2370 | u16 opcode; | 2399 | u16 opcode, subver, rev; |
2400 | const char *hw_name = NULL; | ||
2371 | struct sk_buff *skb; | 2401 | struct sk_buff *skb; |
2372 | struct hci_rp_read_local_version *ver; | 2402 | struct hci_rp_read_local_version *ver; |
2373 | struct hci_rp_read_bd_addr *bda; | 2403 | struct hci_rp_read_bd_addr *bda; |
2374 | long ret; | 2404 | long ret; |
2375 | 2405 | int i; | |
2376 | snprintf(fw_name, sizeof(fw_name), "brcm/%s-%04x-%04x.hcd", | ||
2377 | udev->product ? udev->product : "BCM", | ||
2378 | le16_to_cpu(udev->descriptor.idVendor), | ||
2379 | le16_to_cpu(udev->descriptor.idProduct)); | ||
2380 | |||
2381 | ret = request_firmware(&fw, fw_name, &hdev->dev); | ||
2382 | if (ret < 0) { | ||
2383 | BT_INFO("%s: BCM: patch %s not found", hdev->name, fw_name); | ||
2384 | return 0; | ||
2385 | } | ||
2386 | 2406 | ||
2387 | /* Reset */ | 2407 | /* Reset */ |
2388 | skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT); | 2408 | skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT); |
2389 | if (IS_ERR(skb)) { | 2409 | if (IS_ERR(skb)) { |
2390 | ret = PTR_ERR(skb); | 2410 | ret = PTR_ERR(skb); |
2391 | BT_ERR("%s: HCI_OP_RESET failed (%ld)", hdev->name, ret); | 2411 | BT_ERR("%s: HCI_OP_RESET failed (%ld)", hdev->name, ret); |
2392 | goto done; | 2412 | return ret; |
2393 | } | 2413 | } |
2394 | kfree_skb(skb); | 2414 | kfree_skb(skb); |
2395 | 2415 | ||
@@ -2400,23 +2420,43 @@ static int btusb_setup_bcm_patchram(struct hci_dev *hdev) | |||
2400 | ret = PTR_ERR(skb); | 2420 | ret = PTR_ERR(skb); |
2401 | BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION failed (%ld)", | 2421 | BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION failed (%ld)", |
2402 | hdev->name, ret); | 2422 | hdev->name, ret); |
2403 | goto done; | 2423 | return ret; |
2404 | } | 2424 | } |
2405 | 2425 | ||
2406 | if (skb->len != sizeof(*ver)) { | 2426 | if (skb->len != sizeof(*ver)) { |
2407 | BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION event length mismatch", | 2427 | BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION event length mismatch", |
2408 | hdev->name); | 2428 | hdev->name); |
2409 | kfree_skb(skb); | 2429 | kfree_skb(skb); |
2410 | ret = -EIO; | 2430 | return -EIO; |
2411 | goto done; | ||
2412 | } | 2431 | } |
2413 | 2432 | ||
2414 | ver = (struct hci_rp_read_local_version *)skb->data; | 2433 | ver = (struct hci_rp_read_local_version *)skb->data; |
2415 | BT_INFO("%s: BCM: patching hci_ver=%02x hci_rev=%04x lmp_ver=%02x " | 2434 | rev = le16_to_cpu(ver->hci_rev); |
2416 | "lmp_subver=%04x", hdev->name, ver->hci_ver, ver->hci_rev, | 2435 | subver = le16_to_cpu(ver->lmp_subver); |
2417 | ver->lmp_ver, ver->lmp_subver); | ||
2418 | kfree_skb(skb); | 2436 | kfree_skb(skb); |
2419 | 2437 | ||
2438 | for (i = 0; bcm_subver_table[i].name; i++) { | ||
2439 | if (subver == bcm_subver_table[i].subver) { | ||
2440 | hw_name = bcm_subver_table[i].name; | ||
2441 | break; | ||
2442 | } | ||
2443 | } | ||
2444 | |||
2445 | BT_INFO("%s: %s (%3.3u.%3.3u.%3.3u) build %4.4u", hdev->name, | ||
2446 | hw_name ? : "BCM", (subver & 0x7000) >> 13, | ||
2447 | (subver & 0x1f00) >> 8, (subver & 0x00ff), rev & 0x0fff); | ||
2448 | |||
2449 | snprintf(fw_name, sizeof(fw_name), "brcm/%s-%4.4x-%4.4x.hcd", | ||
2450 | hw_name ? : "BCM", | ||
2451 | le16_to_cpu(udev->descriptor.idVendor), | ||
2452 | le16_to_cpu(udev->descriptor.idProduct)); | ||
2453 | |||
2454 | ret = request_firmware(&fw, fw_name, &hdev->dev); | ||
2455 | if (ret < 0) { | ||
2456 | BT_INFO("%s: BCM: patch %s not found", hdev->name, fw_name); | ||
2457 | return 0; | ||
2458 | } | ||
2459 | |||
2420 | /* Start Download */ | 2460 | /* Start Download */ |
2421 | skb = __hci_cmd_sync(hdev, 0xfc2e, 0, NULL, HCI_INIT_TIMEOUT); | 2461 | skb = __hci_cmd_sync(hdev, 0xfc2e, 0, NULL, HCI_INIT_TIMEOUT); |
2422 | if (IS_ERR(skb)) { | 2462 | if (IS_ERR(skb)) { |
@@ -2494,11 +2534,14 @@ reset_fw: | |||
2494 | } | 2534 | } |
2495 | 2535 | ||
2496 | ver = (struct hci_rp_read_local_version *)skb->data; | 2536 | ver = (struct hci_rp_read_local_version *)skb->data; |
2497 | BT_INFO("%s: BCM: firmware hci_ver=%02x hci_rev=%04x lmp_ver=%02x " | 2537 | rev = le16_to_cpu(ver->hci_rev); |
2498 | "lmp_subver=%04x", hdev->name, ver->hci_ver, ver->hci_rev, | 2538 | subver = le16_to_cpu(ver->lmp_subver); |
2499 | ver->lmp_ver, ver->lmp_subver); | ||
2500 | kfree_skb(skb); | 2539 | kfree_skb(skb); |
2501 | 2540 | ||
2541 | BT_INFO("%s: %s (%3.3u.%3.3u.%3.3u) build %4.4u", hdev->name, | ||
2542 | hw_name ? : "BCM", (subver & 0x7000) >> 13, | ||
2543 | (subver & 0x1f00) >> 8, (subver & 0x00ff), rev & 0x0fff); | ||
2544 | |||
2502 | /* Read BD Address */ | 2545 | /* Read BD Address */ |
2503 | skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL, | 2546 | skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL, |
2504 | HCI_INIT_TIMEOUT); | 2547 | HCI_INIT_TIMEOUT); |
@@ -2709,6 +2752,7 @@ static int btusb_probe(struct usb_interface *intf, | |||
2709 | 2752 | ||
2710 | if (id->driver_info & BTUSB_INTEL) { | 2753 | if (id->driver_info & BTUSB_INTEL) { |
2711 | hdev->setup = btusb_setup_intel; | 2754 | hdev->setup = btusb_setup_intel; |
2755 | hdev->shutdown = btusb_shutdown_intel; | ||
2712 | hdev->set_bdaddr = btusb_set_bdaddr_intel; | 2756 | hdev->set_bdaddr = btusb_set_bdaddr_intel; |
2713 | set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks); | 2757 | set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks); |
2714 | } | 2758 | } |
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c index 2c68da1ceeee..f4ea80d602f7 100644 --- a/drivers/firewire/net.c +++ b/drivers/firewire/net.c | |||
@@ -237,18 +237,6 @@ static int fwnet_header_create(struct sk_buff *skb, struct net_device *net, | |||
237 | return -net->hard_header_len; | 237 | return -net->hard_header_len; |
238 | } | 238 | } |
239 | 239 | ||
240 | static int fwnet_header_rebuild(struct sk_buff *skb) | ||
241 | { | ||
242 | struct fwnet_header *h = (struct fwnet_header *)skb->data; | ||
243 | |||
244 | if (get_unaligned_be16(&h->h_proto) == ETH_P_IP) | ||
245 | return arp_find((unsigned char *)&h->h_dest, skb); | ||
246 | |||
247 | dev_notice(&skb->dev->dev, "unable to resolve type %04x addresses\n", | ||
248 | be16_to_cpu(h->h_proto)); | ||
249 | return 0; | ||
250 | } | ||
251 | |||
252 | static int fwnet_header_cache(const struct neighbour *neigh, | 240 | static int fwnet_header_cache(const struct neighbour *neigh, |
253 | struct hh_cache *hh, __be16 type) | 241 | struct hh_cache *hh, __be16 type) |
254 | { | 242 | { |
@@ -282,7 +270,6 @@ static int fwnet_header_parse(const struct sk_buff *skb, unsigned char *haddr) | |||
282 | 270 | ||
283 | static const struct header_ops fwnet_header_ops = { | 271 | static const struct header_ops fwnet_header_ops = { |
284 | .create = fwnet_header_create, | 272 | .create = fwnet_header_create, |
285 | .rebuild = fwnet_header_rebuild, | ||
286 | .cache = fwnet_header_cache, | 273 | .cache = fwnet_header_cache, |
287 | .cache_update = fwnet_header_cache_update, | 274 | .cache_update = fwnet_header_cache_update, |
288 | .parse = fwnet_header_parse, | 275 | .parse = fwnet_header_parse, |
diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c index 94affa5e6f28..546b7e81161d 100644 --- a/drivers/isdn/i4l/isdn_net.c +++ b/drivers/isdn/i4l/isdn_net.c | |||
@@ -1951,38 +1951,6 @@ static int isdn_net_header(struct sk_buff *skb, struct net_device *dev, | |||
1951 | return len; | 1951 | return len; |
1952 | } | 1952 | } |
1953 | 1953 | ||
1954 | /* We don't need to send arp, because we have point-to-point connections. */ | ||
1955 | static int | ||
1956 | isdn_net_rebuild_header(struct sk_buff *skb) | ||
1957 | { | ||
1958 | struct net_device *dev = skb->dev; | ||
1959 | isdn_net_local *lp = netdev_priv(dev); | ||
1960 | int ret = 0; | ||
1961 | |||
1962 | if (lp->p_encap == ISDN_NET_ENCAP_ETHER) { | ||
1963 | struct ethhdr *eth = (struct ethhdr *) skb->data; | ||
1964 | |||
1965 | /* | ||
1966 | * Only ARP/IP is currently supported | ||
1967 | */ | ||
1968 | |||
1969 | if (eth->h_proto != htons(ETH_P_IP)) { | ||
1970 | printk(KERN_WARNING | ||
1971 | "isdn_net: %s don't know how to resolve type %d addresses?\n", | ||
1972 | dev->name, (int) eth->h_proto); | ||
1973 | memcpy(eth->h_source, dev->dev_addr, dev->addr_len); | ||
1974 | return 0; | ||
1975 | } | ||
1976 | /* | ||
1977 | * Try to get ARP to resolve the header. | ||
1978 | */ | ||
1979 | #ifdef CONFIG_INET | ||
1980 | ret = arp_find(eth->h_dest, skb); | ||
1981 | #endif | ||
1982 | } | ||
1983 | return ret; | ||
1984 | } | ||
1985 | |||
1986 | static int isdn_header_cache(const struct neighbour *neigh, struct hh_cache *hh, | 1954 | static int isdn_header_cache(const struct neighbour *neigh, struct hh_cache *hh, |
1987 | __be16 type) | 1955 | __be16 type) |
1988 | { | 1956 | { |
@@ -2005,7 +1973,6 @@ static void isdn_header_cache_update(struct hh_cache *hh, | |||
2005 | 1973 | ||
2006 | static const struct header_ops isdn_header_ops = { | 1974 | static const struct header_ops isdn_header_ops = { |
2007 | .create = isdn_net_header, | 1975 | .create = isdn_net_header, |
2008 | .rebuild = isdn_net_rebuild_header, | ||
2009 | .cache = isdn_header_cache, | 1976 | .cache = isdn_header_cache, |
2010 | .cache_update = isdn_header_cache_update, | 1977 | .cache_update = isdn_header_cache_update, |
2011 | }; | 1978 | }; |
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c index 84b35925ee4d..8dc7290089bb 100644 --- a/drivers/isdn/mISDN/socket.c +++ b/drivers/isdn/mISDN/socket.c | |||
@@ -112,8 +112,8 @@ mISDN_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) | |||
112 | } | 112 | } |
113 | 113 | ||
114 | static int | 114 | static int |
115 | mISDN_sock_recvmsg(struct kiocb *iocb, struct socket *sock, | 115 | mISDN_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, |
116 | struct msghdr *msg, size_t len, int flags) | 116 | int flags) |
117 | { | 117 | { |
118 | struct sk_buff *skb; | 118 | struct sk_buff *skb; |
119 | struct sock *sk = sock->sk; | 119 | struct sock *sk = sock->sk; |
@@ -173,8 +173,7 @@ mISDN_sock_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
173 | } | 173 | } |
174 | 174 | ||
175 | static int | 175 | static int |
176 | mISDN_sock_sendmsg(struct kiocb *iocb, struct socket *sock, | 176 | mISDN_sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) |
177 | struct msghdr *msg, size_t len) | ||
178 | { | 177 | { |
179 | struct sock *sk = sock->sk; | 178 | struct sock *sk = sock->sk; |
180 | struct sk_buff *skb; | 179 | struct sk_buff *skb; |
diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c index 686d3277dad1..4a77cb02dffc 100644 --- a/drivers/media/dvb-core/dvb_net.c +++ b/drivers/media/dvb-core/dvb_net.c | |||
@@ -1190,7 +1190,6 @@ static int dvb_net_stop(struct net_device *dev) | |||
1190 | static const struct header_ops dvb_header_ops = { | 1190 | static const struct header_ops dvb_header_ops = { |
1191 | .create = eth_header, | 1191 | .create = eth_header, |
1192 | .parse = eth_header_parse, | 1192 | .parse = eth_header_parse, |
1193 | .rebuild = eth_rebuild_header, | ||
1194 | }; | 1193 | }; |
1195 | 1194 | ||
1196 | 1195 | ||
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c index 09de683c167e..10f71c732b59 100644 --- a/drivers/net/arcnet/arcnet.c +++ b/drivers/net/arcnet/arcnet.c | |||
@@ -104,7 +104,6 @@ EXPORT_SYMBOL(arcnet_timeout); | |||
104 | static int arcnet_header(struct sk_buff *skb, struct net_device *dev, | 104 | static int arcnet_header(struct sk_buff *skb, struct net_device *dev, |
105 | unsigned short type, const void *daddr, | 105 | unsigned short type, const void *daddr, |
106 | const void *saddr, unsigned len); | 106 | const void *saddr, unsigned len); |
107 | static int arcnet_rebuild_header(struct sk_buff *skb); | ||
108 | static int go_tx(struct net_device *dev); | 107 | static int go_tx(struct net_device *dev); |
109 | 108 | ||
110 | static int debug = ARCNET_DEBUG; | 109 | static int debug = ARCNET_DEBUG; |
@@ -312,7 +311,6 @@ static int choose_mtu(void) | |||
312 | 311 | ||
313 | static const struct header_ops arcnet_header_ops = { | 312 | static const struct header_ops arcnet_header_ops = { |
314 | .create = arcnet_header, | 313 | .create = arcnet_header, |
315 | .rebuild = arcnet_rebuild_header, | ||
316 | }; | 314 | }; |
317 | 315 | ||
318 | static const struct net_device_ops arcnet_netdev_ops = { | 316 | static const struct net_device_ops arcnet_netdev_ops = { |
@@ -538,59 +536,6 @@ static int arcnet_header(struct sk_buff *skb, struct net_device *dev, | |||
538 | return proto->build_header(skb, dev, type, _daddr); | 536 | return proto->build_header(skb, dev, type, _daddr); |
539 | } | 537 | } |
540 | 538 | ||
541 | |||
542 | /* | ||
543 | * Rebuild the ARCnet hard header. This is called after an ARP (or in the | ||
544 | * future other address resolution) has completed on this sk_buff. We now | ||
545 | * let ARP fill in the destination field. | ||
546 | */ | ||
547 | static int arcnet_rebuild_header(struct sk_buff *skb) | ||
548 | { | ||
549 | struct net_device *dev = skb->dev; | ||
550 | struct arcnet_local *lp = netdev_priv(dev); | ||
551 | int status = 0; /* default is failure */ | ||
552 | unsigned short type; | ||
553 | uint8_t daddr=0; | ||
554 | struct ArcProto *proto; | ||
555 | /* | ||
556 | * XXX: Why not use skb->mac_len? | ||
557 | */ | ||
558 | if (skb->network_header - skb->mac_header != 2) { | ||
559 | BUGMSG(D_NORMAL, | ||
560 | "rebuild_header: shouldn't be here! (hdrsize=%d)\n", | ||
561 | (int)(skb->network_header - skb->mac_header)); | ||
562 | return 0; | ||
563 | } | ||
564 | type = *(uint16_t *) skb_pull(skb, 2); | ||
565 | BUGMSG(D_DURING, "rebuild header for protocol %Xh\n", type); | ||
566 | |||
567 | if (type == ETH_P_IP) { | ||
568 | #ifdef CONFIG_INET | ||
569 | BUGMSG(D_DURING, "rebuild header for ethernet protocol %Xh\n", type); | ||
570 | status = arp_find(&daddr, skb) ? 1 : 0; | ||
571 | BUGMSG(D_DURING, " rebuilt: dest is %d; protocol %Xh\n", | ||
572 | daddr, type); | ||
573 | #endif | ||
574 | } else { | ||
575 | BUGMSG(D_NORMAL, | ||
576 | "I don't understand ethernet protocol %Xh addresses!\n", type); | ||
577 | dev->stats.tx_errors++; | ||
578 | dev->stats.tx_aborted_errors++; | ||
579 | } | ||
580 | |||
581 | /* if we couldn't resolve the address... give up. */ | ||
582 | if (!status) | ||
583 | return 0; | ||
584 | |||
585 | /* add the _real_ header this time! */ | ||
586 | proto = arc_proto_map[lp->default_proto[daddr]]; | ||
587 | proto->build_header(skb, dev, type, daddr); | ||
588 | |||
589 | return 1; /* success */ | ||
590 | } | ||
591 | |||
592 | |||
593 | |||
594 | /* Called by the kernel in order to transmit a packet. */ | 539 | /* Called by the kernel in order to transmit a packet. */ |
595 | netdev_tx_t arcnet_send_packet(struct sk_buff *skb, | 540 | netdev_tx_t arcnet_send_packet(struct sk_buff *skb, |
596 | struct net_device *dev) | 541 | struct net_device *dev) |
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index cfc4a9c1000a..f61b2870cddf 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c | |||
@@ -38,6 +38,7 @@ | |||
38 | #define AD_STANDBY 0x2 | 38 | #define AD_STANDBY 0x2 |
39 | #define AD_MAX_TX_IN_SECOND 3 | 39 | #define AD_MAX_TX_IN_SECOND 3 |
40 | #define AD_COLLECTOR_MAX_DELAY 0 | 40 | #define AD_COLLECTOR_MAX_DELAY 0 |
41 | #define AD_MONITOR_CHURNED 0x1000 | ||
41 | 42 | ||
42 | /* Timer definitions (43.4.4 in the 802.3ad standard) */ | 43 | /* Timer definitions (43.4.4 in the 802.3ad standard) */ |
43 | #define AD_FAST_PERIODIC_TIME 1 | 44 | #define AD_FAST_PERIODIC_TIME 1 |
@@ -1013,16 +1014,19 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port) | |||
1013 | /* check if state machine should change state */ | 1014 | /* check if state machine should change state */ |
1014 | 1015 | ||
1015 | /* first, check if port was reinitialized */ | 1016 | /* first, check if port was reinitialized */ |
1016 | if (port->sm_vars & AD_PORT_BEGIN) | 1017 | if (port->sm_vars & AD_PORT_BEGIN) { |
1017 | port->sm_rx_state = AD_RX_INITIALIZE; | 1018 | port->sm_rx_state = AD_RX_INITIALIZE; |
1019 | port->sm_vars |= AD_MONITOR_CHURNED; | ||
1018 | /* check if port is not enabled */ | 1020 | /* check if port is not enabled */ |
1019 | else if (!(port->sm_vars & AD_PORT_BEGIN) | 1021 | } else if (!(port->sm_vars & AD_PORT_BEGIN) |
1020 | && !port->is_enabled && !(port->sm_vars & AD_PORT_MOVED)) | 1022 | && !port->is_enabled && !(port->sm_vars & AD_PORT_MOVED)) |
1021 | port->sm_rx_state = AD_RX_PORT_DISABLED; | 1023 | port->sm_rx_state = AD_RX_PORT_DISABLED; |
1022 | /* check if new lacpdu arrived */ | 1024 | /* check if new lacpdu arrived */ |
1023 | else if (lacpdu && ((port->sm_rx_state == AD_RX_EXPIRED) || | 1025 | else if (lacpdu && ((port->sm_rx_state == AD_RX_EXPIRED) || |
1024 | (port->sm_rx_state == AD_RX_DEFAULTED) || | 1026 | (port->sm_rx_state == AD_RX_DEFAULTED) || |
1025 | (port->sm_rx_state == AD_RX_CURRENT))) { | 1027 | (port->sm_rx_state == AD_RX_CURRENT))) { |
1028 | if (port->sm_rx_state != AD_RX_CURRENT) | ||
1029 | port->sm_vars |= AD_MONITOR_CHURNED; | ||
1026 | port->sm_rx_timer_counter = 0; | 1030 | port->sm_rx_timer_counter = 0; |
1027 | port->sm_rx_state = AD_RX_CURRENT; | 1031 | port->sm_rx_state = AD_RX_CURRENT; |
1028 | } else { | 1032 | } else { |
@@ -1100,9 +1104,11 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port) | |||
1100 | */ | 1104 | */ |
1101 | port->partner_oper.port_state &= ~AD_STATE_SYNCHRONIZATION; | 1105 | port->partner_oper.port_state &= ~AD_STATE_SYNCHRONIZATION; |
1102 | port->sm_vars &= ~AD_PORT_MATCHED; | 1106 | port->sm_vars &= ~AD_PORT_MATCHED; |
1107 | port->partner_oper.port_state |= AD_STATE_LACP_TIMEOUT; | ||
1103 | port->partner_oper.port_state |= AD_STATE_LACP_ACTIVITY; | 1108 | port->partner_oper.port_state |= AD_STATE_LACP_ACTIVITY; |
1104 | port->sm_rx_timer_counter = __ad_timer_to_ticks(AD_CURRENT_WHILE_TIMER, (u16)(AD_SHORT_TIMEOUT)); | 1109 | port->sm_rx_timer_counter = __ad_timer_to_ticks(AD_CURRENT_WHILE_TIMER, (u16)(AD_SHORT_TIMEOUT)); |
1105 | port->actor_oper_port_state |= AD_STATE_EXPIRED; | 1110 | port->actor_oper_port_state |= AD_STATE_EXPIRED; |
1111 | port->sm_vars |= AD_MONITOR_CHURNED; | ||
1106 | break; | 1112 | break; |
1107 | case AD_RX_DEFAULTED: | 1113 | case AD_RX_DEFAULTED: |
1108 | __update_default_selected(port); | 1114 | __update_default_selected(port); |
@@ -1132,6 +1138,45 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port) | |||
1132 | } | 1138 | } |
1133 | 1139 | ||
1134 | /** | 1140 | /** |
1141 | * ad_churn_machine - handle port churn's state machine | ||
1142 | * @port: the port we're looking at | ||
1143 | * | ||
1144 | */ | ||
1145 | static void ad_churn_machine(struct port *port) | ||
1146 | { | ||
1147 | if (port->sm_vars & AD_MONITOR_CHURNED) { | ||
1148 | port->sm_vars &= ~AD_MONITOR_CHURNED; | ||
1149 | port->sm_churn_actor_state = AD_CHURN_MONITOR; | ||
1150 | port->sm_churn_partner_state = AD_CHURN_MONITOR; | ||
1151 | port->sm_churn_actor_timer_counter = | ||
1152 | __ad_timer_to_ticks(AD_ACTOR_CHURN_TIMER, 0); | ||
1153 | port->sm_churn_partner_timer_counter = | ||
1154 | __ad_timer_to_ticks(AD_PARTNER_CHURN_TIMER, 0); | ||
1155 | return; | ||
1156 | } | ||
1157 | if (port->sm_churn_actor_timer_counter && | ||
1158 | !(--port->sm_churn_actor_timer_counter) && | ||
1159 | port->sm_churn_actor_state == AD_CHURN_MONITOR) { | ||
1160 | if (port->actor_oper_port_state & AD_STATE_SYNCHRONIZATION) { | ||
1161 | port->sm_churn_actor_state = AD_NO_CHURN; | ||
1162 | } else { | ||
1163 | port->churn_actor_count++; | ||
1164 | port->sm_churn_actor_state = AD_CHURN; | ||
1165 | } | ||
1166 | } | ||
1167 | if (port->sm_churn_partner_timer_counter && | ||
1168 | !(--port->sm_churn_partner_timer_counter) && | ||
1169 | port->sm_churn_partner_state == AD_CHURN_MONITOR) { | ||
1170 | if (port->partner_oper.port_state & AD_STATE_SYNCHRONIZATION) { | ||
1171 | port->sm_churn_partner_state = AD_NO_CHURN; | ||
1172 | } else { | ||
1173 | port->churn_partner_count++; | ||
1174 | port->sm_churn_partner_state = AD_CHURN; | ||
1175 | } | ||
1176 | } | ||
1177 | } | ||
1178 | |||
1179 | /** | ||
1135 | * ad_tx_machine - handle a port's tx state machine | 1180 | * ad_tx_machine - handle a port's tx state machine |
1136 | * @port: the port we're looking at | 1181 | * @port: the port we're looking at |
1137 | */ | 1182 | */ |
@@ -1745,6 +1790,13 @@ static void ad_initialize_port(struct port *port, int lacp_fast) | |||
1745 | port->next_port_in_aggregator = NULL; | 1790 | port->next_port_in_aggregator = NULL; |
1746 | port->transaction_id = 0; | 1791 | port->transaction_id = 0; |
1747 | 1792 | ||
1793 | port->sm_churn_actor_timer_counter = 0; | ||
1794 | port->sm_churn_actor_state = 0; | ||
1795 | port->churn_actor_count = 0; | ||
1796 | port->sm_churn_partner_timer_counter = 0; | ||
1797 | port->sm_churn_partner_state = 0; | ||
1798 | port->churn_partner_count = 0; | ||
1799 | |||
1748 | memcpy(&port->lacpdu, &lacpdu, sizeof(lacpdu)); | 1800 | memcpy(&port->lacpdu, &lacpdu, sizeof(lacpdu)); |
1749 | } | 1801 | } |
1750 | } | 1802 | } |
@@ -2164,6 +2216,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work) | |||
2164 | ad_port_selection_logic(port, &update_slave_arr); | 2216 | ad_port_selection_logic(port, &update_slave_arr); |
2165 | ad_mux_machine(port, &update_slave_arr); | 2217 | ad_mux_machine(port, &update_slave_arr); |
2166 | ad_tx_machine(port); | 2218 | ad_tx_machine(port); |
2219 | ad_churn_machine(port); | ||
2167 | 2220 | ||
2168 | /* turn off the BEGIN bit, since we already handled it */ | 2221 | /* turn off the BEGIN bit, since we already handled it */ |
2169 | if (port->sm_vars & AD_PORT_BEGIN) | 2222 | if (port->sm_vars & AD_PORT_BEGIN) |
@@ -2485,6 +2538,9 @@ int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond, | |||
2485 | if (skb->protocol != PKT_TYPE_LACPDU) | 2538 | if (skb->protocol != PKT_TYPE_LACPDU) |
2486 | return RX_HANDLER_ANOTHER; | 2539 | return RX_HANDLER_ANOTHER; |
2487 | 2540 | ||
2541 | if (!MAC_ADDRESS_EQUAL(eth_hdr(skb)->h_dest, lacpdu_mcast_addr)) | ||
2542 | return RX_HANDLER_ANOTHER; | ||
2543 | |||
2488 | lacpdu = skb_header_pointer(skb, 0, sizeof(_lacpdu), &_lacpdu); | 2544 | lacpdu = skb_header_pointer(skb, 0, sizeof(_lacpdu), &_lacpdu); |
2489 | if (!lacpdu) | 2545 | if (!lacpdu) |
2490 | return RX_HANDLER_ANOTHER; | 2546 | return RX_HANDLER_ANOTHER; |
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index b979c265fc51..675b082283d6 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -2900,6 +2900,8 @@ static int bond_slave_netdev_event(unsigned long event, | |||
2900 | if (old_duplex != slave->duplex) | 2900 | if (old_duplex != slave->duplex) |
2901 | bond_3ad_adapter_duplex_changed(slave); | 2901 | bond_3ad_adapter_duplex_changed(slave); |
2902 | } | 2902 | } |
2903 | /* Fallthrough */ | ||
2904 | case NETDEV_DOWN: | ||
2903 | /* Refresh slave-array if applicable! | 2905 | /* Refresh slave-array if applicable! |
2904 | * If the setup does not use miimon or arpmon (mode-specific!), | 2906 | * If the setup does not use miimon or arpmon (mode-specific!), |
2905 | * then these events will not cause the slave-array to be | 2907 | * then these events will not cause the slave-array to be |
@@ -2911,10 +2913,6 @@ static int bond_slave_netdev_event(unsigned long event, | |||
2911 | if (bond_mode_uses_xmit_hash(bond)) | 2913 | if (bond_mode_uses_xmit_hash(bond)) |
2912 | bond_update_slave_arr(bond, NULL); | 2914 | bond_update_slave_arr(bond, NULL); |
2913 | break; | 2915 | break; |
2914 | case NETDEV_DOWN: | ||
2915 | if (bond_mode_uses_xmit_hash(bond)) | ||
2916 | bond_update_slave_arr(bond, NULL); | ||
2917 | break; | ||
2918 | case NETDEV_CHANGEMTU: | 2916 | case NETDEV_CHANGEMTU: |
2919 | /* TODO: Should slaves be allowed to | 2917 | /* TODO: Should slaves be allowed to |
2920 | * independently alter their MTU? For | 2918 | * independently alter their MTU? For |
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c index 976f5ad2a0f2..62694cfc05b6 100644 --- a/drivers/net/bonding/bond_procfs.c +++ b/drivers/net/bonding/bond_procfs.c | |||
@@ -176,18 +176,51 @@ static void bond_info_show_slave(struct seq_file *seq, | |||
176 | slave->link_failure_count); | 176 | slave->link_failure_count); |
177 | 177 | ||
178 | seq_printf(seq, "Permanent HW addr: %pM\n", slave->perm_hwaddr); | 178 | seq_printf(seq, "Permanent HW addr: %pM\n", slave->perm_hwaddr); |
179 | seq_printf(seq, "Slave queue ID: %d\n", slave->queue_id); | ||
179 | 180 | ||
180 | if (BOND_MODE(bond) == BOND_MODE_8023AD) { | 181 | if (BOND_MODE(bond) == BOND_MODE_8023AD) { |
181 | const struct aggregator *agg | 182 | const struct port *port = &SLAVE_AD_INFO(slave)->port; |
182 | = SLAVE_AD_INFO(slave)->port.aggregator; | 183 | const struct aggregator *agg = port->aggregator; |
183 | 184 | ||
184 | if (agg) | 185 | if (agg) { |
185 | seq_printf(seq, "Aggregator ID: %d\n", | 186 | seq_printf(seq, "Aggregator ID: %d\n", |
186 | agg->aggregator_identifier); | 187 | agg->aggregator_identifier); |
187 | else | 188 | seq_printf(seq, "Actor Churn State: %s\n", |
189 | bond_3ad_churn_desc(port->sm_churn_actor_state)); | ||
190 | seq_printf(seq, "Partner Churn State: %s\n", | ||
191 | bond_3ad_churn_desc(port->sm_churn_partner_state)); | ||
192 | seq_printf(seq, "Actor Churned Count: %d\n", | ||
193 | port->churn_actor_count); | ||
194 | seq_printf(seq, "Partner Churned Count: %d\n", | ||
195 | port->churn_partner_count); | ||
196 | |||
197 | seq_puts(seq, "details actor lacp pdu:\n"); | ||
198 | seq_printf(seq, " system priority: %d\n", | ||
199 | port->actor_system_priority); | ||
200 | seq_printf(seq, " port key: %d\n", | ||
201 | port->actor_oper_port_key); | ||
202 | seq_printf(seq, " port priority: %d\n", | ||
203 | port->actor_port_priority); | ||
204 | seq_printf(seq, " port number: %d\n", | ||
205 | port->actor_port_number); | ||
206 | seq_printf(seq, " port state: %d\n", | ||
207 | port->actor_oper_port_state); | ||
208 | |||
209 | seq_puts(seq, "details partner lacp pdu:\n"); | ||
210 | seq_printf(seq, " system priority: %d\n", | ||
211 | port->partner_oper.system_priority); | ||
212 | seq_printf(seq, " oper key: %d\n", | ||
213 | port->partner_oper.key); | ||
214 | seq_printf(seq, " port priority: %d\n", | ||
215 | port->partner_oper.port_priority); | ||
216 | seq_printf(seq, " port number: %d\n", | ||
217 | port->partner_oper.port_number); | ||
218 | seq_printf(seq, " port state: %d\n", | ||
219 | port->partner_oper.port_state); | ||
220 | } else { | ||
188 | seq_puts(seq, "Aggregator ID: N/A\n"); | 221 | seq_puts(seq, "Aggregator ID: N/A\n"); |
222 | } | ||
189 | } | 223 | } |
190 | seq_printf(seq, "Slave queue ID: %d\n", slave->queue_id); | ||
191 | } | 224 | } |
192 | 225 | ||
193 | static int bond_info_seq_show(struct seq_file *seq, void *v) | 226 | static int bond_info_seq_show(struct seq_file *seq, void *v) |
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 4daffb284931..cedb572bf25a 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/of_address.h> | 23 | #include <linux/of_address.h> |
24 | #include <net/dsa.h> | 24 | #include <net/dsa.h> |
25 | #include <linux/ethtool.h> | 25 | #include <linux/ethtool.h> |
26 | #include <linux/if_bridge.h> | ||
26 | 27 | ||
27 | #include "bcm_sf2.h" | 28 | #include "bcm_sf2.h" |
28 | #include "bcm_sf2_regs.h" | 29 | #include "bcm_sf2_regs.h" |
@@ -299,10 +300,14 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port, | |||
299 | if (port == 7) | 300 | if (port == 7) |
300 | intrl2_1_mask_clear(priv, P_IRQ_MASK(P7_IRQ_OFF)); | 301 | intrl2_1_mask_clear(priv, P_IRQ_MASK(P7_IRQ_OFF)); |
301 | 302 | ||
302 | /* Set this port, and only this one to be in the default VLAN */ | 303 | /* Set this port, and only this one to be in the default VLAN, |
304 | * if member of a bridge, restore its membership prior to | ||
305 | * bringing down this port. | ||
306 | */ | ||
303 | reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port)); | 307 | reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port)); |
304 | reg &= ~PORT_VLAN_CTRL_MASK; | 308 | reg &= ~PORT_VLAN_CTRL_MASK; |
305 | reg |= (1 << port); | 309 | reg |= (1 << port); |
310 | reg |= priv->port_sts[port].vlan_ctl_mask; | ||
306 | core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(port)); | 311 | core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(port)); |
307 | 312 | ||
308 | bcm_sf2_imp_vlan_setup(ds, cpu_port); | 313 | bcm_sf2_imp_vlan_setup(ds, cpu_port); |
@@ -400,6 +405,151 @@ static int bcm_sf2_sw_set_eee(struct dsa_switch *ds, int port, | |||
400 | return 0; | 405 | return 0; |
401 | } | 406 | } |
402 | 407 | ||
408 | /* Fast-ageing of ARL entries for a given port, equivalent to an ARL | ||
409 | * flush for that port. | ||
410 | */ | ||
411 | static int bcm_sf2_sw_fast_age_port(struct dsa_switch *ds, int port) | ||
412 | { | ||
413 | struct bcm_sf2_priv *priv = ds_to_priv(ds); | ||
414 | unsigned int timeout = 1000; | ||
415 | u32 reg; | ||
416 | |||
417 | core_writel(priv, port, CORE_FAST_AGE_PORT); | ||
418 | |||
419 | reg = core_readl(priv, CORE_FAST_AGE_CTRL); | ||
420 | reg |= EN_AGE_PORT | FAST_AGE_STR_DONE; | ||
421 | core_writel(priv, reg, CORE_FAST_AGE_CTRL); | ||
422 | |||
423 | do { | ||
424 | reg = core_readl(priv, CORE_FAST_AGE_CTRL); | ||
425 | if (!(reg & FAST_AGE_STR_DONE)) | ||
426 | break; | ||
427 | |||
428 | cpu_relax(); | ||
429 | } while (timeout--); | ||
430 | |||
431 | if (!timeout) | ||
432 | return -ETIMEDOUT; | ||
433 | |||
434 | return 0; | ||
435 | } | ||
436 | |||
437 | static int bcm_sf2_sw_br_join(struct dsa_switch *ds, int port, | ||
438 | u32 br_port_mask) | ||
439 | { | ||
440 | struct bcm_sf2_priv *priv = ds_to_priv(ds); | ||
441 | unsigned int i; | ||
442 | u32 reg, p_ctl; | ||
443 | |||
444 | p_ctl = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port)); | ||
445 | |||
446 | for (i = 0; i < priv->hw_params.num_ports; i++) { | ||
447 | if (!((1 << i) & br_port_mask)) | ||
448 | continue; | ||
449 | |||
450 | /* Add this local port to the remote port VLAN control | ||
451 | * membership and update the remote port bitmask | ||
452 | */ | ||
453 | reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i)); | ||
454 | reg |= 1 << port; | ||
455 | core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i)); | ||
456 | priv->port_sts[i].vlan_ctl_mask = reg; | ||
457 | |||
458 | p_ctl |= 1 << i; | ||
459 | } | ||
460 | |||
461 | /* Configure the local port VLAN control membership to include | ||
462 | * remote ports and update the local port bitmask | ||
463 | */ | ||
464 | core_writel(priv, p_ctl, CORE_PORT_VLAN_CTL_PORT(port)); | ||
465 | priv->port_sts[port].vlan_ctl_mask = p_ctl; | ||
466 | |||
467 | return 0; | ||
468 | } | ||
469 | |||
470 | static int bcm_sf2_sw_br_leave(struct dsa_switch *ds, int port, | ||
471 | u32 br_port_mask) | ||
472 | { | ||
473 | struct bcm_sf2_priv *priv = ds_to_priv(ds); | ||
474 | unsigned int i; | ||
475 | u32 reg, p_ctl; | ||
476 | |||
477 | p_ctl = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port)); | ||
478 | |||
479 | for (i = 0; i < priv->hw_params.num_ports; i++) { | ||
480 | /* Don't touch the remaining ports */ | ||
481 | if (!((1 << i) & br_port_mask)) | ||
482 | continue; | ||
483 | |||
484 | reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i)); | ||
485 | reg &= ~(1 << port); | ||
486 | core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i)); | ||
487 | priv->port_sts[port].vlan_ctl_mask = reg; | ||
488 | |||
489 | /* Prevent self removal to preserve isolation */ | ||
490 | if (port != i) | ||
491 | p_ctl &= ~(1 << i); | ||
492 | } | ||
493 | |||
494 | core_writel(priv, p_ctl, CORE_PORT_VLAN_CTL_PORT(port)); | ||
495 | priv->port_sts[port].vlan_ctl_mask = p_ctl; | ||
496 | |||
497 | return 0; | ||
498 | } | ||
499 | |||
500 | static int bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port, | ||
501 | u8 state) | ||
502 | { | ||
503 | struct bcm_sf2_priv *priv = ds_to_priv(ds); | ||
504 | u8 hw_state, cur_hw_state; | ||
505 | int ret = 0; | ||
506 | u32 reg; | ||
507 | |||
508 | reg = core_readl(priv, CORE_G_PCTL_PORT(port)); | ||
509 | cur_hw_state = reg >> G_MISTP_STATE_SHIFT; | ||
510 | |||
511 | switch (state) { | ||
512 | case BR_STATE_DISABLED: | ||
513 | hw_state = G_MISTP_DIS_STATE; | ||
514 | break; | ||
515 | case BR_STATE_LISTENING: | ||
516 | hw_state = G_MISTP_LISTEN_STATE; | ||
517 | break; | ||
518 | case BR_STATE_LEARNING: | ||
519 | hw_state = G_MISTP_LEARN_STATE; | ||
520 | break; | ||
521 | case BR_STATE_FORWARDING: | ||
522 | hw_state = G_MISTP_FWD_STATE; | ||
523 | break; | ||
524 | case BR_STATE_BLOCKING: | ||
525 | hw_state = G_MISTP_BLOCK_STATE; | ||
526 | break; | ||
527 | default: | ||
528 | pr_err("%s: invalid STP state: %d\n", __func__, state); | ||
529 | return -EINVAL; | ||
530 | } | ||
531 | |||
532 | /* Fast-age ARL entries if we are moving a port from Learning or | ||
533 | * Forwarding state to Disabled, Blocking or Listening state | ||
534 | */ | ||
535 | if (cur_hw_state != hw_state) { | ||
536 | if (cur_hw_state & 4 && !(hw_state & 4)) { | ||
537 | ret = bcm_sf2_sw_fast_age_port(ds, port); | ||
538 | if (ret) { | ||
539 | pr_err("%s: fast-ageing failed\n", __func__); | ||
540 | return ret; | ||
541 | } | ||
542 | } | ||
543 | } | ||
544 | |||
545 | reg = core_readl(priv, CORE_G_PCTL_PORT(port)); | ||
546 | reg &= ~(G_MISTP_STATE_MASK << G_MISTP_STATE_SHIFT); | ||
547 | reg |= hw_state; | ||
548 | core_writel(priv, reg, CORE_G_PCTL_PORT(port)); | ||
549 | |||
550 | return 0; | ||
551 | } | ||
552 | |||
403 | static irqreturn_t bcm_sf2_switch_0_isr(int irq, void *dev_id) | 553 | static irqreturn_t bcm_sf2_switch_0_isr(int irq, void *dev_id) |
404 | { | 554 | { |
405 | struct bcm_sf2_priv *priv = dev_id; | 555 | struct bcm_sf2_priv *priv = dev_id; |
@@ -916,6 +1066,9 @@ static struct dsa_switch_driver bcm_sf2_switch_driver = { | |||
916 | .port_disable = bcm_sf2_port_disable, | 1066 | .port_disable = bcm_sf2_port_disable, |
917 | .get_eee = bcm_sf2_sw_get_eee, | 1067 | .get_eee = bcm_sf2_sw_get_eee, |
918 | .set_eee = bcm_sf2_sw_set_eee, | 1068 | .set_eee = bcm_sf2_sw_set_eee, |
1069 | .port_join_bridge = bcm_sf2_sw_br_join, | ||
1070 | .port_leave_bridge = bcm_sf2_sw_br_leave, | ||
1071 | .port_stp_update = bcm_sf2_sw_br_set_stp_state, | ||
919 | }; | 1072 | }; |
920 | 1073 | ||
921 | static int __init bcm_sf2_init(void) | 1074 | static int __init bcm_sf2_init(void) |
diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h index 7b7053d3c5fa..22e2ebf31333 100644 --- a/drivers/net/dsa/bcm_sf2.h +++ b/drivers/net/dsa/bcm_sf2.h | |||
@@ -46,6 +46,8 @@ struct bcm_sf2_port_status { | |||
46 | unsigned int link; | 46 | unsigned int link; |
47 | 47 | ||
48 | struct ethtool_eee eee; | 48 | struct ethtool_eee eee; |
49 | |||
50 | u32 vlan_ctl_mask; | ||
49 | }; | 51 | }; |
50 | 52 | ||
51 | struct bcm_sf2_priv { | 53 | struct bcm_sf2_priv { |
diff --git a/drivers/net/dsa/bcm_sf2_regs.h b/drivers/net/dsa/bcm_sf2_regs.h index cabdfa5e217a..fa4e6e78c9ea 100644 --- a/drivers/net/dsa/bcm_sf2_regs.h +++ b/drivers/net/dsa/bcm_sf2_regs.h | |||
@@ -163,6 +163,21 @@ | |||
163 | #define EN_CHIP_RST (1 << 6) | 163 | #define EN_CHIP_RST (1 << 6) |
164 | #define EN_SW_RESET (1 << 4) | 164 | #define EN_SW_RESET (1 << 4) |
165 | 165 | ||
166 | #define CORE_FAST_AGE_CTRL 0x00220 | ||
167 | #define EN_FAST_AGE_STATIC (1 << 0) | ||
168 | #define EN_AGE_DYNAMIC (1 << 1) | ||
169 | #define EN_AGE_PORT (1 << 2) | ||
170 | #define EN_AGE_VLAN (1 << 3) | ||
171 | #define EN_AGE_SPT (1 << 4) | ||
172 | #define EN_AGE_MCAST (1 << 5) | ||
173 | #define FAST_AGE_STR_DONE (1 << 7) | ||
174 | |||
175 | #define CORE_FAST_AGE_PORT 0x00224 | ||
176 | #define AGE_PORT_MASK 0xf | ||
177 | |||
178 | #define CORE_FAST_AGE_VID 0x00228 | ||
179 | #define AGE_VID_MASK 0x3fff | ||
180 | |||
166 | #define CORE_LNKSTS 0x00400 | 181 | #define CORE_LNKSTS 0x00400 |
167 | #define LNK_STS_MASK 0x1ff | 182 | #define LNK_STS_MASK 0x1ff |
168 | 183 | ||
diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c index aa33d16f2e22..9808c860a797 100644 --- a/drivers/net/dsa/mv88e6171.c +++ b/drivers/net/dsa/mv88e6171.c | |||
@@ -51,8 +51,11 @@ static int mv88e6171_switch_reset(struct dsa_switch *ds) | |||
51 | /* Wait for transmit queues to drain. */ | 51 | /* Wait for transmit queues to drain. */ |
52 | usleep_range(2000, 4000); | 52 | usleep_range(2000, 4000); |
53 | 53 | ||
54 | /* Reset the switch. */ | 54 | /* Reset the switch. Keep PPU active. The PPU needs to be |
55 | REG_WRITE(REG_GLOBAL, 0x04, 0xc400); | 55 | * active to support indirect phy register accesses through |
56 | * global registers 0x18 and 0x19. | ||
57 | */ | ||
58 | REG_WRITE(REG_GLOBAL, 0x04, 0xc000); | ||
56 | 59 | ||
57 | /* Wait up to one second for reset to complete. */ | 60 | /* Wait up to one second for reset to complete. */ |
58 | timeout = jiffies + 1 * HZ; | 61 | timeout = jiffies + 1 * HZ; |
@@ -83,11 +86,10 @@ static int mv88e6171_setup_global(struct dsa_switch *ds) | |||
83 | int ret; | 86 | int ret; |
84 | int i; | 87 | int i; |
85 | 88 | ||
86 | /* Disable the PHY polling unit (since there won't be any | 89 | /* Discard packets with excessive collisions, mask all |
87 | * external PHYs to poll), don't discard packets with | 90 | * interrupt sources, enable PPU. |
88 | * excessive collisions, and mask all interrupt sources. | ||
89 | */ | 91 | */ |
90 | REG_WRITE(REG_GLOBAL, 0x04, 0x0000); | 92 | REG_WRITE(REG_GLOBAL, 0x04, 0x6000); |
91 | 93 | ||
92 | /* Set the default address aging time to 5 minutes, and | 94 | /* Set the default address aging time to 5 minutes, and |
93 | * enable address learn messages to be sent to all message | 95 | * enable address learn messages to be sent to all message |
@@ -336,7 +338,7 @@ mv88e6171_phy_read(struct dsa_switch *ds, int port, int regnum) | |||
336 | int ret; | 338 | int ret; |
337 | 339 | ||
338 | mutex_lock(&ps->phy_mutex); | 340 | mutex_lock(&ps->phy_mutex); |
339 | ret = mv88e6xxx_phy_read(ds, addr, regnum); | 341 | ret = mv88e6xxx_phy_read_indirect(ds, addr, regnum); |
340 | mutex_unlock(&ps->phy_mutex); | 342 | mutex_unlock(&ps->phy_mutex); |
341 | return ret; | 343 | return ret; |
342 | } | 344 | } |
@@ -350,7 +352,7 @@ mv88e6171_phy_write(struct dsa_switch *ds, | |||
350 | int ret; | 352 | int ret; |
351 | 353 | ||
352 | mutex_lock(&ps->phy_mutex); | 354 | mutex_lock(&ps->phy_mutex); |
353 | ret = mv88e6xxx_phy_write(ds, addr, regnum, val); | 355 | ret = mv88e6xxx_phy_write_indirect(ds, addr, regnum, val); |
354 | mutex_unlock(&ps->phy_mutex); | 356 | mutex_unlock(&ps->phy_mutex); |
355 | return ret; | 357 | return ret; |
356 | } | 358 | } |
diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c index e13adc7b3dda..1ebd8f96072a 100644 --- a/drivers/net/dsa/mv88e6352.c +++ b/drivers/net/dsa/mv88e6352.c | |||
@@ -22,59 +22,6 @@ | |||
22 | #include <net/dsa.h> | 22 | #include <net/dsa.h> |
23 | #include "mv88e6xxx.h" | 23 | #include "mv88e6xxx.h" |
24 | 24 | ||
25 | static int mv88e6352_wait(struct dsa_switch *ds, int reg, int offset, u16 mask) | ||
26 | { | ||
27 | unsigned long timeout = jiffies + HZ / 10; | ||
28 | |||
29 | while (time_before(jiffies, timeout)) { | ||
30 | int ret; | ||
31 | |||
32 | ret = REG_READ(reg, offset); | ||
33 | if (!(ret & mask)) | ||
34 | return 0; | ||
35 | |||
36 | usleep_range(1000, 2000); | ||
37 | } | ||
38 | return -ETIMEDOUT; | ||
39 | } | ||
40 | |||
41 | static inline int mv88e6352_phy_wait(struct dsa_switch *ds) | ||
42 | { | ||
43 | return mv88e6352_wait(ds, REG_GLOBAL2, 0x18, 0x8000); | ||
44 | } | ||
45 | |||
46 | static inline int mv88e6352_eeprom_load_wait(struct dsa_switch *ds) | ||
47 | { | ||
48 | return mv88e6352_wait(ds, REG_GLOBAL2, 0x14, 0x0800); | ||
49 | } | ||
50 | |||
51 | static inline int mv88e6352_eeprom_busy_wait(struct dsa_switch *ds) | ||
52 | { | ||
53 | return mv88e6352_wait(ds, REG_GLOBAL2, 0x14, 0x8000); | ||
54 | } | ||
55 | |||
56 | static int __mv88e6352_phy_read(struct dsa_switch *ds, int addr, int regnum) | ||
57 | { | ||
58 | int ret; | ||
59 | |||
60 | REG_WRITE(REG_GLOBAL2, 0x18, 0x9800 | (addr << 5) | regnum); | ||
61 | |||
62 | ret = mv88e6352_phy_wait(ds); | ||
63 | if (ret < 0) | ||
64 | return ret; | ||
65 | |||
66 | return REG_READ(REG_GLOBAL2, 0x19); | ||
67 | } | ||
68 | |||
69 | static int __mv88e6352_phy_write(struct dsa_switch *ds, int addr, int regnum, | ||
70 | u16 val) | ||
71 | { | ||
72 | REG_WRITE(REG_GLOBAL2, 0x19, val); | ||
73 | REG_WRITE(REG_GLOBAL2, 0x18, 0x9400 | (addr << 5) | regnum); | ||
74 | |||
75 | return mv88e6352_phy_wait(ds); | ||
76 | } | ||
77 | |||
78 | static char *mv88e6352_probe(struct device *host_dev, int sw_addr) | 25 | static char *mv88e6352_probe(struct device *host_dev, int sw_addr) |
79 | { | 26 | { |
80 | struct mii_bus *bus = dsa_host_dev_to_mii_bus(host_dev); | 27 | struct mii_bus *bus = dsa_host_dev_to_mii_bus(host_dev); |
@@ -346,12 +293,12 @@ static int mv88e6352_phy_page_read(struct dsa_switch *ds, | |||
346 | int ret; | 293 | int ret; |
347 | 294 | ||
348 | mutex_lock(&ps->phy_mutex); | 295 | mutex_lock(&ps->phy_mutex); |
349 | ret = __mv88e6352_phy_write(ds, port, 0x16, page); | 296 | ret = mv88e6xxx_phy_write_indirect(ds, port, 0x16, page); |
350 | if (ret < 0) | 297 | if (ret < 0) |
351 | goto error; | 298 | goto error; |
352 | ret = __mv88e6352_phy_read(ds, port, reg); | 299 | ret = mv88e6xxx_phy_read_indirect(ds, port, reg); |
353 | error: | 300 | error: |
354 | __mv88e6352_phy_write(ds, port, 0x16, 0x0); | 301 | mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0); |
355 | mutex_unlock(&ps->phy_mutex); | 302 | mutex_unlock(&ps->phy_mutex); |
356 | return ret; | 303 | return ret; |
357 | } | 304 | } |
@@ -363,13 +310,13 @@ static int mv88e6352_phy_page_write(struct dsa_switch *ds, | |||
363 | int ret; | 310 | int ret; |
364 | 311 | ||
365 | mutex_lock(&ps->phy_mutex); | 312 | mutex_lock(&ps->phy_mutex); |
366 | ret = __mv88e6352_phy_write(ds, port, 0x16, page); | 313 | ret = mv88e6xxx_phy_write_indirect(ds, port, 0x16, page); |
367 | if (ret < 0) | 314 | if (ret < 0) |
368 | goto error; | 315 | goto error; |
369 | 316 | ||
370 | ret = __mv88e6352_phy_write(ds, port, reg, val); | 317 | ret = mv88e6xxx_phy_write_indirect(ds, port, reg, val); |
371 | error: | 318 | error: |
372 | __mv88e6352_phy_write(ds, port, 0x16, 0x0); | 319 | mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0); |
373 | mutex_unlock(&ps->phy_mutex); | 320 | mutex_unlock(&ps->phy_mutex); |
374 | return ret; | 321 | return ret; |
375 | } | 322 | } |
@@ -482,7 +429,7 @@ mv88e6352_phy_read(struct dsa_switch *ds, int port, int regnum) | |||
482 | return addr; | 429 | return addr; |
483 | 430 | ||
484 | mutex_lock(&ps->phy_mutex); | 431 | mutex_lock(&ps->phy_mutex); |
485 | ret = __mv88e6352_phy_read(ds, addr, regnum); | 432 | ret = mv88e6xxx_phy_read_indirect(ds, addr, regnum); |
486 | mutex_unlock(&ps->phy_mutex); | 433 | mutex_unlock(&ps->phy_mutex); |
487 | 434 | ||
488 | return ret; | 435 | return ret; |
@@ -499,7 +446,7 @@ mv88e6352_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val) | |||
499 | return addr; | 446 | return addr; |
500 | 447 | ||
501 | mutex_lock(&ps->phy_mutex); | 448 | mutex_lock(&ps->phy_mutex); |
502 | ret = __mv88e6352_phy_write(ds, addr, regnum, val); | 449 | ret = mv88e6xxx_phy_write_indirect(ds, addr, regnum, val); |
503 | mutex_unlock(&ps->phy_mutex); | 450 | mutex_unlock(&ps->phy_mutex); |
504 | 451 | ||
505 | return ret; | 452 | return ret; |
@@ -553,7 +500,7 @@ static int mv88e6352_read_eeprom_word(struct dsa_switch *ds, int addr) | |||
553 | if (ret < 0) | 500 | if (ret < 0) |
554 | goto error; | 501 | goto error; |
555 | 502 | ||
556 | ret = mv88e6352_eeprom_busy_wait(ds); | 503 | ret = mv88e6xxx_eeprom_busy_wait(ds); |
557 | if (ret < 0) | 504 | if (ret < 0) |
558 | goto error; | 505 | goto error; |
559 | 506 | ||
@@ -576,7 +523,7 @@ static int mv88e6352_get_eeprom(struct dsa_switch *ds, | |||
576 | 523 | ||
577 | eeprom->magic = 0xc3ec4951; | 524 | eeprom->magic = 0xc3ec4951; |
578 | 525 | ||
579 | ret = mv88e6352_eeprom_load_wait(ds); | 526 | ret = mv88e6xxx_eeprom_load_wait(ds); |
580 | if (ret < 0) | 527 | if (ret < 0) |
581 | return ret; | 528 | return ret; |
582 | 529 | ||
@@ -657,7 +604,7 @@ static int mv88e6352_write_eeprom_word(struct dsa_switch *ds, int addr, | |||
657 | if (ret < 0) | 604 | if (ret < 0) |
658 | goto error; | 605 | goto error; |
659 | 606 | ||
660 | ret = mv88e6352_eeprom_busy_wait(ds); | 607 | ret = mv88e6xxx_eeprom_busy_wait(ds); |
661 | error: | 608 | error: |
662 | mutex_unlock(&ps->eeprom_mutex); | 609 | mutex_unlock(&ps->eeprom_mutex); |
663 | return ret; | 610 | return ret; |
@@ -681,7 +628,7 @@ static int mv88e6352_set_eeprom(struct dsa_switch *ds, | |||
681 | len = eeprom->len; | 628 | len = eeprom->len; |
682 | eeprom->len = 0; | 629 | eeprom->len = 0; |
683 | 630 | ||
684 | ret = mv88e6352_eeprom_load_wait(ds); | 631 | ret = mv88e6xxx_eeprom_load_wait(ds); |
685 | if (ret < 0) | 632 | if (ret < 0) |
686 | return ret; | 633 | return ret; |
687 | 634 | ||
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index 3e7e31a6abb7..a83ace0803e7 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c | |||
@@ -596,6 +596,59 @@ error: | |||
596 | } | 596 | } |
597 | #endif /* CONFIG_NET_DSA_HWMON */ | 597 | #endif /* CONFIG_NET_DSA_HWMON */ |
598 | 598 | ||
599 | static int mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset, u16 mask) | ||
600 | { | ||
601 | unsigned long timeout = jiffies + HZ / 10; | ||
602 | |||
603 | while (time_before(jiffies, timeout)) { | ||
604 | int ret; | ||
605 | |||
606 | ret = REG_READ(reg, offset); | ||
607 | if (!(ret & mask)) | ||
608 | return 0; | ||
609 | |||
610 | usleep_range(1000, 2000); | ||
611 | } | ||
612 | return -ETIMEDOUT; | ||
613 | } | ||
614 | |||
615 | int mv88e6xxx_phy_wait(struct dsa_switch *ds) | ||
616 | { | ||
617 | return mv88e6xxx_wait(ds, REG_GLOBAL2, 0x18, 0x8000); | ||
618 | } | ||
619 | |||
620 | int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds) | ||
621 | { | ||
622 | return mv88e6xxx_wait(ds, REG_GLOBAL2, 0x14, 0x0800); | ||
623 | } | ||
624 | |||
625 | int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds) | ||
626 | { | ||
627 | return mv88e6xxx_wait(ds, REG_GLOBAL2, 0x14, 0x8000); | ||
628 | } | ||
629 | |||
630 | int mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int addr, int regnum) | ||
631 | { | ||
632 | int ret; | ||
633 | |||
634 | REG_WRITE(REG_GLOBAL2, 0x18, 0x9800 | (addr << 5) | regnum); | ||
635 | |||
636 | ret = mv88e6xxx_phy_wait(ds); | ||
637 | if (ret < 0) | ||
638 | return ret; | ||
639 | |||
640 | return REG_READ(REG_GLOBAL2, 0x19); | ||
641 | } | ||
642 | |||
643 | int mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int addr, int regnum, | ||
644 | u16 val) | ||
645 | { | ||
646 | REG_WRITE(REG_GLOBAL2, 0x19, val); | ||
647 | REG_WRITE(REG_GLOBAL2, 0x18, 0x9400 | (addr << 5) | regnum); | ||
648 | |||
649 | return mv88e6xxx_phy_wait(ds); | ||
650 | } | ||
651 | |||
599 | static int __init mv88e6xxx_init(void) | 652 | static int __init mv88e6xxx_init(void) |
600 | { | 653 | { |
601 | #if IS_ENABLED(CONFIG_NET_DSA_MV88E6131) | 654 | #if IS_ENABLED(CONFIG_NET_DSA_MV88E6131) |
diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h index 03e397efde36..72942271bb67 100644 --- a/drivers/net/dsa/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx.h | |||
@@ -82,6 +82,12 @@ int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port); | |||
82 | void mv88e6xxx_get_regs(struct dsa_switch *ds, int port, | 82 | void mv88e6xxx_get_regs(struct dsa_switch *ds, int port, |
83 | struct ethtool_regs *regs, void *_p); | 83 | struct ethtool_regs *regs, void *_p); |
84 | int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp); | 84 | int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp); |
85 | int mv88e6xxx_phy_wait(struct dsa_switch *ds); | ||
86 | int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds); | ||
87 | int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds); | ||
88 | int mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int addr, int regnum); | ||
89 | int mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int addr, int regnum, | ||
90 | u16 val); | ||
85 | 91 | ||
86 | extern struct dsa_switch_driver mv88e6131_switch_driver; | 92 | extern struct dsa_switch_driver mv88e6131_switch_driver; |
87 | extern struct dsa_switch_driver mv88e6123_61_65_switch_driver; | 93 | extern struct dsa_switch_driver mv88e6123_61_65_switch_driver; |
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index 6725dc00750b..fd9296a5014d 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c | |||
@@ -105,11 +105,11 @@ static int altera_tse_mdio_read(struct mii_bus *bus, int mii_id, int regnum) | |||
105 | 105 | ||
106 | /* set MDIO address */ | 106 | /* set MDIO address */ |
107 | csrwr32((mii_id & 0x1f), priv->mac_dev, | 107 | csrwr32((mii_id & 0x1f), priv->mac_dev, |
108 | tse_csroffs(mdio_phy0_addr)); | 108 | tse_csroffs(mdio_phy1_addr)); |
109 | 109 | ||
110 | /* get the data */ | 110 | /* get the data */ |
111 | return csrrd32(priv->mac_dev, | 111 | return csrrd32(priv->mac_dev, |
112 | tse_csroffs(mdio_phy0) + regnum * 4) & 0xffff; | 112 | tse_csroffs(mdio_phy1) + regnum * 4) & 0xffff; |
113 | } | 113 | } |
114 | 114 | ||
115 | static int altera_tse_mdio_write(struct mii_bus *bus, int mii_id, int regnum, | 115 | static int altera_tse_mdio_write(struct mii_bus *bus, int mii_id, int regnum, |
@@ -120,10 +120,10 @@ static int altera_tse_mdio_write(struct mii_bus *bus, int mii_id, int regnum, | |||
120 | 120 | ||
121 | /* set MDIO address */ | 121 | /* set MDIO address */ |
122 | csrwr32((mii_id & 0x1f), priv->mac_dev, | 122 | csrwr32((mii_id & 0x1f), priv->mac_dev, |
123 | tse_csroffs(mdio_phy0_addr)); | 123 | tse_csroffs(mdio_phy1_addr)); |
124 | 124 | ||
125 | /* write the data */ | 125 | /* write the data */ |
126 | csrwr32(value, priv->mac_dev, tse_csroffs(mdio_phy0) + regnum * 4); | 126 | csrwr32(value, priv->mac_dev, tse_csroffs(mdio_phy1) + regnum * 4); |
127 | return 0; | 127 | return 0; |
128 | } | 128 | } |
129 | 129 | ||
@@ -1098,8 +1098,12 @@ static int tse_open(struct net_device *dev) | |||
1098 | 1098 | ||
1099 | spin_lock(&priv->mac_cfg_lock); | 1099 | spin_lock(&priv->mac_cfg_lock); |
1100 | ret = reset_mac(priv); | 1100 | ret = reset_mac(priv); |
1101 | /* Note that reset_mac will fail if the clocks are gated by the PHY | ||
1102 | * due to the PHY being put into isolation or power down mode. | ||
1103 | * This is not an error if reset fails due to no clock. | ||
1104 | */ | ||
1101 | if (ret) | 1105 | if (ret) |
1102 | netdev_err(dev, "Cannot reset MAC core (error: %d)\n", ret); | 1106 | netdev_dbg(dev, "Cannot reset MAC core (error: %d)\n", ret); |
1103 | 1107 | ||
1104 | ret = init_mac(priv); | 1108 | ret = init_mac(priv); |
1105 | spin_unlock(&priv->mac_cfg_lock); | 1109 | spin_unlock(&priv->mac_cfg_lock); |
@@ -1203,8 +1207,12 @@ static int tse_shutdown(struct net_device *dev) | |||
1203 | spin_lock(&priv->tx_lock); | 1207 | spin_lock(&priv->tx_lock); |
1204 | 1208 | ||
1205 | ret = reset_mac(priv); | 1209 | ret = reset_mac(priv); |
1210 | /* Note that reset_mac will fail if the clocks are gated by the PHY | ||
1211 | * due to the PHY being put into isolation or power down mode. | ||
1212 | * This is not an error if reset fails due to no clock. | ||
1213 | */ | ||
1206 | if (ret) | 1214 | if (ret) |
1207 | netdev_err(dev, "Cannot reset MAC core (error: %d)\n", ret); | 1215 | netdev_dbg(dev, "Cannot reset MAC core (error: %d)\n", ret); |
1208 | priv->dmaops->reset_dma(priv); | 1216 | priv->dmaops->reset_dma(priv); |
1209 | free_skbufs(dev); | 1217 | free_skbufs(dev); |
1210 | 1218 | ||
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c index 11d6e6561df1..8eb37e0194b5 100644 --- a/drivers/net/ethernet/amd/pcnet32.c +++ b/drivers/net/ethernet/amd/pcnet32.c | |||
@@ -1708,7 +1708,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) | |||
1708 | 1708 | ||
1709 | /* if the ethernet address is not valid, force to 00:00:00:00:00:00 */ | 1709 | /* if the ethernet address is not valid, force to 00:00:00:00:00:00 */ |
1710 | if (!is_valid_ether_addr(dev->dev_addr)) | 1710 | if (!is_valid_ether_addr(dev->dev_addr)) |
1711 | memset(dev->dev_addr, 0, ETH_ALEN); | 1711 | eth_zero_addr(dev->dev_addr); |
1712 | 1712 | ||
1713 | if (pcnet32_debug & NETIF_MSG_PROBE) { | 1713 | if (pcnet32_debug & NETIF_MSG_PROBE) { |
1714 | pr_cont(" %pM", dev->dev_addr); | 1714 | pr_cont(" %pM", dev->dev_addr); |
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index 41a3c9804427..ee4fdfe65e9e 100644 --- a/drivers/net/ethernet/broadcom/Kconfig +++ b/drivers/net/ethernet/broadcom/Kconfig | |||
@@ -71,12 +71,12 @@ config BCMGENET | |||
71 | Broadcom BCM7xxx Set Top Box family chipset. | 71 | Broadcom BCM7xxx Set Top Box family chipset. |
72 | 72 | ||
73 | config BNX2 | 73 | config BNX2 |
74 | tristate "QLogic NetXtremeII support" | 74 | tristate "QLogic bnx2 support" |
75 | depends on PCI | 75 | depends on PCI |
76 | select CRC32 | 76 | select CRC32 |
77 | select FW_LOADER | 77 | select FW_LOADER |
78 | ---help--- | 78 | ---help--- |
79 | This driver supports QLogic NetXtremeII gigabit Ethernet cards. | 79 | This driver supports QLogic bnx2 gigabit Ethernet cards. |
80 | 80 | ||
81 | To compile this driver as a module, choose M here: the module | 81 | To compile this driver as a module, choose M here: the module |
82 | will be called bnx2. This is recommended. | 82 | will be called bnx2. This is recommended. |
@@ -87,8 +87,8 @@ config CNIC | |||
87 | select BNX2 | 87 | select BNX2 |
88 | select UIO | 88 | select UIO |
89 | ---help--- | 89 | ---help--- |
90 | This driver supports offload features of QLogic NetXtremeII | 90 | This driver supports offload features of QLogic bnx2 gigabit |
91 | gigabit Ethernet cards. | 91 | Ethernet cards. |
92 | 92 | ||
93 | To compile this driver as a module, choose M here: the module | 93 | To compile this driver as a module, choose M here: the module |
94 | will be called cnic. This is recommended. | 94 | will be called cnic. This is recommended. |
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index 02bf0b86995b..2b66ef3d8217 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* bnx2.c: QLogic NX2 network driver. | 1 | /* bnx2.c: QLogic bnx2 network driver. |
2 | * | 2 | * |
3 | * Copyright (c) 2004-2014 Broadcom Corporation | 3 | * Copyright (c) 2004-2014 Broadcom Corporation |
4 | * Copyright (c) 2014 QLogic Corporation | 4 | * Copyright (c) 2014-2015 QLogic Corporation |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License as published by | 7 | * it under the terms of the GNU General Public License as published by |
@@ -58,8 +58,8 @@ | |||
58 | #include "bnx2_fw.h" | 58 | #include "bnx2_fw.h" |
59 | 59 | ||
60 | #define DRV_MODULE_NAME "bnx2" | 60 | #define DRV_MODULE_NAME "bnx2" |
61 | #define DRV_MODULE_VERSION "2.2.5" | 61 | #define DRV_MODULE_VERSION "2.2.6" |
62 | #define DRV_MODULE_RELDATE "December 20, 2013" | 62 | #define DRV_MODULE_RELDATE "January 29, 2014" |
63 | #define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.3.fw" | 63 | #define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.3.fw" |
64 | #define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw" | 64 | #define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw" |
65 | #define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1b.fw" | 65 | #define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1b.fw" |
@@ -72,10 +72,10 @@ | |||
72 | #define TX_TIMEOUT (5*HZ) | 72 | #define TX_TIMEOUT (5*HZ) |
73 | 73 | ||
74 | static char version[] = | 74 | static char version[] = |
75 | "QLogic NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; | 75 | "QLogic " DRV_MODULE_NAME " Gigabit Ethernet Driver v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; |
76 | 76 | ||
77 | MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>"); | 77 | MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>"); |
78 | MODULE_DESCRIPTION("QLogic NetXtreme II BCM5706/5708/5709/5716 Driver"); | 78 | MODULE_DESCRIPTION("QLogic BCM5706/5708/5709/5716 Driver"); |
79 | MODULE_LICENSE("GPL"); | 79 | MODULE_LICENSE("GPL"); |
80 | MODULE_VERSION(DRV_MODULE_VERSION); | 80 | MODULE_VERSION(DRV_MODULE_VERSION); |
81 | MODULE_FIRMWARE(FW_MIPS_FILE_06); | 81 | MODULE_FIRMWARE(FW_MIPS_FILE_06); |
@@ -4984,8 +4984,6 @@ bnx2_init_chip(struct bnx2 *bp) | |||
4984 | 4984 | ||
4985 | bp->idle_chk_status_idx = 0xffff; | 4985 | bp->idle_chk_status_idx = 0xffff; |
4986 | 4986 | ||
4987 | bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE; | ||
4988 | |||
4989 | /* Set up how to generate a link change interrupt. */ | 4987 | /* Set up how to generate a link change interrupt. */ |
4990 | BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK); | 4988 | BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK); |
4991 | 4989 | ||
@@ -7710,17 +7708,6 @@ bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state) | |||
7710 | return 0; | 7708 | return 0; |
7711 | } | 7709 | } |
7712 | 7710 | ||
7713 | static netdev_features_t | ||
7714 | bnx2_fix_features(struct net_device *dev, netdev_features_t features) | ||
7715 | { | ||
7716 | struct bnx2 *bp = netdev_priv(dev); | ||
7717 | |||
7718 | if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)) | ||
7719 | features |= NETIF_F_HW_VLAN_CTAG_RX; | ||
7720 | |||
7721 | return features; | ||
7722 | } | ||
7723 | |||
7724 | static int | 7711 | static int |
7725 | bnx2_set_features(struct net_device *dev, netdev_features_t features) | 7712 | bnx2_set_features(struct net_device *dev, netdev_features_t features) |
7726 | { | 7713 | { |
@@ -8527,7 +8514,6 @@ static const struct net_device_ops bnx2_netdev_ops = { | |||
8527 | .ndo_validate_addr = eth_validate_addr, | 8514 | .ndo_validate_addr = eth_validate_addr, |
8528 | .ndo_set_mac_address = bnx2_change_mac_addr, | 8515 | .ndo_set_mac_address = bnx2_change_mac_addr, |
8529 | .ndo_change_mtu = bnx2_change_mtu, | 8516 | .ndo_change_mtu = bnx2_change_mtu, |
8530 | .ndo_fix_features = bnx2_fix_features, | ||
8531 | .ndo_set_features = bnx2_set_features, | 8517 | .ndo_set_features = bnx2_set_features, |
8532 | .ndo_tx_timeout = bnx2_tx_timeout, | 8518 | .ndo_tx_timeout = bnx2_tx_timeout, |
8533 | #ifdef CONFIG_NET_POLL_CONTROLLER | 8519 | #ifdef CONFIG_NET_POLL_CONTROLLER |
@@ -8578,6 +8564,9 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
8578 | dev->features |= dev->hw_features; | 8564 | dev->features |= dev->hw_features; |
8579 | dev->priv_flags |= IFF_UNICAST_FLT; | 8565 | dev->priv_flags |= IFF_UNICAST_FLT; |
8580 | 8566 | ||
8567 | if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)) | ||
8568 | dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX; | ||
8569 | |||
8581 | if ((rc = register_netdev(dev))) { | 8570 | if ((rc = register_netdev(dev))) { |
8582 | dev_err(&pdev->dev, "Cannot register net device\n"); | 8571 | dev_err(&pdev->dev, "Cannot register net device\n"); |
8583 | goto error; | 8572 | goto error; |
diff --git a/drivers/net/ethernet/broadcom/bnx2.h b/drivers/net/ethernet/broadcom/bnx2.h index 28df35d35893..f92f76c44756 100644 --- a/drivers/net/ethernet/broadcom/bnx2.h +++ b/drivers/net/ethernet/broadcom/bnx2.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /* bnx2.h: QLogic NX2 network driver. | 1 | /* bnx2.h: QLogic bnx2 network driver. |
2 | * | 2 | * |
3 | * Copyright (c) 2004-2014 Broadcom Corporation | 3 | * Copyright (c) 2004-2014 Broadcom Corporation |
4 | * Copyright (c) 2014 QLogic Corporation | 4 | * Copyright (c) 2014-2015 QLogic Corporation |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License as published by | 7 | * it under the terms of the GNU General Public License as published by |
diff --git a/drivers/net/ethernet/broadcom/bnx2_fw.h b/drivers/net/ethernet/broadcom/bnx2_fw.h index 7db79c28b5ff..b0f2ccadaffd 100644 --- a/drivers/net/ethernet/broadcom/bnx2_fw.h +++ b/drivers/net/ethernet/broadcom/bnx2_fw.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /* bnx2_fw.h: QLogic NX2 network driver. | 1 | /* bnx2_fw.h: QLogic bnx2 network driver. |
2 | * | 2 | * |
3 | * Copyright (c) 2004, 2005, 2006, 2007 Broadcom Corporation | 3 | * Copyright (c) 2004, 2005, 2006, 2007 Broadcom Corporation |
4 | * Copyright (c) 2014 QLogic Corporation | 4 | * Copyright (c) 2014-2015 QLogic Corporation |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License as published by | 7 | * it under the terms of the GNU General Public License as published by |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index ffe4e003e636..e3d853cab7c9 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | |||
@@ -2446,7 +2446,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode) | |||
2446 | } | 2446 | } |
2447 | packet = skb_put(skb, pkt_size); | 2447 | packet = skb_put(skb, pkt_size); |
2448 | memcpy(packet, bp->dev->dev_addr, ETH_ALEN); | 2448 | memcpy(packet, bp->dev->dev_addr, ETH_ALEN); |
2449 | memset(packet + ETH_ALEN, 0, ETH_ALEN); | 2449 | eth_zero_addr(packet + ETH_ALEN); |
2450 | memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN)); | 2450 | memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN)); |
2451 | for (i = ETH_HLEN; i < pkt_size; i++) | 2451 | for (i = ETH_HLEN; i < pkt_size; i++) |
2452 | packet[i] = (unsigned char) (i & 0xff); | 2452 | packet[i] = (unsigned char) (i & 0xff); |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 7155e1d2c208..98dcb03fe1b8 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | |||
@@ -11546,13 +11546,13 @@ static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp) | |||
11546 | /* Disable iSCSI OOO if MAC configuration is invalid. */ | 11546 | /* Disable iSCSI OOO if MAC configuration is invalid. */ |
11547 | if (!is_valid_ether_addr(iscsi_mac)) { | 11547 | if (!is_valid_ether_addr(iscsi_mac)) { |
11548 | bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG; | 11548 | bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG; |
11549 | memset(iscsi_mac, 0, ETH_ALEN); | 11549 | eth_zero_addr(iscsi_mac); |
11550 | } | 11550 | } |
11551 | 11551 | ||
11552 | /* Disable FCoE if MAC configuration is invalid. */ | 11552 | /* Disable FCoE if MAC configuration is invalid. */ |
11553 | if (!is_valid_ether_addr(fip_mac)) { | 11553 | if (!is_valid_ether_addr(fip_mac)) { |
11554 | bp->flags |= NO_FCOE_FLAG; | 11554 | bp->flags |= NO_FCOE_FLAG; |
11555 | memset(bp->fip_mac, 0, ETH_ALEN); | 11555 | eth_zero_addr(bp->fip_mac); |
11556 | } | 11556 | } |
11557 | } | 11557 | } |
11558 | 11558 | ||
@@ -11563,7 +11563,7 @@ static void bnx2x_get_mac_hwinfo(struct bnx2x *bp) | |||
11563 | int port = BP_PORT(bp); | 11563 | int port = BP_PORT(bp); |
11564 | 11564 | ||
11565 | /* Zero primary MAC configuration */ | 11565 | /* Zero primary MAC configuration */ |
11566 | memset(bp->dev->dev_addr, 0, ETH_ALEN); | 11566 | eth_zero_addr(bp->dev->dev_addr); |
11567 | 11567 | ||
11568 | if (BP_NOMCP(bp)) { | 11568 | if (BP_NOMCP(bp)) { |
11569 | BNX2X_ERROR("warning: random MAC workaround active\n"); | 11569 | BNX2X_ERROR("warning: random MAC workaround active\n"); |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index e5aca2de1871..8638d6c97caa 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | |||
@@ -2693,7 +2693,7 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx, | |||
2693 | memcpy(&ivi->mac, bulletin->mac, ETH_ALEN); | 2693 | memcpy(&ivi->mac, bulletin->mac, ETH_ALEN); |
2694 | else | 2694 | else |
2695 | /* function has not been loaded yet. Show mac as 0s */ | 2695 | /* function has not been loaded yet. Show mac as 0s */ |
2696 | memset(&ivi->mac, 0, ETH_ALEN); | 2696 | eth_zero_addr(ivi->mac); |
2697 | 2697 | ||
2698 | /* vlan */ | 2698 | /* vlan */ |
2699 | if (bulletin->valid_bitmap & (1 << VLAN_VALID)) | 2699 | if (bulletin->valid_bitmap & (1 << VLAN_VALID)) |
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index f05fab65d78a..17c145fdf3ff 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* cnic.c: QLogic CNIC core network driver. | 1 | /* cnic.c: QLogic CNIC core network driver. |
2 | * | 2 | * |
3 | * Copyright (c) 2006-2014 Broadcom Corporation | 3 | * Copyright (c) 2006-2014 Broadcom Corporation |
4 | * Copyright (c) 2014 QLogic Corporation | 4 | * Copyright (c) 2014-2015 QLogic Corporation |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License as published by | 7 | * it under the terms of the GNU General Public License as published by |
@@ -58,11 +58,11 @@ | |||
58 | #define CNIC_MODULE_NAME "cnic" | 58 | #define CNIC_MODULE_NAME "cnic" |
59 | 59 | ||
60 | static char version[] = | 60 | static char version[] = |
61 | "QLogic NetXtreme II CNIC Driver " CNIC_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n"; | 61 | "QLogic " CNIC_MODULE_NAME "Driver v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n"; |
62 | 62 | ||
63 | MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) " | 63 | MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) " |
64 | "Chen (zongxi@broadcom.com"); | 64 | "Chen (zongxi@broadcom.com"); |
65 | MODULE_DESCRIPTION("QLogic NetXtreme II CNIC Driver"); | 65 | MODULE_DESCRIPTION("QLogic cnic Driver"); |
66 | MODULE_LICENSE("GPL"); | 66 | MODULE_LICENSE("GPL"); |
67 | MODULE_VERSION(CNIC_MODULE_VERSION); | 67 | MODULE_VERSION(CNIC_MODULE_VERSION); |
68 | 68 | ||
diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h index 8bb36c1c4d68..ef6125b0ee3e 100644 --- a/drivers/net/ethernet/broadcom/cnic_if.h +++ b/drivers/net/ethernet/broadcom/cnic_if.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /* cnic_if.h: QLogic CNIC core network driver. | 1 | /* cnic_if.h: QLogic cnic core network driver. |
2 | * | 2 | * |
3 | * Copyright (c) 2006-2014 Broadcom Corporation | 3 | * Copyright (c) 2006-2014 Broadcom Corporation |
4 | * Copyright (c) 2014 QLogic Corporation | 4 | * Copyright (c) 2014-2015 QLogic Corporation |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License as published by | 7 | * it under the terms of the GNU General Public License as published by |
@@ -15,8 +15,8 @@ | |||
15 | 15 | ||
16 | #include "bnx2x/bnx2x_mfw_req.h" | 16 | #include "bnx2x/bnx2x_mfw_req.h" |
17 | 17 | ||
18 | #define CNIC_MODULE_VERSION "2.5.20" | 18 | #define CNIC_MODULE_VERSION "2.5.21" |
19 | #define CNIC_MODULE_RELDATE "March 14, 2014" | 19 | #define CNIC_MODULE_RELDATE "January 29, 2015" |
20 | 20 | ||
21 | #define CNIC_ULP_RDMA 0 | 21 | #define CNIC_ULP_RDMA 0 |
22 | #define CNIC_ULP_ISCSI 1 | 22 | #define CNIC_ULP_ISCSI 1 |
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 6befde61c203..84feb241d60b 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c | |||
@@ -54,8 +54,8 @@ | |||
54 | /* Default highest priority queue for multi queue support */ | 54 | /* Default highest priority queue for multi queue support */ |
55 | #define GENET_Q0_PRIORITY 0 | 55 | #define GENET_Q0_PRIORITY 0 |
56 | 56 | ||
57 | #define GENET_DEFAULT_BD_CNT \ | 57 | #define GENET_Q16_TX_BD_CNT \ |
58 | (TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->bds_cnt) | 58 | (TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->tx_bds_per_q) |
59 | 59 | ||
60 | #define RX_BUF_LENGTH 2048 | 60 | #define RX_BUF_LENGTH 2048 |
61 | #define SKB_ALIGNMENT 32 | 61 | #define SKB_ALIGNMENT 32 |
@@ -923,7 +923,7 @@ static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv, | |||
923 | 923 | ||
924 | tx_cb_ptr = ring->cbs; | 924 | tx_cb_ptr = ring->cbs; |
925 | tx_cb_ptr += ring->write_ptr - ring->cb_ptr; | 925 | tx_cb_ptr += ring->write_ptr - ring->cb_ptr; |
926 | tx_cb_ptr->bd_addr = priv->tx_bds + ring->write_ptr * DMA_DESC_SIZE; | 926 | |
927 | /* Advancing local write pointer */ | 927 | /* Advancing local write pointer */ |
928 | if (ring->write_ptr == ring->end_ptr) | 928 | if (ring->write_ptr == ring->end_ptr) |
929 | ring->write_ptr = ring->cb_ptr; | 929 | ring->write_ptr = ring->cb_ptr; |
@@ -1710,17 +1710,14 @@ static int init_umac(struct bcmgenet_priv *priv) | |||
1710 | return 0; | 1710 | return 0; |
1711 | } | 1711 | } |
1712 | 1712 | ||
1713 | /* Initialize all house-keeping variables for a TX ring, along | 1713 | /* Initialize a Tx ring along with corresponding hardware registers */ |
1714 | * with corresponding hardware registers | ||
1715 | */ | ||
1716 | static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv, | 1714 | static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv, |
1717 | unsigned int index, unsigned int size, | 1715 | unsigned int index, unsigned int size, |
1718 | unsigned int write_ptr, unsigned int end_ptr) | 1716 | unsigned int start_ptr, unsigned int end_ptr) |
1719 | { | 1717 | { |
1720 | struct bcmgenet_tx_ring *ring = &priv->tx_rings[index]; | 1718 | struct bcmgenet_tx_ring *ring = &priv->tx_rings[index]; |
1721 | u32 words_per_bd = WORDS_PER_BD(priv); | 1719 | u32 words_per_bd = WORDS_PER_BD(priv); |
1722 | u32 flow_period_val = 0; | 1720 | u32 flow_period_val = 0; |
1723 | unsigned int first_bd; | ||
1724 | 1721 | ||
1725 | spin_lock_init(&ring->lock); | 1722 | spin_lock_init(&ring->lock); |
1726 | ring->priv = priv; | 1723 | ring->priv = priv; |
@@ -1735,12 +1732,12 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv, | |||
1735 | ring->int_enable = bcmgenet_tx_ring_int_enable; | 1732 | ring->int_enable = bcmgenet_tx_ring_int_enable; |
1736 | ring->int_disable = bcmgenet_tx_ring_int_disable; | 1733 | ring->int_disable = bcmgenet_tx_ring_int_disable; |
1737 | } | 1734 | } |
1738 | ring->cbs = priv->tx_cbs + write_ptr; | 1735 | ring->cbs = priv->tx_cbs + start_ptr; |
1739 | ring->size = size; | 1736 | ring->size = size; |
1740 | ring->c_index = 0; | 1737 | ring->c_index = 0; |
1741 | ring->free_bds = size; | 1738 | ring->free_bds = size; |
1742 | ring->write_ptr = write_ptr; | 1739 | ring->write_ptr = start_ptr; |
1743 | ring->cb_ptr = write_ptr; | 1740 | ring->cb_ptr = start_ptr; |
1744 | ring->end_ptr = end_ptr - 1; | 1741 | ring->end_ptr = end_ptr - 1; |
1745 | ring->prod_index = 0; | 1742 | ring->prod_index = 0; |
1746 | 1743 | ||
@@ -1754,19 +1751,16 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv, | |||
1754 | /* Disable rate control for now */ | 1751 | /* Disable rate control for now */ |
1755 | bcmgenet_tdma_ring_writel(priv, index, flow_period_val, | 1752 | bcmgenet_tdma_ring_writel(priv, index, flow_period_val, |
1756 | TDMA_FLOW_PERIOD); | 1753 | TDMA_FLOW_PERIOD); |
1757 | /* Unclassified traffic goes to ring 16 */ | ||
1758 | bcmgenet_tdma_ring_writel(priv, index, | 1754 | bcmgenet_tdma_ring_writel(priv, index, |
1759 | ((size << DMA_RING_SIZE_SHIFT) | | 1755 | ((size << DMA_RING_SIZE_SHIFT) | |
1760 | RX_BUF_LENGTH), DMA_RING_BUF_SIZE); | 1756 | RX_BUF_LENGTH), DMA_RING_BUF_SIZE); |
1761 | 1757 | ||
1762 | first_bd = write_ptr; | ||
1763 | |||
1764 | /* Set start and end address, read and write pointers */ | 1758 | /* Set start and end address, read and write pointers */ |
1765 | bcmgenet_tdma_ring_writel(priv, index, first_bd * words_per_bd, | 1759 | bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd, |
1766 | DMA_START_ADDR); | 1760 | DMA_START_ADDR); |
1767 | bcmgenet_tdma_ring_writel(priv, index, first_bd * words_per_bd, | 1761 | bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd, |
1768 | TDMA_READ_PTR); | 1762 | TDMA_READ_PTR); |
1769 | bcmgenet_tdma_ring_writel(priv, index, first_bd, | 1763 | bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd, |
1770 | TDMA_WRITE_PTR); | 1764 | TDMA_WRITE_PTR); |
1771 | bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1, | 1765 | bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1, |
1772 | DMA_END_ADDR); | 1766 | DMA_END_ADDR); |
@@ -1825,78 +1819,73 @@ static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv, | |||
1825 | return ret; | 1819 | return ret; |
1826 | } | 1820 | } |
1827 | 1821 | ||
1828 | /* init multi xmit queues, only available for GENET2+ | 1822 | /* Initialize Tx queues |
1829 | * the queue is partitioned as follows: | ||
1830 | * | 1823 | * |
1831 | * queue 0 - 3 is priority based, each one has 32 descriptors, | 1824 | * Queues 0-3 are priority-based, each one has 32 descriptors, |
1832 | * with queue 0 being the highest priority queue. | 1825 | * with queue 0 being the highest priority queue. |
1833 | * | 1826 | * |
1834 | * queue 16 is the default tx queue with GENET_DEFAULT_BD_CNT | 1827 | * Queue 16 is the default Tx queue with |
1835 | * descriptors: 256 - (number of tx queues * bds per queues) = 128 | 1828 | * GENET_Q16_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors. |
1836 | * descriptors. | ||
1837 | * | 1829 | * |
1838 | * The transmit control block pool is then partitioned as following: | 1830 | * The transmit control block pool is then partitioned as follows: |
1839 | * - tx_cbs[0...127] are for queue 16 | 1831 | * - Tx queue 0 uses tx_cbs[0..31] |
1840 | * - tx_ring_cbs[0] points to tx_cbs[128..159] | 1832 | * - Tx queue 1 uses tx_cbs[32..63] |
1841 | * - tx_ring_cbs[1] points to tx_cbs[160..191] | 1833 | * - Tx queue 2 uses tx_cbs[64..95] |
1842 | * - tx_ring_cbs[2] points to tx_cbs[192..223] | 1834 | * - Tx queue 3 uses tx_cbs[96..127] |
1843 | * - tx_ring_cbs[3] points to tx_cbs[224..255] | 1835 | * - Tx queue 16 uses tx_cbs[128..255] |
1844 | */ | 1836 | */ |
1845 | static void bcmgenet_init_multiq(struct net_device *dev) | 1837 | static void bcmgenet_init_tx_queues(struct net_device *dev) |
1846 | { | 1838 | { |
1847 | struct bcmgenet_priv *priv = netdev_priv(dev); | 1839 | struct bcmgenet_priv *priv = netdev_priv(dev); |
1848 | unsigned int i, dma_enable; | 1840 | u32 i, dma_enable; |
1849 | u32 reg, dma_ctrl, ring_cfg = 0; | 1841 | u32 dma_ctrl, ring_cfg; |
1850 | u32 dma_priority[3] = {0, 0, 0}; | 1842 | u32 dma_priority[3] = {0, 0, 0}; |
1851 | 1843 | ||
1852 | if (!netif_is_multiqueue(dev)) { | ||
1853 | netdev_warn(dev, "called with non multi queue aware HW\n"); | ||
1854 | return; | ||
1855 | } | ||
1856 | |||
1857 | dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL); | 1844 | dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL); |
1858 | dma_enable = dma_ctrl & DMA_EN; | 1845 | dma_enable = dma_ctrl & DMA_EN; |
1859 | dma_ctrl &= ~DMA_EN; | 1846 | dma_ctrl &= ~DMA_EN; |
1860 | bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL); | 1847 | bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL); |
1861 | 1848 | ||
1849 | dma_ctrl = 0; | ||
1850 | ring_cfg = 0; | ||
1851 | |||
1862 | /* Enable strict priority arbiter mode */ | 1852 | /* Enable strict priority arbiter mode */ |
1863 | bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL); | 1853 | bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL); |
1864 | 1854 | ||
1855 | /* Initialize Tx priority queues */ | ||
1865 | for (i = 0; i < priv->hw_params->tx_queues; i++) { | 1856 | for (i = 0; i < priv->hw_params->tx_queues; i++) { |
1866 | /* first 64 tx_cbs are reserved for default tx queue | 1857 | bcmgenet_init_tx_ring(priv, i, priv->hw_params->tx_bds_per_q, |
1867 | * (ring 16) | 1858 | i * priv->hw_params->tx_bds_per_q, |
1868 | */ | 1859 | (i + 1) * priv->hw_params->tx_bds_per_q); |
1869 | bcmgenet_init_tx_ring(priv, i, priv->hw_params->bds_cnt, | 1860 | ring_cfg |= (1 << i); |
1870 | i * priv->hw_params->bds_cnt, | 1861 | dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); |
1871 | (i + 1) * priv->hw_params->bds_cnt); | ||
1872 | |||
1873 | /* Configure ring as descriptor ring and setup priority */ | ||
1874 | ring_cfg |= 1 << i; | ||
1875 | dma_ctrl |= 1 << (i + DMA_RING_BUF_EN_SHIFT); | ||
1876 | |||
1877 | dma_priority[DMA_PRIO_REG_INDEX(i)] |= | 1862 | dma_priority[DMA_PRIO_REG_INDEX(i)] |= |
1878 | ((GENET_Q0_PRIORITY + i) << DMA_PRIO_REG_SHIFT(i)); | 1863 | ((GENET_Q0_PRIORITY + i) << DMA_PRIO_REG_SHIFT(i)); |
1879 | } | 1864 | } |
1880 | 1865 | ||
1881 | /* Set ring 16 priority and program the hardware registers */ | 1866 | /* Initialize Tx default queue 16 */ |
1867 | bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_Q16_TX_BD_CNT, | ||
1868 | priv->hw_params->tx_queues * | ||
1869 | priv->hw_params->tx_bds_per_q, | ||
1870 | TOTAL_DESC); | ||
1871 | ring_cfg |= (1 << DESC_INDEX); | ||
1872 | dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT)); | ||
1882 | dma_priority[DMA_PRIO_REG_INDEX(DESC_INDEX)] |= | 1873 | dma_priority[DMA_PRIO_REG_INDEX(DESC_INDEX)] |= |
1883 | ((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) << | 1874 | ((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) << |
1884 | DMA_PRIO_REG_SHIFT(DESC_INDEX)); | 1875 | DMA_PRIO_REG_SHIFT(DESC_INDEX)); |
1876 | |||
1877 | /* Set Tx queue priorities */ | ||
1885 | bcmgenet_tdma_writel(priv, dma_priority[0], DMA_PRIORITY_0); | 1878 | bcmgenet_tdma_writel(priv, dma_priority[0], DMA_PRIORITY_0); |
1886 | bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1); | 1879 | bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1); |
1887 | bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2); | 1880 | bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2); |
1888 | 1881 | ||
1889 | /* Enable rings */ | 1882 | /* Enable Tx queues */ |
1890 | reg = bcmgenet_tdma_readl(priv, DMA_RING_CFG); | 1883 | bcmgenet_tdma_writel(priv, ring_cfg, DMA_RING_CFG); |
1891 | reg |= ring_cfg; | ||
1892 | bcmgenet_tdma_writel(priv, reg, DMA_RING_CFG); | ||
1893 | 1884 | ||
1894 | /* Configure ring as descriptor ring and re-enable DMA if enabled */ | 1885 | /* Enable Tx DMA */ |
1895 | reg = bcmgenet_tdma_readl(priv, DMA_CTRL); | ||
1896 | reg |= dma_ctrl; | ||
1897 | if (dma_enable) | 1886 | if (dma_enable) |
1898 | reg |= DMA_EN; | 1887 | dma_ctrl |= DMA_EN; |
1899 | bcmgenet_tdma_writel(priv, reg, DMA_CTRL); | 1888 | bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL); |
1900 | } | 1889 | } |
1901 | 1890 | ||
1902 | static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv) | 1891 | static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv) |
@@ -1985,6 +1974,8 @@ static void bcmgenet_fini_dma(struct bcmgenet_priv *priv) | |||
1985 | static int bcmgenet_init_dma(struct bcmgenet_priv *priv) | 1974 | static int bcmgenet_init_dma(struct bcmgenet_priv *priv) |
1986 | { | 1975 | { |
1987 | int ret; | 1976 | int ret; |
1977 | unsigned int i; | ||
1978 | struct enet_cb *cb; | ||
1988 | 1979 | ||
1989 | netif_dbg(priv, hw, priv->dev, "bcmgenet: init_edma\n"); | 1980 | netif_dbg(priv, hw, priv->dev, "bcmgenet: init_edma\n"); |
1990 | 1981 | ||
@@ -2011,14 +2002,13 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv) | |||
2011 | return -ENOMEM; | 2002 | return -ENOMEM; |
2012 | } | 2003 | } |
2013 | 2004 | ||
2014 | /* initialize multi xmit queue */ | 2005 | for (i = 0; i < priv->num_tx_bds; i++) { |
2015 | bcmgenet_init_multiq(priv->dev); | 2006 | cb = priv->tx_cbs + i; |
2007 | cb->bd_addr = priv->tx_bds + i * DMA_DESC_SIZE; | ||
2008 | } | ||
2016 | 2009 | ||
2017 | /* initialize special ring 16 */ | 2010 | /* Initialize Tx queues */ |
2018 | bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_DEFAULT_BD_CNT, | 2011 | bcmgenet_init_tx_queues(priv->dev); |
2019 | priv->hw_params->tx_queues * | ||
2020 | priv->hw_params->bds_cnt, | ||
2021 | TOTAL_DESC); | ||
2022 | 2012 | ||
2023 | return 0; | 2013 | return 0; |
2024 | } | 2014 | } |
@@ -2499,8 +2489,8 @@ static const struct net_device_ops bcmgenet_netdev_ops = { | |||
2499 | static struct bcmgenet_hw_params bcmgenet_hw_params[] = { | 2489 | static struct bcmgenet_hw_params bcmgenet_hw_params[] = { |
2500 | [GENET_V1] = { | 2490 | [GENET_V1] = { |
2501 | .tx_queues = 0, | 2491 | .tx_queues = 0, |
2492 | .tx_bds_per_q = 0, | ||
2502 | .rx_queues = 0, | 2493 | .rx_queues = 0, |
2503 | .bds_cnt = 0, | ||
2504 | .bp_in_en_shift = 16, | 2494 | .bp_in_en_shift = 16, |
2505 | .bp_in_mask = 0xffff, | 2495 | .bp_in_mask = 0xffff, |
2506 | .hfb_filter_cnt = 16, | 2496 | .hfb_filter_cnt = 16, |
@@ -2512,8 +2502,8 @@ static struct bcmgenet_hw_params bcmgenet_hw_params[] = { | |||
2512 | }, | 2502 | }, |
2513 | [GENET_V2] = { | 2503 | [GENET_V2] = { |
2514 | .tx_queues = 4, | 2504 | .tx_queues = 4, |
2505 | .tx_bds_per_q = 32, | ||
2515 | .rx_queues = 4, | 2506 | .rx_queues = 4, |
2516 | .bds_cnt = 32, | ||
2517 | .bp_in_en_shift = 16, | 2507 | .bp_in_en_shift = 16, |
2518 | .bp_in_mask = 0xffff, | 2508 | .bp_in_mask = 0xffff, |
2519 | .hfb_filter_cnt = 16, | 2509 | .hfb_filter_cnt = 16, |
@@ -2528,8 +2518,8 @@ static struct bcmgenet_hw_params bcmgenet_hw_params[] = { | |||
2528 | }, | 2518 | }, |
2529 | [GENET_V3] = { | 2519 | [GENET_V3] = { |
2530 | .tx_queues = 4, | 2520 | .tx_queues = 4, |
2521 | .tx_bds_per_q = 32, | ||
2531 | .rx_queues = 4, | 2522 | .rx_queues = 4, |
2532 | .bds_cnt = 32, | ||
2533 | .bp_in_en_shift = 17, | 2523 | .bp_in_en_shift = 17, |
2534 | .bp_in_mask = 0x1ffff, | 2524 | .bp_in_mask = 0x1ffff, |
2535 | .hfb_filter_cnt = 48, | 2525 | .hfb_filter_cnt = 48, |
@@ -2544,8 +2534,8 @@ static struct bcmgenet_hw_params bcmgenet_hw_params[] = { | |||
2544 | }, | 2534 | }, |
2545 | [GENET_V4] = { | 2535 | [GENET_V4] = { |
2546 | .tx_queues = 4, | 2536 | .tx_queues = 4, |
2537 | .tx_bds_per_q = 32, | ||
2547 | .rx_queues = 4, | 2538 | .rx_queues = 4, |
2548 | .bds_cnt = 32, | ||
2549 | .bp_in_en_shift = 17, | 2539 | .bp_in_en_shift = 17, |
2550 | .bp_in_mask = 0x1ffff, | 2540 | .bp_in_mask = 0x1ffff, |
2551 | .hfb_filter_cnt = 48, | 2541 | .hfb_filter_cnt = 48, |
@@ -2645,14 +2635,15 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv) | |||
2645 | #endif | 2635 | #endif |
2646 | 2636 | ||
2647 | pr_debug("Configuration for version: %d\n" | 2637 | pr_debug("Configuration for version: %d\n" |
2648 | "TXq: %1d, RXq: %1d, BDs: %1d\n" | 2638 | "TXq: %1d, TXqBDs: %1d, RXq: %1d\n" |
2649 | "BP << en: %2d, BP msk: 0x%05x\n" | 2639 | "BP << en: %2d, BP msk: 0x%05x\n" |
2650 | "HFB count: %2d, QTAQ msk: 0x%05x\n" | 2640 | "HFB count: %2d, QTAQ msk: 0x%05x\n" |
2651 | "TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n" | 2641 | "TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n" |
2652 | "RDMA: 0x%05x, TDMA: 0x%05x\n" | 2642 | "RDMA: 0x%05x, TDMA: 0x%05x\n" |
2653 | "Words/BD: %d\n", | 2643 | "Words/BD: %d\n", |
2654 | priv->version, | 2644 | priv->version, |
2655 | params->tx_queues, params->rx_queues, params->bds_cnt, | 2645 | params->tx_queues, params->tx_bds_per_q, |
2646 | params->rx_queues, | ||
2656 | params->bp_in_en_shift, params->bp_in_mask, | 2647 | params->bp_in_en_shift, params->bp_in_mask, |
2657 | params->hfb_filter_cnt, params->qtag_mask, | 2648 | params->hfb_filter_cnt, params->qtag_mask, |
2658 | params->tbuf_offset, params->hfb_offset, | 2649 | params->tbuf_offset, params->hfb_offset, |
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h index 0d370d168aee..016bd12bf493 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h | |||
@@ -503,8 +503,8 @@ enum bcmgenet_version { | |||
503 | */ | 503 | */ |
504 | struct bcmgenet_hw_params { | 504 | struct bcmgenet_hw_params { |
505 | u8 tx_queues; | 505 | u8 tx_queues; |
506 | u8 tx_bds_per_q; | ||
506 | u8 rx_queues; | 507 | u8 rx_queues; |
507 | u8 bds_cnt; | ||
508 | u8 bp_in_en_shift; | 508 | u8 bp_in_en_shift; |
509 | u32 bp_in_mask; | 509 | u32 bp_in_mask; |
510 | u8 hfb_filter_cnt; | 510 | u8 hfb_filter_cnt; |
diff --git a/drivers/net/ethernet/brocade/Kconfig b/drivers/net/ethernet/brocade/Kconfig index 264155778857..4e8c0b6c57d0 100644 --- a/drivers/net/ethernet/brocade/Kconfig +++ b/drivers/net/ethernet/brocade/Kconfig | |||
@@ -1,9 +1,9 @@ | |||
1 | # | 1 | # |
2 | # Brocade device configuration | 2 | # QLogic BR-series device configuration |
3 | # | 3 | # |
4 | 4 | ||
5 | config NET_VENDOR_BROCADE | 5 | config NET_VENDOR_BROCADE |
6 | bool "Brocade devices" | 6 | bool "QLogic BR-series devices" |
7 | default y | 7 | default y |
8 | depends on PCI | 8 | depends on PCI |
9 | ---help--- | 9 | ---help--- |
@@ -13,8 +13,8 @@ config NET_VENDOR_BROCADE | |||
13 | 13 | ||
14 | Note that the answer to this question doesn't directly affect the | 14 | Note that the answer to this question doesn't directly affect the |
15 | kernel: saying N will just cause the configurator to skip all | 15 | kernel: saying N will just cause the configurator to skip all |
16 | the questions about Brocade cards. If you say Y, you will be asked for | 16 | the questions about QLogic BR-series cards. If you say Y, you will be |
17 | your specific card in the following questions. | 17 | asked for your specific card in the following questions. |
18 | 18 | ||
19 | if NET_VENDOR_BROCADE | 19 | if NET_VENDOR_BROCADE |
20 | 20 | ||
diff --git a/drivers/net/ethernet/brocade/Makefile b/drivers/net/ethernet/brocade/Makefile index b58238d2df6a..fec10f9b4558 100644 --- a/drivers/net/ethernet/brocade/Makefile +++ b/drivers/net/ethernet/brocade/Makefile | |||
@@ -1,5 +1,5 @@ | |||
1 | # | 1 | # |
2 | # Makefile for the Brocade device drivers. | 2 | # Makefile for the QLogic BR-series device drivers. |
3 | # | 3 | # |
4 | 4 | ||
5 | obj-$(CONFIG_BNA) += bna/ | 5 | obj-$(CONFIG_BNA) += bna/ |
diff --git a/drivers/net/ethernet/brocade/bna/Kconfig b/drivers/net/ethernet/brocade/bna/Kconfig index dc2eb526fbf7..fe01279a8843 100644 --- a/drivers/net/ethernet/brocade/bna/Kconfig +++ b/drivers/net/ethernet/brocade/bna/Kconfig | |||
@@ -1,17 +1,17 @@ | |||
1 | # | 1 | # |
2 | # Brocade network device configuration | 2 | # QLogic BR-series network device configuration |
3 | # | 3 | # |
4 | 4 | ||
5 | config BNA | 5 | config BNA |
6 | tristate "Brocade 1010/1020 10Gb Ethernet Driver support" | 6 | tristate "QLogic BR-series 1010/1020/1860 10Gb Ethernet Driver support" |
7 | depends on PCI | 7 | depends on PCI |
8 | ---help--- | 8 | ---help--- |
9 | This driver supports Brocade 1010/1020 10Gb CEE capable Ethernet | 9 | This driver supports QLogic BR-series 1010/1020/1860 10Gb CEE capable |
10 | cards. | 10 | Ethernet cards. |
11 | To compile this driver as a module, choose M here: the module | 11 | To compile this driver as a module, choose M here: the module |
12 | will be called bna. | 12 | will be called bna. |
13 | 13 | ||
14 | For general information and support, go to the Brocade support | 14 | For general information and support, go to the QLogic support |
15 | website at: | 15 | website at: |
16 | 16 | ||
17 | <http://support.brocade.com> | 17 | <http://support.qlogic.com> |
diff --git a/drivers/net/ethernet/brocade/bna/Makefile b/drivers/net/ethernet/brocade/bna/Makefile index 6027302ae73a..6e10b99733a2 100644 --- a/drivers/net/ethernet/brocade/bna/Makefile +++ b/drivers/net/ethernet/brocade/bna/Makefile | |||
@@ -1,5 +1,6 @@ | |||
1 | # | 1 | # |
2 | # Copyright (c) 2005-2010 Brocade Communications Systems, Inc. | 2 | # Copyright (c) 2005-2014 Brocade Communications Systems, Inc. |
3 | # Copyright (c) 2014-2015 QLogic Corporation. | ||
3 | # All rights reserved. | 4 | # All rights reserved. |
4 | # | 5 | # |
5 | 6 | ||
diff --git a/drivers/net/ethernet/brocade/bna/bfa_cee.c b/drivers/net/ethernet/brocade/bna/bfa_cee.c index 550d2521ba76..cf9f3956f198 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_cee.c +++ b/drivers/net/ethernet/brocade/bna/bfa_cee.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Linux network driver for Brocade Converged Network Adapter. | 2 | * Linux network driver for QLogic BR-series Converged Network Adapter. |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify it | 4 | * This program is free software; you can redistribute it and/or modify it |
5 | * under the terms of the GNU General Public License (GPL) Version 2 as | 5 | * under the terms of the GNU General Public License (GPL) Version 2 as |
@@ -11,9 +11,10 @@ | |||
11 | * General Public License for more details. | 11 | * General Public License for more details. |
12 | */ | 12 | */ |
13 | /* | 13 | /* |
14 | * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. | 14 | * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. |
15 | * Copyright (c) 2014-2015 QLogic Corporation | ||
15 | * All rights reserved | 16 | * All rights reserved |
16 | * www.brocade.com | 17 | * www.qlogic.com |
17 | */ | 18 | */ |
18 | 19 | ||
19 | #include "bfa_cee.h" | 20 | #include "bfa_cee.h" |
diff --git a/drivers/net/ethernet/brocade/bna/bfa_cee.h b/drivers/net/ethernet/brocade/bna/bfa_cee.h index 93fde633d6f3..d04eef5d5a77 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_cee.h +++ b/drivers/net/ethernet/brocade/bna/bfa_cee.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Linux network driver for Brocade Converged Network Adapter. | 2 | * Linux network driver for QLogic BR-series Converged Network Adapter. |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify it | 4 | * This program is free software; you can redistribute it and/or modify it |
5 | * under the terms of the GNU General Public License (GPL) Version 2 as | 5 | * under the terms of the GNU General Public License (GPL) Version 2 as |
@@ -11,9 +11,10 @@ | |||
11 | * General Public License for more details. | 11 | * General Public License for more details. |
12 | */ | 12 | */ |
13 | /* | 13 | /* |
14 | * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. | 14 | * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. |
15 | * Copyright (c) 2014-2015 QLogic Corporation | ||
15 | * All rights reserved | 16 | * All rights reserved |
16 | * www.brocade.com | 17 | * www.qlogic.com |
17 | */ | 18 | */ |
18 | 19 | ||
19 | #ifndef __BFA_CEE_H__ | 20 | #ifndef __BFA_CEE_H__ |
diff --git a/drivers/net/ethernet/brocade/bna/bfa_cs.h b/drivers/net/ethernet/brocade/bna/bfa_cs.h index ad004a4c3897..af25d8e8fae0 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_cs.h +++ b/drivers/net/ethernet/brocade/bna/bfa_cs.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Linux network driver for Brocade Converged Network Adapter. | 2 | * Linux network driver for QLogic BR-series Converged Network Adapter. |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify it | 4 | * This program is free software; you can redistribute it and/or modify it |
5 | * under the terms of the GNU General Public License (GPL) Version 2 as | 5 | * under the terms of the GNU General Public License (GPL) Version 2 as |
@@ -11,9 +11,10 @@ | |||
11 | * General Public License for more details. | 11 | * General Public License for more details. |
12 | */ | 12 | */ |
13 | /* | 13 | /* |
14 | * Copyright (c) 2005-2011 Brocade Communications Systems, Inc. | 14 | * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. |
15 | * Copyright (c) 2014-2015 QLogic Corporation | ||
15 | * All rights reserved | 16 | * All rights reserved |
16 | * www.brocade.com | 17 | * www.qlogic.com |
17 | */ | 18 | */ |
18 | 19 | ||
19 | /* BFA common services */ | 20 | /* BFA common services */ |
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs.h b/drivers/net/ethernet/brocade/bna/bfa_defs.h index b7d8127c198f..3bfd9da92630 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_defs.h +++ b/drivers/net/ethernet/brocade/bna/bfa_defs.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Linux network driver for Brocade Converged Network Adapter. | 2 | * Linux network driver for QLogic BR-series Converged Network Adapter. |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify it | 4 | * This program is free software; you can redistribute it and/or modify it |
5 | * under the terms of the GNU General Public License (GPL) Version 2 as | 5 | * under the terms of the GNU General Public License (GPL) Version 2 as |
@@ -11,9 +11,10 @@ | |||
11 | * General Public License for more details. | 11 | * General Public License for more details. |
12 | */ | 12 | */ |
13 | /* | 13 | /* |
14 | * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. | 14 | * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. |
15 | * Copyright (c) 2014-2015 QLogic Corporation | ||
15 | * All rights reserved | 16 | * All rights reserved |
16 | * www.brocade.com | 17 | * www.qlogic.com |
17 | */ | 18 | */ |
18 | 19 | ||
19 | #ifndef __BFA_DEFS_H__ | 20 | #ifndef __BFA_DEFS_H__ |
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h b/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h index b39c5f23974b..63e300f5ba41 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h +++ b/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Linux network driver for Brocade Converged Network Adapter. | 2 | * Linux network driver for QLogic BR-series Converged Network Adapter. |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify it | 4 | * This program is free software; you can redistribute it and/or modify it |
5 | * under the terms of the GNU General Public License (GPL) Version 2 as | 5 | * under the terms of the GNU General Public License (GPL) Version 2 as |
@@ -11,9 +11,10 @@ | |||
11 | * General Public License for more details. | 11 | * General Public License for more details. |
12 | */ | 12 | */ |
13 | /* | 13 | /* |
14 | * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. | 14 | * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. |
15 | * Copyright (c) 2014-2015 QLogic Corporation | ||
15 | * All rights reserved | 16 | * All rights reserved |
16 | * www.brocade.com | 17 | * www.qlogic.com |
17 | */ | 18 | */ |
18 | #ifndef __BFA_DEFS_CNA_H__ | 19 | #ifndef __BFA_DEFS_CNA_H__ |
19 | #define __BFA_DEFS_CNA_H__ | 20 | #define __BFA_DEFS_CNA_H__ |
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h b/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h index 7fb396fe679d..7a45cd0b594d 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h +++ b/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Linux network driver for Brocade Converged Network Adapter. | 2 | * Linux network driver for QLogic BR-series Converged Network Adapter. |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify it | 4 | * This program is free software; you can redistribute it and/or modify it |
5 | * under the terms of the GNU General Public License (GPL) Version 2 as | 5 | * under the terms of the GNU General Public License (GPL) Version 2 as |
@@ -11,9 +11,10 @@ | |||
11 | * General Public License for more details. | 11 | * General Public License for more details. |
12 | */ | 12 | */ |
13 | /* | 13 | /* |
14 | * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. | 14 | * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. |
15 | * Copyright (c) 2014-2015 QLogic Corporation | ||
15 | * All rights reserved | 16 | * All rights reserved |
16 | * www.brocade.com | 17 | * www.qlogic.com |
17 | */ | 18 | */ |
18 | #ifndef __BFA_DEFS_MFG_COMM_H__ | 19 | #ifndef __BFA_DEFS_MFG_COMM_H__ |
19 | #define __BFA_DEFS_MFG_COMM_H__ | 20 | #define __BFA_DEFS_MFG_COMM_H__ |
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_status.h b/drivers/net/ethernet/brocade/bna/bfa_defs_status.h index ea9af9ae754d..a43b56002752 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_defs_status.h +++ b/drivers/net/ethernet/brocade/bna/bfa_defs_status.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Linux network driver for Brocade Converged Network Adapter. | 2 | * Linux network driver for QLogic BR-series Converged Network Adapter. |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify it | 4 | * This program is free software; you can redistribute it and/or modify it |
5 | * under the terms of the GNU General Public License (GPL) Version 2 as | 5 | * under the terms of the GNU General Public License (GPL) Version 2 as |
@@ -11,9 +11,10 @@ | |||
11 | * General Public License for more details. | 11 | * General Public License for more details. |
12 | */ | 12 | */ |
13 | /* | 13 | /* |
14 | * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. | 14 | * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. |
15 | * Copyright (c) 2014-2015 QLogic Corporation | ||
15 | * All rights reserved | 16 | * All rights reserved |
16 | * www.brocade.com | 17 | * www.qlogic.com |
17 | */ | 18 | */ |
18 | #ifndef __BFA_DEFS_STATUS_H__ | 19 | #ifndef __BFA_DEFS_STATUS_H__ |
19 | #define __BFA_DEFS_STATUS_H__ | 20 | #define __BFA_DEFS_STATUS_H__ |
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c index 354ae9792bad..f2d13238b02e 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Linux network driver for Brocade Converged Network Adapter. | 2 | * Linux network driver for QLogic BR-series Converged Network Adapter. |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify it | 4 | * This program is free software; you can redistribute it and/or modify it |
5 | * under the terms of the GNU General Public License (GPL) Version 2 as | 5 | * under the terms of the GNU General Public License (GPL) Version 2 as |
@@ -11,9 +11,10 @@ | |||
11 | * General Public License for more details. | 11 | * General Public License for more details. |
12 | */ | 12 | */ |
13 | /* | 13 | /* |
14 | * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. | 14 | * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. |
15 | * Copyright (c) 2014-2015 QLogic Corporation | ||
15 | * All rights reserved | 16 | * All rights reserved |
16 | * www.brocade.com | 17 | * www.qlogic.com |
17 | */ | 18 | */ |
18 | 19 | ||
19 | #include "bfa_ioc.h" | 20 | #include "bfa_ioc.h" |
@@ -2763,7 +2764,7 @@ bfa_nw_ioc_notify_register(struct bfa_ioc *ioc, | |||
2763 | list_add_tail(¬ify->qe, &ioc->notify_q); | 2764 | list_add_tail(¬ify->qe, &ioc->notify_q); |
2764 | } | 2765 | } |
2765 | 2766 | ||
2766 | #define BFA_MFG_NAME "Brocade" | 2767 | #define BFA_MFG_NAME "QLogic" |
2767 | static void | 2768 | static void |
2768 | bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc, | 2769 | bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc, |
2769 | struct bfa_adapter_attr *ad_attr) | 2770 | struct bfa_adapter_attr *ad_attr) |
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.h b/drivers/net/ethernet/brocade/bna/bfa_ioc.h index 20cff7df4b55..effb7156e7a4 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_ioc.h +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Linux network driver for Brocade Converged Network Adapter. | 2 | * Linux network driver for QLogic BR-series Converged Network Adapter. |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify it | 4 | * This program is free software; you can redistribute it and/or modify it |
5 | * under the terms of the GNU General Public License (GPL) Version 2 as | 5 | * under the terms of the GNU General Public License (GPL) Version 2 as |
@@ -11,9 +11,10 @@ | |||
11 | * General Public License for more details. | 11 | * General Public License for more details. |
12 | */ | 12 | */ |
13 | /* | 13 | /* |
14 | * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. | 14 | * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. |
15 | * Copyright (c) 2014-2015 QLogic Corporation | ||
15 | * All rights reserved | 16 | * All rights reserved |
16 | * www.brocade.com | 17 | * www.qlogic.com |
17 | */ | 18 | */ |
18 | 19 | ||
19 | #ifndef __BFA_IOC_H__ | 20 | #ifndef __BFA_IOC_H__ |
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c index d639558455cb..66c8507d7717 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Linux network driver for Brocade Converged Network Adapter. | 2 | * Linux network driver for QLogic BR-series Converged Network Adapter. |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify it | 4 | * This program is free software; you can redistribute it and/or modify it |
5 | * under the terms of the GNU General Public License (GPL) Version 2 as | 5 | * under the terms of the GNU General Public License (GPL) Version 2 as |
@@ -11,9 +11,10 @@ | |||
11 | * General Public License for more details. | 11 | * General Public License for more details. |
12 | */ | 12 | */ |
13 | /* | 13 | /* |
14 | * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. | 14 | * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. |
15 | * Copyright (c) 2014-2015 QLogic Corporation | ||
15 | * All rights reserved | 16 | * All rights reserved |
16 | * www.brocade.com | 17 | * www.qlogic.com |
17 | */ | 18 | */ |
18 | 19 | ||
19 | #include "bfa_ioc.h" | 20 | #include "bfa_ioc.h" |
diff --git a/drivers/net/ethernet/brocade/bna/bfa_msgq.c b/drivers/net/ethernet/brocade/bna/bfa_msgq.c index 55067d0d25cf..c07d5b9372f4 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_msgq.c +++ b/drivers/net/ethernet/brocade/bna/bfa_msgq.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Linux network driver for Brocade Converged Network Adapter. | 2 | * Linux network driver for QLogic BR-series Converged Network Adapter. |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify it | 4 | * This program is free software; you can redistribute it and/or modify it |
5 | * under the terms of the GNU General Public License (GPL) Version 2 as | 5 | * under the terms of the GNU General Public License (GPL) Version 2 as |
@@ -11,9 +11,10 @@ | |||
11 | * General Public License for more details. | 11 | * General Public License for more details. |
12 | */ | 12 | */ |
13 | /* | 13 | /* |
14 | * Copyright (c) 2005-2011 Brocade Communications Systems, Inc. | 14 | * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. |
15 | * Copyright (c) 2014-2015 QLogic Corporation | ||
15 | * All rights reserved | 16 | * All rights reserved |
16 | * www.brocade.com | 17 | * www.qlogic.com |
17 | */ | 18 | */ |
18 | 19 | ||
19 | /* MSGQ module source file. */ | 20 | /* MSGQ module source file. */ |
diff --git a/drivers/net/ethernet/brocade/bna/bfa_msgq.h b/drivers/net/ethernet/brocade/bna/bfa_msgq.h index a6a565a366dc..66bc8b5acd57 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_msgq.h +++ b/drivers/net/ethernet/brocade/bna/bfa_msgq.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Linux network driver for Brocade Converged Network Adapter. | 2 | * Linux network driver for QLogic BR-series Converged Network Adapter. |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify it | 4 | * This program is free software; you can redistribute it and/or modify it |
5 | * under the terms of the GNU General Public License (GPL) Version 2 as | 5 | * under the terms of the GNU General Public License (GPL) Version 2 as |
@@ -11,9 +11,10 @@ | |||
11 | * General Public License for more details. | 11 | * General Public License for more details. |
12 | */ | 12 | */ |
13 | /* | 13 | /* |
14 | * Copyright (c) 2005-2011 Brocade Communications Systems, Inc. | 14 | * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. |
15 | * Copyright (c) 2014-2015 QLogic Corporation | ||
15 | * All rights reserved | 16 | * All rights reserved |
16 | * www.brocade.com | 17 | * www.qlogic.com |
17 | */ | 18 | */ |
18 | 19 | ||
19 | #ifndef __BFA_MSGQ_H__ | 20 | #ifndef __BFA_MSGQ_H__ |
diff --git a/drivers/net/ethernet/brocade/bna/bfi.h b/drivers/net/ethernet/brocade/bna/bfi.h index 8c563a77cdf6..f1e1129e6241 100644 --- a/drivers/net/ethernet/brocade/bna/bfi.h +++ b/drivers/net/ethernet/brocade/bna/bfi.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Linux network driver for Brocade Converged Network Adapter. | 2 | * Linux network driver for QLogic BR-series Converged Network Adapter. |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify it | 4 | * This program is free software; you can redistribute it and/or modify it |
5 | * under the terms of the GNU General Public License (GPL) Version 2 as | 5 | * under the terms of the GNU General Public License (GPL) Version 2 as |
@@ -11,9 +11,10 @@ | |||
11 | * General Public License for more details. | 11 | * General Public License for more details. |
12 | */ | 12 | */ |
13 | /* | 13 | /* |
14 | * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. | 14 | * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. |
15 | * Copyright (c) 2014-2015 QLogic Corporation | ||
15 | * All rights reserved | 16 | * All rights reserved |
16 | * www.brocade.com | 17 | * www.qlogic.com |
17 | */ | 18 | */ |
18 | #ifndef __BFI_H__ | 19 | #ifndef __BFI_H__ |
19 | #define __BFI_H__ | 20 | #define __BFI_H__ |
diff --git a/drivers/net/ethernet/brocade/bna/bfi_cna.h b/drivers/net/ethernet/brocade/bna/bfi_cna.h index 6704a4392973..bd605bee72ee 100644 --- a/drivers/net/ethernet/brocade/bna/bfi_cna.h +++ b/drivers/net/ethernet/brocade/bna/bfi_cna.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Linux network driver for Brocade Converged Network Adapter. | 2 | * Linux network driver for QLogic BR-series Converged Network Adapter. |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify it | 4 | * This program is free software; you can redistribute it and/or modify it |
5 | * under the terms of the GNU General Public License (GPL) Version 2 as | 5 | * under the terms of the GNU General Public License (GPL) Version 2 as |
@@ -11,9 +11,10 @@ | |||
11 | * General Public License for more details. | 11 | * General Public License for more details. |
12 | */ | 12 | */ |
13 | /* | 13 | /* |
14 | * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. | 14 | * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. |
15 | * Copyright (c) 2014-2015 QLogic Corporation | ||
15 | * All rights reserved | 16 | * All rights reserved |
16 | * www.brocade.com | 17 | * www.qlogic.com |
17 | */ | 18 | */ |
18 | #ifndef __BFI_CNA_H__ | 19 | #ifndef __BFI_CNA_H__ |
19 | #define __BFI_CNA_H__ | 20 | #define __BFI_CNA_H__ |
diff --git a/drivers/net/ethernet/brocade/bna/bfi_enet.h b/drivers/net/ethernet/brocade/bna/bfi_enet.h index ae072dc5d238..bccca3bbadb8 100644 --- a/drivers/net/ethernet/brocade/bna/bfi_enet.h +++ b/drivers/net/ethernet/brocade/bna/bfi_enet.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Linux network driver for Brocade Converged Network Adapter. | 2 | * Linux network driver for QLogic BR-series Converged Network Adapter. |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify it | 4 | * This program is free software; you can redistribute it and/or modify it |
5 | * under the terms of the GNU General Public License (GPL) Version 2 as | 5 | * under the terms of the GNU General Public License (GPL) Version 2 as |
@@ -11,9 +11,10 @@ | |||
11 | * General Public License for more details. | 11 | * General Public License for more details. |
12 | */ | 12 | */ |
13 | /* | 13 | /* |
14 | * Copyright (c) 2005-2011 Brocade Communications Systems, Inc. | 14 | * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. |
15 | * Copyright (c) 2014-2015 QLogic Corporation | ||
15 | * All rights reserved | 16 | * All rights reserved |
16 | * www.brocade.com | 17 | * www.qlogic.com |
17 | */ | 18 | */ |
18 | 19 | ||
19 | /* BNA Hardware and Firmware Interface */ | 20 | /* BNA Hardware and Firmware Interface */ |
diff --git a/drivers/net/ethernet/brocade/bna/bfi_reg.h b/drivers/net/ethernet/brocade/bna/bfi_reg.h index c49fa312ddbd..2835b51eabec 100644 --- a/drivers/net/ethernet/brocade/bna/bfi_reg.h +++ b/drivers/net/ethernet/brocade/bna/bfi_reg.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Linux network driver for Brocade Converged Network Adapter. | 2 | * Linux network driver for QLogic BR-series Converged Network Adapter. |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify it | 4 | * This program is free software; you can redistribute it and/or modify it |
5 | * under the terms of the GNU General Public License (GPL) Version 2 as | 5 | * under the terms of the GNU General Public License (GPL) Version 2 as |
@@ -11,13 +11,14 @@ | |||
11 | * General Public License for more details. | 11 | * General Public License for more details. |
12 | */ | 12 | */ |
13 | /* | 13 | /* |
14 | * Copyright (c) 2005-2011 Brocade Communications Systems, Inc. | 14 | * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. |
15 | * Copyright (c) 2014-2015 QLogic Corporation | ||
15 | * All rights reserved | 16 | * All rights reserved |
16 | * www.brocade.com | 17 | * www.qlogic.com |
17 | */ | 18 | */ |
18 | 19 | ||
19 | /* | 20 | /* |
20 | * bfi_reg.h ASIC register defines for all Brocade adapter ASICs | 21 | * bfi_reg.h ASIC register defines for all QLogic BR-series adapter ASICs |
21 | */ | 22 | */ |
22 | 23 | ||
23 | #ifndef __BFI_REG_H__ | 24 | #ifndef __BFI_REG_H__ |
@@ -221,7 +222,7 @@ enum { | |||
221 | #define __PMM_1T_RESET_P 0x00000001 | 222 | #define __PMM_1T_RESET_P 0x00000001 |
222 | #define PMM_1T_RESET_REG_P1 0x00023c1c | 223 | #define PMM_1T_RESET_REG_P1 0x00023c1c |
223 | 224 | ||
224 | /* Brocade 1860 Adapter specific defines */ | 225 | /* QLogic BR-series 1860 Adapter specific defines */ |
225 | #define CT2_PCI_CPQ_BASE 0x00030000 | 226 | #define CT2_PCI_CPQ_BASE 0x00030000 |
226 | #define CT2_PCI_APP_BASE 0x00030100 | 227 | #define CT2_PCI_APP_BASE 0x00030100 |
227 | #define CT2_PCI_ETH_BASE 0x00030400 | 228 | #define CT2_PCI_ETH_BASE 0x00030400 |
@@ -264,7 +265,7 @@ enum { | |||
264 | #define CT2_HOSTFN_MSIX_VT_INDEX_MBOX_ERR (CT2_PCI_APP_BASE + 0x38) | 265 | #define CT2_HOSTFN_MSIX_VT_INDEX_MBOX_ERR (CT2_PCI_APP_BASE + 0x38) |
265 | 266 | ||
266 | /* | 267 | /* |
267 | * Brocade 1860 adapter CPQ block registers | 268 | * QLogic BR-series 1860 adapter CPQ block registers |
268 | */ | 269 | */ |
269 | #define CT2_HOSTFN_LPU0_MBOX0 (CT2_PCI_CPQ_BASE + 0x00) | 270 | #define CT2_HOSTFN_LPU0_MBOX0 (CT2_PCI_CPQ_BASE + 0x00) |
270 | #define CT2_HOSTFN_LPU1_MBOX0 (CT2_PCI_CPQ_BASE + 0x20) | 271 | #define CT2_HOSTFN_LPU1_MBOX0 (CT2_PCI_CPQ_BASE + 0x20) |
diff --git a/drivers/net/ethernet/brocade/bna/bna.h b/drivers/net/ethernet/brocade/bna/bna.h index 1f512190d696..8ba72b1f36d9 100644 --- a/drivers/net/ethernet/brocade/bna/bna.h +++ b/drivers/net/ethernet/brocade/bna/bna.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Linux network driver for Brocade Converged Network Adapter. | 2 | * Linux network driver for QLogic BR-series Converged Network Adapter. |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify it | 4 | * This program is free software; you can redistribute it and/or modify it |
5 | * under the terms of the GNU General Public License (GPL) Version 2 as | 5 | * under the terms of the GNU General Public License (GPL) Version 2 as |
@@ -11,9 +11,10 @@ | |||
11 | * General Public License for more details. | 11 | * General Public License for more details. |
12 | */ | 12 | */ |
13 | /* | 13 | /* |
14 | * Copyright (c) 2005-2011 Brocade Communications Systems, Inc. | 14 | * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. |
15 | * Copyright (c) 2014-2015 QLogic Corporation | ||
15 | * All rights reserved | 16 | * All rights reserved |
16 | * www.brocade.com | 17 | * www.qlogic.com |
17 | */ | 18 | */ |
18 | #ifndef __BNA_H__ | 19 | #ifndef __BNA_H__ |
19 | #define __BNA_H__ | 20 | #define __BNA_H__ |
diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c index 903466ef41c0..deb8da6ab9cc 100644 --- a/drivers/net/ethernet/brocade/bna/bna_enet.c +++ b/drivers/net/ethernet/brocade/bna/bna_enet.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Linux network driver for Brocade Converged Network Adapter. | 2 | * Linux network driver for QLogic BR-series Converged Network Adapter. |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify it | 4 | * This program is free software; you can redistribute it and/or modify it |
5 | * under the terms of the GNU General Public License (GPL) Version 2 as | 5 | * under the terms of the GNU General Public License (GPL) Version 2 as |
@@ -11,9 +11,10 @@ | |||
11 | * General Public License for more details. | 11 | * General Public License for more details. |
12 | */ | 12 | */ |
13 | /* | 13 | /* |
14 | * Copyright (c) 2005-2011 Brocade Communications Systems, Inc. | 14 | * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. |
15 | * Copyright (c) 2014-2015 QLogic Corporation | ||
15 | * All rights reserved | 16 | * All rights reserved |
16 | * www.brocade.com | 17 | * www.qlogic.com |
17 | */ | 18 | */ |
18 | #include "bna.h" | 19 | #include "bna.h" |
19 | 20 | ||
diff --git a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h index 2702d02e98d9..c5feab130d6d 100644 --- a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h +++ b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Linux network driver for Brocade Converged Network Adapter. | 2 | * Linux network driver for QLogic BR-series Converged Network Adapter. |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify it | 4 | * This program is free software; you can redistribute it and/or modify it |
5 | * under the terms of the GNU General Public License (GPL) Version 2 as | 5 | * under the terms of the GNU General Public License (GPL) Version 2 as |
@@ -11,9 +11,10 @@ | |||
11 | * General Public License for more details. | 11 | * General Public License for more details. |
12 | */ | 12 | */ |
13 | /* | 13 | /* |
14 | * Copyright (c) 2005-2011 Brocade Communications Systems, Inc. | 14 | * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. |
15 | * Copyright (c) 2014-2015 QLogic Corporation | ||
15 | * All rights reserved | 16 | * All rights reserved |
16 | * www.brocade.com | 17 | * www.qlogic.com |
17 | */ | 18 | */ |
18 | 19 | ||
19 | /* File for interrupt macros and functions */ | 20 | /* File for interrupt macros and functions */ |
diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c index 5fac411c52f4..8ab3a5f62706 100644 --- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c +++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Linux network driver for Brocade Converged Network Adapter. | 2 | * Linux network driver for QLogic BR-series Converged Network Adapter. |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify it | 4 | * This program is free software; you can redistribute it and/or modify it |
5 | * under the terms of the GNU General Public License (GPL) Version 2 as | 5 | * under the terms of the GNU General Public License (GPL) Version 2 as |
@@ -11,9 +11,10 @@ | |||
11 | * General Public License for more details. | 11 | * General Public License for more details. |
12 | */ | 12 | */ |
13 | /* | 13 | /* |
14 | * Copyright (c) 2005-2011 Brocade Communications Systems, Inc. | 14 | * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. |
15 | * Copyright (c) 2014-2015 QLogic Corporation | ||
15 | * All rights reserved | 16 | * All rights reserved |
16 | * www.brocade.com | 17 | * www.qlogic.com |
17 | */ | 18 | */ |
18 | #include "bna.h" | 19 | #include "bna.h" |
19 | #include "bfi.h" | 20 | #include "bfi.h" |
diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h index 621547cd3504..d0a7a566f5d6 100644 --- a/drivers/net/ethernet/brocade/bna/bna_types.h +++ b/drivers/net/ethernet/brocade/bna/bna_types.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Linux network driver for Brocade Converged Network Adapter. | 2 | * Linux network driver for QLogic BR-series Converged Network Adapter. |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify it | 4 | * This program is free software; you can redistribute it and/or modify it |
5 | * under the terms of the GNU General Public License (GPL) Version 2 as | 5 | * under the terms of the GNU General Public License (GPL) Version 2 as |
@@ -11,9 +11,10 @@ | |||
11 | * General Public License for more details. | 11 | * General Public License for more details. |
12 | */ | 12 | */ |
13 | /* | 13 | /* |
14 | * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. | 14 | * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. |
15 | * Copyright (c) 2014-2015 QLogic Corporation | ||
15 | * All rights reserved | 16 | * All rights reserved |
16 | * www.brocade.com | 17 | * www.qlogic.com |
17 | */ | 18 | */ |
18 | #ifndef __BNA_TYPES_H__ | 19 | #ifndef __BNA_TYPES_H__ |
19 | #define __BNA_TYPES_H__ | 20 | #define __BNA_TYPES_H__ |
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index 7714d7790089..37072a83f9d6 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Linux network driver for Brocade Converged Network Adapter. | 2 | * Linux network driver for QLogic BR-series Converged Network Adapter. |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify it | 4 | * This program is free software; you can redistribute it and/or modify it |
5 | * under the terms of the GNU General Public License (GPL) Version 2 as | 5 | * under the terms of the GNU General Public License (GPL) Version 2 as |
@@ -11,9 +11,10 @@ | |||
11 | * General Public License for more details. | 11 | * General Public License for more details. |
12 | */ | 12 | */ |
13 | /* | 13 | /* |
14 | * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. | 14 | * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. |
15 | * Copyright (c) 2014-2015 QLogic Corporation | ||
15 | * All rights reserved | 16 | * All rights reserved |
16 | * www.brocade.com | 17 | * www.qlogic.com |
17 | */ | 18 | */ |
18 | #include <linux/bitops.h> | 19 | #include <linux/bitops.h> |
19 | #include <linux/netdevice.h> | 20 | #include <linux/netdevice.h> |
@@ -3867,7 +3868,7 @@ bnad_module_init(void) | |||
3867 | { | 3868 | { |
3868 | int err; | 3869 | int err; |
3869 | 3870 | ||
3870 | pr_info("Brocade 10G Ethernet driver - version: %s\n", | 3871 | pr_info("QLogic BR-series 10G Ethernet driver - version: %s\n", |
3871 | BNAD_VERSION); | 3872 | BNAD_VERSION); |
3872 | 3873 | ||
3873 | bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover); | 3874 | bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover); |
@@ -3894,7 +3895,7 @@ module_exit(bnad_module_exit); | |||
3894 | 3895 | ||
3895 | MODULE_AUTHOR("Brocade"); | 3896 | MODULE_AUTHOR("Brocade"); |
3896 | MODULE_LICENSE("GPL"); | 3897 | MODULE_LICENSE("GPL"); |
3897 | MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver"); | 3898 | MODULE_DESCRIPTION("QLogic BR-series 10G PCIe Ethernet driver"); |
3898 | MODULE_VERSION(BNAD_VERSION); | 3899 | MODULE_VERSION(BNAD_VERSION); |
3899 | MODULE_FIRMWARE(CNA_FW_FILE_CT); | 3900 | MODULE_FIRMWARE(CNA_FW_FILE_CT); |
3900 | MODULE_FIRMWARE(CNA_FW_FILE_CT2); | 3901 | MODULE_FIRMWARE(CNA_FW_FILE_CT2); |
diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h index 2842c188e0da..7ead6c23edb6 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.h +++ b/drivers/net/ethernet/brocade/bna/bnad.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Linux network driver for Brocade Converged Network Adapter. | 2 | * Linux network driver for QLogic BR-series Converged Network Adapter. |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify it | 4 | * This program is free software; you can redistribute it and/or modify it |
5 | * under the terms of the GNU General Public License (GPL) Version 2 as | 5 | * under the terms of the GNU General Public License (GPL) Version 2 as |
@@ -11,9 +11,10 @@ | |||
11 | * General Public License for more details. | 11 | * General Public License for more details. |
12 | */ | 12 | */ |
13 | /* | 13 | /* |
14 | * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. | 14 | * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. |
15 | * Copyright (c) 2014-2015 QLogic Corporation | ||
15 | * All rights reserved | 16 | * All rights reserved |
16 | * www.brocade.com | 17 | * www.qlogic.com |
17 | */ | 18 | */ |
18 | #ifndef __BNAD_H__ | 19 | #ifndef __BNAD_H__ |
19 | #define __BNAD_H__ | 20 | #define __BNAD_H__ |
@@ -71,7 +72,7 @@ struct bnad_rx_ctrl { | |||
71 | #define BNAD_NAME "bna" | 72 | #define BNAD_NAME "bna" |
72 | #define BNAD_NAME_LEN 64 | 73 | #define BNAD_NAME_LEN 64 |
73 | 74 | ||
74 | #define BNAD_VERSION "3.2.23.0" | 75 | #define BNAD_VERSION "3.2.25.1" |
75 | 76 | ||
76 | #define BNAD_MAILBOX_MSIX_INDEX 0 | 77 | #define BNAD_MAILBOX_MSIX_INDEX 0 |
77 | #define BNAD_MAILBOX_MSIX_VECTORS 1 | 78 | #define BNAD_MAILBOX_MSIX_VECTORS 1 |
diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c index 619083a860a4..72c89550417c 100644 --- a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c +++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Linux network driver for Brocade Converged Network Adapter. | 2 | * Linux network driver for QLogic BR-series Converged Network Adapter. |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify it | 4 | * This program is free software; you can redistribute it and/or modify it |
5 | * under the terms of the GNU General Public License (GPL) Version 2 as | 5 | * under the terms of the GNU General Public License (GPL) Version 2 as |
@@ -11,9 +11,10 @@ | |||
11 | * General Public License for more details. | 11 | * General Public License for more details. |
12 | */ | 12 | */ |
13 | /* | 13 | /* |
14 | * Copyright (c) 2005-2011 Brocade Communications Systems, Inc. | 14 | * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. |
15 | * Copyright (c) 2014-2015 QLogic Corporation | ||
15 | * All rights reserved | 16 | * All rights reserved |
16 | * www.brocade.com | 17 | * www.qlogic.com |
17 | */ | 18 | */ |
18 | 19 | ||
19 | #include <linux/debugfs.h> | 20 | #include <linux/debugfs.h> |
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c index d26adac6ab99..12f344debd1c 100644 --- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c +++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Linux network driver for Brocade Converged Network Adapter. | 2 | * Linux network driver for QLogic BR-series Converged Network Adapter. |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify it | 4 | * This program is free software; you can redistribute it and/or modify it |
5 | * under the terms of the GNU General Public License (GPL) Version 2 as | 5 | * under the terms of the GNU General Public License (GPL) Version 2 as |
@@ -11,9 +11,10 @@ | |||
11 | * General Public License for more details. | 11 | * General Public License for more details. |
12 | */ | 12 | */ |
13 | /* | 13 | /* |
14 | * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. | 14 | * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. |
15 | * Copyright (c) 2014-2015 QLogic Corporation | ||
15 | * All rights reserved | 16 | * All rights reserved |
16 | * www.brocade.com | 17 | * www.qlogic.com |
17 | */ | 18 | */ |
18 | 19 | ||
19 | #include "cna.h" | 20 | #include "cna.h" |
diff --git a/drivers/net/ethernet/brocade/bna/cna.h b/drivers/net/ethernet/brocade/bna/cna.h index b3ff6d507951..28e7d0ffeab1 100644 --- a/drivers/net/ethernet/brocade/bna/cna.h +++ b/drivers/net/ethernet/brocade/bna/cna.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Linux network driver for Brocade Converged Network Adapter. | 2 | * Linux network driver for QLogic BR-series Converged Network Adapter. |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify it | 4 | * This program is free software; you can redistribute it and/or modify it |
5 | * under the terms of the GNU General Public License (GPL) Version 2 as | 5 | * under the terms of the GNU General Public License (GPL) Version 2 as |
@@ -11,9 +11,10 @@ | |||
11 | * General Public License for more details. | 11 | * General Public License for more details. |
12 | */ | 12 | */ |
13 | /* | 13 | /* |
14 | * Copyright (c) 2006-2010 Brocade Communications Systems, Inc. | 14 | * Copyright (c) 2006-2014 Brocade Communications Systems, Inc. |
15 | * Copyright (c) 2014-2015 QLogic Corporation | ||
15 | * All rights reserved | 16 | * All rights reserved |
16 | * www.brocade.com | 17 | * www.qlogic.com |
17 | */ | 18 | */ |
18 | 19 | ||
19 | #ifndef __CNA_H__ | 20 | #ifndef __CNA_H__ |
@@ -37,8 +38,8 @@ | |||
37 | 38 | ||
38 | extern char bfa_version[]; | 39 | extern char bfa_version[]; |
39 | 40 | ||
40 | #define CNA_FW_FILE_CT "ctfw-3.2.3.0.bin" | 41 | #define CNA_FW_FILE_CT "ctfw-3.2.5.1.bin" |
41 | #define CNA_FW_FILE_CT2 "ct2fw-3.2.3.0.bin" | 42 | #define CNA_FW_FILE_CT2 "ct2fw-3.2.5.1.bin" |
42 | #define FC_SYMNAME_MAX 256 /*!< max name server symbolic name size */ | 43 | #define FC_SYMNAME_MAX 256 /*!< max name server symbolic name size */ |
43 | 44 | ||
44 | #pragma pack(1) | 45 | #pragma pack(1) |
diff --git a/drivers/net/ethernet/brocade/bna/cna_fwimg.c b/drivers/net/ethernet/brocade/bna/cna_fwimg.c index 6f72771caea6..ebf462d8082f 100644 --- a/drivers/net/ethernet/brocade/bna/cna_fwimg.c +++ b/drivers/net/ethernet/brocade/bna/cna_fwimg.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Linux network driver for Brocade Converged Network Adapter. | 2 | * Linux network driver for QLogic BR-series Converged Network Adapter. |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify it | 4 | * This program is free software; you can redistribute it and/or modify it |
5 | * under the terms of the GNU General Public License (GPL) Version 2 as | 5 | * under the terms of the GNU General Public License (GPL) Version 2 as |
@@ -11,9 +11,10 @@ | |||
11 | * General Public License for more details. | 11 | * General Public License for more details. |
12 | */ | 12 | */ |
13 | /* | 13 | /* |
14 | * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. | 14 | * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. |
15 | * Copyright (c) 2014-2015 QLogic Corporation | ||
15 | * All rights reserved | 16 | * All rights reserved |
16 | * www.brocade.com | 17 | * www.qlogic.com |
17 | */ | 18 | */ |
18 | #include <linux/firmware.h> | 19 | #include <linux/firmware.h> |
19 | #include "bnad.h" | 20 | #include "bnad.h" |
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index ad76b8e35a00..1fe8b946243a 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c | |||
@@ -449,7 +449,7 @@ static void macb_update_stats(struct macb *bp) | |||
449 | WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4); | 449 | WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4); |
450 | 450 | ||
451 | for(; p < end; p++, reg++) | 451 | for(; p < end; p++, reg++) |
452 | *p += __raw_readl(reg); | 452 | *p += readl_relaxed(reg); |
453 | } | 453 | } |
454 | 454 | ||
455 | static int macb_halt_tx(struct macb *bp) | 455 | static int macb_halt_tx(struct macb *bp) |
@@ -1578,6 +1578,7 @@ static u32 macb_dbw(struct macb *bp) | |||
1578 | static void macb_configure_dma(struct macb *bp) | 1578 | static void macb_configure_dma(struct macb *bp) |
1579 | { | 1579 | { |
1580 | u32 dmacfg; | 1580 | u32 dmacfg; |
1581 | u32 tmp, ncr; | ||
1581 | 1582 | ||
1582 | if (macb_is_gem(bp)) { | 1583 | if (macb_is_gem(bp)) { |
1583 | dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L); | 1584 | dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L); |
@@ -1585,7 +1586,24 @@ static void macb_configure_dma(struct macb *bp) | |||
1585 | if (bp->dma_burst_length) | 1586 | if (bp->dma_burst_length) |
1586 | dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg); | 1587 | dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg); |
1587 | dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L); | 1588 | dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L); |
1588 | dmacfg &= ~GEM_BIT(ENDIA); | 1589 | dmacfg &= ~GEM_BIT(ENDIA_PKT); |
1590 | |||
1591 | /* Find the CPU endianness by using the loopback bit of net_ctrl | ||
1592 | * register. save it first. When the CPU is in big endian we | ||
1593 | * need to program swaped mode for management descriptor access. | ||
1594 | */ | ||
1595 | ncr = macb_readl(bp, NCR); | ||
1596 | __raw_writel(MACB_BIT(LLB), bp->regs + MACB_NCR); | ||
1597 | tmp = __raw_readl(bp->regs + MACB_NCR); | ||
1598 | |||
1599 | if (tmp == MACB_BIT(LLB)) | ||
1600 | dmacfg &= ~GEM_BIT(ENDIA_DESC); | ||
1601 | else | ||
1602 | dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */ | ||
1603 | |||
1604 | /* Restore net_ctrl */ | ||
1605 | macb_writel(bp, NCR, ncr); | ||
1606 | |||
1589 | if (bp->dev->features & NETIF_F_HW_CSUM) | 1607 | if (bp->dev->features & NETIF_F_HW_CSUM) |
1590 | dmacfg |= GEM_BIT(TXCOEN); | 1608 | dmacfg |= GEM_BIT(TXCOEN); |
1591 | else | 1609 | else |
@@ -1832,14 +1850,14 @@ static void gem_update_stats(struct macb *bp) | |||
1832 | 1850 | ||
1833 | for (i = 0; i < GEM_STATS_LEN; ++i, ++p) { | 1851 | for (i = 0; i < GEM_STATS_LEN; ++i, ++p) { |
1834 | u32 offset = gem_statistics[i].offset; | 1852 | u32 offset = gem_statistics[i].offset; |
1835 | u64 val = __raw_readl(bp->regs + offset); | 1853 | u64 val = readl_relaxed(bp->regs + offset); |
1836 | 1854 | ||
1837 | bp->ethtool_stats[i] += val; | 1855 | bp->ethtool_stats[i] += val; |
1838 | *p += val; | 1856 | *p += val; |
1839 | 1857 | ||
1840 | if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) { | 1858 | if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) { |
1841 | /* Add GEM_OCTTXH, GEM_OCTRXH */ | 1859 | /* Add GEM_OCTTXH, GEM_OCTRXH */ |
1842 | val = __raw_readl(bp->regs + offset + 4); | 1860 | val = readl_relaxed(bp->regs + offset + 4); |
1843 | bp->ethtool_stats[i] += ((u64)val) << 32; | 1861 | bp->ethtool_stats[i] += ((u64)val) << 32; |
1844 | *(++p) += val; | 1862 | *(++p) += val; |
1845 | } | 1863 | } |
@@ -2191,12 +2209,14 @@ static void macb_probe_queues(void __iomem *mem, | |||
2191 | *num_queues = 1; | 2209 | *num_queues = 1; |
2192 | 2210 | ||
2193 | /* is it macb or gem ? */ | 2211 | /* is it macb or gem ? */ |
2194 | mid = __raw_readl(mem + MACB_MID); | 2212 | mid = readl_relaxed(mem + MACB_MID); |
2213 | |||
2195 | if (MACB_BFEXT(IDNUM, mid) != 0x2) | 2214 | if (MACB_BFEXT(IDNUM, mid) != 0x2) |
2196 | return; | 2215 | return; |
2197 | 2216 | ||
2198 | /* bit 0 is never set but queue 0 always exists */ | 2217 | /* bit 0 is never set but queue 0 always exists */ |
2199 | *queue_mask = __raw_readl(mem + GEM_DCFG6) & 0xff; | 2218 | *queue_mask = readl_relaxed(mem + GEM_DCFG6) & 0xff; |
2219 | |||
2200 | *queue_mask |= 0x1; | 2220 | *queue_mask |= 0x1; |
2201 | 2221 | ||
2202 | for (hw_q = 1; hw_q < MACB_MAX_QUEUES; ++hw_q) | 2222 | for (hw_q = 1; hw_q < MACB_MAX_QUEUES; ++hw_q) |
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index 31dc080f2437..83241c8ec5dc 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h | |||
@@ -229,8 +229,10 @@ | |||
229 | /* Bitfields in DMACFG. */ | 229 | /* Bitfields in DMACFG. */ |
230 | #define GEM_FBLDO_OFFSET 0 /* fixed burst length for DMA */ | 230 | #define GEM_FBLDO_OFFSET 0 /* fixed burst length for DMA */ |
231 | #define GEM_FBLDO_SIZE 5 | 231 | #define GEM_FBLDO_SIZE 5 |
232 | #define GEM_ENDIA_OFFSET 7 /* endian swap mode for packet data access */ | 232 | #define GEM_ENDIA_DESC_OFFSET 6 /* endian swap mode for management descriptor access */ |
233 | #define GEM_ENDIA_SIZE 1 | 233 | #define GEM_ENDIA_DESC_SIZE 1 |
234 | #define GEM_ENDIA_PKT_OFFSET 7 /* endian swap mode for packet data access */ | ||
235 | #define GEM_ENDIA_PKT_SIZE 1 | ||
234 | #define GEM_RXBMS_OFFSET 8 /* RX packet buffer memory size select */ | 236 | #define GEM_RXBMS_OFFSET 8 /* RX packet buffer memory size select */ |
235 | #define GEM_RXBMS_SIZE 2 | 237 | #define GEM_RXBMS_SIZE 2 |
236 | #define GEM_TXPBMS_OFFSET 10 /* TX packet buffer memory size select */ | 238 | #define GEM_TXPBMS_OFFSET 10 /* TX packet buffer memory size select */ |
@@ -423,17 +425,17 @@ | |||
423 | 425 | ||
424 | /* Register access macros */ | 426 | /* Register access macros */ |
425 | #define macb_readl(port,reg) \ | 427 | #define macb_readl(port,reg) \ |
426 | __raw_readl((port)->regs + MACB_##reg) | 428 | readl_relaxed((port)->regs + MACB_##reg) |
427 | #define macb_writel(port,reg,value) \ | 429 | #define macb_writel(port,reg,value) \ |
428 | __raw_writel((value), (port)->regs + MACB_##reg) | 430 | writel_relaxed((value), (port)->regs + MACB_##reg) |
429 | #define gem_readl(port, reg) \ | 431 | #define gem_readl(port, reg) \ |
430 | __raw_readl((port)->regs + GEM_##reg) | 432 | readl_relaxed((port)->regs + GEM_##reg) |
431 | #define gem_writel(port, reg, value) \ | 433 | #define gem_writel(port, reg, value) \ |
432 | __raw_writel((value), (port)->regs + GEM_##reg) | 434 | writel_relaxed((value), (port)->regs + GEM_##reg) |
433 | #define queue_readl(queue, reg) \ | 435 | #define queue_readl(queue, reg) \ |
434 | __raw_readl((queue)->bp->regs + (queue)->reg) | 436 | readl_relaxed((queue)->bp->regs + (queue)->reg) |
435 | #define queue_writel(queue, reg, value) \ | 437 | #define queue_writel(queue, reg, value) \ |
436 | __raw_writel((value), (queue)->bp->regs + (queue)->reg) | 438 | writel_relaxed((value), (queue)->bp->regs + (queue)->reg) |
437 | 439 | ||
438 | /* Conditional GEM/MACB macros. These perform the operation to the correct | 440 | /* Conditional GEM/MACB macros. These perform the operation to the correct |
439 | * register dependent on whether the device is a GEM or a MACB. For registers | 441 | * register dependent on whether the device is a GEM or a MACB. For registers |
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index a5179bfcdc2c..204bd182473b 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c | |||
@@ -893,7 +893,7 @@ static int enic_set_vf_port(struct net_device *netdev, int vf, | |||
893 | } else { | 893 | } else { |
894 | memset(pp, 0, sizeof(*pp)); | 894 | memset(pp, 0, sizeof(*pp)); |
895 | if (vf == PORT_SELF_VF) | 895 | if (vf == PORT_SELF_VF) |
896 | memset(netdev->dev_addr, 0, ETH_ALEN); | 896 | eth_zero_addr(netdev->dev_addr); |
897 | } | 897 | } |
898 | } else { | 898 | } else { |
899 | /* Set flag to indicate that the port assoc/disassoc | 899 | /* Set flag to indicate that the port assoc/disassoc |
@@ -903,14 +903,14 @@ static int enic_set_vf_port(struct net_device *netdev, int vf, | |||
903 | 903 | ||
904 | /* If DISASSOCIATE, clean up all assigned/saved macaddresses */ | 904 | /* If DISASSOCIATE, clean up all assigned/saved macaddresses */ |
905 | if (pp->request == PORT_REQUEST_DISASSOCIATE) { | 905 | if (pp->request == PORT_REQUEST_DISASSOCIATE) { |
906 | memset(pp->mac_addr, 0, ETH_ALEN); | 906 | eth_zero_addr(pp->mac_addr); |
907 | if (vf == PORT_SELF_VF) | 907 | if (vf == PORT_SELF_VF) |
908 | memset(netdev->dev_addr, 0, ETH_ALEN); | 908 | eth_zero_addr(netdev->dev_addr); |
909 | } | 909 | } |
910 | } | 910 | } |
911 | 911 | ||
912 | if (vf == PORT_SELF_VF) | 912 | if (vf == PORT_SELF_VF) |
913 | memset(pp->vf_mac, 0, ETH_ALEN); | 913 | eth_zero_addr(pp->vf_mac); |
914 | 914 | ||
915 | return err; | 915 | return err; |
916 | } | 916 | } |
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 27de37aa90af..fac806a15a61 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h | |||
@@ -238,10 +238,17 @@ struct be_tx_stats { | |||
238 | struct u64_stats_sync sync_compl; | 238 | struct u64_stats_sync sync_compl; |
239 | }; | 239 | }; |
240 | 240 | ||
241 | /* Structure to hold some data of interest obtained from a TX CQE */ | ||
242 | struct be_tx_compl_info { | ||
243 | u8 status; /* Completion status */ | ||
244 | u16 end_index; /* Completed TXQ Index */ | ||
245 | }; | ||
246 | |||
241 | struct be_tx_obj { | 247 | struct be_tx_obj { |
242 | u32 db_offset; | 248 | u32 db_offset; |
243 | struct be_queue_info q; | 249 | struct be_queue_info q; |
244 | struct be_queue_info cq; | 250 | struct be_queue_info cq; |
251 | struct be_tx_compl_info txcp; | ||
245 | /* Remember the skbs that were transmitted */ | 252 | /* Remember the skbs that were transmitted */ |
246 | struct sk_buff *sent_skb_list[TX_Q_LEN]; | 253 | struct sk_buff *sent_skb_list[TX_Q_LEN]; |
247 | struct be_tx_stats stats; | 254 | struct be_tx_stats stats; |
@@ -369,6 +376,7 @@ enum vf_state { | |||
369 | #define BE_FLAGS_VXLAN_OFFLOADS BIT(8) | 376 | #define BE_FLAGS_VXLAN_OFFLOADS BIT(8) |
370 | #define BE_FLAGS_SETUP_DONE BIT(9) | 377 | #define BE_FLAGS_SETUP_DONE BIT(9) |
371 | #define BE_FLAGS_EVT_INCOMPATIBLE_SFP BIT(10) | 378 | #define BE_FLAGS_EVT_INCOMPATIBLE_SFP BIT(10) |
379 | #define BE_FLAGS_ERR_DETECTION_SCHEDULED BIT(11) | ||
372 | 380 | ||
373 | #define BE_UC_PMAC_COUNT 30 | 381 | #define BE_UC_PMAC_COUNT 30 |
374 | #define BE_VF_UC_PMAC_COUNT 2 | 382 | #define BE_VF_UC_PMAC_COUNT 2 |
@@ -417,6 +425,39 @@ struct rss_info { | |||
417 | u8 rss_hkey[RSS_HASH_KEY_LEN]; | 425 | u8 rss_hkey[RSS_HASH_KEY_LEN]; |
418 | }; | 426 | }; |
419 | 427 | ||
428 | /* Macros to read/write the 'features' word of be_wrb_params structure. | ||
429 | */ | ||
430 | #define BE_WRB_F_BIT(name) BE_WRB_F_##name##_BIT | ||
431 | #define BE_WRB_F_MASK(name) BIT_MASK(BE_WRB_F_##name##_BIT) | ||
432 | |||
433 | #define BE_WRB_F_GET(word, name) \ | ||
434 | (((word) & (BE_WRB_F_MASK(name))) >> BE_WRB_F_BIT(name)) | ||
435 | |||
436 | #define BE_WRB_F_SET(word, name, val) \ | ||
437 | ((word) |= (((val) << BE_WRB_F_BIT(name)) & BE_WRB_F_MASK(name))) | ||
438 | |||
439 | /* Feature/offload bits */ | ||
440 | enum { | ||
441 | BE_WRB_F_CRC_BIT, /* Ethernet CRC */ | ||
442 | BE_WRB_F_IPCS_BIT, /* IP csum */ | ||
443 | BE_WRB_F_TCPCS_BIT, /* TCP csum */ | ||
444 | BE_WRB_F_UDPCS_BIT, /* UDP csum */ | ||
445 | BE_WRB_F_LSO_BIT, /* LSO */ | ||
446 | BE_WRB_F_LSO6_BIT, /* LSO6 */ | ||
447 | BE_WRB_F_VLAN_BIT, /* VLAN */ | ||
448 | BE_WRB_F_VLAN_SKIP_HW_BIT /* Skip VLAN tag (workaround) */ | ||
449 | }; | ||
450 | |||
451 | /* The structure below provides a HW-agnostic abstraction of WRB params | ||
452 | * retrieved from a TX skb. This is in turn passed to chip specific routines | ||
453 | * during transmit, to set the corresponding params in the WRB. | ||
454 | */ | ||
455 | struct be_wrb_params { | ||
456 | u32 features; /* Feature bits */ | ||
457 | u16 vlan_tag; /* VLAN tag */ | ||
458 | u16 lso_mss; /* MSS for LSO */ | ||
459 | }; | ||
460 | |||
420 | struct be_adapter { | 461 | struct be_adapter { |
421 | struct pci_dev *pdev; | 462 | struct pci_dev *pdev; |
422 | struct net_device *netdev; | 463 | struct net_device *netdev; |
@@ -461,7 +502,7 @@ struct be_adapter { | |||
461 | struct delayed_work work; | 502 | struct delayed_work work; |
462 | u16 work_counter; | 503 | u16 work_counter; |
463 | 504 | ||
464 | struct delayed_work func_recovery_work; | 505 | struct delayed_work be_err_detection_work; |
465 | u32 flags; | 506 | u32 flags; |
466 | u32 cmd_privileges; | 507 | u32 cmd_privileges; |
467 | /* Ethtool knobs and info */ | 508 | /* Ethtool knobs and info */ |
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 36916cfa70f9..be00695b3be7 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c | |||
@@ -635,73 +635,16 @@ static int lancer_wait_ready(struct be_adapter *adapter) | |||
635 | for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) { | 635 | for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) { |
636 | sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET); | 636 | sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET); |
637 | if (sliport_status & SLIPORT_STATUS_RDY_MASK) | 637 | if (sliport_status & SLIPORT_STATUS_RDY_MASK) |
638 | break; | 638 | return 0; |
639 | |||
640 | msleep(1000); | ||
641 | } | ||
642 | |||
643 | if (i == SLIPORT_READY_TIMEOUT) | ||
644 | return sliport_status ? : -1; | ||
645 | |||
646 | return 0; | ||
647 | } | ||
648 | |||
649 | static bool lancer_provisioning_error(struct be_adapter *adapter) | ||
650 | { | ||
651 | u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0; | ||
652 | |||
653 | sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET); | ||
654 | if (sliport_status & SLIPORT_STATUS_ERR_MASK) { | ||
655 | sliport_err1 = ioread32(adapter->db + SLIPORT_ERROR1_OFFSET); | ||
656 | sliport_err2 = ioread32(adapter->db + SLIPORT_ERROR2_OFFSET); | ||
657 | |||
658 | if (sliport_err1 == SLIPORT_ERROR_NO_RESOURCE1 && | ||
659 | sliport_err2 == SLIPORT_ERROR_NO_RESOURCE2) | ||
660 | return true; | ||
661 | } | ||
662 | return false; | ||
663 | } | ||
664 | |||
665 | int lancer_test_and_set_rdy_state(struct be_adapter *adapter) | ||
666 | { | ||
667 | int status; | ||
668 | u32 sliport_status, err, reset_needed; | ||
669 | bool resource_error; | ||
670 | 639 | ||
671 | resource_error = lancer_provisioning_error(adapter); | 640 | if (sliport_status & SLIPORT_STATUS_ERR_MASK && |
672 | if (resource_error) | 641 | !(sliport_status & SLIPORT_STATUS_RN_MASK)) |
673 | return -EAGAIN; | 642 | return -EIO; |
674 | 643 | ||
675 | status = lancer_wait_ready(adapter); | 644 | msleep(1000); |
676 | if (!status) { | ||
677 | sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET); | ||
678 | err = sliport_status & SLIPORT_STATUS_ERR_MASK; | ||
679 | reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK; | ||
680 | if (err && reset_needed) { | ||
681 | iowrite32(SLI_PORT_CONTROL_IP_MASK, | ||
682 | adapter->db + SLIPORT_CONTROL_OFFSET); | ||
683 | |||
684 | /* check if adapter has corrected the error */ | ||
685 | status = lancer_wait_ready(adapter); | ||
686 | sliport_status = ioread32(adapter->db + | ||
687 | SLIPORT_STATUS_OFFSET); | ||
688 | sliport_status &= (SLIPORT_STATUS_ERR_MASK | | ||
689 | SLIPORT_STATUS_RN_MASK); | ||
690 | if (status || sliport_status) | ||
691 | status = -1; | ||
692 | } else if (err || reset_needed) { | ||
693 | status = -1; | ||
694 | } | ||
695 | } | 645 | } |
696 | /* Stop error recovery if error is not recoverable. | ||
697 | * No resource error is temporary errors and will go away | ||
698 | * when PF provisions resources. | ||
699 | */ | ||
700 | resource_error = lancer_provisioning_error(adapter); | ||
701 | if (resource_error) | ||
702 | status = -EAGAIN; | ||
703 | 646 | ||
704 | return status; | 647 | return sliport_status ? : -1; |
705 | } | 648 | } |
706 | 649 | ||
707 | int be_fw_wait_ready(struct be_adapter *adapter) | 650 | int be_fw_wait_ready(struct be_adapter *adapter) |
@@ -720,6 +663,10 @@ int be_fw_wait_ready(struct be_adapter *adapter) | |||
720 | } | 663 | } |
721 | 664 | ||
722 | do { | 665 | do { |
666 | /* There's no means to poll POST state on BE2/3 VFs */ | ||
667 | if (BEx_chip(adapter) && be_virtfn(adapter)) | ||
668 | return 0; | ||
669 | |||
723 | stage = be_POST_stage_get(adapter); | 670 | stage = be_POST_stage_get(adapter); |
724 | if (stage == POST_STAGE_ARMFW_RDY) | 671 | if (stage == POST_STAGE_ARMFW_RDY) |
725 | return 0; | 672 | return 0; |
@@ -734,7 +681,7 @@ int be_fw_wait_ready(struct be_adapter *adapter) | |||
734 | 681 | ||
735 | err: | 682 | err: |
736 | dev_err(dev, "POST timeout; stage=%#x\n", stage); | 683 | dev_err(dev, "POST timeout; stage=%#x\n", stage); |
737 | return -1; | 684 | return -ETIMEDOUT; |
738 | } | 685 | } |
739 | 686 | ||
740 | static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb) | 687 | static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb) |
@@ -2126,16 +2073,12 @@ int be_cmd_reset_function(struct be_adapter *adapter) | |||
2126 | int status; | 2073 | int status; |
2127 | 2074 | ||
2128 | if (lancer_chip(adapter)) { | 2075 | if (lancer_chip(adapter)) { |
2076 | iowrite32(SLI_PORT_CONTROL_IP_MASK, | ||
2077 | adapter->db + SLIPORT_CONTROL_OFFSET); | ||
2129 | status = lancer_wait_ready(adapter); | 2078 | status = lancer_wait_ready(adapter); |
2130 | if (!status) { | 2079 | if (status) |
2131 | iowrite32(SLI_PORT_CONTROL_IP_MASK, | ||
2132 | adapter->db + SLIPORT_CONTROL_OFFSET); | ||
2133 | status = lancer_test_and_set_rdy_state(adapter); | ||
2134 | } | ||
2135 | if (status) { | ||
2136 | dev_err(&adapter->pdev->dev, | 2080 | dev_err(&adapter->pdev->dev, |
2137 | "Adapter in non recoverable error\n"); | 2081 | "Adapter in non recoverable error\n"); |
2138 | } | ||
2139 | return status; | 2082 | return status; |
2140 | } | 2083 | } |
2141 | 2084 | ||
@@ -3133,7 +3076,7 @@ int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac) | |||
3133 | int status; | 3076 | int status; |
3134 | bool pmac_valid = false; | 3077 | bool pmac_valid = false; |
3135 | 3078 | ||
3136 | memset(mac, 0, ETH_ALEN); | 3079 | eth_zero_addr(mac); |
3137 | 3080 | ||
3138 | if (BEx_chip(adapter)) { | 3081 | if (BEx_chip(adapter)) { |
3139 | if (be_physfn(adapter)) | 3082 | if (be_physfn(adapter)) |
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 0a816859aca5..7eccebc676e2 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c | |||
@@ -727,48 +727,86 @@ static u16 skb_ip_proto(struct sk_buff *skb) | |||
727 | ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr; | 727 | ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr; |
728 | } | 728 | } |
729 | 729 | ||
730 | static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr, | 730 | static inline bool be_is_txq_full(struct be_tx_obj *txo) |
731 | struct sk_buff *skb, u32 wrb_cnt, u32 len, | ||
732 | bool skip_hw_vlan) | ||
733 | { | 731 | { |
734 | u16 vlan_tag, proto; | 732 | return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len; |
733 | } | ||
735 | 734 | ||
736 | memset(hdr, 0, sizeof(*hdr)); | 735 | static inline bool be_can_txq_wake(struct be_tx_obj *txo) |
736 | { | ||
737 | return atomic_read(&txo->q.used) < txo->q.len / 2; | ||
738 | } | ||
737 | 739 | ||
738 | SET_TX_WRB_HDR_BITS(crc, hdr, 1); | 740 | static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo) |
741 | { | ||
742 | return atomic_read(&txo->q.used) > txo->pend_wrb_cnt; | ||
743 | } | ||
744 | |||
745 | static void be_get_wrb_params_from_skb(struct be_adapter *adapter, | ||
746 | struct sk_buff *skb, | ||
747 | struct be_wrb_params *wrb_params) | ||
748 | { | ||
749 | u16 proto; | ||
739 | 750 | ||
740 | if (skb_is_gso(skb)) { | 751 | if (skb_is_gso(skb)) { |
741 | SET_TX_WRB_HDR_BITS(lso, hdr, 1); | 752 | BE_WRB_F_SET(wrb_params->features, LSO, 1); |
742 | SET_TX_WRB_HDR_BITS(lso_mss, hdr, skb_shinfo(skb)->gso_size); | 753 | wrb_params->lso_mss = skb_shinfo(skb)->gso_size; |
743 | if (skb_is_gso_v6(skb) && !lancer_chip(adapter)) | 754 | if (skb_is_gso_v6(skb) && !lancer_chip(adapter)) |
744 | SET_TX_WRB_HDR_BITS(lso6, hdr, 1); | 755 | BE_WRB_F_SET(wrb_params->features, LSO6, 1); |
745 | } else if (skb->ip_summed == CHECKSUM_PARTIAL) { | 756 | } else if (skb->ip_summed == CHECKSUM_PARTIAL) { |
746 | if (skb->encapsulation) { | 757 | if (skb->encapsulation) { |
747 | SET_TX_WRB_HDR_BITS(ipcs, hdr, 1); | 758 | BE_WRB_F_SET(wrb_params->features, IPCS, 1); |
748 | proto = skb_inner_ip_proto(skb); | 759 | proto = skb_inner_ip_proto(skb); |
749 | } else { | 760 | } else { |
750 | proto = skb_ip_proto(skb); | 761 | proto = skb_ip_proto(skb); |
751 | } | 762 | } |
752 | if (proto == IPPROTO_TCP) | 763 | if (proto == IPPROTO_TCP) |
753 | SET_TX_WRB_HDR_BITS(tcpcs, hdr, 1); | 764 | BE_WRB_F_SET(wrb_params->features, TCPCS, 1); |
754 | else if (proto == IPPROTO_UDP) | 765 | else if (proto == IPPROTO_UDP) |
755 | SET_TX_WRB_HDR_BITS(udpcs, hdr, 1); | 766 | BE_WRB_F_SET(wrb_params->features, UDPCS, 1); |
756 | } | 767 | } |
757 | 768 | ||
758 | if (skb_vlan_tag_present(skb)) { | 769 | if (skb_vlan_tag_present(skb)) { |
759 | SET_TX_WRB_HDR_BITS(vlan, hdr, 1); | 770 | BE_WRB_F_SET(wrb_params->features, VLAN, 1); |
760 | vlan_tag = be_get_tx_vlan_tag(adapter, skb); | 771 | wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb); |
761 | SET_TX_WRB_HDR_BITS(vlan_tag, hdr, vlan_tag); | ||
762 | } | 772 | } |
763 | 773 | ||
764 | SET_TX_WRB_HDR_BITS(num_wrb, hdr, wrb_cnt); | 774 | BE_WRB_F_SET(wrb_params->features, CRC, 1); |
765 | SET_TX_WRB_HDR_BITS(len, hdr, len); | 775 | } |
776 | |||
777 | static void wrb_fill_hdr(struct be_adapter *adapter, | ||
778 | struct be_eth_hdr_wrb *hdr, | ||
779 | struct be_wrb_params *wrb_params, | ||
780 | struct sk_buff *skb) | ||
781 | { | ||
782 | memset(hdr, 0, sizeof(*hdr)); | ||
766 | 783 | ||
767 | /* Hack to skip HW VLAN tagging needs evt = 1, compl = 0 | 784 | SET_TX_WRB_HDR_BITS(crc, hdr, |
768 | * When this hack is not needed, the evt bit is set while ringing DB | 785 | BE_WRB_F_GET(wrb_params->features, CRC)); |
786 | SET_TX_WRB_HDR_BITS(ipcs, hdr, | ||
787 | BE_WRB_F_GET(wrb_params->features, IPCS)); | ||
788 | SET_TX_WRB_HDR_BITS(tcpcs, hdr, | ||
789 | BE_WRB_F_GET(wrb_params->features, TCPCS)); | ||
790 | SET_TX_WRB_HDR_BITS(udpcs, hdr, | ||
791 | BE_WRB_F_GET(wrb_params->features, UDPCS)); | ||
792 | |||
793 | SET_TX_WRB_HDR_BITS(lso, hdr, | ||
794 | BE_WRB_F_GET(wrb_params->features, LSO)); | ||
795 | SET_TX_WRB_HDR_BITS(lso6, hdr, | ||
796 | BE_WRB_F_GET(wrb_params->features, LSO6)); | ||
797 | SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss); | ||
798 | |||
799 | /* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this | ||
800 | * hack is not needed, the evt bit is set while ringing DB. | ||
769 | */ | 801 | */ |
770 | if (skip_hw_vlan) | 802 | SET_TX_WRB_HDR_BITS(event, hdr, |
771 | SET_TX_WRB_HDR_BITS(event, hdr, 1); | 803 | BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW)); |
804 | SET_TX_WRB_HDR_BITS(vlan, hdr, | ||
805 | BE_WRB_F_GET(wrb_params->features, VLAN)); | ||
806 | SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag); | ||
807 | |||
808 | SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb)); | ||
809 | SET_TX_WRB_HDR_BITS(len, hdr, skb->len); | ||
772 | } | 810 | } |
773 | 811 | ||
774 | static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb, | 812 | static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb, |
@@ -788,77 +826,124 @@ static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb, | |||
788 | } | 826 | } |
789 | } | 827 | } |
790 | 828 | ||
791 | /* Returns the number of WRBs used up by the skb */ | 829 | /* Grab a WRB header for xmit */ |
830 | static u16 be_tx_get_wrb_hdr(struct be_tx_obj *txo) | ||
831 | { | ||
832 | u16 head = txo->q.head; | ||
833 | |||
834 | queue_head_inc(&txo->q); | ||
835 | return head; | ||
836 | } | ||
837 | |||
838 | /* Set up the WRB header for xmit */ | ||
839 | static void be_tx_setup_wrb_hdr(struct be_adapter *adapter, | ||
840 | struct be_tx_obj *txo, | ||
841 | struct be_wrb_params *wrb_params, | ||
842 | struct sk_buff *skb, u16 head) | ||
843 | { | ||
844 | u32 num_frags = skb_wrb_cnt(skb); | ||
845 | struct be_queue_info *txq = &txo->q; | ||
846 | struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head); | ||
847 | |||
848 | wrb_fill_hdr(adapter, hdr, wrb_params, skb); | ||
849 | be_dws_cpu_to_le(hdr, sizeof(*hdr)); | ||
850 | |||
851 | BUG_ON(txo->sent_skb_list[head]); | ||
852 | txo->sent_skb_list[head] = skb; | ||
853 | txo->last_req_hdr = head; | ||
854 | atomic_add(num_frags, &txq->used); | ||
855 | txo->last_req_wrb_cnt = num_frags; | ||
856 | txo->pend_wrb_cnt += num_frags; | ||
857 | } | ||
858 | |||
859 | /* Setup a WRB fragment (buffer descriptor) for xmit */ | ||
860 | static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr, | ||
861 | int len) | ||
862 | { | ||
863 | struct be_eth_wrb *wrb; | ||
864 | struct be_queue_info *txq = &txo->q; | ||
865 | |||
866 | wrb = queue_head_node(txq); | ||
867 | wrb_fill(wrb, busaddr, len); | ||
868 | queue_head_inc(txq); | ||
869 | } | ||
870 | |||
871 | /* Bring the queue back to the state it was in before be_xmit_enqueue() routine | ||
872 | * was invoked. The producer index is restored to the previous packet and the | ||
873 | * WRBs of the current packet are unmapped. Invoked to handle tx setup errors. | ||
874 | */ | ||
875 | static void be_xmit_restore(struct be_adapter *adapter, | ||
876 | struct be_tx_obj *txo, u16 head, bool map_single, | ||
877 | u32 copied) | ||
878 | { | ||
879 | struct device *dev; | ||
880 | struct be_eth_wrb *wrb; | ||
881 | struct be_queue_info *txq = &txo->q; | ||
882 | |||
883 | dev = &adapter->pdev->dev; | ||
884 | txq->head = head; | ||
885 | |||
886 | /* skip the first wrb (hdr); it's not mapped */ | ||
887 | queue_head_inc(txq); | ||
888 | while (copied) { | ||
889 | wrb = queue_head_node(txq); | ||
890 | unmap_tx_frag(dev, wrb, map_single); | ||
891 | map_single = false; | ||
892 | copied -= le32_to_cpu(wrb->frag_len); | ||
893 | queue_head_inc(txq); | ||
894 | } | ||
895 | |||
896 | txq->head = head; | ||
897 | } | ||
898 | |||
899 | /* Enqueue the given packet for transmit. This routine allocates WRBs for the | ||
900 | * packet, dma maps the packet buffers and sets up the WRBs. Returns the number | ||
901 | * of WRBs used up by the packet. | ||
902 | */ | ||
792 | static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo, | 903 | static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo, |
793 | struct sk_buff *skb, bool skip_hw_vlan) | 904 | struct sk_buff *skb, |
905 | struct be_wrb_params *wrb_params) | ||
794 | { | 906 | { |
795 | u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb); | 907 | u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb); |
796 | struct device *dev = &adapter->pdev->dev; | 908 | struct device *dev = &adapter->pdev->dev; |
797 | struct be_queue_info *txq = &txo->q; | 909 | struct be_queue_info *txq = &txo->q; |
798 | struct be_eth_hdr_wrb *hdr; | ||
799 | bool map_single = false; | 910 | bool map_single = false; |
800 | struct be_eth_wrb *wrb; | ||
801 | dma_addr_t busaddr; | ||
802 | u16 head = txq->head; | 911 | u16 head = txq->head; |
912 | dma_addr_t busaddr; | ||
913 | int len; | ||
803 | 914 | ||
804 | hdr = queue_head_node(txq); | 915 | head = be_tx_get_wrb_hdr(txo); |
805 | wrb_fill_hdr(adapter, hdr, skb, wrb_cnt, skb->len, skip_hw_vlan); | ||
806 | be_dws_cpu_to_le(hdr, sizeof(*hdr)); | ||
807 | |||
808 | queue_head_inc(txq); | ||
809 | 916 | ||
810 | if (skb->len > skb->data_len) { | 917 | if (skb->len > skb->data_len) { |
811 | int len = skb_headlen(skb); | 918 | len = skb_headlen(skb); |
812 | 919 | ||
813 | busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE); | 920 | busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE); |
814 | if (dma_mapping_error(dev, busaddr)) | 921 | if (dma_mapping_error(dev, busaddr)) |
815 | goto dma_err; | 922 | goto dma_err; |
816 | map_single = true; | 923 | map_single = true; |
817 | wrb = queue_head_node(txq); | 924 | be_tx_setup_wrb_frag(txo, busaddr, len); |
818 | wrb_fill(wrb, busaddr, len); | ||
819 | queue_head_inc(txq); | ||
820 | copied += len; | 925 | copied += len; |
821 | } | 926 | } |
822 | 927 | ||
823 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | 928 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
824 | const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; | 929 | const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; |
930 | len = skb_frag_size(frag); | ||
825 | 931 | ||
826 | busaddr = skb_frag_dma_map(dev, frag, 0, | 932 | busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE); |
827 | skb_frag_size(frag), DMA_TO_DEVICE); | ||
828 | if (dma_mapping_error(dev, busaddr)) | 933 | if (dma_mapping_error(dev, busaddr)) |
829 | goto dma_err; | 934 | goto dma_err; |
830 | wrb = queue_head_node(txq); | 935 | be_tx_setup_wrb_frag(txo, busaddr, len); |
831 | wrb_fill(wrb, busaddr, skb_frag_size(frag)); | 936 | copied += len; |
832 | queue_head_inc(txq); | ||
833 | copied += skb_frag_size(frag); | ||
834 | } | 937 | } |
835 | 938 | ||
836 | BUG_ON(txo->sent_skb_list[head]); | 939 | be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head); |
837 | txo->sent_skb_list[head] = skb; | ||
838 | txo->last_req_hdr = head; | ||
839 | atomic_add(wrb_cnt, &txq->used); | ||
840 | txo->last_req_wrb_cnt = wrb_cnt; | ||
841 | txo->pend_wrb_cnt += wrb_cnt; | ||
842 | 940 | ||
843 | be_tx_stats_update(txo, skb); | 941 | be_tx_stats_update(txo, skb); |
844 | return wrb_cnt; | 942 | return wrb_cnt; |
845 | 943 | ||
846 | dma_err: | 944 | dma_err: |
847 | /* Bring the queue back to the state it was in before this | 945 | adapter->drv_stats.dma_map_errors++; |
848 | * routine was invoked. | 946 | be_xmit_restore(adapter, txo, head, map_single, copied); |
849 | */ | ||
850 | txq->head = head; | ||
851 | /* skip the first wrb (hdr); it's not mapped */ | ||
852 | queue_head_inc(txq); | ||
853 | while (copied) { | ||
854 | wrb = queue_head_node(txq); | ||
855 | unmap_tx_frag(dev, wrb, map_single); | ||
856 | map_single = false; | ||
857 | copied -= le32_to_cpu(wrb->frag_len); | ||
858 | adapter->drv_stats.dma_map_errors++; | ||
859 | queue_head_inc(txq); | ||
860 | } | ||
861 | txq->head = head; | ||
862 | return 0; | 947 | return 0; |
863 | } | 948 | } |
864 | 949 | ||
@@ -869,7 +954,8 @@ static inline int qnq_async_evt_rcvd(struct be_adapter *adapter) | |||
869 | 954 | ||
870 | static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter, | 955 | static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter, |
871 | struct sk_buff *skb, | 956 | struct sk_buff *skb, |
872 | bool *skip_hw_vlan) | 957 | struct be_wrb_params |
958 | *wrb_params) | ||
873 | { | 959 | { |
874 | u16 vlan_tag = 0; | 960 | u16 vlan_tag = 0; |
875 | 961 | ||
@@ -886,8 +972,7 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter, | |||
886 | /* f/w workaround to set skip_hw_vlan = 1, informs the F/W to | 972 | /* f/w workaround to set skip_hw_vlan = 1, informs the F/W to |
887 | * skip VLAN insertion | 973 | * skip VLAN insertion |
888 | */ | 974 | */ |
889 | if (skip_hw_vlan) | 975 | BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1); |
890 | *skip_hw_vlan = true; | ||
891 | } | 976 | } |
892 | 977 | ||
893 | if (vlan_tag) { | 978 | if (vlan_tag) { |
@@ -905,8 +990,7 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter, | |||
905 | vlan_tag); | 990 | vlan_tag); |
906 | if (unlikely(!skb)) | 991 | if (unlikely(!skb)) |
907 | return skb; | 992 | return skb; |
908 | if (skip_hw_vlan) | 993 | BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1); |
909 | *skip_hw_vlan = true; | ||
910 | } | 994 | } |
911 | 995 | ||
912 | return skb; | 996 | return skb; |
@@ -946,7 +1030,8 @@ static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb) | |||
946 | 1030 | ||
947 | static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter, | 1031 | static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter, |
948 | struct sk_buff *skb, | 1032 | struct sk_buff *skb, |
949 | bool *skip_hw_vlan) | 1033 | struct be_wrb_params |
1034 | *wrb_params) | ||
950 | { | 1035 | { |
951 | struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; | 1036 | struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; |
952 | unsigned int eth_hdr_len; | 1037 | unsigned int eth_hdr_len; |
@@ -970,7 +1055,7 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter, | |||
970 | */ | 1055 | */ |
971 | if (be_pvid_tagging_enabled(adapter) && | 1056 | if (be_pvid_tagging_enabled(adapter) && |
972 | veh->h_vlan_proto == htons(ETH_P_8021Q)) | 1057 | veh->h_vlan_proto == htons(ETH_P_8021Q)) |
973 | *skip_hw_vlan = true; | 1058 | BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1); |
974 | 1059 | ||
975 | /* HW has a bug wherein it will calculate CSUM for VLAN | 1060 | /* HW has a bug wherein it will calculate CSUM for VLAN |
976 | * pkts even though it is disabled. | 1061 | * pkts even though it is disabled. |
@@ -978,7 +1063,7 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter, | |||
978 | */ | 1063 | */ |
979 | if (skb->ip_summed != CHECKSUM_PARTIAL && | 1064 | if (skb->ip_summed != CHECKSUM_PARTIAL && |
980 | skb_vlan_tag_present(skb)) { | 1065 | skb_vlan_tag_present(skb)) { |
981 | skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan); | 1066 | skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params); |
982 | if (unlikely(!skb)) | 1067 | if (unlikely(!skb)) |
983 | goto err; | 1068 | goto err; |
984 | } | 1069 | } |
@@ -1000,7 +1085,7 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter, | |||
1000 | */ | 1085 | */ |
1001 | if (be_ipv6_tx_stall_chk(adapter, skb) && | 1086 | if (be_ipv6_tx_stall_chk(adapter, skb) && |
1002 | be_vlan_tag_tx_chk(adapter, skb)) { | 1087 | be_vlan_tag_tx_chk(adapter, skb)) { |
1003 | skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan); | 1088 | skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params); |
1004 | if (unlikely(!skb)) | 1089 | if (unlikely(!skb)) |
1005 | goto err; | 1090 | goto err; |
1006 | } | 1091 | } |
@@ -1014,7 +1099,7 @@ err: | |||
1014 | 1099 | ||
1015 | static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter, | 1100 | static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter, |
1016 | struct sk_buff *skb, | 1101 | struct sk_buff *skb, |
1017 | bool *skip_hw_vlan) | 1102 | struct be_wrb_params *wrb_params) |
1018 | { | 1103 | { |
1019 | /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or | 1104 | /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or |
1020 | * less may cause a transmit stall on that port. So the work-around is | 1105 | * less may cause a transmit stall on that port. So the work-around is |
@@ -1026,7 +1111,7 @@ static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter, | |||
1026 | } | 1111 | } |
1027 | 1112 | ||
1028 | if (BEx_chip(adapter) || lancer_chip(adapter)) { | 1113 | if (BEx_chip(adapter) || lancer_chip(adapter)) { |
1029 | skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan); | 1114 | skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params); |
1030 | if (!skb) | 1115 | if (!skb) |
1031 | return NULL; | 1116 | return NULL; |
1032 | } | 1117 | } |
@@ -1060,24 +1145,26 @@ static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo) | |||
1060 | 1145 | ||
1061 | static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev) | 1146 | static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev) |
1062 | { | 1147 | { |
1063 | bool skip_hw_vlan = false, flush = !skb->xmit_more; | ||
1064 | struct be_adapter *adapter = netdev_priv(netdev); | 1148 | struct be_adapter *adapter = netdev_priv(netdev); |
1065 | u16 q_idx = skb_get_queue_mapping(skb); | 1149 | u16 q_idx = skb_get_queue_mapping(skb); |
1066 | struct be_tx_obj *txo = &adapter->tx_obj[q_idx]; | 1150 | struct be_tx_obj *txo = &adapter->tx_obj[q_idx]; |
1067 | struct be_queue_info *txq = &txo->q; | 1151 | struct be_wrb_params wrb_params = { 0 }; |
1152 | bool flush = !skb->xmit_more; | ||
1068 | u16 wrb_cnt; | 1153 | u16 wrb_cnt; |
1069 | 1154 | ||
1070 | skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan); | 1155 | skb = be_xmit_workarounds(adapter, skb, &wrb_params); |
1071 | if (unlikely(!skb)) | 1156 | if (unlikely(!skb)) |
1072 | goto drop; | 1157 | goto drop; |
1073 | 1158 | ||
1074 | wrb_cnt = be_xmit_enqueue(adapter, txo, skb, skip_hw_vlan); | 1159 | be_get_wrb_params_from_skb(adapter, skb, &wrb_params); |
1160 | |||
1161 | wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params); | ||
1075 | if (unlikely(!wrb_cnt)) { | 1162 | if (unlikely(!wrb_cnt)) { |
1076 | dev_kfree_skb_any(skb); | 1163 | dev_kfree_skb_any(skb); |
1077 | goto drop; | 1164 | goto drop; |
1078 | } | 1165 | } |
1079 | 1166 | ||
1080 | if ((atomic_read(&txq->used) + BE_MAX_TX_FRAG_COUNT) >= txq->len) { | 1167 | if (be_is_txq_full(txo)) { |
1081 | netif_stop_subqueue(netdev, q_idx); | 1168 | netif_stop_subqueue(netdev, q_idx); |
1082 | tx_stats(txo)->tx_stops++; | 1169 | tx_stats(txo)->tx_stops++; |
1083 | } | 1170 | } |
@@ -1991,18 +2078,23 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed) | |||
1991 | } | 2078 | } |
1992 | } | 2079 | } |
1993 | 2080 | ||
1994 | static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq) | 2081 | static struct be_tx_compl_info *be_tx_compl_get(struct be_tx_obj *txo) |
1995 | { | 2082 | { |
1996 | struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq); | 2083 | struct be_queue_info *tx_cq = &txo->cq; |
2084 | struct be_tx_compl_info *txcp = &txo->txcp; | ||
2085 | struct be_eth_tx_compl *compl = queue_tail_node(tx_cq); | ||
1997 | 2086 | ||
1998 | if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0) | 2087 | if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0) |
1999 | return NULL; | 2088 | return NULL; |
2000 | 2089 | ||
2090 | /* Ensure load ordering of valid bit dword and other dwords below */ | ||
2001 | rmb(); | 2091 | rmb(); |
2002 | be_dws_le_to_cpu(txcp, sizeof(*txcp)); | 2092 | be_dws_le_to_cpu(compl, sizeof(*compl)); |
2003 | 2093 | ||
2004 | txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0; | 2094 | txcp->status = GET_TX_COMPL_BITS(status, compl); |
2095 | txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl); | ||
2005 | 2096 | ||
2097 | compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0; | ||
2006 | queue_tail_inc(tx_cq); | 2098 | queue_tail_inc(tx_cq); |
2007 | return txcp; | 2099 | return txcp; |
2008 | } | 2100 | } |
@@ -2123,9 +2215,9 @@ static void be_tx_compl_clean(struct be_adapter *adapter) | |||
2123 | { | 2215 | { |
2124 | u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0; | 2216 | u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0; |
2125 | struct device *dev = &adapter->pdev->dev; | 2217 | struct device *dev = &adapter->pdev->dev; |
2126 | struct be_tx_obj *txo; | 2218 | struct be_tx_compl_info *txcp; |
2127 | struct be_queue_info *txq; | 2219 | struct be_queue_info *txq; |
2128 | struct be_eth_tx_compl *txcp; | 2220 | struct be_tx_obj *txo; |
2129 | int i, pending_txqs; | 2221 | int i, pending_txqs; |
2130 | 2222 | ||
2131 | /* Stop polling for compls when HW has been silent for 10ms */ | 2223 | /* Stop polling for compls when HW has been silent for 10ms */ |
@@ -2136,10 +2228,10 @@ static void be_tx_compl_clean(struct be_adapter *adapter) | |||
2136 | cmpl = 0; | 2228 | cmpl = 0; |
2137 | num_wrbs = 0; | 2229 | num_wrbs = 0; |
2138 | txq = &txo->q; | 2230 | txq = &txo->q; |
2139 | while ((txcp = be_tx_compl_get(&txo->cq))) { | 2231 | while ((txcp = be_tx_compl_get(txo))) { |
2140 | end_idx = GET_TX_COMPL_BITS(wrb_index, txcp); | 2232 | num_wrbs += |
2141 | num_wrbs += be_tx_compl_process(adapter, txo, | 2233 | be_tx_compl_process(adapter, txo, |
2142 | end_idx); | 2234 | txcp->end_index); |
2143 | cmpl++; | 2235 | cmpl++; |
2144 | } | 2236 | } |
2145 | if (cmpl) { | 2237 | if (cmpl) { |
@@ -2147,7 +2239,7 @@ static void be_tx_compl_clean(struct be_adapter *adapter) | |||
2147 | atomic_sub(num_wrbs, &txq->used); | 2239 | atomic_sub(num_wrbs, &txq->used); |
2148 | timeo = 0; | 2240 | timeo = 0; |
2149 | } | 2241 | } |
2150 | if (atomic_read(&txq->used) == txo->pend_wrb_cnt) | 2242 | if (!be_is_tx_compl_pending(txo)) |
2151 | pending_txqs--; | 2243 | pending_txqs--; |
2152 | } | 2244 | } |
2153 | 2245 | ||
@@ -2498,7 +2590,7 @@ loop_continue: | |||
2498 | return work_done; | 2590 | return work_done; |
2499 | } | 2591 | } |
2500 | 2592 | ||
2501 | static inline void be_update_tx_err(struct be_tx_obj *txo, u32 status) | 2593 | static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status) |
2502 | { | 2594 | { |
2503 | switch (status) { | 2595 | switch (status) { |
2504 | case BE_TX_COMP_HDR_PARSE_ERR: | 2596 | case BE_TX_COMP_HDR_PARSE_ERR: |
@@ -2513,7 +2605,7 @@ static inline void be_update_tx_err(struct be_tx_obj *txo, u32 status) | |||
2513 | } | 2605 | } |
2514 | } | 2606 | } |
2515 | 2607 | ||
2516 | static inline void lancer_update_tx_err(struct be_tx_obj *txo, u32 status) | 2608 | static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status) |
2517 | { | 2609 | { |
2518 | switch (status) { | 2610 | switch (status) { |
2519 | case LANCER_TX_COMP_LSO_ERR: | 2611 | case LANCER_TX_COMP_LSO_ERR: |
@@ -2538,22 +2630,18 @@ static inline void lancer_update_tx_err(struct be_tx_obj *txo, u32 status) | |||
2538 | static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo, | 2630 | static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo, |
2539 | int idx) | 2631 | int idx) |
2540 | { | 2632 | { |
2541 | struct be_eth_tx_compl *txcp; | ||
2542 | int num_wrbs = 0, work_done = 0; | 2633 | int num_wrbs = 0, work_done = 0; |
2543 | u32 compl_status; | 2634 | struct be_tx_compl_info *txcp; |
2544 | u16 last_idx; | ||
2545 | 2635 | ||
2546 | while ((txcp = be_tx_compl_get(&txo->cq))) { | 2636 | while ((txcp = be_tx_compl_get(txo))) { |
2547 | last_idx = GET_TX_COMPL_BITS(wrb_index, txcp); | 2637 | num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index); |
2548 | num_wrbs += be_tx_compl_process(adapter, txo, last_idx); | ||
2549 | work_done++; | 2638 | work_done++; |
2550 | 2639 | ||
2551 | compl_status = GET_TX_COMPL_BITS(status, txcp); | 2640 | if (txcp->status) { |
2552 | if (compl_status) { | ||
2553 | if (lancer_chip(adapter)) | 2641 | if (lancer_chip(adapter)) |
2554 | lancer_update_tx_err(txo, compl_status); | 2642 | lancer_update_tx_err(txo, txcp->status); |
2555 | else | 2643 | else |
2556 | be_update_tx_err(txo, compl_status); | 2644 | be_update_tx_err(txo, txcp->status); |
2557 | } | 2645 | } |
2558 | } | 2646 | } |
2559 | 2647 | ||
@@ -2564,7 +2652,7 @@ static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo, | |||
2564 | /* As Tx wrbs have been freed up, wake up netdev queue | 2652 | /* As Tx wrbs have been freed up, wake up netdev queue |
2565 | * if it was stopped due to lack of tx wrbs. */ | 2653 | * if it was stopped due to lack of tx wrbs. */ |
2566 | if (__netif_subqueue_stopped(adapter->netdev, idx) && | 2654 | if (__netif_subqueue_stopped(adapter->netdev, idx) && |
2567 | atomic_read(&txo->q.used) < txo->q.len / 2) { | 2655 | be_can_txq_wake(txo)) { |
2568 | netif_wake_subqueue(adapter->netdev, idx); | 2656 | netif_wake_subqueue(adapter->netdev, idx); |
2569 | } | 2657 | } |
2570 | 2658 | ||
@@ -2756,12 +2844,12 @@ void be_detect_error(struct be_adapter *adapter) | |||
2756 | sliport_err2 = ioread32(adapter->db + | 2844 | sliport_err2 = ioread32(adapter->db + |
2757 | SLIPORT_ERROR2_OFFSET); | 2845 | SLIPORT_ERROR2_OFFSET); |
2758 | adapter->hw_error = true; | 2846 | adapter->hw_error = true; |
2847 | error_detected = true; | ||
2759 | /* Do not log error messages if its a FW reset */ | 2848 | /* Do not log error messages if its a FW reset */ |
2760 | if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 && | 2849 | if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 && |
2761 | sliport_err2 == SLIPORT_ERROR_FW_RESET2) { | 2850 | sliport_err2 == SLIPORT_ERROR_FW_RESET2) { |
2762 | dev_info(dev, "Firmware update in progress\n"); | 2851 | dev_info(dev, "Firmware update in progress\n"); |
2763 | } else { | 2852 | } else { |
2764 | error_detected = true; | ||
2765 | dev_err(dev, "Error detected in the card\n"); | 2853 | dev_err(dev, "Error detected in the card\n"); |
2766 | dev_err(dev, "ERR: sliport status 0x%x\n", | 2854 | dev_err(dev, "ERR: sliport status 0x%x\n", |
2767 | sliport_status); | 2855 | sliport_status); |
@@ -3130,7 +3218,7 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable) | |||
3130 | int status = 0; | 3218 | int status = 0; |
3131 | u8 mac[ETH_ALEN]; | 3219 | u8 mac[ETH_ALEN]; |
3132 | 3220 | ||
3133 | memset(mac, 0, ETH_ALEN); | 3221 | eth_zero_addr(mac); |
3134 | 3222 | ||
3135 | cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config); | 3223 | cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config); |
3136 | cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, | 3224 | cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, |
@@ -3275,6 +3363,14 @@ static void be_cancel_worker(struct be_adapter *adapter) | |||
3275 | } | 3363 | } |
3276 | } | 3364 | } |
3277 | 3365 | ||
3366 | static void be_cancel_err_detection(struct be_adapter *adapter) | ||
3367 | { | ||
3368 | if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) { | ||
3369 | cancel_delayed_work_sync(&adapter->be_err_detection_work); | ||
3370 | adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED; | ||
3371 | } | ||
3372 | } | ||
3373 | |||
3278 | static void be_mac_clear(struct be_adapter *adapter) | 3374 | static void be_mac_clear(struct be_adapter *adapter) |
3279 | { | 3375 | { |
3280 | if (adapter->pmac_id) { | 3376 | if (adapter->pmac_id) { |
@@ -3683,13 +3779,25 @@ static void be_sriov_config(struct be_adapter *adapter) | |||
3683 | 3779 | ||
3684 | static int be_get_config(struct be_adapter *adapter) | 3780 | static int be_get_config(struct be_adapter *adapter) |
3685 | { | 3781 | { |
3782 | int status, level; | ||
3686 | u16 profile_id; | 3783 | u16 profile_id; |
3687 | int status; | 3784 | |
3785 | status = be_cmd_get_cntl_attributes(adapter); | ||
3786 | if (status) | ||
3787 | return status; | ||
3688 | 3788 | ||
3689 | status = be_cmd_query_fw_cfg(adapter); | 3789 | status = be_cmd_query_fw_cfg(adapter); |
3690 | if (status) | 3790 | if (status) |
3691 | return status; | 3791 | return status; |
3692 | 3792 | ||
3793 | if (BEx_chip(adapter)) { | ||
3794 | level = be_cmd_get_fw_log_level(adapter); | ||
3795 | adapter->msg_enable = | ||
3796 | level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0; | ||
3797 | } | ||
3798 | |||
3799 | be_cmd_get_acpi_wol_cap(adapter); | ||
3800 | |||
3693 | be_cmd_query_port_name(adapter); | 3801 | be_cmd_query_port_name(adapter); |
3694 | 3802 | ||
3695 | if (be_physfn(adapter)) { | 3803 | if (be_physfn(adapter)) { |
@@ -3747,6 +3855,13 @@ static void be_schedule_worker(struct be_adapter *adapter) | |||
3747 | adapter->flags |= BE_FLAGS_WORKER_SCHEDULED; | 3855 | adapter->flags |= BE_FLAGS_WORKER_SCHEDULED; |
3748 | } | 3856 | } |
3749 | 3857 | ||
3858 | static void be_schedule_err_detection(struct be_adapter *adapter) | ||
3859 | { | ||
3860 | schedule_delayed_work(&adapter->be_err_detection_work, | ||
3861 | msecs_to_jiffies(1000)); | ||
3862 | adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED; | ||
3863 | } | ||
3864 | |||
3750 | static int be_setup_queues(struct be_adapter *adapter) | 3865 | static int be_setup_queues(struct be_adapter *adapter) |
3751 | { | 3866 | { |
3752 | struct net_device *netdev = adapter->netdev; | 3867 | struct net_device *netdev = adapter->netdev; |
@@ -3829,11 +3944,53 @@ static inline int fw_major_num(const char *fw_ver) | |||
3829 | return fw_major; | 3944 | return fw_major; |
3830 | } | 3945 | } |
3831 | 3946 | ||
3947 | /* If any VFs are already enabled don't FLR the PF */ | ||
3948 | static bool be_reset_required(struct be_adapter *adapter) | ||
3949 | { | ||
3950 | return pci_num_vf(adapter->pdev) ? false : true; | ||
3951 | } | ||
3952 | |||
3953 | /* Wait for the FW to be ready and perform the required initialization */ | ||
3954 | static int be_func_init(struct be_adapter *adapter) | ||
3955 | { | ||
3956 | int status; | ||
3957 | |||
3958 | status = be_fw_wait_ready(adapter); | ||
3959 | if (status) | ||
3960 | return status; | ||
3961 | |||
3962 | if (be_reset_required(adapter)) { | ||
3963 | status = be_cmd_reset_function(adapter); | ||
3964 | if (status) | ||
3965 | return status; | ||
3966 | |||
3967 | /* Wait for interrupts to quiesce after an FLR */ | ||
3968 | msleep(100); | ||
3969 | |||
3970 | /* We can clear all errors when function reset succeeds */ | ||
3971 | be_clear_all_error(adapter); | ||
3972 | } | ||
3973 | |||
3974 | /* Tell FW we're ready to fire cmds */ | ||
3975 | status = be_cmd_fw_init(adapter); | ||
3976 | if (status) | ||
3977 | return status; | ||
3978 | |||
3979 | /* Allow interrupts for other ULPs running on NIC function */ | ||
3980 | be_intr_set(adapter, true); | ||
3981 | |||
3982 | return 0; | ||
3983 | } | ||
3984 | |||
3832 | static int be_setup(struct be_adapter *adapter) | 3985 | static int be_setup(struct be_adapter *adapter) |
3833 | { | 3986 | { |
3834 | struct device *dev = &adapter->pdev->dev; | 3987 | struct device *dev = &adapter->pdev->dev; |
3835 | int status; | 3988 | int status; |
3836 | 3989 | ||
3990 | status = be_func_init(adapter); | ||
3991 | if (status) | ||
3992 | return status; | ||
3993 | |||
3837 | be_setup_init(adapter); | 3994 | be_setup_init(adapter); |
3838 | 3995 | ||
3839 | if (!lancer_chip(adapter)) | 3996 | if (!lancer_chip(adapter)) |
@@ -3879,8 +4036,6 @@ static int be_setup(struct be_adapter *adapter) | |||
3879 | 4036 | ||
3880 | be_set_rx_mode(adapter->netdev); | 4037 | be_set_rx_mode(adapter->netdev); |
3881 | 4038 | ||
3882 | be_cmd_get_acpi_wol_cap(adapter); | ||
3883 | |||
3884 | status = be_cmd_set_flow_control(adapter, adapter->tx_fc, | 4039 | status = be_cmd_set_flow_control(adapter, adapter->tx_fc, |
3885 | adapter->rx_fc); | 4040 | adapter->rx_fc); |
3886 | if (status) | 4041 | if (status) |
@@ -4790,6 +4945,142 @@ static void be_netdev_init(struct net_device *netdev) | |||
4790 | netdev->ethtool_ops = &be_ethtool_ops; | 4945 | netdev->ethtool_ops = &be_ethtool_ops; |
4791 | } | 4946 | } |
4792 | 4947 | ||
4948 | static void be_cleanup(struct be_adapter *adapter) | ||
4949 | { | ||
4950 | struct net_device *netdev = adapter->netdev; | ||
4951 | |||
4952 | rtnl_lock(); | ||
4953 | netif_device_detach(netdev); | ||
4954 | if (netif_running(netdev)) | ||
4955 | be_close(netdev); | ||
4956 | rtnl_unlock(); | ||
4957 | |||
4958 | be_clear(adapter); | ||
4959 | } | ||
4960 | |||
4961 | static int be_resume(struct be_adapter *adapter) | ||
4962 | { | ||
4963 | struct net_device *netdev = adapter->netdev; | ||
4964 | int status; | ||
4965 | |||
4966 | status = be_setup(adapter); | ||
4967 | if (status) | ||
4968 | return status; | ||
4969 | |||
4970 | if (netif_running(netdev)) { | ||
4971 | status = be_open(netdev); | ||
4972 | if (status) | ||
4973 | return status; | ||
4974 | } | ||
4975 | |||
4976 | netif_device_attach(netdev); | ||
4977 | |||
4978 | return 0; | ||
4979 | } | ||
4980 | |||
4981 | static int be_err_recover(struct be_adapter *adapter) | ||
4982 | { | ||
4983 | struct device *dev = &adapter->pdev->dev; | ||
4984 | int status; | ||
4985 | |||
4986 | status = be_resume(adapter); | ||
4987 | if (status) | ||
4988 | goto err; | ||
4989 | |||
4990 | dev_info(dev, "Adapter recovery successful\n"); | ||
4991 | return 0; | ||
4992 | err: | ||
4993 | if (be_physfn(adapter)) | ||
4994 | dev_err(dev, "Adapter recovery failed\n"); | ||
4995 | else | ||
4996 | dev_err(dev, "Re-trying adapter recovery\n"); | ||
4997 | |||
4998 | return status; | ||
4999 | } | ||
5000 | |||
5001 | static void be_err_detection_task(struct work_struct *work) | ||
5002 | { | ||
5003 | struct be_adapter *adapter = | ||
5004 | container_of(work, struct be_adapter, | ||
5005 | be_err_detection_work.work); | ||
5006 | int status = 0; | ||
5007 | |||
5008 | be_detect_error(adapter); | ||
5009 | |||
5010 | if (adapter->hw_error) { | ||
5011 | be_cleanup(adapter); | ||
5012 | |||
5013 | /* As of now error recovery support is in Lancer only */ | ||
5014 | if (lancer_chip(adapter)) | ||
5015 | status = be_err_recover(adapter); | ||
5016 | } | ||
5017 | |||
5018 | /* Always attempt recovery on VFs */ | ||
5019 | if (!status || be_virtfn(adapter)) | ||
5020 | be_schedule_err_detection(adapter); | ||
5021 | } | ||
5022 | |||
5023 | static void be_log_sfp_info(struct be_adapter *adapter) | ||
5024 | { | ||
5025 | int status; | ||
5026 | |||
5027 | status = be_cmd_query_sfp_info(adapter); | ||
5028 | if (!status) { | ||
5029 | dev_err(&adapter->pdev->dev, | ||
5030 | "Unqualified SFP+ detected on %c from %s part no: %s", | ||
5031 | adapter->port_name, adapter->phy.vendor_name, | ||
5032 | adapter->phy.vendor_pn); | ||
5033 | } | ||
5034 | adapter->flags &= ~BE_FLAGS_EVT_INCOMPATIBLE_SFP; | ||
5035 | } | ||
5036 | |||
5037 | static void be_worker(struct work_struct *work) | ||
5038 | { | ||
5039 | struct be_adapter *adapter = | ||
5040 | container_of(work, struct be_adapter, work.work); | ||
5041 | struct be_rx_obj *rxo; | ||
5042 | int i; | ||
5043 | |||
5044 | /* when interrupts are not yet enabled, just reap any pending | ||
5045 | * mcc completions | ||
5046 | */ | ||
5047 | if (!netif_running(adapter->netdev)) { | ||
5048 | local_bh_disable(); | ||
5049 | be_process_mcc(adapter); | ||
5050 | local_bh_enable(); | ||
5051 | goto reschedule; | ||
5052 | } | ||
5053 | |||
5054 | if (!adapter->stats_cmd_sent) { | ||
5055 | if (lancer_chip(adapter)) | ||
5056 | lancer_cmd_get_pport_stats(adapter, | ||
5057 | &adapter->stats_cmd); | ||
5058 | else | ||
5059 | be_cmd_get_stats(adapter, &adapter->stats_cmd); | ||
5060 | } | ||
5061 | |||
5062 | if (be_physfn(adapter) && | ||
5063 | MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0) | ||
5064 | be_cmd_get_die_temperature(adapter); | ||
5065 | |||
5066 | for_all_rx_queues(adapter, rxo, i) { | ||
5067 | /* Replenish RX-queues starved due to memory | ||
5068 | * allocation failures. | ||
5069 | */ | ||
5070 | if (rxo->rx_post_starved) | ||
5071 | be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST); | ||
5072 | } | ||
5073 | |||
5074 | be_eqd_update(adapter); | ||
5075 | |||
5076 | if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP) | ||
5077 | be_log_sfp_info(adapter); | ||
5078 | |||
5079 | reschedule: | ||
5080 | adapter->work_counter++; | ||
5081 | schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000)); | ||
5082 | } | ||
5083 | |||
4793 | static void be_unmap_pci_bars(struct be_adapter *adapter) | 5084 | static void be_unmap_pci_bars(struct be_adapter *adapter) |
4794 | { | 5085 | { |
4795 | if (adapter->csr) | 5086 | if (adapter->csr) |
@@ -4821,6 +5112,12 @@ static int be_roce_map_pci_bars(struct be_adapter *adapter) | |||
4821 | static int be_map_pci_bars(struct be_adapter *adapter) | 5112 | static int be_map_pci_bars(struct be_adapter *adapter) |
4822 | { | 5113 | { |
4823 | u8 __iomem *addr; | 5114 | u8 __iomem *addr; |
5115 | u32 sli_intf; | ||
5116 | |||
5117 | pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf); | ||
5118 | adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >> | ||
5119 | SLI_INTF_FAMILY_SHIFT; | ||
5120 | adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0; | ||
4824 | 5121 | ||
4825 | if (BEx_chip(adapter) && be_physfn(adapter)) { | 5122 | if (BEx_chip(adapter) && be_physfn(adapter)) { |
4826 | adapter->csr = pci_iomap(adapter->pdev, 2, 0); | 5123 | adapter->csr = pci_iomap(adapter->pdev, 2, 0); |
@@ -4842,109 +5139,94 @@ pci_map_err: | |||
4842 | return -ENOMEM; | 5139 | return -ENOMEM; |
4843 | } | 5140 | } |
4844 | 5141 | ||
4845 | static void be_ctrl_cleanup(struct be_adapter *adapter) | 5142 | static void be_drv_cleanup(struct be_adapter *adapter) |
4846 | { | 5143 | { |
4847 | struct be_dma_mem *mem = &adapter->mbox_mem_alloced; | 5144 | struct be_dma_mem *mem = &adapter->mbox_mem_alloced; |
4848 | 5145 | struct device *dev = &adapter->pdev->dev; | |
4849 | be_unmap_pci_bars(adapter); | ||
4850 | 5146 | ||
4851 | if (mem->va) | 5147 | if (mem->va) |
4852 | dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va, | 5148 | dma_free_coherent(dev, mem->size, mem->va, mem->dma); |
4853 | mem->dma); | ||
4854 | 5149 | ||
4855 | mem = &adapter->rx_filter; | 5150 | mem = &adapter->rx_filter; |
4856 | if (mem->va) | 5151 | if (mem->va) |
4857 | dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va, | 5152 | dma_free_coherent(dev, mem->size, mem->va, mem->dma); |
4858 | mem->dma); | 5153 | |
5154 | mem = &adapter->stats_cmd; | ||
5155 | if (mem->va) | ||
5156 | dma_free_coherent(dev, mem->size, mem->va, mem->dma); | ||
4859 | } | 5157 | } |
4860 | 5158 | ||
4861 | static int be_ctrl_init(struct be_adapter *adapter) | 5159 | /* Allocate and initialize various fields in be_adapter struct */ |
5160 | static int be_drv_init(struct be_adapter *adapter) | ||
4862 | { | 5161 | { |
4863 | struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced; | 5162 | struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced; |
4864 | struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem; | 5163 | struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem; |
4865 | struct be_dma_mem *rx_filter = &adapter->rx_filter; | 5164 | struct be_dma_mem *rx_filter = &adapter->rx_filter; |
4866 | u32 sli_intf; | 5165 | struct be_dma_mem *stats_cmd = &adapter->stats_cmd; |
4867 | int status; | 5166 | struct device *dev = &adapter->pdev->dev; |
4868 | 5167 | int status = 0; | |
4869 | pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf); | ||
4870 | adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >> | ||
4871 | SLI_INTF_FAMILY_SHIFT; | ||
4872 | adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0; | ||
4873 | |||
4874 | status = be_map_pci_bars(adapter); | ||
4875 | if (status) | ||
4876 | goto done; | ||
4877 | 5168 | ||
4878 | mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16; | 5169 | mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16; |
4879 | mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev, | 5170 | mbox_mem_alloc->va = dma_alloc_coherent(dev, mbox_mem_alloc->size, |
4880 | mbox_mem_alloc->size, | ||
4881 | &mbox_mem_alloc->dma, | 5171 | &mbox_mem_alloc->dma, |
4882 | GFP_KERNEL); | 5172 | GFP_KERNEL); |
4883 | if (!mbox_mem_alloc->va) { | 5173 | if (!mbox_mem_alloc->va) |
4884 | status = -ENOMEM; | 5174 | return -ENOMEM; |
4885 | goto unmap_pci_bars; | 5175 | |
4886 | } | ||
4887 | mbox_mem_align->size = sizeof(struct be_mcc_mailbox); | 5176 | mbox_mem_align->size = sizeof(struct be_mcc_mailbox); |
4888 | mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16); | 5177 | mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16); |
4889 | mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16); | 5178 | mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16); |
4890 | memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox)); | 5179 | memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox)); |
4891 | 5180 | ||
4892 | rx_filter->size = sizeof(struct be_cmd_req_rx_filter); | 5181 | rx_filter->size = sizeof(struct be_cmd_req_rx_filter); |
4893 | rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev, | 5182 | rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size, |
4894 | rx_filter->size, &rx_filter->dma, | 5183 | &rx_filter->dma, GFP_KERNEL); |
4895 | GFP_KERNEL); | ||
4896 | if (!rx_filter->va) { | 5184 | if (!rx_filter->va) { |
4897 | status = -ENOMEM; | 5185 | status = -ENOMEM; |
4898 | goto free_mbox; | 5186 | goto free_mbox; |
4899 | } | 5187 | } |
4900 | 5188 | ||
5189 | if (lancer_chip(adapter)) | ||
5190 | stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats); | ||
5191 | else if (BE2_chip(adapter)) | ||
5192 | stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0); | ||
5193 | else if (BE3_chip(adapter)) | ||
5194 | stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1); | ||
5195 | else | ||
5196 | stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2); | ||
5197 | stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size, | ||
5198 | &stats_cmd->dma, GFP_KERNEL); | ||
5199 | if (!stats_cmd->va) { | ||
5200 | status = -ENOMEM; | ||
5201 | goto free_rx_filter; | ||
5202 | } | ||
5203 | |||
4901 | mutex_init(&adapter->mbox_lock); | 5204 | mutex_init(&adapter->mbox_lock); |
4902 | spin_lock_init(&adapter->mcc_lock); | 5205 | spin_lock_init(&adapter->mcc_lock); |
4903 | spin_lock_init(&adapter->mcc_cq_lock); | 5206 | spin_lock_init(&adapter->mcc_cq_lock); |
4904 | |||
4905 | init_completion(&adapter->et_cmd_compl); | 5207 | init_completion(&adapter->et_cmd_compl); |
4906 | pci_save_state(adapter->pdev); | ||
4907 | return 0; | ||
4908 | |||
4909 | free_mbox: | ||
4910 | dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size, | ||
4911 | mbox_mem_alloc->va, mbox_mem_alloc->dma); | ||
4912 | 5208 | ||
4913 | unmap_pci_bars: | 5209 | pci_save_state(adapter->pdev); |
4914 | be_unmap_pci_bars(adapter); | ||
4915 | |||
4916 | done: | ||
4917 | return status; | ||
4918 | } | ||
4919 | |||
4920 | static void be_stats_cleanup(struct be_adapter *adapter) | ||
4921 | { | ||
4922 | struct be_dma_mem *cmd = &adapter->stats_cmd; | ||
4923 | 5210 | ||
4924 | if (cmd->va) | 5211 | INIT_DELAYED_WORK(&adapter->work, be_worker); |
4925 | dma_free_coherent(&adapter->pdev->dev, cmd->size, | 5212 | INIT_DELAYED_WORK(&adapter->be_err_detection_work, |
4926 | cmd->va, cmd->dma); | 5213 | be_err_detection_task); |
4927 | } | ||
4928 | 5214 | ||
4929 | static int be_stats_init(struct be_adapter *adapter) | 5215 | adapter->rx_fc = true; |
4930 | { | 5216 | adapter->tx_fc = true; |
4931 | struct be_dma_mem *cmd = &adapter->stats_cmd; | ||
4932 | 5217 | ||
4933 | if (lancer_chip(adapter)) | 5218 | /* Must be a power of 2 or else MODULO will BUG_ON */ |
4934 | cmd->size = sizeof(struct lancer_cmd_req_pport_stats); | 5219 | adapter->be_get_temp_freq = 64; |
4935 | else if (BE2_chip(adapter)) | 5220 | adapter->cfg_num_qs = netif_get_num_default_rss_queues(); |
4936 | cmd->size = sizeof(struct be_cmd_req_get_stats_v0); | ||
4937 | else if (BE3_chip(adapter)) | ||
4938 | cmd->size = sizeof(struct be_cmd_req_get_stats_v1); | ||
4939 | else | ||
4940 | /* ALL non-BE ASICs */ | ||
4941 | cmd->size = sizeof(struct be_cmd_req_get_stats_v2); | ||
4942 | 5221 | ||
4943 | cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma, | ||
4944 | GFP_KERNEL); | ||
4945 | if (!cmd->va) | ||
4946 | return -ENOMEM; | ||
4947 | return 0; | 5222 | return 0; |
5223 | |||
5224 | free_rx_filter: | ||
5225 | dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma); | ||
5226 | free_mbox: | ||
5227 | dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va, | ||
5228 | mbox_mem_alloc->dma); | ||
5229 | return status; | ||
4948 | } | 5230 | } |
4949 | 5231 | ||
4950 | static void be_remove(struct pci_dev *pdev) | 5232 | static void be_remove(struct pci_dev *pdev) |
@@ -4957,7 +5239,7 @@ static void be_remove(struct pci_dev *pdev) | |||
4957 | be_roce_dev_remove(adapter); | 5239 | be_roce_dev_remove(adapter); |
4958 | be_intr_set(adapter, false); | 5240 | be_intr_set(adapter, false); |
4959 | 5241 | ||
4960 | cancel_delayed_work_sync(&adapter->func_recovery_work); | 5242 | be_cancel_err_detection(adapter); |
4961 | 5243 | ||
4962 | unregister_netdev(adapter->netdev); | 5244 | unregister_netdev(adapter->netdev); |
4963 | 5245 | ||
@@ -4966,9 +5248,8 @@ static void be_remove(struct pci_dev *pdev) | |||
4966 | /* tell fw we're done with firing cmds */ | 5248 | /* tell fw we're done with firing cmds */ |
4967 | be_cmd_fw_clean(adapter); | 5249 | be_cmd_fw_clean(adapter); |
4968 | 5250 | ||
4969 | be_stats_cleanup(adapter); | 5251 | be_unmap_pci_bars(adapter); |
4970 | 5252 | be_drv_cleanup(adapter); | |
4971 | be_ctrl_cleanup(adapter); | ||
4972 | 5253 | ||
4973 | pci_disable_pcie_error_reporting(pdev); | 5254 | pci_disable_pcie_error_reporting(pdev); |
4974 | 5255 | ||
@@ -4978,156 +5259,6 @@ static void be_remove(struct pci_dev *pdev) | |||
4978 | free_netdev(adapter->netdev); | 5259 | free_netdev(adapter->netdev); |
4979 | } | 5260 | } |
4980 | 5261 | ||
4981 | static int be_get_initial_config(struct be_adapter *adapter) | ||
4982 | { | ||
4983 | int status, level; | ||
4984 | |||
4985 | status = be_cmd_get_cntl_attributes(adapter); | ||
4986 | if (status) | ||
4987 | return status; | ||
4988 | |||
4989 | /* Must be a power of 2 or else MODULO will BUG_ON */ | ||
4990 | adapter->be_get_temp_freq = 64; | ||
4991 | |||
4992 | if (BEx_chip(adapter)) { | ||
4993 | level = be_cmd_get_fw_log_level(adapter); | ||
4994 | adapter->msg_enable = | ||
4995 | level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0; | ||
4996 | } | ||
4997 | |||
4998 | adapter->cfg_num_qs = netif_get_num_default_rss_queues(); | ||
4999 | return 0; | ||
5000 | } | ||
5001 | |||
5002 | static int lancer_recover_func(struct be_adapter *adapter) | ||
5003 | { | ||
5004 | struct device *dev = &adapter->pdev->dev; | ||
5005 | int status; | ||
5006 | |||
5007 | status = lancer_test_and_set_rdy_state(adapter); | ||
5008 | if (status) | ||
5009 | goto err; | ||
5010 | |||
5011 | if (netif_running(adapter->netdev)) | ||
5012 | be_close(adapter->netdev); | ||
5013 | |||
5014 | be_clear(adapter); | ||
5015 | |||
5016 | be_clear_all_error(adapter); | ||
5017 | |||
5018 | status = be_setup(adapter); | ||
5019 | if (status) | ||
5020 | goto err; | ||
5021 | |||
5022 | if (netif_running(adapter->netdev)) { | ||
5023 | status = be_open(adapter->netdev); | ||
5024 | if (status) | ||
5025 | goto err; | ||
5026 | } | ||
5027 | |||
5028 | dev_err(dev, "Adapter recovery successful\n"); | ||
5029 | return 0; | ||
5030 | err: | ||
5031 | if (status == -EAGAIN) | ||
5032 | dev_err(dev, "Waiting for resource provisioning\n"); | ||
5033 | else | ||
5034 | dev_err(dev, "Adapter recovery failed\n"); | ||
5035 | |||
5036 | return status; | ||
5037 | } | ||
5038 | |||
5039 | static void be_func_recovery_task(struct work_struct *work) | ||
5040 | { | ||
5041 | struct be_adapter *adapter = | ||
5042 | container_of(work, struct be_adapter, func_recovery_work.work); | ||
5043 | int status = 0; | ||
5044 | |||
5045 | be_detect_error(adapter); | ||
5046 | |||
5047 | if (adapter->hw_error && lancer_chip(adapter)) { | ||
5048 | rtnl_lock(); | ||
5049 | netif_device_detach(adapter->netdev); | ||
5050 | rtnl_unlock(); | ||
5051 | |||
5052 | status = lancer_recover_func(adapter); | ||
5053 | if (!status) | ||
5054 | netif_device_attach(adapter->netdev); | ||
5055 | } | ||
5056 | |||
5057 | /* In Lancer, for all errors other than provisioning error (-EAGAIN), | ||
5058 | * no need to attempt further recovery. | ||
5059 | */ | ||
5060 | if (!status || status == -EAGAIN) | ||
5061 | schedule_delayed_work(&adapter->func_recovery_work, | ||
5062 | msecs_to_jiffies(1000)); | ||
5063 | } | ||
5064 | |||
5065 | static void be_log_sfp_info(struct be_adapter *adapter) | ||
5066 | { | ||
5067 | int status; | ||
5068 | |||
5069 | status = be_cmd_query_sfp_info(adapter); | ||
5070 | if (!status) { | ||
5071 | dev_err(&adapter->pdev->dev, | ||
5072 | "Unqualified SFP+ detected on %c from %s part no: %s", | ||
5073 | adapter->port_name, adapter->phy.vendor_name, | ||
5074 | adapter->phy.vendor_pn); | ||
5075 | } | ||
5076 | adapter->flags &= ~BE_FLAGS_EVT_INCOMPATIBLE_SFP; | ||
5077 | } | ||
5078 | |||
5079 | static void be_worker(struct work_struct *work) | ||
5080 | { | ||
5081 | struct be_adapter *adapter = | ||
5082 | container_of(work, struct be_adapter, work.work); | ||
5083 | struct be_rx_obj *rxo; | ||
5084 | int i; | ||
5085 | |||
5086 | /* when interrupts are not yet enabled, just reap any pending | ||
5087 | * mcc completions */ | ||
5088 | if (!netif_running(adapter->netdev)) { | ||
5089 | local_bh_disable(); | ||
5090 | be_process_mcc(adapter); | ||
5091 | local_bh_enable(); | ||
5092 | goto reschedule; | ||
5093 | } | ||
5094 | |||
5095 | if (!adapter->stats_cmd_sent) { | ||
5096 | if (lancer_chip(adapter)) | ||
5097 | lancer_cmd_get_pport_stats(adapter, | ||
5098 | &adapter->stats_cmd); | ||
5099 | else | ||
5100 | be_cmd_get_stats(adapter, &adapter->stats_cmd); | ||
5101 | } | ||
5102 | |||
5103 | if (be_physfn(adapter) && | ||
5104 | MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0) | ||
5105 | be_cmd_get_die_temperature(adapter); | ||
5106 | |||
5107 | for_all_rx_queues(adapter, rxo, i) { | ||
5108 | /* Replenish RX-queues starved due to memory | ||
5109 | * allocation failures. | ||
5110 | */ | ||
5111 | if (rxo->rx_post_starved) | ||
5112 | be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST); | ||
5113 | } | ||
5114 | |||
5115 | be_eqd_update(adapter); | ||
5116 | |||
5117 | if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP) | ||
5118 | be_log_sfp_info(adapter); | ||
5119 | |||
5120 | reschedule: | ||
5121 | adapter->work_counter++; | ||
5122 | schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000)); | ||
5123 | } | ||
5124 | |||
5125 | /* If any VFs are already enabled don't FLR the PF */ | ||
5126 | static bool be_reset_required(struct be_adapter *adapter) | ||
5127 | { | ||
5128 | return pci_num_vf(adapter->pdev) ? false : true; | ||
5129 | } | ||
5130 | |||
5131 | static char *mc_name(struct be_adapter *adapter) | 5262 | static char *mc_name(struct be_adapter *adapter) |
5132 | { | 5263 | { |
5133 | char *str = ""; /* default */ | 5264 | char *str = ""; /* default */ |
@@ -5226,50 +5357,17 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id) | |||
5226 | if (!status) | 5357 | if (!status) |
5227 | dev_info(&pdev->dev, "PCIe error reporting enabled\n"); | 5358 | dev_info(&pdev->dev, "PCIe error reporting enabled\n"); |
5228 | 5359 | ||
5229 | status = be_ctrl_init(adapter); | 5360 | status = be_map_pci_bars(adapter); |
5230 | if (status) | 5361 | if (status) |
5231 | goto free_netdev; | 5362 | goto free_netdev; |
5232 | 5363 | ||
5233 | /* sync up with fw's ready state */ | 5364 | status = be_drv_init(adapter); |
5234 | if (be_physfn(adapter)) { | ||
5235 | status = be_fw_wait_ready(adapter); | ||
5236 | if (status) | ||
5237 | goto ctrl_clean; | ||
5238 | } | ||
5239 | |||
5240 | if (be_reset_required(adapter)) { | ||
5241 | status = be_cmd_reset_function(adapter); | ||
5242 | if (status) | ||
5243 | goto ctrl_clean; | ||
5244 | |||
5245 | /* Wait for interrupts to quiesce after an FLR */ | ||
5246 | msleep(100); | ||
5247 | } | ||
5248 | |||
5249 | /* Allow interrupts for other ULPs running on NIC function */ | ||
5250 | be_intr_set(adapter, true); | ||
5251 | |||
5252 | /* tell fw we're ready to fire cmds */ | ||
5253 | status = be_cmd_fw_init(adapter); | ||
5254 | if (status) | ||
5255 | goto ctrl_clean; | ||
5256 | |||
5257 | status = be_stats_init(adapter); | ||
5258 | if (status) | ||
5259 | goto ctrl_clean; | ||
5260 | |||
5261 | status = be_get_initial_config(adapter); | ||
5262 | if (status) | 5365 | if (status) |
5263 | goto stats_clean; | 5366 | goto unmap_bars; |
5264 | |||
5265 | INIT_DELAYED_WORK(&adapter->work, be_worker); | ||
5266 | INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task); | ||
5267 | adapter->rx_fc = true; | ||
5268 | adapter->tx_fc = true; | ||
5269 | 5367 | ||
5270 | status = be_setup(adapter); | 5368 | status = be_setup(adapter); |
5271 | if (status) | 5369 | if (status) |
5272 | goto stats_clean; | 5370 | goto drv_cleanup; |
5273 | 5371 | ||
5274 | be_netdev_init(netdev); | 5372 | be_netdev_init(netdev); |
5275 | status = register_netdev(netdev); | 5373 | status = register_netdev(netdev); |
@@ -5278,8 +5376,7 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id) | |||
5278 | 5376 | ||
5279 | be_roce_dev_add(adapter); | 5377 | be_roce_dev_add(adapter); |
5280 | 5378 | ||
5281 | schedule_delayed_work(&adapter->func_recovery_work, | 5379 | be_schedule_err_detection(adapter); |
5282 | msecs_to_jiffies(1000)); | ||
5283 | 5380 | ||
5284 | dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev), | 5381 | dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev), |
5285 | func_name(adapter), mc_name(adapter), adapter->port_name); | 5382 | func_name(adapter), mc_name(adapter), adapter->port_name); |
@@ -5288,10 +5385,10 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id) | |||
5288 | 5385 | ||
5289 | unsetup: | 5386 | unsetup: |
5290 | be_clear(adapter); | 5387 | be_clear(adapter); |
5291 | stats_clean: | 5388 | drv_cleanup: |
5292 | be_stats_cleanup(adapter); | 5389 | be_drv_cleanup(adapter); |
5293 | ctrl_clean: | 5390 | unmap_bars: |
5294 | be_ctrl_cleanup(adapter); | 5391 | be_unmap_pci_bars(adapter); |
5295 | free_netdev: | 5392 | free_netdev: |
5296 | free_netdev(netdev); | 5393 | free_netdev(netdev); |
5297 | rel_reg: | 5394 | rel_reg: |
@@ -5306,21 +5403,14 @@ do_none: | |||
5306 | static int be_suspend(struct pci_dev *pdev, pm_message_t state) | 5403 | static int be_suspend(struct pci_dev *pdev, pm_message_t state) |
5307 | { | 5404 | { |
5308 | struct be_adapter *adapter = pci_get_drvdata(pdev); | 5405 | struct be_adapter *adapter = pci_get_drvdata(pdev); |
5309 | struct net_device *netdev = adapter->netdev; | ||
5310 | 5406 | ||
5311 | if (adapter->wol_en) | 5407 | if (adapter->wol_en) |
5312 | be_setup_wol(adapter, true); | 5408 | be_setup_wol(adapter, true); |
5313 | 5409 | ||
5314 | be_intr_set(adapter, false); | 5410 | be_intr_set(adapter, false); |
5315 | cancel_delayed_work_sync(&adapter->func_recovery_work); | 5411 | be_cancel_err_detection(adapter); |
5316 | 5412 | ||
5317 | netif_device_detach(netdev); | 5413 | be_cleanup(adapter); |
5318 | if (netif_running(netdev)) { | ||
5319 | rtnl_lock(); | ||
5320 | be_close(netdev); | ||
5321 | rtnl_unlock(); | ||
5322 | } | ||
5323 | be_clear(adapter); | ||
5324 | 5414 | ||
5325 | pci_save_state(pdev); | 5415 | pci_save_state(pdev); |
5326 | pci_disable_device(pdev); | 5416 | pci_disable_device(pdev); |
@@ -5328,13 +5418,10 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state) | |||
5328 | return 0; | 5418 | return 0; |
5329 | } | 5419 | } |
5330 | 5420 | ||
5331 | static int be_resume(struct pci_dev *pdev) | 5421 | static int be_pci_resume(struct pci_dev *pdev) |
5332 | { | 5422 | { |
5333 | int status = 0; | ||
5334 | struct be_adapter *adapter = pci_get_drvdata(pdev); | 5423 | struct be_adapter *adapter = pci_get_drvdata(pdev); |
5335 | struct net_device *netdev = adapter->netdev; | 5424 | int status = 0; |
5336 | |||
5337 | netif_device_detach(netdev); | ||
5338 | 5425 | ||
5339 | status = pci_enable_device(pdev); | 5426 | status = pci_enable_device(pdev); |
5340 | if (status) | 5427 | if (status) |
@@ -5343,30 +5430,11 @@ static int be_resume(struct pci_dev *pdev) | |||
5343 | pci_set_power_state(pdev, PCI_D0); | 5430 | pci_set_power_state(pdev, PCI_D0); |
5344 | pci_restore_state(pdev); | 5431 | pci_restore_state(pdev); |
5345 | 5432 | ||
5346 | status = be_fw_wait_ready(adapter); | 5433 | status = be_resume(adapter); |
5347 | if (status) | ||
5348 | return status; | ||
5349 | |||
5350 | status = be_cmd_reset_function(adapter); | ||
5351 | if (status) | 5434 | if (status) |
5352 | return status; | 5435 | return status; |
5353 | 5436 | ||
5354 | be_intr_set(adapter, true); | 5437 | be_schedule_err_detection(adapter); |
5355 | /* tell fw we're ready to fire cmds */ | ||
5356 | status = be_cmd_fw_init(adapter); | ||
5357 | if (status) | ||
5358 | return status; | ||
5359 | |||
5360 | be_setup(adapter); | ||
5361 | if (netif_running(netdev)) { | ||
5362 | rtnl_lock(); | ||
5363 | be_open(netdev); | ||
5364 | rtnl_unlock(); | ||
5365 | } | ||
5366 | |||
5367 | schedule_delayed_work(&adapter->func_recovery_work, | ||
5368 | msecs_to_jiffies(1000)); | ||
5369 | netif_device_attach(netdev); | ||
5370 | 5438 | ||
5371 | if (adapter->wol_en) | 5439 | if (adapter->wol_en) |
5372 | be_setup_wol(adapter, false); | 5440 | be_setup_wol(adapter, false); |
@@ -5386,7 +5454,7 @@ static void be_shutdown(struct pci_dev *pdev) | |||
5386 | 5454 | ||
5387 | be_roce_dev_shutdown(adapter); | 5455 | be_roce_dev_shutdown(adapter); |
5388 | cancel_delayed_work_sync(&adapter->work); | 5456 | cancel_delayed_work_sync(&adapter->work); |
5389 | cancel_delayed_work_sync(&adapter->func_recovery_work); | 5457 | be_cancel_err_detection(adapter); |
5390 | 5458 | ||
5391 | netif_device_detach(adapter->netdev); | 5459 | netif_device_detach(adapter->netdev); |
5392 | 5460 | ||
@@ -5399,22 +5467,15 @@ static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev, | |||
5399 | pci_channel_state_t state) | 5467 | pci_channel_state_t state) |
5400 | { | 5468 | { |
5401 | struct be_adapter *adapter = pci_get_drvdata(pdev); | 5469 | struct be_adapter *adapter = pci_get_drvdata(pdev); |
5402 | struct net_device *netdev = adapter->netdev; | ||
5403 | 5470 | ||
5404 | dev_err(&adapter->pdev->dev, "EEH error detected\n"); | 5471 | dev_err(&adapter->pdev->dev, "EEH error detected\n"); |
5405 | 5472 | ||
5406 | if (!adapter->eeh_error) { | 5473 | if (!adapter->eeh_error) { |
5407 | adapter->eeh_error = true; | 5474 | adapter->eeh_error = true; |
5408 | 5475 | ||
5409 | cancel_delayed_work_sync(&adapter->func_recovery_work); | 5476 | be_cancel_err_detection(adapter); |
5410 | 5477 | ||
5411 | rtnl_lock(); | 5478 | be_cleanup(adapter); |
5412 | netif_device_detach(netdev); | ||
5413 | if (netif_running(netdev)) | ||
5414 | be_close(netdev); | ||
5415 | rtnl_unlock(); | ||
5416 | |||
5417 | be_clear(adapter); | ||
5418 | } | 5479 | } |
5419 | 5480 | ||
5420 | if (state == pci_channel_io_perm_failure) | 5481 | if (state == pci_channel_io_perm_failure) |
@@ -5465,40 +5526,16 @@ static void be_eeh_resume(struct pci_dev *pdev) | |||
5465 | { | 5526 | { |
5466 | int status = 0; | 5527 | int status = 0; |
5467 | struct be_adapter *adapter = pci_get_drvdata(pdev); | 5528 | struct be_adapter *adapter = pci_get_drvdata(pdev); |
5468 | struct net_device *netdev = adapter->netdev; | ||
5469 | 5529 | ||
5470 | dev_info(&adapter->pdev->dev, "EEH resume\n"); | 5530 | dev_info(&adapter->pdev->dev, "EEH resume\n"); |
5471 | 5531 | ||
5472 | pci_save_state(pdev); | 5532 | pci_save_state(pdev); |
5473 | 5533 | ||
5474 | status = be_cmd_reset_function(adapter); | 5534 | status = be_resume(adapter); |
5475 | if (status) | 5535 | if (status) |
5476 | goto err; | 5536 | goto err; |
5477 | 5537 | ||
5478 | /* On some BE3 FW versions, after a HW reset, | 5538 | be_schedule_err_detection(adapter); |
5479 | * interrupts will remain disabled for each function. | ||
5480 | * So, explicitly enable interrupts | ||
5481 | */ | ||
5482 | be_intr_set(adapter, true); | ||
5483 | |||
5484 | /* tell fw we're ready to fire cmds */ | ||
5485 | status = be_cmd_fw_init(adapter); | ||
5486 | if (status) | ||
5487 | goto err; | ||
5488 | |||
5489 | status = be_setup(adapter); | ||
5490 | if (status) | ||
5491 | goto err; | ||
5492 | |||
5493 | if (netif_running(netdev)) { | ||
5494 | status = be_open(netdev); | ||
5495 | if (status) | ||
5496 | goto err; | ||
5497 | } | ||
5498 | |||
5499 | schedule_delayed_work(&adapter->func_recovery_work, | ||
5500 | msecs_to_jiffies(1000)); | ||
5501 | netif_device_attach(netdev); | ||
5502 | return; | 5539 | return; |
5503 | err: | 5540 | err: |
5504 | dev_err(&adapter->pdev->dev, "EEH resume failed\n"); | 5541 | dev_err(&adapter->pdev->dev, "EEH resume failed\n"); |
@@ -5516,7 +5553,7 @@ static struct pci_driver be_driver = { | |||
5516 | .probe = be_probe, | 5553 | .probe = be_probe, |
5517 | .remove = be_remove, | 5554 | .remove = be_remove, |
5518 | .suspend = be_suspend, | 5555 | .suspend = be_suspend, |
5519 | .resume = be_resume, | 5556 | .resume = be_pci_resume, |
5520 | .shutdown = be_shutdown, | 5557 | .shutdown = be_shutdown, |
5521 | .err_handler = &be_eeh_handlers | 5558 | .err_handler = &be_eeh_handlers |
5522 | }; | 5559 | }; |
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index f4ff465584a0..7216a5370a1f 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig | |||
@@ -303,6 +303,15 @@ config I40E_FCOE | |||
303 | 303 | ||
304 | If unsure, say N. | 304 | If unsure, say N. |
305 | 305 | ||
306 | config I40E_CONFIGFS_FS | ||
307 | bool "Config File System Support (configfs)" | ||
308 | default n | ||
309 | depends on I40E && CONFIGFS_FS && !(I40E=y && CONFIGFS_FS=m) | ||
310 | ---help--- | ||
311 | Provides support for the configfs file system for additional | ||
312 | driver configuration. Say Y here if you want to use the | ||
313 | configuration file system in the driver. | ||
314 | |||
306 | config I40EVF | 315 | config I40EVF |
307 | tristate "Intel(R) XL710 X710 Virtual Function Ethernet support" | 316 | tristate "Intel(R) XL710 X710 Virtual Function Ethernet support" |
308 | depends on PCI_MSI | 317 | depends on PCI_MSI |
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h index bb7ab3c321d6..0570c668ec3d 100644 --- a/drivers/net/ethernet/intel/e1000e/defines.h +++ b/drivers/net/ethernet/intel/e1000e/defines.h | |||
@@ -141,6 +141,7 @@ | |||
141 | #define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ | 141 | #define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ |
142 | #define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */ | 142 | #define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */ |
143 | #define E1000_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min threshold size */ | 143 | #define E1000_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min threshold size */ |
144 | #define E1000_RCTL_RDMTS_HEX 0x00010000 | ||
144 | #define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ | 145 | #define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ |
145 | #define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */ | 146 | #define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */ |
146 | #define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ | 147 | #define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ |
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index 9416e5a7e0c8..a69f09e37b58 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h | |||
@@ -132,6 +132,7 @@ enum e1000_boards { | |||
132 | board_pchlan, | 132 | board_pchlan, |
133 | board_pch2lan, | 133 | board_pch2lan, |
134 | board_pch_lpt, | 134 | board_pch_lpt, |
135 | board_pch_spt | ||
135 | }; | 136 | }; |
136 | 137 | ||
137 | struct e1000_ps_page { | 138 | struct e1000_ps_page { |
@@ -501,6 +502,7 @@ extern const struct e1000_info e1000_ich10_info; | |||
501 | extern const struct e1000_info e1000_pch_info; | 502 | extern const struct e1000_info e1000_pch_info; |
502 | extern const struct e1000_info e1000_pch2_info; | 503 | extern const struct e1000_info e1000_pch2_info; |
503 | extern const struct e1000_info e1000_pch_lpt_info; | 504 | extern const struct e1000_info e1000_pch_lpt_info; |
505 | extern const struct e1000_info e1000_pch_spt_info; | ||
504 | extern const struct e1000_info e1000_es2_info; | 506 | extern const struct e1000_info e1000_es2_info; |
505 | 507 | ||
506 | void e1000e_ptp_init(struct e1000_adapter *adapter); | 508 | void e1000e_ptp_init(struct e1000_adapter *adapter); |
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index 865ce45f9ec3..11f486e4ff7b 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c | |||
@@ -896,18 +896,20 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) | |||
896 | case e1000_pchlan: | 896 | case e1000_pchlan: |
897 | case e1000_pch2lan: | 897 | case e1000_pch2lan: |
898 | case e1000_pch_lpt: | 898 | case e1000_pch_lpt: |
899 | case e1000_pch_spt: | ||
899 | mask |= (1 << 18); | 900 | mask |= (1 << 18); |
900 | break; | 901 | break; |
901 | default: | 902 | default: |
902 | break; | 903 | break; |
903 | } | 904 | } |
904 | 905 | ||
905 | if (mac->type == e1000_pch_lpt) | 906 | if ((mac->type == e1000_pch_lpt) || (mac->type == e1000_pch_spt)) |
906 | wlock_mac = (er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK) >> | 907 | wlock_mac = (er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK) >> |
907 | E1000_FWSM_WLOCK_MAC_SHIFT; | 908 | E1000_FWSM_WLOCK_MAC_SHIFT; |
908 | 909 | ||
909 | for (i = 0; i < mac->rar_entry_count; i++) { | 910 | for (i = 0; i < mac->rar_entry_count; i++) { |
910 | if (mac->type == e1000_pch_lpt) { | 911 | if ((mac->type == e1000_pch_lpt) || |
912 | (mac->type == e1000_pch_spt)) { | ||
911 | /* Cannot test write-protected SHRAL[n] registers */ | 913 | /* Cannot test write-protected SHRAL[n] registers */ |
912 | if ((wlock_mac == 1) || (wlock_mac && (i > wlock_mac))) | 914 | if ((wlock_mac == 1) || (wlock_mac && (i > wlock_mac))) |
913 | continue; | 915 | continue; |
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h index 72f5475c4b90..19e8c487db06 100644 --- a/drivers/net/ethernet/intel/e1000e/hw.h +++ b/drivers/net/ethernet/intel/e1000e/hw.h | |||
@@ -87,6 +87,10 @@ struct e1000_hw; | |||
87 | #define E1000_DEV_ID_PCH_I218_V2 0x15A1 | 87 | #define E1000_DEV_ID_PCH_I218_V2 0x15A1 |
88 | #define E1000_DEV_ID_PCH_I218_LM3 0x15A2 /* Wildcat Point PCH */ | 88 | #define E1000_DEV_ID_PCH_I218_LM3 0x15A2 /* Wildcat Point PCH */ |
89 | #define E1000_DEV_ID_PCH_I218_V3 0x15A3 /* Wildcat Point PCH */ | 89 | #define E1000_DEV_ID_PCH_I218_V3 0x15A3 /* Wildcat Point PCH */ |
90 | #define E1000_DEV_ID_PCH_SPT_I219_LM 0x156F /* SPT PCH */ | ||
91 | #define E1000_DEV_ID_PCH_SPT_I219_V 0x1570 /* SPT PCH */ | ||
92 | #define E1000_DEV_ID_PCH_SPT_I219_LM2 0x15B7 /* SPT-H PCH */ | ||
93 | #define E1000_DEV_ID_PCH_SPT_I219_V2 0x15B8 /* SPT-H PCH */ | ||
90 | 94 | ||
91 | #define E1000_REVISION_4 4 | 95 | #define E1000_REVISION_4 4 |
92 | 96 | ||
@@ -108,6 +112,7 @@ enum e1000_mac_type { | |||
108 | e1000_pchlan, | 112 | e1000_pchlan, |
109 | e1000_pch2lan, | 113 | e1000_pch2lan, |
110 | e1000_pch_lpt, | 114 | e1000_pch_lpt, |
115 | e1000_pch_spt, | ||
111 | }; | 116 | }; |
112 | 117 | ||
113 | enum e1000_media_type { | 118 | enum e1000_media_type { |
@@ -153,6 +158,7 @@ enum e1000_bus_width { | |||
153 | e1000_bus_width_pcie_x1, | 158 | e1000_bus_width_pcie_x1, |
154 | e1000_bus_width_pcie_x2, | 159 | e1000_bus_width_pcie_x2, |
155 | e1000_bus_width_pcie_x4 = 4, | 160 | e1000_bus_width_pcie_x4 = 4, |
161 | e1000_bus_width_pcie_x8 = 8, | ||
156 | e1000_bus_width_32, | 162 | e1000_bus_width_32, |
157 | e1000_bus_width_64, | 163 | e1000_bus_width_64, |
158 | e1000_bus_width_reserved | 164 | e1000_bus_width_reserved |
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index 48b74a549155..7523f510c7e4 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c | |||
@@ -123,6 +123,14 @@ static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset, | |||
123 | u16 *data); | 123 | u16 *data); |
124 | static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, | 124 | static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, |
125 | u8 size, u16 *data); | 125 | u8 size, u16 *data); |
126 | static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset, | ||
127 | u32 *data); | ||
128 | static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw, | ||
129 | u32 offset, u32 *data); | ||
130 | static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, | ||
131 | u32 offset, u32 data); | ||
132 | static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw, | ||
133 | u32 offset, u32 dword); | ||
126 | static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw); | 134 | static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw); |
127 | static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw); | 135 | static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw); |
128 | static s32 e1000_led_on_ich8lan(struct e1000_hw *hw); | 136 | static s32 e1000_led_on_ich8lan(struct e1000_hw *hw); |
@@ -229,7 +237,8 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw) | |||
229 | if (ret_val) | 237 | if (ret_val) |
230 | return false; | 238 | return false; |
231 | out: | 239 | out: |
232 | if (hw->mac.type == e1000_pch_lpt) { | 240 | if ((hw->mac.type == e1000_pch_lpt) || |
241 | (hw->mac.type == e1000_pch_spt)) { | ||
233 | /* Unforce SMBus mode in PHY */ | 242 | /* Unforce SMBus mode in PHY */ |
234 | e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg); | 243 | e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg); |
235 | phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS; | 244 | phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS; |
@@ -321,6 +330,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) | |||
321 | */ | 330 | */ |
322 | switch (hw->mac.type) { | 331 | switch (hw->mac.type) { |
323 | case e1000_pch_lpt: | 332 | case e1000_pch_lpt: |
333 | case e1000_pch_spt: | ||
324 | if (e1000_phy_is_accessible_pchlan(hw)) | 334 | if (e1000_phy_is_accessible_pchlan(hw)) |
325 | break; | 335 | break; |
326 | 336 | ||
@@ -461,6 +471,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) | |||
461 | /* fall-through */ | 471 | /* fall-through */ |
462 | case e1000_pch2lan: | 472 | case e1000_pch2lan: |
463 | case e1000_pch_lpt: | 473 | case e1000_pch_lpt: |
474 | case e1000_pch_spt: | ||
464 | /* In case the PHY needs to be in mdio slow mode, | 475 | /* In case the PHY needs to be in mdio slow mode, |
465 | * set slow mode and try to get the PHY id again. | 476 | * set slow mode and try to get the PHY id again. |
466 | */ | 477 | */ |
@@ -590,35 +601,50 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw) | |||
590 | struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; | 601 | struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; |
591 | u32 gfpreg, sector_base_addr, sector_end_addr; | 602 | u32 gfpreg, sector_base_addr, sector_end_addr; |
592 | u16 i; | 603 | u16 i; |
604 | u32 nvm_size; | ||
593 | 605 | ||
594 | /* Can't read flash registers if the register set isn't mapped. */ | 606 | /* Can't read flash registers if the register set isn't mapped. */ |
595 | if (!hw->flash_address) { | ||
596 | e_dbg("ERROR: Flash registers not mapped\n"); | ||
597 | return -E1000_ERR_CONFIG; | ||
598 | } | ||
599 | |||
600 | nvm->type = e1000_nvm_flash_sw; | 607 | nvm->type = e1000_nvm_flash_sw; |
608 | /* in SPT, gfpreg doesn't exist. NVM size is taken from the | ||
609 | * STRAP register | ||
610 | */ | ||
611 | if (hw->mac.type == e1000_pch_spt) { | ||
612 | nvm->flash_base_addr = 0; | ||
613 | nvm_size = (((er32(STRAP) >> 1) & 0x1F) + 1) | ||
614 | * NVM_SIZE_MULTIPLIER; | ||
615 | nvm->flash_bank_size = nvm_size / 2; | ||
616 | /* Adjust to word count */ | ||
617 | nvm->flash_bank_size /= sizeof(u16); | ||
618 | /* Set the base address for flash register access */ | ||
619 | hw->flash_address = hw->hw_addr + E1000_FLASH_BASE_ADDR; | ||
620 | } else { | ||
621 | if (!hw->flash_address) { | ||
622 | e_dbg("ERROR: Flash registers not mapped\n"); | ||
623 | return -E1000_ERR_CONFIG; | ||
624 | } | ||
601 | 625 | ||
602 | gfpreg = er32flash(ICH_FLASH_GFPREG); | 626 | gfpreg = er32flash(ICH_FLASH_GFPREG); |
603 | 627 | ||
604 | /* sector_X_addr is a "sector"-aligned address (4096 bytes) | 628 | /* sector_X_addr is a "sector"-aligned address (4096 bytes) |
605 | * Add 1 to sector_end_addr since this sector is included in | 629 | * Add 1 to sector_end_addr since this sector is included in |
606 | * the overall size. | 630 | * the overall size. |
607 | */ | 631 | */ |
608 | sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK; | 632 | sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK; |
609 | sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1; | 633 | sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1; |
610 | 634 | ||
611 | /* flash_base_addr is byte-aligned */ | 635 | /* flash_base_addr is byte-aligned */ |
612 | nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT; | 636 | nvm->flash_base_addr = sector_base_addr |
637 | << FLASH_SECTOR_ADDR_SHIFT; | ||
613 | 638 | ||
614 | /* find total size of the NVM, then cut in half since the total | 639 | /* find total size of the NVM, then cut in half since the total |
615 | * size represents two separate NVM banks. | 640 | * size represents two separate NVM banks. |
616 | */ | 641 | */ |
617 | nvm->flash_bank_size = ((sector_end_addr - sector_base_addr) | 642 | nvm->flash_bank_size = ((sector_end_addr - sector_base_addr) |
618 | << FLASH_SECTOR_ADDR_SHIFT); | 643 | << FLASH_SECTOR_ADDR_SHIFT); |
619 | nvm->flash_bank_size /= 2; | 644 | nvm->flash_bank_size /= 2; |
620 | /* Adjust to word count */ | 645 | /* Adjust to word count */ |
621 | nvm->flash_bank_size /= sizeof(u16); | 646 | nvm->flash_bank_size /= sizeof(u16); |
647 | } | ||
622 | 648 | ||
623 | nvm->word_size = E1000_ICH8_SHADOW_RAM_WORDS; | 649 | nvm->word_size = E1000_ICH8_SHADOW_RAM_WORDS; |
624 | 650 | ||
@@ -682,6 +708,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw) | |||
682 | mac->ops.rar_set = e1000_rar_set_pch2lan; | 708 | mac->ops.rar_set = e1000_rar_set_pch2lan; |
683 | /* fall-through */ | 709 | /* fall-through */ |
684 | case e1000_pch_lpt: | 710 | case e1000_pch_lpt: |
711 | case e1000_pch_spt: | ||
685 | case e1000_pchlan: | 712 | case e1000_pchlan: |
686 | /* check management mode */ | 713 | /* check management mode */ |
687 | mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan; | 714 | mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan; |
@@ -699,7 +726,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw) | |||
699 | break; | 726 | break; |
700 | } | 727 | } |
701 | 728 | ||
702 | if (mac->type == e1000_pch_lpt) { | 729 | if ((mac->type == e1000_pch_lpt) || (mac->type == e1000_pch_spt)) { |
703 | mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES; | 730 | mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES; |
704 | mac->ops.rar_set = e1000_rar_set_pch_lpt; | 731 | mac->ops.rar_set = e1000_rar_set_pch_lpt; |
705 | mac->ops.setup_physical_interface = | 732 | mac->ops.setup_physical_interface = |
@@ -919,8 +946,9 @@ release: | |||
919 | /* clear FEXTNVM6 bit 8 on link down or 10/100 */ | 946 | /* clear FEXTNVM6 bit 8 on link down or 10/100 */ |
920 | fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK; | 947 | fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK; |
921 | 948 | ||
922 | if (!link || ((status & E1000_STATUS_SPEED_100) && | 949 | if ((hw->phy.revision > 5) || !link || |
923 | (status & E1000_STATUS_FD))) | 950 | ((status & E1000_STATUS_SPEED_100) && |
951 | (status & E1000_STATUS_FD))) | ||
924 | goto update_fextnvm6; | 952 | goto update_fextnvm6; |
925 | 953 | ||
926 | ret_val = e1e_rphy(hw, I217_INBAND_CTRL, ®); | 954 | ret_val = e1e_rphy(hw, I217_INBAND_CTRL, ®); |
@@ -1100,6 +1128,21 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx) | |||
1100 | if (ret_val) | 1128 | if (ret_val) |
1101 | goto out; | 1129 | goto out; |
1102 | 1130 | ||
1131 | /* Si workaround for ULP entry flow on i127/rev6 h/w. Enable | ||
1132 | * LPLU and disable Gig speed when entering ULP | ||
1133 | */ | ||
1134 | if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6)) { | ||
1135 | ret_val = e1000_read_phy_reg_hv_locked(hw, HV_OEM_BITS, | ||
1136 | &phy_reg); | ||
1137 | if (ret_val) | ||
1138 | goto release; | ||
1139 | phy_reg |= HV_OEM_BITS_LPLU | HV_OEM_BITS_GBE_DIS; | ||
1140 | ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS, | ||
1141 | phy_reg); | ||
1142 | if (ret_val) | ||
1143 | goto release; | ||
1144 | } | ||
1145 | |||
1103 | /* Force SMBus mode in PHY */ | 1146 | /* Force SMBus mode in PHY */ |
1104 | ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg); | 1147 | ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg); |
1105 | if (ret_val) | 1148 | if (ret_val) |
@@ -1302,7 +1345,8 @@ out: | |||
1302 | static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) | 1345 | static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) |
1303 | { | 1346 | { |
1304 | struct e1000_mac_info *mac = &hw->mac; | 1347 | struct e1000_mac_info *mac = &hw->mac; |
1305 | s32 ret_val; | 1348 | s32 ret_val, tipg_reg = 0; |
1349 | u16 emi_addr, emi_val = 0; | ||
1306 | bool link; | 1350 | bool link; |
1307 | u16 phy_reg; | 1351 | u16 phy_reg; |
1308 | 1352 | ||
@@ -1333,48 +1377,55 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) | |||
1333 | * the IPG and reduce Rx latency in the PHY. | 1377 | * the IPG and reduce Rx latency in the PHY. |
1334 | */ | 1378 | */ |
1335 | if (((hw->mac.type == e1000_pch2lan) || | 1379 | if (((hw->mac.type == e1000_pch2lan) || |
1336 | (hw->mac.type == e1000_pch_lpt)) && link) { | 1380 | (hw->mac.type == e1000_pch_lpt) || |
1381 | (hw->mac.type == e1000_pch_spt)) && link) { | ||
1337 | u32 reg; | 1382 | u32 reg; |
1338 | 1383 | ||
1339 | reg = er32(STATUS); | 1384 | reg = er32(STATUS); |
1385 | tipg_reg = er32(TIPG); | ||
1386 | tipg_reg &= ~E1000_TIPG_IPGT_MASK; | ||
1387 | |||
1340 | if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) { | 1388 | if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) { |
1341 | u16 emi_addr; | 1389 | tipg_reg |= 0xFF; |
1390 | /* Reduce Rx latency in analog PHY */ | ||
1391 | emi_val = 0; | ||
1392 | } else { | ||
1342 | 1393 | ||
1343 | reg = er32(TIPG); | 1394 | /* Roll back the default values */ |
1344 | reg &= ~E1000_TIPG_IPGT_MASK; | 1395 | tipg_reg |= 0x08; |
1345 | reg |= 0xFF; | 1396 | emi_val = 1; |
1346 | ew32(TIPG, reg); | 1397 | } |
1347 | 1398 | ||
1348 | /* Reduce Rx latency in analog PHY */ | 1399 | ew32(TIPG, tipg_reg); |
1349 | ret_val = hw->phy.ops.acquire(hw); | ||
1350 | if (ret_val) | ||
1351 | return ret_val; | ||
1352 | 1400 | ||
1353 | if (hw->mac.type == e1000_pch2lan) | 1401 | ret_val = hw->phy.ops.acquire(hw); |
1354 | emi_addr = I82579_RX_CONFIG; | 1402 | if (ret_val) |
1355 | else | 1403 | return ret_val; |
1356 | emi_addr = I217_RX_CONFIG; | ||
1357 | 1404 | ||
1358 | ret_val = e1000_write_emi_reg_locked(hw, emi_addr, 0); | 1405 | if (hw->mac.type == e1000_pch2lan) |
1406 | emi_addr = I82579_RX_CONFIG; | ||
1407 | else | ||
1408 | emi_addr = I217_RX_CONFIG; | ||
1409 | ret_val = e1000_write_emi_reg_locked(hw, emi_addr, emi_val); | ||
1359 | 1410 | ||
1360 | hw->phy.ops.release(hw); | 1411 | hw->phy.ops.release(hw); |
1361 | 1412 | ||
1362 | if (ret_val) | 1413 | if (ret_val) |
1363 | return ret_val; | 1414 | return ret_val; |
1364 | } | ||
1365 | } | 1415 | } |
1366 | 1416 | ||
1367 | /* Work-around I218 hang issue */ | 1417 | /* Work-around I218 hang issue */ |
1368 | if ((hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_LM) || | 1418 | if ((hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_LM) || |
1369 | (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V) || | 1419 | (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V) || |
1370 | (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM3) || | 1420 | (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM3) || |
1371 | (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V3)) { | 1421 | (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V3) || |
1422 | (hw->mac.type == e1000_pch_spt)) { | ||
1372 | ret_val = e1000_k1_workaround_lpt_lp(hw, link); | 1423 | ret_val = e1000_k1_workaround_lpt_lp(hw, link); |
1373 | if (ret_val) | 1424 | if (ret_val) |
1374 | return ret_val; | 1425 | return ret_val; |
1375 | } | 1426 | } |
1376 | 1427 | if ((hw->mac.type == e1000_pch_lpt) || | |
1377 | if (hw->mac.type == e1000_pch_lpt) { | 1428 | (hw->mac.type == e1000_pch_spt)) { |
1378 | /* Set platform power management values for | 1429 | /* Set platform power management values for |
1379 | * Latency Tolerance Reporting (LTR) | 1430 | * Latency Tolerance Reporting (LTR) |
1380 | */ | 1431 | */ |
@@ -1386,6 +1437,19 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) | |||
1386 | /* Clear link partner's EEE ability */ | 1437 | /* Clear link partner's EEE ability */ |
1387 | hw->dev_spec.ich8lan.eee_lp_ability = 0; | 1438 | hw->dev_spec.ich8lan.eee_lp_ability = 0; |
1388 | 1439 | ||
1440 | /* FEXTNVM6 K1-off workaround */ | ||
1441 | if (hw->mac.type == e1000_pch_spt) { | ||
1442 | u32 pcieanacfg = er32(PCIEANACFG); | ||
1443 | u32 fextnvm6 = er32(FEXTNVM6); | ||
1444 | |||
1445 | if (pcieanacfg & E1000_FEXTNVM6_K1_OFF_ENABLE) | ||
1446 | fextnvm6 |= E1000_FEXTNVM6_K1_OFF_ENABLE; | ||
1447 | else | ||
1448 | fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE; | ||
1449 | |||
1450 | ew32(FEXTNVM6, fextnvm6); | ||
1451 | } | ||
1452 | |||
1389 | if (!link) | 1453 | if (!link) |
1390 | return 0; /* No link detected */ | 1454 | return 0; /* No link detected */ |
1391 | 1455 | ||
@@ -1479,6 +1543,7 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter) | |||
1479 | case e1000_pchlan: | 1543 | case e1000_pchlan: |
1480 | case e1000_pch2lan: | 1544 | case e1000_pch2lan: |
1481 | case e1000_pch_lpt: | 1545 | case e1000_pch_lpt: |
1546 | case e1000_pch_spt: | ||
1482 | rc = e1000_init_phy_params_pchlan(hw); | 1547 | rc = e1000_init_phy_params_pchlan(hw); |
1483 | break; | 1548 | break; |
1484 | default: | 1549 | default: |
@@ -1929,6 +1994,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) | |||
1929 | case e1000_pchlan: | 1994 | case e1000_pchlan: |
1930 | case e1000_pch2lan: | 1995 | case e1000_pch2lan: |
1931 | case e1000_pch_lpt: | 1996 | case e1000_pch_lpt: |
1997 | case e1000_pch_spt: | ||
1932 | sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M; | 1998 | sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M; |
1933 | break; | 1999 | break; |
1934 | default: | 2000 | default: |
@@ -2961,6 +3027,20 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank) | |||
2961 | s32 ret_val; | 3027 | s32 ret_val; |
2962 | 3028 | ||
2963 | switch (hw->mac.type) { | 3029 | switch (hw->mac.type) { |
3030 | /* In SPT, read from the CTRL_EXT reg instead of | ||
3031 | * accessing the sector valid bits from the nvm | ||
3032 | */ | ||
3033 | case e1000_pch_spt: | ||
3034 | *bank = er32(CTRL_EXT) | ||
3035 | & E1000_CTRL_EXT_NVMVS; | ||
3036 | if ((*bank == 0) || (*bank == 1)) { | ||
3037 | e_dbg("ERROR: No valid NVM bank present\n"); | ||
3038 | return -E1000_ERR_NVM; | ||
3039 | } else { | ||
3040 | *bank = *bank - 2; | ||
3041 | return 0; | ||
3042 | } | ||
3043 | break; | ||
2964 | case e1000_ich8lan: | 3044 | case e1000_ich8lan: |
2965 | case e1000_ich9lan: | 3045 | case e1000_ich9lan: |
2966 | eecd = er32(EECD); | 3046 | eecd = er32(EECD); |
@@ -3008,6 +3088,99 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank) | |||
3008 | } | 3088 | } |
3009 | 3089 | ||
3010 | /** | 3090 | /** |
3091 | * e1000_read_nvm_spt - NVM access for SPT | ||
3092 | * @hw: pointer to the HW structure | ||
3093 | * @offset: The offset (in bytes) of the word(s) to read. | ||
3094 | * @words: Size of data to read in words. | ||
3095 | * @data: pointer to the word(s) to read at offset. | ||
3096 | * | ||
3097 | * Reads a word(s) from the NVM | ||
3098 | **/ | ||
3099 | static s32 e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words, | ||
3100 | u16 *data) | ||
3101 | { | ||
3102 | struct e1000_nvm_info *nvm = &hw->nvm; | ||
3103 | struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; | ||
3104 | u32 act_offset; | ||
3105 | s32 ret_val = 0; | ||
3106 | u32 bank = 0; | ||
3107 | u32 dword = 0; | ||
3108 | u16 offset_to_read; | ||
3109 | u16 i; | ||
3110 | |||
3111 | if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || | ||
3112 | (words == 0)) { | ||
3113 | e_dbg("nvm parameter(s) out of bounds\n"); | ||
3114 | ret_val = -E1000_ERR_NVM; | ||
3115 | goto out; | ||
3116 | } | ||
3117 | |||
3118 | nvm->ops.acquire(hw); | ||
3119 | |||
3120 | ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); | ||
3121 | if (ret_val) { | ||
3122 | e_dbg("Could not detect valid bank, assuming bank 0\n"); | ||
3123 | bank = 0; | ||
3124 | } | ||
3125 | |||
3126 | act_offset = (bank) ? nvm->flash_bank_size : 0; | ||
3127 | act_offset += offset; | ||
3128 | |||
3129 | ret_val = 0; | ||
3130 | |||
3131 | for (i = 0; i < words; i += 2) { | ||
3132 | if (words - i == 1) { | ||
3133 | if (dev_spec->shadow_ram[offset + i].modified) { | ||
3134 | data[i] = | ||
3135 | dev_spec->shadow_ram[offset + i].value; | ||
3136 | } else { | ||
3137 | offset_to_read = act_offset + i - | ||
3138 | ((act_offset + i) % 2); | ||
3139 | ret_val = | ||
3140 | e1000_read_flash_dword_ich8lan(hw, | ||
3141 | offset_to_read, | ||
3142 | &dword); | ||
3143 | if (ret_val) | ||
3144 | break; | ||
3145 | if ((act_offset + i) % 2 == 0) | ||
3146 | data[i] = (u16)(dword & 0xFFFF); | ||
3147 | else | ||
3148 | data[i] = (u16)((dword >> 16) & 0xFFFF); | ||
3149 | } | ||
3150 | } else { | ||
3151 | offset_to_read = act_offset + i; | ||
3152 | if (!(dev_spec->shadow_ram[offset + i].modified) || | ||
3153 | !(dev_spec->shadow_ram[offset + i + 1].modified)) { | ||
3154 | ret_val = | ||
3155 | e1000_read_flash_dword_ich8lan(hw, | ||
3156 | offset_to_read, | ||
3157 | &dword); | ||
3158 | if (ret_val) | ||
3159 | break; | ||
3160 | } | ||
3161 | if (dev_spec->shadow_ram[offset + i].modified) | ||
3162 | data[i] = | ||
3163 | dev_spec->shadow_ram[offset + i].value; | ||
3164 | else | ||
3165 | data[i] = (u16)(dword & 0xFFFF); | ||
3166 | if (dev_spec->shadow_ram[offset + i].modified) | ||
3167 | data[i + 1] = | ||
3168 | dev_spec->shadow_ram[offset + i + 1].value; | ||
3169 | else | ||
3170 | data[i + 1] = (u16)(dword >> 16 & 0xFFFF); | ||
3171 | } | ||
3172 | } | ||
3173 | |||
3174 | nvm->ops.release(hw); | ||
3175 | |||
3176 | out: | ||
3177 | if (ret_val) | ||
3178 | e_dbg("NVM read error: %d\n", ret_val); | ||
3179 | |||
3180 | return ret_val; | ||
3181 | } | ||
3182 | |||
3183 | /** | ||
3011 | * e1000_read_nvm_ich8lan - Read word(s) from the NVM | 3184 | * e1000_read_nvm_ich8lan - Read word(s) from the NVM |
3012 | * @hw: pointer to the HW structure | 3185 | * @hw: pointer to the HW structure |
3013 | * @offset: The offset (in bytes) of the word(s) to read. | 3186 | * @offset: The offset (in bytes) of the word(s) to read. |
@@ -3090,8 +3263,10 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) | |||
3090 | /* Clear FCERR and DAEL in hw status by writing 1 */ | 3263 | /* Clear FCERR and DAEL in hw status by writing 1 */ |
3091 | hsfsts.hsf_status.flcerr = 1; | 3264 | hsfsts.hsf_status.flcerr = 1; |
3092 | hsfsts.hsf_status.dael = 1; | 3265 | hsfsts.hsf_status.dael = 1; |
3093 | 3266 | if (hw->mac.type == e1000_pch_spt) | |
3094 | ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); | 3267 | ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval & 0xFFFF); |
3268 | else | ||
3269 | ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); | ||
3095 | 3270 | ||
3096 | /* Either we should have a hardware SPI cycle in progress | 3271 | /* Either we should have a hardware SPI cycle in progress |
3097 | * bit to check against, in order to start a new cycle or | 3272 | * bit to check against, in order to start a new cycle or |
@@ -3107,7 +3282,10 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) | |||
3107 | * Begin by setting Flash Cycle Done. | 3282 | * Begin by setting Flash Cycle Done. |
3108 | */ | 3283 | */ |
3109 | hsfsts.hsf_status.flcdone = 1; | 3284 | hsfsts.hsf_status.flcdone = 1; |
3110 | ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); | 3285 | if (hw->mac.type == e1000_pch_spt) |
3286 | ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval & 0xFFFF); | ||
3287 | else | ||
3288 | ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); | ||
3111 | ret_val = 0; | 3289 | ret_val = 0; |
3112 | } else { | 3290 | } else { |
3113 | s32 i; | 3291 | s32 i; |
@@ -3128,7 +3306,11 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) | |||
3128 | * now set the Flash Cycle Done. | 3306 | * now set the Flash Cycle Done. |
3129 | */ | 3307 | */ |
3130 | hsfsts.hsf_status.flcdone = 1; | 3308 | hsfsts.hsf_status.flcdone = 1; |
3131 | ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); | 3309 | if (hw->mac.type == e1000_pch_spt) |
3310 | ew32flash(ICH_FLASH_HSFSTS, | ||
3311 | hsfsts.regval & 0xFFFF); | ||
3312 | else | ||
3313 | ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); | ||
3132 | } else { | 3314 | } else { |
3133 | e_dbg("Flash controller busy, cannot get access\n"); | 3315 | e_dbg("Flash controller busy, cannot get access\n"); |
3134 | } | 3316 | } |
@@ -3151,9 +3333,16 @@ static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout) | |||
3151 | u32 i = 0; | 3333 | u32 i = 0; |
3152 | 3334 | ||
3153 | /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */ | 3335 | /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */ |
3154 | hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); | 3336 | if (hw->mac.type == e1000_pch_spt) |
3337 | hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16; | ||
3338 | else | ||
3339 | hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); | ||
3155 | hsflctl.hsf_ctrl.flcgo = 1; | 3340 | hsflctl.hsf_ctrl.flcgo = 1; |
3156 | ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); | 3341 | |
3342 | if (hw->mac.type == e1000_pch_spt) | ||
3343 | ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16); | ||
3344 | else | ||
3345 | ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); | ||
3157 | 3346 | ||
3158 | /* wait till FDONE bit is set to 1 */ | 3347 | /* wait till FDONE bit is set to 1 */ |
3159 | do { | 3348 | do { |
@@ -3170,6 +3359,23 @@ static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout) | |||
3170 | } | 3359 | } |
3171 | 3360 | ||
3172 | /** | 3361 | /** |
3362 | * e1000_read_flash_dword_ich8lan - Read dword from flash | ||
3363 | * @hw: pointer to the HW structure | ||
3364 | * @offset: offset to data location | ||
3365 | * @data: pointer to the location for storing the data | ||
3366 | * | ||
3367 | * Reads the flash dword at offset into data. Offset is converted | ||
3368 | * to bytes before read. | ||
3369 | **/ | ||
3370 | static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw, u32 offset, | ||
3371 | u32 *data) | ||
3372 | { | ||
3373 | /* Must convert word offset into bytes. */ | ||
3374 | offset <<= 1; | ||
3375 | return e1000_read_flash_data32_ich8lan(hw, offset, data); | ||
3376 | } | ||
3377 | |||
3378 | /** | ||
3173 | * e1000_read_flash_word_ich8lan - Read word from flash | 3379 | * e1000_read_flash_word_ich8lan - Read word from flash |
3174 | * @hw: pointer to the HW structure | 3380 | * @hw: pointer to the HW structure |
3175 | * @offset: offset to data location | 3381 | * @offset: offset to data location |
@@ -3201,7 +3407,14 @@ static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, | |||
3201 | s32 ret_val; | 3407 | s32 ret_val; |
3202 | u16 word = 0; | 3408 | u16 word = 0; |
3203 | 3409 | ||
3204 | ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word); | 3410 | /* In SPT, only 32 bits access is supported, |
3411 | * so this function should not be called. | ||
3412 | */ | ||
3413 | if (hw->mac.type == e1000_pch_spt) | ||
3414 | return -E1000_ERR_NVM; | ||
3415 | else | ||
3416 | ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word); | ||
3417 | |||
3205 | if (ret_val) | 3418 | if (ret_val) |
3206 | return ret_val; | 3419 | return ret_val; |
3207 | 3420 | ||
@@ -3287,6 +3500,82 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, | |||
3287 | } | 3500 | } |
3288 | 3501 | ||
3289 | /** | 3502 | /** |
3503 | * e1000_read_flash_data32_ich8lan - Read dword from NVM | ||
3504 | * @hw: pointer to the HW structure | ||
3505 | * @offset: The offset (in bytes) of the dword to read. | ||
3506 | * @data: Pointer to the dword to store the value read. | ||
3507 | * | ||
3508 | * Reads a byte or word from the NVM using the flash access registers. | ||
3509 | **/ | ||
3510 | |||
3511 | static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset, | ||
3512 | u32 *data) | ||
3513 | { | ||
3514 | union ich8_hws_flash_status hsfsts; | ||
3515 | union ich8_hws_flash_ctrl hsflctl; | ||
3516 | u32 flash_linear_addr; | ||
3517 | s32 ret_val = -E1000_ERR_NVM; | ||
3518 | u8 count = 0; | ||
3519 | |||
3520 | if (offset > ICH_FLASH_LINEAR_ADDR_MASK || | ||
3521 | hw->mac.type != e1000_pch_spt) | ||
3522 | return -E1000_ERR_NVM; | ||
3523 | flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) + | ||
3524 | hw->nvm.flash_base_addr); | ||
3525 | |||
3526 | do { | ||
3527 | udelay(1); | ||
3528 | /* Steps */ | ||
3529 | ret_val = e1000_flash_cycle_init_ich8lan(hw); | ||
3530 | if (ret_val) | ||
3531 | break; | ||
3532 | /* In SPT, This register is in Lan memory space, not flash. | ||
3533 | * Therefore, only 32 bit access is supported | ||
3534 | */ | ||
3535 | hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16; | ||
3536 | |||
3537 | /* 0b/1b corresponds to 1 or 2 byte size, respectively. */ | ||
3538 | hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1; | ||
3539 | hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ; | ||
3540 | /* In SPT, This register is in Lan memory space, not flash. | ||
3541 | * Therefore, only 32 bit access is supported | ||
3542 | */ | ||
3543 | ew32flash(ICH_FLASH_HSFSTS, (u32)hsflctl.regval << 16); | ||
3544 | ew32flash(ICH_FLASH_FADDR, flash_linear_addr); | ||
3545 | |||
3546 | ret_val = | ||
3547 | e1000_flash_cycle_ich8lan(hw, | ||
3548 | ICH_FLASH_READ_COMMAND_TIMEOUT); | ||
3549 | |||
3550 | /* Check if FCERR is set to 1, if set to 1, clear it | ||
3551 | * and try the whole sequence a few more times, else | ||
3552 | * read in (shift in) the Flash Data0, the order is | ||
3553 | * least significant byte first msb to lsb | ||
3554 | */ | ||
3555 | if (!ret_val) { | ||
3556 | *data = er32flash(ICH_FLASH_FDATA0); | ||
3557 | break; | ||
3558 | } else { | ||
3559 | /* If we've gotten here, then things are probably | ||
3560 | * completely hosed, but if the error condition is | ||
3561 | * detected, it won't hurt to give it another try... | ||
3562 | * ICH_FLASH_CYCLE_REPEAT_COUNT times. | ||
3563 | */ | ||
3564 | hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); | ||
3565 | if (hsfsts.hsf_status.flcerr) { | ||
3566 | /* Repeat for some time before giving up. */ | ||
3567 | continue; | ||
3568 | } else if (!hsfsts.hsf_status.flcdone) { | ||
3569 | e_dbg("Timeout error - flash cycle did not complete.\n"); | ||
3570 | break; | ||
3571 | } | ||
3572 | } | ||
3573 | } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); | ||
3574 | |||
3575 | return ret_val; | ||
3576 | } | ||
3577 | |||
3578 | /** | ||
3290 | * e1000_write_nvm_ich8lan - Write word(s) to the NVM | 3579 | * e1000_write_nvm_ich8lan - Write word(s) to the NVM |
3291 | * @hw: pointer to the HW structure | 3580 | * @hw: pointer to the HW structure |
3292 | * @offset: The offset (in bytes) of the word(s) to write. | 3581 | * @offset: The offset (in bytes) of the word(s) to write. |
@@ -3321,7 +3610,7 @@ static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, | |||
3321 | } | 3610 | } |
3322 | 3611 | ||
3323 | /** | 3612 | /** |
3324 | * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM | 3613 | * e1000_update_nvm_checksum_spt - Update the checksum for NVM |
3325 | * @hw: pointer to the HW structure | 3614 | * @hw: pointer to the HW structure |
3326 | * | 3615 | * |
3327 | * The NVM checksum is updated by calling the generic update_nvm_checksum, | 3616 | * The NVM checksum is updated by calling the generic update_nvm_checksum, |
@@ -3331,13 +3620,13 @@ static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, | |||
3331 | * After a successful commit, the shadow ram is cleared and is ready for | 3620 | * After a successful commit, the shadow ram is cleared and is ready for |
3332 | * future writes. | 3621 | * future writes. |
3333 | **/ | 3622 | **/ |
3334 | static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) | 3623 | static s32 e1000_update_nvm_checksum_spt(struct e1000_hw *hw) |
3335 | { | 3624 | { |
3336 | struct e1000_nvm_info *nvm = &hw->nvm; | 3625 | struct e1000_nvm_info *nvm = &hw->nvm; |
3337 | struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; | 3626 | struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; |
3338 | u32 i, act_offset, new_bank_offset, old_bank_offset, bank; | 3627 | u32 i, act_offset, new_bank_offset, old_bank_offset, bank; |
3339 | s32 ret_val; | 3628 | s32 ret_val; |
3340 | u16 data; | 3629 | u32 dword = 0; |
3341 | 3630 | ||
3342 | ret_val = e1000e_update_nvm_checksum_generic(hw); | 3631 | ret_val = e1000e_update_nvm_checksum_generic(hw); |
3343 | if (ret_val) | 3632 | if (ret_val) |
@@ -3371,12 +3660,175 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) | |||
3371 | if (ret_val) | 3660 | if (ret_val) |
3372 | goto release; | 3661 | goto release; |
3373 | } | 3662 | } |
3374 | 3663 | for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i += 2) { | |
3375 | for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) { | ||
3376 | /* Determine whether to write the value stored | 3664 | /* Determine whether to write the value stored |
3377 | * in the other NVM bank or a modified value stored | 3665 | * in the other NVM bank or a modified value stored |
3378 | * in the shadow RAM | 3666 | * in the shadow RAM |
3379 | */ | 3667 | */ |
3668 | ret_val = e1000_read_flash_dword_ich8lan(hw, | ||
3669 | i + old_bank_offset, | ||
3670 | &dword); | ||
3671 | |||
3672 | if (dev_spec->shadow_ram[i].modified) { | ||
3673 | dword &= 0xffff0000; | ||
3674 | dword |= (dev_spec->shadow_ram[i].value & 0xffff); | ||
3675 | } | ||
3676 | if (dev_spec->shadow_ram[i + 1].modified) { | ||
3677 | dword &= 0x0000ffff; | ||
3678 | dword |= ((dev_spec->shadow_ram[i + 1].value & 0xffff) | ||
3679 | << 16); | ||
3680 | } | ||
3681 | if (ret_val) | ||
3682 | break; | ||
3683 | |||
3684 | /* If the word is 0x13, then make sure the signature bits | ||
3685 | * (15:14) are 11b until the commit has completed. | ||
3686 | * This will allow us to write 10b which indicates the | ||
3687 | * signature is valid. We want to do this after the write | ||
3688 | * has completed so that we don't mark the segment valid | ||
3689 | * while the write is still in progress | ||
3690 | */ | ||
3691 | if (i == E1000_ICH_NVM_SIG_WORD - 1) | ||
3692 | dword |= E1000_ICH_NVM_SIG_MASK << 16; | ||
3693 | |||
3694 | /* Convert offset to bytes. */ | ||
3695 | act_offset = (i + new_bank_offset) << 1; | ||
3696 | |||
3697 | usleep_range(100, 200); | ||
3698 | |||
3699 | /* Write the data to the new bank. Offset in words */ | ||
3700 | act_offset = i + new_bank_offset; | ||
3701 | ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, | ||
3702 | dword); | ||
3703 | if (ret_val) | ||
3704 | break; | ||
3705 | } | ||
3706 | |||
3707 | /* Don't bother writing the segment valid bits if sector | ||
3708 | * programming failed. | ||
3709 | */ | ||
3710 | if (ret_val) { | ||
3711 | /* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */ | ||
3712 | e_dbg("Flash commit failed.\n"); | ||
3713 | goto release; | ||
3714 | } | ||
3715 | |||
3716 | /* Finally validate the new segment by setting bit 15:14 | ||
3717 | * to 10b in word 0x13 , this can be done without an | ||
3718 | * erase as well since these bits are 11 to start with | ||
3719 | * and we need to change bit 14 to 0b | ||
3720 | */ | ||
3721 | act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD; | ||
3722 | |||
3723 | /*offset in words but we read dword */ | ||
3724 | --act_offset; | ||
3725 | ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword); | ||
3726 | |||
3727 | if (ret_val) | ||
3728 | goto release; | ||
3729 | |||
3730 | dword &= 0xBFFFFFFF; | ||
3731 | ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword); | ||
3732 | |||
3733 | if (ret_val) | ||
3734 | goto release; | ||
3735 | |||
3736 | /* And invalidate the previously valid segment by setting | ||
3737 | * its signature word (0x13) high_byte to 0b. This can be | ||
3738 | * done without an erase because flash erase sets all bits | ||
3739 | * to 1's. We can write 1's to 0's without an erase | ||
3740 | */ | ||
3741 | act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1; | ||
3742 | |||
3743 | /* offset in words but we read dword */ | ||
3744 | act_offset = old_bank_offset + E1000_ICH_NVM_SIG_WORD - 1; | ||
3745 | ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword); | ||
3746 | |||
3747 | if (ret_val) | ||
3748 | goto release; | ||
3749 | |||
3750 | dword &= 0x00FFFFFF; | ||
3751 | ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword); | ||
3752 | |||
3753 | if (ret_val) | ||
3754 | goto release; | ||
3755 | |||
3756 | /* Great! Everything worked, we can now clear the cached entries. */ | ||
3757 | for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) { | ||
3758 | dev_spec->shadow_ram[i].modified = false; | ||
3759 | dev_spec->shadow_ram[i].value = 0xFFFF; | ||
3760 | } | ||
3761 | |||
3762 | release: | ||
3763 | nvm->ops.release(hw); | ||
3764 | |||
3765 | /* Reload the EEPROM, or else modifications will not appear | ||
3766 | * until after the next adapter reset. | ||
3767 | */ | ||
3768 | if (!ret_val) { | ||
3769 | nvm->ops.reload(hw); | ||
3770 | usleep_range(10000, 20000); | ||
3771 | } | ||
3772 | |||
3773 | out: | ||
3774 | if (ret_val) | ||
3775 | e_dbg("NVM update error: %d\n", ret_val); | ||
3776 | |||
3777 | return ret_val; | ||
3778 | } | ||
3779 | |||
3780 | /** | ||
3781 | * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM | ||
3782 | * @hw: pointer to the HW structure | ||
3783 | * | ||
3784 | * The NVM checksum is updated by calling the generic update_nvm_checksum, | ||
3785 | * which writes the checksum to the shadow ram. The changes in the shadow | ||
3786 | * ram are then committed to the EEPROM by processing each bank at a time | ||
3787 | * checking for the modified bit and writing only the pending changes. | ||
3788 | * After a successful commit, the shadow ram is cleared and is ready for | ||
3789 | * future writes. | ||
3790 | **/ | ||
3791 | static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) | ||
3792 | { | ||
3793 | struct e1000_nvm_info *nvm = &hw->nvm; | ||
3794 | struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; | ||
3795 | u32 i, act_offset, new_bank_offset, old_bank_offset, bank; | ||
3796 | s32 ret_val; | ||
3797 | u16 data = 0; | ||
3798 | |||
3799 | ret_val = e1000e_update_nvm_checksum_generic(hw); | ||
3800 | if (ret_val) | ||
3801 | goto out; | ||
3802 | |||
3803 | if (nvm->type != e1000_nvm_flash_sw) | ||
3804 | goto out; | ||
3805 | |||
3806 | nvm->ops.acquire(hw); | ||
3807 | |||
3808 | /* We're writing to the opposite bank so if we're on bank 1, | ||
3809 | * write to bank 0 etc. We also need to erase the segment that | ||
3810 | * is going to be written | ||
3811 | */ | ||
3812 | ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); | ||
3813 | if (ret_val) { | ||
3814 | e_dbg("Could not detect valid bank, assuming bank 0\n"); | ||
3815 | bank = 0; | ||
3816 | } | ||
3817 | |||
3818 | if (bank == 0) { | ||
3819 | new_bank_offset = nvm->flash_bank_size; | ||
3820 | old_bank_offset = 0; | ||
3821 | ret_val = e1000_erase_flash_bank_ich8lan(hw, 1); | ||
3822 | if (ret_val) | ||
3823 | goto release; | ||
3824 | } else { | ||
3825 | old_bank_offset = nvm->flash_bank_size; | ||
3826 | new_bank_offset = 0; | ||
3827 | ret_val = e1000_erase_flash_bank_ich8lan(hw, 0); | ||
3828 | if (ret_val) | ||
3829 | goto release; | ||
3830 | } | ||
3831 | for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) { | ||
3380 | if (dev_spec->shadow_ram[i].modified) { | 3832 | if (dev_spec->shadow_ram[i].modified) { |
3381 | data = dev_spec->shadow_ram[i].value; | 3833 | data = dev_spec->shadow_ram[i].value; |
3382 | } else { | 3834 | } else { |
@@ -3498,6 +3950,7 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw) | |||
3498 | */ | 3950 | */ |
3499 | switch (hw->mac.type) { | 3951 | switch (hw->mac.type) { |
3500 | case e1000_pch_lpt: | 3952 | case e1000_pch_lpt: |
3953 | case e1000_pch_spt: | ||
3501 | word = NVM_COMPAT; | 3954 | word = NVM_COMPAT; |
3502 | valid_csum_mask = NVM_COMPAT_VALID_CSUM; | 3955 | valid_csum_mask = NVM_COMPAT_VALID_CSUM; |
3503 | break; | 3956 | break; |
@@ -3583,9 +4036,13 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, | |||
3583 | s32 ret_val; | 4036 | s32 ret_val; |
3584 | u8 count = 0; | 4037 | u8 count = 0; |
3585 | 4038 | ||
3586 | if (size < 1 || size > 2 || data > size * 0xff || | 4039 | if (hw->mac.type == e1000_pch_spt) { |
3587 | offset > ICH_FLASH_LINEAR_ADDR_MASK) | 4040 | if (size != 4 || offset > ICH_FLASH_LINEAR_ADDR_MASK) |
3588 | return -E1000_ERR_NVM; | 4041 | return -E1000_ERR_NVM; |
4042 | } else { | ||
4043 | if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK) | ||
4044 | return -E1000_ERR_NVM; | ||
4045 | } | ||
3589 | 4046 | ||
3590 | flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) + | 4047 | flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) + |
3591 | hw->nvm.flash_base_addr); | 4048 | hw->nvm.flash_base_addr); |
@@ -3596,12 +4053,25 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, | |||
3596 | ret_val = e1000_flash_cycle_init_ich8lan(hw); | 4053 | ret_val = e1000_flash_cycle_init_ich8lan(hw); |
3597 | if (ret_val) | 4054 | if (ret_val) |
3598 | break; | 4055 | break; |
4056 | /* In SPT, This register is in Lan memory space, not | ||
4057 | * flash. Therefore, only 32 bit access is supported | ||
4058 | */ | ||
4059 | if (hw->mac.type == e1000_pch_spt) | ||
4060 | hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16; | ||
4061 | else | ||
4062 | hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); | ||
3599 | 4063 | ||
3600 | hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); | ||
3601 | /* 0b/1b corresponds to 1 or 2 byte size, respectively. */ | 4064 | /* 0b/1b corresponds to 1 or 2 byte size, respectively. */ |
3602 | hsflctl.hsf_ctrl.fldbcount = size - 1; | 4065 | hsflctl.hsf_ctrl.fldbcount = size - 1; |
3603 | hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE; | 4066 | hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE; |
3604 | ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); | 4067 | /* In SPT, This register is in Lan memory space, |
4068 | * not flash. Therefore, only 32 bit access is | ||
4069 | * supported | ||
4070 | */ | ||
4071 | if (hw->mac.type == e1000_pch_spt) | ||
4072 | ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16); | ||
4073 | else | ||
4074 | ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); | ||
3605 | 4075 | ||
3606 | ew32flash(ICH_FLASH_FADDR, flash_linear_addr); | 4076 | ew32flash(ICH_FLASH_FADDR, flash_linear_addr); |
3607 | 4077 | ||
@@ -3640,6 +4110,90 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, | |||
3640 | } | 4110 | } |
3641 | 4111 | ||
3642 | /** | 4112 | /** |
4113 | * e1000_write_flash_data32_ich8lan - Writes 4 bytes to the NVM | ||
4114 | * @hw: pointer to the HW structure | ||
4115 | * @offset: The offset (in bytes) of the dwords to read. | ||
4116 | * @data: The 4 bytes to write to the NVM. | ||
4117 | * | ||
4118 | * Writes one/two/four bytes to the NVM using the flash access registers. | ||
4119 | **/ | ||
4120 | static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset, | ||
4121 | u32 data) | ||
4122 | { | ||
4123 | union ich8_hws_flash_status hsfsts; | ||
4124 | union ich8_hws_flash_ctrl hsflctl; | ||
4125 | u32 flash_linear_addr; | ||
4126 | s32 ret_val; | ||
4127 | u8 count = 0; | ||
4128 | |||
4129 | if (hw->mac.type == e1000_pch_spt) { | ||
4130 | if (offset > ICH_FLASH_LINEAR_ADDR_MASK) | ||
4131 | return -E1000_ERR_NVM; | ||
4132 | } | ||
4133 | flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) + | ||
4134 | hw->nvm.flash_base_addr); | ||
4135 | do { | ||
4136 | udelay(1); | ||
4137 | /* Steps */ | ||
4138 | ret_val = e1000_flash_cycle_init_ich8lan(hw); | ||
4139 | if (ret_val) | ||
4140 | break; | ||
4141 | |||
4142 | /* In SPT, This register is in Lan memory space, not | ||
4143 | * flash. Therefore, only 32 bit access is supported | ||
4144 | */ | ||
4145 | if (hw->mac.type == e1000_pch_spt) | ||
4146 | hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) | ||
4147 | >> 16; | ||
4148 | else | ||
4149 | hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); | ||
4150 | |||
4151 | hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1; | ||
4152 | hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE; | ||
4153 | |||
4154 | /* In SPT, This register is in Lan memory space, | ||
4155 | * not flash. Therefore, only 32 bit access is | ||
4156 | * supported | ||
4157 | */ | ||
4158 | if (hw->mac.type == e1000_pch_spt) | ||
4159 | ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16); | ||
4160 | else | ||
4161 | ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); | ||
4162 | |||
4163 | ew32flash(ICH_FLASH_FADDR, flash_linear_addr); | ||
4164 | |||
4165 | ew32flash(ICH_FLASH_FDATA0, data); | ||
4166 | |||
4167 | /* check if FCERR is set to 1 , if set to 1, clear it | ||
4168 | * and try the whole sequence a few more times else done | ||
4169 | */ | ||
4170 | ret_val = | ||
4171 | e1000_flash_cycle_ich8lan(hw, | ||
4172 | ICH_FLASH_WRITE_COMMAND_TIMEOUT); | ||
4173 | |||
4174 | if (!ret_val) | ||
4175 | break; | ||
4176 | |||
4177 | /* If we're here, then things are most likely | ||
4178 | * completely hosed, but if the error condition | ||
4179 | * is detected, it won't hurt to give it another | ||
4180 | * try...ICH_FLASH_CYCLE_REPEAT_COUNT times. | ||
4181 | */ | ||
4182 | hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); | ||
4183 | |||
4184 | if (hsfsts.hsf_status.flcerr) | ||
4185 | /* Repeat for some time before giving up. */ | ||
4186 | continue; | ||
4187 | if (!hsfsts.hsf_status.flcdone) { | ||
4188 | e_dbg("Timeout error - flash cycle did not complete.\n"); | ||
4189 | break; | ||
4190 | } | ||
4191 | } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); | ||
4192 | |||
4193 | return ret_val; | ||
4194 | } | ||
4195 | |||
4196 | /** | ||
3643 | * e1000_write_flash_byte_ich8lan - Write a single byte to NVM | 4197 | * e1000_write_flash_byte_ich8lan - Write a single byte to NVM |
3644 | * @hw: pointer to the HW structure | 4198 | * @hw: pointer to the HW structure |
3645 | * @offset: The index of the byte to read. | 4199 | * @offset: The index of the byte to read. |
@@ -3656,6 +4210,40 @@ static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, | |||
3656 | } | 4210 | } |
3657 | 4211 | ||
3658 | /** | 4212 | /** |
4213 | * e1000_retry_write_flash_dword_ich8lan - Writes a dword to NVM | ||
4214 | * @hw: pointer to the HW structure | ||
4215 | * @offset: The offset of the word to write. | ||
4216 | * @dword: The dword to write to the NVM. | ||
4217 | * | ||
4218 | * Writes a single dword to the NVM using the flash access registers. | ||
4219 | * Goes through a retry algorithm before giving up. | ||
4220 | **/ | ||
4221 | static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw, | ||
4222 | u32 offset, u32 dword) | ||
4223 | { | ||
4224 | s32 ret_val; | ||
4225 | u16 program_retries; | ||
4226 | |||
4227 | /* Must convert word offset into bytes. */ | ||
4228 | offset <<= 1; | ||
4229 | ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword); | ||
4230 | |||
4231 | if (!ret_val) | ||
4232 | return ret_val; | ||
4233 | for (program_retries = 0; program_retries < 100; program_retries++) { | ||
4234 | e_dbg("Retrying Byte %8.8X at offset %u\n", dword, offset); | ||
4235 | usleep_range(100, 200); | ||
4236 | ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword); | ||
4237 | if (!ret_val) | ||
4238 | break; | ||
4239 | } | ||
4240 | if (program_retries == 100) | ||
4241 | return -E1000_ERR_NVM; | ||
4242 | |||
4243 | return 0; | ||
4244 | } | ||
4245 | |||
4246 | /** | ||
3659 | * e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM | 4247 | * e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM |
3660 | * @hw: pointer to the HW structure | 4248 | * @hw: pointer to the HW structure |
3661 | * @offset: The offset of the byte to write. | 4249 | * @offset: The offset of the byte to write. |
@@ -3759,9 +4347,18 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank) | |||
3759 | /* Write a value 11 (block Erase) in Flash | 4347 | /* Write a value 11 (block Erase) in Flash |
3760 | * Cycle field in hw flash control | 4348 | * Cycle field in hw flash control |
3761 | */ | 4349 | */ |
3762 | hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); | 4350 | if (hw->mac.type == e1000_pch_spt) |
4351 | hsflctl.regval = | ||
4352 | er32flash(ICH_FLASH_HSFSTS) >> 16; | ||
4353 | else | ||
4354 | hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); | ||
4355 | |||
3763 | hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE; | 4356 | hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE; |
3764 | ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); | 4357 | if (hw->mac.type == e1000_pch_spt) |
4358 | ew32flash(ICH_FLASH_HSFSTS, | ||
4359 | hsflctl.regval << 16); | ||
4360 | else | ||
4361 | ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); | ||
3765 | 4362 | ||
3766 | /* Write the last 24 bits of an index within the | 4363 | /* Write the last 24 bits of an index within the |
3767 | * block into Flash Linear address field in Flash | 4364 | * block into Flash Linear address field in Flash |
@@ -4180,7 +4777,8 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw) | |||
4180 | ew32(RFCTL, reg); | 4777 | ew32(RFCTL, reg); |
4181 | 4778 | ||
4182 | /* Enable ECC on Lynxpoint */ | 4779 | /* Enable ECC on Lynxpoint */ |
4183 | if (hw->mac.type == e1000_pch_lpt) { | 4780 | if ((hw->mac.type == e1000_pch_lpt) || |
4781 | (hw->mac.type == e1000_pch_spt)) { | ||
4184 | reg = er32(PBECCSTS); | 4782 | reg = er32(PBECCSTS); |
4185 | reg |= E1000_PBECCSTS_ECC_ENABLE; | 4783 | reg |= E1000_PBECCSTS_ECC_ENABLE; |
4186 | ew32(PBECCSTS, reg); | 4784 | ew32(PBECCSTS, reg); |
@@ -4583,7 +5181,8 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw) | |||
4583 | if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) || | 5181 | if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) || |
4584 | (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) || | 5182 | (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) || |
4585 | (device_id == E1000_DEV_ID_PCH_I218_LM3) || | 5183 | (device_id == E1000_DEV_ID_PCH_I218_LM3) || |
4586 | (device_id == E1000_DEV_ID_PCH_I218_V3)) { | 5184 | (device_id == E1000_DEV_ID_PCH_I218_V3) || |
5185 | (hw->mac.type == e1000_pch_spt)) { | ||
4587 | u32 fextnvm6 = er32(FEXTNVM6); | 5186 | u32 fextnvm6 = er32(FEXTNVM6); |
4588 | 5187 | ||
4589 | ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK); | 5188 | ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK); |
@@ -5058,6 +5657,17 @@ static const struct e1000_nvm_operations ich8_nvm_ops = { | |||
5058 | .write = e1000_write_nvm_ich8lan, | 5657 | .write = e1000_write_nvm_ich8lan, |
5059 | }; | 5658 | }; |
5060 | 5659 | ||
5660 | static const struct e1000_nvm_operations spt_nvm_ops = { | ||
5661 | .acquire = e1000_acquire_nvm_ich8lan, | ||
5662 | .release = e1000_release_nvm_ich8lan, | ||
5663 | .read = e1000_read_nvm_spt, | ||
5664 | .update = e1000_update_nvm_checksum_spt, | ||
5665 | .reload = e1000e_reload_nvm_generic, | ||
5666 | .valid_led_default = e1000_valid_led_default_ich8lan, | ||
5667 | .validate = e1000_validate_nvm_checksum_ich8lan, | ||
5668 | .write = e1000_write_nvm_ich8lan, | ||
5669 | }; | ||
5670 | |||
5061 | const struct e1000_info e1000_ich8_info = { | 5671 | const struct e1000_info e1000_ich8_info = { |
5062 | .mac = e1000_ich8lan, | 5672 | .mac = e1000_ich8lan, |
5063 | .flags = FLAG_HAS_WOL | 5673 | .flags = FLAG_HAS_WOL |
@@ -5166,3 +5776,23 @@ const struct e1000_info e1000_pch_lpt_info = { | |||
5166 | .phy_ops = &ich8_phy_ops, | 5776 | .phy_ops = &ich8_phy_ops, |
5167 | .nvm_ops = &ich8_nvm_ops, | 5777 | .nvm_ops = &ich8_nvm_ops, |
5168 | }; | 5778 | }; |
5779 | |||
5780 | const struct e1000_info e1000_pch_spt_info = { | ||
5781 | .mac = e1000_pch_spt, | ||
5782 | .flags = FLAG_IS_ICH | ||
5783 | | FLAG_HAS_WOL | ||
5784 | | FLAG_HAS_HW_TIMESTAMP | ||
5785 | | FLAG_HAS_CTRLEXT_ON_LOAD | ||
5786 | | FLAG_HAS_AMT | ||
5787 | | FLAG_HAS_FLASH | ||
5788 | | FLAG_HAS_JUMBO_FRAMES | ||
5789 | | FLAG_APME_IN_WUC, | ||
5790 | .flags2 = FLAG2_HAS_PHY_STATS | ||
5791 | | FLAG2_HAS_EEE, | ||
5792 | .pba = 26, | ||
5793 | .max_hw_frame_size = 9018, | ||
5794 | .get_variants = e1000_get_variants_ich8lan, | ||
5795 | .mac_ops = &ich8_mac_ops, | ||
5796 | .phy_ops = &ich8_phy_ops, | ||
5797 | .nvm_ops = &spt_nvm_ops, | ||
5798 | }; | ||
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h index 8066a498eaac..770a573b9eea 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.h +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h | |||
@@ -95,9 +95,18 @@ | |||
95 | 95 | ||
96 | #define E1000_FEXTNVM6_REQ_PLL_CLK 0x00000100 | 96 | #define E1000_FEXTNVM6_REQ_PLL_CLK 0x00000100 |
97 | #define E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION 0x00000200 | 97 | #define E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION 0x00000200 |
98 | #define E1000_FEXTNVM6_K1_OFF_ENABLE 0x80000000 | ||
99 | /* bit for disabling packet buffer read */ | ||
100 | #define E1000_FEXTNVM7_DISABLE_PB_READ 0x00040000 | ||
98 | 101 | ||
99 | #define E1000_FEXTNVM7_DISABLE_SMB_PERST 0x00000020 | 102 | #define E1000_FEXTNVM7_DISABLE_SMB_PERST 0x00000020 |
100 | 103 | ||
104 | #define K1_ENTRY_LATENCY 0 | ||
105 | #define K1_MIN_TIME 1 | ||
106 | #define NVM_SIZE_MULTIPLIER 4096 /*multiplier for NVMS field */ | ||
107 | #define E1000_FLASH_BASE_ADDR 0xE000 /*offset of NVM access regs */ | ||
108 | #define E1000_CTRL_EXT_NVMVS 0x3 /*NVM valid sector */ | ||
109 | |||
101 | #define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL | 110 | #define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL |
102 | 111 | ||
103 | #define E1000_ICH_RAR_ENTRIES 7 | 112 | #define E1000_ICH_RAR_ENTRIES 7 |
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 1e8c40fd5c3d..6fa4fc05709e 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c | |||
@@ -70,6 +70,7 @@ static const struct e1000_info *e1000_info_tbl[] = { | |||
70 | [board_pchlan] = &e1000_pch_info, | 70 | [board_pchlan] = &e1000_pch_info, |
71 | [board_pch2lan] = &e1000_pch2_info, | 71 | [board_pch2lan] = &e1000_pch2_info, |
72 | [board_pch_lpt] = &e1000_pch_lpt_info, | 72 | [board_pch_lpt] = &e1000_pch_lpt_info, |
73 | [board_pch_spt] = &e1000_pch_spt_info, | ||
73 | }; | 74 | }; |
74 | 75 | ||
75 | struct e1000_reg_info { | 76 | struct e1000_reg_info { |
@@ -1796,7 +1797,8 @@ static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data) | |||
1796 | } | 1797 | } |
1797 | 1798 | ||
1798 | /* Reset on uncorrectable ECC error */ | 1799 | /* Reset on uncorrectable ECC error */ |
1799 | if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) { | 1800 | if ((icr & E1000_ICR_ECCER) && ((hw->mac.type == e1000_pch_lpt) || |
1801 | (hw->mac.type == e1000_pch_spt))) { | ||
1800 | u32 pbeccsts = er32(PBECCSTS); | 1802 | u32 pbeccsts = er32(PBECCSTS); |
1801 | 1803 | ||
1802 | adapter->corr_errors += | 1804 | adapter->corr_errors += |
@@ -1876,7 +1878,8 @@ static irqreturn_t e1000_intr(int __always_unused irq, void *data) | |||
1876 | } | 1878 | } |
1877 | 1879 | ||
1878 | /* Reset on uncorrectable ECC error */ | 1880 | /* Reset on uncorrectable ECC error */ |
1879 | if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) { | 1881 | if ((icr & E1000_ICR_ECCER) && ((hw->mac.type == e1000_pch_lpt) || |
1882 | (hw->mac.type == e1000_pch_spt))) { | ||
1880 | u32 pbeccsts = er32(PBECCSTS); | 1883 | u32 pbeccsts = er32(PBECCSTS); |
1881 | 1884 | ||
1882 | adapter->corr_errors += | 1885 | adapter->corr_errors += |
@@ -2257,7 +2260,8 @@ static void e1000_irq_enable(struct e1000_adapter *adapter) | |||
2257 | if (adapter->msix_entries) { | 2260 | if (adapter->msix_entries) { |
2258 | ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574); | 2261 | ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574); |
2259 | ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC); | 2262 | ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC); |
2260 | } else if (hw->mac.type == e1000_pch_lpt) { | 2263 | } else if ((hw->mac.type == e1000_pch_lpt) || |
2264 | (hw->mac.type == e1000_pch_spt)) { | ||
2261 | ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER); | 2265 | ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER); |
2262 | } else { | 2266 | } else { |
2263 | ew32(IMS, IMS_ENABLE_MASK); | 2267 | ew32(IMS, IMS_ENABLE_MASK); |
@@ -3014,6 +3018,19 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) | |||
3014 | ew32(TCTL, tctl); | 3018 | ew32(TCTL, tctl); |
3015 | 3019 | ||
3016 | hw->mac.ops.config_collision_dist(hw); | 3020 | hw->mac.ops.config_collision_dist(hw); |
3021 | |||
3022 | /* SPT Si errata workaround to avoid data corruption */ | ||
3023 | if (hw->mac.type == e1000_pch_spt) { | ||
3024 | u32 reg_val; | ||
3025 | |||
3026 | reg_val = er32(IOSFPC); | ||
3027 | reg_val |= E1000_RCTL_RDMTS_HEX; | ||
3028 | ew32(IOSFPC, reg_val); | ||
3029 | |||
3030 | reg_val = er32(TARC(0)); | ||
3031 | reg_val |= E1000_TARC0_CB_MULTIQ_3_REQ; | ||
3032 | ew32(TARC(0), reg_val); | ||
3033 | } | ||
3017 | } | 3034 | } |
3018 | 3035 | ||
3019 | /** | 3036 | /** |
@@ -3490,8 +3507,11 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca) | |||
3490 | struct e1000_hw *hw = &adapter->hw; | 3507 | struct e1000_hw *hw = &adapter->hw; |
3491 | u32 incvalue, incperiod, shift; | 3508 | u32 incvalue, incperiod, shift; |
3492 | 3509 | ||
3493 | /* Make sure clock is enabled on I217 before checking the frequency */ | 3510 | /* Make sure clock is enabled on I217/I218/I219 before checking |
3494 | if ((hw->mac.type == e1000_pch_lpt) && | 3511 | * the frequency |
3512 | */ | ||
3513 | if (((hw->mac.type == e1000_pch_lpt) || | ||
3514 | (hw->mac.type == e1000_pch_spt)) && | ||
3495 | !(er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_ENABLED) && | 3515 | !(er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_ENABLED) && |
3496 | !(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_ENABLED)) { | 3516 | !(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_ENABLED)) { |
3497 | u32 fextnvm7 = er32(FEXTNVM7); | 3517 | u32 fextnvm7 = er32(FEXTNVM7); |
@@ -3505,10 +3525,13 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca) | |||
3505 | switch (hw->mac.type) { | 3525 | switch (hw->mac.type) { |
3506 | case e1000_pch2lan: | 3526 | case e1000_pch2lan: |
3507 | case e1000_pch_lpt: | 3527 | case e1000_pch_lpt: |
3508 | /* On I217, the clock frequency is 25MHz or 96MHz as | 3528 | case e1000_pch_spt: |
3509 | * indicated by the System Clock Frequency Indication | 3529 | /* On I217, I218 and I219, the clock frequency is 25MHz |
3530 | * or 96MHz as indicated by the System Clock Frequency | ||
3531 | * Indication | ||
3510 | */ | 3532 | */ |
3511 | if ((hw->mac.type != e1000_pch_lpt) || | 3533 | if (((hw->mac.type != e1000_pch_lpt) && |
3534 | (hw->mac.type != e1000_pch_spt)) || | ||
3512 | (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) { | 3535 | (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) { |
3513 | /* Stable 96MHz frequency */ | 3536 | /* Stable 96MHz frequency */ |
3514 | incperiod = INCPERIOD_96MHz; | 3537 | incperiod = INCPERIOD_96MHz; |
@@ -3875,6 +3898,7 @@ void e1000e_reset(struct e1000_adapter *adapter) | |||
3875 | break; | 3898 | break; |
3876 | case e1000_pch2lan: | 3899 | case e1000_pch2lan: |
3877 | case e1000_pch_lpt: | 3900 | case e1000_pch_lpt: |
3901 | case e1000_pch_spt: | ||
3878 | fc->refresh_time = 0x0400; | 3902 | fc->refresh_time = 0x0400; |
3879 | 3903 | ||
3880 | if (adapter->netdev->mtu <= ETH_DATA_LEN) { | 3904 | if (adapter->netdev->mtu <= ETH_DATA_LEN) { |
@@ -4759,7 +4783,8 @@ static void e1000e_update_stats(struct e1000_adapter *adapter) | |||
4759 | adapter->stats.mgpdc += er32(MGTPDC); | 4783 | adapter->stats.mgpdc += er32(MGTPDC); |
4760 | 4784 | ||
4761 | /* Correctable ECC Errors */ | 4785 | /* Correctable ECC Errors */ |
4762 | if (hw->mac.type == e1000_pch_lpt) { | 4786 | if ((hw->mac.type == e1000_pch_lpt) || |
4787 | (hw->mac.type == e1000_pch_spt)) { | ||
4763 | u32 pbeccsts = er32(PBECCSTS); | 4788 | u32 pbeccsts = er32(PBECCSTS); |
4764 | 4789 | ||
4765 | adapter->corr_errors += | 4790 | adapter->corr_errors += |
@@ -6144,7 +6169,8 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) | |||
6144 | 6169 | ||
6145 | if (adapter->hw.phy.type == e1000_phy_igp_3) { | 6170 | if (adapter->hw.phy.type == e1000_phy_igp_3) { |
6146 | e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); | 6171 | e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); |
6147 | } else if (hw->mac.type == e1000_pch_lpt) { | 6172 | } else if ((hw->mac.type == e1000_pch_lpt) || |
6173 | (hw->mac.type == e1000_pch_spt)) { | ||
6148 | if (!(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC))) | 6174 | if (!(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC))) |
6149 | /* ULP does not support wake from unicast, multicast | 6175 | /* ULP does not support wake from unicast, multicast |
6150 | * or broadcast. | 6176 | * or broadcast. |
@@ -7213,6 +7239,10 @@ static const struct pci_device_id e1000_pci_tbl[] = { | |||
7213 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V2), board_pch_lpt }, | 7239 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V2), board_pch_lpt }, |
7214 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_LM3), board_pch_lpt }, | 7240 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_LM3), board_pch_lpt }, |
7215 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V3), board_pch_lpt }, | 7241 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V3), board_pch_lpt }, |
7242 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_LM), board_pch_spt }, | ||
7243 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V), board_pch_spt }, | ||
7244 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_LM2), board_pch_spt }, | ||
7245 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V2), board_pch_spt }, | ||
7216 | 7246 | ||
7217 | { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */ | 7247 | { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */ |
7218 | }; | 7248 | }; |
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c index 978ef9c4a043..1490f1e8d6aa 100644 --- a/drivers/net/ethernet/intel/e1000e/ptp.c +++ b/drivers/net/ethernet/intel/e1000e/ptp.c | |||
@@ -221,7 +221,9 @@ void e1000e_ptp_init(struct e1000_adapter *adapter) | |||
221 | switch (hw->mac.type) { | 221 | switch (hw->mac.type) { |
222 | case e1000_pch2lan: | 222 | case e1000_pch2lan: |
223 | case e1000_pch_lpt: | 223 | case e1000_pch_lpt: |
224 | if ((hw->mac.type != e1000_pch_lpt) || | 224 | case e1000_pch_spt: |
225 | if (((hw->mac.type != e1000_pch_lpt) && | ||
226 | (hw->mac.type != e1000_pch_spt)) || | ||
225 | (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) { | 227 | (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) { |
226 | adapter->ptp_clock_info.max_adj = 24000000 - 1; | 228 | adapter->ptp_clock_info.max_adj = 24000000 - 1; |
227 | break; | 229 | break; |
diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h index ea235bbe50d3..85eefc4832ba 100644 --- a/drivers/net/ethernet/intel/e1000e/regs.h +++ b/drivers/net/ethernet/intel/e1000e/regs.h | |||
@@ -38,6 +38,7 @@ | |||
38 | #define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */ | 38 | #define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */ |
39 | #define E1000_FEXTNVM6 0x00010 /* Future Extended NVM 6 - RW */ | 39 | #define E1000_FEXTNVM6 0x00010 /* Future Extended NVM 6 - RW */ |
40 | #define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */ | 40 | #define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */ |
41 | #define E1000_PCIEANACFG 0x00F18 /* PCIE Analog Config */ | ||
41 | #define E1000_FCT 0x00030 /* Flow Control Type - RW */ | 42 | #define E1000_FCT 0x00030 /* Flow Control Type - RW */ |
42 | #define E1000_VET 0x00038 /* VLAN Ether Type - RW */ | 43 | #define E1000_VET 0x00038 /* VLAN Ether Type - RW */ |
43 | #define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */ | 44 | #define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */ |
@@ -67,6 +68,7 @@ | |||
67 | #define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ | 68 | #define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ |
68 | #define E1000_PBS 0x01008 /* Packet Buffer Size */ | 69 | #define E1000_PBS 0x01008 /* Packet Buffer Size */ |
69 | #define E1000_PBECCSTS 0x0100C /* Packet Buffer ECC Status - RW */ | 70 | #define E1000_PBECCSTS 0x0100C /* Packet Buffer ECC Status - RW */ |
71 | #define E1000_IOSFPC 0x00F28 /* TX corrupted data */ | ||
70 | #define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ | 72 | #define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ |
71 | #define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ | 73 | #define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ |
72 | #define E1000_FLOP 0x0103C /* FLASH Opcode Register */ | 74 | #define E1000_FLOP 0x0103C /* FLASH Opcode Register */ |
@@ -121,6 +123,7 @@ | |||
121 | (0x054E4 + ((_i - 16) * 8))) | 123 | (0x054E4 + ((_i - 16) * 8))) |
122 | #define E1000_SHRAL(_i) (0x05438 + ((_i) * 8)) | 124 | #define E1000_SHRAL(_i) (0x05438 + ((_i) * 8)) |
123 | #define E1000_SHRAH(_i) (0x0543C + ((_i) * 8)) | 125 | #define E1000_SHRAH(_i) (0x0543C + ((_i) * 8)) |
126 | #define E1000_TARC0_CB_MULTIQ_3_REQ (1 << 28 | 1 << 29) | ||
124 | #define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */ | 127 | #define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */ |
125 | #define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */ | 128 | #define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */ |
126 | #define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */ | 129 | #define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */ |
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h index 42eb4344a9dc..59edfd4446cd 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k.h | |||
@@ -439,6 +439,7 @@ extern char fm10k_driver_name[]; | |||
439 | extern const char fm10k_driver_version[]; | 439 | extern const char fm10k_driver_version[]; |
440 | int fm10k_init_queueing_scheme(struct fm10k_intfc *interface); | 440 | int fm10k_init_queueing_scheme(struct fm10k_intfc *interface); |
441 | void fm10k_clear_queueing_scheme(struct fm10k_intfc *interface); | 441 | void fm10k_clear_queueing_scheme(struct fm10k_intfc *interface); |
442 | __be16 fm10k_tx_encap_offload(struct sk_buff *skb); | ||
442 | netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb, | 443 | netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb, |
443 | struct fm10k_ring *tx_ring); | 444 | struct fm10k_ring *tx_ring); |
444 | void fm10k_tx_timeout_reset(struct fm10k_intfc *interface); | 445 | void fm10k_tx_timeout_reset(struct fm10k_intfc *interface); |
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_common.c b/drivers/net/ethernet/intel/fm10k/fm10k_common.c index bf19dccd4288..6cfae6ac04ea 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_common.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_common.c | |||
@@ -398,7 +398,7 @@ static void fm10k_update_hw_stats_rx_q(struct fm10k_hw *hw, | |||
398 | /* Retrieve RX Owner Data */ | 398 | /* Retrieve RX Owner Data */ |
399 | id_rx = fm10k_read_reg(hw, FM10K_RXQCTL(idx)); | 399 | id_rx = fm10k_read_reg(hw, FM10K_RXQCTL(idx)); |
400 | 400 | ||
401 | /* Process RX Ring*/ | 401 | /* Process RX Ring */ |
402 | do { | 402 | do { |
403 | rx_drops = fm10k_read_hw_stats_32b(hw, FM10K_QPRDC(idx), | 403 | rx_drops = fm10k_read_hw_stats_32b(hw, FM10K_QPRDC(idx), |
404 | &q->rx_drops); | 404 | &q->rx_drops); |
@@ -466,7 +466,6 @@ void fm10k_update_hw_stats_q(struct fm10k_hw *hw, struct fm10k_hw_stats_q *q, | |||
466 | * Function invalidates the index values for the queues so any updates that | 466 | * Function invalidates the index values for the queues so any updates that |
467 | * may have happened are ignored and the base for the queue stats is reset. | 467 | * may have happened are ignored and the base for the queue stats is reset. |
468 | **/ | 468 | **/ |
469 | |||
470 | void fm10k_unbind_hw_stats_q(struct fm10k_hw_stats_q *q, u32 idx, u32 count) | 469 | void fm10k_unbind_hw_stats_q(struct fm10k_hw_stats_q *q, u32 idx, u32 count) |
471 | { | 470 | { |
472 | u32 i; | 471 | u32 i; |
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c index 651f53bc7376..33b6106c764b 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c | |||
@@ -1019,7 +1019,7 @@ static int fm10k_set_channels(struct net_device *dev, | |||
1019 | } | 1019 | } |
1020 | 1020 | ||
1021 | static int fm10k_get_ts_info(struct net_device *dev, | 1021 | static int fm10k_get_ts_info(struct net_device *dev, |
1022 | struct ethtool_ts_info *info) | 1022 | struct ethtool_ts_info *info) |
1023 | { | 1023 | { |
1024 | struct fm10k_intfc *interface = netdev_priv(dev); | 1024 | struct fm10k_intfc *interface = netdev_priv(dev); |
1025 | 1025 | ||
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c index 060190864238..a02308f5048f 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c | |||
@@ -275,7 +275,7 @@ s32 fm10k_iov_update_pvid(struct fm10k_intfc *interface, u16 glort, u16 pvid) | |||
275 | if (vf_idx >= iov_data->num_vfs) | 275 | if (vf_idx >= iov_data->num_vfs) |
276 | return FM10K_ERR_PARAM; | 276 | return FM10K_ERR_PARAM; |
277 | 277 | ||
278 | /* determine if an update has occured and if so notify the VF */ | 278 | /* determine if an update has occurred and if so notify the VF */ |
279 | vf_info = &iov_data->vf_info[vf_idx]; | 279 | vf_info = &iov_data->vf_info[vf_idx]; |
280 | if (vf_info->sw_vid != pvid) { | 280 | if (vf_info->sw_vid != pvid) { |
281 | vf_info->sw_vid = pvid; | 281 | vf_info->sw_vid = pvid; |
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index 84ab9eea2768..c325bc0c8338 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c | |||
@@ -711,10 +711,6 @@ static struct ethhdr *fm10k_gre_is_nvgre(struct sk_buff *skb) | |||
711 | if (nvgre_hdr->flags & FM10K_NVGRE_RESERVED0_FLAGS) | 711 | if (nvgre_hdr->flags & FM10K_NVGRE_RESERVED0_FLAGS) |
712 | return NULL; | 712 | return NULL; |
713 | 713 | ||
714 | /* verify protocol is transparent Ethernet bridging */ | ||
715 | if (nvgre_hdr->proto != htons(ETH_P_TEB)) | ||
716 | return NULL; | ||
717 | |||
718 | /* report start of ethernet header */ | 714 | /* report start of ethernet header */ |
719 | if (nvgre_hdr->flags & NVGRE_TNI) | 715 | if (nvgre_hdr->flags & NVGRE_TNI) |
720 | return (struct ethhdr *)(nvgre_hdr + 1); | 716 | return (struct ethhdr *)(nvgre_hdr + 1); |
@@ -722,15 +718,13 @@ static struct ethhdr *fm10k_gre_is_nvgre(struct sk_buff *skb) | |||
722 | return (struct ethhdr *)(&nvgre_hdr->tni); | 718 | return (struct ethhdr *)(&nvgre_hdr->tni); |
723 | } | 719 | } |
724 | 720 | ||
725 | static __be16 fm10k_tx_encap_offload(struct sk_buff *skb) | 721 | __be16 fm10k_tx_encap_offload(struct sk_buff *skb) |
726 | { | 722 | { |
723 | u8 l4_hdr = 0, inner_l4_hdr = 0, inner_l4_hlen; | ||
727 | struct ethhdr *eth_hdr; | 724 | struct ethhdr *eth_hdr; |
728 | u8 l4_hdr = 0; | ||
729 | 725 | ||
730 | /* fm10k supports 184 octets of outer+inner headers. Minus 20 for inner L4. */ | 726 | if (skb->inner_protocol_type != ENCAP_TYPE_ETHER || |
731 | #define FM10K_MAX_ENCAP_TRANSPORT_OFFSET 164 | 727 | skb->inner_protocol != htons(ETH_P_TEB)) |
732 | if (skb_inner_transport_header(skb) - skb_mac_header(skb) > | ||
733 | FM10K_MAX_ENCAP_TRANSPORT_OFFSET) | ||
734 | return 0; | 728 | return 0; |
735 | 729 | ||
736 | switch (vlan_get_protocol(skb)) { | 730 | switch (vlan_get_protocol(skb)) { |
@@ -760,12 +754,33 @@ static __be16 fm10k_tx_encap_offload(struct sk_buff *skb) | |||
760 | 754 | ||
761 | switch (eth_hdr->h_proto) { | 755 | switch (eth_hdr->h_proto) { |
762 | case htons(ETH_P_IP): | 756 | case htons(ETH_P_IP): |
757 | inner_l4_hdr = inner_ip_hdr(skb)->protocol; | ||
758 | break; | ||
763 | case htons(ETH_P_IPV6): | 759 | case htons(ETH_P_IPV6): |
760 | inner_l4_hdr = inner_ipv6_hdr(skb)->nexthdr; | ||
764 | break; | 761 | break; |
765 | default: | 762 | default: |
766 | return 0; | 763 | return 0; |
767 | } | 764 | } |
768 | 765 | ||
766 | switch (inner_l4_hdr) { | ||
767 | case IPPROTO_TCP: | ||
768 | inner_l4_hlen = inner_tcp_hdrlen(skb); | ||
769 | break; | ||
770 | case IPPROTO_UDP: | ||
771 | inner_l4_hlen = 8; | ||
772 | break; | ||
773 | default: | ||
774 | return 0; | ||
775 | } | ||
776 | |||
777 | /* The hardware allows tunnel offloads only if the combined inner and | ||
778 | * outer header is 184 bytes or less | ||
779 | */ | ||
780 | if (skb_inner_transport_header(skb) + inner_l4_hlen - | ||
781 | skb_mac_header(skb) > FM10K_TUNNEL_HEADER_LENGTH) | ||
782 | return 0; | ||
783 | |||
769 | return eth_hdr->h_proto; | 784 | return eth_hdr->h_proto; |
770 | } | 785 | } |
771 | 786 | ||
@@ -934,10 +949,10 @@ static int __fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size) | |||
934 | { | 949 | { |
935 | netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); | 950 | netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); |
936 | 951 | ||
952 | /* Memory barrier before checking head and tail */ | ||
937 | smp_mb(); | 953 | smp_mb(); |
938 | 954 | ||
939 | /* We need to check again in a case another CPU has just | 955 | /* Check again in a case another CPU has just made room available */ |
940 | * made room available. */ | ||
941 | if (likely(fm10k_desc_unused(tx_ring) < size)) | 956 | if (likely(fm10k_desc_unused(tx_ring) < size)) |
942 | return -EBUSY; | 957 | return -EBUSY; |
943 | 958 | ||
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c index 9f5457c9e627..14ee696e9830 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c | |||
@@ -72,7 +72,7 @@ static bool fm10k_fifo_empty(struct fm10k_mbx_fifo *fifo) | |||
72 | * @fifo: pointer to FIFO | 72 | * @fifo: pointer to FIFO |
73 | * @offset: offset to add to head | 73 | * @offset: offset to add to head |
74 | * | 74 | * |
75 | * This function returns the indicies into the fifo based on head + offset | 75 | * This function returns the indices into the fifo based on head + offset |
76 | **/ | 76 | **/ |
77 | static u16 fm10k_fifo_head_offset(struct fm10k_mbx_fifo *fifo, u16 offset) | 77 | static u16 fm10k_fifo_head_offset(struct fm10k_mbx_fifo *fifo, u16 offset) |
78 | { | 78 | { |
@@ -84,7 +84,7 @@ static u16 fm10k_fifo_head_offset(struct fm10k_mbx_fifo *fifo, u16 offset) | |||
84 | * @fifo: pointer to FIFO | 84 | * @fifo: pointer to FIFO |
85 | * @offset: offset to add to tail | 85 | * @offset: offset to add to tail |
86 | * | 86 | * |
87 | * This function returns the indicies into the fifo based on tail + offset | 87 | * This function returns the indices into the fifo based on tail + offset |
88 | **/ | 88 | **/ |
89 | static u16 fm10k_fifo_tail_offset(struct fm10k_mbx_fifo *fifo, u16 offset) | 89 | static u16 fm10k_fifo_tail_offset(struct fm10k_mbx_fifo *fifo, u16 offset) |
90 | { | 90 | { |
@@ -326,7 +326,7 @@ static u16 fm10k_mbx_validate_msg_size(struct fm10k_mbx_info *mbx, u16 len) | |||
326 | * fm10k_mbx_write_copy - pulls data off of Tx FIFO and places it in mbmem | 326 | * fm10k_mbx_write_copy - pulls data off of Tx FIFO and places it in mbmem |
327 | * @mbx: pointer to mailbox | 327 | * @mbx: pointer to mailbox |
328 | * | 328 | * |
329 | * This function will take a seciton of the Rx FIFO and copy it into the | 329 | * This function will take a section of the Rx FIFO and copy it into the |
330 | * mailbox memory. The offset in mbmem is based on the lower bits of the | 330 | * mailbox memory. The offset in mbmem is based on the lower bits of the
331 | * tail and len determines the length to copy. | 331 | * tail and len determines the length to copy.
332 | **/ | 332 | **/
@@ -418,7 +418,7 @@ static void fm10k_mbx_pull_head(struct fm10k_hw *hw, | |||
418 | * @hw: pointer to hardware structure | 418 | * @hw: pointer to hardware structure |
419 | * @mbx: pointer to mailbox | 419 | * @mbx: pointer to mailbox |
420 | * | 420 | * |
421 | * This function will take a seciton of the mailbox memory and copy it | 421 | * This function will take a section of the mailbox memory and copy it |
422 | * into the Rx FIFO. The offset is based on the lower bits of the | 422 | * into the Rx FIFO. The offset is based on the lower bits of the |
423 | * head and len determines the length to copy. | 423 | * head and len determines the length to copy. |
424 | **/ | 424 | **/ |
@@ -464,7 +464,7 @@ static void fm10k_mbx_read_copy(struct fm10k_hw *hw, | |||
464 | * @tail: tail index of message | 464 | * @tail: tail index of message |
465 | * | 465 | * |
466 | * This function will first validate the tail index and size for the | 466 | * This function will first validate the tail index and size for the |
467 | * incoming message. It then updates the acknowlegment number and | 467 | * incoming message. It then updates the acknowledgment number and |
468 | * copies the data into the FIFO. It will return the number of messages | 468 | * copies the data into the FIFO. It will return the number of messages |
469 | * dequeued on success and a negative value on error. | 469 | * dequeued on success and a negative value on error. |
470 | **/ | 470 | **/ |
@@ -761,7 +761,7 @@ static s32 fm10k_mbx_enqueue_tx(struct fm10k_hw *hw, | |||
761 | err = fm10k_fifo_enqueue(&mbx->tx, msg); | 761 | err = fm10k_fifo_enqueue(&mbx->tx, msg); |
762 | } | 762 | } |
763 | 763 | ||
764 | /* if we failed trhead the error */ | 764 | /* if we failed treat the error */ |
765 | if (err) { | 765 | if (err) { |
766 | mbx->timeout = 0; | 766 | mbx->timeout = 0; |
767 | mbx->tx_busy++; | 767 | mbx->tx_busy++; |
@@ -815,7 +815,7 @@ static void fm10k_mbx_write(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx) | |||
815 | { | 815 | { |
816 | u32 mbmem = mbx->mbmem_reg; | 816 | u32 mbmem = mbx->mbmem_reg; |
817 | 817 | ||
818 | /* write new msg header to notify recepient of change */ | 818 | /* write new msg header to notify recipient of change */ |
819 | fm10k_write_reg(hw, mbmem, mbx->mbx_hdr); | 819 | fm10k_write_reg(hw, mbmem, mbx->mbx_hdr); |
820 | 820 | ||
821 | /* write mailbox to sent interrupt */ | 821 | /* write mailbox to sent interrupt */ |
@@ -1251,7 +1251,7 @@ static s32 fm10k_mbx_process_error(struct fm10k_hw *hw, | |||
1251 | /* we will need to pull all of the fields for verification */ | 1251 | /* we will need to pull all of the fields for verification */ |
1252 | head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD); | 1252 | head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD); |
1253 | 1253 | ||
1254 | /* we only have lower 10 bits of error number os add upper bits */ | 1254 | /* we only have lower 10 bits of error number so add upper bits */ |
1255 | err_no = FM10K_MSG_HDR_FIELD_GET(*hdr, ERR_NO); | 1255 | err_no = FM10K_MSG_HDR_FIELD_GET(*hdr, ERR_NO); |
1256 | err_no |= ~FM10K_MSG_HDR_MASK(ERR_NO); | 1256 | err_no |= ~FM10K_MSG_HDR_MASK(ERR_NO); |
1257 | 1257 | ||
@@ -1548,7 +1548,7 @@ s32 fm10k_pfvf_mbx_init(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx, | |||
1548 | mbx->timeout = 0; | 1548 | mbx->timeout = 0; |
1549 | mbx->udelay = FM10K_MBX_INIT_DELAY; | 1549 | mbx->udelay = FM10K_MBX_INIT_DELAY; |
1550 | 1550 | ||
1551 | /* initalize tail and head */ | 1551 | /* initialize tail and head */ |
1552 | mbx->tail = 1; | 1552 | mbx->tail = 1; |
1553 | mbx->head = 1; | 1553 | mbx->head = 1; |
1554 | 1554 | ||
@@ -1627,7 +1627,7 @@ static void fm10k_sm_mbx_connect_reset(struct fm10k_mbx_info *mbx) | |||
1627 | mbx->local = FM10K_SM_MBX_VERSION; | 1627 | mbx->local = FM10K_SM_MBX_VERSION; |
1628 | mbx->remote = 0; | 1628 | mbx->remote = 0; |
1629 | 1629 | ||
1630 | /* initalize tail and head */ | 1630 | /* initialize tail and head */ |
1631 | mbx->tail = 1; | 1631 | mbx->tail = 1; |
1632 | mbx->head = 1; | 1632 | mbx->head = 1; |
1633 | 1633 | ||
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index cfde8bac1aeb..d5b303dad95e 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | |||
@@ -356,7 +356,7 @@ static void fm10k_free_all_rx_resources(struct fm10k_intfc *interface) | |||
356 | * fm10k_request_glort_range - Request GLORTs for use in configuring rules | 356 | * fm10k_request_glort_range - Request GLORTs for use in configuring rules |
357 | * @interface: board private structure | 357 | * @interface: board private structure |
358 | * | 358 | * |
359 | * This function allocates a range of glorts for this inteface to use. | 359 | * This function allocates a range of glorts for this interface to use. |
360 | **/ | 360 | **/ |
361 | static void fm10k_request_glort_range(struct fm10k_intfc *interface) | 361 | static void fm10k_request_glort_range(struct fm10k_intfc *interface) |
362 | { | 362 | { |
@@ -781,7 +781,7 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set) | |||
781 | 781 | ||
782 | fm10k_mbx_lock(interface); | 782 | fm10k_mbx_lock(interface); |
783 | 783 | ||
784 | /* only need to update the VLAN if not in promiscous mode */ | 784 | /* only need to update the VLAN if not in promiscuous mode */ |
785 | if (!(netdev->flags & IFF_PROMISC)) { | 785 | if (!(netdev->flags & IFF_PROMISC)) { |
786 | err = hw->mac.ops.update_vlan(hw, vid, 0, set); | 786 | err = hw->mac.ops.update_vlan(hw, vid, 0, set); |
787 | if (err) | 787 | if (err) |
@@ -970,7 +970,7 @@ static void fm10k_set_rx_mode(struct net_device *dev) | |||
970 | 970 | ||
971 | fm10k_mbx_lock(interface); | 971 | fm10k_mbx_lock(interface); |
972 | 972 | ||
973 | /* syncronize all of the addresses */ | 973 | /* synchronize all of the addresses */ |
974 | if (xcast_mode != FM10K_XCAST_MODE_PROMISC) { | 974 | if (xcast_mode != FM10K_XCAST_MODE_PROMISC) { |
975 | __dev_uc_sync(dev, fm10k_uc_sync, fm10k_uc_unsync); | 975 | __dev_uc_sync(dev, fm10k_uc_sync, fm10k_uc_unsync); |
976 | if (xcast_mode != FM10K_XCAST_MODE_ALLMULTI) | 976 | if (xcast_mode != FM10K_XCAST_MODE_ALLMULTI) |
@@ -1051,7 +1051,7 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface) | |||
1051 | vid, true, 0); | 1051 | vid, true, 0); |
1052 | } | 1052 | } |
1053 | 1053 | ||
1054 | /* syncronize all of the addresses */ | 1054 | /* synchronize all of the addresses */ |
1055 | if (xcast_mode != FM10K_XCAST_MODE_PROMISC) { | 1055 | if (xcast_mode != FM10K_XCAST_MODE_PROMISC) { |
1056 | __dev_uc_sync(netdev, fm10k_uc_sync, fm10k_uc_unsync); | 1056 | __dev_uc_sync(netdev, fm10k_uc_sync, fm10k_uc_unsync); |
1057 | if (xcast_mode != FM10K_XCAST_MODE_ALLMULTI) | 1057 | if (xcast_mode != FM10K_XCAST_MODE_ALLMULTI) |
@@ -1350,6 +1350,16 @@ static void fm10k_dfwd_del_station(struct net_device *dev, void *priv) | |||
1350 | } | 1350 | } |
1351 | } | 1351 | } |
1352 | 1352 | ||
1353 | static netdev_features_t fm10k_features_check(struct sk_buff *skb, | ||
1354 | struct net_device *dev, | ||
1355 | netdev_features_t features) | ||
1356 | { | ||
1357 | if (!skb->encapsulation || fm10k_tx_encap_offload(skb)) | ||
1358 | return features; | ||
1359 | |||
1360 | return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK); | ||
1361 | } | ||
1362 | |||
1353 | static const struct net_device_ops fm10k_netdev_ops = { | 1363 | static const struct net_device_ops fm10k_netdev_ops = { |
1354 | .ndo_open = fm10k_open, | 1364 | .ndo_open = fm10k_open, |
1355 | .ndo_stop = fm10k_close, | 1365 | .ndo_stop = fm10k_close, |
@@ -1372,6 +1382,7 @@ static const struct net_device_ops fm10k_netdev_ops = { | |||
1372 | .ndo_do_ioctl = fm10k_ioctl, | 1382 | .ndo_do_ioctl = fm10k_ioctl, |
1373 | .ndo_dfwd_add_station = fm10k_dfwd_add_station, | 1383 | .ndo_dfwd_add_station = fm10k_dfwd_add_station, |
1374 | .ndo_dfwd_del_station = fm10k_dfwd_del_station, | 1384 | .ndo_dfwd_del_station = fm10k_dfwd_del_station, |
1385 | .ndo_features_check = fm10k_features_check, | ||
1375 | }; | 1386 | }; |
1376 | 1387 | ||
1377 | #define DEFAULT_DEBUG_LEVEL_SHIFT 3 | 1388 | #define DEFAULT_DEBUG_LEVEL_SHIFT 3 |
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index 4f5892cc32d7..8978d55a1c51 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c | |||
@@ -648,7 +648,7 @@ static void fm10k_configure_rx_ring(struct fm10k_intfc *interface, | |||
648 | /* Configure the Rx buffer size for one buff without split */ | 648 | /* Configure the Rx buffer size for one buff without split */ |
649 | srrctl |= FM10K_RX_BUFSZ >> FM10K_SRRCTL_BSIZEPKT_SHIFT; | 649 | srrctl |= FM10K_RX_BUFSZ >> FM10K_SRRCTL_BSIZEPKT_SHIFT; |
650 | 650 | ||
651 | /* Configure the Rx ring to supress loopback packets */ | 651 | /* Configure the Rx ring to suppress loopback packets */ |
652 | srrctl |= FM10K_SRRCTL_LOOPBACK_SUPPRESS; | 652 | srrctl |= FM10K_SRRCTL_LOOPBACK_SUPPRESS; |
653 | fm10k_write_reg(hw, FM10K_SRRCTL(reg_idx), srrctl); | 653 | fm10k_write_reg(hw, FM10K_SRRCTL(reg_idx), srrctl); |
654 | 654 | ||
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c index 7e4711958e46..159cd8463800 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c | |||
@@ -234,8 +234,7 @@ static s32 fm10k_update_vlan_pf(struct fm10k_hw *hw, u32 vid, u8 vsi, bool set) | |||
234 | vid = (vid << 17) >> 17; | 234 | vid = (vid << 17) >> 17; |
235 | 235 | ||
236 | /* verify the reserved 0 fields are 0 */ | 236 | /* verify the reserved 0 fields are 0 */ |
237 | if (len >= FM10K_VLAN_TABLE_VID_MAX || | 237 | if (len >= FM10K_VLAN_TABLE_VID_MAX || vid >= FM10K_VLAN_TABLE_VID_MAX) |
238 | vid >= FM10K_VLAN_TABLE_VID_MAX) | ||
239 | return FM10K_ERR_PARAM; | 238 | return FM10K_ERR_PARAM; |
240 | 239 | ||
241 | /* Loop through the table updating all required VLANs */ | 240 | /* Loop through the table updating all required VLANs */ |
@@ -312,7 +311,7 @@ bool fm10k_glort_valid_pf(struct fm10k_hw *hw, u16 glort) | |||
312 | } | 311 | } |
313 | 312 | ||
314 | /** | 313 | /** |
315 | * fm10k_update_uc_addr_pf - Update device unicast addresss | 314 | * fm10k_update_xc_addr_pf - Update device addresses |
316 | * @hw: pointer to the HW structure | 315 | * @hw: pointer to the HW structure |
317 | * @glort: base resource tag for this request | 316 | * @glort: base resource tag for this request |
318 | * @mac: MAC address to add/remove from table | 317 | * @mac: MAC address to add/remove from table |
@@ -356,7 +355,7 @@ static s32 fm10k_update_xc_addr_pf(struct fm10k_hw *hw, u16 glort, | |||
356 | } | 355 | } |
357 | 356 | ||
358 | /** | 357 | /** |
359 | * fm10k_update_uc_addr_pf - Update device unicast addresss | 358 | * fm10k_update_uc_addr_pf - Update device unicast addresses |
360 | * @hw: pointer to the HW structure | 359 | * @hw: pointer to the HW structure |
361 | * @glort: base resource tag for this request | 360 | * @glort: base resource tag for this request |
362 | * @mac: MAC address to add/remove from table | 361 | * @mac: MAC address to add/remove from table |
@@ -454,7 +453,7 @@ static void fm10k_update_int_moderator_pf(struct fm10k_hw *hw) | |||
454 | break; | 453 | break; |
455 | } | 454 | } |
456 | 455 | ||
457 | /* always reset VFITR2[0] to point to last enabled PF vector*/ | 456 | /* always reset VFITR2[0] to point to last enabled PF vector */ |
458 | fm10k_write_reg(hw, FM10K_ITR2(FM10K_ITR_REG_COUNT_PF), i); | 457 | fm10k_write_reg(hw, FM10K_ITR2(FM10K_ITR_REG_COUNT_PF), i); |
459 | 458 | ||
460 | /* reset ITR2[0] to point to last enabled PF vector */ | 459 | /* reset ITR2[0] to point to last enabled PF vector */ |
@@ -812,7 +811,7 @@ static s32 fm10k_iov_assign_int_moderator_pf(struct fm10k_hw *hw, u16 vf_idx) | |||
812 | if (vf_idx >= hw->iov.num_vfs) | 811 | if (vf_idx >= hw->iov.num_vfs) |
813 | return FM10K_ERR_PARAM; | 812 | return FM10K_ERR_PARAM; |
814 | 813 | ||
815 | /* determine vector offset and count*/ | 814 | /* determine vector offset and count */ |
816 | vf_v_idx = fm10k_vf_vector_index(hw, vf_idx); | 815 | vf_v_idx = fm10k_vf_vector_index(hw, vf_idx); |
817 | vf_v_limit = vf_v_idx + fm10k_vectors_per_pool(hw); | 816 | vf_v_limit = vf_v_idx + fm10k_vectors_per_pool(hw); |
818 | 817 | ||
@@ -951,7 +950,7 @@ static s32 fm10k_iov_reset_resources_pf(struct fm10k_hw *hw, | |||
951 | if (vf_info->mbx.ops.disconnect) | 950 | if (vf_info->mbx.ops.disconnect) |
952 | vf_info->mbx.ops.disconnect(hw, &vf_info->mbx); | 951 | vf_info->mbx.ops.disconnect(hw, &vf_info->mbx); |
953 | 952 | ||
954 | /* determine vector offset and count*/ | 953 | /* determine vector offset and count */ |
955 | vf_v_idx = fm10k_vf_vector_index(hw, vf_idx); | 954 | vf_v_idx = fm10k_vf_vector_index(hw, vf_idx); |
956 | vf_v_limit = vf_v_idx + fm10k_vectors_per_pool(hw); | 955 | vf_v_limit = vf_v_idx + fm10k_vectors_per_pool(hw); |
957 | 956 | ||
@@ -1035,7 +1034,7 @@ static s32 fm10k_iov_reset_resources_pf(struct fm10k_hw *hw, | |||
1035 | ((u32)vf_info->mac[2]); | 1034 | ((u32)vf_info->mac[2]); |
1036 | } | 1035 | } |
1037 | 1036 | ||
1038 | /* map queue pairs back to VF from last to first*/ | 1037 | /* map queue pairs back to VF from last to first */ |
1039 | for (i = queues_per_pool; i--;) { | 1038 | for (i = queues_per_pool; i--;) { |
1040 | fm10k_write_reg(hw, FM10K_TDBAL(vf_q_idx + i), tdbal); | 1039 | fm10k_write_reg(hw, FM10K_TDBAL(vf_q_idx + i), tdbal); |
1041 | fm10k_write_reg(hw, FM10K_TDBAH(vf_q_idx + i), tdbah); | 1040 | fm10k_write_reg(hw, FM10K_TDBAH(vf_q_idx + i), tdbah); |
@@ -1141,7 +1140,7 @@ static s32 fm10k_iov_report_timestamp_pf(struct fm10k_hw *hw, | |||
1141 | * | 1140 | * |
1142 | * This function is a default handler for MSI-X requests from the VF. The | 1141 | * This function is a default handler for MSI-X requests from the VF. The |
1143 | * assumption is that in this case it is acceptable to just directly | 1142 | * assumption is that in this case it is acceptable to just directly |
1144 | * hand off the message form the VF to the underlying shared code. | 1143 | * hand off the message from the VF to the underlying shared code. |
1145 | **/ | 1144 | **/ |
1146 | s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *hw, u32 **results, | 1145 | s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *hw, u32 **results, |
1147 | struct fm10k_mbx_info *mbx) | 1146 | struct fm10k_mbx_info *mbx) |
@@ -1160,7 +1159,7 @@ s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *hw, u32 **results, | |||
1160 | * | 1159 | * |
1161 | * This function is a default handler for MAC/VLAN requests from the VF. | 1160 | * This function is a default handler for MAC/VLAN requests from the VF. |
1162 | * The assumption is that in this case it is acceptable to just directly | 1161 | * The assumption is that in this case it is acceptable to just directly |
1163 | * hand off the message form the VF to the underlying shared code. | 1162 | * hand off the message from the VF to the underlying shared code. |
1164 | **/ | 1163 | **/ |
1165 | s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results, | 1164 | s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results, |
1166 | struct fm10k_mbx_info *mbx) | 1165 | struct fm10k_mbx_info *mbx) |
@@ -1404,7 +1403,7 @@ static void fm10k_update_hw_stats_pf(struct fm10k_hw *hw, | |||
1404 | &stats->vlan_drop); | 1403 | &stats->vlan_drop); |
1405 | loopback_drop = fm10k_read_hw_stats_32b(hw, | 1404 | loopback_drop = fm10k_read_hw_stats_32b(hw, |
1406 | FM10K_STATS_LOOPBACK_DROP, | 1405 | FM10K_STATS_LOOPBACK_DROP, |
1407 | &stats->loopback_drop); | 1406 | &stats->loopback_drop); |
1408 | nodesc_drop = fm10k_read_hw_stats_32b(hw, | 1407 | nodesc_drop = fm10k_read_hw_stats_32b(hw, |
1409 | FM10K_STATS_NODESC_DROP, | 1408 | FM10K_STATS_NODESC_DROP, |
1410 | &stats->nodesc_drop); | 1409 | &stats->nodesc_drop); |
@@ -1573,7 +1572,7 @@ static s32 fm10k_get_host_state_pf(struct fm10k_hw *hw, bool *switch_ready) | |||
1573 | s32 ret_val = 0; | 1572 | s32 ret_val = 0; |
1574 | u32 dma_ctrl2; | 1573 | u32 dma_ctrl2; |
1575 | 1574 | ||
1576 | /* verify the switch is ready for interraction */ | 1575 | /* verify the switch is ready for interaction */ |
1577 | dma_ctrl2 = fm10k_read_reg(hw, FM10K_DMA_CTRL2); | 1576 | dma_ctrl2 = fm10k_read_reg(hw, FM10K_DMA_CTRL2); |
1578 | if (!(dma_ctrl2 & FM10K_DMA_CTRL2_SWITCH_READY)) | 1577 | if (!(dma_ctrl2 & FM10K_DMA_CTRL2_SWITCH_READY)) |
1579 | goto out; | 1578 | goto out; |
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c index fd0a05f011a8..9b29d7b0377a 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c | |||
@@ -710,7 +710,7 @@ void fm10k_tlv_msg_test_create(u32 *msg, u32 attr_flags) | |||
710 | /** | 710 | /** |
711 | * fm10k_tlv_msg_test - Validate all results on test message receive | 711 | * fm10k_tlv_msg_test - Validate all results on test message receive |
712 | * @hw: Pointer to hardware structure | 712 | * @hw: Pointer to hardware structure |
713 | * @results: Pointer array to attributes in the mesage | 713 | * @results: Pointer array to attributes in the message |
714 | * @mbx: Pointer to mailbox information structure | 714 | * @mbx: Pointer to mailbox information structure |
715 | * | 715 | * |
716 | * This function does a check to verify all attributes match what the test | 716 | * This function does a check to verify all attributes match what the test |
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/drivers/net/ethernet/intel/fm10k/fm10k_type.h index 7c6d9d5a8ae5..4af96686c584 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_type.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_type.h | |||
@@ -356,6 +356,9 @@ struct fm10k_hw; | |||
356 | #define FM10K_QUEUE_DISABLE_TIMEOUT 100 | 356 | #define FM10K_QUEUE_DISABLE_TIMEOUT 100 |
357 | #define FM10K_RESET_TIMEOUT 150 | 357 | #define FM10K_RESET_TIMEOUT 150 |
358 | 358 | ||
359 | /* Maximum supported combined inner and outer header length for encapsulation */ | ||
360 | #define FM10K_TUNNEL_HEADER_LENGTH 184 | ||
361 | |||
359 | /* VF registers */ | 362 | /* VF registers */ |
360 | #define FM10K_VFCTRL 0x00000 | 363 | #define FM10K_VFCTRL 0x00000 |
361 | #define FM10K_VFCTRL_RST 0x00000008 | 364 | #define FM10K_VFCTRL_RST 0x00000008 |
@@ -593,7 +596,7 @@ struct fm10k_vf_info { | |||
593 | u16 sw_vid; /* Switch API assigned VLAN */ | 596 | u16 sw_vid; /* Switch API assigned VLAN */ |
594 | u16 pf_vid; /* PF assigned Default VLAN */ | 597 | u16 pf_vid; /* PF assigned Default VLAN */ |
595 | u8 mac[ETH_ALEN]; /* PF Default MAC address */ | 598 | u8 mac[ETH_ALEN]; /* PF Default MAC address */ |
596 | u8 vsi; /* VSI idenfifier */ | 599 | u8 vsi; /* VSI identifier */ |
597 | u8 vf_idx; /* which VF this is */ | 600 | u8 vf_idx; /* which VF this is */ |
598 | u8 vf_flags; /* flags indicating what modes | 601 | u8 vf_flags; /* flags indicating what modes |
599 | * are supported for the port | 602 | * are supported for the port |
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c index f0aa0f97b4a9..17219678439a 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c | |||
@@ -37,7 +37,7 @@ static s32 fm10k_stop_hw_vf(struct fm10k_hw *hw) | |||
37 | if (err) | 37 | if (err) |
38 | return err; | 38 | return err; |
39 | 39 | ||
40 | /* If permenant address is set then we need to restore it */ | 40 | /* If permanent address is set then we need to restore it */ |
41 | if (is_valid_ether_addr(perm_addr)) { | 41 | if (is_valid_ether_addr(perm_addr)) { |
42 | bal = (((u32)perm_addr[3]) << 24) | | 42 | bal = (((u32)perm_addr[3]) << 24) | |
43 | (((u32)perm_addr[4]) << 16) | | 43 | (((u32)perm_addr[4]) << 16) | |
@@ -65,7 +65,7 @@ static s32 fm10k_stop_hw_vf(struct fm10k_hw *hw) | |||
65 | * fm10k_reset_hw_vf - VF hardware reset | 65 | * fm10k_reset_hw_vf - VF hardware reset |
66 | * @hw: pointer to hardware structure | 66 | * @hw: pointer to hardware structure |
67 | * | 67 | * |
68 | * This function should return the hardare to a state similar to the | 68 | * This function should return the hardware to a state similar to the |
69 | * one it is in after just being initialized. | 69 | * one it is in after just being initialized. |
70 | **/ | 70 | **/ |
71 | static s32 fm10k_reset_hw_vf(struct fm10k_hw *hw) | 71 | static s32 fm10k_reset_hw_vf(struct fm10k_hw *hw) |
@@ -252,7 +252,7 @@ static s32 fm10k_read_mac_addr_vf(struct fm10k_hw *hw) | |||
252 | } | 252 | } |
253 | 253 | ||
254 | /** | 254 | /** |
255 | * fm10k_update_uc_addr_vf - Update device unicast address | 255 | * fm10k_update_uc_addr_vf - Update device unicast addresses |
256 | * @hw: pointer to the HW structure | 256 | * @hw: pointer to the HW structure |
257 | * @glort: unused | 257 | * @glort: unused |
258 | * @mac: MAC address to add/remove from table | 258 | * @mac: MAC address to add/remove from table |
@@ -282,7 +282,7 @@ static s32 fm10k_update_uc_addr_vf(struct fm10k_hw *hw, u16 glort, | |||
282 | memcmp(hw->mac.perm_addr, mac, ETH_ALEN)) | 282 | memcmp(hw->mac.perm_addr, mac, ETH_ALEN)) |
283 | return FM10K_ERR_PARAM; | 283 | return FM10K_ERR_PARAM; |
284 | 284 | ||
285 | /* add bit to notify us if this is a set of clear operation */ | 285 | /* add bit to notify us if this is a set or clear operation */ |
286 | if (!add) | 286 | if (!add) |
287 | vid |= FM10K_VLAN_CLEAR; | 287 | vid |= FM10K_VLAN_CLEAR; |
288 | 288 | ||
@@ -295,7 +295,7 @@ static s32 fm10k_update_uc_addr_vf(struct fm10k_hw *hw, u16 glort, | |||
295 | } | 295 | } |
296 | 296 | ||
297 | /** | 297 | /** |
298 | * fm10k_update_mc_addr_vf - Update device multicast address | 298 | * fm10k_update_mc_addr_vf - Update device multicast addresses |
299 | * @hw: pointer to the HW structure | 299 | * @hw: pointer to the HW structure |
300 | * @glort: unused | 300 | * @glort: unused |
301 | * @mac: MAC address to add/remove from table | 301 | * @mac: MAC address to add/remove from table |
@@ -319,7 +319,7 @@ static s32 fm10k_update_mc_addr_vf(struct fm10k_hw *hw, u16 glort, | |||
319 | if (!is_multicast_ether_addr(mac)) | 319 | if (!is_multicast_ether_addr(mac)) |
320 | return FM10K_ERR_PARAM; | 320 | return FM10K_ERR_PARAM; |
321 | 321 | ||
322 | /* add bit to notify us if this is a set of clear operation */ | 322 | /* add bit to notify us if this is a set or clear operation */ |
323 | if (!add) | 323 | if (!add) |
324 | vid |= FM10K_VLAN_CLEAR; | 324 | vid |= FM10K_VLAN_CLEAR; |
325 | 325 | ||
@@ -515,7 +515,7 @@ static s32 fm10k_adjust_systime_vf(struct fm10k_hw *hw, s32 ppb) | |||
515 | * @hw: pointer to the hardware structure | 515 | * @hw: pointer to the hardware structure |
516 | * | 516 | * |
517 | * Function reads the content of 2 registers, combined to represent a 64 bit | 517 | * Function reads the content of 2 registers, combined to represent a 64 bit |
518 | * value measured in nanosecods. In order to guarantee the value is accurate | 518 | * value measured in nanoseconds. In order to guarantee the value is accurate |
519 | * we check the 32 most significant bits both before and after reading the | 519 | * we check the 32 most significant bits both before and after reading the |
520 | * 32 least significant bits to verify they didn't change as we were reading | 520 | * 32 least significant bits to verify they didn't change as we were reading |
521 | * the registers. | 521 | * the registers. |
diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile index c40581999121..023e452aff8c 100644 --- a/drivers/net/ethernet/intel/i40e/Makefile +++ b/drivers/net/ethernet/intel/i40e/Makefile | |||
@@ -1,7 +1,7 @@ | |||
1 | ################################################################################ | 1 | ################################################################################ |
2 | # | 2 | # |
3 | # Intel Ethernet Controller XL710 Family Linux Driver | 3 | # Intel Ethernet Controller XL710 Family Linux Driver |
4 | # Copyright(c) 2013 - 2014 Intel Corporation. | 4 | # Copyright(c) 2013 - 2015 Intel Corporation. |
5 | # | 5 | # |
6 | # This program is free software; you can redistribute it and/or modify it | 6 | # This program is free software; you can redistribute it and/or modify it |
7 | # under the terms and conditions of the GNU General Public License, | 7 | # under the terms and conditions of the GNU General Public License, |
@@ -37,6 +37,7 @@ i40e-objs := i40e_main.o \ | |||
37 | i40e_hmc.o \ | 37 | i40e_hmc.o \ |
38 | i40e_lan_hmc.o \ | 38 | i40e_lan_hmc.o \ |
39 | i40e_nvm.o \ | 39 | i40e_nvm.o \ |
40 | i40e_configfs.o \ | ||
40 | i40e_debugfs.o \ | 41 | i40e_debugfs.o \ |
41 | i40e_diag.o \ | 42 | i40e_diag.o \ |
42 | i40e_txrx.o \ | 43 | i40e_txrx.o \ |
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 2b65cdcad6ba..c5137313b62a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | * | 2 | * |
3 | * Intel Ethernet Controller XL710 Family Linux Driver | 3 | * Intel Ethernet Controller XL710 Family Linux Driver |
4 | * Copyright(c) 2013 - 2014 Intel Corporation. | 4 | * Copyright(c) 2013 - 2015 Intel Corporation. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms and conditions of the GNU General Public License, | 7 | * under the terms and conditions of the GNU General Public License, |
@@ -36,6 +36,7 @@ | |||
36 | #include <linux/aer.h> | 36 | #include <linux/aer.h> |
37 | #include <linux/netdevice.h> | 37 | #include <linux/netdevice.h> |
38 | #include <linux/ioport.h> | 38 | #include <linux/ioport.h> |
39 | #include <linux/iommu.h> | ||
39 | #include <linux/slab.h> | 40 | #include <linux/slab.h> |
40 | #include <linux/list.h> | 41 | #include <linux/list.h> |
41 | #include <linux/string.h> | 42 | #include <linux/string.h> |
@@ -49,6 +50,7 @@ | |||
49 | #include <net/ip6_checksum.h> | 50 | #include <net/ip6_checksum.h> |
50 | #include <linux/ethtool.h> | 51 | #include <linux/ethtool.h> |
51 | #include <linux/if_vlan.h> | 52 | #include <linux/if_vlan.h> |
53 | #include <linux/if_bridge.h> | ||
52 | #include <linux/clocksource.h> | 54 | #include <linux/clocksource.h> |
53 | #include <linux/net_tstamp.h> | 55 | #include <linux/net_tstamp.h> |
54 | #include <linux/ptp_clock_kernel.h> | 56 | #include <linux/ptp_clock_kernel.h> |
@@ -94,6 +96,9 @@ | |||
94 | #define I40E_QUEUE_WAIT_RETRY_LIMIT 10 | 96 | #define I40E_QUEUE_WAIT_RETRY_LIMIT 10 |
95 | #define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 9) | 97 | #define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 9) |
96 | 98 | ||
99 | /* Ethtool Private Flags */ | ||
100 | #define I40E_PRIV_FLAGS_NPAR_FLAG (1 << 0) | ||
101 | |||
97 | #define I40E_NVM_VERSION_LO_SHIFT 0 | 102 | #define I40E_NVM_VERSION_LO_SHIFT 0 |
98 | #define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT) | 103 | #define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT) |
99 | #define I40E_NVM_VERSION_HI_SHIFT 12 | 104 | #define I40E_NVM_VERSION_HI_SHIFT 12 |
@@ -140,6 +145,7 @@ enum i40e_state_t { | |||
140 | __I40E_CORE_RESET_REQUESTED, | 145 | __I40E_CORE_RESET_REQUESTED, |
141 | __I40E_GLOBAL_RESET_REQUESTED, | 146 | __I40E_GLOBAL_RESET_REQUESTED, |
142 | __I40E_EMP_RESET_REQUESTED, | 147 | __I40E_EMP_RESET_REQUESTED, |
148 | __I40E_EMP_RESET_INTR_RECEIVED, | ||
143 | __I40E_FILTER_OVERFLOW_PROMISC, | 149 | __I40E_FILTER_OVERFLOW_PROMISC, |
144 | __I40E_SUSPENDED, | 150 | __I40E_SUSPENDED, |
145 | __I40E_PTP_TX_IN_PROGRESS, | 151 | __I40E_PTP_TX_IN_PROGRESS, |
@@ -383,6 +389,9 @@ struct i40e_pf { | |||
383 | bool ptp_tx; | 389 | bool ptp_tx; |
384 | bool ptp_rx; | 390 | bool ptp_rx; |
385 | u16 rss_table_size; | 391 | u16 rss_table_size; |
392 | /* These are only valid in NPAR modes */ | ||
393 | u32 npar_max_bw; | ||
394 | u32 npar_min_bw; | ||
386 | }; | 395 | }; |
387 | 396 | ||
388 | struct i40e_mac_filter { | 397 | struct i40e_mac_filter { |
@@ -405,6 +414,7 @@ struct i40e_veb { | |||
405 | u16 uplink_seid; | 414 | u16 uplink_seid; |
406 | u16 stats_idx; /* index of VEB parent */ | 415 | u16 stats_idx; /* index of VEB parent */ |
407 | u8 enabled_tc; | 416 | u8 enabled_tc; |
417 | u16 bridge_mode; /* Bridge Mode (VEB/VEPA) */ | ||
408 | u16 flags; | 418 | u16 flags; |
409 | u16 bw_limit; | 419 | u16 bw_limit; |
410 | u8 bw_max_quanta; | 420 | u8 bw_max_quanta; |
@@ -461,6 +471,9 @@ struct i40e_vsi { | |||
461 | u16 rx_itr_setting; | 471 | u16 rx_itr_setting; |
462 | u16 tx_itr_setting; | 472 | u16 tx_itr_setting; |
463 | 473 | ||
474 | u16 rss_table_size; | ||
475 | u16 rss_size; | ||
476 | |||
464 | u16 max_frame; | 477 | u16 max_frame; |
465 | u16 rx_hdr_len; | 478 | u16 rx_hdr_len; |
466 | u16 rx_buf_len; | 479 | u16 rx_buf_len; |
@@ -478,6 +491,7 @@ struct i40e_vsi { | |||
478 | 491 | ||
479 | u16 base_queue; /* vsi's first queue in hw array */ | 492 | u16 base_queue; /* vsi's first queue in hw array */ |
480 | u16 alloc_queue_pairs; /* Allocated Tx/Rx queues */ | 493 | u16 alloc_queue_pairs; /* Allocated Tx/Rx queues */ |
494 | u16 req_queue_pairs; /* User requested queue pairs */ | ||
481 | u16 num_queue_pairs; /* Used tx and rx pairs */ | 495 | u16 num_queue_pairs; /* Used tx and rx pairs */ |
482 | u16 num_desc; | 496 | u16 num_desc; |
483 | enum i40e_vsi_type type; /* VSI type, e.g., LAN, FCoE, etc */ | 497 | enum i40e_vsi_type type; /* VSI type, e.g., LAN, FCoE, etc */ |
@@ -504,6 +518,9 @@ struct i40e_vsi { | |||
504 | 518 | ||
505 | /* VSI specific handlers */ | 519 | /* VSI specific handlers */ |
506 | irqreturn_t (*irq_handler)(int irq, void *data); | 520 | irqreturn_t (*irq_handler)(int irq, void *data); |
521 | |||
522 | /* current rxnfc data */ | ||
523 | struct ethtool_rxnfc rxnfc; /* current rss hash opts */ | ||
507 | } ____cacheline_internodealigned_in_smp; | 524 | } ____cacheline_internodealigned_in_smp; |
508 | 525 | ||
509 | struct i40e_netdev_priv { | 526 | struct i40e_netdev_priv { |
@@ -544,14 +561,14 @@ static inline char *i40e_fw_version_str(struct i40e_hw *hw) | |||
544 | static char buf[32]; | 561 | static char buf[32]; |
545 | 562 | ||
546 | snprintf(buf, sizeof(buf), | 563 | snprintf(buf, sizeof(buf), |
547 | "f%d.%d a%d.%d n%02x.%02x e%08x", | 564 | "f%d.%d.%05d a%d.%d n%x.%02x e%x", |
548 | hw->aq.fw_maj_ver, hw->aq.fw_min_ver, | 565 | hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build, |
549 | hw->aq.api_maj_ver, hw->aq.api_min_ver, | 566 | hw->aq.api_maj_ver, hw->aq.api_min_ver, |
550 | (hw->nvm.version & I40E_NVM_VERSION_HI_MASK) >> | 567 | (hw->nvm.version & I40E_NVM_VERSION_HI_MASK) >> |
551 | I40E_NVM_VERSION_HI_SHIFT, | 568 | I40E_NVM_VERSION_HI_SHIFT, |
552 | (hw->nvm.version & I40E_NVM_VERSION_LO_MASK) >> | 569 | (hw->nvm.version & I40E_NVM_VERSION_LO_MASK) >> |
553 | I40E_NVM_VERSION_LO_SHIFT, | 570 | I40E_NVM_VERSION_LO_SHIFT, |
554 | hw->nvm.eetrack); | 571 | (hw->nvm.eetrack & 0xffffff)); |
555 | 572 | ||
556 | return buf; | 573 | return buf; |
557 | } | 574 | } |
@@ -680,6 +697,7 @@ int i40e_vlan_rx_add_vid(struct net_device *netdev, | |||
680 | int i40e_vlan_rx_kill_vid(struct net_device *netdev, | 697 | int i40e_vlan_rx_kill_vid(struct net_device *netdev, |
681 | __always_unused __be16 proto, u16 vid); | 698 | __always_unused __be16 proto, u16 vid); |
682 | #endif | 699 | #endif |
700 | int i40e_open(struct net_device *netdev); | ||
683 | int i40e_vsi_open(struct i40e_vsi *vsi); | 701 | int i40e_vsi_open(struct i40e_vsi *vsi); |
684 | void i40e_vlan_stripping_disable(struct i40e_vsi *vsi); | 702 | void i40e_vlan_stripping_disable(struct i40e_vsi *vsi); |
685 | int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid); | 703 | int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid); |
@@ -690,7 +708,6 @@ bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi); | |||
690 | struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr, | 708 | struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr, |
691 | bool is_vf, bool is_netdev); | 709 | bool is_vf, bool is_netdev); |
692 | #ifdef I40E_FCOE | 710 | #ifdef I40E_FCOE |
693 | int i40e_open(struct net_device *netdev); | ||
694 | int i40e_close(struct net_device *netdev); | 711 | int i40e_close(struct net_device *netdev); |
695 | int i40e_setup_tc(struct net_device *netdev, u8 tc); | 712 | int i40e_setup_tc(struct net_device *netdev, u8 tc); |
696 | void i40e_netpoll(struct net_device *netdev); | 713 | void i40e_netpoll(struct net_device *netdev); |
@@ -712,6 +729,7 @@ void i40e_fcoe_handle_status(struct i40e_ring *rx_ring, | |||
712 | void i40e_vlan_stripping_enable(struct i40e_vsi *vsi); | 729 | void i40e_vlan_stripping_enable(struct i40e_vsi *vsi); |
713 | #ifdef CONFIG_I40E_DCB | 730 | #ifdef CONFIG_I40E_DCB |
714 | void i40e_dcbnl_flush_apps(struct i40e_pf *pf, | 731 | void i40e_dcbnl_flush_apps(struct i40e_pf *pf, |
732 | struct i40e_dcbx_config *old_cfg, | ||
715 | struct i40e_dcbx_config *new_cfg); | 733 | struct i40e_dcbx_config *new_cfg); |
716 | void i40e_dcbnl_set_all(struct i40e_vsi *vsi); | 734 | void i40e_dcbnl_set_all(struct i40e_vsi *vsi); |
717 | void i40e_dcbnl_setup(struct i40e_vsi *vsi); | 735 | void i40e_dcbnl_setup(struct i40e_vsi *vsi); |
@@ -727,4 +745,12 @@ int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr); | |||
727 | int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr); | 745 | int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr); |
728 | void i40e_ptp_init(struct i40e_pf *pf); | 746 | void i40e_ptp_init(struct i40e_pf *pf); |
729 | void i40e_ptp_stop(struct i40e_pf *pf); | 747 | void i40e_ptp_stop(struct i40e_pf *pf); |
748 | int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi); | ||
749 | #if IS_ENABLED(CONFIG_I40E_CONFIGFS_FS) | ||
750 | int i40e_configfs_init(void); | ||
751 | void i40e_configfs_exit(void); | ||
752 | #endif /* CONFIG_I40E_CONFIGFS_FS */ | ||
753 | i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf); | ||
754 | i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf); | ||
755 | i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf); | ||
730 | #endif /* _I40E_H_ */ | 756 | #endif /* _I40E_H_ */ |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c index 77f6254a89ac..dc2ed359e945 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c | |||
@@ -592,6 +592,7 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw) | |||
592 | ret_code = i40e_aq_get_firmware_version(hw, | 592 | ret_code = i40e_aq_get_firmware_version(hw, |
593 | &hw->aq.fw_maj_ver, | 593 | &hw->aq.fw_maj_ver, |
594 | &hw->aq.fw_min_ver, | 594 | &hw->aq.fw_min_ver, |
595 | &hw->aq.fw_build, | ||
595 | &hw->aq.api_maj_ver, | 596 | &hw->aq.api_maj_ver, |
596 | &hw->aq.api_min_ver, | 597 | &hw->aq.api_min_ver, |
597 | NULL); | 598 | NULL); |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h index de17b6fbcc4e..28e519a50de4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h | |||
@@ -93,6 +93,7 @@ struct i40e_adminq_info { | |||
93 | u16 asq_buf_size; /* send queue buffer size */ | 93 | u16 asq_buf_size; /* send queue buffer size */ |
94 | u16 fw_maj_ver; /* firmware major version */ | 94 | u16 fw_maj_ver; /* firmware major version */ |
95 | u16 fw_min_ver; /* firmware minor version */ | 95 | u16 fw_min_ver; /* firmware minor version */ |
96 | u32 fw_build; /* firmware build number */ | ||
96 | u16 api_maj_ver; /* api major version */ | 97 | u16 api_maj_ver; /* api major version */ |
97 | u16 api_min_ver; /* api minor version */ | 98 | u16 api_min_ver; /* api minor version */ |
98 | bool nvm_release_on_done; | 99 | bool nvm_release_on_done; |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 6aea65dae5ed..1da7d05abd38 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | * | 2 | * |
3 | * Intel Ethernet Controller XL710 Family Linux Driver | 3 | * Intel Ethernet Controller XL710 Family Linux Driver |
4 | * Copyright(c) 2013 - 2014 Intel Corporation. | 4 | * Copyright(c) 2013 - 2015 Intel Corporation. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms and conditions of the GNU General Public License, | 7 | * under the terms and conditions of the GNU General Public License, |
@@ -94,16 +94,19 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, | |||
94 | 94 | ||
95 | i40e_debug(hw, mask, | 95 | i40e_debug(hw, mask, |
96 | "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n", | 96 | "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n", |
97 | aq_desc->opcode, aq_desc->flags, aq_desc->datalen, | 97 | le16_to_cpu(aq_desc->opcode), |
98 | aq_desc->retval); | 98 | le16_to_cpu(aq_desc->flags), |
99 | le16_to_cpu(aq_desc->datalen), | ||
100 | le16_to_cpu(aq_desc->retval)); | ||
99 | i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n", | 101 | i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n", |
100 | aq_desc->cookie_high, aq_desc->cookie_low); | 102 | le32_to_cpu(aq_desc->cookie_high), |
103 | le32_to_cpu(aq_desc->cookie_low)); | ||
101 | i40e_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n", | 104 | i40e_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n", |
102 | aq_desc->params.internal.param0, | 105 | le32_to_cpu(aq_desc->params.internal.param0), |
103 | aq_desc->params.internal.param1); | 106 | le32_to_cpu(aq_desc->params.internal.param1)); |
104 | i40e_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n", | 107 | i40e_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n", |
105 | aq_desc->params.external.addr_high, | 108 | le32_to_cpu(aq_desc->params.external.addr_high), |
106 | aq_desc->params.external.addr_low); | 109 | le32_to_cpu(aq_desc->params.external.addr_low)); |
107 | 110 | ||
108 | if ((buffer != NULL) && (aq_desc->datalen != 0)) { | 111 | if ((buffer != NULL) && (aq_desc->datalen != 0)) { |
109 | memset(data, 0, sizeof(data)); | 112 | memset(data, 0, sizeof(data)); |
@@ -116,15 +119,19 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, | |||
116 | if ((i % 16) == 15) { | 119 | if ((i % 16) == 15) { |
117 | i40e_debug(hw, mask, | 120 | i40e_debug(hw, mask, |
118 | "\t0x%04X %08X %08X %08X %08X\n", | 121 | "\t0x%04X %08X %08X %08X %08X\n", |
119 | i - 15, data[0], data[1], data[2], | 122 | i - 15, le32_to_cpu(data[0]), |
120 | data[3]); | 123 | le32_to_cpu(data[1]), |
124 | le32_to_cpu(data[2]), | ||
125 | le32_to_cpu(data[3])); | ||
121 | memset(data, 0, sizeof(data)); | 126 | memset(data, 0, sizeof(data)); |
122 | } | 127 | } |
123 | } | 128 | } |
124 | if ((i % 16) != 0) | 129 | if ((i % 16) != 0) |
125 | i40e_debug(hw, mask, "\t0x%04X %08X %08X %08X %08X\n", | 130 | i40e_debug(hw, mask, "\t0x%04X %08X %08X %08X %08X\n", |
126 | i - (i % 16), data[0], data[1], data[2], | 131 | i - (i % 16), le32_to_cpu(data[0]), |
127 | data[3]); | 132 | le32_to_cpu(data[1]), |
133 | le32_to_cpu(data[2]), | ||
134 | le32_to_cpu(data[3])); | ||
128 | } | 135 | } |
129 | } | 136 | } |
130 | 137 | ||
@@ -1298,14 +1305,14 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, | |||
1298 | *aq_failures |= I40E_SET_FC_AQ_FAIL_SET; | 1305 | *aq_failures |= I40E_SET_FC_AQ_FAIL_SET; |
1299 | } | 1306 | } |
1300 | /* Update the link info */ | 1307 | /* Update the link info */ |
1301 | status = i40e_update_link_info(hw, true); | 1308 | status = i40e_aq_get_link_info(hw, true, NULL, NULL); |
1302 | if (status) { | 1309 | if (status) { |
1303 | /* Wait a little bit (on 40G cards it sometimes takes a really | 1310 | /* Wait a little bit (on 40G cards it sometimes takes a really |
1304 | * long time for link to come back from the atomic reset) | 1311 | * long time for link to come back from the atomic reset) |
1305 | * and try once more | 1312 | * and try once more |
1306 | */ | 1313 | */ |
1307 | msleep(1000); | 1314 | msleep(1000); |
1308 | status = i40e_update_link_info(hw, true); | 1315 | status = i40e_aq_get_link_info(hw, true, NULL, NULL); |
1309 | } | 1316 | } |
1310 | if (status) | 1317 | if (status) |
1311 | *aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE; | 1318 | *aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE; |
@@ -1453,35 +1460,6 @@ aq_get_link_info_exit: | |||
1453 | } | 1460 | } |
1454 | 1461 | ||
1455 | /** | 1462 | /** |
1456 | * i40e_update_link_info | ||
1457 | * @hw: pointer to the hw struct | ||
1458 | * @enable_lse: enable/disable LinkStatusEvent reporting | ||
1459 | * | ||
1460 | * Returns the link status of the adapter | ||
1461 | **/ | ||
1462 | i40e_status i40e_update_link_info(struct i40e_hw *hw, bool enable_lse) | ||
1463 | { | ||
1464 | struct i40e_aq_get_phy_abilities_resp abilities; | ||
1465 | i40e_status status; | ||
1466 | |||
1467 | status = i40e_aq_get_link_info(hw, enable_lse, NULL, NULL); | ||
1468 | if (status) | ||
1469 | return status; | ||
1470 | |||
1471 | status = i40e_aq_get_phy_capabilities(hw, false, false, | ||
1472 | &abilities, NULL); | ||
1473 | if (status) | ||
1474 | return status; | ||
1475 | |||
1476 | if (abilities.abilities & I40E_AQ_PHY_AN_ENABLED) | ||
1477 | hw->phy.link_info.an_enabled = true; | ||
1478 | else | ||
1479 | hw->phy.link_info.an_enabled = false; | ||
1480 | |||
1481 | return status; | ||
1482 | } | ||
1483 | |||
1484 | /** | ||
1485 | * i40e_aq_set_phy_int_mask | 1463 | * i40e_aq_set_phy_int_mask |
1486 | * @hw: pointer to the hw struct | 1464 | * @hw: pointer to the hw struct |
1487 | * @mask: interrupt mask to be set | 1465 | * @mask: interrupt mask to be set |
@@ -1760,6 +1738,7 @@ i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw, | |||
1760 | * @hw: pointer to the hw struct | 1738 | * @hw: pointer to the hw struct |
1761 | * @fw_major_version: firmware major version | 1739 | * @fw_major_version: firmware major version |
1762 | * @fw_minor_version: firmware minor version | 1740 | * @fw_minor_version: firmware minor version |
1741 | * @fw_build: firmware build number | ||
1763 | * @api_major_version: major queue version | 1742 | * @api_major_version: major queue version |
1764 | * @api_minor_version: minor queue version | 1743 | * @api_minor_version: minor queue version |
1765 | * @cmd_details: pointer to command details structure or NULL | 1744 | * @cmd_details: pointer to command details structure or NULL |
@@ -1768,6 +1747,7 @@ i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw, | |||
1768 | **/ | 1747 | **/ |
1769 | i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw, | 1748 | i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw, |
1770 | u16 *fw_major_version, u16 *fw_minor_version, | 1749 | u16 *fw_major_version, u16 *fw_minor_version, |
1750 | u32 *fw_build, | ||
1771 | u16 *api_major_version, u16 *api_minor_version, | 1751 | u16 *api_major_version, u16 *api_minor_version, |
1772 | struct i40e_asq_cmd_details *cmd_details) | 1752 | struct i40e_asq_cmd_details *cmd_details) |
1773 | { | 1753 | { |
@@ -1781,13 +1761,15 @@ i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw, | |||
1781 | status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); | 1761 | status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); |
1782 | 1762 | ||
1783 | if (!status) { | 1763 | if (!status) { |
1784 | if (fw_major_version != NULL) | 1764 | if (fw_major_version) |
1785 | *fw_major_version = le16_to_cpu(resp->fw_major); | 1765 | *fw_major_version = le16_to_cpu(resp->fw_major); |
1786 | if (fw_minor_version != NULL) | 1766 | if (fw_minor_version) |
1787 | *fw_minor_version = le16_to_cpu(resp->fw_minor); | 1767 | *fw_minor_version = le16_to_cpu(resp->fw_minor); |
1788 | if (api_major_version != NULL) | 1768 | if (fw_build) |
1769 | *fw_build = le32_to_cpu(resp->fw_build); | ||
1770 | if (api_major_version) | ||
1789 | *api_major_version = le16_to_cpu(resp->api_major); | 1771 | *api_major_version = le16_to_cpu(resp->api_major); |
1790 | if (api_minor_version != NULL) | 1772 | if (api_minor_version) |
1791 | *api_minor_version = le16_to_cpu(resp->api_minor); | 1773 | *api_minor_version = le16_to_cpu(resp->api_minor); |
1792 | } | 1774 | } |
1793 | 1775 | ||
@@ -1817,7 +1799,7 @@ i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw, | |||
1817 | 1799 | ||
1818 | i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version); | 1800 | i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version); |
1819 | 1801 | ||
1820 | desc.flags |= cpu_to_le16(I40E_AQ_FLAG_SI); | 1802 | desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD); |
1821 | cmd->driver_major_ver = dv->major_version; | 1803 | cmd->driver_major_ver = dv->major_version; |
1822 | cmd->driver_minor_ver = dv->minor_version; | 1804 | cmd->driver_minor_ver = dv->minor_version; |
1823 | cmd->driver_build_ver = dv->build_version; | 1805 | cmd->driver_build_ver = dv->build_version; |
@@ -3377,6 +3359,47 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, | |||
3377 | } | 3359 | } |
3378 | 3360 | ||
3379 | /** | 3361 | /** |
3362 | * i40e_aq_alternate_read | ||
3363 | * @hw: pointer to the hardware structure | ||
3364 | * @reg_addr0: address of first dword to be read | ||
3365 | * @reg_val0: pointer for data read from 'reg_addr0' | ||
3366 | * @reg_addr1: address of second dword to be read | ||
3367 | * @reg_val1: pointer for data read from 'reg_addr1' | ||
3368 | * | ||
3369 | * Read one or two dwords from alternate structure. Fields are indicated | ||
3370 | * by 'reg_addr0' and 'reg_addr1' register numbers. If 'reg_val1' pointer | ||
3371 | * is not passed then only register at 'reg_addr0' is read. | ||
3372 | * | ||
3373 | **/ | ||
3374 | i40e_status i40e_aq_alternate_read(struct i40e_hw *hw, | ||
3375 | u32 reg_addr0, u32 *reg_val0, | ||
3376 | u32 reg_addr1, u32 *reg_val1) | ||
3377 | { | ||
3378 | struct i40e_aq_desc desc; | ||
3379 | struct i40e_aqc_alternate_write *cmd_resp = | ||
3380 | (struct i40e_aqc_alternate_write *)&desc.params.raw; | ||
3381 | i40e_status status; | ||
3382 | |||
3383 | if (!reg_val0) | ||
3384 | return I40E_ERR_PARAM; | ||
3385 | |||
3386 | i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read); | ||
3387 | cmd_resp->address0 = cpu_to_le32(reg_addr0); | ||
3388 | cmd_resp->address1 = cpu_to_le32(reg_addr1); | ||
3389 | |||
3390 | status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); | ||
3391 | |||
3392 | if (!status) { | ||
3393 | *reg_val0 = le32_to_cpu(cmd_resp->data0); | ||
3394 | |||
3395 | if (reg_val1) | ||
3396 | *reg_val1 = le32_to_cpu(cmd_resp->data1); | ||
3397 | } | ||
3398 | |||
3399 | return status; | ||
3400 | } | ||
3401 | |||
3402 | /** | ||
3380 | * i40e_aq_resume_port_tx | 3403 | * i40e_aq_resume_port_tx |
3381 | * @hw: pointer to the hardware structure | 3404 | * @hw: pointer to the hardware structure |
3382 | * @cmd_details: pointer to command details structure or NULL | 3405 | * @cmd_details: pointer to command details structure or NULL |
@@ -3440,3 +3463,79 @@ void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status) | |||
3440 | break; | 3463 | break; |
3441 | } | 3464 | } |
3442 | } | 3465 | } |
3466 | |||
3467 | /** | ||
3468 | * i40e_read_bw_from_alt_ram | ||
3469 | * @hw: pointer to the hardware structure | ||
3470 | * @max_bw: pointer for max_bw read | ||
3471 | * @min_bw: pointer for min_bw read | ||
3472 | * @min_valid: pointer for bool that is true if min_bw is a valid value | ||
3473 | * @max_valid: pointer for bool that is true if max_bw is a valid value | ||
3474 | * | ||
3475 | * Read bw from the alternate ram for the given pf | ||
3476 | **/ | ||
3477 | i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw, | ||
3478 | u32 *max_bw, u32 *min_bw, | ||
3479 | bool *min_valid, bool *max_valid) | ||
3480 | { | ||
3481 | i40e_status status; | ||
3482 | u32 max_bw_addr, min_bw_addr; | ||
3483 | |||
3484 | /* Calculate the address of the min/max bw registers */ | ||
3485 | max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET + | ||
3486 | I40E_ALT_STRUCT_MAX_BW_OFFSET + | ||
3487 | (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id); | ||
3488 | min_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET + | ||
3489 | I40E_ALT_STRUCT_MIN_BW_OFFSET + | ||
3490 | (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id); | ||
3491 | |||
3492 | /* Read the bandwidths from alt ram */ | ||
3493 | status = i40e_aq_alternate_read(hw, max_bw_addr, max_bw, | ||
3494 | min_bw_addr, min_bw); | ||
3495 | |||
3496 | if (*min_bw & I40E_ALT_BW_VALID_MASK) | ||
3497 | *min_valid = true; | ||
3498 | else | ||
3499 | *min_valid = false; | ||
3500 | |||
3501 | if (*max_bw & I40E_ALT_BW_VALID_MASK) | ||
3502 | *max_valid = true; | ||
3503 | else | ||
3504 | *max_valid = false; | ||
3505 | |||
3506 | return status; | ||
3507 | } | ||
3508 | |||
3509 | /** | ||
3510 | * i40e_aq_configure_partition_bw | ||
3511 | * @hw: pointer to the hardware structure | ||
3512 | * @bw_data: Buffer holding valid pfs and bw limits | ||
3513 | * @cmd_details: pointer to command details | ||
3514 | * | ||
3515 | * Configure partitions guaranteed/max bw | ||
3516 | **/ | ||
3517 | i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw, | ||
3518 | struct i40e_aqc_configure_partition_bw_data *bw_data, | ||
3519 | struct i40e_asq_cmd_details *cmd_details) | ||
3520 | { | ||
3521 | i40e_status status; | ||
3522 | struct i40e_aq_desc desc; | ||
3523 | u16 bwd_size = sizeof(*bw_data); | ||
3524 | |||
3525 | i40e_fill_default_direct_cmd_desc(&desc, | ||
3526 | i40e_aqc_opc_configure_partition_bw); | ||
3527 | |||
3528 | /* Indirect command */ | ||
3529 | desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); | ||
3530 | desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); | ||
3531 | |||
3532 | if (bwd_size > I40E_AQ_LARGE_BUF) | ||
3533 | desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); | ||
3534 | |||
3535 | desc.datalen = cpu_to_le16(bwd_size); | ||
3536 | |||
3537 | status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size, | ||
3538 | cmd_details); | ||
3539 | |||
3540 | return status; | ||
3541 | } | ||
diff --git a/drivers/net/ethernet/intel/i40e/i40e_configfs.c b/drivers/net/ethernet/intel/i40e/i40e_configfs.c new file mode 100644 index 000000000000..d3cdfc24d5bf --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/i40e_configfs.c | |||
@@ -0,0 +1,354 @@ | |||
1 | /******************************************************************************* | ||
2 | * | ||
3 | * Intel Ethernet Controller XL710 Family Linux Driver | ||
4 | * Copyright(c) 2013 - 2015 Intel Corporation. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms and conditions of the GNU General Public License, | ||
8 | * version 2, as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | * more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License along | ||
16 | * with this program. If not, see <http://www.gnu.org/licenses/>. | ||
17 | * | ||
18 | * The full GNU General Public License is included in this distribution in | ||
19 | * the file called "COPYING". | ||
20 | * | ||
21 | * Contact Information: | ||
22 | * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> | ||
23 | * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
24 | * | ||
25 | ******************************************************************************/ | ||
26 | |||
27 | #include <linux/configfs.h> | ||
28 | #include "i40e.h" | ||
29 | |||
30 | #if IS_ENABLED(CONFIG_I40E_CONFIGFS_FS) | ||
31 | |||
32 | /** | ||
33 | * configfs structure for i40e | ||
34 | * | ||
35 | * This file adds code for configfs support for the i40e driver. This sets | ||
36 | * up a filesystem under /sys/kernel/config in which configuration changes | ||
37 | * can be made for the driver's netdevs. | ||
38 | * | ||
39 | * The initialization in this code creates the "i40e" entry in the configfs | ||
40 | * system. After that, the user needs to use mkdir to create configurations | ||
41 | * for specific netdev ports; for example "mkdir eth3". This code will verify | ||
42 | * that such a netdev exists and that it is owned by i40e. | ||
43 | * | ||
44 | **/ | ||
45 | |||
46 | struct i40e_cfgfs_vsi { | ||
47 | struct config_item item; | ||
48 | struct i40e_vsi *vsi; | ||
49 | }; | ||
50 | |||
51 | static inline struct i40e_cfgfs_vsi *to_i40e_cfgfs_vsi(struct config_item *item) | ||
52 | { | ||
53 | return item ? container_of(item, struct i40e_cfgfs_vsi, item) : NULL; | ||
54 | } | ||
55 | |||
56 | static struct configfs_attribute i40e_cfgfs_vsi_attr_min_bw = { | ||
57 | .ca_owner = THIS_MODULE, | ||
58 | .ca_name = "min_bw", | ||
59 | .ca_mode = S_IRUGO | S_IWUSR, | ||
60 | }; | ||
61 | |||
62 | static struct configfs_attribute i40e_cfgfs_vsi_attr_max_bw = { | ||
63 | .ca_owner = THIS_MODULE, | ||
64 | .ca_name = "max_bw", | ||
65 | .ca_mode = S_IRUGO | S_IWUSR, | ||
66 | }; | ||
67 | |||
68 | static struct configfs_attribute i40e_cfgfs_vsi_attr_commit = { | ||
69 | .ca_owner = THIS_MODULE, | ||
70 | .ca_name = "commit", | ||
71 | .ca_mode = S_IRUGO | S_IWUSR, | ||
72 | }; | ||
73 | |||
74 | static struct configfs_attribute i40e_cfgfs_vsi_attr_port_count = { | ||
75 | .ca_owner = THIS_MODULE, | ||
76 | .ca_name = "ports", | ||
77 | .ca_mode = S_IRUGO | S_IWUSR, | ||
78 | }; | ||
79 | |||
80 | static struct configfs_attribute i40e_cfgfs_vsi_attr_part_count = { | ||
81 | .ca_owner = THIS_MODULE, | ||
82 | .ca_name = "partitions", | ||
83 | .ca_mode = S_IRUGO | S_IWUSR, | ||
84 | }; | ||
85 | |||
86 | static struct configfs_attribute *i40e_cfgfs_vsi_attrs[] = { | ||
87 | &i40e_cfgfs_vsi_attr_min_bw, | ||
88 | &i40e_cfgfs_vsi_attr_max_bw, | ||
89 | &i40e_cfgfs_vsi_attr_commit, | ||
90 | &i40e_cfgfs_vsi_attr_port_count, | ||
91 | &i40e_cfgfs_vsi_attr_part_count, | ||
92 | NULL, | ||
93 | }; | ||
94 | |||
95 | /** | ||
96 | * i40e_cfgfs_vsi_attr_show - Show a VSI's NPAR BW partition info | ||
97 | * @item: A pointer back to the configfs item created on driver load | ||
98 | * @attr: A pointer to this item's configuration attribute | ||
99 | * @page: A pointer to the output buffer | ||
100 | **/ | ||
101 | static ssize_t i40e_cfgfs_vsi_attr_show(struct config_item *item, | ||
102 | struct configfs_attribute *attr, | ||
103 | char *page) | ||
104 | { | ||
105 | struct i40e_cfgfs_vsi *i40e_cfgfs_vsi = to_i40e_cfgfs_vsi(item); | ||
106 | struct i40e_pf *pf = i40e_cfgfs_vsi->vsi->back; | ||
107 | ssize_t count; | ||
108 | |||
109 | if (i40e_cfgfs_vsi->vsi != pf->vsi[pf->lan_vsi]) | ||
110 | return 0; | ||
111 | |||
112 | if (strncmp(attr->ca_name, "min_bw", 6) == 0) | ||
113 | count = sprintf(page, "%s %s %d%%\n", | ||
114 | i40e_cfgfs_vsi->vsi->netdev->name, | ||
115 | (pf->npar_min_bw & I40E_ALT_BW_RELATIVE_MASK) ? | ||
116 | "Relative Min BW" : "Absolute Min BW", | ||
117 | pf->npar_min_bw & I40E_ALT_BW_VALUE_MASK); | ||
118 | else if (strncmp(attr->ca_name, "max_bw", 6) == 0) | ||
119 | count = sprintf(page, "%s %s %d%%\n", | ||
120 | i40e_cfgfs_vsi->vsi->netdev->name, | ||
121 | (pf->npar_max_bw & I40E_ALT_BW_RELATIVE_MASK) ? | ||
122 | "Relative Max BW" : "Absolute Max BW", | ||
123 | pf->npar_max_bw & I40E_ALT_BW_VALUE_MASK); | ||
124 | else if (strncmp(attr->ca_name, "ports", 5) == 0) | ||
125 | count = sprintf(page, "%d\n", | ||
126 | pf->hw.num_ports); | ||
127 | else if (strncmp(attr->ca_name, "partitions", 10) == 0) | ||
128 | count = sprintf(page, "%d\n", | ||
129 | pf->hw.num_partitions); | ||
130 | else | ||
131 | return 0; | ||
132 | |||
133 | return count; | ||
134 | } | ||
135 | |||
136 | /** | ||
137 | * i40e_cfgfs_vsi_attr_store - Store a VSI's NPAR BW partition info | ||
138 | * @item: A pointer back to the configfs item created on driver load | ||
139 | * @attr: A pointer to this item's configuration attribute | ||
140 | * @page: A pointer to the user input buffer holding the user input values | ||
141 | **/ | ||
142 | static ssize_t i40e_cfgfs_vsi_attr_store(struct config_item *item, | ||
143 | struct configfs_attribute *attr, | ||
144 | const char *page, size_t count) | ||
145 | { | ||
146 | struct i40e_cfgfs_vsi *i40e_cfgfs_vsi = to_i40e_cfgfs_vsi(item); | ||
147 | struct i40e_pf *pf = i40e_cfgfs_vsi->vsi->back; | ||
148 | char *p = (char *)page; | ||
149 | int rc; | ||
150 | unsigned long tmp; | ||
151 | |||
152 | if (i40e_cfgfs_vsi->vsi != pf->vsi[pf->lan_vsi]) | ||
153 | return 0; | ||
154 | |||
155 | if (!p || (*p && (*p == '\n'))) | ||
156 | return -EINVAL; | ||
157 | |||
158 | rc = kstrtoul(p, 10, &tmp); | ||
159 | if (rc) | ||
160 | return rc; | ||
161 | if (tmp > 100) | ||
162 | return -ERANGE; | ||
163 | |||
164 | if (strncmp(attr->ca_name, "min_bw", 6) == 0) { | ||
165 | if (tmp > (pf->npar_max_bw & I40E_ALT_BW_VALUE_MASK)) | ||
166 | return -ERANGE; | ||
167 | /* Preserve the valid and relative BW bits - the rest is | ||
168 | * don't care. | ||
169 | */ | ||
170 | pf->npar_min_bw &= (I40E_ALT_BW_RELATIVE_MASK | | ||
171 | I40E_ALT_BW_VALID_MASK); | ||
172 | pf->npar_min_bw |= (tmp & I40E_ALT_BW_VALUE_MASK); | ||
173 | i40e_set_npar_bw_setting(pf); | ||
174 | } else if (strncmp(attr->ca_name, "max_bw", 6) == 0) { | ||
175 | if (tmp < 1 || | ||
176 | tmp < (pf->npar_min_bw & I40E_ALT_BW_VALUE_MASK)) | ||
177 | return -ERANGE; | ||
178 | /* Preserve the valid and relative BW bits - the rest is | ||
179 | * don't care. | ||
180 | */ | ||
181 | pf->npar_max_bw &= (I40E_ALT_BW_RELATIVE_MASK | | ||
182 | I40E_ALT_BW_VALID_MASK); | ||
183 | pf->npar_max_bw |= (tmp & I40E_ALT_BW_VALUE_MASK); | ||
184 | i40e_set_npar_bw_setting(pf); | ||
185 | } else if (strncmp(attr->ca_name, "commit", 6) == 0 && tmp == 1) { | ||
186 | if (i40e_commit_npar_bw_setting(pf)) | ||
187 | return -EIO; | ||
188 | } | ||
189 | |||
190 | return count; | ||
191 | } | ||
192 | |||
193 | /** | ||
194 | * i40e_cfgfs_vsi_release - Free up the configuration item memory | ||
195 | * @item: A pointer back to the configfs item created on driver load | ||
196 | **/ | ||
197 | static void i40e_cfgfs_vsi_release(struct config_item *item) | ||
198 | { | ||
199 | kfree(to_i40e_cfgfs_vsi(item)); | ||
200 | } | ||
201 | |||
202 | static struct configfs_item_operations i40e_cfgfs_vsi_item_ops = { | ||
203 | .release = i40e_cfgfs_vsi_release, | ||
204 | .show_attribute = i40e_cfgfs_vsi_attr_show, | ||
205 | .store_attribute = i40e_cfgfs_vsi_attr_store, | ||
206 | }; | ||
207 | |||
208 | static struct config_item_type i40e_cfgfs_vsi_type = { | ||
209 | .ct_item_ops = &i40e_cfgfs_vsi_item_ops, | ||
210 | .ct_attrs = i40e_cfgfs_vsi_attrs, | ||
211 | .ct_owner = THIS_MODULE, | ||
212 | }; | ||
213 | |||
214 | struct i40e_cfgfs_group { | ||
215 | struct config_group group; | ||
216 | }; | ||
217 | |||
218 | /** | ||
219 | * to_i40e_cfgfs_group - Get the group pointer from the config item | ||
220 | * @item: A pointer back to the configfs item created on driver load | ||
221 | **/ | ||
222 | static inline struct i40e_cfgfs_group * | ||
223 | to_i40e_cfgfs_group(struct config_item *item) | ||
224 | { | ||
225 | return item ? container_of(to_config_group(item), | ||
226 | struct i40e_cfgfs_group, group) : NULL; | ||
227 | } | ||
228 | |||
229 | /** | ||
230 | * i40e_cfgfs_group_make_item - Create the configfs item with group container | ||
231 | * @group: A pointer to our configfs group | ||
232 | * @name: A pointer to the name of the device we're looking for | ||
233 | **/ | ||
234 | static struct config_item * | ||
235 | i40e_cfgfs_group_make_item(struct config_group *group, const char *name) | ||
236 | { | ||
237 | struct i40e_cfgfs_vsi *i40e_cfgfs_vsi; | ||
238 | struct net_device *netdev; | ||
239 | struct i40e_netdev_priv *np; | ||
240 | |||
241 | read_lock(&dev_base_lock); | ||
242 | netdev = first_net_device(&init_net); | ||
243 | while (netdev) { | ||
244 | if (strncmp(netdev->name, name, sizeof(netdev->name)) == 0) | ||
245 | break; | ||
246 | netdev = next_net_device(netdev); | ||
247 | } | ||
248 | read_unlock(&dev_base_lock); | ||
249 | |||
250 | if (!netdev) | ||
251 | return ERR_PTR(-ENODEV); | ||
252 | |||
253 | /* is this netdev owned by i40e? */ | ||
254 | if (netdev->netdev_ops->ndo_open != i40e_open) | ||
255 | return ERR_PTR(-EACCES); | ||
256 | |||
257 | i40e_cfgfs_vsi = kzalloc(sizeof(*i40e_cfgfs_vsi), GFP_KERNEL); | ||
258 | if (!i40e_cfgfs_vsi) | ||
259 | return ERR_PTR(-ENOMEM); | ||
260 | |||
261 | np = netdev_priv(netdev); | ||
262 | i40e_cfgfs_vsi->vsi = np->vsi; | ||
263 | config_item_init_type_name(&i40e_cfgfs_vsi->item, name, | ||
264 | &i40e_cfgfs_vsi_type); | ||
265 | |||
266 | return &i40e_cfgfs_vsi->item; | ||
267 | } | ||
268 | |||
269 | static struct configfs_attribute i40e_cfgfs_group_attr_description = { | ||
270 | .ca_owner = THIS_MODULE, | ||
271 | .ca_name = "description", | ||
272 | .ca_mode = S_IRUGO, | ||
273 | }; | ||
274 | |||
275 | static struct configfs_attribute *i40e_cfgfs_group_attrs[] = { | ||
276 | &i40e_cfgfs_group_attr_description, | ||
277 | NULL, | ||
278 | }; | ||
279 | |||
280 | static ssize_t i40e_cfgfs_group_attr_show(struct config_item *item, | ||
281 | struct configfs_attribute *attr, | ||
282 | char *page) | ||
283 | { | ||
284 | return sprintf(page, | ||
285 | "i40e\n" | ||
286 | "\n" | ||
287 | "This subsystem allows the modification of network port configurations.\n" | ||
288 | "To start, use the name of the network port to be configured in a 'mkdir'\n" | ||
289 | "command, e.g. 'mkdir eth3'.\n"); | ||
290 | } | ||
291 | |||
292 | static void i40e_cfgfs_group_release(struct config_item *item) | ||
293 | { | ||
294 | kfree(to_i40e_cfgfs_group(item)); | ||
295 | } | ||
296 | |||
297 | static struct configfs_item_operations i40e_cfgfs_group_item_ops = { | ||
298 | .release = i40e_cfgfs_group_release, | ||
299 | .show_attribute = i40e_cfgfs_group_attr_show, | ||
300 | }; | ||
301 | |||
302 | /* Note that, since no extra work is required on ->drop_item(), | ||
303 | * no ->drop_item() is provided. | ||
304 | */ | ||
305 | static struct configfs_group_operations i40e_cfgfs_group_ops = { | ||
306 | .make_item = i40e_cfgfs_group_make_item, | ||
307 | }; | ||
308 | |||
309 | static struct config_item_type i40e_cfgfs_group_type = { | ||
310 | .ct_item_ops = &i40e_cfgfs_group_item_ops, | ||
311 | .ct_group_ops = &i40e_cfgfs_group_ops, | ||
312 | .ct_attrs = i40e_cfgfs_group_attrs, | ||
313 | .ct_owner = THIS_MODULE, | ||
314 | }; | ||
315 | |||
316 | static struct configfs_subsystem i40e_cfgfs_group_subsys = { | ||
317 | .su_group = { | ||
318 | .cg_item = { | ||
319 | .ci_namebuf = "i40e", | ||
320 | .ci_type = &i40e_cfgfs_group_type, | ||
321 | }, | ||
322 | }, | ||
323 | }; | ||
324 | |||
325 | /** | ||
326 | * i40e_configfs_init - Initialize configfs support for our driver | ||
327 | **/ | ||
328 | int i40e_configfs_init(void) | ||
329 | { | ||
330 | int ret; | ||
331 | struct configfs_subsystem *subsys; | ||
332 | |||
333 | subsys = &i40e_cfgfs_group_subsys; | ||
334 | |||
335 | config_group_init(&subsys->su_group); | ||
336 | mutex_init(&subsys->su_mutex); | ||
337 | ret = configfs_register_subsystem(subsys); | ||
338 | if (ret) { | ||
339 | pr_err("Error %d while registering configfs subsystem %s\n", | ||
340 | ret, subsys->su_group.cg_item.ci_namebuf); | ||
341 | return ret; | ||
342 | } | ||
343 | |||
344 | return 0; | ||
345 | } | ||
346 | |||
347 | /** | ||
348 | * i40e_configfs_exit - Bail out - unregister configfs subsystem and release | ||
349 | **/ | ||
350 | void i40e_configfs_exit(void) | ||
351 | { | ||
352 | configfs_unregister_subsystem(&i40e_cfgfs_group_subsys); | ||
353 | } | ||
354 | #endif /* IS_ENABLED(CONFIG_I40E_CONFIGFS_FS) */ | ||
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c index a11c70ca5a28..2f583554a260 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c | |||
@@ -269,22 +269,21 @@ static bool i40e_dcbnl_find_app(struct i40e_dcbx_config *cfg, | |||
269 | /** | 269 | /** |
270 | * i40e_dcbnl_flush_apps - Delete all removed APPs | 270 | * i40e_dcbnl_flush_apps - Delete all removed APPs |
271 | * @pf: the corresponding pf | 271 | * @pf: the corresponding pf |
272 | * @old_cfg: old DCBX configuration data | ||
272 | * @new_cfg: new DCBX configuration data | 273 | * @new_cfg: new DCBX configuration data |
273 | * | 274 | * |
274 | * Find and delete all APPs that are not present in the passed | 275 | * Find and delete all APPs that are not present in the passed |
275 | * DCB configuration | 276 | * DCB configuration |
276 | **/ | 277 | **/ |
277 | void i40e_dcbnl_flush_apps(struct i40e_pf *pf, | 278 | void i40e_dcbnl_flush_apps(struct i40e_pf *pf, |
279 | struct i40e_dcbx_config *old_cfg, | ||
278 | struct i40e_dcbx_config *new_cfg) | 280 | struct i40e_dcbx_config *new_cfg) |
279 | { | 281 | { |
280 | struct i40e_dcb_app_priority_table app; | 282 | struct i40e_dcb_app_priority_table app; |
281 | struct i40e_dcbx_config *dcbxcfg; | ||
282 | struct i40e_hw *hw = &pf->hw; | ||
283 | int i; | 283 | int i; |
284 | 284 | ||
285 | dcbxcfg = &hw->local_dcbx_config; | 285 | for (i = 0; i < old_cfg->numapps; i++) { |
286 | for (i = 0; i < dcbxcfg->numapps; i++) { | 286 | app = old_cfg->app[i]; |
287 | app = dcbxcfg->app[i]; | ||
288 | /* The APP is not available anymore delete it */ | 287 | /* The APP is not available anymore delete it */ |
289 | if (!i40e_dcbnl_find_app(new_cfg, &app)) | 288 | if (!i40e_dcbnl_find_app(new_cfg, &app)) |
290 | i40e_dcbnl_del_app(pf, &app); | 289 | i40e_dcbnl_del_app(pf, &app); |
@@ -306,9 +305,7 @@ void i40e_dcbnl_setup(struct i40e_vsi *vsi) | |||
306 | if (!(pf->flags & I40E_FLAG_DCB_CAPABLE)) | 305 | if (!(pf->flags & I40E_FLAG_DCB_CAPABLE)) |
307 | return; | 306 | return; |
308 | 307 | ||
309 | /* Do not setup DCB NL ops for MFP mode */ | 308 | dev->dcbnl_ops = &dcbnl_ops; |
310 | if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) | ||
311 | dev->dcbnl_ops = &dcbnl_ops; | ||
312 | 309 | ||
313 | /* Set initial IEEE DCB settings */ | 310 | /* Set initial IEEE DCB settings */ |
314 | i40e_dcbnl_set_all(vsi); | 311 | i40e_dcbnl_set_all(vsi); |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index c17ee77100d3..e802b6bc067d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c | |||
@@ -921,9 +921,10 @@ static void i40e_dbg_dump_veb_seid(struct i40e_pf *pf, int seid) | |||
921 | return; | 921 | return; |
922 | } | 922 | } |
923 | dev_info(&pf->pdev->dev, | 923 | dev_info(&pf->pdev->dev, |
924 | "veb idx=%d,%d stats_ic=%d seid=%d uplink=%d\n", | 924 | "veb idx=%d,%d stats_ic=%d seid=%d uplink=%d mode=%s\n", |
925 | veb->idx, veb->veb_idx, veb->stats_idx, veb->seid, | 925 | veb->idx, veb->veb_idx, veb->stats_idx, veb->seid, |
926 | veb->uplink_seid); | 926 | veb->uplink_seid, |
927 | veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB"); | ||
927 | i40e_dbg_dump_eth_stats(pf, &veb->stats); | 928 | i40e_dbg_dump_eth_stats(pf, &veb->stats); |
928 | } | 929 | } |
929 | 930 | ||
@@ -1487,11 +1488,15 @@ static ssize_t i40e_dbg_command_write(struct file *filp, | |||
1487 | } else { | 1488 | } else { |
1488 | dev_info(&pf->pdev->dev, "clear_stats vsi [seid]\n"); | 1489 | dev_info(&pf->pdev->dev, "clear_stats vsi [seid]\n"); |
1489 | } | 1490 | } |
1490 | } else if (strncmp(&cmd_buf[12], "pf", 2) == 0) { | 1491 | } else if (strncmp(&cmd_buf[12], "port", 4) == 0) { |
1491 | i40e_pf_reset_stats(pf); | 1492 | if (pf->hw.partition_id == 1) { |
1492 | dev_info(&pf->pdev->dev, "pf clear stats called\n"); | 1493 | i40e_pf_reset_stats(pf); |
1494 | dev_info(&pf->pdev->dev, "port stats cleared\n"); | ||
1495 | } else { | ||
1496 | dev_info(&pf->pdev->dev, "clear port stats not allowed on this port partition\n"); | ||
1497 | } | ||
1493 | } else { | 1498 | } else { |
1494 | dev_info(&pf->pdev->dev, "clear_stats vsi [seid] or clear_stats pf\n"); | 1499 | dev_info(&pf->pdev->dev, "clear_stats vsi [seid] or clear_stats port\n"); |
1495 | } | 1500 | } |
1496 | } else if (strncmp(cmd_buf, "send aq_cmd", 11) == 0) { | 1501 | } else if (strncmp(cmd_buf, "send aq_cmd", 11) == 0) { |
1497 | struct i40e_aq_desc *desc; | 1502 | struct i40e_aq_desc *desc; |
@@ -1897,7 +1902,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp, | |||
1897 | dev_info(&pf->pdev->dev, " read <reg>\n"); | 1902 | dev_info(&pf->pdev->dev, " read <reg>\n"); |
1898 | dev_info(&pf->pdev->dev, " write <reg> <value>\n"); | 1903 | dev_info(&pf->pdev->dev, " write <reg> <value>\n"); |
1899 | dev_info(&pf->pdev->dev, " clear_stats vsi [seid]\n"); | 1904 | dev_info(&pf->pdev->dev, " clear_stats vsi [seid]\n"); |
1900 | dev_info(&pf->pdev->dev, " clear_stats pf\n"); | 1905 | dev_info(&pf->pdev->dev, " clear_stats port\n"); |
1901 | dev_info(&pf->pdev->dev, " pfr\n"); | 1906 | dev_info(&pf->pdev->dev, " pfr\n"); |
1902 | dev_info(&pf->pdev->dev, " corer\n"); | 1907 | dev_info(&pf->pdev->dev, " corer\n"); |
1903 | dev_info(&pf->pdev->dev, " globr\n"); | 1908 | dev_info(&pf->pdev->dev, " globr\n"); |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index b8230dc205ec..7413b0e429c8 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | * | 2 | * |
3 | * Intel Ethernet Controller XL710 Family Linux Driver | 3 | * Intel Ethernet Controller XL710 Family Linux Driver |
4 | * Copyright(c) 2013 - 2014 Intel Corporation. | 4 | * Copyright(c) 2013 - 2015 Intel Corporation. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms and conditions of the GNU General Public License, | 7 | * under the terms and conditions of the GNU General Public License, |
@@ -113,7 +113,6 @@ static struct i40e_stats i40e_gstrings_stats[] = { | |||
113 | I40E_PF_STAT("tx_broadcast", stats.eth.tx_broadcast), | 113 | I40E_PF_STAT("tx_broadcast", stats.eth.tx_broadcast), |
114 | I40E_PF_STAT("tx_errors", stats.eth.tx_errors), | 114 | I40E_PF_STAT("tx_errors", stats.eth.tx_errors), |
115 | I40E_PF_STAT("rx_dropped", stats.eth.rx_discards), | 115 | I40E_PF_STAT("rx_dropped", stats.eth.rx_discards), |
116 | I40E_PF_STAT("tx_dropped", stats.eth.tx_discards), | ||
117 | I40E_PF_STAT("tx_dropped_link_down", stats.tx_dropped_link_down), | 116 | I40E_PF_STAT("tx_dropped_link_down", stats.tx_dropped_link_down), |
118 | I40E_PF_STAT("crc_errors", stats.crc_errors), | 117 | I40E_PF_STAT("crc_errors", stats.crc_errors), |
119 | I40E_PF_STAT("illegal_bytes", stats.illegal_bytes), | 118 | I40E_PF_STAT("illegal_bytes", stats.illegal_bytes), |
@@ -218,6 +217,13 @@ static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = { | |||
218 | 217 | ||
219 | #define I40E_TEST_LEN (sizeof(i40e_gstrings_test) / ETH_GSTRING_LEN) | 218 | #define I40E_TEST_LEN (sizeof(i40e_gstrings_test) / ETH_GSTRING_LEN) |
220 | 219 | ||
220 | static const char i40e_priv_flags_strings[][ETH_GSTRING_LEN] = { | ||
221 | "NPAR", | ||
222 | }; | ||
223 | |||
224 | #define I40E_PRIV_FLAGS_STR_LEN \ | ||
225 | (sizeof(i40e_priv_flags_strings) / ETH_GSTRING_LEN) | ||
226 | |||
221 | /** | 227 | /** |
222 | * i40e_partition_setting_complaint - generic complaint for MFP restriction | 228 | * i40e_partition_setting_complaint - generic complaint for MFP restriction |
223 | * @pf: the PF struct | 229 | * @pf: the PF struct |
@@ -229,73 +235,20 @@ static void i40e_partition_setting_complaint(struct i40e_pf *pf) | |||
229 | } | 235 | } |
230 | 236 | ||
231 | /** | 237 | /** |
232 | * i40e_get_settings - Get Link Speed and Duplex settings | 238 | * i40e_get_settings_link_up - Get the Link settings for when link is up |
239 | * @hw: hw structure | ||
240 | * @ecmd: ethtool command to fill in | ||
233 | * @netdev: network interface device structure | 241 | * @netdev: network interface device structure |
234 | * @ecmd: ethtool command | ||
235 | * | 242 | * |
236 | * Reports speed/duplex settings based on media_type | ||
237 | **/ | 243 | **/ |
238 | static int i40e_get_settings(struct net_device *netdev, | 244 | static void i40e_get_settings_link_up(struct i40e_hw *hw, |
239 | struct ethtool_cmd *ecmd) | 245 | struct ethtool_cmd *ecmd, |
246 | struct net_device *netdev) | ||
240 | { | 247 | { |
241 | struct i40e_netdev_priv *np = netdev_priv(netdev); | ||
242 | struct i40e_pf *pf = np->vsi->back; | ||
243 | struct i40e_hw *hw = &pf->hw; | ||
244 | struct i40e_link_status *hw_link_info = &hw->phy.link_info; | 248 | struct i40e_link_status *hw_link_info = &hw->phy.link_info; |
245 | bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP; | ||
246 | u32 link_speed = hw_link_info->link_speed; | 249 | u32 link_speed = hw_link_info->link_speed; |
247 | 250 | ||
248 | /* hardware is either in 40G mode or 10G mode | 251 | /* Initialize supported and advertised settings based on phy settings */ |
249 | * NOTE: this section initializes supported and advertising | ||
250 | */ | ||
251 | if (!link_up) { | ||
252 | /* link is down and the driver needs to fall back on | ||
253 | * device ID to determine what kinds of info to display, | ||
254 | * it's mostly a guess that may change when link is up | ||
255 | */ | ||
256 | switch (hw->device_id) { | ||
257 | case I40E_DEV_ID_QSFP_A: | ||
258 | case I40E_DEV_ID_QSFP_B: | ||
259 | case I40E_DEV_ID_QSFP_C: | ||
260 | /* pluggable QSFP */ | ||
261 | ecmd->supported = SUPPORTED_40000baseSR4_Full | | ||
262 | SUPPORTED_40000baseCR4_Full | | ||
263 | SUPPORTED_40000baseLR4_Full; | ||
264 | ecmd->advertising = ADVERTISED_40000baseSR4_Full | | ||
265 | ADVERTISED_40000baseCR4_Full | | ||
266 | ADVERTISED_40000baseLR4_Full; | ||
267 | break; | ||
268 | case I40E_DEV_ID_KX_B: | ||
269 | /* backplane 40G */ | ||
270 | ecmd->supported = SUPPORTED_40000baseKR4_Full; | ||
271 | ecmd->advertising = ADVERTISED_40000baseKR4_Full; | ||
272 | break; | ||
273 | case I40E_DEV_ID_KX_C: | ||
274 | /* backplane 10G */ | ||
275 | ecmd->supported = SUPPORTED_10000baseKR_Full; | ||
276 | ecmd->advertising = ADVERTISED_10000baseKR_Full; | ||
277 | break; | ||
278 | case I40E_DEV_ID_10G_BASE_T: | ||
279 | ecmd->supported = SUPPORTED_10000baseT_Full | | ||
280 | SUPPORTED_1000baseT_Full | | ||
281 | SUPPORTED_100baseT_Full; | ||
282 | ecmd->advertising = ADVERTISED_10000baseT_Full | | ||
283 | ADVERTISED_1000baseT_Full | | ||
284 | ADVERTISED_100baseT_Full; | ||
285 | break; | ||
286 | default: | ||
287 | /* all the rest are 10G/1G */ | ||
288 | ecmd->supported = SUPPORTED_10000baseT_Full | | ||
289 | SUPPORTED_1000baseT_Full; | ||
290 | ecmd->advertising = ADVERTISED_10000baseT_Full | | ||
291 | ADVERTISED_1000baseT_Full; | ||
292 | break; | ||
293 | } | ||
294 | |||
295 | /* skip phy_type use as it is zero when link is down */ | ||
296 | goto no_valid_phy_type; | ||
297 | } | ||
298 | |||
299 | switch (hw_link_info->phy_type) { | 252 | switch (hw_link_info->phy_type) { |
300 | case I40E_PHY_TYPE_40GBASE_CR4: | 253 | case I40E_PHY_TYPE_40GBASE_CR4: |
301 | case I40E_PHY_TYPE_40GBASE_CR4_CU: | 254 | case I40E_PHY_TYPE_40GBASE_CR4_CU: |
@@ -304,6 +257,10 @@ static int i40e_get_settings(struct net_device *netdev, | |||
304 | ecmd->advertising = ADVERTISED_Autoneg | | 257 | ecmd->advertising = ADVERTISED_Autoneg | |
305 | ADVERTISED_40000baseCR4_Full; | 258 | ADVERTISED_40000baseCR4_Full; |
306 | break; | 259 | break; |
260 | case I40E_PHY_TYPE_XLAUI: | ||
261 | case I40E_PHY_TYPE_XLPPI: | ||
262 | ecmd->supported = SUPPORTED_40000baseCR4_Full; | ||
263 | break; | ||
307 | case I40E_PHY_TYPE_40GBASE_KR4: | 264 | case I40E_PHY_TYPE_40GBASE_KR4: |
308 | ecmd->supported = SUPPORTED_Autoneg | | 265 | ecmd->supported = SUPPORTED_Autoneg | |
309 | SUPPORTED_40000baseKR4_Full; | 266 | SUPPORTED_40000baseKR4_Full; |
@@ -311,8 +268,6 @@ static int i40e_get_settings(struct net_device *netdev, | |||
311 | ADVERTISED_40000baseKR4_Full; | 268 | ADVERTISED_40000baseKR4_Full; |
312 | break; | 269 | break; |
313 | case I40E_PHY_TYPE_40GBASE_SR4: | 270 | case I40E_PHY_TYPE_40GBASE_SR4: |
314 | case I40E_PHY_TYPE_XLPPI: | ||
315 | case I40E_PHY_TYPE_XLAUI: | ||
316 | ecmd->supported = SUPPORTED_40000baseSR4_Full; | 271 | ecmd->supported = SUPPORTED_40000baseSR4_Full; |
317 | break; | 272 | break; |
318 | case I40E_PHY_TYPE_40GBASE_LR4: | 273 | case I40E_PHY_TYPE_40GBASE_LR4: |
@@ -334,20 +289,40 @@ static int i40e_get_settings(struct net_device *netdev, | |||
334 | case I40E_PHY_TYPE_10GBASE_LR: | 289 | case I40E_PHY_TYPE_10GBASE_LR: |
335 | case I40E_PHY_TYPE_1000BASE_SX: | 290 | case I40E_PHY_TYPE_1000BASE_SX: |
336 | case I40E_PHY_TYPE_1000BASE_LX: | 291 | case I40E_PHY_TYPE_1000BASE_LX: |
337 | ecmd->supported = SUPPORTED_10000baseT_Full; | 292 | ecmd->supported = SUPPORTED_10000baseT_Full | |
338 | ecmd->supported |= SUPPORTED_1000baseT_Full; | 293 | SUPPORTED_1000baseT_Full; |
294 | if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) | ||
295 | ecmd->advertising |= ADVERTISED_10000baseT_Full; | ||
296 | if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) | ||
297 | ecmd->advertising |= ADVERTISED_1000baseT_Full; | ||
298 | break; | ||
299 | case I40E_PHY_TYPE_1000BASE_KX: | ||
300 | ecmd->supported = SUPPORTED_Autoneg | | ||
301 | SUPPORTED_1000baseKX_Full; | ||
302 | ecmd->advertising = ADVERTISED_Autoneg | | ||
303 | ADVERTISED_1000baseKX_Full; | ||
339 | break; | 304 | break; |
340 | case I40E_PHY_TYPE_10GBASE_CR1_CU: | ||
341 | case I40E_PHY_TYPE_10GBASE_CR1: | ||
342 | case I40E_PHY_TYPE_10GBASE_T: | 305 | case I40E_PHY_TYPE_10GBASE_T: |
306 | case I40E_PHY_TYPE_1000BASE_T: | ||
307 | case I40E_PHY_TYPE_100BASE_TX: | ||
343 | ecmd->supported = SUPPORTED_Autoneg | | 308 | ecmd->supported = SUPPORTED_Autoneg | |
344 | SUPPORTED_10000baseT_Full | | 309 | SUPPORTED_10000baseT_Full | |
345 | SUPPORTED_1000baseT_Full | | 310 | SUPPORTED_1000baseT_Full | |
346 | SUPPORTED_100baseT_Full; | 311 | SUPPORTED_100baseT_Full; |
312 | ecmd->advertising = ADVERTISED_Autoneg; | ||
313 | if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) | ||
314 | ecmd->advertising |= ADVERTISED_10000baseT_Full; | ||
315 | if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) | ||
316 | ecmd->advertising |= ADVERTISED_1000baseT_Full; | ||
317 | if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB) | ||
318 | ecmd->advertising |= ADVERTISED_100baseT_Full; | ||
319 | break; | ||
320 | case I40E_PHY_TYPE_10GBASE_CR1_CU: | ||
321 | case I40E_PHY_TYPE_10GBASE_CR1: | ||
322 | ecmd->supported = SUPPORTED_Autoneg | | ||
323 | SUPPORTED_10000baseT_Full; | ||
347 | ecmd->advertising = ADVERTISED_Autoneg | | 324 | ecmd->advertising = ADVERTISED_Autoneg | |
348 | ADVERTISED_10000baseT_Full | | 325 | ADVERTISED_10000baseT_Full; |
349 | ADVERTISED_1000baseT_Full | | ||
350 | ADVERTISED_100baseT_Full; | ||
351 | break; | 326 | break; |
352 | case I40E_PHY_TYPE_XAUI: | 327 | case I40E_PHY_TYPE_XAUI: |
353 | case I40E_PHY_TYPE_XFI: | 328 | case I40E_PHY_TYPE_XFI: |
@@ -355,34 +330,14 @@ static int i40e_get_settings(struct net_device *netdev, | |||
355 | case I40E_PHY_TYPE_10GBASE_SFPP_CU: | 330 | case I40E_PHY_TYPE_10GBASE_SFPP_CU: |
356 | ecmd->supported = SUPPORTED_10000baseT_Full; | 331 | ecmd->supported = SUPPORTED_10000baseT_Full; |
357 | break; | 332 | break; |
358 | case I40E_PHY_TYPE_1000BASE_KX: | ||
359 | case I40E_PHY_TYPE_1000BASE_T: | ||
360 | ecmd->supported = SUPPORTED_Autoneg | | ||
361 | SUPPORTED_10000baseT_Full | | ||
362 | SUPPORTED_1000baseT_Full | | ||
363 | SUPPORTED_100baseT_Full; | ||
364 | ecmd->advertising = ADVERTISED_Autoneg | | ||
365 | ADVERTISED_10000baseT_Full | | ||
366 | ADVERTISED_1000baseT_Full | | ||
367 | ADVERTISED_100baseT_Full; | ||
368 | break; | ||
369 | case I40E_PHY_TYPE_100BASE_TX: | ||
370 | ecmd->supported = SUPPORTED_Autoneg | | ||
371 | SUPPORTED_10000baseT_Full | | ||
372 | SUPPORTED_1000baseT_Full | | ||
373 | SUPPORTED_100baseT_Full; | ||
374 | ecmd->advertising = ADVERTISED_Autoneg | | ||
375 | ADVERTISED_10000baseT_Full | | ||
376 | ADVERTISED_1000baseT_Full | | ||
377 | ADVERTISED_100baseT_Full; | ||
378 | break; | ||
379 | case I40E_PHY_TYPE_SGMII: | 333 | case I40E_PHY_TYPE_SGMII: |
380 | ecmd->supported = SUPPORTED_Autoneg | | 334 | ecmd->supported = SUPPORTED_Autoneg | |
381 | SUPPORTED_1000baseT_Full | | 335 | SUPPORTED_1000baseT_Full | |
382 | SUPPORTED_100baseT_Full; | 336 | SUPPORTED_100baseT_Full; |
383 | ecmd->advertising = ADVERTISED_Autoneg | | 337 | if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) |
384 | ADVERTISED_1000baseT_Full | | 338 | ecmd->advertising |= ADVERTISED_1000baseT_Full; |
385 | ADVERTISED_100baseT_Full; | 339 | if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB) |
340 | ecmd->advertising |= ADVERTISED_100baseT_Full; | ||
386 | break; | 341 | break; |
387 | default: | 342 | default: |
388 | /* if we got here and link is up something bad is afoot */ | 343 | /* if we got here and link is up something bad is afoot */ |
@@ -390,8 +345,118 @@ static int i40e_get_settings(struct net_device *netdev, | |||
390 | hw_link_info->phy_type); | 345 | hw_link_info->phy_type); |
391 | } | 346 | } |
392 | 347 | ||
393 | no_valid_phy_type: | 348 | /* Set speed and duplex */ |
394 | /* this is if autoneg is enabled or disabled */ | 349 | switch (link_speed) { |
350 | case I40E_LINK_SPEED_40GB: | ||
351 | /* need a SPEED_40000 in ethtool.h */ | ||
352 | ethtool_cmd_speed_set(ecmd, 40000); | ||
353 | break; | ||
354 | case I40E_LINK_SPEED_10GB: | ||
355 | ethtool_cmd_speed_set(ecmd, SPEED_10000); | ||
356 | break; | ||
357 | case I40E_LINK_SPEED_1GB: | ||
358 | ethtool_cmd_speed_set(ecmd, SPEED_1000); | ||
359 | break; | ||
360 | case I40E_LINK_SPEED_100MB: | ||
361 | ethtool_cmd_speed_set(ecmd, SPEED_100); | ||
362 | break; | ||
363 | default: | ||
364 | break; | ||
365 | } | ||
366 | ecmd->duplex = DUPLEX_FULL; | ||
367 | } | ||
368 | |||
369 | /** | ||
370 | * i40e_get_settings_link_down - Get the Link settings for when link is down | ||
371 | * @hw: hw structure | ||
372 | * @ecmd: ethtool command to fill in | ||
373 | * | ||
374 | * Reports link settings that can be determined when link is down | ||
375 | **/ | ||
376 | static void i40e_get_settings_link_down(struct i40e_hw *hw, | ||
377 | struct ethtool_cmd *ecmd) | ||
378 | { | ||
379 | struct i40e_link_status *hw_link_info = &hw->phy.link_info; | ||
380 | |||
381 | /* link is down and the driver needs to fall back on | ||
382 | * device ID to determine what kinds of info to display, | ||
383 | * it's mostly a guess that may change when link is up | ||
384 | */ | ||
385 | switch (hw->device_id) { | ||
386 | case I40E_DEV_ID_QSFP_A: | ||
387 | case I40E_DEV_ID_QSFP_B: | ||
388 | case I40E_DEV_ID_QSFP_C: | ||
389 | /* pluggable QSFP */ | ||
390 | ecmd->supported = SUPPORTED_40000baseSR4_Full | | ||
391 | SUPPORTED_40000baseCR4_Full | | ||
392 | SUPPORTED_40000baseLR4_Full; | ||
393 | ecmd->advertising = ADVERTISED_40000baseSR4_Full | | ||
394 | ADVERTISED_40000baseCR4_Full | | ||
395 | ADVERTISED_40000baseLR4_Full; | ||
396 | break; | ||
397 | case I40E_DEV_ID_KX_B: | ||
398 | /* backplane 40G */ | ||
399 | ecmd->supported = SUPPORTED_40000baseKR4_Full; | ||
400 | ecmd->advertising = ADVERTISED_40000baseKR4_Full; | ||
401 | break; | ||
402 | case I40E_DEV_ID_KX_C: | ||
403 | /* backplane 10G */ | ||
404 | ecmd->supported = SUPPORTED_10000baseKR_Full; | ||
405 | ecmd->advertising = ADVERTISED_10000baseKR_Full; | ||
406 | break; | ||
407 | case I40E_DEV_ID_10G_BASE_T: | ||
408 | ecmd->supported = SUPPORTED_10000baseT_Full | | ||
409 | SUPPORTED_1000baseT_Full | | ||
410 | SUPPORTED_100baseT_Full; | ||
411 | /* Figure out what has been requested */ | ||
412 | if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) | ||
413 | ecmd->advertising |= ADVERTISED_10000baseT_Full; | ||
414 | if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) | ||
415 | ecmd->advertising |= ADVERTISED_1000baseT_Full; | ||
416 | if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB) | ||
417 | ecmd->advertising |= ADVERTISED_100baseT_Full; | ||
418 | break; | ||
419 | default: | ||
420 | /* all the rest are 10G/1G */ | ||
421 | ecmd->supported = SUPPORTED_10000baseT_Full | | ||
422 | SUPPORTED_1000baseT_Full; | ||
423 | /* Figure out what has been requested */ | ||
424 | if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) | ||
425 | ecmd->advertising |= ADVERTISED_10000baseT_Full; | ||
426 | if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) | ||
427 | ecmd->advertising |= ADVERTISED_1000baseT_Full; | ||
428 | break; | ||
429 | } | ||
430 | |||
431 | /* With no link speed and duplex are unknown */ | ||
432 | ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); | ||
433 | ecmd->duplex = DUPLEX_UNKNOWN; | ||
434 | } | ||
435 | |||
436 | /** | ||
437 | * i40e_get_settings - Get Link Speed and Duplex settings | ||
438 | * @netdev: network interface device structure | ||
439 | * @ecmd: ethtool command | ||
440 | * | ||
441 | * Reports speed/duplex settings based on media_type | ||
442 | **/ | ||
443 | static int i40e_get_settings(struct net_device *netdev, | ||
444 | struct ethtool_cmd *ecmd) | ||
445 | { | ||
446 | struct i40e_netdev_priv *np = netdev_priv(netdev); | ||
447 | struct i40e_pf *pf = np->vsi->back; | ||
448 | struct i40e_hw *hw = &pf->hw; | ||
449 | struct i40e_link_status *hw_link_info = &hw->phy.link_info; | ||
450 | bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP; | ||
451 | |||
452 | if (link_up) | ||
453 | i40e_get_settings_link_up(hw, ecmd, netdev); | ||
454 | else | ||
455 | i40e_get_settings_link_down(hw, ecmd); | ||
456 | |||
457 | /* Now set the settings that don't rely on link being up/down */ | ||
458 | |||
459 | /* Set autoneg settings */ | ||
395 | ecmd->autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ? | 460 | ecmd->autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ? |
396 | AUTONEG_ENABLE : AUTONEG_DISABLE); | 461 | AUTONEG_ENABLE : AUTONEG_DISABLE); |
397 | 462 | ||
@@ -424,11 +489,13 @@ no_valid_phy_type: | |||
424 | break; | 489 | break; |
425 | } | 490 | } |
426 | 491 | ||
492 | /* Set transceiver */ | ||
427 | ecmd->transceiver = XCVR_EXTERNAL; | 493 | ecmd->transceiver = XCVR_EXTERNAL; |
428 | 494 | ||
495 | /* Set flow control settings */ | ||
429 | ecmd->supported |= SUPPORTED_Pause; | 496 | ecmd->supported |= SUPPORTED_Pause; |
430 | 497 | ||
431 | switch (hw->fc.current_mode) { | 498 | switch (hw->fc.requested_mode) { |
432 | case I40E_FC_FULL: | 499 | case I40E_FC_FULL: |
433 | ecmd->advertising |= ADVERTISED_Pause; | 500 | ecmd->advertising |= ADVERTISED_Pause; |
434 | break; | 501 | break; |
@@ -445,30 +512,6 @@ no_valid_phy_type: | |||
445 | break; | 512 | break; |
446 | } | 513 | } |
447 | 514 | ||
448 | if (link_up) { | ||
449 | switch (link_speed) { | ||
450 | case I40E_LINK_SPEED_40GB: | ||
451 | /* need a SPEED_40000 in ethtool.h */ | ||
452 | ethtool_cmd_speed_set(ecmd, 40000); | ||
453 | break; | ||
454 | case I40E_LINK_SPEED_10GB: | ||
455 | ethtool_cmd_speed_set(ecmd, SPEED_10000); | ||
456 | break; | ||
457 | case I40E_LINK_SPEED_1GB: | ||
458 | ethtool_cmd_speed_set(ecmd, SPEED_1000); | ||
459 | break; | ||
460 | case I40E_LINK_SPEED_100MB: | ||
461 | ethtool_cmd_speed_set(ecmd, SPEED_100); | ||
462 | break; | ||
463 | default: | ||
464 | break; | ||
465 | } | ||
466 | ecmd->duplex = DUPLEX_FULL; | ||
467 | } else { | ||
468 | ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); | ||
469 | ecmd->duplex = DUPLEX_UNKNOWN; | ||
470 | } | ||
471 | |||
472 | return 0; | 515 | return 0; |
473 | } | 516 | } |
474 | 517 | ||
@@ -601,6 +644,8 @@ static int i40e_set_settings(struct net_device *netdev, | |||
601 | config.eeer = abilities.eeer_val; | 644 | config.eeer = abilities.eeer_val; |
602 | config.low_power_ctrl = abilities.d3_lpan; | 645 | config.low_power_ctrl = abilities.d3_lpan; |
603 | 646 | ||
647 | /* save the requested speeds */ | ||
648 | hw->phy.link_info.requested_speeds = config.link_speed; | ||
604 | /* set link and auto negotiation so changes take effect */ | 649 | /* set link and auto negotiation so changes take effect */ |
605 | config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK; | 650 | config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK; |
606 | /* If link is up put link down */ | 651 | /* If link is up put link down */ |
@@ -621,7 +666,7 @@ static int i40e_set_settings(struct net_device *netdev, | |||
621 | return -EAGAIN; | 666 | return -EAGAIN; |
622 | } | 667 | } |
623 | 668 | ||
624 | status = i40e_update_link_info(hw, true); | 669 | status = i40e_aq_get_link_info(hw, true, NULL, NULL); |
625 | if (status) | 670 | if (status) |
626 | netdev_info(netdev, "Updating link info failed with error %d\n", | 671 | netdev_info(netdev, "Updating link info failed with error %d\n", |
627 | status); | 672 | status); |
@@ -767,7 +812,7 @@ static int i40e_set_pauseparam(struct net_device *netdev, | |||
767 | err = -EAGAIN; | 812 | err = -EAGAIN; |
768 | } | 813 | } |
769 | if (aq_failures & I40E_SET_FC_AQ_FAIL_UPDATE) { | 814 | if (aq_failures & I40E_SET_FC_AQ_FAIL_UPDATE) { |
770 | netdev_info(netdev, "Set fc failed on the update_link_info call with error %d and status %d\n", | 815 | netdev_info(netdev, "Set fc failed on the get_link_info call with error %d and status %d\n", |
771 | status, hw->aq.asq_last_status); | 816 | status, hw->aq.asq_last_status); |
772 | err = -EAGAIN; | 817 | err = -EAGAIN; |
773 | } | 818 | } |
@@ -998,6 +1043,7 @@ static void i40e_get_drvinfo(struct net_device *netdev, | |||
998 | sizeof(drvinfo->fw_version)); | 1043 | sizeof(drvinfo->fw_version)); |
999 | strlcpy(drvinfo->bus_info, pci_name(pf->pdev), | 1044 | strlcpy(drvinfo->bus_info, pci_name(pf->pdev), |
1000 | sizeof(drvinfo->bus_info)); | 1045 | sizeof(drvinfo->bus_info)); |
1046 | drvinfo->n_priv_flags = I40E_PRIV_FLAGS_STR_LEN; | ||
1001 | } | 1047 | } |
1002 | 1048 | ||
1003 | static void i40e_get_ringparam(struct net_device *netdev, | 1049 | static void i40e_get_ringparam(struct net_device *netdev, |
@@ -1185,6 +1231,8 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset) | |||
1185 | } else { | 1231 | } else { |
1186 | return I40E_VSI_STATS_LEN(netdev); | 1232 | return I40E_VSI_STATS_LEN(netdev); |
1187 | } | 1233 | } |
1234 | case ETH_SS_PRIV_FLAGS: | ||
1235 | return I40E_PRIV_FLAGS_STR_LEN; | ||
1188 | default: | 1236 | default: |
1189 | return -EOPNOTSUPP; | 1237 | return -EOPNOTSUPP; |
1190 | } | 1238 | } |
@@ -1358,6 +1406,15 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset, | |||
1358 | } | 1406 | } |
1359 | /* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */ | 1407 | /* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */ |
1360 | break; | 1408 | break; |
1409 | case ETH_SS_PRIV_FLAGS: | ||
1410 | for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) { | ||
1411 | memcpy(data, i40e_priv_flags_strings[i], | ||
1412 | ETH_GSTRING_LEN); | ||
1413 | data += ETH_GSTRING_LEN; | ||
1414 | } | ||
1415 | break; | ||
1416 | default: | ||
1417 | break; | ||
1361 | } | 1418 | } |
1362 | } | 1419 | } |
1363 | 1420 | ||
@@ -1599,6 +1656,8 @@ static int i40e_set_phys_id(struct net_device *netdev, | |||
1599 | case ETHTOOL_ID_INACTIVE: | 1656 | case ETHTOOL_ID_INACTIVE: |
1600 | i40e_led_set(hw, pf->led_status, false); | 1657 | i40e_led_set(hw, pf->led_status, false); |
1601 | break; | 1658 | break; |
1659 | default: | ||
1660 | break; | ||
1602 | } | 1661 | } |
1603 | 1662 | ||
1604 | return 0; | 1663 | return 0; |
@@ -1703,6 +1762,11 @@ static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd) | |||
1703 | { | 1762 | { |
1704 | cmd->data = 0; | 1763 | cmd->data = 0; |
1705 | 1764 | ||
1765 | if (pf->vsi[pf->lan_vsi]->rxnfc.data != 0) { | ||
1766 | cmd->data = pf->vsi[pf->lan_vsi]->rxnfc.data; | ||
1767 | cmd->flow_type = pf->vsi[pf->lan_vsi]->rxnfc.flow_type; | ||
1768 | return 0; | ||
1769 | } | ||
1706 | /* Report default options for RSS on i40e */ | 1770 | /* Report default options for RSS on i40e */ |
1707 | switch (cmd->flow_type) { | 1771 | switch (cmd->flow_type) { |
1708 | case TCP_V4_FLOW: | 1772 | case TCP_V4_FLOW: |
@@ -1974,6 +2038,9 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) | |||
1974 | wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32)); | 2038 | wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32)); |
1975 | i40e_flush(hw); | 2039 | i40e_flush(hw); |
1976 | 2040 | ||
2041 | /* Save setting for future output/update */ | ||
2042 | pf->vsi[pf->lan_vsi]->rxnfc = *nfc; | ||
2043 | |||
1977 | return 0; | 2044 | return 0; |
1978 | } | 2045 | } |
1979 | 2046 | ||
@@ -2281,10 +2348,6 @@ static int i40e_set_channels(struct net_device *dev, | |||
2281 | /* update feature limits from largest to smallest supported values */ | 2348 | /* update feature limits from largest to smallest supported values */ |
2282 | /* TODO: Flow director limit, DCB etc */ | 2349 | /* TODO: Flow director limit, DCB etc */ |
2283 | 2350 | ||
2284 | /* cap RSS limit */ | ||
2285 | if (count > pf->rss_size_max) | ||
2286 | count = pf->rss_size_max; | ||
2287 | |||
2288 | /* use rss_reconfig to rebuild with new queue count and update traffic | 2351 | /* use rss_reconfig to rebuild with new queue count and update traffic |
2289 | * class queue mapping | 2352 | * class queue mapping |
2290 | */ | 2353 | */ |
@@ -2295,6 +2358,29 @@ static int i40e_set_channels(struct net_device *dev, | |||
2295 | return -EINVAL; | 2358 | return -EINVAL; |
2296 | } | 2359 | } |
2297 | 2360 | ||
2361 | /** | ||
2362 | * i40e_get_priv_flags - report device private flags | ||
2363 | * @dev: network interface device structure | ||
2364 | * | ||
2365 | * The get string set count and the string set should be matched for each | ||
2366 | * flag returned. Add new strings for each flag to the i40e_priv_flags_strings | ||
2367 | * array. | ||
2368 | * | ||
2369 | * Returns a u32 bitmap of flags. | ||
2370 | **/ | ||
2371 | u32 i40e_get_priv_flags(struct net_device *dev) | ||
2372 | { | ||
2373 | struct i40e_netdev_priv *np = netdev_priv(dev); | ||
2374 | struct i40e_vsi *vsi = np->vsi; | ||
2375 | struct i40e_pf *pf = vsi->back; | ||
2376 | u32 ret_flags = 0; | ||
2377 | |||
2378 | ret_flags |= pf->hw.func_caps.npar_enable ? | ||
2379 | I40E_PRIV_FLAGS_NPAR_FLAG : 0; | ||
2380 | |||
2381 | return ret_flags; | ||
2382 | } | ||
2383 | |||
2298 | static const struct ethtool_ops i40e_ethtool_ops = { | 2384 | static const struct ethtool_ops i40e_ethtool_ops = { |
2299 | .get_settings = i40e_get_settings, | 2385 | .get_settings = i40e_get_settings, |
2300 | .set_settings = i40e_set_settings, | 2386 | .set_settings = i40e_set_settings, |
@@ -2326,6 +2412,7 @@ static const struct ethtool_ops i40e_ethtool_ops = { | |||
2326 | .get_channels = i40e_get_channels, | 2412 | .get_channels = i40e_get_channels, |
2327 | .set_channels = i40e_set_channels, | 2413 | .set_channels = i40e_set_channels, |
2328 | .get_ts_info = i40e_get_ts_info, | 2414 | .get_ts_info = i40e_get_ts_info, |
2415 | .get_priv_flags = i40e_get_priv_flags, | ||
2329 | }; | 2416 | }; |
2330 | 2417 | ||
2331 | void i40e_set_ethtool_ops(struct net_device *netdev) | 2418 | void i40e_set_ethtool_ops(struct net_device *netdev) |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c index 27c206e62da7..05d883e4d4ac 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c +++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | * | 2 | * |
3 | * Intel Ethernet Controller XL710 Family Linux Driver | 3 | * Intel Ethernet Controller XL710 Family Linux Driver |
4 | * Copyright(c) 2013 - 2014 Intel Corporation. | 4 | * Copyright(c) 2013 - 2015 Intel Corporation. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms and conditions of the GNU General Public License, | 7 | * under the terms and conditions of the GNU General Public License, |
@@ -381,12 +381,11 @@ int i40e_fcoe_vsi_init(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt) | |||
381 | ctxt->pf_num = hw->pf_id; | 381 | ctxt->pf_num = hw->pf_id; |
382 | ctxt->vf_num = 0; | 382 | ctxt->vf_num = 0; |
383 | ctxt->uplink_seid = vsi->uplink_seid; | 383 | ctxt->uplink_seid = vsi->uplink_seid; |
384 | ctxt->connection_type = 0x1; | 384 | ctxt->connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; |
385 | ctxt->flags = I40E_AQ_VSI_TYPE_PF; | 385 | ctxt->flags = I40E_AQ_VSI_TYPE_PF; |
386 | 386 | ||
387 | /* FCoE VSI would need the following sections */ | 387 | /* FCoE VSI would need the following sections */ |
388 | info->valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID | | 388 | info->valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID); |
389 | I40E_AQ_VSI_PROP_QUEUE_OPT_VALID); | ||
390 | 389 | ||
391 | /* FCoE VSI does not need these sections */ | 390 | /* FCoE VSI does not need these sections */ |
392 | info->valid_sections &= cpu_to_le16(~(I40E_AQ_VSI_PROP_SECURITY_VALID | | 391 | info->valid_sections &= cpu_to_le16(~(I40E_AQ_VSI_PROP_SECURITY_VALID | |
@@ -395,7 +394,12 @@ int i40e_fcoe_vsi_init(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt) | |||
395 | I40E_AQ_VSI_PROP_INGRESS_UP_VALID | | 394 | I40E_AQ_VSI_PROP_INGRESS_UP_VALID | |
396 | I40E_AQ_VSI_PROP_EGRESS_UP_VALID)); | 395 | I40E_AQ_VSI_PROP_EGRESS_UP_VALID)); |
397 | 396 | ||
398 | info->switch_id = cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); | 397 | if (i40e_is_vsi_uplink_mode_veb(vsi)) { |
398 | info->valid_sections |= | ||
399 | cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); | ||
400 | info->switch_id = | ||
401 | cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); | ||
402 | } | ||
399 | enabled_tc = i40e_get_fcoe_tc_map(pf); | 403 | enabled_tc = i40e_get_fcoe_tc_map(pf); |
400 | i40e_vsi_setup_queue_map(vsi, ctxt, enabled_tc, true); | 404 | i40e_vsi_setup_queue_map(vsi, ctxt, enabled_tc, true); |
401 | 405 | ||
@@ -1470,6 +1474,11 @@ static const struct net_device_ops i40e_fcoe_netdev_ops = { | |||
1470 | .ndo_set_features = i40e_fcoe_set_features, | 1474 | .ndo_set_features = i40e_fcoe_set_features, |
1471 | }; | 1475 | }; |
1472 | 1476 | ||
1477 | /* fcoe network device type */ | ||
1478 | static struct device_type fcoe_netdev_type = { | ||
1479 | .name = "fcoe", | ||
1480 | }; | ||
1481 | |||
1473 | /** | 1482 | /** |
1474 | * i40e_fcoe_config_netdev - prepares the VSI context for creating a FCoE VSI | 1483 | * i40e_fcoe_config_netdev - prepares the VSI context for creating a FCoE VSI |
1475 | * @vsi: pointer to the associated VSI struct | 1484 | * @vsi: pointer to the associated VSI struct |
@@ -1503,6 +1512,7 @@ void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi) | |||
1503 | strlcpy(netdev->name, "fcoe%d", IFNAMSIZ-1); | 1512 | strlcpy(netdev->name, "fcoe%d", IFNAMSIZ-1); |
1504 | netdev->mtu = FCOE_MTU; | 1513 | netdev->mtu = FCOE_MTU; |
1505 | SET_NETDEV_DEV(netdev, &pf->pdev->dev); | 1514 | SET_NETDEV_DEV(netdev, &pf->pdev->dev); |
1515 | SET_NETDEV_DEVTYPE(netdev, &fcoe_netdev_type); | ||
1506 | /* set different dev_port value 1 for FCoE netdev than the default | 1516 | /* set different dev_port value 1 for FCoE netdev than the default |
1507 | * zero dev_port value for PF netdev, this helps biosdevname user | 1517 | * zero dev_port value for PF netdev, this helps biosdevname user |
1508 | * tool to differentiate them correctly while both attached to the | 1518 | * tool to differentiate them correctly while both attached to the |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index dadda3c5d658..56bdaff9f27e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | * | 2 | * |
3 | * Intel Ethernet Controller XL710 Family Linux Driver | 3 | * Intel Ethernet Controller XL710 Family Linux Driver |
4 | * Copyright(c) 2013 - 2014 Intel Corporation. | 4 | * Copyright(c) 2013 - 2015 Intel Corporation. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms and conditions of the GNU General Public License, | 7 | * under the terms and conditions of the GNU General Public License, |
@@ -39,7 +39,7 @@ static const char i40e_driver_string[] = | |||
39 | 39 | ||
40 | #define DRV_VERSION_MAJOR 1 | 40 | #define DRV_VERSION_MAJOR 1 |
41 | #define DRV_VERSION_MINOR 2 | 41 | #define DRV_VERSION_MINOR 2 |
42 | #define DRV_VERSION_BUILD 6 | 42 | #define DRV_VERSION_BUILD 10 |
43 | #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ | 43 | #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ |
44 | __stringify(DRV_VERSION_MINOR) "." \ | 44 | __stringify(DRV_VERSION_MINOR) "." \ |
45 | __stringify(DRV_VERSION_BUILD) DRV_KERN | 45 | __stringify(DRV_VERSION_BUILD) DRV_KERN |
@@ -919,11 +919,6 @@ static void i40e_update_pf_stats(struct i40e_pf *pf) | |||
919 | pf->stat_offsets_loaded, | 919 | pf->stat_offsets_loaded, |
920 | &osd->eth.rx_discards, | 920 | &osd->eth.rx_discards, |
921 | &nsd->eth.rx_discards); | 921 | &nsd->eth.rx_discards); |
922 | i40e_stat_update32(hw, I40E_GLPRT_TDPC(hw->port), | ||
923 | pf->stat_offsets_loaded, | ||
924 | &osd->eth.tx_discards, | ||
925 | &nsd->eth.tx_discards); | ||
926 | |||
927 | i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port), | 922 | i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port), |
928 | I40E_GLPRT_UPRCL(hw->port), | 923 | I40E_GLPRT_UPRCL(hw->port), |
929 | pf->stat_offsets_loaded, | 924 | pf->stat_offsets_loaded, |
@@ -1576,6 +1571,12 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, | |||
1576 | 1571 | ||
1577 | /* Set actual Tx/Rx queue pairs */ | 1572 | /* Set actual Tx/Rx queue pairs */ |
1578 | vsi->num_queue_pairs = offset; | 1573 | vsi->num_queue_pairs = offset; |
1574 | if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) { | ||
1575 | if (vsi->req_queue_pairs > 0) | ||
1576 | vsi->num_queue_pairs = vsi->req_queue_pairs; | ||
1577 | else | ||
1578 | vsi->num_queue_pairs = pf->num_lan_msix; | ||
1579 | } | ||
1579 | 1580 | ||
1580 | /* Scheduler section valid can only be set for ADD VSI */ | 1581 | /* Scheduler section valid can only be set for ADD VSI */ |
1581 | if (is_add) { | 1582 | if (is_add) { |
@@ -2596,7 +2597,12 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring) | |||
2596 | ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q); | 2597 | ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q); |
2597 | writel(0, ring->tail); | 2598 | writel(0, ring->tail); |
2598 | 2599 | ||
2599 | i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring)); | 2600 | if (ring_is_ps_enabled(ring)) { |
2601 | i40e_alloc_rx_headers(ring); | ||
2602 | i40e_alloc_rx_buffers_ps(ring, I40E_DESC_UNUSED(ring)); | ||
2603 | } else { | ||
2604 | i40e_alloc_rx_buffers_1buf(ring, I40E_DESC_UNUSED(ring)); | ||
2605 | } | ||
2600 | 2606 | ||
2601 | return 0; | 2607 | return 0; |
2602 | } | 2608 | } |
@@ -3183,7 +3189,7 @@ static irqreturn_t i40e_intr(int irq, void *data) | |||
3183 | pf->globr_count++; | 3189 | pf->globr_count++; |
3184 | } else if (val == I40E_RESET_EMPR) { | 3190 | } else if (val == I40E_RESET_EMPR) { |
3185 | pf->empr_count++; | 3191 | pf->empr_count++; |
3186 | set_bit(__I40E_EMP_RESET_REQUESTED, &pf->state); | 3192 | set_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state); |
3187 | } | 3193 | } |
3188 | } | 3194 | } |
3189 | 3195 | ||
@@ -4119,7 +4125,7 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf) | |||
4119 | if (pf->hw.func_caps.iscsi) | 4125 | if (pf->hw.func_caps.iscsi) |
4120 | enabled_tc = i40e_get_iscsi_tc_map(pf); | 4126 | enabled_tc = i40e_get_iscsi_tc_map(pf); |
4121 | else | 4127 | else |
4122 | enabled_tc = pf->hw.func_caps.enabled_tcmap; | 4128 | return 1; /* Only TC0 */ |
4123 | 4129 | ||
4124 | /* At least have TC0 */ | 4130 | /* At least have TC0 */ |
4125 | enabled_tc = (enabled_tc ? enabled_tc : 0x1); | 4131 | enabled_tc = (enabled_tc ? enabled_tc : 0x1); |
@@ -4169,11 +4175,11 @@ static u8 i40e_pf_get_tc_map(struct i40e_pf *pf) | |||
4169 | if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) | 4175 | if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) |
4170 | return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config); | 4176 | return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config); |
4171 | 4177 | ||
4172 | /* MPF enabled and iSCSI PF type */ | 4178 | /* MFP enabled and iSCSI PF type */ |
4173 | if (pf->hw.func_caps.iscsi) | 4179 | if (pf->hw.func_caps.iscsi) |
4174 | return i40e_get_iscsi_tc_map(pf); | 4180 | return i40e_get_iscsi_tc_map(pf); |
4175 | else | 4181 | else |
4176 | return pf->hw.func_caps.enabled_tcmap; | 4182 | return i40e_pf_get_default_tc(pf); |
4177 | } | 4183 | } |
4178 | 4184 | ||
4179 | /** | 4185 | /** |
@@ -4563,6 +4569,11 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf) | |||
4563 | struct i40e_hw *hw = &pf->hw; | 4569 | struct i40e_hw *hw = &pf->hw; |
4564 | int err = 0; | 4570 | int err = 0; |
4565 | 4571 | ||
4572 | /* Do not enable DCB for SW1 and SW2 images even if the FW is capable */ | ||
4573 | if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || | ||
4574 | (pf->hw.aq.fw_maj_ver < 4)) | ||
4575 | goto out; | ||
4576 | |||
4566 | /* Get the initial DCB configuration */ | 4577 | /* Get the initial DCB configuration */ |
4567 | err = i40e_init_dcb(hw); | 4578 | err = i40e_init_dcb(hw); |
4568 | if (!err) { | 4579 | if (!err) { |
@@ -4853,11 +4864,7 @@ exit: | |||
4853 | * | 4864 | * |
4854 | * Returns 0 on success, negative value on failure | 4865 | * Returns 0 on success, negative value on failure |
4855 | **/ | 4866 | **/ |
4856 | #ifdef I40E_FCOE | ||
4857 | int i40e_open(struct net_device *netdev) | 4867 | int i40e_open(struct net_device *netdev) |
4858 | #else | ||
4859 | static int i40e_open(struct net_device *netdev) | ||
4860 | #endif | ||
4861 | { | 4868 | { |
4862 | struct i40e_netdev_priv *np = netdev_priv(netdev); | 4869 | struct i40e_netdev_priv *np = netdev_priv(netdev); |
4863 | struct i40e_vsi *vsi = np->vsi; | 4870 | struct i40e_vsi *vsi = np->vsi; |
@@ -5055,24 +5062,6 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags) | |||
5055 | wr32(&pf->hw, I40E_GLGEN_RTRIG, val); | 5062 | wr32(&pf->hw, I40E_GLGEN_RTRIG, val); |
5056 | i40e_flush(&pf->hw); | 5063 | i40e_flush(&pf->hw); |
5057 | 5064 | ||
5058 | } else if (reset_flags & (1 << __I40E_EMP_RESET_REQUESTED)) { | ||
5059 | |||
5060 | /* Request a Firmware Reset | ||
5061 | * | ||
5062 | * Same as Global reset, plus restarting the | ||
5063 | * embedded firmware engine. | ||
5064 | */ | ||
5065 | /* enable EMP Reset */ | ||
5066 | val = rd32(&pf->hw, I40E_GLGEN_RSTENA_EMP); | ||
5067 | val |= I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK; | ||
5068 | wr32(&pf->hw, I40E_GLGEN_RSTENA_EMP, val); | ||
5069 | |||
5070 | /* force the reset */ | ||
5071 | val = rd32(&pf->hw, I40E_GLGEN_RTRIG); | ||
5072 | val |= I40E_GLGEN_RTRIG_EMPFWR_MASK; | ||
5073 | wr32(&pf->hw, I40E_GLGEN_RTRIG, val); | ||
5074 | i40e_flush(&pf->hw); | ||
5075 | |||
5076 | } else if (reset_flags & (1 << __I40E_PF_RESET_REQUESTED)) { | 5065 | } else if (reset_flags & (1 << __I40E_PF_RESET_REQUESTED)) { |
5077 | 5066 | ||
5078 | /* Request a PF Reset | 5067 | /* Request a PF Reset |
@@ -5195,7 +5184,6 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, | |||
5195 | struct i40e_aqc_lldp_get_mib *mib = | 5184 | struct i40e_aqc_lldp_get_mib *mib = |
5196 | (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw; | 5185 | (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw; |
5197 | struct i40e_hw *hw = &pf->hw; | 5186 | struct i40e_hw *hw = &pf->hw; |
5198 | struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config; | ||
5199 | struct i40e_dcbx_config tmp_dcbx_cfg; | 5187 | struct i40e_dcbx_config tmp_dcbx_cfg; |
5200 | bool need_reconfig = false; | 5188 | bool need_reconfig = false; |
5201 | int ret = 0; | 5189 | int ret = 0; |
@@ -5228,8 +5216,10 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, | |||
5228 | 5216 | ||
5229 | memset(&tmp_dcbx_cfg, 0, sizeof(tmp_dcbx_cfg)); | 5217 | memset(&tmp_dcbx_cfg, 0, sizeof(tmp_dcbx_cfg)); |
5230 | /* Store the old configuration */ | 5218 | /* Store the old configuration */ |
5231 | tmp_dcbx_cfg = *dcbx_cfg; | 5219 | memcpy(&tmp_dcbx_cfg, &hw->local_dcbx_config, sizeof(tmp_dcbx_cfg)); |
5232 | 5220 | ||
5221 | /* Reset the old DCBx configuration data */ | ||
5222 | memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config)); | ||
5233 | /* Get updated DCBX data from firmware */ | 5223 | /* Get updated DCBX data from firmware */ |
5234 | ret = i40e_get_dcb_config(&pf->hw); | 5224 | ret = i40e_get_dcb_config(&pf->hw); |
5235 | if (ret) { | 5225 | if (ret) { |
@@ -5238,20 +5228,22 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, | |||
5238 | } | 5228 | } |
5239 | 5229 | ||
5240 | /* No change detected in DCBX configs */ | 5230 | /* No change detected in DCBX configs */ |
5241 | if (!memcmp(&tmp_dcbx_cfg, dcbx_cfg, sizeof(tmp_dcbx_cfg))) { | 5231 | if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config, |
5232 | sizeof(tmp_dcbx_cfg))) { | ||
5242 | dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n"); | 5233 | dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n"); |
5243 | goto exit; | 5234 | goto exit; |
5244 | } | 5235 | } |
5245 | 5236 | ||
5246 | need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg, dcbx_cfg); | 5237 | need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg, |
5238 | &hw->local_dcbx_config); | ||
5247 | 5239 | ||
5248 | i40e_dcbnl_flush_apps(pf, dcbx_cfg); | 5240 | i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config); |
5249 | 5241 | ||
5250 | if (!need_reconfig) | 5242 | if (!need_reconfig) |
5251 | goto exit; | 5243 | goto exit; |
5252 | 5244 | ||
5253 | /* Enable DCB tagging only when more than one TC */ | 5245 | /* Enable DCB tagging only when more than one TC */ |
5254 | if (i40e_dcb_get_num_tc(dcbx_cfg) > 1) | 5246 | if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1) |
5255 | pf->flags |= I40E_FLAG_DCB_ENABLED; | 5247 | pf->flags |= I40E_FLAG_DCB_ENABLED; |
5256 | else | 5248 | else |
5257 | pf->flags &= ~I40E_FLAG_DCB_ENABLED; | 5249 | pf->flags &= ~I40E_FLAG_DCB_ENABLED; |
@@ -5919,6 +5911,26 @@ static void i40e_verify_eeprom(struct i40e_pf *pf) | |||
5919 | } | 5911 | } |
5920 | 5912 | ||
5921 | /** | 5913 | /** |
5914 | * i40e_config_bridge_mode - Configure the HW bridge mode | ||
5915 | * @veb: pointer to the bridge instance | ||
5916 | * | ||
5917 | * Configure the loop back mode for the LAN VSI that is downlink to the | ||
5918 | * specified HW bridge instance. It is expected this function is called | ||
5919 | * when a new HW bridge is instantiated. | ||
5920 | **/ | ||
5921 | static void i40e_config_bridge_mode(struct i40e_veb *veb) | ||
5922 | { | ||
5923 | struct i40e_pf *pf = veb->pf; | ||
5924 | |||
5925 | dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n", | ||
5926 | veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB"); | ||
5927 | if (veb->bridge_mode & BRIDGE_MODE_VEPA) | ||
5928 | i40e_disable_pf_switch_lb(pf); | ||
5929 | else | ||
5930 | i40e_enable_pf_switch_lb(pf); | ||
5931 | } | ||
5932 | |||
5933 | /** | ||
5922 | * i40e_reconstitute_veb - rebuild the VEB and anything connected to it | 5934 | * i40e_reconstitute_veb - rebuild the VEB and anything connected to it |
5923 | * @veb: pointer to the VEB instance | 5935 | * @veb: pointer to the VEB instance |
5924 | * | 5936 | * |
@@ -5964,8 +5976,7 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb) | |||
5964 | if (ret) | 5976 | if (ret) |
5965 | goto end_reconstitute; | 5977 | goto end_reconstitute; |
5966 | 5978 | ||
5967 | /* Enable LB mode for the main VSI now that it is on a VEB */ | 5979 | i40e_config_bridge_mode(veb); |
5968 | i40e_enable_pf_switch_lb(pf); | ||
5969 | 5980 | ||
5970 | /* create the remaining VSIs attached to this VEB */ | 5981 | /* create the remaining VSIs attached to this VEB */ |
5971 | for (v = 0; v < pf->num_alloc_vsi; v++) { | 5982 | for (v = 0; v < pf->num_alloc_vsi; v++) { |
@@ -6222,10 +6233,8 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) | |||
6222 | } | 6233 | } |
6223 | 6234 | ||
6224 | /* re-verify the eeprom if we just had an EMP reset */ | 6235 | /* re-verify the eeprom if we just had an EMP reset */ |
6225 | if (test_bit(__I40E_EMP_RESET_REQUESTED, &pf->state)) { | 6236 | if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state)) |
6226 | clear_bit(__I40E_EMP_RESET_REQUESTED, &pf->state); | ||
6227 | i40e_verify_eeprom(pf); | 6237 | i40e_verify_eeprom(pf); |
6228 | } | ||
6229 | 6238 | ||
6230 | i40e_clear_pxe_mode(hw); | 6239 | i40e_clear_pxe_mode(hw); |
6231 | ret = i40e_get_capabilities(pf); | 6240 | ret = i40e_get_capabilities(pf); |
@@ -6335,13 +6344,14 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) | |||
6335 | } | 6344 | } |
6336 | } | 6345 | } |
6337 | 6346 | ||
6338 | msleep(75); | 6347 | if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || |
6339 | ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); | 6348 | (pf->hw.aq.fw_maj_ver < 4)) { |
6340 | if (ret) { | 6349 | msleep(75); |
6341 | dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n", | 6350 | ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); |
6342 | pf->hw.aq.asq_last_status); | 6351 | if (ret) |
6352 | dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n", | ||
6353 | pf->hw.aq.asq_last_status); | ||
6343 | } | 6354 | } |
6344 | |||
6345 | /* reinit the misc interrupt */ | 6355 | /* reinit the misc interrupt */ |
6346 | if (pf->flags & I40E_FLAG_MSIX_ENABLED) | 6356 | if (pf->flags & I40E_FLAG_MSIX_ENABLED) |
6347 | ret = i40e_setup_misc_vector(pf); | 6357 | ret = i40e_setup_misc_vector(pf); |
@@ -6728,6 +6738,8 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type) | |||
6728 | vsi->idx = vsi_idx; | 6738 | vsi->idx = vsi_idx; |
6729 | vsi->rx_itr_setting = pf->rx_itr_default; | 6739 | vsi->rx_itr_setting = pf->rx_itr_default; |
6730 | vsi->tx_itr_setting = pf->tx_itr_default; | 6740 | vsi->tx_itr_setting = pf->tx_itr_default; |
6741 | vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ? | ||
6742 | pf->rss_table_size : 64; | ||
6731 | vsi->netdev_registered = false; | 6743 | vsi->netdev_registered = false; |
6732 | vsi->work_limit = I40E_DEFAULT_IRQ_WORK; | 6744 | vsi->work_limit = I40E_DEFAULT_IRQ_WORK; |
6733 | INIT_LIST_HEAD(&vsi->mac_filter_list); | 6745 | INIT_LIST_HEAD(&vsi->mac_filter_list); |
@@ -6951,7 +6963,8 @@ static int i40e_init_msix(struct i40e_pf *pf) | |||
6951 | * If we can't get what we want, we'll simplify to nearly nothing | 6963 | * If we can't get what we want, we'll simplify to nearly nothing |
6952 | * and try again. If that still fails, we punt. | 6964 | * and try again. If that still fails, we punt. |
6953 | */ | 6965 | */ |
6954 | pf->num_lan_msix = pf->num_lan_qps - (pf->rss_size_max - pf->rss_size); | 6966 | pf->num_lan_msix = min_t(int, num_online_cpus(), |
6967 | hw->func_caps.num_msix_vectors); | ||
6955 | pf->num_vmdq_msix = pf->num_vmdq_qps; | 6968 | pf->num_vmdq_msix = pf->num_vmdq_qps; |
6956 | other_vecs = 1; | 6969 | other_vecs = 1; |
6957 | other_vecs += (pf->num_vmdq_vsis * pf->num_vmdq_msix); | 6970 | other_vecs += (pf->num_vmdq_vsis * pf->num_vmdq_msix); |
@@ -7219,6 +7232,7 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf) | |||
7219 | static int i40e_config_rss(struct i40e_pf *pf) | 7232 | static int i40e_config_rss(struct i40e_pf *pf) |
7220 | { | 7233 | { |
7221 | u32 rss_key[I40E_PFQF_HKEY_MAX_INDEX + 1]; | 7234 | u32 rss_key[I40E_PFQF_HKEY_MAX_INDEX + 1]; |
7235 | struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; | ||
7222 | struct i40e_hw *hw = &pf->hw; | 7236 | struct i40e_hw *hw = &pf->hw; |
7223 | u32 lut = 0; | 7237 | u32 lut = 0; |
7224 | int i, j; | 7238 | int i, j; |
@@ -7236,6 +7250,8 @@ static int i40e_config_rss(struct i40e_pf *pf) | |||
7236 | wr32(hw, I40E_PFQF_HENA(0), (u32)hena); | 7250 | wr32(hw, I40E_PFQF_HENA(0), (u32)hena); |
7237 | wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32)); | 7251 | wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32)); |
7238 | 7252 | ||
7253 | vsi->rss_size = min_t(int, pf->rss_size, vsi->num_queue_pairs); | ||
7254 | |||
7239 | /* Check capability and Set table size and register per hw expectation*/ | 7255 | /* Check capability and Set table size and register per hw expectation*/ |
7240 | reg_val = rd32(hw, I40E_PFQF_CTL_0); | 7256 | reg_val = rd32(hw, I40E_PFQF_CTL_0); |
7241 | if (hw->func_caps.rss_table_size == 512) { | 7257 | if (hw->func_caps.rss_table_size == 512) { |
@@ -7257,7 +7273,7 @@ static int i40e_config_rss(struct i40e_pf *pf) | |||
7257 | * If LAN VSI is the only consumer for RSS then this requirement | 7273 | * If LAN VSI is the only consumer for RSS then this requirement |
7258 | * is not necessary. | 7274 | * is not necessary. |
7259 | */ | 7275 | */ |
7260 | if (j == pf->rss_size) | 7276 | if (j == vsi->rss_size) |
7261 | j = 0; | 7277 | j = 0; |
7262 | /* lut = 4-byte sliding window of 4 lut entries */ | 7278 | /* lut = 4-byte sliding window of 4 lut entries */ |
7263 | lut = (lut << 8) | (j & | 7279 | lut = (lut << 8) | (j & |
@@ -7281,15 +7297,19 @@ static int i40e_config_rss(struct i40e_pf *pf) | |||
7281 | **/ | 7297 | **/ |
7282 | int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count) | 7298 | int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count) |
7283 | { | 7299 | { |
7300 | struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; | ||
7301 | int new_rss_size; | ||
7302 | |||
7284 | if (!(pf->flags & I40E_FLAG_RSS_ENABLED)) | 7303 | if (!(pf->flags & I40E_FLAG_RSS_ENABLED)) |
7285 | return 0; | 7304 | return 0; |
7286 | 7305 | ||
7287 | queue_count = min_t(int, queue_count, pf->rss_size_max); | 7306 | new_rss_size = min_t(int, queue_count, pf->rss_size_max); |
7288 | 7307 | ||
7289 | if (queue_count != pf->rss_size) { | 7308 | if (queue_count != vsi->num_queue_pairs) { |
7309 | vsi->req_queue_pairs = queue_count; | ||
7290 | i40e_prep_for_reset(pf); | 7310 | i40e_prep_for_reset(pf); |
7291 | 7311 | ||
7292 | pf->rss_size = queue_count; | 7312 | pf->rss_size = new_rss_size; |
7293 | 7313 | ||
7294 | i40e_reset_and_rebuild(pf, true); | 7314 | i40e_reset_and_rebuild(pf, true); |
7295 | i40e_config_rss(pf); | 7315 | i40e_config_rss(pf); |
@@ -7299,6 +7319,128 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count) | |||
7299 | } | 7319 | } |
7300 | 7320 | ||
7301 | /** | 7321 | /** |
7322 | * i40e_get_npar_bw_setting - Retrieve BW settings for this PF partition | ||
7323 | * @pf: board private structure | ||
7324 | **/ | ||
7325 | i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf) | ||
7326 | { | ||
7327 | i40e_status status; | ||
7328 | bool min_valid, max_valid; | ||
7329 | u32 max_bw, min_bw; | ||
7330 | |||
7331 | status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw, | ||
7332 | &min_valid, &max_valid); | ||
7333 | |||
7334 | if (!status) { | ||
7335 | if (min_valid) | ||
7336 | pf->npar_min_bw = min_bw; | ||
7337 | if (max_valid) | ||
7338 | pf->npar_max_bw = max_bw; | ||
7339 | } | ||
7340 | |||
7341 | return status; | ||
7342 | } | ||
7343 | |||
7344 | /** | ||
7345 | * i40e_set_npar_bw_setting - Set BW settings for this PF partition | ||
7346 | * @pf: board private structure | ||
7347 | **/ | ||
7348 | i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf) | ||
7349 | { | ||
7350 | struct i40e_aqc_configure_partition_bw_data bw_data; | ||
7351 | i40e_status status; | ||
7352 | |||
7353 | /* Set the valid bit for this pf */ | ||
7354 | bw_data.pf_valid_bits = cpu_to_le16(1 << pf->hw.pf_id); | ||
7355 | bw_data.max_bw[pf->hw.pf_id] = pf->npar_max_bw & I40E_ALT_BW_VALUE_MASK; | ||
7356 | bw_data.min_bw[pf->hw.pf_id] = pf->npar_min_bw & I40E_ALT_BW_VALUE_MASK; | ||
7357 | |||
7358 | /* Set the new bandwidths */ | ||
7359 | status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL); | ||
7360 | |||
7361 | return status; | ||
7362 | } | ||
7363 | |||
7364 | /** | ||
7365 | * i40e_commit_npar_bw_setting - Commit BW settings for this PF partition | ||
7366 | * @pf: board private structure | ||
7367 | **/ | ||
7368 | i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf) | ||
7369 | { | ||
7370 | /* Commit temporary BW setting to permanent NVM image */ | ||
7371 | enum i40e_admin_queue_err last_aq_status; | ||
7372 | i40e_status ret; | ||
7373 | u16 nvm_word; | ||
7374 | |||
7375 | if (pf->hw.partition_id != 1) { | ||
7376 | dev_info(&pf->pdev->dev, | ||
7377 | "Commit BW only works on partition 1! This is partition %d", | ||
7378 | pf->hw.partition_id); | ||
7379 | ret = I40E_NOT_SUPPORTED; | ||
7380 | goto bw_commit_out; | ||
7381 | } | ||
7382 | |||
7383 | /* Acquire NVM for read access */ | ||
7384 | ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ); | ||
7385 | last_aq_status = pf->hw.aq.asq_last_status; | ||
7386 | if (ret) { | ||
7387 | dev_info(&pf->pdev->dev, | ||
7388 | "Cannot acquire NVM for read access, err %d: aq_err %d\n", | ||
7389 | ret, last_aq_status); | ||
7390 | goto bw_commit_out; | ||
7391 | } | ||
7392 | |||
7393 | /* Read word 0x10 of NVM - SW compatibility word 1 */ | ||
7394 | ret = i40e_aq_read_nvm(&pf->hw, | ||
7395 | I40E_SR_NVM_CONTROL_WORD, | ||
7396 | 0x10, sizeof(nvm_word), &nvm_word, | ||
7397 | false, NULL); | ||
7398 | /* Save off last admin queue command status before releasing | ||
7399 | * the NVM | ||
7400 | */ | ||
7401 | last_aq_status = pf->hw.aq.asq_last_status; | ||
7402 | i40e_release_nvm(&pf->hw); | ||
7403 | if (ret) { | ||
7404 | dev_info(&pf->pdev->dev, "NVM read error, err %d aq_err %d\n", | ||
7405 | ret, last_aq_status); | ||
7406 | goto bw_commit_out; | ||
7407 | } | ||
7408 | |||
7409 | /* Wait a bit for NVM release to complete */ | ||
7410 | msleep(50); | ||
7411 | |||
7412 | /* Acquire NVM for write access */ | ||
7413 | ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE); | ||
7414 | last_aq_status = pf->hw.aq.asq_last_status; | ||
7415 | if (ret) { | ||
7416 | dev_info(&pf->pdev->dev, | ||
7417 | "Cannot acquire NVM for write access, err %d: aq_err %d\n", | ||
7418 | ret, last_aq_status); | ||
7419 | goto bw_commit_out; | ||
7420 | } | ||
7421 | /* Write it back out unchanged to initiate update NVM, | ||
7422 | * which will force a write of the shadow (alt) RAM to | ||
7423 | * the NVM - thus storing the bandwidth values permanently. | ||
7424 | */ | ||
7425 | ret = i40e_aq_update_nvm(&pf->hw, | ||
7426 | I40E_SR_NVM_CONTROL_WORD, | ||
7427 | 0x10, sizeof(nvm_word), | ||
7428 | &nvm_word, true, NULL); | ||
7429 | /* Save off last admin queue command status before releasing | ||
7430 | * the NVM | ||
7431 | */ | ||
7432 | last_aq_status = pf->hw.aq.asq_last_status; | ||
7433 | i40e_release_nvm(&pf->hw); | ||
7434 | if (ret) | ||
7435 | dev_info(&pf->pdev->dev, | ||
7436 | "BW settings NOT SAVED, err %d aq_err %d\n", | ||
7437 | ret, last_aq_status); | ||
7438 | bw_commit_out: | ||
7439 | |||
7440 | return ret; | ||
7441 | } | ||
7442 | |||
7443 | /** | ||
7302 | * i40e_sw_init - Initialize general software structures (struct i40e_pf) | 7444 | * i40e_sw_init - Initialize general software structures (struct i40e_pf) |
7303 | * @pf: board private structure to initialize | 7445 | * @pf: board private structure to initialize |
7304 | * | 7446 | * |
@@ -7324,8 +7466,12 @@ static int i40e_sw_init(struct i40e_pf *pf) | |||
7324 | /* Set default capability flags */ | 7466 | /* Set default capability flags */ |
7325 | pf->flags = I40E_FLAG_RX_CSUM_ENABLED | | 7467 | pf->flags = I40E_FLAG_RX_CSUM_ENABLED | |
7326 | I40E_FLAG_MSI_ENABLED | | 7468 | I40E_FLAG_MSI_ENABLED | |
7327 | I40E_FLAG_MSIX_ENABLED | | 7469 | I40E_FLAG_MSIX_ENABLED; |
7328 | I40E_FLAG_RX_1BUF_ENABLED; | 7470 | |
7471 | if (iommu_present(&pci_bus_type)) | ||
7472 | pf->flags |= I40E_FLAG_RX_PS_ENABLED; | ||
7473 | else | ||
7474 | pf->flags |= I40E_FLAG_RX_1BUF_ENABLED; | ||
7329 | 7475 | ||
7330 | /* Set default ITR */ | 7476 | /* Set default ITR */ |
7331 | pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF; | 7477 | pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF; |
@@ -7336,6 +7482,7 @@ static int i40e_sw_init(struct i40e_pf *pf) | |||
7336 | */ | 7482 | */ |
7337 | pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width; | 7483 | pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width; |
7338 | pf->rss_size = 1; | 7484 | pf->rss_size = 1; |
7485 | pf->rss_table_size = pf->hw.func_caps.rss_table_size; | ||
7339 | pf->rss_size_max = min_t(int, pf->rss_size_max, | 7486 | pf->rss_size_max = min_t(int, pf->rss_size_max, |
7340 | pf->hw.func_caps.num_tx_qp); | 7487 | pf->hw.func_caps.num_tx_qp); |
7341 | if (pf->hw.func_caps.rss) { | 7488 | if (pf->hw.func_caps.rss) { |
@@ -7347,6 +7494,13 @@ static int i40e_sw_init(struct i40e_pf *pf) | |||
7347 | if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) { | 7494 | if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) { |
7348 | pf->flags |= I40E_FLAG_MFP_ENABLED; | 7495 | pf->flags |= I40E_FLAG_MFP_ENABLED; |
7349 | dev_info(&pf->pdev->dev, "MFP mode Enabled\n"); | 7496 | dev_info(&pf->pdev->dev, "MFP mode Enabled\n"); |
7497 | if (i40e_get_npar_bw_setting(pf)) | ||
7498 | dev_warn(&pf->pdev->dev, | ||
7499 | "Could not get NPAR bw settings\n"); | ||
7500 | else | ||
7501 | dev_info(&pf->pdev->dev, | ||
7502 | "Min BW = %8.8x, Max BW = %8.8x\n", | ||
7503 | pf->npar_min_bw, pf->npar_max_bw); | ||
7350 | } | 7504 | } |
7351 | 7505 | ||
7352 | /* FW/NVM is not yet fixed in this regard */ | 7506 | /* FW/NVM is not yet fixed in this regard */ |
@@ -7653,7 +7807,119 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], | |||
7653 | return err; | 7807 | return err; |
7654 | } | 7808 | } |
7655 | 7809 | ||
7656 | static const struct net_device_ops i40e_netdev_ops = { | 7810 | #ifdef HAVE_BRIDGE_ATTRIBS |
7811 | /** | ||
7812 | * i40e_ndo_bridge_setlink - Set the hardware bridge mode | ||
7813 | * @dev: the netdev being configured | ||
7814 | * @nlh: RTNL message | ||
7815 | * | ||
7816 | * Inserts a new hardware bridge if not already created and | ||
7817 | * enables the bridging mode requested (VEB or VEPA). If the | ||
7818 | * hardware bridge has already been inserted and the request | ||
7819 | * is to change the mode then that requires a PF reset to | ||
7820 | * allow rebuild of the components with required hardware | ||
7821 | * bridge mode enabled. | ||
7822 | **/ | ||
7823 | static int i40e_ndo_bridge_setlink(struct net_device *dev, | ||
7824 | struct nlmsghdr *nlh) | ||
7825 | { | ||
7826 | struct i40e_netdev_priv *np = netdev_priv(dev); | ||
7827 | struct i40e_vsi *vsi = np->vsi; | ||
7828 | struct i40e_pf *pf = vsi->back; | ||
7829 | struct i40e_veb *veb = NULL; | ||
7830 | struct nlattr *attr, *br_spec; | ||
7831 | int i, rem; | ||
7832 | |||
7833 | /* Only for PF VSI for now */ | ||
7834 | if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) | ||
7835 | return -EOPNOTSUPP; | ||
7836 | |||
7837 | /* Find the HW bridge for PF VSI */ | ||
7838 | for (i = 0; i < I40E_MAX_VEB && !veb; i++) { | ||
7839 | if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) | ||
7840 | veb = pf->veb[i]; | ||
7841 | } | ||
7842 | |||
7843 | br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); | ||
7844 | |||
7845 | nla_for_each_nested(attr, br_spec, rem) { | ||
7846 | __u16 mode; | ||
7847 | |||
7848 | if (nla_type(attr) != IFLA_BRIDGE_MODE) | ||
7849 | continue; | ||
7850 | |||
7851 | mode = nla_get_u16(attr); | ||
7852 | if ((mode != BRIDGE_MODE_VEPA) && | ||
7853 | (mode != BRIDGE_MODE_VEB)) | ||
7854 | return -EINVAL; | ||
7855 | |||
7856 | /* Insert a new HW bridge */ | ||
7857 | if (!veb) { | ||
7858 | veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid, | ||
7859 | vsi->tc_config.enabled_tc); | ||
7860 | if (veb) { | ||
7861 | veb->bridge_mode = mode; | ||
7862 | i40e_config_bridge_mode(veb); | ||
7863 | } else { | ||
7864 | /* No Bridge HW offload available */ | ||
7865 | return -ENOENT; | ||
7866 | } | ||
7867 | break; | ||
7868 | } else if (mode != veb->bridge_mode) { | ||
7869 | /* Existing HW bridge but different mode needs reset */ | ||
7870 | veb->bridge_mode = mode; | ||
7871 | i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED)); | ||
7872 | break; | ||
7873 | } | ||
7874 | } | ||
7875 | |||
7876 | return 0; | ||
7877 | } | ||
7878 | |||
7879 | /** | ||
7880 | * i40e_ndo_bridge_getlink - Get the hardware bridge mode | ||
7881 | * @skb: skb buff | ||
7882 | * @pid: process id | ||
7883 | * @seq: RTNL message seq # | ||
7884 | * @dev: the netdev being configured | ||
7885 | * @filter_mask: unused | ||
7886 | * | ||
7887 | * Return the mode in which the hardware bridge is operating in | ||
7888 | * i.e VEB or VEPA. | ||
7889 | **/ | ||
7890 | #ifdef HAVE_BRIDGE_FILTER | ||
7891 | static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, | ||
7892 | struct net_device *dev, | ||
7893 | u32 __always_unused filter_mask) | ||
7894 | #else | ||
7895 | static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, | ||
7896 | struct net_device *dev) | ||
7897 | #endif /* HAVE_BRIDGE_FILTER */ | ||
7898 | { | ||
7899 | struct i40e_netdev_priv *np = netdev_priv(dev); | ||
7900 | struct i40e_vsi *vsi = np->vsi; | ||
7901 | struct i40e_pf *pf = vsi->back; | ||
7902 | struct i40e_veb *veb = NULL; | ||
7903 | int i; | ||
7904 | |||
7905 | /* Only for PF VSI for now */ | ||
7906 | if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) | ||
7907 | return -EOPNOTSUPP; | ||
7908 | |||
7909 | /* Find the HW bridge for the PF VSI */ | ||
7910 | for (i = 0; i < I40E_MAX_VEB && !veb; i++) { | ||
7911 | if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) | ||
7912 | veb = pf->veb[i]; | ||
7913 | } | ||
7914 | |||
7915 | if (!veb) | ||
7916 | return 0; | ||
7917 | |||
7918 | return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode); | ||
7919 | } | ||
7920 | #endif /* HAVE_BRIDGE_ATTRIBS */ | ||
7921 | |||
7922 | const struct net_device_ops i40e_netdev_ops = { | ||
7657 | .ndo_open = i40e_open, | 7923 | .ndo_open = i40e_open, |
7658 | .ndo_stop = i40e_close, | 7924 | .ndo_stop = i40e_close, |
7659 | .ndo_start_xmit = i40e_lan_xmit_frame, | 7925 | .ndo_start_xmit = i40e_lan_xmit_frame, |
@@ -7687,6 +7953,10 @@ static const struct net_device_ops i40e_netdev_ops = { | |||
7687 | #endif | 7953 | #endif |
7688 | .ndo_get_phys_port_id = i40e_get_phys_port_id, | 7954 | .ndo_get_phys_port_id = i40e_get_phys_port_id, |
7689 | .ndo_fdb_add = i40e_ndo_fdb_add, | 7955 | .ndo_fdb_add = i40e_ndo_fdb_add, |
7956 | #ifdef HAVE_BRIDGE_ATTRIBS | ||
7957 | .ndo_bridge_getlink = i40e_ndo_bridge_getlink, | ||
7958 | .ndo_bridge_setlink = i40e_ndo_bridge_setlink, | ||
7959 | #endif /* HAVE_BRIDGE_ATTRIBS */ | ||
7690 | }; | 7960 | }; |
7691 | 7961 | ||
7692 | /** | 7962 | /** |
@@ -7799,6 +8069,30 @@ static void i40e_vsi_delete(struct i40e_vsi *vsi) | |||
7799 | } | 8069 | } |
7800 | 8070 | ||
7801 | /** | 8071 | /** |
8072 | * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB | ||
8073 | * @vsi: the VSI being queried | ||
8074 | * | ||
8075 | * Returns 1 if HW bridge mode is VEB and return 0 in case of VEPA mode | ||
8076 | **/ | ||
8077 | int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi) | ||
8078 | { | ||
8079 | struct i40e_veb *veb; | ||
8080 | struct i40e_pf *pf = vsi->back; | ||
8081 | |||
8082 | /* Uplink is not a bridge so default to VEB */ | ||
8083 | if (vsi->veb_idx == I40E_NO_VEB) | ||
8084 | return 1; | ||
8085 | |||
8086 | veb = pf->veb[vsi->veb_idx]; | ||
8087 | /* Uplink is a bridge in VEPA mode */ | ||
8088 | if (veb && (veb->bridge_mode & BRIDGE_MODE_VEPA)) | ||
8089 | return 0; | ||
8090 | |||
8091 | /* Uplink is a bridge in VEB mode */ | ||
8092 | return 1; | ||
8093 | } | ||
8094 | |||
8095 | /** | ||
7802 | * i40e_add_vsi - Add a VSI to the switch | 8096 | * i40e_add_vsi - Add a VSI to the switch |
7803 | * @vsi: the VSI being configured | 8097 | * @vsi: the VSI being configured |
7804 | * | 8098 | * |
@@ -7883,12 +8177,14 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) | |||
7883 | ctxt.pf_num = hw->pf_id; | 8177 | ctxt.pf_num = hw->pf_id; |
7884 | ctxt.vf_num = 0; | 8178 | ctxt.vf_num = 0; |
7885 | ctxt.uplink_seid = vsi->uplink_seid; | 8179 | ctxt.uplink_seid = vsi->uplink_seid; |
7886 | ctxt.connection_type = 0x1; /* regular data port */ | 8180 | ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; |
7887 | ctxt.flags = I40E_AQ_VSI_TYPE_PF; | 8181 | ctxt.flags = I40E_AQ_VSI_TYPE_PF; |
7888 | ctxt.info.valid_sections |= | 8182 | if (i40e_is_vsi_uplink_mode_veb(vsi)) { |
8183 | ctxt.info.valid_sections |= | ||
7889 | cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); | 8184 | cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); |
7890 | ctxt.info.switch_id = | 8185 | ctxt.info.switch_id = |
7891 | cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); | 8186 | cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); |
8187 | } | ||
7892 | i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); | 8188 | i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); |
7893 | break; | 8189 | break; |
7894 | 8190 | ||
@@ -7896,16 +8192,18 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) | |||
7896 | ctxt.pf_num = hw->pf_id; | 8192 | ctxt.pf_num = hw->pf_id; |
7897 | ctxt.vf_num = 0; | 8193 | ctxt.vf_num = 0; |
7898 | ctxt.uplink_seid = vsi->uplink_seid; | 8194 | ctxt.uplink_seid = vsi->uplink_seid; |
7899 | ctxt.connection_type = 0x1; /* regular data port */ | 8195 | ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; |
7900 | ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2; | 8196 | ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2; |
7901 | 8197 | ||
7902 | ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); | ||
7903 | |||
7904 | /* This VSI is connected to VEB so the switch_id | 8198 | /* This VSI is connected to VEB so the switch_id |
7905 | * should be set to zero by default. | 8199 | * should be set to zero by default. |
7906 | */ | 8200 | */ |
7907 | ctxt.info.switch_id = 0; | 8201 | if (i40e_is_vsi_uplink_mode_veb(vsi)) { |
7908 | ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); | 8202 | ctxt.info.valid_sections |= |
8203 | cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); | ||
8204 | ctxt.info.switch_id = | ||
8205 | cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); | ||
8206 | } | ||
7909 | 8207 | ||
7910 | /* Setup the VSI tx/rx queue map for TC0 only for now */ | 8208 | /* Setup the VSI tx/rx queue map for TC0 only for now */ |
7911 | i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); | 8209 | i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); |
@@ -7915,15 +8213,18 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) | |||
7915 | ctxt.pf_num = hw->pf_id; | 8213 | ctxt.pf_num = hw->pf_id; |
7916 | ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id; | 8214 | ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id; |
7917 | ctxt.uplink_seid = vsi->uplink_seid; | 8215 | ctxt.uplink_seid = vsi->uplink_seid; |
7918 | ctxt.connection_type = 0x1; /* regular data port */ | 8216 | ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; |
7919 | ctxt.flags = I40E_AQ_VSI_TYPE_VF; | 8217 | ctxt.flags = I40E_AQ_VSI_TYPE_VF; |
7920 | 8218 | ||
7921 | ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); | ||
7922 | |||
7923 | /* This VSI is connected to VEB so the switch_id | 8219 | /* This VSI is connected to VEB so the switch_id |
7924 | * should be set to zero by default. | 8220 | * should be set to zero by default. |
7925 | */ | 8221 | */ |
7926 | ctxt.info.switch_id = cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); | 8222 | if (i40e_is_vsi_uplink_mode_veb(vsi)) { |
8223 | ctxt.info.valid_sections |= | ||
8224 | cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); | ||
8225 | ctxt.info.switch_id = | ||
8226 | cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); | ||
8227 | } | ||
7927 | 8228 | ||
7928 | ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); | 8229 | ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); |
7929 | ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL; | 8230 | ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL; |
@@ -8281,7 +8582,7 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, | |||
8281 | __func__); | 8582 | __func__); |
8282 | return NULL; | 8583 | return NULL; |
8283 | } | 8584 | } |
8284 | i40e_enable_pf_switch_lb(pf); | 8585 | i40e_config_bridge_mode(veb); |
8285 | } | 8586 | } |
8286 | for (i = 0; i < I40E_MAX_VEB && !veb; i++) { | 8587 | for (i = 0; i < I40E_MAX_VEB && !veb; i++) { |
8287 | if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) | 8588 | if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) |
@@ -8930,7 +9231,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit) | |||
8930 | i40e_config_rss(pf); | 9231 | i40e_config_rss(pf); |
8931 | 9232 | ||
8932 | /* fill in link information and enable LSE reporting */ | 9233 | /* fill in link information and enable LSE reporting */ |
8933 | i40e_update_link_info(&pf->hw, true); | 9234 | i40e_aq_get_link_info(&pf->hw, true, NULL, NULL); |
8934 | i40e_link_event(pf); | 9235 | i40e_link_event(pf); |
8935 | 9236 | ||
8936 | /* Initialize user-specific link properties */ | 9237 | /* Initialize user-specific link properties */ |
@@ -8938,7 +9239,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit) | |||
8938 | I40E_AQ_AN_COMPLETED) ? true : false); | 9239 | I40E_AQ_AN_COMPLETED) ? true : false); |
8939 | 9240 | ||
8940 | /* fill in link information and enable LSE reporting */ | 9241 | /* fill in link information and enable LSE reporting */ |
8941 | i40e_update_link_info(&pf->hw, true); | 9242 | i40e_aq_get_link_info(&pf->hw, true, NULL, NULL); |
8942 | i40e_link_event(pf); | 9243 | i40e_link_event(pf); |
8943 | 9244 | ||
8944 | /* Initialize user-specific link properties */ | 9245 | /* Initialize user-specific link properties */ |
@@ -9008,7 +9309,11 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf) | |||
9008 | pf->flags &= ~I40E_FLAG_DCB_CAPABLE; | 9309 | pf->flags &= ~I40E_FLAG_DCB_CAPABLE; |
9009 | dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n"); | 9310 | dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n"); |
9010 | } | 9311 | } |
9011 | pf->num_lan_qps = pf->rss_size_max; | 9312 | pf->num_lan_qps = max_t(int, pf->rss_size_max, |
9313 | num_online_cpus()); | ||
9314 | pf->num_lan_qps = min_t(int, pf->num_lan_qps, | ||
9315 | pf->hw.func_caps.num_tx_qp); | ||
9316 | |||
9012 | queues_left -= pf->num_lan_qps; | 9317 | queues_left -= pf->num_lan_qps; |
9013 | } | 9318 | } |
9014 | 9319 | ||
@@ -9106,8 +9411,10 @@ static void i40e_print_features(struct i40e_pf *pf) | |||
9106 | #ifdef CONFIG_PCI_IOV | 9411 | #ifdef CONFIG_PCI_IOV |
9107 | buf += sprintf(buf, "VFs: %d ", pf->num_req_vfs); | 9412 | buf += sprintf(buf, "VFs: %d ", pf->num_req_vfs); |
9108 | #endif | 9413 | #endif |
9109 | buf += sprintf(buf, "VSIs: %d QP: %d ", pf->hw.func_caps.num_vsis, | 9414 | buf += sprintf(buf, "VSIs: %d QP: %d RX: %s ", |
9110 | pf->vsi[pf->lan_vsi]->num_queue_pairs); | 9415 | pf->hw.func_caps.num_vsis, |
9416 | pf->vsi[pf->lan_vsi]->num_queue_pairs, | ||
9417 | pf->flags & I40E_FLAG_RX_PS_ENABLED ? "PS" : "1BUF"); | ||
9111 | 9418 | ||
9112 | if (pf->flags & I40E_FLAG_RSS_ENABLED) | 9419 | if (pf->flags & I40E_FLAG_RSS_ENABLED) |
9113 | buf += sprintf(buf, "RSS "); | 9420 | buf += sprintf(buf, "RSS "); |
@@ -9144,6 +9451,7 @@ static void i40e_print_features(struct i40e_pf *pf) | |||
9144 | **/ | 9451 | **/ |
9145 | static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | 9452 | static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
9146 | { | 9453 | { |
9454 | struct i40e_aq_get_phy_abilities_resp abilities; | ||
9147 | struct i40e_pf *pf; | 9455 | struct i40e_pf *pf; |
9148 | struct i40e_hw *hw; | 9456 | struct i40e_hw *hw; |
9149 | static u16 pfs_found; | 9457 | static u16 pfs_found; |
@@ -9409,13 +9717,14 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
9409 | if (err) | 9717 | if (err) |
9410 | dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", err); | 9718 | dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", err); |
9411 | 9719 | ||
9412 | msleep(75); | 9720 | if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || |
9413 | err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); | 9721 | (pf->hw.aq.fw_maj_ver < 4)) { |
9414 | if (err) { | 9722 | msleep(75); |
9415 | dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n", | 9723 | err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); |
9416 | pf->hw.aq.asq_last_status); | 9724 | if (err) |
9725 | dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n", | ||
9726 | pf->hw.aq.asq_last_status); | ||
9417 | } | 9727 | } |
9418 | |||
9419 | /* The main driver is (mostly) up and happy. We need to set this state | 9728 | /* The main driver is (mostly) up and happy. We need to set this state |
9420 | * before setting up the misc vector or we get a race and the vector | 9729 | * before setting up the misc vector or we get a race and the vector |
9421 | * ends up disabled forever. | 9730 | * ends up disabled forever. |
@@ -9499,6 +9808,13 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
9499 | dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n"); | 9808 | dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n"); |
9500 | } | 9809 | } |
9501 | 9810 | ||
9811 | /* get the requested speeds from the fw */ | ||
9812 | err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL); | ||
9813 | if (err) | ||
9814 | dev_info(&pf->pdev->dev, "get phy abilities failed, aq_err %d, advertised speed settings may not be correct\n", | ||
9815 | err); | ||
9816 | pf->hw.phy.link_info.requested_speeds = abilities.link_speed; | ||
9817 | |||
9502 | /* print a string summarizing features */ | 9818 | /* print a string summarizing features */ |
9503 | i40e_print_features(pf); | 9819 | i40e_print_features(pf); |
9504 | 9820 | ||
@@ -9844,6 +10160,10 @@ static int __init i40e_init_module(void) | |||
9844 | pr_info("%s: %s - version %s\n", i40e_driver_name, | 10160 | pr_info("%s: %s - version %s\n", i40e_driver_name, |
9845 | i40e_driver_string, i40e_driver_version_str); | 10161 | i40e_driver_string, i40e_driver_version_str); |
9846 | pr_info("%s: %s\n", i40e_driver_name, i40e_copyright); | 10162 | pr_info("%s: %s\n", i40e_driver_name, i40e_copyright); |
10163 | |||
10164 | #if IS_ENABLED(CONFIG_I40E_CONFIGFS_FS) | ||
10165 | i40e_configfs_init(); | ||
10166 | #endif /* CONFIG_I40E_CONFIGFS_FS */ | ||
9847 | i40e_dbg_init(); | 10167 | i40e_dbg_init(); |
9848 | return pci_register_driver(&i40e_driver); | 10168 | return pci_register_driver(&i40e_driver); |
9849 | } | 10169 | } |
@@ -9859,5 +10179,8 @@ static void __exit i40e_exit_module(void) | |||
9859 | { | 10179 | { |
9860 | pci_unregister_driver(&i40e_driver); | 10180 | pci_unregister_driver(&i40e_driver); |
9861 | i40e_dbg_exit(); | 10181 | i40e_dbg_exit(); |
10182 | #if IS_ENABLED(CONFIG_I40E_CONFIGFS_FS) | ||
10183 | i40e_configfs_exit(); | ||
10184 | #endif /* CONFIG_I40E_CONFIGFS_FS */ | ||
9862 | } | 10185 | } |
9863 | module_exit(i40e_exit_module); | 10186 | module_exit(i40e_exit_module); |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index 5defe0d63514..039018abad4a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c | |||
@@ -164,15 +164,15 @@ static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw) | |||
164 | } | 164 | } |
165 | 165 | ||
166 | /** | 166 | /** |
167 | * i40e_read_nvm_word - Reads Shadow RAM | 167 | * i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register |
168 | * @hw: pointer to the HW structure | 168 | * @hw: pointer to the HW structure |
169 | * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) | 169 | * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) |
170 | * @data: word read from the Shadow RAM | 170 | * @data: word read from the Shadow RAM |
171 | * | 171 | * |
172 | * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register. | 172 | * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register. |
173 | **/ | 173 | **/ |
174 | i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, | 174 | i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset, |
175 | u16 *data) | 175 | u16 *data) |
176 | { | 176 | { |
177 | i40e_status ret_code = I40E_ERR_TIMEOUT; | 177 | i40e_status ret_code = I40E_ERR_TIMEOUT; |
178 | u32 sr_reg; | 178 | u32 sr_reg; |
@@ -200,6 +200,7 @@ i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, | |||
200 | *data = (u16)((sr_reg & | 200 | *data = (u16)((sr_reg & |
201 | I40E_GLNVM_SRDATA_RDDATA_MASK) | 201 | I40E_GLNVM_SRDATA_RDDATA_MASK) |
202 | >> I40E_GLNVM_SRDATA_RDDATA_SHIFT); | 202 | >> I40E_GLNVM_SRDATA_RDDATA_SHIFT); |
203 | *data = le16_to_cpu(*data); | ||
203 | } | 204 | } |
204 | } | 205 | } |
205 | if (ret_code) | 206 | if (ret_code) |
@@ -212,7 +213,21 @@ read_nvm_exit: | |||
212 | } | 213 | } |
213 | 214 | ||
214 | /** | 215 | /** |
215 | * i40e_read_nvm_buffer - Reads Shadow RAM buffer | 216 | * i40e_read_nvm_word - Reads Shadow RAM |
217 | * @hw: pointer to the HW structure | ||
218 | * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) | ||
219 | * @data: word read from the Shadow RAM | ||
220 | * | ||
221 | * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register. | ||
222 | **/ | ||
223 | i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, | ||
224 | u16 *data) | ||
225 | { | ||
226 | return i40e_read_nvm_word_srctl(hw, offset, data); | ||
227 | } | ||
228 | |||
229 | /** | ||
230 | * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register | ||
216 | * @hw: pointer to the HW structure | 231 | * @hw: pointer to the HW structure |
217 | * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). | 232 | * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). |
218 | * @words: (in) number of words to read; (out) number of words actually read | 233 | * @words: (in) number of words to read; (out) number of words actually read |
@@ -222,8 +237,8 @@ read_nvm_exit: | |||
222 | * method. The buffer read is preceded by the NVM ownership take | 237 | * method. The buffer read is preceded by the NVM ownership take |
223 | * and followed by the release. | 238 | * and followed by the release. |
224 | **/ | 239 | **/ |
225 | i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, | 240 | i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset, |
226 | u16 *words, u16 *data) | 241 | u16 *words, u16 *data) |
227 | { | 242 | { |
228 | i40e_status ret_code = 0; | 243 | i40e_status ret_code = 0; |
229 | u16 index, word; | 244 | u16 index, word; |
@@ -231,7 +246,7 @@ i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, | |||
231 | /* Loop thru the selected region */ | 246 | /* Loop thru the selected region */ |
232 | for (word = 0; word < *words; word++) { | 247 | for (word = 0; word < *words; word++) { |
233 | index = offset + word; | 248 | index = offset + word; |
234 | ret_code = i40e_read_nvm_word(hw, index, &data[word]); | 249 | ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]); |
235 | if (ret_code) | 250 | if (ret_code) |
236 | break; | 251 | break; |
237 | } | 252 | } |
@@ -243,6 +258,23 @@ i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, | |||
243 | } | 258 | } |
244 | 259 | ||
245 | /** | 260 | /** |
261 | * i40e_read_nvm_buffer - Reads Shadow RAM buffer | ||
262 | * @hw: pointer to the HW structure | ||
263 | * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). | ||
264 | * @words: (in) number of words to read; (out) number of words actually read | ||
265 | * @data: words read from the Shadow RAM | ||
266 | * | ||
267 | * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd() | ||
268 | * method. The buffer read is preceded by the NVM ownership take | ||
269 | * and followed by the release. | ||
270 | **/ | ||
271 | i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, | ||
272 | u16 *words, u16 *data) | ||
273 | { | ||
274 | return i40e_read_nvm_buffer_srctl(hw, offset, words, data); | ||
275 | } | ||
276 | |||
277 | /** | ||
246 | * i40e_write_nvm_aq - Writes Shadow RAM. | 278 | * i40e_write_nvm_aq - Writes Shadow RAM. |
247 | * @hw: pointer to the HW structure. | 279 | * @hw: pointer to the HW structure. |
248 | * @module_pointer: module pointer location in words from the NVM beginning | 280 | * @module_pointer: module pointer location in words from the NVM beginning |
@@ -302,11 +334,18 @@ static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw, | |||
302 | u16 *checksum) | 334 | u16 *checksum) |
303 | { | 335 | { |
304 | i40e_status ret_code = 0; | 336 | i40e_status ret_code = 0; |
337 | struct i40e_virt_mem vmem; | ||
305 | u16 pcie_alt_module = 0; | 338 | u16 pcie_alt_module = 0; |
306 | u16 checksum_local = 0; | 339 | u16 checksum_local = 0; |
307 | u16 vpd_module = 0; | 340 | u16 vpd_module = 0; |
308 | u16 word = 0; | 341 | u16 *data; |
309 | u32 i = 0; | 342 | u16 i = 0; |
343 | |||
344 | ret_code = i40e_allocate_virt_mem(hw, &vmem, | ||
345 | I40E_SR_SECTOR_SIZE_IN_WORDS * sizeof(u16)); | ||
346 | if (ret_code) | ||
347 | goto i40e_calc_nvm_checksum_exit; | ||
348 | data = (u16 *)vmem.va; | ||
310 | 349 | ||
311 | /* read pointer to VPD area */ | 350 | /* read pointer to VPD area */ |
312 | ret_code = i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module); | 351 | ret_code = i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module); |
@@ -317,7 +356,7 @@ static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw, | |||
317 | 356 | ||
318 | /* read pointer to PCIe Alt Auto-load module */ | 357 | /* read pointer to PCIe Alt Auto-load module */ |
319 | ret_code = i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR, | 358 | ret_code = i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR, |
320 | &pcie_alt_module); | 359 | &pcie_alt_module); |
321 | if (ret_code) { | 360 | if (ret_code) { |
322 | ret_code = I40E_ERR_NVM_CHECKSUM; | 361 | ret_code = I40E_ERR_NVM_CHECKSUM; |
323 | goto i40e_calc_nvm_checksum_exit; | 362 | goto i40e_calc_nvm_checksum_exit; |
@@ -327,33 +366,40 @@ static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw, | |||
327 | * except the VPD and PCIe ALT Auto-load modules | 366 | * except the VPD and PCIe ALT Auto-load modules |
328 | */ | 367 | */ |
329 | for (i = 0; i < hw->nvm.sr_size; i++) { | 368 | for (i = 0; i < hw->nvm.sr_size; i++) { |
369 | /* Read SR page */ | ||
370 | if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) { | ||
371 | u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS; | ||
372 | |||
373 | ret_code = i40e_read_nvm_buffer(hw, i, &words, data); | ||
374 | if (ret_code) { | ||
375 | ret_code = I40E_ERR_NVM_CHECKSUM; | ||
376 | goto i40e_calc_nvm_checksum_exit; | ||
377 | } | ||
378 | } | ||
379 | |||
330 | /* Skip Checksum word */ | 380 | /* Skip Checksum word */ |
331 | if (i == I40E_SR_SW_CHECKSUM_WORD) | 381 | if (i == I40E_SR_SW_CHECKSUM_WORD) |
332 | i++; | 382 | continue; |
333 | /* Skip VPD module (convert byte size to word count) */ | 383 | /* Skip VPD module (convert byte size to word count) */ |
334 | if (i == (u32)vpd_module) { | 384 | if ((i >= (u32)vpd_module) && |
335 | i += (I40E_SR_VPD_MODULE_MAX_SIZE / 2); | 385 | (i < ((u32)vpd_module + |
336 | if (i >= hw->nvm.sr_size) | 386 | (I40E_SR_VPD_MODULE_MAX_SIZE / 2)))) { |
337 | break; | 387 | continue; |
338 | } | 388 | } |
339 | /* Skip PCIe ALT module (convert byte size to word count) */ | 389 | /* Skip PCIe ALT module (convert byte size to word count) */ |
340 | if (i == (u32)pcie_alt_module) { | 390 | if ((i >= (u32)pcie_alt_module) && |
341 | i += (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2); | 391 | (i < ((u32)pcie_alt_module + |
342 | if (i >= hw->nvm.sr_size) | 392 | (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2)))) { |
343 | break; | 393 | continue; |
344 | } | 394 | } |
345 | 395 | ||
346 | ret_code = i40e_read_nvm_word(hw, (u16)i, &word); | 396 | checksum_local += data[i % I40E_SR_SECTOR_SIZE_IN_WORDS]; |
347 | if (ret_code) { | ||
348 | ret_code = I40E_ERR_NVM_CHECKSUM; | ||
349 | goto i40e_calc_nvm_checksum_exit; | ||
350 | } | ||
351 | checksum_local += word; | ||
352 | } | 397 | } |
353 | 398 | ||
354 | *checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local; | 399 | *checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local; |
355 | 400 | ||
356 | i40e_calc_nvm_checksum_exit: | 401 | i40e_calc_nvm_checksum_exit: |
402 | i40e_free_virt_mem(hw, &vmem); | ||
357 | return ret_code; | 403 | return ret_code; |
358 | } | 404 | } |
359 | 405 | ||
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index 68e852a96680..fea0d37ecc72 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | * | 2 | * |
3 | * Intel Ethernet Controller XL710 Family Linux Driver | 3 | * Intel Ethernet Controller XL710 Family Linux Driver |
4 | * Copyright(c) 2013 - 2014 Intel Corporation. | 4 | * Copyright(c) 2013 - 2015 Intel Corporation. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms and conditions of the GNU General Public License, | 7 | * under the terms and conditions of the GNU General Public License, |
@@ -66,6 +66,7 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink); | |||
66 | 66 | ||
67 | i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw, | 67 | i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw, |
68 | u16 *fw_major_version, u16 *fw_minor_version, | 68 | u16 *fw_major_version, u16 *fw_minor_version, |
69 | u32 *fw_build, | ||
69 | u16 *api_major_version, u16 *api_minor_version, | 70 | u16 *api_major_version, u16 *api_minor_version, |
70 | struct i40e_asq_cmd_details *cmd_details); | 71 | struct i40e_asq_cmd_details *cmd_details); |
71 | i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw, | 72 | i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw, |
@@ -97,7 +98,6 @@ i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw, | |||
97 | i40e_status i40e_aq_get_link_info(struct i40e_hw *hw, | 98 | i40e_status i40e_aq_get_link_info(struct i40e_hw *hw, |
98 | bool enable_lse, struct i40e_link_status *link, | 99 | bool enable_lse, struct i40e_link_status *link, |
99 | struct i40e_asq_cmd_details *cmd_details); | 100 | struct i40e_asq_cmd_details *cmd_details); |
100 | i40e_status i40e_update_link_info(struct i40e_hw *hw, bool enable_lse); | ||
101 | i40e_status i40e_aq_set_local_advt_reg(struct i40e_hw *hw, | 101 | i40e_status i40e_aq_set_local_advt_reg(struct i40e_hw *hw, |
102 | u64 advt_reg, | 102 | u64 advt_reg, |
103 | struct i40e_asq_cmd_details *cmd_details); | 103 | struct i40e_asq_cmd_details *cmd_details); |
@@ -247,6 +247,12 @@ void i40e_clear_hw(struct i40e_hw *hw); | |||
247 | void i40e_clear_pxe_mode(struct i40e_hw *hw); | 247 | void i40e_clear_pxe_mode(struct i40e_hw *hw); |
248 | bool i40e_get_link_status(struct i40e_hw *hw); | 248 | bool i40e_get_link_status(struct i40e_hw *hw); |
249 | i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr); | 249 | i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr); |
250 | i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw, | ||
251 | u32 *max_bw, u32 *min_bw, bool *min_valid, | ||
252 | bool *max_valid); | ||
253 | i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw, | ||
254 | struct i40e_aqc_configure_partition_bw_data *bw_data, | ||
255 | struct i40e_asq_cmd_details *cmd_details); | ||
250 | i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr); | 256 | i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr); |
251 | i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num, | 257 | i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num, |
252 | u32 pba_num_size); | 258 | u32 pba_num_size); |
@@ -260,8 +266,6 @@ i40e_status i40e_init_nvm(struct i40e_hw *hw); | |||
260 | i40e_status i40e_acquire_nvm(struct i40e_hw *hw, | 266 | i40e_status i40e_acquire_nvm(struct i40e_hw *hw, |
261 | enum i40e_aq_resource_access_type access); | 267 | enum i40e_aq_resource_access_type access); |
262 | void i40e_release_nvm(struct i40e_hw *hw); | 268 | void i40e_release_nvm(struct i40e_hw *hw); |
263 | i40e_status i40e_read_nvm_srrd(struct i40e_hw *hw, u16 offset, | ||
264 | u16 *data); | ||
265 | i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, | 269 | i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, |
266 | u16 *data); | 270 | u16 *data); |
267 | i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, | 271 | i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h index 65d3c8bb2d5b..522d6df51330 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_register.h +++ b/drivers/net/ethernet/intel/i40e/i40e_register.h | |||
@@ -310,6 +310,10 @@ | |||
310 | #define I40E_PRTDCB_RUP2TC_UP6TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP6TC_SHIFT) | 310 | #define I40E_PRTDCB_RUP2TC_UP6TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP6TC_SHIFT) |
311 | #define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21 | 311 | #define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21 |
312 | #define I40E_PRTDCB_RUP2TC_UP7TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP7TC_SHIFT) | 312 | #define I40E_PRTDCB_RUP2TC_UP7TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP7TC_SHIFT) |
313 | #define I40E_PRTDCB_RUPTQ(_i) (0x00122400 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ | ||
314 | #define I40E_PRTDCB_RUPTQ_MAX_INDEX 7 | ||
315 | #define I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT 0 | ||
316 | #define I40E_PRTDCB_RUPTQ_RXQNUM_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT) | ||
313 | #define I40E_PRTDCB_TC2PFC 0x001C0980 /* Reset: CORER */ | 317 | #define I40E_PRTDCB_TC2PFC 0x001C0980 /* Reset: CORER */ |
314 | #define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0 | 318 | #define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0 |
315 | #define I40E_PRTDCB_TC2PFC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT) | 319 | #define I40E_PRTDCB_TC2PFC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT) |
@@ -421,6 +425,8 @@ | |||
421 | #define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT) | 425 | #define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT) |
422 | #define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20 | 426 | #define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20 |
423 | #define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK I40E_MASK(0x3F, I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT) | 427 | #define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK I40E_MASK(0x3F, I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT) |
428 | #define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT 26 | ||
429 | #define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_MASK I40E_MASK(0xF, I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT) | ||
424 | #define I40E_GLGEN_GPIO_SET 0x00088184 /* Reset: POR */ | 430 | #define I40E_GLGEN_GPIO_SET 0x00088184 /* Reset: POR */ |
425 | #define I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT 0 | 431 | #define I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT 0 |
426 | #define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT) | 432 | #define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT) |
@@ -484,7 +490,9 @@ | |||
484 | #define I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT 17 | 490 | #define I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT 17 |
485 | #define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT) | 491 | #define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT) |
486 | #define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT 18 | 492 | #define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT 18 |
487 | #define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x3FFF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT) | 493 | #define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x7FF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT) |
494 | #define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT 29 | ||
495 | #define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_MASK I40E_MASK(0x7, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT) | ||
488 | #define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ | 496 | #define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ |
489 | #define I40E_GLGEN_MDIO_I2C_SEL_MAX_INDEX 3 | 497 | #define I40E_GLGEN_MDIO_I2C_SEL_MAX_INDEX 3 |
490 | #define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT 0 | 498 | #define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT 0 |
@@ -548,9 +556,6 @@ | |||
548 | #define I40E_GLGEN_RSTCTL_GRSTDEL_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT) | 556 | #define I40E_GLGEN_RSTCTL_GRSTDEL_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT) |
549 | #define I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT 8 | 557 | #define I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT 8 |
550 | #define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT) | 558 | #define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT) |
551 | #define I40E_GLGEN_RSTENA_EMP 0x000B818C /* Reset: POR */ | ||
552 | #define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT 0 | ||
553 | #define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT) | ||
554 | #define I40E_GLGEN_RTRIG 0x000B8190 /* Reset: CORER */ | 559 | #define I40E_GLGEN_RTRIG 0x000B8190 /* Reset: CORER */ |
555 | #define I40E_GLGEN_RTRIG_CORER_SHIFT 0 | 560 | #define I40E_GLGEN_RTRIG_CORER_SHIFT 0 |
556 | #define I40E_GLGEN_RTRIG_CORER_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_CORER_SHIFT) | 561 | #define I40E_GLGEN_RTRIG_CORER_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_CORER_SHIFT) |
@@ -1066,7 +1071,7 @@ | |||
1066 | #define I40E_PFINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATEN_INTERVAL_SHIFT) | 1071 | #define I40E_PFINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATEN_INTERVAL_SHIFT) |
1067 | #define I40E_PFINT_RATEN_INTRL_ENA_SHIFT 6 | 1072 | #define I40E_PFINT_RATEN_INTRL_ENA_SHIFT 6 |
1068 | #define I40E_PFINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATEN_INTRL_ENA_SHIFT) | 1073 | #define I40E_PFINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATEN_INTRL_ENA_SHIFT) |
1069 | #define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: PFR */ | 1074 | #define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: CORER */ |
1070 | #define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2 | 1075 | #define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2 |
1071 | #define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT) | 1076 | #define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT) |
1072 | #define I40E_QINT_RQCTL(_Q) (0x0003A000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ | 1077 | #define I40E_QINT_RQCTL(_Q) (0x0003A000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ |
@@ -1171,7 +1176,7 @@ | |||
1171 | #define I40E_VFINT_ITRN_MAX_INDEX 2 | 1176 | #define I40E_VFINT_ITRN_MAX_INDEX 2 |
1172 | #define I40E_VFINT_ITRN_INTERVAL_SHIFT 0 | 1177 | #define I40E_VFINT_ITRN_INTERVAL_SHIFT 0 |
1173 | #define I40E_VFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN_INTERVAL_SHIFT) | 1178 | #define I40E_VFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN_INTERVAL_SHIFT) |
1174 | #define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ | 1179 | #define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ |
1175 | #define I40E_VFINT_STAT_CTL0_MAX_INDEX 127 | 1180 | #define I40E_VFINT_STAT_CTL0_MAX_INDEX 127 |
1176 | #define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2 | 1181 | #define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2 |
1177 | #define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT) | 1182 | #define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT) |
@@ -1803,9 +1808,6 @@ | |||
1803 | #define I40E_GLPCI_GSCN_0_3_MAX_INDEX 3 | 1808 | #define I40E_GLPCI_GSCN_0_3_MAX_INDEX 3 |
1804 | #define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT 0 | 1809 | #define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT 0 |
1805 | #define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT) | 1810 | #define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT) |
1806 | #define I40E_GLPCI_LATCT 0x0009C4B4 /* Reset: PCIR */ | ||
1807 | #define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT 0 | ||
1808 | #define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT) | ||
1809 | #define I40E_GLPCI_LBARCTRL 0x000BE484 /* Reset: POR */ | 1811 | #define I40E_GLPCI_LBARCTRL 0x000BE484 /* Reset: POR */ |
1810 | #define I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT 0 | 1812 | #define I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT 0 |
1811 | #define I40E_GLPCI_LBARCTRL_PREFBAR_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT) | 1813 | #define I40E_GLPCI_LBARCTRL_PREFBAR_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT) |
@@ -1902,6 +1904,11 @@ | |||
1902 | #define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT) | 1904 | #define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT) |
1903 | #define I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT 1 | 1905 | #define I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT 1 |
1904 | #define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT) | 1906 | #define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT) |
1907 | #define I40E_GLTPH_CTRL 0x000BE480 /* Reset: PCIR */ | ||
1908 | #define I40E_GLTPH_CTRL_DESC_PH_SHIFT 9 | ||
1909 | #define I40E_GLTPH_CTRL_DESC_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DESC_PH_SHIFT) | ||
1910 | #define I40E_GLTPH_CTRL_DATA_PH_SHIFT 11 | ||
1911 | #define I40E_GLTPH_CTRL_DATA_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DATA_PH_SHIFT) | ||
1905 | #define I40E_PF_FUNC_RID 0x0009C000 /* Reset: PCIR */ | 1912 | #define I40E_PF_FUNC_RID 0x0009C000 /* Reset: PCIR */ |
1906 | #define I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT 0 | 1913 | #define I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT 0 |
1907 | #define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK I40E_MASK(0x7, I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT) | 1914 | #define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK I40E_MASK(0x7, I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT) |
@@ -2374,20 +2381,20 @@ | |||
2374 | #define I40E_GL_RXERR2_L_FCOEDIXAC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT) | 2381 | #define I40E_GL_RXERR2_L_FCOEDIXAC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT) |
2375 | #define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ | 2382 | #define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ |
2376 | #define I40E_GLPRT_BPRCH_MAX_INDEX 3 | 2383 | #define I40E_GLPRT_BPRCH_MAX_INDEX 3 |
2377 | #define I40E_GLPRT_BPRCH_UPRCH_SHIFT 0 | 2384 | #define I40E_GLPRT_BPRCH_BPRCH_SHIFT 0 |
2378 | #define I40E_GLPRT_BPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_UPRCH_SHIFT) | 2385 | #define I40E_GLPRT_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_BPRCH_SHIFT) |
2379 | #define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ | 2386 | #define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ |
2380 | #define I40E_GLPRT_BPRCL_MAX_INDEX 3 | 2387 | #define I40E_GLPRT_BPRCL_MAX_INDEX 3 |
2381 | #define I40E_GLPRT_BPRCL_UPRCH_SHIFT 0 | 2388 | #define I40E_GLPRT_BPRCL_BPRCL_SHIFT 0 |
2382 | #define I40E_GLPRT_BPRCL_UPRCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_UPRCH_SHIFT) | 2389 | #define I40E_GLPRT_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_BPRCL_SHIFT) |
2383 | #define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ | 2390 | #define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ |
2384 | #define I40E_GLPRT_BPTCH_MAX_INDEX 3 | 2391 | #define I40E_GLPRT_BPTCH_MAX_INDEX 3 |
2385 | #define I40E_GLPRT_BPTCH_UPRCH_SHIFT 0 | 2392 | #define I40E_GLPRT_BPTCH_BPTCH_SHIFT 0 |
2386 | #define I40E_GLPRT_BPTCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_UPRCH_SHIFT) | 2393 | #define I40E_GLPRT_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_BPTCH_SHIFT) |
2387 | #define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ | 2394 | #define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ |
2388 | #define I40E_GLPRT_BPTCL_MAX_INDEX 3 | 2395 | #define I40E_GLPRT_BPTCL_MAX_INDEX 3 |
2389 | #define I40E_GLPRT_BPTCL_UPRCH_SHIFT 0 | 2396 | #define I40E_GLPRT_BPTCL_BPTCL_SHIFT 0 |
2390 | #define I40E_GLPRT_BPTCL_UPRCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_UPRCH_SHIFT) | 2397 | #define I40E_GLPRT_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_BPTCL_SHIFT) |
2391 | #define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ | 2398 | #define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ |
2392 | #define I40E_GLPRT_CRCERRS_MAX_INDEX 3 | 2399 | #define I40E_GLPRT_CRCERRS_MAX_INDEX 3 |
2393 | #define I40E_GLPRT_CRCERRS_CRCERRS_SHIFT 0 | 2400 | #define I40E_GLPRT_CRCERRS_CRCERRS_SHIFT 0 |
@@ -2620,10 +2627,6 @@ | |||
2620 | #define I40E_GLPRT_TDOLD_MAX_INDEX 3 | 2627 | #define I40E_GLPRT_TDOLD_MAX_INDEX 3 |
2621 | #define I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT 0 | 2628 | #define I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT 0 |
2622 | #define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT) | 2629 | #define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT) |
2623 | #define I40E_GLPRT_TDPC(_i) (0x00375400 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ | ||
2624 | #define I40E_GLPRT_TDPC_MAX_INDEX 3 | ||
2625 | #define I40E_GLPRT_TDPC_TDPC_SHIFT 0 | ||
2626 | #define I40E_GLPRT_TDPC_TDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDPC_TDPC_SHIFT) | ||
2627 | #define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ | 2630 | #define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ |
2628 | #define I40E_GLPRT_UPRCH_MAX_INDEX 3 | 2631 | #define I40E_GLPRT_UPRCH_MAX_INDEX 3 |
2629 | #define I40E_GLPRT_UPRCH_UPRCH_SHIFT 0 | 2632 | #define I40E_GLPRT_UPRCH_UPRCH_SHIFT 0 |
@@ -2990,9 +2993,6 @@ | |||
2990 | #define I40E_PRTTSYN_TXTIME_L 0x001E41C0 /* Reset: GLOBR */ | 2993 | #define I40E_PRTTSYN_TXTIME_L 0x001E41C0 /* Reset: GLOBR */ |
2991 | #define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT 0 | 2994 | #define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT 0 |
2992 | #define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT) | 2995 | #define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT) |
2993 | #define I40E_GLSCD_QUANTA 0x000B2080 /* Reset: CORER */ | ||
2994 | #define I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT 0 | ||
2995 | #define I40E_GLSCD_QUANTA_TSCDQUANTA_MASK I40E_MASK(0x7, I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT) | ||
2996 | #define I40E_GL_MDET_RX 0x0012A510 /* Reset: CORER */ | 2996 | #define I40E_GL_MDET_RX 0x0012A510 /* Reset: CORER */ |
2997 | #define I40E_GL_MDET_RX_FUNCTION_SHIFT 0 | 2997 | #define I40E_GL_MDET_RX_FUNCTION_SHIFT 0 |
2998 | #define I40E_GL_MDET_RX_FUNCTION_MASK I40E_MASK(0xFF, I40E_GL_MDET_RX_FUNCTION_SHIFT) | 2998 | #define I40E_GL_MDET_RX_FUNCTION_MASK I40E_MASK(0xFF, I40E_GL_MDET_RX_FUNCTION_SHIFT) |
@@ -3258,7 +3258,7 @@ | |||
3258 | #define I40E_VFINT_ITRN1_MAX_INDEX 2 | 3258 | #define I40E_VFINT_ITRN1_MAX_INDEX 2 |
3259 | #define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0 | 3259 | #define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0 |
3260 | #define I40E_VFINT_ITRN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN1_INTERVAL_SHIFT) | 3260 | #define I40E_VFINT_ITRN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN1_INTERVAL_SHIFT) |
3261 | #define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: VFR */ | 3261 | #define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: CORER */ |
3262 | #define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2 | 3262 | #define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2 |
3263 | #define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT) | 3263 | #define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT) |
3264 | #define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: CORER */ | 3264 | #define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: CORER */ |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index bbf1b1247ac4..d4b4aa7c204e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c | |||
@@ -25,6 +25,7 @@ | |||
25 | ******************************************************************************/ | 25 | ******************************************************************************/ |
26 | 26 | ||
27 | #include <linux/prefetch.h> | 27 | #include <linux/prefetch.h> |
28 | #include <net/busy_poll.h> | ||
28 | #include "i40e.h" | 29 | #include "i40e.h" |
29 | #include "i40e_prototype.h" | 30 | #include "i40e_prototype.h" |
30 | 31 | ||
@@ -1031,6 +1032,22 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring) | |||
1031 | if (!rx_ring->rx_bi) | 1032 | if (!rx_ring->rx_bi) |
1032 | return; | 1033 | return; |
1033 | 1034 | ||
1035 | if (ring_is_ps_enabled(rx_ring)) { | ||
1036 | int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count; | ||
1037 | |||
1038 | rx_bi = &rx_ring->rx_bi[0]; | ||
1039 | if (rx_bi->hdr_buf) { | ||
1040 | dma_free_coherent(dev, | ||
1041 | bufsz, | ||
1042 | rx_bi->hdr_buf, | ||
1043 | rx_bi->dma); | ||
1044 | for (i = 0; i < rx_ring->count; i++) { | ||
1045 | rx_bi = &rx_ring->rx_bi[i]; | ||
1046 | rx_bi->dma = 0; | ||
1047 | rx_bi->hdr_buf = 0; | ||
1048 | } | ||
1049 | } | ||
1050 | } | ||
1034 | /* Free all the Rx ring sk_buffs */ | 1051 | /* Free all the Rx ring sk_buffs */ |
1035 | for (i = 0; i < rx_ring->count; i++) { | 1052 | for (i = 0; i < rx_ring->count; i++) { |
1036 | rx_bi = &rx_ring->rx_bi[i]; | 1053 | rx_bi = &rx_ring->rx_bi[i]; |
@@ -1089,6 +1106,37 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring) | |||
1089 | } | 1106 | } |
1090 | 1107 | ||
1091 | /** | 1108 | /** |
1109 | * i40e_alloc_rx_headers - allocate rx header buffers | ||
1110 | * @rx_ring: ring to alloc buffers | ||
1111 | * | ||
1112 | * Allocate rx header buffers for the entire ring. As these are static, | ||
1113 | * this is only called when setting up a new ring. | ||
1114 | **/ | ||
1115 | void i40e_alloc_rx_headers(struct i40e_ring *rx_ring) | ||
1116 | { | ||
1117 | struct device *dev = rx_ring->dev; | ||
1118 | struct i40e_rx_buffer *rx_bi; | ||
1119 | dma_addr_t dma; | ||
1120 | void *buffer; | ||
1121 | int buf_size; | ||
1122 | int i; | ||
1123 | |||
1124 | if (rx_ring->rx_bi[0].hdr_buf) | ||
1125 | return; | ||
1126 | /* Make sure the buffers don't cross cache line boundaries. */ | ||
1127 | buf_size = ALIGN(rx_ring->rx_hdr_len, 256); | ||
1128 | buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count, | ||
1129 | &dma, GFP_KERNEL); | ||
1130 | if (!buffer) | ||
1131 | return; | ||
1132 | for (i = 0; i < rx_ring->count; i++) { | ||
1133 | rx_bi = &rx_ring->rx_bi[i]; | ||
1134 | rx_bi->dma = dma + (i * buf_size); | ||
1135 | rx_bi->hdr_buf = buffer + (i * buf_size); | ||
1136 | } | ||
1137 | } | ||
1138 | |||
1139 | /** | ||
1092 | * i40e_setup_rx_descriptors - Allocate Rx descriptors | 1140 | * i40e_setup_rx_descriptors - Allocate Rx descriptors |
1093 | * @rx_ring: Rx descriptor ring (for a specific queue) to setup | 1141 | * @rx_ring: Rx descriptor ring (for a specific queue) to setup |
1094 | * | 1142 | * |
@@ -1148,11 +1196,76 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) | |||
1148 | } | 1196 | } |
1149 | 1197 | ||
1150 | /** | 1198 | /** |
1151 | * i40e_alloc_rx_buffers - Replace used receive buffers; packet split | 1199 | * i40e_alloc_rx_buffers_ps - Replace used receive buffers; packet split |
1200 | * @rx_ring: ring to place buffers on | ||
1201 | * @cleaned_count: number of buffers to replace | ||
1202 | **/ | ||
1203 | void i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count) | ||
1204 | { | ||
1205 | u16 i = rx_ring->next_to_use; | ||
1206 | union i40e_rx_desc *rx_desc; | ||
1207 | struct i40e_rx_buffer *bi; | ||
1208 | |||
1209 | /* do nothing if no valid netdev defined */ | ||
1210 | if (!rx_ring->netdev || !cleaned_count) | ||
1211 | return; | ||
1212 | |||
1213 | while (cleaned_count--) { | ||
1214 | rx_desc = I40E_RX_DESC(rx_ring, i); | ||
1215 | bi = &rx_ring->rx_bi[i]; | ||
1216 | |||
1217 | if (bi->skb) /* desc is in use */ | ||
1218 | goto no_buffers; | ||
1219 | if (!bi->page) { | ||
1220 | bi->page = alloc_page(GFP_ATOMIC); | ||
1221 | if (!bi->page) { | ||
1222 | rx_ring->rx_stats.alloc_page_failed++; | ||
1223 | goto no_buffers; | ||
1224 | } | ||
1225 | } | ||
1226 | |||
1227 | if (!bi->page_dma) { | ||
1228 | /* use a half page if we're re-using */ | ||
1229 | bi->page_offset ^= PAGE_SIZE / 2; | ||
1230 | bi->page_dma = dma_map_page(rx_ring->dev, | ||
1231 | bi->page, | ||
1232 | bi->page_offset, | ||
1233 | PAGE_SIZE / 2, | ||
1234 | DMA_FROM_DEVICE); | ||
1235 | if (dma_mapping_error(rx_ring->dev, | ||
1236 | bi->page_dma)) { | ||
1237 | rx_ring->rx_stats.alloc_page_failed++; | ||
1238 | bi->page_dma = 0; | ||
1239 | goto no_buffers; | ||
1240 | } | ||
1241 | } | ||
1242 | |||
1243 | dma_sync_single_range_for_device(rx_ring->dev, | ||
1244 | bi->dma, | ||
1245 | 0, | ||
1246 | rx_ring->rx_hdr_len, | ||
1247 | DMA_FROM_DEVICE); | ||
1248 | /* Refresh the desc even if buffer_addrs didn't change | ||
1249 | * because each write-back erases this info. | ||
1250 | */ | ||
1251 | rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma); | ||
1252 | rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); | ||
1253 | i++; | ||
1254 | if (i == rx_ring->count) | ||
1255 | i = 0; | ||
1256 | } | ||
1257 | |||
1258 | no_buffers: | ||
1259 | if (rx_ring->next_to_use != i) | ||
1260 | i40e_release_rx_desc(rx_ring, i); | ||
1261 | } | ||
1262 | |||
1263 | /** | ||
1264 | * i40e_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer | ||
1152 | * @rx_ring: ring to place buffers on | 1265 | * @rx_ring: ring to place buffers on |
1153 | * @cleaned_count: number of buffers to replace | 1266 | * @cleaned_count: number of buffers to replace |
1154 | **/ | 1267 | **/ |
1155 | void i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count) | 1268 | void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) |
1156 | { | 1269 | { |
1157 | u16 i = rx_ring->next_to_use; | 1270 | u16 i = rx_ring->next_to_use; |
1158 | union i40e_rx_desc *rx_desc; | 1271 | union i40e_rx_desc *rx_desc; |
@@ -1192,40 +1305,8 @@ void i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count) | |||
1192 | } | 1305 | } |
1193 | } | 1306 | } |
1194 | 1307 | ||
1195 | if (ring_is_ps_enabled(rx_ring)) { | 1308 | rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); |
1196 | if (!bi->page) { | 1309 | rx_desc->read.hdr_addr = 0; |
1197 | bi->page = alloc_page(GFP_ATOMIC); | ||
1198 | if (!bi->page) { | ||
1199 | rx_ring->rx_stats.alloc_page_failed++; | ||
1200 | goto no_buffers; | ||
1201 | } | ||
1202 | } | ||
1203 | |||
1204 | if (!bi->page_dma) { | ||
1205 | /* use a half page if we're re-using */ | ||
1206 | bi->page_offset ^= PAGE_SIZE / 2; | ||
1207 | bi->page_dma = dma_map_page(rx_ring->dev, | ||
1208 | bi->page, | ||
1209 | bi->page_offset, | ||
1210 | PAGE_SIZE / 2, | ||
1211 | DMA_FROM_DEVICE); | ||
1212 | if (dma_mapping_error(rx_ring->dev, | ||
1213 | bi->page_dma)) { | ||
1214 | rx_ring->rx_stats.alloc_page_failed++; | ||
1215 | bi->page_dma = 0; | ||
1216 | goto no_buffers; | ||
1217 | } | ||
1218 | } | ||
1219 | |||
1220 | /* Refresh the desc even if buffer_addrs didn't change | ||
1221 | * because each write-back erases this info. | ||
1222 | */ | ||
1223 | rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma); | ||
1224 | rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); | ||
1225 | } else { | ||
1226 | rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); | ||
1227 | rx_desc->read.hdr_addr = 0; | ||
1228 | } | ||
1229 | i++; | 1310 | i++; |
1230 | if (i == rx_ring->count) | 1311 | if (i == rx_ring->count) |
1231 | i = 0; | 1312 | i = 0; |
@@ -1279,10 +1360,10 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, | |||
1279 | struct iphdr *iph; | 1360 | struct iphdr *iph; |
1280 | __sum16 csum; | 1361 | __sum16 csum; |
1281 | 1362 | ||
1282 | ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) && | 1363 | ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) && |
1283 | (rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4); | 1364 | (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4); |
1284 | ipv6_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT6_MAC_PAY3) && | 1365 | ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) && |
1285 | (rx_ptype < I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4); | 1366 | (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4); |
1286 | 1367 | ||
1287 | skb->ip_summed = CHECKSUM_NONE; | 1368 | skb->ip_summed = CHECKSUM_NONE; |
1288 | 1369 | ||
@@ -1410,13 +1491,13 @@ static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype) | |||
1410 | } | 1491 | } |
1411 | 1492 | ||
1412 | /** | 1493 | /** |
1413 | * i40e_clean_rx_irq - Reclaim resources after receive completes | 1494 | * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split |
1414 | * @rx_ring: rx ring to clean | 1495 | * @rx_ring: rx ring to clean |
1415 | * @budget: how many cleans we're allowed | 1496 | * @budget: how many cleans we're allowed |
1416 | * | 1497 | * |
1417 | * Returns true if there's any budget left (e.g. the clean is finished) | 1498 | * Returns true if there's any budget left (e.g. the clean is finished) |
1418 | **/ | 1499 | **/ |
1419 | static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) | 1500 | static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) |
1420 | { | 1501 | { |
1421 | unsigned int total_rx_bytes = 0, total_rx_packets = 0; | 1502 | unsigned int total_rx_bytes = 0, total_rx_packets = 0; |
1422 | u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo; | 1503 | u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo; |
@@ -1432,25 +1513,51 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) | |||
1432 | if (budget <= 0) | 1513 | if (budget <= 0) |
1433 | return 0; | 1514 | return 0; |
1434 | 1515 | ||
1435 | rx_desc = I40E_RX_DESC(rx_ring, i); | 1516 | do { |
1436 | qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); | ||
1437 | rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> | ||
1438 | I40E_RXD_QW1_STATUS_SHIFT; | ||
1439 | |||
1440 | while (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) { | ||
1441 | union i40e_rx_desc *next_rxd; | ||
1442 | struct i40e_rx_buffer *rx_bi; | 1517 | struct i40e_rx_buffer *rx_bi; |
1443 | struct sk_buff *skb; | 1518 | struct sk_buff *skb; |
1444 | u16 vlan_tag; | 1519 | u16 vlan_tag; |
1520 | /* return some buffers to hardware, one at a time is too slow */ | ||
1521 | if (cleaned_count >= I40E_RX_BUFFER_WRITE) { | ||
1522 | i40e_alloc_rx_buffers_ps(rx_ring, cleaned_count); | ||
1523 | cleaned_count = 0; | ||
1524 | } | ||
1525 | |||
1526 | i = rx_ring->next_to_clean; | ||
1527 | rx_desc = I40E_RX_DESC(rx_ring, i); | ||
1528 | qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); | ||
1529 | rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> | ||
1530 | I40E_RXD_QW1_STATUS_SHIFT; | ||
1531 | |||
1532 | if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT))) | ||
1533 | break; | ||
1534 | |||
1535 | /* This memory barrier is needed to keep us from reading | ||
1536 | * any other fields out of the rx_desc until we know the | ||
1537 | * DD bit is set. | ||
1538 | */ | ||
1539 | rmb(); | ||
1445 | if (i40e_rx_is_programming_status(qword)) { | 1540 | if (i40e_rx_is_programming_status(qword)) { |
1446 | i40e_clean_programming_status(rx_ring, rx_desc); | 1541 | i40e_clean_programming_status(rx_ring, rx_desc); |
1447 | I40E_RX_NEXT_DESC_PREFETCH(rx_ring, i, next_rxd); | 1542 | I40E_RX_INCREMENT(rx_ring, i); |
1448 | goto next_desc; | 1543 | continue; |
1449 | } | 1544 | } |
1450 | rx_bi = &rx_ring->rx_bi[i]; | 1545 | rx_bi = &rx_ring->rx_bi[i]; |
1451 | skb = rx_bi->skb; | 1546 | skb = rx_bi->skb; |
1452 | prefetch(skb->data); | 1547 | if (likely(!skb)) { |
1453 | 1548 | skb = netdev_alloc_skb_ip_align(rx_ring->netdev, | |
1549 | rx_ring->rx_hdr_len); | ||
1550 | if (!skb) | ||
1551 | rx_ring->rx_stats.alloc_buff_failed++; | ||
1552 | /* initialize queue mapping */ | ||
1553 | skb_record_rx_queue(skb, rx_ring->queue_index); | ||
1554 | /* we are reusing so sync this buffer for CPU use */ | ||
1555 | dma_sync_single_range_for_cpu(rx_ring->dev, | ||
1556 | rx_bi->dma, | ||
1557 | 0, | ||
1558 | rx_ring->rx_hdr_len, | ||
1559 | DMA_FROM_DEVICE); | ||
1560 | } | ||
1454 | rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> | 1561 | rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> |
1455 | I40E_RXD_QW1_LENGTH_PBUF_SHIFT; | 1562 | I40E_RXD_QW1_LENGTH_PBUF_SHIFT; |
1456 | rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >> | 1563 | rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >> |
@@ -1465,40 +1572,30 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) | |||
1465 | 1572 | ||
1466 | rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> | 1573 | rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> |
1467 | I40E_RXD_QW1_PTYPE_SHIFT; | 1574 | I40E_RXD_QW1_PTYPE_SHIFT; |
1575 | prefetch(rx_bi->page); | ||
1468 | rx_bi->skb = NULL; | 1576 | rx_bi->skb = NULL; |
1469 | 1577 | cleaned_count++; | |
1470 | /* This memory barrier is needed to keep us from reading | 1578 | if (rx_hbo || rx_sph) { |
1471 | * any other fields out of the rx_desc until we know the | 1579 | int len; |
1472 | * STATUS_DD bit is set | ||
1473 | */ | ||
1474 | rmb(); | ||
1475 | |||
1476 | /* Get the header and possibly the whole packet | ||
1477 | * If this is an skb from previous receive dma will be 0 | ||
1478 | */ | ||
1479 | if (rx_bi->dma) { | ||
1480 | u16 len; | ||
1481 | |||
1482 | if (rx_hbo) | 1580 | if (rx_hbo) |
1483 | len = I40E_RX_HDR_SIZE; | 1581 | len = I40E_RX_HDR_SIZE; |
1484 | else if (rx_sph) | ||
1485 | len = rx_header_len; | ||
1486 | else if (rx_packet_len) | ||
1487 | len = rx_packet_len; /* 1buf/no split found */ | ||
1488 | else | 1582 | else |
1489 | len = rx_header_len; /* split always mode */ | 1583 | len = rx_header_len; |
1490 | 1584 | memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len); | |
1491 | skb_put(skb, len); | 1585 | } else if (skb->len == 0) { |
1492 | dma_unmap_single(rx_ring->dev, | 1586 | int len; |
1493 | rx_bi->dma, | 1587 | |
1494 | rx_ring->rx_buf_len, | 1588 | len = (rx_packet_len > skb_headlen(skb) ? |
1495 | DMA_FROM_DEVICE); | 1589 | skb_headlen(skb) : rx_packet_len); |
1496 | rx_bi->dma = 0; | 1590 | memcpy(__skb_put(skb, len), |
1591 | rx_bi->page + rx_bi->page_offset, | ||
1592 | len); | ||
1593 | rx_bi->page_offset += len; | ||
1594 | rx_packet_len -= len; | ||
1497 | } | 1595 | } |
1498 | 1596 | ||
1499 | /* Get the rest of the data if this was a header split */ | 1597 | /* Get the rest of the data if this was a header split */ |
1500 | if (ring_is_ps_enabled(rx_ring) && rx_packet_len) { | 1598 | if (rx_packet_len) { |
1501 | |||
1502 | skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, | 1599 | skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, |
1503 | rx_bi->page, | 1600 | rx_bi->page, |
1504 | rx_bi->page_offset, | 1601 | rx_bi->page_offset, |
@@ -1520,22 +1617,16 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) | |||
1520 | DMA_FROM_DEVICE); | 1617 | DMA_FROM_DEVICE); |
1521 | rx_bi->page_dma = 0; | 1618 | rx_bi->page_dma = 0; |
1522 | } | 1619 | } |
1523 | I40E_RX_NEXT_DESC_PREFETCH(rx_ring, i, next_rxd); | 1620 | I40E_RX_INCREMENT(rx_ring, i); |
1524 | 1621 | ||
1525 | if (unlikely( | 1622 | if (unlikely( |
1526 | !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) { | 1623 | !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) { |
1527 | struct i40e_rx_buffer *next_buffer; | 1624 | struct i40e_rx_buffer *next_buffer; |
1528 | 1625 | ||
1529 | next_buffer = &rx_ring->rx_bi[i]; | 1626 | next_buffer = &rx_ring->rx_bi[i]; |
1530 | 1627 | next_buffer->skb = skb; | |
1531 | if (ring_is_ps_enabled(rx_ring)) { | ||
1532 | rx_bi->skb = next_buffer->skb; | ||
1533 | rx_bi->dma = next_buffer->dma; | ||
1534 | next_buffer->skb = skb; | ||
1535 | next_buffer->dma = 0; | ||
1536 | } | ||
1537 | rx_ring->rx_stats.non_eop_descs++; | 1628 | rx_ring->rx_stats.non_eop_descs++; |
1538 | goto next_desc; | 1629 | continue; |
1539 | } | 1630 | } |
1540 | 1631 | ||
1541 | /* ERR_MASK will only have valid bits if EOP set */ | 1632 | /* ERR_MASK will only have valid bits if EOP set */ |
@@ -1544,7 +1635,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) | |||
1544 | /* TODO: shouldn't we increment a counter indicating the | 1635 | /* TODO: shouldn't we increment a counter indicating the |
1545 | * drop? | 1636 | * drop? |
1546 | */ | 1637 | */ |
1547 | goto next_desc; | 1638 | continue; |
1548 | } | 1639 | } |
1549 | 1640 | ||
1550 | skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc), | 1641 | skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc), |
@@ -1570,33 +1661,149 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) | |||
1570 | #ifdef I40E_FCOE | 1661 | #ifdef I40E_FCOE |
1571 | if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) { | 1662 | if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) { |
1572 | dev_kfree_skb_any(skb); | 1663 | dev_kfree_skb_any(skb); |
1573 | goto next_desc; | 1664 | continue; |
1574 | } | 1665 | } |
1575 | #endif | 1666 | #endif |
1667 | skb_mark_napi_id(skb, &rx_ring->q_vector->napi); | ||
1576 | i40e_receive_skb(rx_ring, skb, vlan_tag); | 1668 | i40e_receive_skb(rx_ring, skb, vlan_tag); |
1577 | 1669 | ||
1578 | rx_ring->netdev->last_rx = jiffies; | 1670 | rx_ring->netdev->last_rx = jiffies; |
1579 | budget--; | ||
1580 | next_desc: | ||
1581 | rx_desc->wb.qword1.status_error_len = 0; | 1671 | rx_desc->wb.qword1.status_error_len = 0; |
1582 | if (!budget) | ||
1583 | break; | ||
1584 | 1672 | ||
1585 | cleaned_count++; | 1673 | } while (likely(total_rx_packets < budget)); |
1674 | |||
1675 | u64_stats_update_begin(&rx_ring->syncp); | ||
1676 | rx_ring->stats.packets += total_rx_packets; | ||
1677 | rx_ring->stats.bytes += total_rx_bytes; | ||
1678 | u64_stats_update_end(&rx_ring->syncp); | ||
1679 | rx_ring->q_vector->rx.total_packets += total_rx_packets; | ||
1680 | rx_ring->q_vector->rx.total_bytes += total_rx_bytes; | ||
1681 | |||
1682 | return total_rx_packets; | ||
1683 | } | ||
1684 | |||
1685 | /** | ||
1686 | * i40e_clean_rx_irq_1buf - Reclaim resources after receive; single buffer | ||
1687 | * @rx_ring: rx ring to clean | ||
1688 | * @budget: how many cleans we're allowed | ||
1689 | * | ||
1690 | * Returns number of packets cleaned | ||
1691 | **/ | ||
1692 | static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) | ||
1693 | { | ||
1694 | unsigned int total_rx_bytes = 0, total_rx_packets = 0; | ||
1695 | u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); | ||
1696 | struct i40e_vsi *vsi = rx_ring->vsi; | ||
1697 | union i40e_rx_desc *rx_desc; | ||
1698 | u32 rx_error, rx_status; | ||
1699 | u16 rx_packet_len; | ||
1700 | u8 rx_ptype; | ||
1701 | u64 qword; | ||
1702 | u16 i; | ||
1703 | |||
1704 | do { | ||
1705 | struct i40e_rx_buffer *rx_bi; | ||
1706 | struct sk_buff *skb; | ||
1707 | u16 vlan_tag; | ||
1586 | /* return some buffers to hardware, one at a time is too slow */ | 1708 | /* return some buffers to hardware, one at a time is too slow */ |
1587 | if (cleaned_count >= I40E_RX_BUFFER_WRITE) { | 1709 | if (cleaned_count >= I40E_RX_BUFFER_WRITE) { |
1588 | i40e_alloc_rx_buffers(rx_ring, cleaned_count); | 1710 | i40e_alloc_rx_buffers_1buf(rx_ring, cleaned_count); |
1589 | cleaned_count = 0; | 1711 | cleaned_count = 0; |
1590 | } | 1712 | } |
1591 | 1713 | ||
1592 | /* use prefetched values */ | 1714 | i = rx_ring->next_to_clean; |
1593 | rx_desc = next_rxd; | 1715 | rx_desc = I40E_RX_DESC(rx_ring, i); |
1594 | qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); | 1716 | qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); |
1595 | rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> | 1717 | rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> |
1596 | I40E_RXD_QW1_STATUS_SHIFT; | 1718 | I40E_RXD_QW1_STATUS_SHIFT; |
1597 | } | 1719 | |
1720 | if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT))) | ||
1721 | break; | ||
1722 | |||
1723 | /* This memory barrier is needed to keep us from reading | ||
1724 | * any other fields out of the rx_desc until we know the | ||
1725 | * DD bit is set. | ||
1726 | */ | ||
1727 | rmb(); | ||
1728 | |||
1729 | if (i40e_rx_is_programming_status(qword)) { | ||
1730 | i40e_clean_programming_status(rx_ring, rx_desc); | ||
1731 | I40E_RX_INCREMENT(rx_ring, i); | ||
1732 | continue; | ||
1733 | } | ||
1734 | rx_bi = &rx_ring->rx_bi[i]; | ||
1735 | skb = rx_bi->skb; | ||
1736 | prefetch(skb->data); | ||
1737 | |||
1738 | rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> | ||
1739 | I40E_RXD_QW1_LENGTH_PBUF_SHIFT; | ||
1740 | |||
1741 | rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >> | ||
1742 | I40E_RXD_QW1_ERROR_SHIFT; | ||
1743 | rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT); | ||
1744 | |||
1745 | rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> | ||
1746 | I40E_RXD_QW1_PTYPE_SHIFT; | ||
1747 | rx_bi->skb = NULL; | ||
1748 | cleaned_count++; | ||
1749 | |||
1750 | /* Get the header and possibly the whole packet | ||
1751 | * If this is an skb from previous receive dma will be 0 | ||
1752 | */ | ||
1753 | skb_put(skb, rx_packet_len); | ||
1754 | dma_unmap_single(rx_ring->dev, rx_bi->dma, rx_ring->rx_buf_len, | ||
1755 | DMA_FROM_DEVICE); | ||
1756 | rx_bi->dma = 0; | ||
1757 | |||
1758 | I40E_RX_INCREMENT(rx_ring, i); | ||
1759 | |||
1760 | if (unlikely( | ||
1761 | !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) { | ||
1762 | rx_ring->rx_stats.non_eop_descs++; | ||
1763 | continue; | ||
1764 | } | ||
1765 | |||
1766 | /* ERR_MASK will only have valid bits if EOP set */ | ||
1767 | if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) { | ||
1768 | dev_kfree_skb_any(skb); | ||
1769 | /* TODO: shouldn't we increment a counter indicating the | ||
1770 | * drop? | ||
1771 | */ | ||
1772 | continue; | ||
1773 | } | ||
1774 | |||
1775 | skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc), | ||
1776 | i40e_ptype_to_hash(rx_ptype)); | ||
1777 | if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) { | ||
1778 | i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status & | ||
1779 | I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >> | ||
1780 | I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT); | ||
1781 | rx_ring->last_rx_timestamp = jiffies; | ||
1782 | } | ||
1783 | |||
1784 | /* probably a little skewed due to removing CRC */ | ||
1785 | total_rx_bytes += skb->len; | ||
1786 | total_rx_packets++; | ||
1787 | |||
1788 | skb->protocol = eth_type_trans(skb, rx_ring->netdev); | ||
1789 | |||
1790 | i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype); | ||
1791 | |||
1792 | vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) | ||
1793 | ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) | ||
1794 | : 0; | ||
1795 | #ifdef I40E_FCOE | ||
1796 | if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) { | ||
1797 | dev_kfree_skb_any(skb); | ||
1798 | continue; | ||
1799 | } | ||
1800 | #endif | ||
1801 | i40e_receive_skb(rx_ring, skb, vlan_tag); | ||
1802 | |||
1803 | rx_ring->netdev->last_rx = jiffies; | ||
1804 | rx_desc->wb.qword1.status_error_len = 0; | ||
1805 | } while (likely(total_rx_packets < budget)); | ||
1598 | 1806 | ||
1599 | rx_ring->next_to_clean = i; | ||
1600 | u64_stats_update_begin(&rx_ring->syncp); | 1807 | u64_stats_update_begin(&rx_ring->syncp); |
1601 | rx_ring->stats.packets += total_rx_packets; | 1808 | rx_ring->stats.packets += total_rx_packets; |
1602 | rx_ring->stats.bytes += total_rx_bytes; | 1809 | rx_ring->stats.bytes += total_rx_bytes; |
@@ -1604,10 +1811,7 @@ next_desc: | |||
1604 | rx_ring->q_vector->rx.total_packets += total_rx_packets; | 1811 | rx_ring->q_vector->rx.total_packets += total_rx_packets; |
1605 | rx_ring->q_vector->rx.total_bytes += total_rx_bytes; | 1812 | rx_ring->q_vector->rx.total_bytes += total_rx_bytes; |
1606 | 1813 | ||
1607 | if (cleaned_count) | 1814 | return total_rx_packets; |
1608 | i40e_alloc_rx_buffers(rx_ring, cleaned_count); | ||
1609 | |||
1610 | return budget > 0; | ||
1611 | } | 1815 | } |
1612 | 1816 | ||
1613 | /** | 1817 | /** |
@@ -1628,6 +1832,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) | |||
1628 | bool clean_complete = true; | 1832 | bool clean_complete = true; |
1629 | bool arm_wb = false; | 1833 | bool arm_wb = false; |
1630 | int budget_per_ring; | 1834 | int budget_per_ring; |
1835 | int cleaned; | ||
1631 | 1836 | ||
1632 | if (test_bit(__I40E_DOWN, &vsi->state)) { | 1837 | if (test_bit(__I40E_DOWN, &vsi->state)) { |
1633 | napi_complete(napi); | 1838 | napi_complete(napi); |
@@ -1647,8 +1852,14 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) | |||
1647 | */ | 1852 | */ |
1648 | budget_per_ring = max(budget/q_vector->num_ringpairs, 1); | 1853 | budget_per_ring = max(budget/q_vector->num_ringpairs, 1); |
1649 | 1854 | ||
1650 | i40e_for_each_ring(ring, q_vector->rx) | 1855 | i40e_for_each_ring(ring, q_vector->rx) { |
1651 | clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring); | 1856 | if (ring_is_ps_enabled(ring)) |
1857 | cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring); | ||
1858 | else | ||
1859 | cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring); | ||
1860 | /* if we didn't clean as many as budgeted, we must be done */ | ||
1861 | clean_complete &= (budget_per_ring != cleaned); | ||
1862 | } | ||
1652 | 1863 | ||
1653 | /* If work not completed, return budget and polling will return */ | 1864 | /* If work not completed, return budget and polling will return */ |
1654 | if (!clean_complete) { | 1865 | if (!clean_complete) { |
@@ -1838,6 +2049,9 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, | |||
1838 | tx_flags |= I40E_TX_FLAGS_SW_VLAN; | 2049 | tx_flags |= I40E_TX_FLAGS_SW_VLAN; |
1839 | } | 2050 | } |
1840 | 2051 | ||
2052 | if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED)) | ||
2053 | goto out; | ||
2054 | |||
1841 | /* Insert 802.1p priority into VLAN header */ | 2055 | /* Insert 802.1p priority into VLAN header */ |
1842 | if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) || | 2056 | if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) || |
1843 | (skb->priority != TC_PRIO_CONTROL)) { | 2057 | (skb->priority != TC_PRIO_CONTROL)) { |
@@ -1858,6 +2072,8 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, | |||
1858 | tx_flags |= I40E_TX_FLAGS_HW_VLAN; | 2072 | tx_flags |= I40E_TX_FLAGS_HW_VLAN; |
1859 | } | 2073 | } |
1860 | } | 2074 | } |
2075 | |||
2076 | out: | ||
1861 | *flags = tx_flags; | 2077 | *flags = tx_flags; |
1862 | return 0; | 2078 | return 0; |
1863 | } | 2079 | } |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index dff0baeb1ecc..4b0b8102cdc3 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h | |||
@@ -96,6 +96,14 @@ enum i40e_dyn_idx_t { | |||
96 | 96 | ||
97 | /* How many Rx Buffers do we bundle into one write to the hardware ? */ | 97 | /* How many Rx Buffers do we bundle into one write to the hardware ? */ |
98 | #define I40E_RX_BUFFER_WRITE 16 /* Must be power of 2 */ | 98 | #define I40E_RX_BUFFER_WRITE 16 /* Must be power of 2 */ |
99 | #define I40E_RX_INCREMENT(r, i) \ | ||
100 | do { \ | ||
101 | (i)++; \ | ||
102 | if ((i) == (r)->count) \ | ||
103 | i = 0; \ | ||
104 | r->next_to_clean = i; \ | ||
105 | } while (0) | ||
106 | |||
99 | #define I40E_RX_NEXT_DESC(r, i, n) \ | 107 | #define I40E_RX_NEXT_DESC(r, i, n) \ |
100 | do { \ | 108 | do { \ |
101 | (i)++; \ | 109 | (i)++; \ |
@@ -152,6 +160,7 @@ struct i40e_tx_buffer { | |||
152 | 160 | ||
153 | struct i40e_rx_buffer { | 161 | struct i40e_rx_buffer { |
154 | struct sk_buff *skb; | 162 | struct sk_buff *skb; |
163 | void *hdr_buf; | ||
155 | dma_addr_t dma; | 164 | dma_addr_t dma; |
156 | struct page *page; | 165 | struct page *page; |
157 | dma_addr_t page_dma; | 166 | dma_addr_t page_dma; |
@@ -224,8 +233,8 @@ struct i40e_ring { | |||
224 | u16 rx_buf_len; | 233 | u16 rx_buf_len; |
225 | u8 dtype; | 234 | u8 dtype; |
226 | #define I40E_RX_DTYPE_NO_SPLIT 0 | 235 | #define I40E_RX_DTYPE_NO_SPLIT 0 |
227 | #define I40E_RX_DTYPE_SPLIT_ALWAYS 1 | 236 | #define I40E_RX_DTYPE_HEADER_SPLIT 1 |
228 | #define I40E_RX_DTYPE_HEADER_SPLIT 2 | 237 | #define I40E_RX_DTYPE_SPLIT_ALWAYS 2 |
229 | u8 hsplit; | 238 | u8 hsplit; |
230 | #define I40E_RX_SPLIT_L2 0x1 | 239 | #define I40E_RX_SPLIT_L2 0x1 |
231 | #define I40E_RX_SPLIT_IP 0x2 | 240 | #define I40E_RX_SPLIT_IP 0x2 |
@@ -281,7 +290,9 @@ struct i40e_ring_container { | |||
281 | #define i40e_for_each_ring(pos, head) \ | 290 | #define i40e_for_each_ring(pos, head) \ |
282 | for (pos = (head).ring; pos != NULL; pos = pos->next) | 291 | for (pos = (head).ring; pos != NULL; pos = pos->next) |
283 | 292 | ||
284 | void i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count); | 293 | void i40e_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count); |
294 | void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count); | ||
295 | void i40e_alloc_rx_headers(struct i40e_ring *rxr); | ||
285 | netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev); | 296 | netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev); |
286 | void i40e_clean_tx_ring(struct i40e_ring *tx_ring); | 297 | void i40e_clean_tx_ring(struct i40e_ring *tx_ring); |
287 | void i40e_clean_rx_ring(struct i40e_ring *rx_ring); | 298 | void i40e_clean_rx_ring(struct i40e_ring *rx_ring); |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index e9901ef06a63..90069396bb28 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | * | 2 | * |
3 | * Intel Ethernet Controller XL710 Family Linux Driver | 3 | * Intel Ethernet Controller XL710 Family Linux Driver |
4 | * Copyright(c) 2013 - 2014 Intel Corporation. | 4 | * Copyright(c) 2013 - 2015 Intel Corporation. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms and conditions of the GNU General Public License, | 7 | * under the terms and conditions of the GNU General Public License, |
@@ -175,12 +175,12 @@ struct i40e_link_status { | |||
175 | u8 an_info; | 175 | u8 an_info; |
176 | u8 ext_info; | 176 | u8 ext_info; |
177 | u8 loopback; | 177 | u8 loopback; |
178 | bool an_enabled; | ||
179 | /* is Link Status Event notification to SW enabled */ | 178 | /* is Link Status Event notification to SW enabled */ |
180 | bool lse_enable; | 179 | bool lse_enable; |
181 | u16 max_frame_size; | 180 | u16 max_frame_size; |
182 | bool crc_enable; | 181 | bool crc_enable; |
183 | u8 pacing; | 182 | u8 pacing; |
183 | u8 requested_speeds; | ||
184 | }; | 184 | }; |
185 | 185 | ||
186 | struct i40e_phy_info { | 186 | struct i40e_phy_info { |
@@ -1401,6 +1401,19 @@ struct i40e_lldp_variables { | |||
1401 | u16 crc8; | 1401 | u16 crc8; |
1402 | }; | 1402 | }; |
1403 | 1403 | ||
1404 | /* Offsets into Alternate Ram */ | ||
1405 | #define I40E_ALT_STRUCT_FIRST_PF_OFFSET 0 /* in dwords */ | ||
1406 | #define I40E_ALT_STRUCT_DWORDS_PER_PF 64 /* in dwords */ | ||
1407 | #define I40E_ALT_STRUCT_OUTER_VLAN_TAG_OFFSET 0xD /* in dwords */ | ||
1408 | #define I40E_ALT_STRUCT_USER_PRIORITY_OFFSET 0xC /* in dwords */ | ||
1409 | #define I40E_ALT_STRUCT_MIN_BW_OFFSET 0xE /* in dwords */ | ||
1410 | #define I40E_ALT_STRUCT_MAX_BW_OFFSET 0xF /* in dwords */ | ||
1411 | |||
1412 | /* Alternate Ram Bandwidth Masks */ | ||
1413 | #define I40E_ALT_BW_VALUE_MASK 0xFF | ||
1414 | #define I40E_ALT_BW_RELATIVE_MASK 0x40000000 | ||
1415 | #define I40E_ALT_BW_VALID_MASK 0x80000000 | ||
1416 | |||
1404 | /* RSS Hash Table Size */ | 1417 | /* RSS Hash Table Size */ |
1405 | #define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000 | 1418 | #define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000 |
1406 | #endif /* _I40E_TYPE_H_ */ | 1419 | #endif /* _I40E_TYPE_H_ */ |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h index 61dd1b187624..2d20af290fbf 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h | |||
@@ -59,31 +59,29 @@ | |||
59 | * of the virtchnl_msg structure. | 59 | * of the virtchnl_msg structure. |
60 | */ | 60 | */ |
61 | enum i40e_virtchnl_ops { | 61 | enum i40e_virtchnl_ops { |
62 | /* VF sends req. to pf for the following | 62 | /* The PF sends status change events to VFs using |
63 | * ops. | 63 | * the I40E_VIRTCHNL_OP_EVENT opcode. |
64 | * VFs send requests to the PF using the other ops. | ||
64 | */ | 65 | */ |
65 | I40E_VIRTCHNL_OP_UNKNOWN = 0, | 66 | I40E_VIRTCHNL_OP_UNKNOWN = 0, |
66 | I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */ | 67 | I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */ |
67 | I40E_VIRTCHNL_OP_RESET_VF, | 68 | I40E_VIRTCHNL_OP_RESET_VF = 2, |
68 | I40E_VIRTCHNL_OP_GET_VF_RESOURCES, | 69 | I40E_VIRTCHNL_OP_GET_VF_RESOURCES = 3, |
69 | I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE, | 70 | I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE = 4, |
70 | I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE, | 71 | I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE = 5, |
71 | I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, | 72 | I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6, |
72 | I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, | 73 | I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP = 7, |
73 | I40E_VIRTCHNL_OP_ENABLE_QUEUES, | 74 | I40E_VIRTCHNL_OP_ENABLE_QUEUES = 8, |
74 | I40E_VIRTCHNL_OP_DISABLE_QUEUES, | 75 | I40E_VIRTCHNL_OP_DISABLE_QUEUES = 9, |
75 | I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, | 76 | I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS = 10, |
76 | I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, | 77 | I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS = 11, |
77 | I40E_VIRTCHNL_OP_ADD_VLAN, | 78 | I40E_VIRTCHNL_OP_ADD_VLAN = 12, |
78 | I40E_VIRTCHNL_OP_DEL_VLAN, | 79 | I40E_VIRTCHNL_OP_DEL_VLAN = 13, |
79 | I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, | 80 | I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14, |
80 | I40E_VIRTCHNL_OP_GET_STATS, | 81 | I40E_VIRTCHNL_OP_GET_STATS = 15, |
81 | I40E_VIRTCHNL_OP_FCOE, | 82 | I40E_VIRTCHNL_OP_FCOE = 16, |
82 | I40E_VIRTCHNL_OP_CONFIG_RSS, | 83 | I40E_VIRTCHNL_OP_EVENT = 17, |
83 | /* PF sends status change events to vfs using | 84 | I40E_VIRTCHNL_OP_CONFIG_RSS = 18, |
84 | * the following op. | ||
85 | */ | ||
86 | I40E_VIRTCHNL_OP_EVENT, | ||
87 | }; | 85 | }; |
88 | 86 | ||
89 | /* Virtual channel message descriptor. This overlays the admin queue | 87 | /* Virtual channel message descriptor. This overlays the admin queue |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 40f042af4131..910c45e83fdd 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | * | 2 | * |
3 | * Intel Ethernet Controller XL710 Family Linux Driver | 3 | * Intel Ethernet Controller XL710 Family Linux Driver |
4 | * Copyright(c) 2013 - 2014 Intel Corporation. | 4 | * Copyright(c) 2013 - 2015 Intel Corporation. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms and conditions of the GNU General Public License, | 7 | * under the terms and conditions of the GNU General Public License, |
@@ -752,7 +752,7 @@ void i40e_enable_pf_switch_lb(struct i40e_pf *pf) | |||
752 | * | 752 | * |
753 | * disable switch loop back or die - no point in a return value | 753 | * disable switch loop back or die - no point in a return value |
754 | **/ | 754 | **/ |
755 | static void i40e_disable_pf_switch_lb(struct i40e_pf *pf) | 755 | void i40e_disable_pf_switch_lb(struct i40e_pf *pf) |
756 | { | 756 | { |
757 | struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; | 757 | struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; |
758 | struct i40e_vsi_context ctxt; | 758 | struct i40e_vsi_context ctxt; |
@@ -832,7 +832,6 @@ void i40e_free_vfs(struct i40e_pf *pf) | |||
832 | bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32; | 832 | bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32; |
833 | wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx)); | 833 | wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx)); |
834 | } | 834 | } |
835 | i40e_disable_pf_switch_lb(pf); | ||
836 | } else { | 835 | } else { |
837 | dev_warn(&pf->pdev->dev, | 836 | dev_warn(&pf->pdev->dev, |
838 | "unable to disable SR-IOV because VFs are assigned.\n"); | 837 | "unable to disable SR-IOV because VFs are assigned.\n"); |
@@ -891,7 +890,6 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs) | |||
891 | } | 890 | } |
892 | pf->num_alloc_vfs = num_alloc_vfs; | 891 | pf->num_alloc_vfs = num_alloc_vfs; |
893 | 892 | ||
894 | i40e_enable_pf_switch_lb(pf); | ||
895 | err_alloc: | 893 | err_alloc: |
896 | if (ret) | 894 | if (ret) |
897 | i40e_free_vfs(pf); | 895 | i40e_free_vfs(pf); |
@@ -2427,7 +2425,8 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable) | |||
2427 | ctxt.pf_num = pf->hw.pf_id; | 2425 | ctxt.pf_num = pf->hw.pf_id; |
2428 | ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID); | 2426 | ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID); |
2429 | if (enable) | 2427 | if (enable) |
2430 | ctxt.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK; | 2428 | ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK | |
2429 | I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK); | ||
2431 | ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); | 2430 | ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); |
2432 | if (ret) { | 2431 | if (ret) { |
2433 | dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n", | 2432 | dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n", |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index 9452f5247cff..ef777a62e393 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | * | 2 | * |
3 | * Intel Ethernet Controller XL710 Family Linux Driver | 3 | * Intel Ethernet Controller XL710 Family Linux Driver |
4 | * Copyright(c) 2013 - 2014 Intel Corporation. | 4 | * Copyright(c) 2013 - 2015 Intel Corporation. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms and conditions of the GNU General Public License, | 7 | * under the terms and conditions of the GNU General Public License, |
@@ -127,5 +127,6 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable); | |||
127 | void i40e_vc_notify_link_state(struct i40e_pf *pf); | 127 | void i40e_vc_notify_link_state(struct i40e_pf *pf); |
128 | void i40e_vc_notify_reset(struct i40e_pf *pf); | 128 | void i40e_vc_notify_reset(struct i40e_pf *pf); |
129 | void i40e_enable_pf_switch_lb(struct i40e_pf *pf); | 129 | void i40e_enable_pf_switch_lb(struct i40e_pf *pf); |
130 | void i40e_disable_pf_switch_lb(struct i40e_pf *pf); | ||
130 | 131 | ||
131 | #endif /* _I40E_VIRTCHNL_PF_H_ */ | 132 | #endif /* _I40E_VIRTCHNL_PF_H_ */ |
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h index 60f04e96a80e..ef43d68f67b3 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h | |||
@@ -93,6 +93,7 @@ struct i40e_adminq_info { | |||
93 | u16 asq_buf_size; /* send queue buffer size */ | 93 | u16 asq_buf_size; /* send queue buffer size */ |
94 | u16 fw_maj_ver; /* firmware major version */ | 94 | u16 fw_maj_ver; /* firmware major version */ |
95 | u16 fw_min_ver; /* firmware minor version */ | 95 | u16 fw_min_ver; /* firmware minor version */ |
96 | u32 fw_build; /* firmware build number */ | ||
96 | u16 api_maj_ver; /* api major version */ | 97 | u16 api_maj_ver; /* api major version */ |
97 | u16 api_min_ver; /* api minor version */ | 98 | u16 api_min_ver; /* api minor version */ |
98 | bool nvm_release_on_done; | 99 | bool nvm_release_on_done; |
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c index 28c40c57d4f5..50b0ee54fc06 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_common.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c | |||
@@ -94,16 +94,19 @@ void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, | |||
94 | 94 | ||
95 | i40e_debug(hw, mask, | 95 | i40e_debug(hw, mask, |
96 | "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n", | 96 | "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n", |
97 | aq_desc->opcode, aq_desc->flags, aq_desc->datalen, | 97 | le16_to_cpu(aq_desc->opcode), |
98 | aq_desc->retval); | 98 | le16_to_cpu(aq_desc->flags), |
99 | le16_to_cpu(aq_desc->datalen), | ||
100 | le16_to_cpu(aq_desc->retval)); | ||
99 | i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n", | 101 | i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n", |
100 | aq_desc->cookie_high, aq_desc->cookie_low); | 102 | le32_to_cpu(aq_desc->cookie_high), |
103 | le32_to_cpu(aq_desc->cookie_low)); | ||
101 | i40e_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n", | 104 | i40e_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n", |
102 | aq_desc->params.internal.param0, | 105 | le32_to_cpu(aq_desc->params.internal.param0), |
103 | aq_desc->params.internal.param1); | 106 | le32_to_cpu(aq_desc->params.internal.param1)); |
104 | i40e_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n", | 107 | i40e_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n", |
105 | aq_desc->params.external.addr_high, | 108 | le32_to_cpu(aq_desc->params.external.addr_high), |
106 | aq_desc->params.external.addr_low); | 109 | le32_to_cpu(aq_desc->params.external.addr_low)); |
107 | 110 | ||
108 | if ((buffer != NULL) && (aq_desc->datalen != 0)) { | 111 | if ((buffer != NULL) && (aq_desc->datalen != 0)) { |
109 | memset(data, 0, sizeof(data)); | 112 | memset(data, 0, sizeof(data)); |
@@ -116,15 +119,19 @@ void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, | |||
116 | if ((i % 16) == 15) { | 119 | if ((i % 16) == 15) { |
117 | i40e_debug(hw, mask, | 120 | i40e_debug(hw, mask, |
118 | "\t0x%04X %08X %08X %08X %08X\n", | 121 | "\t0x%04X %08X %08X %08X %08X\n", |
119 | i - 15, data[0], data[1], data[2], | 122 | i - 15, le32_to_cpu(data[0]), |
120 | data[3]); | 123 | le32_to_cpu(data[1]), |
124 | le32_to_cpu(data[2]), | ||
125 | le32_to_cpu(data[3])); | ||
121 | memset(data, 0, sizeof(data)); | 126 | memset(data, 0, sizeof(data)); |
122 | } | 127 | } |
123 | } | 128 | } |
124 | if ((i % 16) != 0) | 129 | if ((i % 16) != 0) |
125 | i40e_debug(hw, mask, "\t0x%04X %08X %08X %08X %08X\n", | 130 | i40e_debug(hw, mask, "\t0x%04X %08X %08X %08X %08X\n", |
126 | i - (i % 16), data[0], data[1], data[2], | 131 | i - (i % 16), le32_to_cpu(data[0]), |
127 | data[3]); | 132 | le32_to_cpu(data[1]), |
133 | le32_to_cpu(data[2]), | ||
134 | le32_to_cpu(data[3])); | ||
128 | } | 135 | } |
129 | } | 136 | } |
130 | 137 | ||
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_register.h b/drivers/net/ethernet/intel/i40evf/i40e_register.h index c1f6a59bfea0..3cc737629bf7 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_register.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_register.h | |||
@@ -310,6 +310,10 @@ | |||
310 | #define I40E_PRTDCB_RUP2TC_UP6TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP6TC_SHIFT) | 310 | #define I40E_PRTDCB_RUP2TC_UP6TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP6TC_SHIFT) |
311 | #define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21 | 311 | #define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21 |
312 | #define I40E_PRTDCB_RUP2TC_UP7TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP7TC_SHIFT) | 312 | #define I40E_PRTDCB_RUP2TC_UP7TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP7TC_SHIFT) |
313 | #define I40E_PRTDCB_RUPTQ(_i) (0x00122400 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ | ||
314 | #define I40E_PRTDCB_RUPTQ_MAX_INDEX 7 | ||
315 | #define I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT 0 | ||
316 | #define I40E_PRTDCB_RUPTQ_RXQNUM_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT) | ||
313 | #define I40E_PRTDCB_TC2PFC 0x001C0980 /* Reset: CORER */ | 317 | #define I40E_PRTDCB_TC2PFC 0x001C0980 /* Reset: CORER */ |
314 | #define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0 | 318 | #define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0 |
315 | #define I40E_PRTDCB_TC2PFC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT) | 319 | #define I40E_PRTDCB_TC2PFC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT) |
@@ -421,6 +425,8 @@ | |||
421 | #define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT) | 425 | #define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT) |
422 | #define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20 | 426 | #define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20 |
423 | #define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK I40E_MASK(0x3F, I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT) | 427 | #define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK I40E_MASK(0x3F, I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT) |
428 | #define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT 26 | ||
429 | #define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_MASK I40E_MASK(0xF, I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT) | ||
424 | #define I40E_GLGEN_GPIO_SET 0x00088184 /* Reset: POR */ | 430 | #define I40E_GLGEN_GPIO_SET 0x00088184 /* Reset: POR */ |
425 | #define I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT 0 | 431 | #define I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT 0 |
426 | #define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT) | 432 | #define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT) |
@@ -484,7 +490,9 @@ | |||
484 | #define I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT 17 | 490 | #define I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT 17 |
485 | #define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT) | 491 | #define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT) |
486 | #define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT 18 | 492 | #define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT 18 |
487 | #define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x3FFF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT) | 493 | #define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x7FF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT) |
494 | #define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT 29 | ||
495 | #define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_MASK I40E_MASK(0x7, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT) | ||
488 | #define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ | 496 | #define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ |
489 | #define I40E_GLGEN_MDIO_I2C_SEL_MAX_INDEX 3 | 497 | #define I40E_GLGEN_MDIO_I2C_SEL_MAX_INDEX 3 |
490 | #define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT 0 | 498 | #define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT 0 |
@@ -548,9 +556,6 @@ | |||
548 | #define I40E_GLGEN_RSTCTL_GRSTDEL_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT) | 556 | #define I40E_GLGEN_RSTCTL_GRSTDEL_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT) |
549 | #define I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT 8 | 557 | #define I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT 8 |
550 | #define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT) | 558 | #define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT) |
551 | #define I40E_GLGEN_RSTENA_EMP 0x000B818C /* Reset: POR */ | ||
552 | #define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT 0 | ||
553 | #define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT) | ||
554 | #define I40E_GLGEN_RTRIG 0x000B8190 /* Reset: CORER */ | 559 | #define I40E_GLGEN_RTRIG 0x000B8190 /* Reset: CORER */ |
555 | #define I40E_GLGEN_RTRIG_CORER_SHIFT 0 | 560 | #define I40E_GLGEN_RTRIG_CORER_SHIFT 0 |
556 | #define I40E_GLGEN_RTRIG_CORER_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_CORER_SHIFT) | 561 | #define I40E_GLGEN_RTRIG_CORER_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_CORER_SHIFT) |
@@ -1066,7 +1071,7 @@ | |||
1066 | #define I40E_PFINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATEN_INTERVAL_SHIFT) | 1071 | #define I40E_PFINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATEN_INTERVAL_SHIFT) |
1067 | #define I40E_PFINT_RATEN_INTRL_ENA_SHIFT 6 | 1072 | #define I40E_PFINT_RATEN_INTRL_ENA_SHIFT 6 |
1068 | #define I40E_PFINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATEN_INTRL_ENA_SHIFT) | 1073 | #define I40E_PFINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATEN_INTRL_ENA_SHIFT) |
1069 | #define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: PFR */ | 1074 | #define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: CORER */ |
1070 | #define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2 | 1075 | #define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2 |
1071 | #define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT) | 1076 | #define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT) |
1072 | #define I40E_QINT_RQCTL(_Q) (0x0003A000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ | 1077 | #define I40E_QINT_RQCTL(_Q) (0x0003A000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ |
@@ -1171,7 +1176,7 @@ | |||
1171 | #define I40E_VFINT_ITRN_MAX_INDEX 2 | 1176 | #define I40E_VFINT_ITRN_MAX_INDEX 2 |
1172 | #define I40E_VFINT_ITRN_INTERVAL_SHIFT 0 | 1177 | #define I40E_VFINT_ITRN_INTERVAL_SHIFT 0 |
1173 | #define I40E_VFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN_INTERVAL_SHIFT) | 1178 | #define I40E_VFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN_INTERVAL_SHIFT) |
1174 | #define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ | 1179 | #define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ |
1175 | #define I40E_VFINT_STAT_CTL0_MAX_INDEX 127 | 1180 | #define I40E_VFINT_STAT_CTL0_MAX_INDEX 127 |
1176 | #define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2 | 1181 | #define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2 |
1177 | #define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT) | 1182 | #define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT) |
@@ -1803,9 +1808,6 @@ | |||
1803 | #define I40E_GLPCI_GSCN_0_3_MAX_INDEX 3 | 1808 | #define I40E_GLPCI_GSCN_0_3_MAX_INDEX 3 |
1804 | #define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT 0 | 1809 | #define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT 0 |
1805 | #define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT) | 1810 | #define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT) |
1806 | #define I40E_GLPCI_LATCT 0x0009C4B4 /* Reset: PCIR */ | ||
1807 | #define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT 0 | ||
1808 | #define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT) | ||
1809 | #define I40E_GLPCI_LBARCTRL 0x000BE484 /* Reset: POR */ | 1811 | #define I40E_GLPCI_LBARCTRL 0x000BE484 /* Reset: POR */ |
1810 | #define I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT 0 | 1812 | #define I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT 0 |
1811 | #define I40E_GLPCI_LBARCTRL_PREFBAR_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT) | 1813 | #define I40E_GLPCI_LBARCTRL_PREFBAR_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT) |
@@ -1902,6 +1904,11 @@ | |||
1902 | #define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT) | 1904 | #define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT) |
1903 | #define I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT 1 | 1905 | #define I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT 1 |
1904 | #define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT) | 1906 | #define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT) |
1907 | #define I40E_GLTPH_CTRL 0x000BE480 /* Reset: PCIR */ | ||
1908 | #define I40E_GLTPH_CTRL_DESC_PH_SHIFT 9 | ||
1909 | #define I40E_GLTPH_CTRL_DESC_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DESC_PH_SHIFT) | ||
1910 | #define I40E_GLTPH_CTRL_DATA_PH_SHIFT 11 | ||
1911 | #define I40E_GLTPH_CTRL_DATA_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DATA_PH_SHIFT) | ||
1905 | #define I40E_PF_FUNC_RID 0x0009C000 /* Reset: PCIR */ | 1912 | #define I40E_PF_FUNC_RID 0x0009C000 /* Reset: PCIR */ |
1906 | #define I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT 0 | 1913 | #define I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT 0 |
1907 | #define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK I40E_MASK(0x7, I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT) | 1914 | #define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK I40E_MASK(0x7, I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT) |
@@ -2374,20 +2381,20 @@ | |||
2374 | #define I40E_GL_RXERR2_L_FCOEDIXAC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT) | 2381 | #define I40E_GL_RXERR2_L_FCOEDIXAC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT) |
2375 | #define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ | 2382 | #define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ |
2376 | #define I40E_GLPRT_BPRCH_MAX_INDEX 3 | 2383 | #define I40E_GLPRT_BPRCH_MAX_INDEX 3 |
2377 | #define I40E_GLPRT_BPRCH_UPRCH_SHIFT 0 | 2384 | #define I40E_GLPRT_BPRCH_BPRCH_SHIFT 0 |
2378 | #define I40E_GLPRT_BPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_UPRCH_SHIFT) | 2385 | #define I40E_GLPRT_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_BPRCH_SHIFT) |
2379 | #define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ | 2386 | #define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ |
2380 | #define I40E_GLPRT_BPRCL_MAX_INDEX 3 | 2387 | #define I40E_GLPRT_BPRCL_MAX_INDEX 3 |
2381 | #define I40E_GLPRT_BPRCL_UPRCH_SHIFT 0 | 2388 | #define I40E_GLPRT_BPRCL_BPRCL_SHIFT 0 |
2382 | #define I40E_GLPRT_BPRCL_UPRCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_UPRCH_SHIFT) | 2389 | #define I40E_GLPRT_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_BPRCL_SHIFT) |
2383 | #define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ | 2390 | #define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ |
2384 | #define I40E_GLPRT_BPTCH_MAX_INDEX 3 | 2391 | #define I40E_GLPRT_BPTCH_MAX_INDEX 3 |
2385 | #define I40E_GLPRT_BPTCH_UPRCH_SHIFT 0 | 2392 | #define I40E_GLPRT_BPTCH_BPTCH_SHIFT 0 |
2386 | #define I40E_GLPRT_BPTCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_UPRCH_SHIFT) | 2393 | #define I40E_GLPRT_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_BPTCH_SHIFT) |
2387 | #define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ | 2394 | #define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ |
2388 | #define I40E_GLPRT_BPTCL_MAX_INDEX 3 | 2395 | #define I40E_GLPRT_BPTCL_MAX_INDEX 3 |
2389 | #define I40E_GLPRT_BPTCL_UPRCH_SHIFT 0 | 2396 | #define I40E_GLPRT_BPTCL_BPTCL_SHIFT 0 |
2390 | #define I40E_GLPRT_BPTCL_UPRCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_UPRCH_SHIFT) | 2397 | #define I40E_GLPRT_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_BPTCL_SHIFT) |
2391 | #define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ | 2398 | #define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ |
2392 | #define I40E_GLPRT_CRCERRS_MAX_INDEX 3 | 2399 | #define I40E_GLPRT_CRCERRS_MAX_INDEX 3 |
2393 | #define I40E_GLPRT_CRCERRS_CRCERRS_SHIFT 0 | 2400 | #define I40E_GLPRT_CRCERRS_CRCERRS_SHIFT 0 |
@@ -2620,10 +2627,6 @@ | |||
2620 | #define I40E_GLPRT_TDOLD_MAX_INDEX 3 | 2627 | #define I40E_GLPRT_TDOLD_MAX_INDEX 3 |
2621 | #define I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT 0 | 2628 | #define I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT 0 |
2622 | #define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT) | 2629 | #define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT) |
2623 | #define I40E_GLPRT_TDPC(_i) (0x00375400 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ | ||
2624 | #define I40E_GLPRT_TDPC_MAX_INDEX 3 | ||
2625 | #define I40E_GLPRT_TDPC_TDPC_SHIFT 0 | ||
2626 | #define I40E_GLPRT_TDPC_TDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDPC_TDPC_SHIFT) | ||
2627 | #define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ | 2630 | #define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ |
2628 | #define I40E_GLPRT_UPRCH_MAX_INDEX 3 | 2631 | #define I40E_GLPRT_UPRCH_MAX_INDEX 3 |
2629 | #define I40E_GLPRT_UPRCH_UPRCH_SHIFT 0 | 2632 | #define I40E_GLPRT_UPRCH_UPRCH_SHIFT 0 |
@@ -2990,9 +2993,6 @@ | |||
2990 | #define I40E_PRTTSYN_TXTIME_L 0x001E41C0 /* Reset: GLOBR */ | 2993 | #define I40E_PRTTSYN_TXTIME_L 0x001E41C0 /* Reset: GLOBR */ |
2991 | #define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT 0 | 2994 | #define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT 0 |
2992 | #define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT) | 2995 | #define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT) |
2993 | #define I40E_GLSCD_QUANTA 0x000B2080 /* Reset: CORER */ | ||
2994 | #define I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT 0 | ||
2995 | #define I40E_GLSCD_QUANTA_TSCDQUANTA_MASK I40E_MASK(0x7, I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT) | ||
2996 | #define I40E_GL_MDET_RX 0x0012A510 /* Reset: CORER */ | 2996 | #define I40E_GL_MDET_RX 0x0012A510 /* Reset: CORER */ |
2997 | #define I40E_GL_MDET_RX_FUNCTION_SHIFT 0 | 2997 | #define I40E_GL_MDET_RX_FUNCTION_SHIFT 0 |
2998 | #define I40E_GL_MDET_RX_FUNCTION_MASK I40E_MASK(0xFF, I40E_GL_MDET_RX_FUNCTION_SHIFT) | 2998 | #define I40E_GL_MDET_RX_FUNCTION_MASK I40E_MASK(0xFF, I40E_GL_MDET_RX_FUNCTION_SHIFT) |
@@ -3258,7 +3258,7 @@ | |||
3258 | #define I40E_VFINT_ITRN1_MAX_INDEX 2 | 3258 | #define I40E_VFINT_ITRN1_MAX_INDEX 2 |
3259 | #define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0 | 3259 | #define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0 |
3260 | #define I40E_VFINT_ITRN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN1_INTERVAL_SHIFT) | 3260 | #define I40E_VFINT_ITRN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN1_INTERVAL_SHIFT) |
3261 | #define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: VFR */ | 3261 | #define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: CORER */ |
3262 | #define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2 | 3262 | #define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2 |
3263 | #define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT) | 3263 | #define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT) |
3264 | #define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: CORER */ | 3264 | #define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: CORER */ |
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 708891571dae..fe13ad2def46 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c | |||
@@ -25,6 +25,7 @@ | |||
25 | ******************************************************************************/ | 25 | ******************************************************************************/ |
26 | 26 | ||
27 | #include <linux/prefetch.h> | 27 | #include <linux/prefetch.h> |
28 | #include <net/busy_poll.h> | ||
28 | 29 | ||
29 | #include "i40evf.h" | 30 | #include "i40evf.h" |
30 | #include "i40e_prototype.h" | 31 | #include "i40e_prototype.h" |
@@ -529,6 +530,22 @@ void i40evf_clean_rx_ring(struct i40e_ring *rx_ring) | |||
529 | if (!rx_ring->rx_bi) | 530 | if (!rx_ring->rx_bi) |
530 | return; | 531 | return; |
531 | 532 | ||
533 | if (ring_is_ps_enabled(rx_ring)) { | ||
534 | int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count; | ||
535 | |||
536 | rx_bi = &rx_ring->rx_bi[0]; | ||
537 | if (rx_bi->hdr_buf) { | ||
538 | dma_free_coherent(dev, | ||
539 | bufsz, | ||
540 | rx_bi->hdr_buf, | ||
541 | rx_bi->dma); | ||
542 | for (i = 0; i < rx_ring->count; i++) { | ||
543 | rx_bi = &rx_ring->rx_bi[i]; | ||
544 | rx_bi->dma = 0; | ||
545 | rx_bi->hdr_buf = 0; | ||
546 | } | ||
547 | } | ||
548 | } | ||
532 | /* Free all the Rx ring sk_buffs */ | 549 | /* Free all the Rx ring sk_buffs */ |
533 | for (i = 0; i < rx_ring->count; i++) { | 550 | for (i = 0; i < rx_ring->count; i++) { |
534 | rx_bi = &rx_ring->rx_bi[i]; | 551 | rx_bi = &rx_ring->rx_bi[i]; |
@@ -587,6 +604,37 @@ void i40evf_free_rx_resources(struct i40e_ring *rx_ring) | |||
587 | } | 604 | } |
588 | 605 | ||
589 | /** | 606 | /** |
607 | * i40evf_alloc_rx_headers - allocate rx header buffers | ||
608 | * @rx_ring: ring to alloc buffers | ||
609 | * | ||
610 | * Allocate rx header buffers for the entire ring. As these are static, | ||
611 | * this is only called when setting up a new ring. | ||
612 | **/ | ||
613 | void i40evf_alloc_rx_headers(struct i40e_ring *rx_ring) | ||
614 | { | ||
615 | struct device *dev = rx_ring->dev; | ||
616 | struct i40e_rx_buffer *rx_bi; | ||
617 | dma_addr_t dma; | ||
618 | void *buffer; | ||
619 | int buf_size; | ||
620 | int i; | ||
621 | |||
622 | if (rx_ring->rx_bi[0].hdr_buf) | ||
623 | return; | ||
624 | /* Make sure the buffers don't cross cache line boundaries. */ | ||
625 | buf_size = ALIGN(rx_ring->rx_hdr_len, 256); | ||
626 | buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count, | ||
627 | &dma, GFP_KERNEL); | ||
628 | if (!buffer) | ||
629 | return; | ||
630 | for (i = 0; i < rx_ring->count; i++) { | ||
631 | rx_bi = &rx_ring->rx_bi[i]; | ||
632 | rx_bi->dma = dma + (i * buf_size); | ||
633 | rx_bi->hdr_buf = buffer + (i * buf_size); | ||
634 | } | ||
635 | } | ||
636 | |||
637 | /** | ||
590 | * i40evf_setup_rx_descriptors - Allocate Rx descriptors | 638 | * i40evf_setup_rx_descriptors - Allocate Rx descriptors |
591 | * @rx_ring: Rx descriptor ring (for a specific queue) to setup | 639 | * @rx_ring: Rx descriptor ring (for a specific queue) to setup |
592 | * | 640 | * |
@@ -646,11 +694,76 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) | |||
646 | } | 694 | } |
647 | 695 | ||
648 | /** | 696 | /** |
649 | * i40evf_alloc_rx_buffers - Replace used receive buffers; packet split | 697 | * i40evf_alloc_rx_buffers_ps - Replace used receive buffers; packet split |
698 | * @rx_ring: ring to place buffers on | ||
699 | * @cleaned_count: number of buffers to replace | ||
700 | **/ | ||
701 | void i40evf_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count) | ||
702 | { | ||
703 | u16 i = rx_ring->next_to_use; | ||
704 | union i40e_rx_desc *rx_desc; | ||
705 | struct i40e_rx_buffer *bi; | ||
706 | |||
707 | /* do nothing if no valid netdev defined */ | ||
708 | if (!rx_ring->netdev || !cleaned_count) | ||
709 | return; | ||
710 | |||
711 | while (cleaned_count--) { | ||
712 | rx_desc = I40E_RX_DESC(rx_ring, i); | ||
713 | bi = &rx_ring->rx_bi[i]; | ||
714 | |||
715 | if (bi->skb) /* desc is in use */ | ||
716 | goto no_buffers; | ||
717 | if (!bi->page) { | ||
718 | bi->page = alloc_page(GFP_ATOMIC); | ||
719 | if (!bi->page) { | ||
720 | rx_ring->rx_stats.alloc_page_failed++; | ||
721 | goto no_buffers; | ||
722 | } | ||
723 | } | ||
724 | |||
725 | if (!bi->page_dma) { | ||
726 | /* use a half page if we're re-using */ | ||
727 | bi->page_offset ^= PAGE_SIZE / 2; | ||
728 | bi->page_dma = dma_map_page(rx_ring->dev, | ||
729 | bi->page, | ||
730 | bi->page_offset, | ||
731 | PAGE_SIZE / 2, | ||
732 | DMA_FROM_DEVICE); | ||
733 | if (dma_mapping_error(rx_ring->dev, | ||
734 | bi->page_dma)) { | ||
735 | rx_ring->rx_stats.alloc_page_failed++; | ||
736 | bi->page_dma = 0; | ||
737 | goto no_buffers; | ||
738 | } | ||
739 | } | ||
740 | |||
741 | dma_sync_single_range_for_device(rx_ring->dev, | ||
742 | bi->dma, | ||
743 | 0, | ||
744 | rx_ring->rx_hdr_len, | ||
745 | DMA_FROM_DEVICE); | ||
746 | /* Refresh the desc even if buffer_addrs didn't change | ||
747 | * because each write-back erases this info. | ||
748 | */ | ||
749 | rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma); | ||
750 | rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); | ||
751 | i++; | ||
752 | if (i == rx_ring->count) | ||
753 | i = 0; | ||
754 | } | ||
755 | |||
756 | no_buffers: | ||
757 | if (rx_ring->next_to_use != i) | ||
758 | i40e_release_rx_desc(rx_ring, i); | ||
759 | } | ||
760 | |||
761 | /** | ||
762 | * i40evf_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer | ||
650 | * @rx_ring: ring to place buffers on | 763 | * @rx_ring: ring to place buffers on |
651 | * @cleaned_count: number of buffers to replace | 764 | * @cleaned_count: number of buffers to replace |
652 | **/ | 765 | **/ |
653 | void i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count) | 766 | void i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) |
654 | { | 767 | { |
655 | u16 i = rx_ring->next_to_use; | 768 | u16 i = rx_ring->next_to_use; |
656 | union i40e_rx_desc *rx_desc; | 769 | union i40e_rx_desc *rx_desc; |
@@ -690,40 +803,8 @@ void i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count) | |||
690 | } | 803 | } |
691 | } | 804 | } |
692 | 805 | ||
693 | if (ring_is_ps_enabled(rx_ring)) { | 806 | rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); |
694 | if (!bi->page) { | 807 | rx_desc->read.hdr_addr = 0; |
695 | bi->page = alloc_page(GFP_ATOMIC); | ||
696 | if (!bi->page) { | ||
697 | rx_ring->rx_stats.alloc_page_failed++; | ||
698 | goto no_buffers; | ||
699 | } | ||
700 | } | ||
701 | |||
702 | if (!bi->page_dma) { | ||
703 | /* use a half page if we're re-using */ | ||
704 | bi->page_offset ^= PAGE_SIZE / 2; | ||
705 | bi->page_dma = dma_map_page(rx_ring->dev, | ||
706 | bi->page, | ||
707 | bi->page_offset, | ||
708 | PAGE_SIZE / 2, | ||
709 | DMA_FROM_DEVICE); | ||
710 | if (dma_mapping_error(rx_ring->dev, | ||
711 | bi->page_dma)) { | ||
712 | rx_ring->rx_stats.alloc_page_failed++; | ||
713 | bi->page_dma = 0; | ||
714 | goto no_buffers; | ||
715 | } | ||
716 | } | ||
717 | |||
718 | /* Refresh the desc even if buffer_addrs didn't change | ||
719 | * because each write-back erases this info. | ||
720 | */ | ||
721 | rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma); | ||
722 | rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); | ||
723 | } else { | ||
724 | rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); | ||
725 | rx_desc->read.hdr_addr = 0; | ||
726 | } | ||
727 | i++; | 808 | i++; |
728 | if (i == rx_ring->count) | 809 | if (i == rx_ring->count) |
729 | i = 0; | 810 | i = 0; |
@@ -777,10 +858,10 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, | |||
777 | struct iphdr *iph; | 858 | struct iphdr *iph; |
778 | __sum16 csum; | 859 | __sum16 csum; |
779 | 860 | ||
780 | ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) && | 861 | ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) && |
781 | (rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4); | 862 | (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4); |
782 | ipv6_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT6_MAC_PAY3) && | 863 | ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) && |
783 | (rx_ptype < I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4); | 864 | (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4); |
784 | 865 | ||
785 | skb->ip_summed = CHECKSUM_NONE; | 866 | skb->ip_summed = CHECKSUM_NONE; |
786 | 867 | ||
@@ -906,13 +987,13 @@ static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype) | |||
906 | } | 987 | } |
907 | 988 | ||
908 | /** | 989 | /** |
909 | * i40e_clean_rx_irq - Reclaim resources after receive completes | 990 | * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split |
910 | * @rx_ring: rx ring to clean | 991 | * @rx_ring: rx ring to clean |
911 | * @budget: how many cleans we're allowed | 992 | * @budget: how many cleans we're allowed |
912 | * | 993 | * |
913 | * Returns true if there's any budget left (e.g. the clean is finished) | 994 | * Returns true if there's any budget left (e.g. the clean is finished) |
914 | **/ | 995 | **/ |
915 | static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) | 996 | static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) |
916 | { | 997 | { |
917 | unsigned int total_rx_bytes = 0, total_rx_packets = 0; | 998 | unsigned int total_rx_bytes = 0, total_rx_packets = 0; |
918 | u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo; | 999 | u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo; |
@@ -925,20 +1006,46 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) | |||
925 | u8 rx_ptype; | 1006 | u8 rx_ptype; |
926 | u64 qword; | 1007 | u64 qword; |
927 | 1008 | ||
928 | rx_desc = I40E_RX_DESC(rx_ring, i); | 1009 | do { |
929 | qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); | ||
930 | rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> | ||
931 | I40E_RXD_QW1_STATUS_SHIFT; | ||
932 | |||
933 | while (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) { | ||
934 | union i40e_rx_desc *next_rxd; | ||
935 | struct i40e_rx_buffer *rx_bi; | 1010 | struct i40e_rx_buffer *rx_bi; |
936 | struct sk_buff *skb; | 1011 | struct sk_buff *skb; |
937 | u16 vlan_tag; | 1012 | u16 vlan_tag; |
1013 | /* return some buffers to hardware, one at a time is too slow */ | ||
1014 | if (cleaned_count >= I40E_RX_BUFFER_WRITE) { | ||
1015 | i40evf_alloc_rx_buffers_ps(rx_ring, cleaned_count); | ||
1016 | cleaned_count = 0; | ||
1017 | } | ||
1018 | |||
1019 | i = rx_ring->next_to_clean; | ||
1020 | rx_desc = I40E_RX_DESC(rx_ring, i); | ||
1021 | qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); | ||
1022 | rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> | ||
1023 | I40E_RXD_QW1_STATUS_SHIFT; | ||
1024 | |||
1025 | if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT))) | ||
1026 | break; | ||
1027 | |||
1028 | /* This memory barrier is needed to keep us from reading | ||
1029 | * any other fields out of the rx_desc until we know the | ||
1030 | * DD bit is set. | ||
1031 | */ | ||
1032 | rmb(); | ||
938 | rx_bi = &rx_ring->rx_bi[i]; | 1033 | rx_bi = &rx_ring->rx_bi[i]; |
939 | skb = rx_bi->skb; | 1034 | skb = rx_bi->skb; |
940 | prefetch(skb->data); | 1035 | if (likely(!skb)) { |
941 | 1036 | skb = netdev_alloc_skb_ip_align(rx_ring->netdev, | |
1037 | rx_ring->rx_hdr_len); | ||
1038 | if (!skb) | ||
1039 | rx_ring->rx_stats.alloc_buff_failed++; | ||
1040 | /* initialize queue mapping */ | ||
1041 | skb_record_rx_queue(skb, rx_ring->queue_index); | ||
1042 | /* we are reusing so sync this buffer for CPU use */ | ||
1043 | dma_sync_single_range_for_cpu(rx_ring->dev, | ||
1044 | rx_bi->dma, | ||
1045 | 0, | ||
1046 | rx_ring->rx_hdr_len, | ||
1047 | DMA_FROM_DEVICE); | ||
1048 | } | ||
942 | rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> | 1049 | rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> |
943 | I40E_RXD_QW1_LENGTH_PBUF_SHIFT; | 1050 | I40E_RXD_QW1_LENGTH_PBUF_SHIFT; |
944 | rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >> | 1051 | rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >> |
@@ -953,40 +1060,30 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) | |||
953 | 1060 | ||
954 | rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> | 1061 | rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> |
955 | I40E_RXD_QW1_PTYPE_SHIFT; | 1062 | I40E_RXD_QW1_PTYPE_SHIFT; |
1063 | prefetch(rx_bi->page); | ||
956 | rx_bi->skb = NULL; | 1064 | rx_bi->skb = NULL; |
957 | 1065 | cleaned_count++; | |
958 | /* This memory barrier is needed to keep us from reading | 1066 | if (rx_hbo || rx_sph) { |
959 | * any other fields out of the rx_desc until we know the | 1067 | int len; |
960 | * STATUS_DD bit is set | ||
961 | */ | ||
962 | rmb(); | ||
963 | |||
964 | /* Get the header and possibly the whole packet | ||
965 | * If this is an skb from previous receive dma will be 0 | ||
966 | */ | ||
967 | if (rx_bi->dma) { | ||
968 | u16 len; | ||
969 | |||
970 | if (rx_hbo) | 1068 | if (rx_hbo) |
971 | len = I40E_RX_HDR_SIZE; | 1069 | len = I40E_RX_HDR_SIZE; |
972 | else if (rx_sph) | ||
973 | len = rx_header_len; | ||
974 | else if (rx_packet_len) | ||
975 | len = rx_packet_len; /* 1buf/no split found */ | ||
976 | else | 1070 | else |
977 | len = rx_header_len; /* split always mode */ | 1071 | len = rx_header_len; |
978 | 1072 | memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len); | |
979 | skb_put(skb, len); | 1073 | } else if (skb->len == 0) { |
980 | dma_unmap_single(rx_ring->dev, | 1074 | int len; |
981 | rx_bi->dma, | 1075 | |
982 | rx_ring->rx_buf_len, | 1076 | len = (rx_packet_len > skb_headlen(skb) ? |
983 | DMA_FROM_DEVICE); | 1077 | skb_headlen(skb) : rx_packet_len); |
984 | rx_bi->dma = 0; | 1078 | memcpy(__skb_put(skb, len), |
1079 | rx_bi->page + rx_bi->page_offset, | ||
1080 | len); | ||
1081 | rx_bi->page_offset += len; | ||
1082 | rx_packet_len -= len; | ||
985 | } | 1083 | } |
986 | 1084 | ||
987 | /* Get the rest of the data if this was a header split */ | 1085 | /* Get the rest of the data if this was a header split */ |
988 | if (ring_is_ps_enabled(rx_ring) && rx_packet_len) { | 1086 | if (rx_packet_len) { |
989 | |||
990 | skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, | 1087 | skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, |
991 | rx_bi->page, | 1088 | rx_bi->page, |
992 | rx_bi->page_offset, | 1089 | rx_bi->page_offset, |
@@ -1008,22 +1105,16 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) | |||
1008 | DMA_FROM_DEVICE); | 1105 | DMA_FROM_DEVICE); |
1009 | rx_bi->page_dma = 0; | 1106 | rx_bi->page_dma = 0; |
1010 | } | 1107 | } |
1011 | I40E_RX_NEXT_DESC_PREFETCH(rx_ring, i, next_rxd); | 1108 | I40E_RX_INCREMENT(rx_ring, i); |
1012 | 1109 | ||
1013 | if (unlikely( | 1110 | if (unlikely( |
1014 | !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) { | 1111 | !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) { |
1015 | struct i40e_rx_buffer *next_buffer; | 1112 | struct i40e_rx_buffer *next_buffer; |
1016 | 1113 | ||
1017 | next_buffer = &rx_ring->rx_bi[i]; | 1114 | next_buffer = &rx_ring->rx_bi[i]; |
1018 | 1115 | next_buffer->skb = skb; | |
1019 | if (ring_is_ps_enabled(rx_ring)) { | ||
1020 | rx_bi->skb = next_buffer->skb; | ||
1021 | rx_bi->dma = next_buffer->dma; | ||
1022 | next_buffer->skb = skb; | ||
1023 | next_buffer->dma = 0; | ||
1024 | } | ||
1025 | rx_ring->rx_stats.non_eop_descs++; | 1116 | rx_ring->rx_stats.non_eop_descs++; |
1026 | goto next_desc; | 1117 | continue; |
1027 | } | 1118 | } |
1028 | 1119 | ||
1029 | /* ERR_MASK will only have valid bits if EOP set */ | 1120 | /* ERR_MASK will only have valid bits if EOP set */ |
@@ -1032,7 +1123,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) | |||
1032 | /* TODO: shouldn't we increment a counter indicating the | 1123 | /* TODO: shouldn't we increment a counter indicating the |
1033 | * drop? | 1124 | * drop? |
1034 | */ | 1125 | */ |
1035 | goto next_desc; | 1126 | continue; |
1036 | } | 1127 | } |
1037 | 1128 | ||
1038 | skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc), | 1129 | skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc), |
@@ -1048,30 +1139,134 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) | |||
1048 | vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) | 1139 | vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) |
1049 | ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) | 1140 | ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) |
1050 | : 0; | 1141 | : 0; |
1142 | #ifdef I40E_FCOE | ||
1143 | if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) { | ||
1144 | dev_kfree_skb_any(skb); | ||
1145 | continue; | ||
1146 | } | ||
1147 | #endif | ||
1148 | skb_mark_napi_id(skb, &rx_ring->q_vector->napi); | ||
1051 | i40e_receive_skb(rx_ring, skb, vlan_tag); | 1149 | i40e_receive_skb(rx_ring, skb, vlan_tag); |
1052 | 1150 | ||
1053 | rx_ring->netdev->last_rx = jiffies; | 1151 | rx_ring->netdev->last_rx = jiffies; |
1054 | budget--; | ||
1055 | next_desc: | ||
1056 | rx_desc->wb.qword1.status_error_len = 0; | 1152 | rx_desc->wb.qword1.status_error_len = 0; |
1057 | if (!budget) | ||
1058 | break; | ||
1059 | 1153 | ||
1060 | cleaned_count++; | 1154 | } while (likely(total_rx_packets < budget)); |
1155 | |||
1156 | u64_stats_update_begin(&rx_ring->syncp); | ||
1157 | rx_ring->stats.packets += total_rx_packets; | ||
1158 | rx_ring->stats.bytes += total_rx_bytes; | ||
1159 | u64_stats_update_end(&rx_ring->syncp); | ||
1160 | rx_ring->q_vector->rx.total_packets += total_rx_packets; | ||
1161 | rx_ring->q_vector->rx.total_bytes += total_rx_bytes; | ||
1162 | |||
1163 | return total_rx_packets; | ||
1164 | } | ||
1165 | |||
1166 | /** | ||
1167 | * i40e_clean_rx_irq_1buf - Reclaim resources after receive; single buffer | ||
1168 | * @rx_ring: rx ring to clean | ||
1169 | * @budget: how many cleans we're allowed | ||
1170 | * | ||
1171 | * Returns number of packets cleaned | ||
1172 | **/ | ||
1173 | static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) | ||
1174 | { | ||
1175 | unsigned int total_rx_bytes = 0, total_rx_packets = 0; | ||
1176 | u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); | ||
1177 | struct i40e_vsi *vsi = rx_ring->vsi; | ||
1178 | union i40e_rx_desc *rx_desc; | ||
1179 | u32 rx_error, rx_status; | ||
1180 | u16 rx_packet_len; | ||
1181 | u8 rx_ptype; | ||
1182 | u64 qword; | ||
1183 | u16 i; | ||
1184 | |||
1185 | do { | ||
1186 | struct i40e_rx_buffer *rx_bi; | ||
1187 | struct sk_buff *skb; | ||
1188 | u16 vlan_tag; | ||
1061 | /* return some buffers to hardware, one at a time is too slow */ | 1189 | /* return some buffers to hardware, one at a time is too slow */ |
1062 | if (cleaned_count >= I40E_RX_BUFFER_WRITE) { | 1190 | if (cleaned_count >= I40E_RX_BUFFER_WRITE) { |
1063 | i40evf_alloc_rx_buffers(rx_ring, cleaned_count); | 1191 | i40evf_alloc_rx_buffers_1buf(rx_ring, cleaned_count); |
1064 | cleaned_count = 0; | 1192 | cleaned_count = 0; |
1065 | } | 1193 | } |
1066 | 1194 | ||
1067 | /* use prefetched values */ | 1195 | i = rx_ring->next_to_clean; |
1068 | rx_desc = next_rxd; | 1196 | rx_desc = I40E_RX_DESC(rx_ring, i); |
1069 | qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); | 1197 | qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); |
1070 | rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> | 1198 | rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> |
1071 | I40E_RXD_QW1_STATUS_SHIFT; | 1199 | I40E_RXD_QW1_STATUS_SHIFT; |
1072 | } | 1200 | |
1201 | if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT))) | ||
1202 | break; | ||
1203 | |||
1204 | /* This memory barrier is needed to keep us from reading | ||
1205 | * any other fields out of the rx_desc until we know the | ||
1206 | * DD bit is set. | ||
1207 | */ | ||
1208 | rmb(); | ||
1209 | |||
1210 | rx_bi = &rx_ring->rx_bi[i]; | ||
1211 | skb = rx_bi->skb; | ||
1212 | prefetch(skb->data); | ||
1213 | |||
1214 | rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> | ||
1215 | I40E_RXD_QW1_LENGTH_PBUF_SHIFT; | ||
1216 | |||
1217 | rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >> | ||
1218 | I40E_RXD_QW1_ERROR_SHIFT; | ||
1219 | rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT); | ||
1220 | |||
1221 | rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> | ||
1222 | I40E_RXD_QW1_PTYPE_SHIFT; | ||
1223 | rx_bi->skb = NULL; | ||
1224 | cleaned_count++; | ||
1225 | |||
1226 | /* Get the header and possibly the whole packet | ||
1227 | * If this is an skb from previous receive dma will be 0 | ||
1228 | */ | ||
1229 | skb_put(skb, rx_packet_len); | ||
1230 | dma_unmap_single(rx_ring->dev, rx_bi->dma, rx_ring->rx_buf_len, | ||
1231 | DMA_FROM_DEVICE); | ||
1232 | rx_bi->dma = 0; | ||
1233 | |||
1234 | I40E_RX_INCREMENT(rx_ring, i); | ||
1235 | |||
1236 | if (unlikely( | ||
1237 | !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) { | ||
1238 | rx_ring->rx_stats.non_eop_descs++; | ||
1239 | continue; | ||
1240 | } | ||
1241 | |||
1242 | /* ERR_MASK will only have valid bits if EOP set */ | ||
1243 | if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) { | ||
1244 | dev_kfree_skb_any(skb); | ||
1245 | /* TODO: shouldn't we increment a counter indicating the | ||
1246 | * drop? | ||
1247 | */ | ||
1248 | continue; | ||
1249 | } | ||
1250 | |||
1251 | skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc), | ||
1252 | i40e_ptype_to_hash(rx_ptype)); | ||
1253 | /* probably a little skewed due to removing CRC */ | ||
1254 | total_rx_bytes += skb->len; | ||
1255 | total_rx_packets++; | ||
1256 | |||
1257 | skb->protocol = eth_type_trans(skb, rx_ring->netdev); | ||
1258 | |||
1259 | i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype); | ||
1260 | |||
1261 | vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) | ||
1262 | ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) | ||
1263 | : 0; | ||
1264 | i40e_receive_skb(rx_ring, skb, vlan_tag); | ||
1265 | |||
1266 | rx_ring->netdev->last_rx = jiffies; | ||
1267 | rx_desc->wb.qword1.status_error_len = 0; | ||
1268 | } while (likely(total_rx_packets < budget)); | ||
1073 | 1269 | ||
1074 | rx_ring->next_to_clean = i; | ||
1075 | u64_stats_update_begin(&rx_ring->syncp); | 1270 | u64_stats_update_begin(&rx_ring->syncp); |
1076 | rx_ring->stats.packets += total_rx_packets; | 1271 | rx_ring->stats.packets += total_rx_packets; |
1077 | rx_ring->stats.bytes += total_rx_bytes; | 1272 | rx_ring->stats.bytes += total_rx_bytes; |
@@ -1079,10 +1274,7 @@ next_desc: | |||
1079 | rx_ring->q_vector->rx.total_packets += total_rx_packets; | 1274 | rx_ring->q_vector->rx.total_packets += total_rx_packets; |
1080 | rx_ring->q_vector->rx.total_bytes += total_rx_bytes; | 1275 | rx_ring->q_vector->rx.total_bytes += total_rx_bytes; |
1081 | 1276 | ||
1082 | if (cleaned_count) | 1277 | return total_rx_packets; |
1083 | i40evf_alloc_rx_buffers(rx_ring, cleaned_count); | ||
1084 | |||
1085 | return budget > 0; | ||
1086 | } | 1278 | } |
1087 | 1279 | ||
1088 | /** | 1280 | /** |
@@ -1103,6 +1295,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget) | |||
1103 | bool clean_complete = true; | 1295 | bool clean_complete = true; |
1104 | bool arm_wb = false; | 1296 | bool arm_wb = false; |
1105 | int budget_per_ring; | 1297 | int budget_per_ring; |
1298 | int cleaned; | ||
1106 | 1299 | ||
1107 | if (test_bit(__I40E_DOWN, &vsi->state)) { | 1300 | if (test_bit(__I40E_DOWN, &vsi->state)) { |
1108 | napi_complete(napi); | 1301 | napi_complete(napi); |
@@ -1122,8 +1315,14 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget) | |||
1122 | */ | 1315 | */ |
1123 | budget_per_ring = max(budget/q_vector->num_ringpairs, 1); | 1316 | budget_per_ring = max(budget/q_vector->num_ringpairs, 1); |
1124 | 1317 | ||
1125 | i40e_for_each_ring(ring, q_vector->rx) | 1318 | i40e_for_each_ring(ring, q_vector->rx) { |
1126 | clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring); | 1319 | if (ring_is_ps_enabled(ring)) |
1320 | cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring); | ||
1321 | else | ||
1322 | cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring); | ||
1323 | /* if we didn't clean as many as budgeted, we must be done */ | ||
1324 | clean_complete &= (budget_per_ring != cleaned); | ||
1325 | } | ||
1127 | 1326 | ||
1128 | /* If work not completed, return budget and polling will return */ | 1327 | /* If work not completed, return budget and polling will return */ |
1129 | if (!clean_complete) { | 1328 | if (!clean_complete) { |
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h index c950a038237c..1e49bb1fbac1 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h | |||
@@ -96,6 +96,14 @@ enum i40e_dyn_idx_t { | |||
96 | 96 | ||
97 | /* How many Rx Buffers do we bundle into one write to the hardware ? */ | 97 | /* How many Rx Buffers do we bundle into one write to the hardware ? */ |
98 | #define I40E_RX_BUFFER_WRITE 16 /* Must be power of 2 */ | 98 | #define I40E_RX_BUFFER_WRITE 16 /* Must be power of 2 */ |
99 | #define I40E_RX_INCREMENT(r, i) \ | ||
100 | do { \ | ||
101 | (i)++; \ | ||
102 | if ((i) == (r)->count) \ | ||
103 | i = 0; \ | ||
104 | r->next_to_clean = i; \ | ||
105 | } while (0) | ||
106 | |||
99 | #define I40E_RX_NEXT_DESC(r, i, n) \ | 107 | #define I40E_RX_NEXT_DESC(r, i, n) \ |
100 | do { \ | 108 | do { \ |
101 | (i)++; \ | 109 | (i)++; \ |
@@ -151,6 +159,7 @@ struct i40e_tx_buffer { | |||
151 | 159 | ||
152 | struct i40e_rx_buffer { | 160 | struct i40e_rx_buffer { |
153 | struct sk_buff *skb; | 161 | struct sk_buff *skb; |
162 | void *hdr_buf; | ||
154 | dma_addr_t dma; | 163 | dma_addr_t dma; |
155 | struct page *page; | 164 | struct page *page; |
156 | dma_addr_t page_dma; | 165 | dma_addr_t page_dma; |
@@ -223,8 +232,8 @@ struct i40e_ring { | |||
223 | u16 rx_buf_len; | 232 | u16 rx_buf_len; |
224 | u8 dtype; | 233 | u8 dtype; |
225 | #define I40E_RX_DTYPE_NO_SPLIT 0 | 234 | #define I40E_RX_DTYPE_NO_SPLIT 0 |
226 | #define I40E_RX_DTYPE_SPLIT_ALWAYS 1 | 235 | #define I40E_RX_DTYPE_HEADER_SPLIT 1 |
227 | #define I40E_RX_DTYPE_HEADER_SPLIT 2 | 236 | #define I40E_RX_DTYPE_SPLIT_ALWAYS 2 |
228 | u8 hsplit; | 237 | u8 hsplit; |
229 | #define I40E_RX_SPLIT_L2 0x1 | 238 | #define I40E_RX_SPLIT_L2 0x1 |
230 | #define I40E_RX_SPLIT_IP 0x2 | 239 | #define I40E_RX_SPLIT_IP 0x2 |
@@ -278,7 +287,9 @@ struct i40e_ring_container { | |||
278 | #define i40e_for_each_ring(pos, head) \ | 287 | #define i40e_for_each_ring(pos, head) \ |
279 | for (pos = (head).ring; pos != NULL; pos = pos->next) | 288 | for (pos = (head).ring; pos != NULL; pos = pos->next) |
280 | 289 | ||
281 | void i40evf_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count); | 290 | void i40evf_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count); |
291 | void i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count); | ||
292 | void i40evf_alloc_rx_headers(struct i40e_ring *rxr); | ||
282 | netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev); | 293 | netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev); |
283 | void i40evf_clean_tx_ring(struct i40e_ring *tx_ring); | 294 | void i40evf_clean_tx_ring(struct i40e_ring *tx_ring); |
284 | void i40evf_clean_rx_ring(struct i40e_ring *rx_ring); | 295 | void i40evf_clean_rx_ring(struct i40e_ring *rx_ring); |
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h index 3d0fdaab5cc8..a2693865594a 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_type.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | * | 2 | * |
3 | * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver | 3 | * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver |
4 | * Copyright(c) 2013 - 2014 Intel Corporation. | 4 | * Copyright(c) 2013 - 2015 Intel Corporation. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms and conditions of the GNU General Public License, | 7 | * under the terms and conditions of the GNU General Public License, |
@@ -175,12 +175,12 @@ struct i40e_link_status { | |||
175 | u8 an_info; | 175 | u8 an_info; |
176 | u8 ext_info; | 176 | u8 ext_info; |
177 | u8 loopback; | 177 | u8 loopback; |
178 | bool an_enabled; | ||
179 | /* is Link Status Event notification to SW enabled */ | 178 | /* is Link Status Event notification to SW enabled */ |
180 | bool lse_enable; | 179 | bool lse_enable; |
181 | u16 max_frame_size; | 180 | u16 max_frame_size; |
182 | bool crc_enable; | 181 | bool crc_enable; |
183 | u8 pacing; | 182 | u8 pacing; |
183 | u8 requested_speeds; | ||
184 | }; | 184 | }; |
185 | 185 | ||
186 | struct i40e_phy_info { | 186 | struct i40e_phy_info { |
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h index e0c8208138f4..59f62f0e65dd 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h | |||
@@ -59,31 +59,29 @@ | |||
59 | * of the virtchnl_msg structure. | 59 | * of the virtchnl_msg structure. |
60 | */ | 60 | */ |
61 | enum i40e_virtchnl_ops { | 61 | enum i40e_virtchnl_ops { |
62 | /* VF sends req. to pf for the following | 62 | /* The PF sends status change events to VFs using |
63 | * ops. | 63 | * the I40E_VIRTCHNL_OP_EVENT opcode. |
64 | * VFs send requests to the PF using the other ops. | ||
64 | */ | 65 | */ |
65 | I40E_VIRTCHNL_OP_UNKNOWN = 0, | 66 | I40E_VIRTCHNL_OP_UNKNOWN = 0, |
66 | I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */ | 67 | I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */ |
67 | I40E_VIRTCHNL_OP_RESET_VF, | 68 | I40E_VIRTCHNL_OP_RESET_VF = 2, |
68 | I40E_VIRTCHNL_OP_GET_VF_RESOURCES, | 69 | I40E_VIRTCHNL_OP_GET_VF_RESOURCES = 3, |
69 | I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE, | 70 | I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE = 4, |
70 | I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE, | 71 | I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE = 5, |
71 | I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, | 72 | I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6, |
72 | I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, | 73 | I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP = 7, |
73 | I40E_VIRTCHNL_OP_ENABLE_QUEUES, | 74 | I40E_VIRTCHNL_OP_ENABLE_QUEUES = 8, |
74 | I40E_VIRTCHNL_OP_DISABLE_QUEUES, | 75 | I40E_VIRTCHNL_OP_DISABLE_QUEUES = 9, |
75 | I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, | 76 | I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS = 10, |
76 | I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, | 77 | I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS = 11, |
77 | I40E_VIRTCHNL_OP_ADD_VLAN, | 78 | I40E_VIRTCHNL_OP_ADD_VLAN = 12, |
78 | I40E_VIRTCHNL_OP_DEL_VLAN, | 79 | I40E_VIRTCHNL_OP_DEL_VLAN = 13, |
79 | I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, | 80 | I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14, |
80 | I40E_VIRTCHNL_OP_GET_STATS, | 81 | I40E_VIRTCHNL_OP_GET_STATS = 15, |
81 | I40E_VIRTCHNL_OP_FCOE, | 82 | I40E_VIRTCHNL_OP_FCOE = 16, |
82 | I40E_VIRTCHNL_OP_CONFIG_RSS, | 83 | I40E_VIRTCHNL_OP_EVENT = 17, |
83 | /* PF sends status change events to vfs using | 84 | I40E_VIRTCHNL_OP_CONFIG_RSS = 18, |
84 | * the following op. | ||
85 | */ | ||
86 | I40E_VIRTCHNL_OP_EVENT, | ||
87 | }; | 85 | }; |
88 | 86 | ||
89 | /* Virtual channel message descriptor. This overlays the admin queue | 87 | /* Virtual channel message descriptor. This overlays the admin queue |
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c index 69b97bac182c..681a5d4b4f6a 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | * | 2 | * |
3 | * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver | 3 | * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver |
4 | * Copyright(c) 2013 - 2014 Intel Corporation. | 4 | * Copyright(c) 2013 - 2015 Intel Corporation. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms and conditions of the GNU General Public License, | 7 | * under the terms and conditions of the GNU General Public License, |
@@ -180,7 +180,7 @@ static u32 i40evf_get_msglevel(struct net_device *netdev) | |||
180 | } | 180 | } |
181 | 181 | ||
182 | /** | 182 | /** |
183 | * i40evf_get_msglevel - Set debug message level | 183 | * i40evf_set_msglevel - Set debug message level |
184 | * @netdev: network interface device structure | 184 | * @netdev: network interface device structure |
185 | * @data: message level | 185 | * @data: message level |
186 | * | 186 | * |
@@ -191,6 +191,8 @@ static void i40evf_set_msglevel(struct net_device *netdev, u32 data) | |||
191 | { | 191 | { |
192 | struct i40evf_adapter *adapter = netdev_priv(netdev); | 192 | struct i40evf_adapter *adapter = netdev_priv(netdev); |
193 | 193 | ||
194 | if (I40E_DEBUG_USER & data) | ||
195 | adapter->hw.debug_mask = data; | ||
194 | adapter->msg_enable = data; | 196 | adapter->msg_enable = data; |
195 | } | 197 | } |
196 | 198 | ||
@@ -640,12 +642,14 @@ static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, | |||
640 | if (!indir) | 642 | if (!indir) |
641 | return 0; | 643 | return 0; |
642 | 644 | ||
643 | for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) { | 645 | if (indir) { |
644 | hlut_val = rd32(hw, I40E_VFQF_HLUT(i)); | 646 | for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) { |
645 | indir[j++] = hlut_val & 0xff; | 647 | hlut_val = rd32(hw, I40E_VFQF_HLUT(i)); |
646 | indir[j++] = (hlut_val >> 8) & 0xff; | 648 | indir[j++] = hlut_val & 0xff; |
647 | indir[j++] = (hlut_val >> 16) & 0xff; | 649 | indir[j++] = (hlut_val >> 8) & 0xff; |
648 | indir[j++] = (hlut_val >> 24) & 0xff; | 650 | indir[j++] = (hlut_val >> 16) & 0xff; |
651 | indir[j++] = (hlut_val >> 24) & 0xff; | ||
652 | } | ||
649 | } | 653 | } |
650 | return 0; | 654 | return 0; |
651 | } | 655 | } |
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 8d8c201c63c1..a95135846ea9 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | * | 2 | * |
3 | * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver | 3 | * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver |
4 | * Copyright(c) 2013 - 2014 Intel Corporation. | 4 | * Copyright(c) 2013 - 2015 Intel Corporation. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms and conditions of the GNU General Public License, | 7 | * under the terms and conditions of the GNU General Public License, |
@@ -36,7 +36,7 @@ char i40evf_driver_name[] = "i40evf"; | |||
36 | static const char i40evf_driver_string[] = | 36 | static const char i40evf_driver_string[] = |
37 | "Intel(R) XL710/X710 Virtual Function Network Driver"; | 37 | "Intel(R) XL710/X710 Virtual Function Network Driver"; |
38 | 38 | ||
39 | #define DRV_VERSION "1.2.0" | 39 | #define DRV_VERSION "1.2.4" |
40 | const char i40evf_driver_version[] = DRV_VERSION; | 40 | const char i40evf_driver_version[] = DRV_VERSION; |
41 | static const char i40evf_copyright[] = | 41 | static const char i40evf_copyright[] = |
42 | "Copyright (c) 2013 - 2014 Intel Corporation."; | 42 | "Copyright (c) 2013 - 2014 Intel Corporation."; |
@@ -524,7 +524,8 @@ static int i40evf_request_misc_irq(struct i40evf_adapter *adapter) | |||
524 | int err; | 524 | int err; |
525 | 525 | ||
526 | snprintf(adapter->misc_vector_name, | 526 | snprintf(adapter->misc_vector_name, |
527 | sizeof(adapter->misc_vector_name) - 1, "i40evf:mbx"); | 527 | sizeof(adapter->misc_vector_name) - 1, "i40evf-%s:mbx", |
528 | dev_name(&adapter->pdev->dev)); | ||
528 | err = request_irq(adapter->msix_entries[0].vector, | 529 | err = request_irq(adapter->msix_entries[0].vector, |
529 | &i40evf_msix_aq, 0, | 530 | &i40evf_msix_aq, 0, |
530 | adapter->misc_vector_name, netdev); | 531 | adapter->misc_vector_name, netdev); |
@@ -761,13 +762,17 @@ i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter, | |||
761 | u8 *macaddr) | 762 | u8 *macaddr) |
762 | { | 763 | { |
763 | struct i40evf_mac_filter *f; | 764 | struct i40evf_mac_filter *f; |
765 | int count = 50; | ||
764 | 766 | ||
765 | if (!macaddr) | 767 | if (!macaddr) |
766 | return NULL; | 768 | return NULL; |
767 | 769 | ||
768 | while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, | 770 | while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, |
769 | &adapter->crit_section)) | 771 | &adapter->crit_section)) { |
770 | udelay(1); | 772 | udelay(1); |
773 | if (--count == 0) | ||
774 | return NULL; | ||
775 | } | ||
771 | 776 | ||
772 | f = i40evf_find_filter(adapter, macaddr); | 777 | f = i40evf_find_filter(adapter, macaddr); |
773 | if (!f) { | 778 | if (!f) { |
@@ -828,6 +833,7 @@ static void i40evf_set_rx_mode(struct net_device *netdev) | |||
828 | struct i40evf_mac_filter *f, *ftmp; | 833 | struct i40evf_mac_filter *f, *ftmp; |
829 | struct netdev_hw_addr *uca; | 834 | struct netdev_hw_addr *uca; |
830 | struct netdev_hw_addr *mca; | 835 | struct netdev_hw_addr *mca; |
836 | int count = 50; | ||
831 | 837 | ||
832 | /* add addr if not already in the filter list */ | 838 | /* add addr if not already in the filter list */ |
833 | netdev_for_each_uc_addr(uca, netdev) { | 839 | netdev_for_each_uc_addr(uca, netdev) { |
@@ -838,8 +844,14 @@ static void i40evf_set_rx_mode(struct net_device *netdev) | |||
838 | } | 844 | } |
839 | 845 | ||
840 | while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, | 846 | while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, |
841 | &adapter->crit_section)) | 847 | &adapter->crit_section)) { |
842 | udelay(1); | 848 | udelay(1); |
849 | if (--count == 0) { | ||
850 | dev_err(&adapter->pdev->dev, | ||
851 | "Failed to get lock in %s\n", __func__); | ||
852 | return; | ||
853 | } | ||
854 | } | ||
843 | /* remove filter if not in netdev list */ | 855 | /* remove filter if not in netdev list */ |
844 | list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { | 856 | list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { |
845 | bool found = false; | 857 | bool found = false; |
@@ -920,7 +932,7 @@ static void i40evf_configure(struct i40evf_adapter *adapter) | |||
920 | for (i = 0; i < adapter->num_active_queues; i++) { | 932 | for (i = 0; i < adapter->num_active_queues; i++) { |
921 | struct i40e_ring *ring = adapter->rx_rings[i]; | 933 | struct i40e_ring *ring = adapter->rx_rings[i]; |
922 | 934 | ||
923 | i40evf_alloc_rx_buffers(ring, ring->count); | 935 | i40evf_alloc_rx_buffers_1buf(ring, ring->count); |
924 | ring->next_to_use = ring->count - 1; | 936 | ring->next_to_use = ring->count - 1; |
925 | writel(ring->next_to_use, ring->tail); | 937 | writel(ring->next_to_use, ring->tail); |
926 | } | 938 | } |
@@ -959,6 +971,7 @@ void i40evf_down(struct i40evf_adapter *adapter) | |||
959 | usleep_range(500, 1000); | 971 | usleep_range(500, 1000); |
960 | 972 | ||
961 | i40evf_irq_disable(adapter); | 973 | i40evf_irq_disable(adapter); |
974 | i40evf_napi_disable_all(adapter); | ||
962 | 975 | ||
963 | /* remove all MAC filters */ | 976 | /* remove all MAC filters */ |
964 | list_for_each_entry(f, &adapter->mac_filter_list, list) { | 977 | list_for_each_entry(f, &adapter->mac_filter_list, list) { |
@@ -985,8 +998,6 @@ void i40evf_down(struct i40evf_adapter *adapter) | |||
985 | 998 | ||
986 | netif_tx_stop_all_queues(netdev); | 999 | netif_tx_stop_all_queues(netdev); |
987 | 1000 | ||
988 | i40evf_napi_disable_all(adapter); | ||
989 | |||
990 | msleep(20); | 1001 | msleep(20); |
991 | 1002 | ||
992 | netif_carrier_off(netdev); | 1003 | netif_carrier_off(netdev); |
@@ -1481,9 +1492,11 @@ static void i40evf_reset_task(struct work_struct *work) | |||
1481 | struct i40evf_adapter *adapter = container_of(work, | 1492 | struct i40evf_adapter *adapter = container_of(work, |
1482 | struct i40evf_adapter, | 1493 | struct i40evf_adapter, |
1483 | reset_task); | 1494 | reset_task); |
1495 | struct net_device *netdev = adapter->netdev; | ||
1484 | struct i40e_hw *hw = &adapter->hw; | 1496 | struct i40e_hw *hw = &adapter->hw; |
1485 | int i = 0, err; | 1497 | struct i40evf_mac_filter *f; |
1486 | uint32_t rstat_val; | 1498 | uint32_t rstat_val; |
1499 | int i = 0, err; | ||
1487 | 1500 | ||
1488 | while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, | 1501 | while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, |
1489 | &adapter->crit_section)) | 1502 | &adapter->crit_section)) |
@@ -1528,7 +1541,11 @@ static void i40evf_reset_task(struct work_struct *work) | |||
1528 | 1541 | ||
1529 | if (netif_running(adapter->netdev)) { | 1542 | if (netif_running(adapter->netdev)) { |
1530 | set_bit(__I40E_DOWN, &adapter->vsi.state); | 1543 | set_bit(__I40E_DOWN, &adapter->vsi.state); |
1531 | i40evf_down(adapter); | 1544 | i40evf_irq_disable(adapter); |
1545 | i40evf_napi_disable_all(adapter); | ||
1546 | netif_tx_disable(netdev); | ||
1547 | netif_tx_stop_all_queues(netdev); | ||
1548 | netif_carrier_off(netdev); | ||
1532 | i40evf_free_traffic_irqs(adapter); | 1549 | i40evf_free_traffic_irqs(adapter); |
1533 | i40evf_free_all_tx_resources(adapter); | 1550 | i40evf_free_all_tx_resources(adapter); |
1534 | i40evf_free_all_rx_resources(adapter); | 1551 | i40evf_free_all_rx_resources(adapter); |
@@ -1560,22 +1577,37 @@ static void i40evf_reset_task(struct work_struct *work) | |||
1560 | continue_reset: | 1577 | continue_reset: |
1561 | adapter->flags &= ~I40EVF_FLAG_RESET_PENDING; | 1578 | adapter->flags &= ~I40EVF_FLAG_RESET_PENDING; |
1562 | 1579 | ||
1563 | i40evf_down(adapter); | 1580 | i40evf_irq_disable(adapter); |
1581 | i40evf_napi_disable_all(adapter); | ||
1582 | |||
1583 | netif_tx_disable(netdev); | ||
1584 | |||
1585 | netif_tx_stop_all_queues(netdev); | ||
1586 | |||
1587 | netif_carrier_off(netdev); | ||
1564 | adapter->state = __I40EVF_RESETTING; | 1588 | adapter->state = __I40EVF_RESETTING; |
1565 | 1589 | ||
1566 | /* kill and reinit the admin queue */ | 1590 | /* kill and reinit the admin queue */ |
1567 | if (i40evf_shutdown_adminq(hw)) | 1591 | if (i40evf_shutdown_adminq(hw)) |
1568 | dev_warn(&adapter->pdev->dev, | 1592 | dev_warn(&adapter->pdev->dev, "Failed to shut down adminq\n"); |
1569 | "%s: Failed to destroy the Admin Queue resources\n", | 1593 | adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; |
1570 | __func__); | ||
1571 | err = i40evf_init_adminq(hw); | 1594 | err = i40evf_init_adminq(hw); |
1572 | if (err) | 1595 | if (err) |
1573 | dev_info(&adapter->pdev->dev, "%s: init_adminq failed: %d\n", | 1596 | dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n", |
1574 | __func__, err); | 1597 | err); |
1575 | 1598 | ||
1576 | adapter->aq_pending = 0; | ||
1577 | adapter->aq_required = 0; | ||
1578 | i40evf_map_queues(adapter); | 1599 | i40evf_map_queues(adapter); |
1600 | |||
1601 | /* re-add all MAC filters */ | ||
1602 | list_for_each_entry(f, &adapter->mac_filter_list, list) { | ||
1603 | f->add = true; | ||
1604 | } | ||
1605 | /* re-add all VLAN filters */ | ||
1606 | list_for_each_entry(f, &adapter->vlan_filter_list, list) { | ||
1607 | f->add = true; | ||
1608 | } | ||
1609 | adapter->aq_required = I40EVF_FLAG_AQ_ADD_MAC_FILTER; | ||
1610 | adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER; | ||
1579 | clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); | 1611 | clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); |
1580 | 1612 | ||
1581 | mod_timer(&adapter->watchdog_timer, jiffies + 2); | 1613 | mod_timer(&adapter->watchdog_timer, jiffies + 2); |
diff --git a/drivers/net/ethernet/intel/igbvf/defines.h b/drivers/net/ethernet/intel/igbvf/defines.h index d9fa999b1685..ae3f28332fa0 100644 --- a/drivers/net/ethernet/intel/igbvf/defines.h +++ b/drivers/net/ethernet/intel/igbvf/defines.h | |||
@@ -13,8 +13,7 @@ | |||
13 | more details. | 13 | more details. |
14 | 14 | ||
15 | You should have received a copy of the GNU General Public License along with | 15 | You should have received a copy of the GNU General Public License along with |
16 | this program; if not, write to the Free Software Foundation, Inc., | 16 | this program; if not, see <http://www.gnu.org/licenses/>. |
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | 17 | ||
19 | The full GNU General Public License is included in this distribution in | 18 | The full GNU General Public License is included in this distribution in |
20 | the file called "COPYING". | 19 | the file called "COPYING". |
@@ -29,94 +28,93 @@ | |||
29 | #define _E1000_DEFINES_H_ | 28 | #define _E1000_DEFINES_H_ |
30 | 29 | ||
31 | /* Number of Transmit and Receive Descriptors must be a multiple of 8 */ | 30 | /* Number of Transmit and Receive Descriptors must be a multiple of 8 */ |
32 | #define REQ_TX_DESCRIPTOR_MULTIPLE 8 | 31 | #define REQ_TX_DESCRIPTOR_MULTIPLE 8 |
33 | #define REQ_RX_DESCRIPTOR_MULTIPLE 8 | 32 | #define REQ_RX_DESCRIPTOR_MULTIPLE 8 |
34 | 33 | ||
35 | /* IVAR valid bit */ | 34 | /* IVAR valid bit */ |
36 | #define E1000_IVAR_VALID 0x80 | 35 | #define E1000_IVAR_VALID 0x80 |
37 | 36 | ||
38 | /* Receive Descriptor bit definitions */ | 37 | /* Receive Descriptor bit definitions */ |
39 | #define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ | 38 | #define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ |
40 | #define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ | 39 | #define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ |
41 | #define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ | 40 | #define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ |
42 | #define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ | 41 | #define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ |
43 | #define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ | 42 | #define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ |
44 | #define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ | 43 | #define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ |
45 | #define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ | 44 | #define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ |
46 | #define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ | 45 | #define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ |
47 | #define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ | 46 | #define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ |
48 | 47 | ||
49 | #define E1000_RXDEXT_STATERR_LB 0x00040000 | 48 | #define E1000_RXDEXT_STATERR_LB 0x00040000 |
50 | #define E1000_RXDEXT_STATERR_CE 0x01000000 | 49 | #define E1000_RXDEXT_STATERR_CE 0x01000000 |
51 | #define E1000_RXDEXT_STATERR_SE 0x02000000 | 50 | #define E1000_RXDEXT_STATERR_SE 0x02000000 |
52 | #define E1000_RXDEXT_STATERR_SEQ 0x04000000 | 51 | #define E1000_RXDEXT_STATERR_SEQ 0x04000000 |
53 | #define E1000_RXDEXT_STATERR_CXE 0x10000000 | 52 | #define E1000_RXDEXT_STATERR_CXE 0x10000000 |
54 | #define E1000_RXDEXT_STATERR_TCPE 0x20000000 | 53 | #define E1000_RXDEXT_STATERR_TCPE 0x20000000 |
55 | #define E1000_RXDEXT_STATERR_IPE 0x40000000 | 54 | #define E1000_RXDEXT_STATERR_IPE 0x40000000 |
56 | #define E1000_RXDEXT_STATERR_RXE 0x80000000 | 55 | #define E1000_RXDEXT_STATERR_RXE 0x80000000 |
57 | |||
58 | 56 | ||
59 | /* Same mask, but for extended and packet split descriptors */ | 57 | /* Same mask, but for extended and packet split descriptors */ |
60 | #define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ | 58 | #define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ |
61 | E1000_RXDEXT_STATERR_CE | \ | 59 | E1000_RXDEXT_STATERR_CE | \ |
62 | E1000_RXDEXT_STATERR_SE | \ | 60 | E1000_RXDEXT_STATERR_SE | \ |
63 | E1000_RXDEXT_STATERR_SEQ | \ | 61 | E1000_RXDEXT_STATERR_SEQ | \ |
64 | E1000_RXDEXT_STATERR_CXE | \ | 62 | E1000_RXDEXT_STATERR_CXE | \ |
65 | E1000_RXDEXT_STATERR_RXE) | 63 | E1000_RXDEXT_STATERR_RXE) |
66 | 64 | ||
67 | /* Device Control */ | 65 | /* Device Control */ |
68 | #define E1000_CTRL_RST 0x04000000 /* Global reset */ | 66 | #define E1000_CTRL_RST 0x04000000 /* Global reset */ |
69 | 67 | ||
70 | /* Device Status */ | 68 | /* Device Status */ |
71 | #define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ | 69 | #define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ |
72 | #define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ | 70 | #define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ |
73 | #define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ | 71 | #define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ |
74 | #define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ | 72 | #define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ |
75 | #define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ | 73 | #define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ |
76 | #define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ | 74 | #define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ |
77 | 75 | ||
78 | #define SPEED_10 10 | 76 | #define SPEED_10 10 |
79 | #define SPEED_100 100 | 77 | #define SPEED_100 100 |
80 | #define SPEED_1000 1000 | 78 | #define SPEED_1000 1000 |
81 | #define HALF_DUPLEX 1 | 79 | #define HALF_DUPLEX 1 |
82 | #define FULL_DUPLEX 2 | 80 | #define FULL_DUPLEX 2 |
83 | 81 | ||
84 | /* Transmit Descriptor bit definitions */ | 82 | /* Transmit Descriptor bit definitions */ |
85 | #define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ | 83 | #define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ |
86 | #define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ | 84 | #define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ |
87 | #define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ | 85 | #define E1000_TXD_CMD_DEXT 0x20000000 /* Desc extension (0 = legacy) */ |
88 | #define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ | 86 | #define E1000_TXD_STAT_DD 0x00000001 /* Desc Done */ |
89 | 87 | ||
90 | #define MAX_JUMBO_FRAME_SIZE 0x3F00 | 88 | #define MAX_JUMBO_FRAME_SIZE 0x3F00 |
91 | 89 | ||
92 | /* 802.1q VLAN Packet Size */ | 90 | /* 802.1q VLAN Packet Size */ |
93 | #define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */ | 91 | #define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */ |
94 | 92 | ||
95 | /* Error Codes */ | 93 | /* Error Codes */ |
96 | #define E1000_SUCCESS 0 | 94 | #define E1000_SUCCESS 0 |
97 | #define E1000_ERR_CONFIG 3 | 95 | #define E1000_ERR_CONFIG 3 |
98 | #define E1000_ERR_MAC_INIT 5 | 96 | #define E1000_ERR_MAC_INIT 5 |
99 | #define E1000_ERR_MBX 15 | 97 | #define E1000_ERR_MBX 15 |
100 | 98 | ||
101 | /* SRRCTL bit definitions */ | 99 | /* SRRCTL bit definitions */ |
102 | #define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ | 100 | #define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ |
103 | #define E1000_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00 | 101 | #define E1000_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00 |
104 | #define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */ | 102 | #define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */ |
105 | #define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 | 103 | #define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 |
106 | #define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 | 104 | #define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 |
107 | #define E1000_SRRCTL_DESCTYPE_MASK 0x0E000000 | 105 | #define E1000_SRRCTL_DESCTYPE_MASK 0x0E000000 |
108 | #define E1000_SRRCTL_DROP_EN 0x80000000 | 106 | #define E1000_SRRCTL_DROP_EN 0x80000000 |
109 | 107 | ||
110 | #define E1000_SRRCTL_BSIZEPKT_MASK 0x0000007F | 108 | #define E1000_SRRCTL_BSIZEPKT_MASK 0x0000007F |
111 | #define E1000_SRRCTL_BSIZEHDR_MASK 0x00003F00 | 109 | #define E1000_SRRCTL_BSIZEHDR_MASK 0x00003F00 |
112 | 110 | ||
113 | /* Additional Descriptor Control definitions */ | 111 | /* Additional Descriptor Control definitions */ |
114 | #define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Tx Queue */ | 112 | #define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Tx Que */ |
115 | #define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Queue */ | 113 | #define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Que */ |
116 | 114 | ||
117 | /* Direct Cache Access (DCA) definitions */ | 115 | /* Direct Cache Access (DCA) definitions */ |
118 | #define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ | 116 | #define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ |
119 | 117 | ||
120 | #define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ | 118 | #define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ |
121 | 119 | ||
122 | #endif /* _E1000_DEFINES_H_ */ | 120 | #endif /* _E1000_DEFINES_H_ */ |
diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c index 2178f87e9f61..c6996feb1cb4 100644 --- a/drivers/net/ethernet/intel/igbvf/ethtool.c +++ b/drivers/net/ethernet/intel/igbvf/ethtool.c | |||
@@ -13,8 +13,7 @@ | |||
13 | more details. | 13 | more details. |
14 | 14 | ||
15 | You should have received a copy of the GNU General Public License along with | 15 | You should have received a copy of the GNU General Public License along with |
16 | this program; if not, write to the Free Software Foundation, Inc., | 16 | this program; if not, see <http://www.gnu.org/licenses/>. |
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | 17 | ||
19 | The full GNU General Public License is included in this distribution in | 18 | The full GNU General Public License is included in this distribution in |
20 | the file called "COPYING". | 19 | the file called "COPYING". |
@@ -36,7 +35,6 @@ | |||
36 | #include "igbvf.h" | 35 | #include "igbvf.h" |
37 | #include <linux/if_vlan.h> | 36 | #include <linux/if_vlan.h> |
38 | 37 | ||
39 | |||
40 | struct igbvf_stats { | 38 | struct igbvf_stats { |
41 | char stat_string[ETH_GSTRING_LEN]; | 39 | char stat_string[ETH_GSTRING_LEN]; |
42 | int sizeof_stat; | 40 | int sizeof_stat; |
@@ -74,7 +72,7 @@ static const char igbvf_gstrings_test[][ETH_GSTRING_LEN] = { | |||
74 | #define IGBVF_TEST_LEN ARRAY_SIZE(igbvf_gstrings_test) | 72 | #define IGBVF_TEST_LEN ARRAY_SIZE(igbvf_gstrings_test) |
75 | 73 | ||
76 | static int igbvf_get_settings(struct net_device *netdev, | 74 | static int igbvf_get_settings(struct net_device *netdev, |
77 | struct ethtool_cmd *ecmd) | 75 | struct ethtool_cmd *ecmd) |
78 | { | 76 | { |
79 | struct igbvf_adapter *adapter = netdev_priv(netdev); | 77 | struct igbvf_adapter *adapter = netdev_priv(netdev); |
80 | struct e1000_hw *hw = &adapter->hw; | 78 | struct e1000_hw *hw = &adapter->hw; |
@@ -111,18 +109,18 @@ static int igbvf_get_settings(struct net_device *netdev, | |||
111 | } | 109 | } |
112 | 110 | ||
113 | static int igbvf_set_settings(struct net_device *netdev, | 111 | static int igbvf_set_settings(struct net_device *netdev, |
114 | struct ethtool_cmd *ecmd) | 112 | struct ethtool_cmd *ecmd) |
115 | { | 113 | { |
116 | return -EOPNOTSUPP; | 114 | return -EOPNOTSUPP; |
117 | } | 115 | } |
118 | 116 | ||
119 | static void igbvf_get_pauseparam(struct net_device *netdev, | 117 | static void igbvf_get_pauseparam(struct net_device *netdev, |
120 | struct ethtool_pauseparam *pause) | 118 | struct ethtool_pauseparam *pause) |
121 | { | 119 | { |
122 | } | 120 | } |
123 | 121 | ||
124 | static int igbvf_set_pauseparam(struct net_device *netdev, | 122 | static int igbvf_set_pauseparam(struct net_device *netdev, |
125 | struct ethtool_pauseparam *pause) | 123 | struct ethtool_pauseparam *pause) |
126 | { | 124 | { |
127 | return -EOPNOTSUPP; | 125 | return -EOPNOTSUPP; |
128 | } | 126 | } |
@@ -130,12 +128,14 @@ static int igbvf_set_pauseparam(struct net_device *netdev, | |||
130 | static u32 igbvf_get_msglevel(struct net_device *netdev) | 128 | static u32 igbvf_get_msglevel(struct net_device *netdev) |
131 | { | 129 | { |
132 | struct igbvf_adapter *adapter = netdev_priv(netdev); | 130 | struct igbvf_adapter *adapter = netdev_priv(netdev); |
131 | |||
133 | return adapter->msg_enable; | 132 | return adapter->msg_enable; |
134 | } | 133 | } |
135 | 134 | ||
136 | static void igbvf_set_msglevel(struct net_device *netdev, u32 data) | 135 | static void igbvf_set_msglevel(struct net_device *netdev, u32 data) |
137 | { | 136 | { |
138 | struct igbvf_adapter *adapter = netdev_priv(netdev); | 137 | struct igbvf_adapter *adapter = netdev_priv(netdev); |
138 | |||
139 | adapter->msg_enable = data; | 139 | adapter->msg_enable = data; |
140 | } | 140 | } |
141 | 141 | ||
@@ -146,7 +146,7 @@ static int igbvf_get_regs_len(struct net_device *netdev) | |||
146 | } | 146 | } |
147 | 147 | ||
148 | static void igbvf_get_regs(struct net_device *netdev, | 148 | static void igbvf_get_regs(struct net_device *netdev, |
149 | struct ethtool_regs *regs, void *p) | 149 | struct ethtool_regs *regs, void *p) |
150 | { | 150 | { |
151 | struct igbvf_adapter *adapter = netdev_priv(netdev); | 151 | struct igbvf_adapter *adapter = netdev_priv(netdev); |
152 | struct e1000_hw *hw = &adapter->hw; | 152 | struct e1000_hw *hw = &adapter->hw; |
@@ -175,19 +175,19 @@ static int igbvf_get_eeprom_len(struct net_device *netdev) | |||
175 | } | 175 | } |
176 | 176 | ||
177 | static int igbvf_get_eeprom(struct net_device *netdev, | 177 | static int igbvf_get_eeprom(struct net_device *netdev, |
178 | struct ethtool_eeprom *eeprom, u8 *bytes) | 178 | struct ethtool_eeprom *eeprom, u8 *bytes) |
179 | { | 179 | { |
180 | return -EOPNOTSUPP; | 180 | return -EOPNOTSUPP; |
181 | } | 181 | } |
182 | 182 | ||
183 | static int igbvf_set_eeprom(struct net_device *netdev, | 183 | static int igbvf_set_eeprom(struct net_device *netdev, |
184 | struct ethtool_eeprom *eeprom, u8 *bytes) | 184 | struct ethtool_eeprom *eeprom, u8 *bytes) |
185 | { | 185 | { |
186 | return -EOPNOTSUPP; | 186 | return -EOPNOTSUPP; |
187 | } | 187 | } |
188 | 188 | ||
189 | static void igbvf_get_drvinfo(struct net_device *netdev, | 189 | static void igbvf_get_drvinfo(struct net_device *netdev, |
190 | struct ethtool_drvinfo *drvinfo) | 190 | struct ethtool_drvinfo *drvinfo) |
191 | { | 191 | { |
192 | struct igbvf_adapter *adapter = netdev_priv(netdev); | 192 | struct igbvf_adapter *adapter = netdev_priv(netdev); |
193 | 193 | ||
@@ -201,7 +201,7 @@ static void igbvf_get_drvinfo(struct net_device *netdev, | |||
201 | } | 201 | } |
202 | 202 | ||
203 | static void igbvf_get_ringparam(struct net_device *netdev, | 203 | static void igbvf_get_ringparam(struct net_device *netdev, |
204 | struct ethtool_ringparam *ring) | 204 | struct ethtool_ringparam *ring) |
205 | { | 205 | { |
206 | struct igbvf_adapter *adapter = netdev_priv(netdev); | 206 | struct igbvf_adapter *adapter = netdev_priv(netdev); |
207 | struct igbvf_ring *tx_ring = adapter->tx_ring; | 207 | struct igbvf_ring *tx_ring = adapter->tx_ring; |
@@ -214,7 +214,7 @@ static void igbvf_get_ringparam(struct net_device *netdev, | |||
214 | } | 214 | } |
215 | 215 | ||
216 | static int igbvf_set_ringparam(struct net_device *netdev, | 216 | static int igbvf_set_ringparam(struct net_device *netdev, |
217 | struct ethtool_ringparam *ring) | 217 | struct ethtool_ringparam *ring) |
218 | { | 218 | { |
219 | struct igbvf_adapter *adapter = netdev_priv(netdev); | 219 | struct igbvf_adapter *adapter = netdev_priv(netdev); |
220 | struct igbvf_ring *temp_ring; | 220 | struct igbvf_ring *temp_ring; |
@@ -224,12 +224,12 @@ static int igbvf_set_ringparam(struct net_device *netdev, | |||
224 | if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) | 224 | if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) |
225 | return -EINVAL; | 225 | return -EINVAL; |
226 | 226 | ||
227 | new_rx_count = max(ring->rx_pending, (u32)IGBVF_MIN_RXD); | 227 | new_rx_count = max_t(u32, ring->rx_pending, IGBVF_MIN_RXD); |
228 | new_rx_count = min(new_rx_count, (u32)IGBVF_MAX_RXD); | 228 | new_rx_count = min_t(u32, new_rx_count, IGBVF_MAX_RXD); |
229 | new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE); | 229 | new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE); |
230 | 230 | ||
231 | new_tx_count = max(ring->tx_pending, (u32)IGBVF_MIN_TXD); | 231 | new_tx_count = max_t(u32, ring->tx_pending, IGBVF_MIN_TXD); |
232 | new_tx_count = min(new_tx_count, (u32)IGBVF_MAX_TXD); | 232 | new_tx_count = min_t(u32, new_tx_count, IGBVF_MAX_TXD); |
233 | new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE); | 233 | new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE); |
234 | 234 | ||
235 | if ((new_tx_count == adapter->tx_ring->count) && | 235 | if ((new_tx_count == adapter->tx_ring->count) && |
@@ -239,7 +239,7 @@ static int igbvf_set_ringparam(struct net_device *netdev, | |||
239 | } | 239 | } |
240 | 240 | ||
241 | while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state)) | 241 | while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state)) |
242 | msleep(1); | 242 | usleep_range(1000, 2000); |
243 | 243 | ||
244 | if (!netif_running(adapter->netdev)) { | 244 | if (!netif_running(adapter->netdev)) { |
245 | adapter->tx_ring->count = new_tx_count; | 245 | adapter->tx_ring->count = new_tx_count; |
@@ -255,10 +255,9 @@ static int igbvf_set_ringparam(struct net_device *netdev, | |||
255 | 255 | ||
256 | igbvf_down(adapter); | 256 | igbvf_down(adapter); |
257 | 257 | ||
258 | /* | 258 | /* We can't just free everything and then setup again, |
259 | * We can't just free everything and then setup again, | ||
260 | * because the ISRs in MSI-X mode get passed pointers | 259 | * because the ISRs in MSI-X mode get passed pointers |
261 | * to the tx and rx ring structs. | 260 | * to the Tx and Rx ring structs. |
262 | */ | 261 | */ |
263 | if (new_tx_count != adapter->tx_ring->count) { | 262 | if (new_tx_count != adapter->tx_ring->count) { |
264 | memcpy(temp_ring, adapter->tx_ring, sizeof(struct igbvf_ring)); | 263 | memcpy(temp_ring, adapter->tx_ring, sizeof(struct igbvf_ring)); |
@@ -283,7 +282,7 @@ static int igbvf_set_ringparam(struct net_device *netdev, | |||
283 | 282 | ||
284 | igbvf_free_rx_resources(adapter->rx_ring); | 283 | igbvf_free_rx_resources(adapter->rx_ring); |
285 | 284 | ||
286 | memcpy(adapter->rx_ring, temp_ring,sizeof(struct igbvf_ring)); | 285 | memcpy(adapter->rx_ring, temp_ring, sizeof(struct igbvf_ring)); |
287 | } | 286 | } |
288 | err_setup: | 287 | err_setup: |
289 | igbvf_up(adapter); | 288 | igbvf_up(adapter); |
@@ -307,14 +306,13 @@ static int igbvf_link_test(struct igbvf_adapter *adapter, u64 *data) | |||
307 | } | 306 | } |
308 | 307 | ||
309 | static void igbvf_diag_test(struct net_device *netdev, | 308 | static void igbvf_diag_test(struct net_device *netdev, |
310 | struct ethtool_test *eth_test, u64 *data) | 309 | struct ethtool_test *eth_test, u64 *data) |
311 | { | 310 | { |
312 | struct igbvf_adapter *adapter = netdev_priv(netdev); | 311 | struct igbvf_adapter *adapter = netdev_priv(netdev); |
313 | 312 | ||
314 | set_bit(__IGBVF_TESTING, &adapter->state); | 313 | set_bit(__IGBVF_TESTING, &adapter->state); |
315 | 314 | ||
316 | /* | 315 | /* Link test performed before hardware reset so autoneg doesn't |
317 | * Link test performed before hardware reset so autoneg doesn't | ||
318 | * interfere with test result | 316 | * interfere with test result |
319 | */ | 317 | */ |
320 | if (igbvf_link_test(adapter, &data[0])) | 318 | if (igbvf_link_test(adapter, &data[0])) |
@@ -325,20 +323,20 @@ static void igbvf_diag_test(struct net_device *netdev, | |||
325 | } | 323 | } |
326 | 324 | ||
327 | static void igbvf_get_wol(struct net_device *netdev, | 325 | static void igbvf_get_wol(struct net_device *netdev, |
328 | struct ethtool_wolinfo *wol) | 326 | struct ethtool_wolinfo *wol) |
329 | { | 327 | { |
330 | wol->supported = 0; | 328 | wol->supported = 0; |
331 | wol->wolopts = 0; | 329 | wol->wolopts = 0; |
332 | } | 330 | } |
333 | 331 | ||
334 | static int igbvf_set_wol(struct net_device *netdev, | 332 | static int igbvf_set_wol(struct net_device *netdev, |
335 | struct ethtool_wolinfo *wol) | 333 | struct ethtool_wolinfo *wol) |
336 | { | 334 | { |
337 | return -EOPNOTSUPP; | 335 | return -EOPNOTSUPP; |
338 | } | 336 | } |
339 | 337 | ||
340 | static int igbvf_get_coalesce(struct net_device *netdev, | 338 | static int igbvf_get_coalesce(struct net_device *netdev, |
341 | struct ethtool_coalesce *ec) | 339 | struct ethtool_coalesce *ec) |
342 | { | 340 | { |
343 | struct igbvf_adapter *adapter = netdev_priv(netdev); | 341 | struct igbvf_adapter *adapter = netdev_priv(netdev); |
344 | 342 | ||
@@ -351,13 +349,13 @@ static int igbvf_get_coalesce(struct net_device *netdev, | |||
351 | } | 349 | } |
352 | 350 | ||
353 | static int igbvf_set_coalesce(struct net_device *netdev, | 351 | static int igbvf_set_coalesce(struct net_device *netdev, |
354 | struct ethtool_coalesce *ec) | 352 | struct ethtool_coalesce *ec) |
355 | { | 353 | { |
356 | struct igbvf_adapter *adapter = netdev_priv(netdev); | 354 | struct igbvf_adapter *adapter = netdev_priv(netdev); |
357 | struct e1000_hw *hw = &adapter->hw; | 355 | struct e1000_hw *hw = &adapter->hw; |
358 | 356 | ||
359 | if ((ec->rx_coalesce_usecs >= IGBVF_MIN_ITR_USECS) && | 357 | if ((ec->rx_coalesce_usecs >= IGBVF_MIN_ITR_USECS) && |
360 | (ec->rx_coalesce_usecs <= IGBVF_MAX_ITR_USECS)) { | 358 | (ec->rx_coalesce_usecs <= IGBVF_MAX_ITR_USECS)) { |
361 | adapter->current_itr = ec->rx_coalesce_usecs << 2; | 359 | adapter->current_itr = ec->rx_coalesce_usecs << 2; |
362 | adapter->requested_itr = 1000000000 / | 360 | adapter->requested_itr = 1000000000 / |
363 | (adapter->current_itr * 256); | 361 | (adapter->current_itr * 256); |
@@ -366,8 +364,7 @@ static int igbvf_set_coalesce(struct net_device *netdev, | |||
366 | adapter->current_itr = IGBVF_START_ITR; | 364 | adapter->current_itr = IGBVF_START_ITR; |
367 | adapter->requested_itr = ec->rx_coalesce_usecs; | 365 | adapter->requested_itr = ec->rx_coalesce_usecs; |
368 | } else if (ec->rx_coalesce_usecs == 0) { | 366 | } else if (ec->rx_coalesce_usecs == 0) { |
369 | /* | 367 | /* The user's desire is to turn off interrupt throttling |
370 | * The user's desire is to turn off interrupt throttling | ||
371 | * altogether, but due to HW limitations, we can't do that. | 368 | * altogether, but due to HW limitations, we can't do that. |
372 | * Instead we set a very small value in EITR, which would | 369 | * Instead we set a very small value in EITR, which would |
373 | * allow ~967k interrupts per second, but allow the adapter's | 370 | * allow ~967k interrupts per second, but allow the adapter's |
@@ -376,8 +373,9 @@ static int igbvf_set_coalesce(struct net_device *netdev, | |||
376 | adapter->current_itr = 4; | 373 | adapter->current_itr = 4; |
377 | adapter->requested_itr = 1000000000 / | 374 | adapter->requested_itr = 1000000000 / |
378 | (adapter->current_itr * 256); | 375 | (adapter->current_itr * 256); |
379 | } else | 376 | } else { |
380 | return -EINVAL; | 377 | return -EINVAL; |
378 | } | ||
381 | 379 | ||
382 | writel(adapter->current_itr, | 380 | writel(adapter->current_itr, |
383 | hw->hw_addr + adapter->rx_ring->itr_register); | 381 | hw->hw_addr + adapter->rx_ring->itr_register); |
@@ -388,15 +386,15 @@ static int igbvf_set_coalesce(struct net_device *netdev, | |||
388 | static int igbvf_nway_reset(struct net_device *netdev) | 386 | static int igbvf_nway_reset(struct net_device *netdev) |
389 | { | 387 | { |
390 | struct igbvf_adapter *adapter = netdev_priv(netdev); | 388 | struct igbvf_adapter *adapter = netdev_priv(netdev); |
389 | |||
391 | if (netif_running(netdev)) | 390 | if (netif_running(netdev)) |
392 | igbvf_reinit_locked(adapter); | 391 | igbvf_reinit_locked(adapter); |
393 | return 0; | 392 | return 0; |
394 | } | 393 | } |
395 | 394 | ||
396 | |||
397 | static void igbvf_get_ethtool_stats(struct net_device *netdev, | 395 | static void igbvf_get_ethtool_stats(struct net_device *netdev, |
398 | struct ethtool_stats *stats, | 396 | struct ethtool_stats *stats, |
399 | u64 *data) | 397 | u64 *data) |
400 | { | 398 | { |
401 | struct igbvf_adapter *adapter = netdev_priv(netdev); | 399 | struct igbvf_adapter *adapter = netdev_priv(netdev); |
402 | int i; | 400 | int i; |
@@ -404,19 +402,18 @@ static void igbvf_get_ethtool_stats(struct net_device *netdev, | |||
404 | igbvf_update_stats(adapter); | 402 | igbvf_update_stats(adapter); |
405 | for (i = 0; i < IGBVF_GLOBAL_STATS_LEN; i++) { | 403 | for (i = 0; i < IGBVF_GLOBAL_STATS_LEN; i++) { |
406 | char *p = (char *)adapter + | 404 | char *p = (char *)adapter + |
407 | igbvf_gstrings_stats[i].stat_offset; | 405 | igbvf_gstrings_stats[i].stat_offset; |
408 | char *b = (char *)adapter + | 406 | char *b = (char *)adapter + |
409 | igbvf_gstrings_stats[i].base_stat_offset; | 407 | igbvf_gstrings_stats[i].base_stat_offset; |
410 | data[i] = ((igbvf_gstrings_stats[i].sizeof_stat == | 408 | data[i] = ((igbvf_gstrings_stats[i].sizeof_stat == |
411 | sizeof(u64)) ? (*(u64 *)p - *(u64 *)b) : | 409 | sizeof(u64)) ? (*(u64 *)p - *(u64 *)b) : |
412 | (*(u32 *)p - *(u32 *)b)); | 410 | (*(u32 *)p - *(u32 *)b)); |
413 | } | 411 | } |
414 | |||
415 | } | 412 | } |
416 | 413 | ||
417 | static int igbvf_get_sset_count(struct net_device *dev, int stringset) | 414 | static int igbvf_get_sset_count(struct net_device *dev, int stringset) |
418 | { | 415 | { |
419 | switch(stringset) { | 416 | switch (stringset) { |
420 | case ETH_SS_TEST: | 417 | case ETH_SS_TEST: |
421 | return IGBVF_TEST_LEN; | 418 | return IGBVF_TEST_LEN; |
422 | case ETH_SS_STATS: | 419 | case ETH_SS_STATS: |
@@ -427,7 +424,7 @@ static int igbvf_get_sset_count(struct net_device *dev, int stringset) | |||
427 | } | 424 | } |
428 | 425 | ||
429 | static void igbvf_get_strings(struct net_device *netdev, u32 stringset, | 426 | static void igbvf_get_strings(struct net_device *netdev, u32 stringset, |
430 | u8 *data) | 427 | u8 *data) |
431 | { | 428 | { |
432 | u8 *p = data; | 429 | u8 *p = data; |
433 | int i; | 430 | int i; |
diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h index 7d6a25c8f889..f166baab8d7e 100644 --- a/drivers/net/ethernet/intel/igbvf/igbvf.h +++ b/drivers/net/ethernet/intel/igbvf/igbvf.h | |||
@@ -13,8 +13,7 @@ | |||
13 | more details. | 13 | more details. |
14 | 14 | ||
15 | You should have received a copy of the GNU General Public License along with | 15 | You should have received a copy of the GNU General Public License along with |
16 | this program; if not, write to the Free Software Foundation, Inc., | 16 | this program; if not, see <http://www.gnu.org/licenses/>. |
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | 17 | ||
19 | The full GNU General Public License is included in this distribution in | 18 | The full GNU General Public License is included in this distribution in |
20 | the file called "COPYING". | 19 | the file called "COPYING". |
@@ -43,10 +42,10 @@ struct igbvf_info; | |||
43 | struct igbvf_adapter; | 42 | struct igbvf_adapter; |
44 | 43 | ||
45 | /* Interrupt defines */ | 44 | /* Interrupt defines */ |
46 | #define IGBVF_START_ITR 488 /* ~8000 ints/sec */ | 45 | #define IGBVF_START_ITR 488 /* ~8000 ints/sec */ |
47 | #define IGBVF_4K_ITR 980 | 46 | #define IGBVF_4K_ITR 980 |
48 | #define IGBVF_20K_ITR 196 | 47 | #define IGBVF_20K_ITR 196 |
49 | #define IGBVF_70K_ITR 56 | 48 | #define IGBVF_70K_ITR 56 |
50 | 49 | ||
51 | enum latency_range { | 50 | enum latency_range { |
52 | lowest_latency = 0, | 51 | lowest_latency = 0, |
@@ -55,56 +54,55 @@ enum latency_range { | |||
55 | latency_invalid = 255 | 54 | latency_invalid = 255 |
56 | }; | 55 | }; |
57 | 56 | ||
58 | |||
59 | /* Interrupt modes, as used by the IntMode parameter */ | 57 | /* Interrupt modes, as used by the IntMode parameter */ |
60 | #define IGBVF_INT_MODE_LEGACY 0 | 58 | #define IGBVF_INT_MODE_LEGACY 0 |
61 | #define IGBVF_INT_MODE_MSI 1 | 59 | #define IGBVF_INT_MODE_MSI 1 |
62 | #define IGBVF_INT_MODE_MSIX 2 | 60 | #define IGBVF_INT_MODE_MSIX 2 |
63 | 61 | ||
64 | /* Tx/Rx descriptor defines */ | 62 | /* Tx/Rx descriptor defines */ |
65 | #define IGBVF_DEFAULT_TXD 256 | 63 | #define IGBVF_DEFAULT_TXD 256 |
66 | #define IGBVF_MAX_TXD 4096 | 64 | #define IGBVF_MAX_TXD 4096 |
67 | #define IGBVF_MIN_TXD 80 | 65 | #define IGBVF_MIN_TXD 80 |
68 | 66 | ||
69 | #define IGBVF_DEFAULT_RXD 256 | 67 | #define IGBVF_DEFAULT_RXD 256 |
70 | #define IGBVF_MAX_RXD 4096 | 68 | #define IGBVF_MAX_RXD 4096 |
71 | #define IGBVF_MIN_RXD 80 | 69 | #define IGBVF_MIN_RXD 80 |
72 | 70 | ||
73 | #define IGBVF_MIN_ITR_USECS 10 /* 100000 irq/sec */ | 71 | #define IGBVF_MIN_ITR_USECS 10 /* 100000 irq/sec */ |
74 | #define IGBVF_MAX_ITR_USECS 10000 /* 100 irq/sec */ | 72 | #define IGBVF_MAX_ITR_USECS 10000 /* 100 irq/sec */ |
75 | 73 | ||
76 | /* RX descriptor control thresholds. | 74 | /* RX descriptor control thresholds. |
77 | * PTHRESH - MAC will consider prefetch if it has fewer than this number of | 75 | * PTHRESH - MAC will consider prefetch if it has fewer than this number of |
78 | * descriptors available in its onboard memory. | 76 | * descriptors available in its onboard memory. |
79 | * Setting this to 0 disables RX descriptor prefetch. | 77 | * Setting this to 0 disables RX descriptor prefetch. |
80 | * HTHRESH - MAC will only prefetch if there are at least this many descriptors | 78 | * HTHRESH - MAC will only prefetch if there are at least this many descriptors |
81 | * available in host memory. | 79 | * available in host memory. |
82 | * If PTHRESH is 0, this should also be 0. | 80 | * If PTHRESH is 0, this should also be 0. |
83 | * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back | 81 | * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back |
84 | * descriptors until either it has this many to write back, or the | 82 | * descriptors until either it has this many to write back, or the |
85 | * ITR timer expires. | 83 | * ITR timer expires. |
86 | */ | 84 | */ |
87 | #define IGBVF_RX_PTHRESH 16 | 85 | #define IGBVF_RX_PTHRESH 16 |
88 | #define IGBVF_RX_HTHRESH 8 | 86 | #define IGBVF_RX_HTHRESH 8 |
89 | #define IGBVF_RX_WTHRESH 1 | 87 | #define IGBVF_RX_WTHRESH 1 |
90 | 88 | ||
91 | /* this is the size past which hardware will drop packets when setting LPE=0 */ | 89 | /* this is the size past which hardware will drop packets when setting LPE=0 */ |
92 | #define MAXIMUM_ETHERNET_VLAN_SIZE 1522 | 90 | #define MAXIMUM_ETHERNET_VLAN_SIZE 1522 |
93 | 91 | ||
94 | #define IGBVF_FC_PAUSE_TIME 0x0680 /* 858 usec */ | 92 | #define IGBVF_FC_PAUSE_TIME 0x0680 /* 858 usec */ |
95 | 93 | ||
96 | /* How many Tx Descriptors do we need to call netif_wake_queue ? */ | 94 | /* How many Tx Descriptors do we need to call netif_wake_queue ? */ |
97 | #define IGBVF_TX_QUEUE_WAKE 32 | 95 | #define IGBVF_TX_QUEUE_WAKE 32 |
98 | /* How many Rx Buffers do we bundle into one write to the hardware ? */ | 96 | /* How many Rx Buffers do we bundle into one write to the hardware ? */ |
99 | #define IGBVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */ | 97 | #define IGBVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */ |
100 | 98 | ||
101 | #define AUTO_ALL_MODES 0 | 99 | #define AUTO_ALL_MODES 0 |
102 | #define IGBVF_EEPROM_APME 0x0400 | 100 | #define IGBVF_EEPROM_APME 0x0400 |
103 | 101 | ||
104 | #define IGBVF_MNG_VLAN_NONE (-1) | 102 | #define IGBVF_MNG_VLAN_NONE (-1) |
105 | 103 | ||
106 | /* Number of packet split data buffers (not including the header buffer) */ | 104 | /* Number of packet split data buffers (not including the header buffer) */ |
107 | #define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1) | 105 | #define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1) |
108 | 106 | ||
109 | enum igbvf_boards { | 107 | enum igbvf_boards { |
110 | board_vf, | 108 | board_vf, |
@@ -116,8 +114,7 @@ struct igbvf_queue_stats { | |||
116 | u64 bytes; | 114 | u64 bytes; |
117 | }; | 115 | }; |
118 | 116 | ||
119 | /* | 117 | /* wrappers around a pointer to a socket buffer, |
120 | * wrappers around a pointer to a socket buffer, | ||
121 | * so a DMA handle can be stored along with the buffer | 118 | * so a DMA handle can be stored along with the buffer |
122 | */ | 119 | */ |
123 | struct igbvf_buffer { | 120 | struct igbvf_buffer { |
@@ -148,10 +145,10 @@ union igbvf_desc { | |||
148 | 145 | ||
149 | struct igbvf_ring { | 146 | struct igbvf_ring { |
150 | struct igbvf_adapter *adapter; /* backlink */ | 147 | struct igbvf_adapter *adapter; /* backlink */ |
151 | union igbvf_desc *desc; /* pointer to ring memory */ | 148 | union igbvf_desc *desc; /* pointer to ring memory */ |
152 | dma_addr_t dma; /* phys address of ring */ | 149 | dma_addr_t dma; /* phys address of ring */ |
153 | unsigned int size; /* length of ring in bytes */ | 150 | unsigned int size; /* length of ring in bytes */ |
154 | unsigned int count; /* number of desc. in ring */ | 151 | unsigned int count; /* number of desc. in ring */ |
155 | 152 | ||
156 | u16 next_to_use; | 153 | u16 next_to_use; |
157 | u16 next_to_clean; | 154 | u16 next_to_clean; |
@@ -202,9 +199,7 @@ struct igbvf_adapter { | |||
202 | u32 requested_itr; /* ints/sec or adaptive */ | 199 | u32 requested_itr; /* ints/sec or adaptive */ |
203 | u32 current_itr; /* Actual ITR register value, not ints/sec */ | 200 | u32 current_itr; /* Actual ITR register value, not ints/sec */ |
204 | 201 | ||
205 | /* | 202 | /* Tx */ |
206 | * Tx | ||
207 | */ | ||
208 | struct igbvf_ring *tx_ring /* One per active queue */ | 203 | struct igbvf_ring *tx_ring /* One per active queue */ |
209 | ____cacheline_aligned_in_smp; | 204 | ____cacheline_aligned_in_smp; |
210 | 205 | ||
@@ -226,9 +221,7 @@ struct igbvf_adapter { | |||
226 | u32 tx_fifo_size; | 221 | u32 tx_fifo_size; |
227 | u32 tx_dma_failed; | 222 | u32 tx_dma_failed; |
228 | 223 | ||
229 | /* | 224 | /* Rx */ |
230 | * Rx | ||
231 | */ | ||
232 | struct igbvf_ring *rx_ring; | 225 | struct igbvf_ring *rx_ring; |
233 | 226 | ||
234 | u32 rx_int_delay; | 227 | u32 rx_int_delay; |
@@ -249,7 +242,7 @@ struct igbvf_adapter { | |||
249 | struct net_device *netdev; | 242 | struct net_device *netdev; |
250 | struct pci_dev *pdev; | 243 | struct pci_dev *pdev; |
251 | struct net_device_stats net_stats; | 244 | struct net_device_stats net_stats; |
252 | spinlock_t stats_lock; /* prevent concurrent stats updates */ | 245 | spinlock_t stats_lock; /* prevent concurrent stats updates */ |
253 | 246 | ||
254 | /* structs defined in e1000_hw.h */ | 247 | /* structs defined in e1000_hw.h */ |
255 | struct e1000_hw hw; | 248 | struct e1000_hw hw; |
@@ -286,16 +279,16 @@ struct igbvf_adapter { | |||
286 | }; | 279 | }; |
287 | 280 | ||
288 | struct igbvf_info { | 281 | struct igbvf_info { |
289 | enum e1000_mac_type mac; | 282 | enum e1000_mac_type mac; |
290 | unsigned int flags; | 283 | unsigned int flags; |
291 | u32 pba; | 284 | u32 pba; |
292 | void (*init_ops)(struct e1000_hw *); | 285 | void (*init_ops)(struct e1000_hw *); |
293 | s32 (*get_variants)(struct igbvf_adapter *); | 286 | s32 (*get_variants)(struct igbvf_adapter *); |
294 | }; | 287 | }; |
295 | 288 | ||
296 | /* hardware capability, feature, and workaround flags */ | 289 | /* hardware capability, feature, and workaround flags */ |
297 | #define IGBVF_FLAG_RX_CSUM_DISABLED (1 << 0) | 290 | #define IGBVF_FLAG_RX_CSUM_DISABLED (1 << 0) |
298 | #define IGBVF_FLAG_RX_LB_VLAN_BSWAP (1 << 1) | 291 | #define IGBVF_FLAG_RX_LB_VLAN_BSWAP (1 << 1) |
299 | #define IGBVF_RX_DESC_ADV(R, i) \ | 292 | #define IGBVF_RX_DESC_ADV(R, i) \ |
300 | (&((((R).desc))[i].rx_desc)) | 293 | (&((((R).desc))[i].rx_desc)) |
301 | #define IGBVF_TX_DESC_ADV(R, i) \ | 294 | #define IGBVF_TX_DESC_ADV(R, i) \ |
diff --git a/drivers/net/ethernet/intel/igbvf/mbx.c b/drivers/net/ethernet/intel/igbvf/mbx.c index b4b65bc9fc5d..7b6cb4c3764c 100644 --- a/drivers/net/ethernet/intel/igbvf/mbx.c +++ b/drivers/net/ethernet/intel/igbvf/mbx.c | |||
@@ -13,8 +13,7 @@ | |||
13 | more details. | 13 | more details. |
14 | 14 | ||
15 | You should have received a copy of the GNU General Public License along with | 15 | You should have received a copy of the GNU General Public License along with |
16 | this program; if not, write to the Free Software Foundation, Inc., | 16 | this program; if not, see <http://www.gnu.org/licenses/>. |
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | 17 | ||
19 | The full GNU General Public License is included in this distribution in | 18 | The full GNU General Public License is included in this distribution in |
20 | the file called "COPYING". | 19 | the file called "COPYING". |
@@ -54,10 +53,10 @@ out: | |||
54 | } | 53 | } |
55 | 54 | ||
56 | /** | 55 | /** |
57 | * e1000_poll_for_ack - Wait for message acknowledgement | 56 | * e1000_poll_for_ack - Wait for message acknowledgment |
58 | * @hw: pointer to the HW structure | 57 | * @hw: pointer to the HW structure |
59 | * | 58 | * |
60 | * returns SUCCESS if it successfully received a message acknowledgement | 59 | * returns SUCCESS if it successfully received a message acknowledgment |
61 | **/ | 60 | **/ |
62 | static s32 e1000_poll_for_ack(struct e1000_hw *hw) | 61 | static s32 e1000_poll_for_ack(struct e1000_hw *hw) |
63 | { | 62 | { |
@@ -218,7 +217,7 @@ static s32 e1000_check_for_rst_vf(struct e1000_hw *hw) | |||
218 | s32 ret_val = -E1000_ERR_MBX; | 217 | s32 ret_val = -E1000_ERR_MBX; |
219 | 218 | ||
220 | if (!e1000_check_for_bit_vf(hw, (E1000_V2PMAILBOX_RSTD | | 219 | if (!e1000_check_for_bit_vf(hw, (E1000_V2PMAILBOX_RSTD | |
221 | E1000_V2PMAILBOX_RSTI))) { | 220 | E1000_V2PMAILBOX_RSTI))) { |
222 | ret_val = E1000_SUCCESS; | 221 | ret_val = E1000_SUCCESS; |
223 | hw->mbx.stats.rsts++; | 222 | hw->mbx.stats.rsts++; |
224 | } | 223 | } |
@@ -239,7 +238,7 @@ static s32 e1000_obtain_mbx_lock_vf(struct e1000_hw *hw) | |||
239 | /* Take ownership of the buffer */ | 238 | /* Take ownership of the buffer */ |
240 | ew32(V2PMAILBOX(0), E1000_V2PMAILBOX_VFU); | 239 | ew32(V2PMAILBOX(0), E1000_V2PMAILBOX_VFU); |
241 | 240 | ||
242 | /* reserve mailbox for vf use */ | 241 | /* reserve mailbox for VF use */ |
243 | if (e1000_read_v2p_mailbox(hw) & E1000_V2PMAILBOX_VFU) | 242 | if (e1000_read_v2p_mailbox(hw) & E1000_V2PMAILBOX_VFU) |
244 | ret_val = E1000_SUCCESS; | 243 | ret_val = E1000_SUCCESS; |
245 | 244 | ||
@@ -283,7 +282,7 @@ out_no_write: | |||
283 | } | 282 | } |
284 | 283 | ||
285 | /** | 284 | /** |
286 | * e1000_read_mbx_vf - Reads a message from the inbox intended for vf | 285 | * e1000_read_mbx_vf - Reads a message from the inbox intended for VF |
287 | * @hw: pointer to the HW structure | 286 | * @hw: pointer to the HW structure |
288 | * @msg: The message buffer | 287 | * @msg: The message buffer |
289 | * @size: Length of buffer | 288 | * @size: Length of buffer |
@@ -315,17 +314,18 @@ out_no_read: | |||
315 | } | 314 | } |
316 | 315 | ||
317 | /** | 316 | /** |
318 | * e1000_init_mbx_params_vf - set initial values for vf mailbox | 317 | * e1000_init_mbx_params_vf - set initial values for VF mailbox |
319 | * @hw: pointer to the HW structure | 318 | * @hw: pointer to the HW structure |
320 | * | 319 | * |
321 | * Initializes the hw->mbx struct to correct values for vf mailbox | 320 | * Initializes the hw->mbx struct to correct values for VF mailbox |
322 | */ | 321 | */ |
323 | s32 e1000_init_mbx_params_vf(struct e1000_hw *hw) | 322 | s32 e1000_init_mbx_params_vf(struct e1000_hw *hw) |
324 | { | 323 | { |
325 | struct e1000_mbx_info *mbx = &hw->mbx; | 324 | struct e1000_mbx_info *mbx = &hw->mbx; |
326 | 325 | ||
327 | /* start mailbox as timed out and let the reset_hw call set the timeout | 326 | /* start mailbox as timed out and let the reset_hw call set the timeout |
328 | * value to being communications */ | 327 | * value to being communications |
328 | */ | ||
329 | mbx->timeout = 0; | 329 | mbx->timeout = 0; |
330 | mbx->usec_delay = E1000_VF_MBX_INIT_DELAY; | 330 | mbx->usec_delay = E1000_VF_MBX_INIT_DELAY; |
331 | 331 | ||
@@ -347,4 +347,3 @@ s32 e1000_init_mbx_params_vf(struct e1000_hw *hw) | |||
347 | 347 | ||
348 | return E1000_SUCCESS; | 348 | return E1000_SUCCESS; |
349 | } | 349 | } |
350 | |||
diff --git a/drivers/net/ethernet/intel/igbvf/mbx.h b/drivers/net/ethernet/intel/igbvf/mbx.h index 24370bcb0e22..f800bf8eedae 100644 --- a/drivers/net/ethernet/intel/igbvf/mbx.h +++ b/drivers/net/ethernet/intel/igbvf/mbx.h | |||
@@ -13,8 +13,7 @@ | |||
13 | more details. | 13 | more details. |
14 | 14 | ||
15 | You should have received a copy of the GNU General Public License along with | 15 | You should have received a copy of the GNU General Public License along with |
16 | this program; if not, write to the Free Software Foundation, Inc., | 16 | this program; if not, see <http://www.gnu.org/licenses/>. |
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | 17 | ||
19 | The full GNU General Public License is included in this distribution in | 18 | The full GNU General Public License is included in this distribution in |
20 | the file called "COPYING". | 19 | the file called "COPYING". |
@@ -30,44 +29,44 @@ | |||
30 | 29 | ||
31 | #include "vf.h" | 30 | #include "vf.h" |
32 | 31 | ||
33 | #define E1000_V2PMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */ | 32 | #define E1000_V2PMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */ |
34 | #define E1000_V2PMAILBOX_ACK 0x00000002 /* Ack PF message received */ | 33 | #define E1000_V2PMAILBOX_ACK 0x00000002 /* Ack PF message received */ |
35 | #define E1000_V2PMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ | 34 | #define E1000_V2PMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ |
36 | #define E1000_V2PMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ | 35 | #define E1000_V2PMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ |
37 | #define E1000_V2PMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */ | 36 | #define E1000_V2PMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */ |
38 | #define E1000_V2PMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */ | 37 | #define E1000_V2PMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */ |
39 | #define E1000_V2PMAILBOX_RSTI 0x00000040 /* PF has reset indication */ | 38 | #define E1000_V2PMAILBOX_RSTI 0x00000040 /* PF has reset indication */ |
40 | #define E1000_V2PMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */ | 39 | #define E1000_V2PMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */ |
41 | #define E1000_V2PMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */ | 40 | #define E1000_V2PMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */ |
42 | 41 | ||
43 | #define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ | 42 | #define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ |
44 | 43 | ||
45 | /* If it's a E1000_VF_* msg then it originates in the VF and is sent to the | 44 | /* If it's a E1000_VF_* msg then it originates in the VF and is sent to the |
46 | * PF. The reverse is true if it is E1000_PF_*. | 45 | * PF. The reverse is true if it is E1000_PF_*. |
47 | * Message ACK's are the value or'd with 0xF0000000 | 46 | * Message ACK's are the value or'd with 0xF0000000 |
48 | */ | 47 | */ |
49 | #define E1000_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with | 48 | /* Messages below or'd with this are the ACK */ |
50 | * this are the ACK */ | 49 | #define E1000_VT_MSGTYPE_ACK 0x80000000 |
51 | #define E1000_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with | 50 | /* Messages below or'd with this are the NACK */ |
52 | * this are the NACK */ | 51 | #define E1000_VT_MSGTYPE_NACK 0x40000000 |
53 | #define E1000_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still | 52 | /* Indicates that VF is still clear to send requests */ |
54 | clear to send requests */ | 53 | #define E1000_VT_MSGTYPE_CTS 0x20000000 |
55 | 54 | ||
56 | /* We have a total wait time of 1s for vf mailbox posted messages */ | 55 | /* We have a total wait time of 1s for vf mailbox posted messages */ |
57 | #define E1000_VF_MBX_INIT_TIMEOUT 2000 /* retry count for mailbox timeout */ | 56 | #define E1000_VF_MBX_INIT_TIMEOUT 2000 /* retry count for mbx timeout */ |
58 | #define E1000_VF_MBX_INIT_DELAY 500 /* usec delay between retries */ | 57 | #define E1000_VF_MBX_INIT_DELAY 500 /* usec delay between retries */ |
59 | 58 | ||
60 | #define E1000_VT_MSGINFO_SHIFT 16 | 59 | #define E1000_VT_MSGINFO_SHIFT 16 |
61 | /* bits 23:16 are used for exra info for certain messages */ | 60 | /* bits 23:16 are used for exra info for certain messages */ |
62 | #define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT) | 61 | #define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT) |
63 | 62 | ||
64 | #define E1000_VF_RESET 0x01 /* VF requests reset */ | 63 | #define E1000_VF_RESET 0x01 /* VF requests reset */ |
65 | #define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ | 64 | #define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ |
66 | #define E1000_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ | 65 | #define E1000_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ |
67 | #define E1000_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ | 66 | #define E1000_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ |
68 | #define E1000_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ | 67 | #define E1000_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ |
69 | 68 | ||
70 | #define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */ | 69 | #define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */ |
71 | 70 | ||
72 | void e1000_init_mbx_ops_generic(struct e1000_hw *hw); | 71 | void e1000_init_mbx_ops_generic(struct e1000_hw *hw); |
73 | s32 e1000_init_mbx_params_vf(struct e1000_hw *); | 72 | s32 e1000_init_mbx_params_vf(struct e1000_hw *); |
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index ebf9d4a42fdd..c17ea4b8f84d 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c | |||
@@ -13,8 +13,7 @@ | |||
13 | more details. | 13 | more details. |
14 | 14 | ||
15 | You should have received a copy of the GNU General Public License along with | 15 | You should have received a copy of the GNU General Public License along with |
16 | this program; if not, write to the Free Software Foundation, Inc., | 16 | this program; if not, see <http://www.gnu.org/licenses/>. |
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | 17 | ||
19 | The full GNU General Public License is included in this distribution in | 18 | The full GNU General Public License is included in this distribution in |
20 | the file called "COPYING". | 19 | the file called "COPYING". |
@@ -66,26 +65,27 @@ static void igbvf_set_interrupt_capability(struct igbvf_adapter *); | |||
66 | static void igbvf_reset_interrupt_capability(struct igbvf_adapter *); | 65 | static void igbvf_reset_interrupt_capability(struct igbvf_adapter *); |
67 | 66 | ||
68 | static struct igbvf_info igbvf_vf_info = { | 67 | static struct igbvf_info igbvf_vf_info = { |
69 | .mac = e1000_vfadapt, | 68 | .mac = e1000_vfadapt, |
70 | .flags = 0, | 69 | .flags = 0, |
71 | .pba = 10, | 70 | .pba = 10, |
72 | .init_ops = e1000_init_function_pointers_vf, | 71 | .init_ops = e1000_init_function_pointers_vf, |
73 | }; | 72 | }; |
74 | 73 | ||
75 | static struct igbvf_info igbvf_i350_vf_info = { | 74 | static struct igbvf_info igbvf_i350_vf_info = { |
76 | .mac = e1000_vfadapt_i350, | 75 | .mac = e1000_vfadapt_i350, |
77 | .flags = 0, | 76 | .flags = 0, |
78 | .pba = 10, | 77 | .pba = 10, |
79 | .init_ops = e1000_init_function_pointers_vf, | 78 | .init_ops = e1000_init_function_pointers_vf, |
80 | }; | 79 | }; |
81 | 80 | ||
82 | static const struct igbvf_info *igbvf_info_tbl[] = { | 81 | static const struct igbvf_info *igbvf_info_tbl[] = { |
83 | [board_vf] = &igbvf_vf_info, | 82 | [board_vf] = &igbvf_vf_info, |
84 | [board_i350_vf] = &igbvf_i350_vf_info, | 83 | [board_i350_vf] = &igbvf_i350_vf_info, |
85 | }; | 84 | }; |
86 | 85 | ||
87 | /** | 86 | /** |
88 | * igbvf_desc_unused - calculate if we have unused descriptors | 87 | * igbvf_desc_unused - calculate if we have unused descriptors |
88 | * @rx_ring: address of receive ring structure | ||
89 | **/ | 89 | **/ |
90 | static int igbvf_desc_unused(struct igbvf_ring *ring) | 90 | static int igbvf_desc_unused(struct igbvf_ring *ring) |
91 | { | 91 | { |
@@ -103,9 +103,9 @@ static int igbvf_desc_unused(struct igbvf_ring *ring) | |||
103 | * @skb: pointer to sk_buff to be indicated to stack | 103 | * @skb: pointer to sk_buff to be indicated to stack |
104 | **/ | 104 | **/ |
105 | static void igbvf_receive_skb(struct igbvf_adapter *adapter, | 105 | static void igbvf_receive_skb(struct igbvf_adapter *adapter, |
106 | struct net_device *netdev, | 106 | struct net_device *netdev, |
107 | struct sk_buff *skb, | 107 | struct sk_buff *skb, |
108 | u32 status, u16 vlan) | 108 | u32 status, u16 vlan) |
109 | { | 109 | { |
110 | u16 vid; | 110 | u16 vid; |
111 | 111 | ||
@@ -123,7 +123,7 @@ static void igbvf_receive_skb(struct igbvf_adapter *adapter, | |||
123 | } | 123 | } |
124 | 124 | ||
125 | static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter, | 125 | static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter, |
126 | u32 status_err, struct sk_buff *skb) | 126 | u32 status_err, struct sk_buff *skb) |
127 | { | 127 | { |
128 | skb_checksum_none_assert(skb); | 128 | skb_checksum_none_assert(skb); |
129 | 129 | ||
@@ -153,7 +153,7 @@ static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter, | |||
153 | * @cleaned_count: number of buffers to repopulate | 153 | * @cleaned_count: number of buffers to repopulate |
154 | **/ | 154 | **/ |
155 | static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring, | 155 | static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring, |
156 | int cleaned_count) | 156 | int cleaned_count) |
157 | { | 157 | { |
158 | struct igbvf_adapter *adapter = rx_ring->adapter; | 158 | struct igbvf_adapter *adapter = rx_ring->adapter; |
159 | struct net_device *netdev = adapter->netdev; | 159 | struct net_device *netdev = adapter->netdev; |
@@ -188,8 +188,8 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring, | |||
188 | } | 188 | } |
189 | buffer_info->page_dma = | 189 | buffer_info->page_dma = |
190 | dma_map_page(&pdev->dev, buffer_info->page, | 190 | dma_map_page(&pdev->dev, buffer_info->page, |
191 | buffer_info->page_offset, | 191 | buffer_info->page_offset, |
192 | PAGE_SIZE / 2, | 192 | PAGE_SIZE / 2, |
193 | DMA_FROM_DEVICE); | 193 | DMA_FROM_DEVICE); |
194 | if (dma_mapping_error(&pdev->dev, | 194 | if (dma_mapping_error(&pdev->dev, |
195 | buffer_info->page_dma)) { | 195 | buffer_info->page_dma)) { |
@@ -209,7 +209,7 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring, | |||
209 | 209 | ||
210 | buffer_info->skb = skb; | 210 | buffer_info->skb = skb; |
211 | buffer_info->dma = dma_map_single(&pdev->dev, skb->data, | 211 | buffer_info->dma = dma_map_single(&pdev->dev, skb->data, |
212 | bufsz, | 212 | bufsz, |
213 | DMA_FROM_DEVICE); | 213 | DMA_FROM_DEVICE); |
214 | if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { | 214 | if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { |
215 | dev_kfree_skb(buffer_info->skb); | 215 | dev_kfree_skb(buffer_info->skb); |
@@ -219,14 +219,14 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring, | |||
219 | } | 219 | } |
220 | } | 220 | } |
221 | /* Refresh the desc even if buffer_addrs didn't change because | 221 | /* Refresh the desc even if buffer_addrs didn't change because |
222 | * each write-back erases this info. */ | 222 | * each write-back erases this info. |
223 | */ | ||
223 | if (adapter->rx_ps_hdr_size) { | 224 | if (adapter->rx_ps_hdr_size) { |
224 | rx_desc->read.pkt_addr = | 225 | rx_desc->read.pkt_addr = |
225 | cpu_to_le64(buffer_info->page_dma); | 226 | cpu_to_le64(buffer_info->page_dma); |
226 | rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma); | 227 | rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma); |
227 | } else { | 228 | } else { |
228 | rx_desc->read.pkt_addr = | 229 | rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma); |
229 | cpu_to_le64(buffer_info->dma); | ||
230 | rx_desc->read.hdr_addr = 0; | 230 | rx_desc->read.hdr_addr = 0; |
231 | } | 231 | } |
232 | 232 | ||
@@ -247,7 +247,8 @@ no_buffers: | |||
247 | /* Force memory writes to complete before letting h/w | 247 | /* Force memory writes to complete before letting h/w |
248 | * know there are new descriptors to fetch. (Only | 248 | * know there are new descriptors to fetch. (Only |
249 | * applicable for weak-ordered memory model archs, | 249 | * applicable for weak-ordered memory model archs, |
250 | * such as IA-64). */ | 250 | * such as IA-64). |
251 | */ | ||
251 | wmb(); | 252 | wmb(); |
252 | writel(i, adapter->hw.hw_addr + rx_ring->tail); | 253 | writel(i, adapter->hw.hw_addr + rx_ring->tail); |
253 | } | 254 | } |
@@ -261,7 +262,7 @@ no_buffers: | |||
261 | * is no guarantee that everything was cleaned | 262 | * is no guarantee that everything was cleaned |
262 | **/ | 263 | **/ |
263 | static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter, | 264 | static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter, |
264 | int *work_done, int work_to_do) | 265 | int *work_done, int work_to_do) |
265 | { | 266 | { |
266 | struct igbvf_ring *rx_ring = adapter->rx_ring; | 267 | struct igbvf_ring *rx_ring = adapter->rx_ring; |
267 | struct net_device *netdev = adapter->netdev; | 268 | struct net_device *netdev = adapter->netdev; |
@@ -292,8 +293,9 @@ static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter, | |||
292 | * that case, it fills the header buffer and spills the rest | 293 | * that case, it fills the header buffer and spills the rest |
293 | * into the page. | 294 | * into the page. |
294 | */ | 295 | */ |
295 | hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info) & | 296 | hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info) |
296 | E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT; | 297 | & E1000_RXDADV_HDRBUFLEN_MASK) >> |
298 | E1000_RXDADV_HDRBUFLEN_SHIFT; | ||
297 | if (hlen > adapter->rx_ps_hdr_size) | 299 | if (hlen > adapter->rx_ps_hdr_size) |
298 | hlen = adapter->rx_ps_hdr_size; | 300 | hlen = adapter->rx_ps_hdr_size; |
299 | 301 | ||
@@ -306,7 +308,7 @@ static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter, | |||
306 | buffer_info->skb = NULL; | 308 | buffer_info->skb = NULL; |
307 | if (!adapter->rx_ps_hdr_size) { | 309 | if (!adapter->rx_ps_hdr_size) { |
308 | dma_unmap_single(&pdev->dev, buffer_info->dma, | 310 | dma_unmap_single(&pdev->dev, buffer_info->dma, |
309 | adapter->rx_buffer_len, | 311 | adapter->rx_buffer_len, |
310 | DMA_FROM_DEVICE); | 312 | DMA_FROM_DEVICE); |
311 | buffer_info->dma = 0; | 313 | buffer_info->dma = 0; |
312 | skb_put(skb, length); | 314 | skb_put(skb, length); |
@@ -315,21 +317,21 @@ static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter, | |||
315 | 317 | ||
316 | if (!skb_shinfo(skb)->nr_frags) { | 318 | if (!skb_shinfo(skb)->nr_frags) { |
317 | dma_unmap_single(&pdev->dev, buffer_info->dma, | 319 | dma_unmap_single(&pdev->dev, buffer_info->dma, |
318 | adapter->rx_ps_hdr_size, | 320 | adapter->rx_ps_hdr_size, |
319 | DMA_FROM_DEVICE); | 321 | DMA_FROM_DEVICE); |
320 | skb_put(skb, hlen); | 322 | skb_put(skb, hlen); |
321 | } | 323 | } |
322 | 324 | ||
323 | if (length) { | 325 | if (length) { |
324 | dma_unmap_page(&pdev->dev, buffer_info->page_dma, | 326 | dma_unmap_page(&pdev->dev, buffer_info->page_dma, |
325 | PAGE_SIZE / 2, | 327 | PAGE_SIZE / 2, |
326 | DMA_FROM_DEVICE); | 328 | DMA_FROM_DEVICE); |
327 | buffer_info->page_dma = 0; | 329 | buffer_info->page_dma = 0; |
328 | 330 | ||
329 | skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, | 331 | skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, |
330 | buffer_info->page, | 332 | buffer_info->page, |
331 | buffer_info->page_offset, | 333 | buffer_info->page_offset, |
332 | length); | 334 | length); |
333 | 335 | ||
334 | if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) || | 336 | if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) || |
335 | (page_count(buffer_info->page) != 1)) | 337 | (page_count(buffer_info->page) != 1)) |
@@ -370,7 +372,7 @@ send_up: | |||
370 | skb->protocol = eth_type_trans(skb, netdev); | 372 | skb->protocol = eth_type_trans(skb, netdev); |
371 | 373 | ||
372 | igbvf_receive_skb(adapter, netdev, skb, staterr, | 374 | igbvf_receive_skb(adapter, netdev, skb, staterr, |
373 | rx_desc->wb.upper.vlan); | 375 | rx_desc->wb.upper.vlan); |
374 | 376 | ||
375 | next_desc: | 377 | next_desc: |
376 | rx_desc->wb.upper.status_error = 0; | 378 | rx_desc->wb.upper.status_error = 0; |
@@ -402,7 +404,7 @@ next_desc: | |||
402 | } | 404 | } |
403 | 405 | ||
404 | static void igbvf_put_txbuf(struct igbvf_adapter *adapter, | 406 | static void igbvf_put_txbuf(struct igbvf_adapter *adapter, |
405 | struct igbvf_buffer *buffer_info) | 407 | struct igbvf_buffer *buffer_info) |
406 | { | 408 | { |
407 | if (buffer_info->dma) { | 409 | if (buffer_info->dma) { |
408 | if (buffer_info->mapped_as_page) | 410 | if (buffer_info->mapped_as_page) |
@@ -431,7 +433,7 @@ static void igbvf_put_txbuf(struct igbvf_adapter *adapter, | |||
431 | * Return 0 on success, negative on failure | 433 | * Return 0 on success, negative on failure |
432 | **/ | 434 | **/ |
433 | int igbvf_setup_tx_resources(struct igbvf_adapter *adapter, | 435 | int igbvf_setup_tx_resources(struct igbvf_adapter *adapter, |
434 | struct igbvf_ring *tx_ring) | 436 | struct igbvf_ring *tx_ring) |
435 | { | 437 | { |
436 | struct pci_dev *pdev = adapter->pdev; | 438 | struct pci_dev *pdev = adapter->pdev; |
437 | int size; | 439 | int size; |
@@ -458,7 +460,7 @@ int igbvf_setup_tx_resources(struct igbvf_adapter *adapter, | |||
458 | err: | 460 | err: |
459 | vfree(tx_ring->buffer_info); | 461 | vfree(tx_ring->buffer_info); |
460 | dev_err(&adapter->pdev->dev, | 462 | dev_err(&adapter->pdev->dev, |
461 | "Unable to allocate memory for the transmit descriptor ring\n"); | 463 | "Unable to allocate memory for the transmit descriptor ring\n"); |
462 | return -ENOMEM; | 464 | return -ENOMEM; |
463 | } | 465 | } |
464 | 466 | ||
@@ -501,7 +503,7 @@ err: | |||
501 | vfree(rx_ring->buffer_info); | 503 | vfree(rx_ring->buffer_info); |
502 | rx_ring->buffer_info = NULL; | 504 | rx_ring->buffer_info = NULL; |
503 | dev_err(&adapter->pdev->dev, | 505 | dev_err(&adapter->pdev->dev, |
504 | "Unable to allocate memory for the receive descriptor ring\n"); | 506 | "Unable to allocate memory for the receive descriptor ring\n"); |
505 | return -ENOMEM; | 507 | return -ENOMEM; |
506 | } | 508 | } |
507 | 509 | ||
@@ -578,13 +580,13 @@ static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring) | |||
578 | for (i = 0; i < rx_ring->count; i++) { | 580 | for (i = 0; i < rx_ring->count; i++) { |
579 | buffer_info = &rx_ring->buffer_info[i]; | 581 | buffer_info = &rx_ring->buffer_info[i]; |
580 | if (buffer_info->dma) { | 582 | if (buffer_info->dma) { |
581 | if (adapter->rx_ps_hdr_size){ | 583 | if (adapter->rx_ps_hdr_size) { |
582 | dma_unmap_single(&pdev->dev, buffer_info->dma, | 584 | dma_unmap_single(&pdev->dev, buffer_info->dma, |
583 | adapter->rx_ps_hdr_size, | 585 | adapter->rx_ps_hdr_size, |
584 | DMA_FROM_DEVICE); | 586 | DMA_FROM_DEVICE); |
585 | } else { | 587 | } else { |
586 | dma_unmap_single(&pdev->dev, buffer_info->dma, | 588 | dma_unmap_single(&pdev->dev, buffer_info->dma, |
587 | adapter->rx_buffer_len, | 589 | adapter->rx_buffer_len, |
588 | DMA_FROM_DEVICE); | 590 | DMA_FROM_DEVICE); |
589 | } | 591 | } |
590 | buffer_info->dma = 0; | 592 | buffer_info->dma = 0; |
@@ -599,7 +601,7 @@ static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring) | |||
599 | if (buffer_info->page_dma) | 601 | if (buffer_info->page_dma) |
600 | dma_unmap_page(&pdev->dev, | 602 | dma_unmap_page(&pdev->dev, |
601 | buffer_info->page_dma, | 603 | buffer_info->page_dma, |
602 | PAGE_SIZE / 2, | 604 | PAGE_SIZE / 2, |
603 | DMA_FROM_DEVICE); | 605 | DMA_FROM_DEVICE); |
604 | put_page(buffer_info->page); | 606 | put_page(buffer_info->page); |
605 | buffer_info->page = NULL; | 607 | buffer_info->page = NULL; |
@@ -638,7 +640,7 @@ void igbvf_free_rx_resources(struct igbvf_ring *rx_ring) | |||
638 | rx_ring->buffer_info = NULL; | 640 | rx_ring->buffer_info = NULL; |
639 | 641 | ||
640 | dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, | 642 | dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, |
641 | rx_ring->dma); | 643 | rx_ring->dma); |
642 | rx_ring->desc = NULL; | 644 | rx_ring->desc = NULL; |
643 | } | 645 | } |
644 | 646 | ||
@@ -649,13 +651,12 @@ void igbvf_free_rx_resources(struct igbvf_ring *rx_ring) | |||
649 | * @packets: the number of packets during this measurement interval | 651 | * @packets: the number of packets during this measurement interval |
650 | * @bytes: the number of bytes during this measurement interval | 652 | * @bytes: the number of bytes during this measurement interval |
651 | * | 653 | * |
652 | * Stores a new ITR value based on packets and byte | 654 | * Stores a new ITR value based on packets and byte counts during the last |
653 | * counts during the last interrupt. The advantage of per interrupt | 655 | * interrupt. The advantage of per interrupt computation is faster updates |
654 | * computation is faster updates and more accurate ITR for the current | 656 | * and more accurate ITR for the current traffic pattern. Constants in this |
655 | * traffic pattern. Constants in this function were computed | 657 | * function were computed based on theoretical maximum wire speed and thresholds |
656 | * based on theoretical maximum wire speed and thresholds were set based | 658 | * were set based on testing data as well as attempting to minimize response |
657 | * on testing data as well as attempting to minimize response time | 659 | * time while increasing bulk throughput. |
658 | * while increasing bulk throughput. | ||
659 | **/ | 660 | **/ |
660 | static enum latency_range igbvf_update_itr(struct igbvf_adapter *adapter, | 661 | static enum latency_range igbvf_update_itr(struct igbvf_adapter *adapter, |
661 | enum latency_range itr_setting, | 662 | enum latency_range itr_setting, |
@@ -744,17 +745,15 @@ static void igbvf_set_itr(struct igbvf_adapter *adapter) | |||
744 | 745 | ||
745 | new_itr = igbvf_range_to_itr(adapter->tx_ring->itr_range); | 746 | new_itr = igbvf_range_to_itr(adapter->tx_ring->itr_range); |
746 | 747 | ||
747 | |||
748 | if (new_itr != adapter->tx_ring->itr_val) { | 748 | if (new_itr != adapter->tx_ring->itr_val) { |
749 | u32 current_itr = adapter->tx_ring->itr_val; | 749 | u32 current_itr = adapter->tx_ring->itr_val; |
750 | /* | 750 | /* this attempts to bias the interrupt rate towards Bulk |
751 | * this attempts to bias the interrupt rate towards Bulk | ||
752 | * by adding intermediate steps when interrupt rate is | 751 | * by adding intermediate steps when interrupt rate is |
753 | * increasing | 752 | * increasing |
754 | */ | 753 | */ |
755 | new_itr = new_itr > current_itr ? | 754 | new_itr = new_itr > current_itr ? |
756 | min(current_itr + (new_itr >> 2), new_itr) : | 755 | min(current_itr + (new_itr >> 2), new_itr) : |
757 | new_itr; | 756 | new_itr; |
758 | adapter->tx_ring->itr_val = new_itr; | 757 | adapter->tx_ring->itr_val = new_itr; |
759 | 758 | ||
760 | adapter->tx_ring->set_itr = 1; | 759 | adapter->tx_ring->set_itr = 1; |
@@ -772,9 +771,10 @@ static void igbvf_set_itr(struct igbvf_adapter *adapter) | |||
772 | 771 | ||
773 | if (new_itr != adapter->rx_ring->itr_val) { | 772 | if (new_itr != adapter->rx_ring->itr_val) { |
774 | u32 current_itr = adapter->rx_ring->itr_val; | 773 | u32 current_itr = adapter->rx_ring->itr_val; |
774 | |||
775 | new_itr = new_itr > current_itr ? | 775 | new_itr = new_itr > current_itr ? |
776 | min(current_itr + (new_itr >> 2), new_itr) : | 776 | min(current_itr + (new_itr >> 2), new_itr) : |
777 | new_itr; | 777 | new_itr; |
778 | adapter->rx_ring->itr_val = new_itr; | 778 | adapter->rx_ring->itr_val = new_itr; |
779 | 779 | ||
780 | adapter->rx_ring->set_itr = 1; | 780 | adapter->rx_ring->set_itr = 1; |
@@ -829,7 +829,7 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring) | |||
829 | segs = skb_shinfo(skb)->gso_segs ?: 1; | 829 | segs = skb_shinfo(skb)->gso_segs ?: 1; |
830 | /* multiply data chunks by size of headers */ | 830 | /* multiply data chunks by size of headers */ |
831 | bytecount = ((segs - 1) * skb_headlen(skb)) + | 831 | bytecount = ((segs - 1) * skb_headlen(skb)) + |
832 | skb->len; | 832 | skb->len; |
833 | total_packets += segs; | 833 | total_packets += segs; |
834 | total_bytes += bytecount; | 834 | total_bytes += bytecount; |
835 | } | 835 | } |
@@ -849,9 +849,8 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring) | |||
849 | 849 | ||
850 | tx_ring->next_to_clean = i; | 850 | tx_ring->next_to_clean = i; |
851 | 851 | ||
852 | if (unlikely(count && | 852 | if (unlikely(count && netif_carrier_ok(netdev) && |
853 | netif_carrier_ok(netdev) && | 853 | igbvf_desc_unused(tx_ring) >= IGBVF_TX_QUEUE_WAKE)) { |
854 | igbvf_desc_unused(tx_ring) >= IGBVF_TX_QUEUE_WAKE)) { | ||
855 | /* Make sure that anybody stopping the queue after this | 854 | /* Make sure that anybody stopping the queue after this |
856 | * sees the new next_to_clean. | 855 | * sees the new next_to_clean. |
857 | */ | 856 | */ |
@@ -902,8 +901,9 @@ static irqreturn_t igbvf_intr_msix_tx(int irq, void *data) | |||
902 | adapter->total_tx_bytes = 0; | 901 | adapter->total_tx_bytes = 0; |
903 | adapter->total_tx_packets = 0; | 902 | adapter->total_tx_packets = 0; |
904 | 903 | ||
905 | /* auto mask will automatically reenable the interrupt when we write | 904 | /* auto mask will automatically re-enable the interrupt when we write |
906 | * EICS */ | 905 | * EICS |
906 | */ | ||
907 | if (!igbvf_clean_tx_irq(tx_ring)) | 907 | if (!igbvf_clean_tx_irq(tx_ring)) |
908 | /* Ring was not completely cleaned, so fire another interrupt */ | 908 | /* Ring was not completely cleaned, so fire another interrupt */ |
909 | ew32(EICS, tx_ring->eims_value); | 909 | ew32(EICS, tx_ring->eims_value); |
@@ -941,15 +941,16 @@ static irqreturn_t igbvf_intr_msix_rx(int irq, void *data) | |||
941 | #define IGBVF_NO_QUEUE -1 | 941 | #define IGBVF_NO_QUEUE -1 |
942 | 942 | ||
943 | static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue, | 943 | static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue, |
944 | int tx_queue, int msix_vector) | 944 | int tx_queue, int msix_vector) |
945 | { | 945 | { |
946 | struct e1000_hw *hw = &adapter->hw; | 946 | struct e1000_hw *hw = &adapter->hw; |
947 | u32 ivar, index; | 947 | u32 ivar, index; |
948 | 948 | ||
949 | /* 82576 uses a table-based method for assigning vectors. | 949 | /* 82576 uses a table-based method for assigning vectors. |
950 | Each queue has a single entry in the table to which we write | 950 | * Each queue has a single entry in the table to which we write |
951 | a vector number along with a "valid" bit. Sadly, the layout | 951 | * a vector number along with a "valid" bit. Sadly, the layout |
952 | of the table is somewhat counterintuitive. */ | 952 | * of the table is somewhat counterintuitive. |
953 | */ | ||
953 | if (rx_queue > IGBVF_NO_QUEUE) { | 954 | if (rx_queue > IGBVF_NO_QUEUE) { |
954 | index = (rx_queue >> 1); | 955 | index = (rx_queue >> 1); |
955 | ivar = array_er32(IVAR0, index); | 956 | ivar = array_er32(IVAR0, index); |
@@ -984,6 +985,7 @@ static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue, | |||
984 | 985 | ||
985 | /** | 986 | /** |
986 | * igbvf_configure_msix - Configure MSI-X hardware | 987 | * igbvf_configure_msix - Configure MSI-X hardware |
988 | * @adapter: board private structure | ||
987 | * | 989 | * |
988 | * igbvf_configure_msix sets up the hardware to properly | 990 | * igbvf_configure_msix sets up the hardware to properly |
989 | * generate MSI-X interrupts. | 991 | * generate MSI-X interrupts. |
@@ -1027,6 +1029,7 @@ static void igbvf_reset_interrupt_capability(struct igbvf_adapter *adapter) | |||
1027 | 1029 | ||
1028 | /** | 1030 | /** |
1029 | * igbvf_set_interrupt_capability - set MSI or MSI-X if supported | 1031 | * igbvf_set_interrupt_capability - set MSI or MSI-X if supported |
1032 | * @adapter: board private structure | ||
1030 | * | 1033 | * |
1031 | * Attempt to configure interrupts using the best available | 1034 | * Attempt to configure interrupts using the best available |
1032 | * capabilities of the hardware and kernel. | 1035 | * capabilities of the hardware and kernel. |
@@ -1036,27 +1039,28 @@ static void igbvf_set_interrupt_capability(struct igbvf_adapter *adapter) | |||
1036 | int err = -ENOMEM; | 1039 | int err = -ENOMEM; |
1037 | int i; | 1040 | int i; |
1038 | 1041 | ||
1039 | /* we allocate 3 vectors, 1 for tx, 1 for rx, one for pf messages */ | 1042 | /* we allocate 3 vectors, 1 for Tx, 1 for Rx, one for PF messages */ |
1040 | adapter->msix_entries = kcalloc(3, sizeof(struct msix_entry), | 1043 | adapter->msix_entries = kcalloc(3, sizeof(struct msix_entry), |
1041 | GFP_KERNEL); | 1044 | GFP_KERNEL); |
1042 | if (adapter->msix_entries) { | 1045 | if (adapter->msix_entries) { |
1043 | for (i = 0; i < 3; i++) | 1046 | for (i = 0; i < 3; i++) |
1044 | adapter->msix_entries[i].entry = i; | 1047 | adapter->msix_entries[i].entry = i; |
1045 | 1048 | ||
1046 | err = pci_enable_msix_range(adapter->pdev, | 1049 | err = pci_enable_msix_range(adapter->pdev, |
1047 | adapter->msix_entries, 3, 3); | 1050 | adapter->msix_entries, 3, 3); |
1048 | } | 1051 | } |
1049 | 1052 | ||
1050 | if (err < 0) { | 1053 | if (err < 0) { |
1051 | /* MSI-X failed */ | 1054 | /* MSI-X failed */ |
1052 | dev_err(&adapter->pdev->dev, | 1055 | dev_err(&adapter->pdev->dev, |
1053 | "Failed to initialize MSI-X interrupts.\n"); | 1056 | "Failed to initialize MSI-X interrupts.\n"); |
1054 | igbvf_reset_interrupt_capability(adapter); | 1057 | igbvf_reset_interrupt_capability(adapter); |
1055 | } | 1058 | } |
1056 | } | 1059 | } |
1057 | 1060 | ||
1058 | /** | 1061 | /** |
1059 | * igbvf_request_msix - Initialize MSI-X interrupts | 1062 | * igbvf_request_msix - Initialize MSI-X interrupts |
1063 | * @adapter: board private structure | ||
1060 | * | 1064 | * |
1061 | * igbvf_request_msix allocates MSI-X vectors and requests interrupts from the | 1065 | * igbvf_request_msix allocates MSI-X vectors and requests interrupts from the |
1062 | * kernel. | 1066 | * kernel. |
@@ -1075,8 +1079,8 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter) | |||
1075 | } | 1079 | } |
1076 | 1080 | ||
1077 | err = request_irq(adapter->msix_entries[vector].vector, | 1081 | err = request_irq(adapter->msix_entries[vector].vector, |
1078 | igbvf_intr_msix_tx, 0, adapter->tx_ring->name, | 1082 | igbvf_intr_msix_tx, 0, adapter->tx_ring->name, |
1079 | netdev); | 1083 | netdev); |
1080 | if (err) | 1084 | if (err) |
1081 | goto out; | 1085 | goto out; |
1082 | 1086 | ||
@@ -1085,8 +1089,8 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter) | |||
1085 | vector++; | 1089 | vector++; |
1086 | 1090 | ||
1087 | err = request_irq(adapter->msix_entries[vector].vector, | 1091 | err = request_irq(adapter->msix_entries[vector].vector, |
1088 | igbvf_intr_msix_rx, 0, adapter->rx_ring->name, | 1092 | igbvf_intr_msix_rx, 0, adapter->rx_ring->name, |
1089 | netdev); | 1093 | netdev); |
1090 | if (err) | 1094 | if (err) |
1091 | goto out; | 1095 | goto out; |
1092 | 1096 | ||
@@ -1095,7 +1099,7 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter) | |||
1095 | vector++; | 1099 | vector++; |
1096 | 1100 | ||
1097 | err = request_irq(adapter->msix_entries[vector].vector, | 1101 | err = request_irq(adapter->msix_entries[vector].vector, |
1098 | igbvf_msix_other, 0, netdev->name, netdev); | 1102 | igbvf_msix_other, 0, netdev->name, netdev); |
1099 | if (err) | 1103 | if (err) |
1100 | goto out; | 1104 | goto out; |
1101 | 1105 | ||
@@ -1130,6 +1134,7 @@ static int igbvf_alloc_queues(struct igbvf_adapter *adapter) | |||
1130 | 1134 | ||
1131 | /** | 1135 | /** |
1132 | * igbvf_request_irq - initialize interrupts | 1136 | * igbvf_request_irq - initialize interrupts |
1137 | * @adapter: board private structure | ||
1133 | * | 1138 | * |
1134 | * Attempts to configure interrupts using the best available | 1139 | * Attempts to configure interrupts using the best available |
1135 | * capabilities of the hardware and kernel. | 1140 | * capabilities of the hardware and kernel. |
@@ -1146,7 +1151,7 @@ static int igbvf_request_irq(struct igbvf_adapter *adapter) | |||
1146 | return err; | 1151 | return err; |
1147 | 1152 | ||
1148 | dev_err(&adapter->pdev->dev, | 1153 | dev_err(&adapter->pdev->dev, |
1149 | "Unable to allocate interrupt, Error: %d\n", err); | 1154 | "Unable to allocate interrupt, Error: %d\n", err); |
1150 | 1155 | ||
1151 | return err; | 1156 | return err; |
1152 | } | 1157 | } |
@@ -1164,6 +1169,7 @@ static void igbvf_free_irq(struct igbvf_adapter *adapter) | |||
1164 | 1169 | ||
1165 | /** | 1170 | /** |
1166 | * igbvf_irq_disable - Mask off interrupt generation on the NIC | 1171 | * igbvf_irq_disable - Mask off interrupt generation on the NIC |
1172 | * @adapter: board private structure | ||
1167 | **/ | 1173 | **/ |
1168 | static void igbvf_irq_disable(struct igbvf_adapter *adapter) | 1174 | static void igbvf_irq_disable(struct igbvf_adapter *adapter) |
1169 | { | 1175 | { |
@@ -1177,6 +1183,7 @@ static void igbvf_irq_disable(struct igbvf_adapter *adapter) | |||
1177 | 1183 | ||
1178 | /** | 1184 | /** |
1179 | * igbvf_irq_enable - Enable default interrupt generation settings | 1185 | * igbvf_irq_enable - Enable default interrupt generation settings |
1186 | * @adapter: board private structure | ||
1180 | **/ | 1187 | **/ |
1181 | static void igbvf_irq_enable(struct igbvf_adapter *adapter) | 1188 | static void igbvf_irq_enable(struct igbvf_adapter *adapter) |
1182 | { | 1189 | { |
@@ -1252,7 +1259,7 @@ static int igbvf_vlan_rx_kill_vid(struct net_device *netdev, | |||
1252 | 1259 | ||
1253 | if (hw->mac.ops.set_vfta(hw, vid, false)) { | 1260 | if (hw->mac.ops.set_vfta(hw, vid, false)) { |
1254 | dev_err(&adapter->pdev->dev, | 1261 | dev_err(&adapter->pdev->dev, |
1255 | "Failed to remove vlan id %d\n", vid); | 1262 | "Failed to remove vlan id %d\n", vid); |
1256 | return -EINVAL; | 1263 | return -EINVAL; |
1257 | } | 1264 | } |
1258 | clear_bit(vid, adapter->active_vlans); | 1265 | clear_bit(vid, adapter->active_vlans); |
@@ -1298,7 +1305,7 @@ static void igbvf_configure_tx(struct igbvf_adapter *adapter) | |||
1298 | 1305 | ||
1299 | /* Turn off Relaxed Ordering on head write-backs. The writebacks | 1306 | /* Turn off Relaxed Ordering on head write-backs. The writebacks |
1300 | * MUST be delivered in order or it will completely screw up | 1307 | * MUST be delivered in order or it will completely screw up |
1301 | * our bookeeping. | 1308 | * our bookkeeping. |
1302 | */ | 1309 | */ |
1303 | dca_txctrl = er32(DCA_TXCTRL(0)); | 1310 | dca_txctrl = er32(DCA_TXCTRL(0)); |
1304 | dca_txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN; | 1311 | dca_txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN; |
@@ -1325,15 +1332,15 @@ static void igbvf_setup_srrctl(struct igbvf_adapter *adapter) | |||
1325 | u32 srrctl = 0; | 1332 | u32 srrctl = 0; |
1326 | 1333 | ||
1327 | srrctl &= ~(E1000_SRRCTL_DESCTYPE_MASK | | 1334 | srrctl &= ~(E1000_SRRCTL_DESCTYPE_MASK | |
1328 | E1000_SRRCTL_BSIZEHDR_MASK | | 1335 | E1000_SRRCTL_BSIZEHDR_MASK | |
1329 | E1000_SRRCTL_BSIZEPKT_MASK); | 1336 | E1000_SRRCTL_BSIZEPKT_MASK); |
1330 | 1337 | ||
1331 | /* Enable queue drop to avoid head of line blocking */ | 1338 | /* Enable queue drop to avoid head of line blocking */ |
1332 | srrctl |= E1000_SRRCTL_DROP_EN; | 1339 | srrctl |= E1000_SRRCTL_DROP_EN; |
1333 | 1340 | ||
1334 | /* Setup buffer sizes */ | 1341 | /* Setup buffer sizes */ |
1335 | srrctl |= ALIGN(adapter->rx_buffer_len, 1024) >> | 1342 | srrctl |= ALIGN(adapter->rx_buffer_len, 1024) >> |
1336 | E1000_SRRCTL_BSIZEPKT_SHIFT; | 1343 | E1000_SRRCTL_BSIZEPKT_SHIFT; |
1337 | 1344 | ||
1338 | if (adapter->rx_buffer_len < 2048) { | 1345 | if (adapter->rx_buffer_len < 2048) { |
1339 | adapter->rx_ps_hdr_size = 0; | 1346 | adapter->rx_ps_hdr_size = 0; |
@@ -1341,7 +1348,7 @@ static void igbvf_setup_srrctl(struct igbvf_adapter *adapter) | |||
1341 | } else { | 1348 | } else { |
1342 | adapter->rx_ps_hdr_size = 128; | 1349 | adapter->rx_ps_hdr_size = 128; |
1343 | srrctl |= adapter->rx_ps_hdr_size << | 1350 | srrctl |= adapter->rx_ps_hdr_size << |
1344 | E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; | 1351 | E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; |
1345 | srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; | 1352 | srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; |
1346 | } | 1353 | } |
1347 | 1354 | ||
@@ -1369,8 +1376,7 @@ static void igbvf_configure_rx(struct igbvf_adapter *adapter) | |||
1369 | 1376 | ||
1370 | rdlen = rx_ring->count * sizeof(union e1000_adv_rx_desc); | 1377 | rdlen = rx_ring->count * sizeof(union e1000_adv_rx_desc); |
1371 | 1378 | ||
1372 | /* | 1379 | /* Setup the HW Rx Head and Tail Descriptor Pointers and |
1373 | * Setup the HW Rx Head and Tail Descriptor Pointers and | ||
1374 | * the Base and Length of the Rx Descriptor Ring | 1380 | * the Base and Length of the Rx Descriptor Ring |
1375 | */ | 1381 | */ |
1376 | rdba = rx_ring->dma; | 1382 | rdba = rx_ring->dma; |
@@ -1441,10 +1447,11 @@ static void igbvf_configure(struct igbvf_adapter *adapter) | |||
1441 | igbvf_setup_srrctl(adapter); | 1447 | igbvf_setup_srrctl(adapter); |
1442 | igbvf_configure_rx(adapter); | 1448 | igbvf_configure_rx(adapter); |
1443 | igbvf_alloc_rx_buffers(adapter->rx_ring, | 1449 | igbvf_alloc_rx_buffers(adapter->rx_ring, |
1444 | igbvf_desc_unused(adapter->rx_ring)); | 1450 | igbvf_desc_unused(adapter->rx_ring)); |
1445 | } | 1451 | } |
1446 | 1452 | ||
1447 | /* igbvf_reset - bring the hardware into a known good state | 1453 | /* igbvf_reset - bring the hardware into a known good state |
1454 | * @adapter: private board structure | ||
1448 | * | 1455 | * |
1449 | * This function boots the hardware and enables some settings that | 1456 | * This function boots the hardware and enables some settings that |
1450 | * require a configuration cycle of the hardware - those cannot be | 1457 | * require a configuration cycle of the hardware - those cannot be |
@@ -1494,7 +1501,6 @@ int igbvf_up(struct igbvf_adapter *adapter) | |||
1494 | hw->mac.get_link_status = 1; | 1501 | hw->mac.get_link_status = 1; |
1495 | mod_timer(&adapter->watchdog_timer, jiffies + 1); | 1502 | mod_timer(&adapter->watchdog_timer, jiffies + 1); |
1496 | 1503 | ||
1497 | |||
1498 | return 0; | 1504 | return 0; |
1499 | } | 1505 | } |
1500 | 1506 | ||
@@ -1504,8 +1510,7 @@ void igbvf_down(struct igbvf_adapter *adapter) | |||
1504 | struct e1000_hw *hw = &adapter->hw; | 1510 | struct e1000_hw *hw = &adapter->hw; |
1505 | u32 rxdctl, txdctl; | 1511 | u32 rxdctl, txdctl; |
1506 | 1512 | ||
1507 | /* | 1513 | /* signal that we're down so the interrupt handler does not |
1508 | * signal that we're down so the interrupt handler does not | ||
1509 | * reschedule our watchdog timer | 1514 | * reschedule our watchdog timer |
1510 | */ | 1515 | */ |
1511 | set_bit(__IGBVF_DOWN, &adapter->state); | 1516 | set_bit(__IGBVF_DOWN, &adapter->state); |
@@ -1547,7 +1552,7 @@ void igbvf_reinit_locked(struct igbvf_adapter *adapter) | |||
1547 | { | 1552 | { |
1548 | might_sleep(); | 1553 | might_sleep(); |
1549 | while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state)) | 1554 | while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state)) |
1550 | msleep(1); | 1555 | usleep_range(1000, 2000); |
1551 | igbvf_down(adapter); | 1556 | igbvf_down(adapter); |
1552 | igbvf_up(adapter); | 1557 | igbvf_up(adapter); |
1553 | clear_bit(__IGBVF_RESETTING, &adapter->state); | 1558 | clear_bit(__IGBVF_RESETTING, &adapter->state); |
@@ -1662,8 +1667,7 @@ static int igbvf_open(struct net_device *netdev) | |||
1662 | if (err) | 1667 | if (err) |
1663 | goto err_setup_rx; | 1668 | goto err_setup_rx; |
1664 | 1669 | ||
1665 | /* | 1670 | /* before we allocate an interrupt, we must be ready to handle it. |
1666 | * before we allocate an interrupt, we must be ready to handle it. | ||
1667 | * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt | 1671 | * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt |
1668 | * as soon as we call pci_request_irq, so we have to setup our | 1672 | * as soon as we call pci_request_irq, so we have to setup our |
1669 | * clean_rx handler before we do so. | 1673 | * clean_rx handler before we do so. |
@@ -1725,6 +1729,7 @@ static int igbvf_close(struct net_device *netdev) | |||
1725 | 1729 | ||
1726 | return 0; | 1730 | return 0; |
1727 | } | 1731 | } |
1732 | |||
1728 | /** | 1733 | /** |
1729 | * igbvf_set_mac - Change the Ethernet Address of the NIC | 1734 | * igbvf_set_mac - Change the Ethernet Address of the NIC |
1730 | * @netdev: network interface device structure | 1735 | * @netdev: network interface device structure |
@@ -1753,15 +1758,15 @@ static int igbvf_set_mac(struct net_device *netdev, void *p) | |||
1753 | return 0; | 1758 | return 0; |
1754 | } | 1759 | } |
1755 | 1760 | ||
1756 | #define UPDATE_VF_COUNTER(reg, name) \ | 1761 | #define UPDATE_VF_COUNTER(reg, name) \ |
1757 | { \ | 1762 | { \ |
1758 | u32 current_counter = er32(reg); \ | 1763 | u32 current_counter = er32(reg); \ |
1759 | if (current_counter < adapter->stats.last_##name) \ | 1764 | if (current_counter < adapter->stats.last_##name) \ |
1760 | adapter->stats.name += 0x100000000LL; \ | 1765 | adapter->stats.name += 0x100000000LL; \ |
1761 | adapter->stats.last_##name = current_counter; \ | 1766 | adapter->stats.last_##name = current_counter; \ |
1762 | adapter->stats.name &= 0xFFFFFFFF00000000LL; \ | 1767 | adapter->stats.name &= 0xFFFFFFFF00000000LL; \ |
1763 | adapter->stats.name |= current_counter; \ | 1768 | adapter->stats.name |= current_counter; \ |
1764 | } | 1769 | } |
1765 | 1770 | ||
1766 | /** | 1771 | /** |
1767 | * igbvf_update_stats - Update the board statistics counters | 1772 | * igbvf_update_stats - Update the board statistics counters |
@@ -1772,8 +1777,7 @@ void igbvf_update_stats(struct igbvf_adapter *adapter) | |||
1772 | struct e1000_hw *hw = &adapter->hw; | 1777 | struct e1000_hw *hw = &adapter->hw; |
1773 | struct pci_dev *pdev = adapter->pdev; | 1778 | struct pci_dev *pdev = adapter->pdev; |
1774 | 1779 | ||
1775 | /* | 1780 | /* Prevent stats update while adapter is being reset, link is down |
1776 | * Prevent stats update while adapter is being reset, link is down | ||
1777 | * or if the pci connection is down. | 1781 | * or if the pci connection is down. |
1778 | */ | 1782 | */ |
1779 | if (adapter->link_speed == 0) | 1783 | if (adapter->link_speed == 0) |
@@ -1832,7 +1836,7 @@ static bool igbvf_has_link(struct igbvf_adapter *adapter) | |||
1832 | **/ | 1836 | **/ |
1833 | static void igbvf_watchdog(unsigned long data) | 1837 | static void igbvf_watchdog(unsigned long data) |
1834 | { | 1838 | { |
1835 | struct igbvf_adapter *adapter = (struct igbvf_adapter *) data; | 1839 | struct igbvf_adapter *adapter = (struct igbvf_adapter *)data; |
1836 | 1840 | ||
1837 | /* Do the rest outside of interrupt context */ | 1841 | /* Do the rest outside of interrupt context */ |
1838 | schedule_work(&adapter->watchdog_task); | 1842 | schedule_work(&adapter->watchdog_task); |
@@ -1841,8 +1845,8 @@ static void igbvf_watchdog(unsigned long data) | |||
1841 | static void igbvf_watchdog_task(struct work_struct *work) | 1845 | static void igbvf_watchdog_task(struct work_struct *work) |
1842 | { | 1846 | { |
1843 | struct igbvf_adapter *adapter = container_of(work, | 1847 | struct igbvf_adapter *adapter = container_of(work, |
1844 | struct igbvf_adapter, | 1848 | struct igbvf_adapter, |
1845 | watchdog_task); | 1849 | watchdog_task); |
1846 | struct net_device *netdev = adapter->netdev; | 1850 | struct net_device *netdev = adapter->netdev; |
1847 | struct e1000_mac_info *mac = &adapter->hw.mac; | 1851 | struct e1000_mac_info *mac = &adapter->hw.mac; |
1848 | struct igbvf_ring *tx_ring = adapter->tx_ring; | 1852 | struct igbvf_ring *tx_ring = adapter->tx_ring; |
@@ -1855,8 +1859,8 @@ static void igbvf_watchdog_task(struct work_struct *work) | |||
1855 | if (link) { | 1859 | if (link) { |
1856 | if (!netif_carrier_ok(netdev)) { | 1860 | if (!netif_carrier_ok(netdev)) { |
1857 | mac->ops.get_link_up_info(&adapter->hw, | 1861 | mac->ops.get_link_up_info(&adapter->hw, |
1858 | &adapter->link_speed, | 1862 | &adapter->link_speed, |
1859 | &adapter->link_duplex); | 1863 | &adapter->link_duplex); |
1860 | igbvf_print_link_info(adapter); | 1864 | igbvf_print_link_info(adapter); |
1861 | 1865 | ||
1862 | netif_carrier_on(netdev); | 1866 | netif_carrier_on(netdev); |
@@ -1876,10 +1880,9 @@ static void igbvf_watchdog_task(struct work_struct *work) | |||
1876 | igbvf_update_stats(adapter); | 1880 | igbvf_update_stats(adapter); |
1877 | } else { | 1881 | } else { |
1878 | tx_pending = (igbvf_desc_unused(tx_ring) + 1 < | 1882 | tx_pending = (igbvf_desc_unused(tx_ring) + 1 < |
1879 | tx_ring->count); | 1883 | tx_ring->count); |
1880 | if (tx_pending) { | 1884 | if (tx_pending) { |
1881 | /* | 1885 | /* We've lost link, so the controller stops DMA, |
1882 | * We've lost link, so the controller stops DMA, | ||
1883 | * but we've got queued Tx work that's never going | 1886 | * but we've got queued Tx work that's never going |
1884 | * to get done, so reset controller to flush Tx. | 1887 | * to get done, so reset controller to flush Tx. |
1885 | * (Do the reset outside of interrupt context). | 1888 | * (Do the reset outside of interrupt context). |
@@ -1898,15 +1901,15 @@ static void igbvf_watchdog_task(struct work_struct *work) | |||
1898 | round_jiffies(jiffies + (2 * HZ))); | 1901 | round_jiffies(jiffies + (2 * HZ))); |
1899 | } | 1902 | } |
1900 | 1903 | ||
1901 | #define IGBVF_TX_FLAGS_CSUM 0x00000001 | 1904 | #define IGBVF_TX_FLAGS_CSUM 0x00000001 |
1902 | #define IGBVF_TX_FLAGS_VLAN 0x00000002 | 1905 | #define IGBVF_TX_FLAGS_VLAN 0x00000002 |
1903 | #define IGBVF_TX_FLAGS_TSO 0x00000004 | 1906 | #define IGBVF_TX_FLAGS_TSO 0x00000004 |
1904 | #define IGBVF_TX_FLAGS_IPV4 0x00000008 | 1907 | #define IGBVF_TX_FLAGS_IPV4 0x00000008 |
1905 | #define IGBVF_TX_FLAGS_VLAN_MASK 0xffff0000 | 1908 | #define IGBVF_TX_FLAGS_VLAN_MASK 0xffff0000 |
1906 | #define IGBVF_TX_FLAGS_VLAN_SHIFT 16 | 1909 | #define IGBVF_TX_FLAGS_VLAN_SHIFT 16 |
1907 | 1910 | ||
1908 | static int igbvf_tso(struct igbvf_adapter *adapter, | 1911 | static int igbvf_tso(struct igbvf_adapter *adapter, |
1909 | struct igbvf_ring *tx_ring, | 1912 | struct igbvf_ring *tx_ring, |
1910 | struct sk_buff *skb, u32 tx_flags, u8 *hdr_len, | 1913 | struct sk_buff *skb, u32 tx_flags, u8 *hdr_len, |
1911 | __be16 protocol) | 1914 | __be16 protocol) |
1912 | { | 1915 | { |
@@ -1930,17 +1933,18 @@ static int igbvf_tso(struct igbvf_adapter *adapter, | |||
1930 | 1933 | ||
1931 | if (protocol == htons(ETH_P_IP)) { | 1934 | if (protocol == htons(ETH_P_IP)) { |
1932 | struct iphdr *iph = ip_hdr(skb); | 1935 | struct iphdr *iph = ip_hdr(skb); |
1936 | |||
1933 | iph->tot_len = 0; | 1937 | iph->tot_len = 0; |
1934 | iph->check = 0; | 1938 | iph->check = 0; |
1935 | tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, | 1939 | tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, |
1936 | iph->daddr, 0, | 1940 | iph->daddr, 0, |
1937 | IPPROTO_TCP, | 1941 | IPPROTO_TCP, |
1938 | 0); | 1942 | 0); |
1939 | } else if (skb_is_gso_v6(skb)) { | 1943 | } else if (skb_is_gso_v6(skb)) { |
1940 | ipv6_hdr(skb)->payload_len = 0; | 1944 | ipv6_hdr(skb)->payload_len = 0; |
1941 | tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, | 1945 | tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, |
1942 | &ipv6_hdr(skb)->daddr, | 1946 | &ipv6_hdr(skb)->daddr, |
1943 | 0, IPPROTO_TCP, 0); | 1947 | 0, IPPROTO_TCP, 0); |
1944 | } | 1948 | } |
1945 | 1949 | ||
1946 | i = tx_ring->next_to_use; | 1950 | i = tx_ring->next_to_use; |
@@ -1984,7 +1988,7 @@ static int igbvf_tso(struct igbvf_adapter *adapter, | |||
1984 | } | 1988 | } |
1985 | 1989 | ||
1986 | static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter, | 1990 | static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter, |
1987 | struct igbvf_ring *tx_ring, | 1991 | struct igbvf_ring *tx_ring, |
1988 | struct sk_buff *skb, u32 tx_flags, | 1992 | struct sk_buff *skb, u32 tx_flags, |
1989 | __be16 protocol) | 1993 | __be16 protocol) |
1990 | { | 1994 | { |
@@ -2005,8 +2009,7 @@ static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter, | |||
2005 | info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT); | 2009 | info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT); |
2006 | if (skb->ip_summed == CHECKSUM_PARTIAL) | 2010 | if (skb->ip_summed == CHECKSUM_PARTIAL) |
2007 | info |= (skb_transport_header(skb) - | 2011 | info |= (skb_transport_header(skb) - |
2008 | skb_network_header(skb)); | 2012 | skb_network_header(skb)); |
2009 | |||
2010 | 2013 | ||
2011 | context_desc->vlan_macip_lens = cpu_to_le32(info); | 2014 | context_desc->vlan_macip_lens = cpu_to_le32(info); |
2012 | 2015 | ||
@@ -2055,6 +2058,10 @@ static int igbvf_maybe_stop_tx(struct net_device *netdev, int size) | |||
2055 | 2058 | ||
2056 | netif_stop_queue(netdev); | 2059 | netif_stop_queue(netdev); |
2057 | 2060 | ||
2061 | /* Herbert's original patch had: | ||
2062 | * smp_mb__after_netif_stop_queue(); | ||
2063 | * but since that doesn't exist yet, just open code it. | ||
2064 | */ | ||
2058 | smp_mb(); | 2065 | smp_mb(); |
2059 | 2066 | ||
2060 | /* We need to check again just in case room has been made available */ | 2067 | /* We need to check again just in case room has been made available */ |
@@ -2067,11 +2074,11 @@ static int igbvf_maybe_stop_tx(struct net_device *netdev, int size) | |||
2067 | return 0; | 2074 | return 0; |
2068 | } | 2075 | } |
2069 | 2076 | ||
2070 | #define IGBVF_MAX_TXD_PWR 16 | 2077 | #define IGBVF_MAX_TXD_PWR 16 |
2071 | #define IGBVF_MAX_DATA_PER_TXD (1 << IGBVF_MAX_TXD_PWR) | 2078 | #define IGBVF_MAX_DATA_PER_TXD (1 << IGBVF_MAX_TXD_PWR) |
2072 | 2079 | ||
2073 | static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter, | 2080 | static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter, |
2074 | struct igbvf_ring *tx_ring, | 2081 | struct igbvf_ring *tx_ring, |
2075 | struct sk_buff *skb) | 2082 | struct sk_buff *skb) |
2076 | { | 2083 | { |
2077 | struct igbvf_buffer *buffer_info; | 2084 | struct igbvf_buffer *buffer_info; |
@@ -2093,7 +2100,6 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter, | |||
2093 | if (dma_mapping_error(&pdev->dev, buffer_info->dma)) | 2100 | if (dma_mapping_error(&pdev->dev, buffer_info->dma)) |
2094 | goto dma_error; | 2101 | goto dma_error; |
2095 | 2102 | ||
2096 | |||
2097 | for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { | 2103 | for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { |
2098 | const struct skb_frag_struct *frag; | 2104 | const struct skb_frag_struct *frag; |
2099 | 2105 | ||
@@ -2111,7 +2117,7 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter, | |||
2111 | buffer_info->time_stamp = jiffies; | 2117 | buffer_info->time_stamp = jiffies; |
2112 | buffer_info->mapped_as_page = true; | 2118 | buffer_info->mapped_as_page = true; |
2113 | buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 0, len, | 2119 | buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 0, len, |
2114 | DMA_TO_DEVICE); | 2120 | DMA_TO_DEVICE); |
2115 | if (dma_mapping_error(&pdev->dev, buffer_info->dma)) | 2121 | if (dma_mapping_error(&pdev->dev, buffer_info->dma)) |
2116 | goto dma_error; | 2122 | goto dma_error; |
2117 | } | 2123 | } |
@@ -2133,7 +2139,7 @@ dma_error: | |||
2133 | 2139 | ||
2134 | /* clear timestamp and dma mappings for remaining portion of packet */ | 2140 | /* clear timestamp and dma mappings for remaining portion of packet */ |
2135 | while (count--) { | 2141 | while (count--) { |
2136 | if (i==0) | 2142 | if (i == 0) |
2137 | i += tx_ring->count; | 2143 | i += tx_ring->count; |
2138 | i--; | 2144 | i--; |
2139 | buffer_info = &tx_ring->buffer_info[i]; | 2145 | buffer_info = &tx_ring->buffer_info[i]; |
@@ -2144,10 +2150,10 @@ dma_error: | |||
2144 | } | 2150 | } |
2145 | 2151 | ||
2146 | static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter, | 2152 | static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter, |
2147 | struct igbvf_ring *tx_ring, | 2153 | struct igbvf_ring *tx_ring, |
2148 | int tx_flags, int count, | 2154 | int tx_flags, int count, |
2149 | unsigned int first, u32 paylen, | 2155 | unsigned int first, u32 paylen, |
2150 | u8 hdr_len) | 2156 | u8 hdr_len) |
2151 | { | 2157 | { |
2152 | union e1000_adv_tx_desc *tx_desc = NULL; | 2158 | union e1000_adv_tx_desc *tx_desc = NULL; |
2153 | struct igbvf_buffer *buffer_info; | 2159 | struct igbvf_buffer *buffer_info; |
@@ -2155,7 +2161,7 @@ static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter, | |||
2155 | unsigned int i; | 2161 | unsigned int i; |
2156 | 2162 | ||
2157 | cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS | | 2163 | cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS | |
2158 | E1000_ADVTXD_DCMD_DEXT); | 2164 | E1000_ADVTXD_DCMD_DEXT); |
2159 | 2165 | ||
2160 | if (tx_flags & IGBVF_TX_FLAGS_VLAN) | 2166 | if (tx_flags & IGBVF_TX_FLAGS_VLAN) |
2161 | cmd_type_len |= E1000_ADVTXD_DCMD_VLE; | 2167 | cmd_type_len |= E1000_ADVTXD_DCMD_VLE; |
@@ -2182,7 +2188,7 @@ static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter, | |||
2182 | tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i); | 2188 | tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i); |
2183 | tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); | 2189 | tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); |
2184 | tx_desc->read.cmd_type_len = | 2190 | tx_desc->read.cmd_type_len = |
2185 | cpu_to_le32(cmd_type_len | buffer_info->length); | 2191 | cpu_to_le32(cmd_type_len | buffer_info->length); |
2186 | tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); | 2192 | tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); |
2187 | i++; | 2193 | i++; |
2188 | if (i == tx_ring->count) | 2194 | if (i == tx_ring->count) |
@@ -2193,14 +2199,16 @@ static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter, | |||
2193 | /* Force memory writes to complete before letting h/w | 2199 | /* Force memory writes to complete before letting h/w |
2194 | * know there are new descriptors to fetch. (Only | 2200 | * know there are new descriptors to fetch. (Only |
2195 | * applicable for weak-ordered memory model archs, | 2201 | * applicable for weak-ordered memory model archs, |
2196 | * such as IA-64). */ | 2202 | * such as IA-64). |
2203 | */ | ||
2197 | wmb(); | 2204 | wmb(); |
2198 | 2205 | ||
2199 | tx_ring->buffer_info[first].next_to_watch = tx_desc; | 2206 | tx_ring->buffer_info[first].next_to_watch = tx_desc; |
2200 | tx_ring->next_to_use = i; | 2207 | tx_ring->next_to_use = i; |
2201 | writel(i, adapter->hw.hw_addr + tx_ring->tail); | 2208 | writel(i, adapter->hw.hw_addr + tx_ring->tail); |
2202 | /* we need this if more than one processor can write to our tail | 2209 | /* we need this if more than one processor can write to our tail |
2203 | * at a time, it syncronizes IO on IA64/Altix systems */ | 2210 | * at a time, it synchronizes IO on IA64/Altix systems |
2211 | */ | ||
2204 | mmiowb(); | 2212 | mmiowb(); |
2205 | } | 2213 | } |
2206 | 2214 | ||
@@ -2225,11 +2233,10 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb, | |||
2225 | return NETDEV_TX_OK; | 2233 | return NETDEV_TX_OK; |
2226 | } | 2234 | } |
2227 | 2235 | ||
2228 | /* | 2236 | /* need: count + 4 desc gap to keep tail from touching |
2229 | * need: count + 4 desc gap to keep tail from touching | 2237 | * + 2 desc gap to keep tail from touching head, |
2230 | * + 2 desc gap to keep tail from touching head, | 2238 | * + 1 desc for skb->data, |
2231 | * + 1 desc for skb->data, | 2239 | * + 1 desc for context descriptor, |
2232 | * + 1 desc for context descriptor, | ||
2233 | * head, otherwise try next time | 2240 | * head, otherwise try next time |
2234 | */ | 2241 | */ |
2235 | if (igbvf_maybe_stop_tx(netdev, skb_shinfo(skb)->nr_frags + 4)) { | 2242 | if (igbvf_maybe_stop_tx(netdev, skb_shinfo(skb)->nr_frags + 4)) { |
@@ -2258,11 +2265,10 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb, | |||
2258 | if (tso) | 2265 | if (tso) |
2259 | tx_flags |= IGBVF_TX_FLAGS_TSO; | 2266 | tx_flags |= IGBVF_TX_FLAGS_TSO; |
2260 | else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags, protocol) && | 2267 | else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags, protocol) && |
2261 | (skb->ip_summed == CHECKSUM_PARTIAL)) | 2268 | (skb->ip_summed == CHECKSUM_PARTIAL)) |
2262 | tx_flags |= IGBVF_TX_FLAGS_CSUM; | 2269 | tx_flags |= IGBVF_TX_FLAGS_CSUM; |
2263 | 2270 | ||
2264 | /* | 2271 | /* count reflects descriptors mapped, if 0 then mapping error |
2265 | * count reflects descriptors mapped, if 0 then mapping error | ||
2266 | * has occurred and we need to rewind the descriptor queue | 2272 | * has occurred and we need to rewind the descriptor queue |
2267 | */ | 2273 | */ |
2268 | count = igbvf_tx_map_adv(adapter, tx_ring, skb); | 2274 | count = igbvf_tx_map_adv(adapter, tx_ring, skb); |
@@ -2313,6 +2319,7 @@ static void igbvf_tx_timeout(struct net_device *netdev) | |||
2313 | static void igbvf_reset_task(struct work_struct *work) | 2319 | static void igbvf_reset_task(struct work_struct *work) |
2314 | { | 2320 | { |
2315 | struct igbvf_adapter *adapter; | 2321 | struct igbvf_adapter *adapter; |
2322 | |||
2316 | adapter = container_of(work, struct igbvf_adapter, reset_task); | 2323 | adapter = container_of(work, struct igbvf_adapter, reset_task); |
2317 | 2324 | ||
2318 | igbvf_reinit_locked(adapter); | 2325 | igbvf_reinit_locked(adapter); |
@@ -2356,14 +2363,13 @@ static int igbvf_change_mtu(struct net_device *netdev, int new_mtu) | |||
2356 | } | 2363 | } |
2357 | 2364 | ||
2358 | while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state)) | 2365 | while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state)) |
2359 | msleep(1); | 2366 | usleep_range(1000, 2000); |
2360 | /* igbvf_down has a dependency on max_frame_size */ | 2367 | /* igbvf_down has a dependency on max_frame_size */ |
2361 | adapter->max_frame_size = max_frame; | 2368 | adapter->max_frame_size = max_frame; |
2362 | if (netif_running(netdev)) | 2369 | if (netif_running(netdev)) |
2363 | igbvf_down(adapter); | 2370 | igbvf_down(adapter); |
2364 | 2371 | ||
2365 | /* | 2372 | /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN |
2366 | * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN | ||
2367 | * means we reserve 2 more, this pushes us to allocate from the next | 2373 | * means we reserve 2 more, this pushes us to allocate from the next |
2368 | * larger slab size. | 2374 | * larger slab size. |
2369 | * i.e. RXBUFFER_2048 --> size-4096 slab | 2375 | * i.e. RXBUFFER_2048 --> size-4096 slab |
@@ -2382,15 +2388,14 @@ static int igbvf_change_mtu(struct net_device *netdev, int new_mtu) | |||
2382 | adapter->rx_buffer_len = PAGE_SIZE / 2; | 2388 | adapter->rx_buffer_len = PAGE_SIZE / 2; |
2383 | #endif | 2389 | #endif |
2384 | 2390 | ||
2385 | |||
2386 | /* adjust allocation if LPE protects us, and we aren't using SBP */ | 2391 | /* adjust allocation if LPE protects us, and we aren't using SBP */ |
2387 | if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) || | 2392 | if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) || |
2388 | (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)) | 2393 | (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)) |
2389 | adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + | 2394 | adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + |
2390 | ETH_FCS_LEN; | 2395 | ETH_FCS_LEN; |
2391 | 2396 | ||
2392 | dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n", | 2397 | dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n", |
2393 | netdev->mtu, new_mtu); | 2398 | netdev->mtu, new_mtu); |
2394 | netdev->mtu = new_mtu; | 2399 | netdev->mtu = new_mtu; |
2395 | 2400 | ||
2396 | if (netif_running(netdev)) | 2401 | if (netif_running(netdev)) |
@@ -2477,8 +2482,7 @@ static void igbvf_shutdown(struct pci_dev *pdev) | |||
2477 | } | 2482 | } |
2478 | 2483 | ||
2479 | #ifdef CONFIG_NET_POLL_CONTROLLER | 2484 | #ifdef CONFIG_NET_POLL_CONTROLLER |
2480 | /* | 2485 | /* Polling 'interrupt' - used by things like netconsole to send skbs |
2481 | * Polling 'interrupt' - used by things like netconsole to send skbs | ||
2482 | * without having to re-enable interrupts. It's not called while | 2486 | * without having to re-enable interrupts. It's not called while |
2483 | * the interrupt routine is executing. | 2487 | * the interrupt routine is executing. |
2484 | */ | 2488 | */ |
@@ -2503,7 +2507,7 @@ static void igbvf_netpoll(struct net_device *netdev) | |||
2503 | * this device has been detected. | 2507 | * this device has been detected. |
2504 | */ | 2508 | */ |
2505 | static pci_ers_result_t igbvf_io_error_detected(struct pci_dev *pdev, | 2509 | static pci_ers_result_t igbvf_io_error_detected(struct pci_dev *pdev, |
2506 | pci_channel_state_t state) | 2510 | pci_channel_state_t state) |
2507 | { | 2511 | { |
2508 | struct net_device *netdev = pci_get_drvdata(pdev); | 2512 | struct net_device *netdev = pci_get_drvdata(pdev); |
2509 | struct igbvf_adapter *adapter = netdev_priv(netdev); | 2513 | struct igbvf_adapter *adapter = netdev_priv(netdev); |
@@ -2583,7 +2587,7 @@ static void igbvf_print_device_info(struct igbvf_adapter *adapter) | |||
2583 | } | 2587 | } |
2584 | 2588 | ||
2585 | static int igbvf_set_features(struct net_device *netdev, | 2589 | static int igbvf_set_features(struct net_device *netdev, |
2586 | netdev_features_t features) | 2590 | netdev_features_t features) |
2587 | { | 2591 | { |
2588 | struct igbvf_adapter *adapter = netdev_priv(netdev); | 2592 | struct igbvf_adapter *adapter = netdev_priv(netdev); |
2589 | 2593 | ||
@@ -2596,21 +2600,21 @@ static int igbvf_set_features(struct net_device *netdev, | |||
2596 | } | 2600 | } |
2597 | 2601 | ||
2598 | static const struct net_device_ops igbvf_netdev_ops = { | 2602 | static const struct net_device_ops igbvf_netdev_ops = { |
2599 | .ndo_open = igbvf_open, | 2603 | .ndo_open = igbvf_open, |
2600 | .ndo_stop = igbvf_close, | 2604 | .ndo_stop = igbvf_close, |
2601 | .ndo_start_xmit = igbvf_xmit_frame, | 2605 | .ndo_start_xmit = igbvf_xmit_frame, |
2602 | .ndo_get_stats = igbvf_get_stats, | 2606 | .ndo_get_stats = igbvf_get_stats, |
2603 | .ndo_set_rx_mode = igbvf_set_multi, | 2607 | .ndo_set_rx_mode = igbvf_set_multi, |
2604 | .ndo_set_mac_address = igbvf_set_mac, | 2608 | .ndo_set_mac_address = igbvf_set_mac, |
2605 | .ndo_change_mtu = igbvf_change_mtu, | 2609 | .ndo_change_mtu = igbvf_change_mtu, |
2606 | .ndo_do_ioctl = igbvf_ioctl, | 2610 | .ndo_do_ioctl = igbvf_ioctl, |
2607 | .ndo_tx_timeout = igbvf_tx_timeout, | 2611 | .ndo_tx_timeout = igbvf_tx_timeout, |
2608 | .ndo_vlan_rx_add_vid = igbvf_vlan_rx_add_vid, | 2612 | .ndo_vlan_rx_add_vid = igbvf_vlan_rx_add_vid, |
2609 | .ndo_vlan_rx_kill_vid = igbvf_vlan_rx_kill_vid, | 2613 | .ndo_vlan_rx_kill_vid = igbvf_vlan_rx_kill_vid, |
2610 | #ifdef CONFIG_NET_POLL_CONTROLLER | 2614 | #ifdef CONFIG_NET_POLL_CONTROLLER |
2611 | .ndo_poll_controller = igbvf_netpoll, | 2615 | .ndo_poll_controller = igbvf_netpoll, |
2612 | #endif | 2616 | #endif |
2613 | .ndo_set_features = igbvf_set_features, | 2617 | .ndo_set_features = igbvf_set_features, |
2614 | }; | 2618 | }; |
2615 | 2619 | ||
2616 | /** | 2620 | /** |
@@ -2645,8 +2649,8 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2645 | } else { | 2649 | } else { |
2646 | err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); | 2650 | err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); |
2647 | if (err) { | 2651 | if (err) { |
2648 | dev_err(&pdev->dev, "No usable DMA " | 2652 | dev_err(&pdev->dev, |
2649 | "configuration, aborting\n"); | 2653 | "No usable DMA configuration, aborting\n"); |
2650 | goto err_dma; | 2654 | goto err_dma; |
2651 | } | 2655 | } |
2652 | } | 2656 | } |
@@ -2686,7 +2690,7 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2686 | 2690 | ||
2687 | err = -EIO; | 2691 | err = -EIO; |
2688 | adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0), | 2692 | adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0), |
2689 | pci_resource_len(pdev, 0)); | 2693 | pci_resource_len(pdev, 0)); |
2690 | 2694 | ||
2691 | if (!adapter->hw.hw_addr) | 2695 | if (!adapter->hw.hw_addr) |
2692 | goto err_ioremap; | 2696 | goto err_ioremap; |
@@ -2712,16 +2716,16 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2712 | adapter->bd_number = cards_found++; | 2716 | adapter->bd_number = cards_found++; |
2713 | 2717 | ||
2714 | netdev->hw_features = NETIF_F_SG | | 2718 | netdev->hw_features = NETIF_F_SG | |
2715 | NETIF_F_IP_CSUM | | 2719 | NETIF_F_IP_CSUM | |
2716 | NETIF_F_IPV6_CSUM | | 2720 | NETIF_F_IPV6_CSUM | |
2717 | NETIF_F_TSO | | 2721 | NETIF_F_TSO | |
2718 | NETIF_F_TSO6 | | 2722 | NETIF_F_TSO6 | |
2719 | NETIF_F_RXCSUM; | 2723 | NETIF_F_RXCSUM; |
2720 | 2724 | ||
2721 | netdev->features = netdev->hw_features | | 2725 | netdev->features = netdev->hw_features | |
2722 | NETIF_F_HW_VLAN_CTAG_TX | | 2726 | NETIF_F_HW_VLAN_CTAG_TX | |
2723 | NETIF_F_HW_VLAN_CTAG_RX | | 2727 | NETIF_F_HW_VLAN_CTAG_RX | |
2724 | NETIF_F_HW_VLAN_CTAG_FILTER; | 2728 | NETIF_F_HW_VLAN_CTAG_FILTER; |
2725 | 2729 | ||
2726 | if (pci_using_dac) | 2730 | if (pci_using_dac) |
2727 | netdev->features |= NETIF_F_HIGHDMA; | 2731 | netdev->features |= NETIF_F_HIGHDMA; |
@@ -2742,7 +2746,8 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2742 | if (err) | 2746 | if (err) |
2743 | dev_info(&pdev->dev, "Error reading MAC address.\n"); | 2747 | dev_info(&pdev->dev, "Error reading MAC address.\n"); |
2744 | else if (is_zero_ether_addr(adapter->hw.mac.addr)) | 2748 | else if (is_zero_ether_addr(adapter->hw.mac.addr)) |
2745 | dev_info(&pdev->dev, "MAC address not assigned by administrator.\n"); | 2749 | dev_info(&pdev->dev, |
2750 | "MAC address not assigned by administrator.\n"); | ||
2746 | memcpy(netdev->dev_addr, adapter->hw.mac.addr, | 2751 | memcpy(netdev->dev_addr, adapter->hw.mac.addr, |
2747 | netdev->addr_len); | 2752 | netdev->addr_len); |
2748 | } | 2753 | } |
@@ -2751,11 +2756,11 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2751 | dev_info(&pdev->dev, "Assigning random MAC address.\n"); | 2756 | dev_info(&pdev->dev, "Assigning random MAC address.\n"); |
2752 | eth_hw_addr_random(netdev); | 2757 | eth_hw_addr_random(netdev); |
2753 | memcpy(adapter->hw.mac.addr, netdev->dev_addr, | 2758 | memcpy(adapter->hw.mac.addr, netdev->dev_addr, |
2754 | netdev->addr_len); | 2759 | netdev->addr_len); |
2755 | } | 2760 | } |
2756 | 2761 | ||
2757 | setup_timer(&adapter->watchdog_timer, &igbvf_watchdog, | 2762 | setup_timer(&adapter->watchdog_timer, &igbvf_watchdog, |
2758 | (unsigned long) adapter); | 2763 | (unsigned long)adapter); |
2759 | 2764 | ||
2760 | INIT_WORK(&adapter->reset_task, igbvf_reset_task); | 2765 | INIT_WORK(&adapter->reset_task, igbvf_reset_task); |
2761 | INIT_WORK(&adapter->watchdog_task, igbvf_watchdog_task); | 2766 | INIT_WORK(&adapter->watchdog_task, igbvf_watchdog_task); |
@@ -2818,8 +2823,7 @@ static void igbvf_remove(struct pci_dev *pdev) | |||
2818 | struct igbvf_adapter *adapter = netdev_priv(netdev); | 2823 | struct igbvf_adapter *adapter = netdev_priv(netdev); |
2819 | struct e1000_hw *hw = &adapter->hw; | 2824 | struct e1000_hw *hw = &adapter->hw; |
2820 | 2825 | ||
2821 | /* | 2826 | /* The watchdog timer may be rescheduled, so explicitly |
2822 | * The watchdog timer may be rescheduled, so explicitly | ||
2823 | * disable it from being rescheduled. | 2827 | * disable it from being rescheduled. |
2824 | */ | 2828 | */ |
2825 | set_bit(__IGBVF_DOWN, &adapter->state); | 2829 | set_bit(__IGBVF_DOWN, &adapter->state); |
@@ -2832,9 +2836,8 @@ static void igbvf_remove(struct pci_dev *pdev) | |||
2832 | 2836 | ||
2833 | igbvf_reset_interrupt_capability(adapter); | 2837 | igbvf_reset_interrupt_capability(adapter); |
2834 | 2838 | ||
2835 | /* | 2839 | /* it is important to delete the NAPI struct prior to freeing the |
2836 | * it is important to delete the napi struct prior to freeing the | 2840 | * Rx ring so that you do not end up with null pointer refs |
2837 | * rx ring so that you do not end up with null pointer refs | ||
2838 | */ | 2841 | */ |
2839 | netif_napi_del(&adapter->rx_ring->napi); | 2842 | netif_napi_del(&adapter->rx_ring->napi); |
2840 | kfree(adapter->tx_ring); | 2843 | kfree(adapter->tx_ring); |
@@ -2866,17 +2869,17 @@ MODULE_DEVICE_TABLE(pci, igbvf_pci_tbl); | |||
2866 | 2869 | ||
2867 | /* PCI Device API Driver */ | 2870 | /* PCI Device API Driver */ |
2868 | static struct pci_driver igbvf_driver = { | 2871 | static struct pci_driver igbvf_driver = { |
2869 | .name = igbvf_driver_name, | 2872 | .name = igbvf_driver_name, |
2870 | .id_table = igbvf_pci_tbl, | 2873 | .id_table = igbvf_pci_tbl, |
2871 | .probe = igbvf_probe, | 2874 | .probe = igbvf_probe, |
2872 | .remove = igbvf_remove, | 2875 | .remove = igbvf_remove, |
2873 | #ifdef CONFIG_PM | 2876 | #ifdef CONFIG_PM |
2874 | /* Power Management Hooks */ | 2877 | /* Power Management Hooks */ |
2875 | .suspend = igbvf_suspend, | 2878 | .suspend = igbvf_suspend, |
2876 | .resume = igbvf_resume, | 2879 | .resume = igbvf_resume, |
2877 | #endif | 2880 | #endif |
2878 | .shutdown = igbvf_shutdown, | 2881 | .shutdown = igbvf_shutdown, |
2879 | .err_handler = &igbvf_err_handler | 2882 | .err_handler = &igbvf_err_handler |
2880 | }; | 2883 | }; |
2881 | 2884 | ||
2882 | /** | 2885 | /** |
@@ -2888,6 +2891,7 @@ static struct pci_driver igbvf_driver = { | |||
2888 | static int __init igbvf_init_module(void) | 2891 | static int __init igbvf_init_module(void) |
2889 | { | 2892 | { |
2890 | int ret; | 2893 | int ret; |
2894 | |||
2891 | pr_info("%s - version %s\n", igbvf_driver_string, igbvf_driver_version); | 2895 | pr_info("%s - version %s\n", igbvf_driver_string, igbvf_driver_version); |
2892 | pr_info("%s\n", igbvf_copyright); | 2896 | pr_info("%s\n", igbvf_copyright); |
2893 | 2897 | ||
@@ -2909,7 +2913,6 @@ static void __exit igbvf_exit_module(void) | |||
2909 | } | 2913 | } |
2910 | module_exit(igbvf_exit_module); | 2914 | module_exit(igbvf_exit_module); |
2911 | 2915 | ||
2912 | |||
2913 | MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>"); | 2916 | MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>"); |
2914 | MODULE_DESCRIPTION("Intel(R) Gigabit Virtual Function Network Driver"); | 2917 | MODULE_DESCRIPTION("Intel(R) Gigabit Virtual Function Network Driver"); |
2915 | MODULE_LICENSE("GPL"); | 2918 | MODULE_LICENSE("GPL"); |
diff --git a/drivers/net/ethernet/intel/igbvf/regs.h b/drivers/net/ethernet/intel/igbvf/regs.h index 7dc6341715dc..86a7c120b574 100644 --- a/drivers/net/ethernet/intel/igbvf/regs.h +++ b/drivers/net/ethernet/intel/igbvf/regs.h | |||
@@ -13,8 +13,7 @@ | |||
13 | more details. | 13 | more details. |
14 | 14 | ||
15 | You should have received a copy of the GNU General Public License along with | 15 | You should have received a copy of the GNU General Public License along with |
16 | this program; if not, write to the Free Software Foundation, Inc., | 16 | this program; if not, see <http://www.gnu.org/licenses/>. |
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | 17 | ||
19 | The full GNU General Public License is included in this distribution in | 18 | The full GNU General Public License is included in this distribution in |
20 | the file called "COPYING". | 19 | the file called "COPYING". |
@@ -28,81 +27,81 @@ | |||
28 | #ifndef _E1000_REGS_H_ | 27 | #ifndef _E1000_REGS_H_ |
29 | #define _E1000_REGS_H_ | 28 | #define _E1000_REGS_H_ |
30 | 29 | ||
31 | #define E1000_CTRL 0x00000 /* Device Control - RW */ | 30 | #define E1000_CTRL 0x00000 /* Device Control - RW */ |
32 | #define E1000_STATUS 0x00008 /* Device Status - RO */ | 31 | #define E1000_STATUS 0x00008 /* Device Status - RO */ |
33 | #define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ | 32 | #define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ |
34 | #define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */ | 33 | #define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */ |
35 | #define E1000_EITR(_n) (0x01680 + (0x4 * (_n))) | 34 | #define E1000_EITR(_n) (0x01680 + (0x4 * (_n))) |
36 | #define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */ | 35 | #define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */ |
37 | #define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */ | 36 | #define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */ |
38 | #define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */ | 37 | #define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */ |
39 | #define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */ | 38 | #define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */ |
40 | #define E1000_EIAM 0x01530 /* Ext. Interrupt Ack Auto Clear Mask - RW */ | 39 | #define E1000_EIAM 0x01530 /* Ext. Interrupt Ack Auto Clear Mask - RW */ |
41 | #define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */ | 40 | #define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */ |
42 | #define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */ | 41 | #define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */ |
43 | /* | 42 | |
44 | * Convenience macros | 43 | /* Convenience macros |
45 | * | 44 | * |
46 | * Note: "_n" is the queue number of the register to be written to. | 45 | * Note: "_n" is the queue number of the register to be written to. |
47 | * | 46 | * |
48 | * Example usage: | 47 | * Example usage: |
49 | * E1000_RDBAL_REG(current_rx_queue) | 48 | * E1000_RDBAL_REG(current_rx_queue) |
50 | */ | 49 | */ |
51 | #define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \ | 50 | #define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \ |
52 | (0x0C000 + ((_n) * 0x40))) | 51 | (0x0C000 + ((_n) * 0x40))) |
53 | #define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \ | 52 | #define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \ |
54 | (0x0C004 + ((_n) * 0x40))) | 53 | (0x0C004 + ((_n) * 0x40))) |
55 | #define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : \ | 54 | #define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : \ |
56 | (0x0C008 + ((_n) * 0x40))) | 55 | (0x0C008 + ((_n) * 0x40))) |
57 | #define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) : \ | 56 | #define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) : \ |
58 | (0x0C00C + ((_n) * 0x40))) | 57 | (0x0C00C + ((_n) * 0x40))) |
59 | #define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \ | 58 | #define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \ |
60 | (0x0C010 + ((_n) * 0x40))) | 59 | (0x0C010 + ((_n) * 0x40))) |
61 | #define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \ | 60 | #define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \ |
62 | (0x0C018 + ((_n) * 0x40))) | 61 | (0x0C018 + ((_n) * 0x40))) |
63 | #define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \ | 62 | #define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \ |
64 | (0x0C028 + ((_n) * 0x40))) | 63 | (0x0C028 + ((_n) * 0x40))) |
65 | #define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \ | 64 | #define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \ |
66 | (0x0E000 + ((_n) * 0x40))) | 65 | (0x0E000 + ((_n) * 0x40))) |
67 | #define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) : \ | 66 | #define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) : \ |
68 | (0x0E004 + ((_n) * 0x40))) | 67 | (0x0E004 + ((_n) * 0x40))) |
69 | #define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : \ | 68 | #define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : \ |
70 | (0x0E008 + ((_n) * 0x40))) | 69 | (0x0E008 + ((_n) * 0x40))) |
71 | #define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \ | 70 | #define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \ |
72 | (0x0E010 + ((_n) * 0x40))) | 71 | (0x0E010 + ((_n) * 0x40))) |
73 | #define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \ | 72 | #define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \ |
74 | (0x0E018 + ((_n) * 0x40))) | 73 | (0x0E018 + ((_n) * 0x40))) |
75 | #define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \ | 74 | #define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \ |
76 | (0x0E028 + ((_n) * 0x40))) | 75 | (0x0E028 + ((_n) * 0x40))) |
77 | #define E1000_DCA_TXCTRL(_n) (0x03814 + (_n << 8)) | 76 | #define E1000_DCA_TXCTRL(_n) (0x03814 + (_n << 8)) |
78 | #define E1000_DCA_RXCTRL(_n) (0x02814 + (_n << 8)) | 77 | #define E1000_DCA_RXCTRL(_n) (0x02814 + (_n << 8)) |
79 | #define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ | 78 | #define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ |
80 | (0x054E0 + ((_i - 16) * 8))) | 79 | (0x054E0 + ((_i - 16) * 8))) |
81 | #define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \ | 80 | #define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \ |
82 | (0x054E4 + ((_i - 16) * 8))) | 81 | (0x054E4 + ((_i - 16) * 8))) |
83 | 82 | ||
84 | /* Statistics registers */ | 83 | /* Statistics registers */ |
85 | #define E1000_VFGPRC 0x00F10 | 84 | #define E1000_VFGPRC 0x00F10 |
86 | #define E1000_VFGORC 0x00F18 | 85 | #define E1000_VFGORC 0x00F18 |
87 | #define E1000_VFMPRC 0x00F3C | 86 | #define E1000_VFMPRC 0x00F3C |
88 | #define E1000_VFGPTC 0x00F14 | 87 | #define E1000_VFGPTC 0x00F14 |
89 | #define E1000_VFGOTC 0x00F34 | 88 | #define E1000_VFGOTC 0x00F34 |
90 | #define E1000_VFGOTLBC 0x00F50 | 89 | #define E1000_VFGOTLBC 0x00F50 |
91 | #define E1000_VFGPTLBC 0x00F44 | 90 | #define E1000_VFGPTLBC 0x00F44 |
92 | #define E1000_VFGORLBC 0x00F48 | 91 | #define E1000_VFGORLBC 0x00F48 |
93 | #define E1000_VFGPRLBC 0x00F40 | 92 | #define E1000_VFGPRLBC 0x00F40 |
94 | 93 | ||
95 | /* These act per VF so an array friendly macro is used */ | 94 | /* These act per VF so an array friendly macro is used */ |
96 | #define E1000_V2PMAILBOX(_n) (0x00C40 + (4 * (_n))) | 95 | #define E1000_V2PMAILBOX(_n) (0x00C40 + (4 * (_n))) |
97 | #define E1000_VMBMEM(_n) (0x00800 + (64 * (_n))) | 96 | #define E1000_VMBMEM(_n) (0x00800 + (64 * (_n))) |
98 | 97 | ||
99 | /* Define macros for handling registers */ | 98 | /* Define macros for handling registers */ |
100 | #define er32(reg) readl(hw->hw_addr + E1000_##reg) | 99 | #define er32(reg) readl(hw->hw_addr + E1000_##reg) |
101 | #define ew32(reg, val) writel((val), hw->hw_addr + E1000_##reg) | 100 | #define ew32(reg, val) writel((val), hw->hw_addr + E1000_##reg) |
102 | #define array_er32(reg, offset) \ | 101 | #define array_er32(reg, offset) \ |
103 | readl(hw->hw_addr + E1000_##reg + (offset << 2)) | 102 | readl(hw->hw_addr + E1000_##reg + (offset << 2)) |
104 | #define array_ew32(reg, offset, val) \ | 103 | #define array_ew32(reg, offset, val) \ |
105 | writel((val), hw->hw_addr + E1000_##reg + (offset << 2)) | 104 | writel((val), hw->hw_addr + E1000_##reg + (offset << 2)) |
106 | #define e1e_flush() er32(STATUS) | 105 | #define e1e_flush() er32(STATUS) |
107 | 106 | ||
108 | #endif | 107 | #endif |
diff --git a/drivers/net/ethernet/intel/igbvf/vf.c b/drivers/net/ethernet/intel/igbvf/vf.c index 955ad8c2c534..a13baa90ae20 100644 --- a/drivers/net/ethernet/intel/igbvf/vf.c +++ b/drivers/net/ethernet/intel/igbvf/vf.c | |||
@@ -13,8 +13,7 @@ | |||
13 | more details. | 13 | more details. |
14 | 14 | ||
15 | You should have received a copy of the GNU General Public License along with | 15 | You should have received a copy of the GNU General Public License along with |
16 | this program; if not, write to the Free Software Foundation, Inc., | 16 | this program; if not, see <http://www.gnu.org/licenses/>. |
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | 17 | ||
19 | The full GNU General Public License is included in this distribution in | 18 | The full GNU General Public License is included in this distribution in |
20 | the file called "COPYING". | 19 | the file called "COPYING". |
@@ -25,17 +24,16 @@ | |||
25 | 24 | ||
26 | *******************************************************************************/ | 25 | *******************************************************************************/ |
27 | 26 | ||
28 | |||
29 | #include "vf.h" | 27 | #include "vf.h" |
30 | 28 | ||
31 | static s32 e1000_check_for_link_vf(struct e1000_hw *hw); | 29 | static s32 e1000_check_for_link_vf(struct e1000_hw *hw); |
32 | static s32 e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed, | 30 | static s32 e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed, |
33 | u16 *duplex); | 31 | u16 *duplex); |
34 | static s32 e1000_init_hw_vf(struct e1000_hw *hw); | 32 | static s32 e1000_init_hw_vf(struct e1000_hw *hw); |
35 | static s32 e1000_reset_hw_vf(struct e1000_hw *hw); | 33 | static s32 e1000_reset_hw_vf(struct e1000_hw *hw); |
36 | 34 | ||
37 | static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, u8 *, | 35 | static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, u8 *, |
38 | u32, u32, u32); | 36 | u32, u32, u32); |
39 | static void e1000_rar_set_vf(struct e1000_hw *, u8 *, u32); | 37 | static void e1000_rar_set_vf(struct e1000_hw *, u8 *, u32); |
40 | static s32 e1000_read_mac_addr_vf(struct e1000_hw *); | 38 | static s32 e1000_read_mac_addr_vf(struct e1000_hw *); |
41 | static s32 e1000_set_vfta_vf(struct e1000_hw *, u16, bool); | 39 | static s32 e1000_set_vfta_vf(struct e1000_hw *, u16, bool); |
@@ -94,7 +92,7 @@ void e1000_init_function_pointers_vf(struct e1000_hw *hw) | |||
94 | * the status register's data which is often stale and inaccurate. | 92 | * the status register's data which is often stale and inaccurate. |
95 | **/ | 93 | **/ |
96 | static s32 e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed, | 94 | static s32 e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed, |
97 | u16 *duplex) | 95 | u16 *duplex) |
98 | { | 96 | { |
99 | s32 status; | 97 | s32 status; |
100 | 98 | ||
@@ -130,7 +128,7 @@ static s32 e1000_reset_hw_vf(struct e1000_hw *hw) | |||
130 | u8 *addr = (u8 *)(&msgbuf[1]); | 128 | u8 *addr = (u8 *)(&msgbuf[1]); |
131 | u32 ctrl; | 129 | u32 ctrl; |
132 | 130 | ||
133 | /* assert vf queue/interrupt reset */ | 131 | /* assert VF queue/interrupt reset */ |
134 | ctrl = er32(CTRL); | 132 | ctrl = er32(CTRL); |
135 | ew32(CTRL, ctrl | E1000_CTRL_RST); | 133 | ew32(CTRL, ctrl | E1000_CTRL_RST); |
136 | 134 | ||
@@ -144,7 +142,7 @@ static s32 e1000_reset_hw_vf(struct e1000_hw *hw) | |||
144 | /* mailbox timeout can now become active */ | 142 | /* mailbox timeout can now become active */ |
145 | mbx->timeout = E1000_VF_MBX_INIT_TIMEOUT; | 143 | mbx->timeout = E1000_VF_MBX_INIT_TIMEOUT; |
146 | 144 | ||
147 | /* notify pf of vf reset completion */ | 145 | /* notify PF of VF reset completion */ |
148 | msgbuf[0] = E1000_VF_RESET; | 146 | msgbuf[0] = E1000_VF_RESET; |
149 | mbx->ops.write_posted(hw, msgbuf, 1); | 147 | mbx->ops.write_posted(hw, msgbuf, 1); |
150 | 148 | ||
@@ -153,7 +151,8 @@ static s32 e1000_reset_hw_vf(struct e1000_hw *hw) | |||
153 | /* set our "perm_addr" based on info provided by PF */ | 151 | /* set our "perm_addr" based on info provided by PF */ |
154 | ret_val = mbx->ops.read_posted(hw, msgbuf, 3); | 152 | ret_val = mbx->ops.read_posted(hw, msgbuf, 3); |
155 | if (!ret_val) { | 153 | if (!ret_val) { |
156 | if (msgbuf[0] == (E1000_VF_RESET | E1000_VT_MSGTYPE_ACK)) | 154 | if (msgbuf[0] == (E1000_VF_RESET | |
155 | E1000_VT_MSGTYPE_ACK)) | ||
157 | memcpy(hw->mac.perm_addr, addr, ETH_ALEN); | 156 | memcpy(hw->mac.perm_addr, addr, ETH_ALEN); |
158 | else | 157 | else |
159 | ret_val = -E1000_ERR_MAC_INIT; | 158 | ret_val = -E1000_ERR_MAC_INIT; |
@@ -194,15 +193,14 @@ static u32 e1000_hash_mc_addr_vf(struct e1000_hw *hw, u8 *mc_addr) | |||
194 | /* Register count multiplied by bits per register */ | 193 | /* Register count multiplied by bits per register */ |
195 | hash_mask = (hw->mac.mta_reg_count * 32) - 1; | 194 | hash_mask = (hw->mac.mta_reg_count * 32) - 1; |
196 | 195 | ||
197 | /* | 196 | /* The bit_shift is the number of left-shifts |
198 | * The bit_shift is the number of left-shifts | ||
199 | * where 0xFF would still fall within the hash mask. | 197 | * where 0xFF would still fall within the hash mask. |
200 | */ | 198 | */ |
201 | while (hash_mask >> bit_shift != 0xFF) | 199 | while (hash_mask >> bit_shift != 0xFF) |
202 | bit_shift++; | 200 | bit_shift++; |
203 | 201 | ||
204 | hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) | | 202 | hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) | |
205 | (((u16) mc_addr[5]) << bit_shift))); | 203 | (((u16)mc_addr[5]) << bit_shift))); |
206 | 204 | ||
207 | return hash_value; | 205 | return hash_value; |
208 | } | 206 | } |
@@ -221,8 +219,8 @@ static u32 e1000_hash_mc_addr_vf(struct e1000_hw *hw, u8 *mc_addr) | |||
221 | * unless there are workarounds that change this. | 219 | * unless there are workarounds that change this. |
222 | **/ | 220 | **/ |
223 | static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, | 221 | static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, |
224 | u8 *mc_addr_list, u32 mc_addr_count, | 222 | u8 *mc_addr_list, u32 mc_addr_count, |
225 | u32 rar_used_count, u32 rar_count) | 223 | u32 rar_used_count, u32 rar_count) |
226 | { | 224 | { |
227 | struct e1000_mbx_info *mbx = &hw->mbx; | 225 | struct e1000_mbx_info *mbx = &hw->mbx; |
228 | u32 msgbuf[E1000_VFMAILBOX_SIZE]; | 226 | u32 msgbuf[E1000_VFMAILBOX_SIZE]; |
@@ -305,7 +303,7 @@ void e1000_rlpml_set_vf(struct e1000_hw *hw, u16 max_size) | |||
305 | * @addr: pointer to the receive address | 303 | * @addr: pointer to the receive address |
306 | * @index: receive address array register | 304 | * @index: receive address array register |
307 | **/ | 305 | **/ |
308 | static void e1000_rar_set_vf(struct e1000_hw *hw, u8 * addr, u32 index) | 306 | static void e1000_rar_set_vf(struct e1000_hw *hw, u8 *addr, u32 index) |
309 | { | 307 | { |
310 | struct e1000_mbx_info *mbx = &hw->mbx; | 308 | struct e1000_mbx_info *mbx = &hw->mbx; |
311 | u32 msgbuf[3]; | 309 | u32 msgbuf[3]; |
@@ -354,8 +352,7 @@ static s32 e1000_check_for_link_vf(struct e1000_hw *hw) | |||
354 | s32 ret_val = E1000_SUCCESS; | 352 | s32 ret_val = E1000_SUCCESS; |
355 | u32 in_msg = 0; | 353 | u32 in_msg = 0; |
356 | 354 | ||
357 | /* | 355 | /* We only want to run this if there has been a rst asserted. |
358 | * We only want to run this if there has been a rst asserted. | ||
359 | * in this case that could mean a link change, device reset, | 356 | * in this case that could mean a link change, device reset, |
360 | * or a virtual function reset | 357 | * or a virtual function reset |
361 | */ | 358 | */ |
@@ -367,31 +364,33 @@ static s32 e1000_check_for_link_vf(struct e1000_hw *hw) | |||
367 | if (!mac->get_link_status) | 364 | if (!mac->get_link_status) |
368 | goto out; | 365 | goto out; |
369 | 366 | ||
370 | /* if link status is down no point in checking to see if pf is up */ | 367 | /* if link status is down no point in checking to see if PF is up */ |
371 | if (!(er32(STATUS) & E1000_STATUS_LU)) | 368 | if (!(er32(STATUS) & E1000_STATUS_LU)) |
372 | goto out; | 369 | goto out; |
373 | 370 | ||
374 | /* if the read failed it could just be a mailbox collision, best wait | 371 | /* if the read failed it could just be a mailbox collision, best wait |
375 | * until we are called again and don't report an error */ | 372 | * until we are called again and don't report an error |
373 | */ | ||
376 | if (mbx->ops.read(hw, &in_msg, 1)) | 374 | if (mbx->ops.read(hw, &in_msg, 1)) |
377 | goto out; | 375 | goto out; |
378 | 376 | ||
379 | /* if incoming message isn't clear to send we are waiting on response */ | 377 | /* if incoming message isn't clear to send we are waiting on response */ |
380 | if (!(in_msg & E1000_VT_MSGTYPE_CTS)) { | 378 | if (!(in_msg & E1000_VT_MSGTYPE_CTS)) { |
381 | /* message is not CTS and is NACK we must have lost CTS status */ | 379 | /* msg is not CTS and is NACK we must have lost CTS status */ |
382 | if (in_msg & E1000_VT_MSGTYPE_NACK) | 380 | if (in_msg & E1000_VT_MSGTYPE_NACK) |
383 | ret_val = -E1000_ERR_MAC_INIT; | 381 | ret_val = -E1000_ERR_MAC_INIT; |
384 | goto out; | 382 | goto out; |
385 | } | 383 | } |
386 | 384 | ||
387 | /* the pf is talking, if we timed out in the past we reinit */ | 385 | /* the PF is talking, if we timed out in the past we reinit */ |
388 | if (!mbx->timeout) { | 386 | if (!mbx->timeout) { |
389 | ret_val = -E1000_ERR_MAC_INIT; | 387 | ret_val = -E1000_ERR_MAC_INIT; |
390 | goto out; | 388 | goto out; |
391 | } | 389 | } |
392 | 390 | ||
393 | /* if we passed all the tests above then the link is up and we no | 391 | /* if we passed all the tests above then the link is up and we no |
394 | * longer need to check for link */ | 392 | * longer need to check for link |
393 | */ | ||
395 | mac->get_link_status = false; | 394 | mac->get_link_status = false; |
396 | 395 | ||
397 | out: | 396 | out: |
diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h index 57db3c68dfcd..0f1eca639f68 100644 --- a/drivers/net/ethernet/intel/igbvf/vf.h +++ b/drivers/net/ethernet/intel/igbvf/vf.h | |||
@@ -13,8 +13,7 @@ | |||
13 | more details. | 13 | more details. |
14 | 14 | ||
15 | You should have received a copy of the GNU General Public License along with | 15 | You should have received a copy of the GNU General Public License along with |
16 | this program; if not, write to the Free Software Foundation, Inc., | 16 | this program; if not, see <http://www.gnu.org/licenses/>. |
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | 17 | ||
19 | The full GNU General Public License is included in this distribution in | 18 | The full GNU General Public License is included in this distribution in |
20 | the file called "COPYING". | 19 | the file called "COPYING". |
@@ -38,30 +37,29 @@ | |||
38 | 37 | ||
39 | struct e1000_hw; | 38 | struct e1000_hw; |
40 | 39 | ||
41 | #define E1000_DEV_ID_82576_VF 0x10CA | 40 | #define E1000_DEV_ID_82576_VF 0x10CA |
42 | #define E1000_DEV_ID_I350_VF 0x1520 | 41 | #define E1000_DEV_ID_I350_VF 0x1520 |
43 | #define E1000_REVISION_0 0 | 42 | #define E1000_REVISION_0 0 |
44 | #define E1000_REVISION_1 1 | 43 | #define E1000_REVISION_1 1 |
45 | #define E1000_REVISION_2 2 | 44 | #define E1000_REVISION_2 2 |
46 | #define E1000_REVISION_3 3 | 45 | #define E1000_REVISION_3 3 |
47 | #define E1000_REVISION_4 4 | 46 | #define E1000_REVISION_4 4 |
48 | 47 | ||
49 | #define E1000_FUNC_0 0 | 48 | #define E1000_FUNC_0 0 |
50 | #define E1000_FUNC_1 1 | 49 | #define E1000_FUNC_1 1 |
51 | 50 | ||
52 | /* | 51 | /* Receive Address Register Count |
53 | * Receive Address Register Count | ||
54 | * Number of high/low register pairs in the RAR. The RAR (Receive Address | 52 | * Number of high/low register pairs in the RAR. The RAR (Receive Address |
55 | * Registers) holds the directed and multicast addresses that we monitor. | 53 | * Registers) holds the directed and multicast addresses that we monitor. |
56 | * These entries are also used for MAC-based filtering. | 54 | * These entries are also used for MAC-based filtering. |
57 | */ | 55 | */ |
58 | #define E1000_RAR_ENTRIES_VF 1 | 56 | #define E1000_RAR_ENTRIES_VF 1 |
59 | 57 | ||
60 | /* Receive Descriptor - Advanced */ | 58 | /* Receive Descriptor - Advanced */ |
61 | union e1000_adv_rx_desc { | 59 | union e1000_adv_rx_desc { |
62 | struct { | 60 | struct { |
63 | u64 pkt_addr; /* Packet buffer address */ | 61 | u64 pkt_addr; /* Packet buffer address */ |
64 | u64 hdr_addr; /* Header buffer address */ | 62 | u64 hdr_addr; /* Header buffer address */ |
65 | } read; | 63 | } read; |
66 | struct { | 64 | struct { |
67 | struct { | 65 | struct { |
@@ -69,53 +67,53 @@ union e1000_adv_rx_desc { | |||
69 | u32 data; | 67 | u32 data; |
70 | struct { | 68 | struct { |
71 | u16 pkt_info; /* RSS/Packet type */ | 69 | u16 pkt_info; /* RSS/Packet type */ |
72 | u16 hdr_info; /* Split Header, | 70 | /* Split Header, hdr buffer length */ |
73 | * hdr buffer length */ | 71 | u16 hdr_info; |
74 | } hs_rss; | 72 | } hs_rss; |
75 | } lo_dword; | 73 | } lo_dword; |
76 | union { | 74 | union { |
77 | u32 rss; /* RSS Hash */ | 75 | u32 rss; /* RSS Hash */ |
78 | struct { | 76 | struct { |
79 | u16 ip_id; /* IP id */ | 77 | u16 ip_id; /* IP id */ |
80 | u16 csum; /* Packet Checksum */ | 78 | u16 csum; /* Packet Checksum */ |
81 | } csum_ip; | 79 | } csum_ip; |
82 | } hi_dword; | 80 | } hi_dword; |
83 | } lower; | 81 | } lower; |
84 | struct { | 82 | struct { |
85 | u32 status_error; /* ext status/error */ | 83 | u32 status_error; /* ext status/error */ |
86 | u16 length; /* Packet length */ | 84 | u16 length; /* Packet length */ |
87 | u16 vlan; /* VLAN tag */ | 85 | u16 vlan; /* VLAN tag */ |
88 | } upper; | 86 | } upper; |
89 | } wb; /* writeback */ | 87 | } wb; /* writeback */ |
90 | }; | 88 | }; |
91 | 89 | ||
92 | #define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0 | 90 | #define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0 |
93 | #define E1000_RXDADV_HDRBUFLEN_SHIFT 5 | 91 | #define E1000_RXDADV_HDRBUFLEN_SHIFT 5 |
94 | 92 | ||
95 | /* Transmit Descriptor - Advanced */ | 93 | /* Transmit Descriptor - Advanced */ |
96 | union e1000_adv_tx_desc { | 94 | union e1000_adv_tx_desc { |
97 | struct { | 95 | struct { |
98 | u64 buffer_addr; /* Address of descriptor's data buf */ | 96 | u64 buffer_addr; /* Address of descriptor's data buf */ |
99 | u32 cmd_type_len; | 97 | u32 cmd_type_len; |
100 | u32 olinfo_status; | 98 | u32 olinfo_status; |
101 | } read; | 99 | } read; |
102 | struct { | 100 | struct { |
103 | u64 rsvd; /* Reserved */ | 101 | u64 rsvd; /* Reserved */ |
104 | u32 nxtseq_seed; | 102 | u32 nxtseq_seed; |
105 | u32 status; | 103 | u32 status; |
106 | } wb; | 104 | } wb; |
107 | }; | 105 | }; |
108 | 106 | ||
109 | /* Adv Transmit Descriptor Config Masks */ | 107 | /* Adv Transmit Descriptor Config Masks */ |
110 | #define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ | 108 | #define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ |
111 | #define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ | 109 | #define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ |
112 | #define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */ | 110 | #define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */ |
113 | #define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ | 111 | #define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ |
114 | #define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */ | 112 | #define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */ |
115 | #define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */ | 113 | #define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */ |
116 | #define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */ | 114 | #define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */ |
117 | #define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ | 115 | #define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ |
118 | #define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ | 116 | #define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ |
119 | 117 | ||
120 | /* Context descriptors */ | 118 | /* Context descriptors */ |
121 | struct e1000_adv_tx_context_desc { | 119 | struct e1000_adv_tx_context_desc { |
@@ -125,11 +123,11 @@ struct e1000_adv_tx_context_desc { | |||
125 | u32 mss_l4len_idx; | 123 | u32 mss_l4len_idx; |
126 | }; | 124 | }; |
127 | 125 | ||
128 | #define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ | 126 | #define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ |
129 | #define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ | 127 | #define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ |
130 | #define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ | 128 | #define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ |
131 | #define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ | 129 | #define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ |
132 | #define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ | 130 | #define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ |
133 | 131 | ||
134 | enum e1000_mac_type { | 132 | enum e1000_mac_type { |
135 | e1000_undefined = 0, | 133 | e1000_undefined = 0, |
@@ -262,5 +260,4 @@ struct e1000_hw { | |||
262 | void e1000_rlpml_set_vf(struct e1000_hw *, u16); | 260 | void e1000_rlpml_set_vf(struct e1000_hw *, u16); |
263 | void e1000_init_function_pointers_vf(struct e1000_hw *hw); | 261 | void e1000_init_function_pointers_vf(struct e1000_hw *hw); |
264 | 262 | ||
265 | |||
266 | #endif /* _E1000_VF_H_ */ | 263 | #endif /* _E1000_VF_H_ */ |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 70cc4c5c0a01..903664ff6904 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | |||
@@ -3924,7 +3924,7 @@ static void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter) | |||
3924 | for (i = 0; i < hw->mac.num_rar_entries; i++) { | 3924 | for (i = 0; i < hw->mac.num_rar_entries; i++) { |
3925 | adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED; | 3925 | adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED; |
3926 | adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE; | 3926 | adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE; |
3927 | memset(adapter->mac_table[i].addr, 0, ETH_ALEN); | 3927 | eth_zero_addr(adapter->mac_table[i].addr); |
3928 | adapter->mac_table[i].queue = 0; | 3928 | adapter->mac_table[i].queue = 0; |
3929 | } | 3929 | } |
3930 | ixgbe_sync_mac_table(adapter); | 3930 | ixgbe_sync_mac_table(adapter); |
@@ -3992,7 +3992,7 @@ int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, u8 *addr, u16 queue) | |||
3992 | adapter->mac_table[i].queue == queue) { | 3992 | adapter->mac_table[i].queue == queue) { |
3993 | adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED; | 3993 | adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED; |
3994 | adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE; | 3994 | adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE; |
3995 | memset(adapter->mac_table[i].addr, 0, ETH_ALEN); | 3995 | eth_zero_addr(adapter->mac_table[i].addr); |
3996 | adapter->mac_table[i].queue = 0; | 3996 | adapter->mac_table[i].queue = 0; |
3997 | ixgbe_sync_mac_table(adapter); | 3997 | ixgbe_sync_mac_table(adapter); |
3998 | return 0; | 3998 | return 0; |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 2a210c4efb89..c59ed925adaf 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c | |||
@@ -1685,7 +1685,7 @@ int mlx4_en_start_port(struct net_device *dev) | |||
1685 | } | 1685 | } |
1686 | 1686 | ||
1687 | /* Attach rx QP to bradcast address */ | 1687 | /* Attach rx QP to bradcast address */ |
1688 | memset(&mc_list[10], 0xff, ETH_ALEN); | 1688 | eth_broadcast_addr(&mc_list[10]); |
1689 | mc_list[5] = priv->port; /* needed for B0 steering support */ | 1689 | mc_list[5] = priv->port; /* needed for B0 steering support */ |
1690 | if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list, | 1690 | if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list, |
1691 | priv->port, 0, MLX4_PROT_ETH, | 1691 | priv->port, 0, MLX4_PROT_ETH, |
@@ -1788,7 +1788,7 @@ void mlx4_en_stop_port(struct net_device *dev, int detach) | |||
1788 | } | 1788 | } |
1789 | 1789 | ||
1790 | /* Detach All multicasts */ | 1790 | /* Detach All multicasts */ |
1791 | memset(&mc_list[10], 0xff, ETH_ALEN); | 1791 | eth_broadcast_addr(&mc_list[10]); |
1792 | mc_list[5] = priv->port; /* needed for B0 steering support */ | 1792 | mc_list[5] = priv->port; /* needed for B0 steering support */ |
1793 | mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list, | 1793 | mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list, |
1794 | MLX4_PROT_ETH, priv->broadcast_id); | 1794 | MLX4_PROT_ETH, priv->broadcast_id); |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c index a61009f4b2df..b66e03d9711f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c | |||
@@ -66,7 +66,7 @@ static int mlx4_en_test_loopback_xmit(struct mlx4_en_priv *priv) | |||
66 | ethh = (struct ethhdr *)skb_put(skb, sizeof(struct ethhdr)); | 66 | ethh = (struct ethhdr *)skb_put(skb, sizeof(struct ethhdr)); |
67 | packet = (unsigned char *)skb_put(skb, packet_size); | 67 | packet = (unsigned char *)skb_put(skb, packet_size); |
68 | memcpy(ethh->h_dest, priv->dev->dev_addr, ETH_ALEN); | 68 | memcpy(ethh->h_dest, priv->dev->dev_addr, ETH_ALEN); |
69 | memset(ethh->h_source, 0, ETH_ALEN); | 69 | eth_zero_addr(ethh->h_source); |
70 | ethh->h_proto = htons(ETH_P_ARP); | 70 | ethh->h_proto = htons(ETH_P_ARP); |
71 | skb_set_mac_header(skb, 0); | 71 | skb_set_mac_header(skb, 0); |
72 | for (i = 0; i < packet_size; ++i) /* fill our packet */ | 72 | for (i = 0; i < packet_size; ++i) /* fill our packet */ |
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c index 10988fbf47eb..6f332ebdf3b5 100644 --- a/drivers/net/ethernet/micrel/ksz884x.c +++ b/drivers/net/ethernet/micrel/ksz884x.c | |||
@@ -4144,7 +4144,7 @@ static int hw_del_addr(struct ksz_hw *hw, u8 *mac_addr) | |||
4144 | 4144 | ||
4145 | for (i = 0; i < hw->addr_list_size; i++) { | 4145 | for (i = 0; i < hw->addr_list_size; i++) { |
4146 | if (ether_addr_equal(hw->address[i], mac_addr)) { | 4146 | if (ether_addr_equal(hw->address[i], mac_addr)) { |
4147 | memset(hw->address[i], 0, ETH_ALEN); | 4147 | eth_zero_addr(hw->address[i]); |
4148 | writel(0, hw->io + ADD_ADDR_INCR * i + | 4148 | writel(0, hw->io + ADD_ADDR_INCR * i + |
4149 | KS_ADD_ADDR_0_HI); | 4149 | KS_ADD_ADDR_0_HI); |
4150 | return 0; | 4150 | return 0; |
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c index 716fc37ada5a..db80eb1c6d4f 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c | |||
@@ -537,7 +537,7 @@ static void netxen_p2_nic_set_multi(struct net_device *netdev) | |||
537 | u8 null_addr[ETH_ALEN]; | 537 | u8 null_addr[ETH_ALEN]; |
538 | int i; | 538 | int i; |
539 | 539 | ||
540 | memset(null_addr, 0, ETH_ALEN); | 540 | eth_zero_addr(null_addr); |
541 | 541 | ||
542 | if (netdev->flags & IFF_PROMISC) { | 542 | if (netdev->flags & IFF_PROMISC) { |
543 | 543 | ||
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index 8011ef3e7707..25800a1dedcb 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c | |||
@@ -460,7 +460,7 @@ static int ql_set_mac_addr(struct ql_adapter *qdev, int set) | |||
460 | netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, | 460 | netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, |
461 | "Set Mac addr %pM\n", addr); | 461 | "Set Mac addr %pM\n", addr); |
462 | } else { | 462 | } else { |
463 | memset(zero_mac_addr, 0, ETH_ALEN); | 463 | eth_zero_addr(zero_mac_addr); |
464 | addr = &zero_mac_addr[0]; | 464 | addr = &zero_mac_addr[0]; |
465 | netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, | 465 | netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, |
466 | "Clearing MAC address\n"); | 466 | "Clearing MAC address\n"); |
diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c index 9fb6948e14c6..a5d1e6ea7d58 100644 --- a/drivers/net/ethernet/rocker/rocker.c +++ b/drivers/net/ethernet/rocker/rocker.c | |||
@@ -49,12 +49,12 @@ struct rocker_flow_tbl_key { | |||
49 | enum rocker_of_dpa_table_id tbl_id; | 49 | enum rocker_of_dpa_table_id tbl_id; |
50 | union { | 50 | union { |
51 | struct { | 51 | struct { |
52 | u32 in_lport; | 52 | u32 in_pport; |
53 | u32 in_lport_mask; | 53 | u32 in_pport_mask; |
54 | enum rocker_of_dpa_table_id goto_tbl; | 54 | enum rocker_of_dpa_table_id goto_tbl; |
55 | } ig_port; | 55 | } ig_port; |
56 | struct { | 56 | struct { |
57 | u32 in_lport; | 57 | u32 in_pport; |
58 | __be16 vlan_id; | 58 | __be16 vlan_id; |
59 | __be16 vlan_id_mask; | 59 | __be16 vlan_id_mask; |
60 | enum rocker_of_dpa_table_id goto_tbl; | 60 | enum rocker_of_dpa_table_id goto_tbl; |
@@ -62,8 +62,8 @@ struct rocker_flow_tbl_key { | |||
62 | __be16 new_vlan_id; | 62 | __be16 new_vlan_id; |
63 | } vlan; | 63 | } vlan; |
64 | struct { | 64 | struct { |
65 | u32 in_lport; | 65 | u32 in_pport; |
66 | u32 in_lport_mask; | 66 | u32 in_pport_mask; |
67 | __be16 eth_type; | 67 | __be16 eth_type; |
68 | u8 eth_dst[ETH_ALEN]; | 68 | u8 eth_dst[ETH_ALEN]; |
69 | u8 eth_dst_mask[ETH_ALEN]; | 69 | u8 eth_dst_mask[ETH_ALEN]; |
@@ -91,8 +91,8 @@ struct rocker_flow_tbl_key { | |||
91 | bool copy_to_cpu; | 91 | bool copy_to_cpu; |
92 | } bridge; | 92 | } bridge; |
93 | struct { | 93 | struct { |
94 | u32 in_lport; | 94 | u32 in_pport; |
95 | u32 in_lport_mask; | 95 | u32 in_pport_mask; |
96 | u8 eth_src[ETH_ALEN]; | 96 | u8 eth_src[ETH_ALEN]; |
97 | u8 eth_src_mask[ETH_ALEN]; | 97 | u8 eth_src_mask[ETH_ALEN]; |
98 | u8 eth_dst[ETH_ALEN]; | 98 | u8 eth_dst[ETH_ALEN]; |
@@ -148,7 +148,7 @@ struct rocker_fdb_tbl_entry { | |||
148 | u32 key_crc32; /* key */ | 148 | u32 key_crc32; /* key */ |
149 | bool learned; | 149 | bool learned; |
150 | struct rocker_fdb_tbl_key { | 150 | struct rocker_fdb_tbl_key { |
151 | u32 lport; | 151 | u32 pport; |
152 | u8 addr[ETH_ALEN]; | 152 | u8 addr[ETH_ALEN]; |
153 | __be16 vlan_id; | 153 | __be16 vlan_id; |
154 | } key; | 154 | } key; |
@@ -200,7 +200,7 @@ struct rocker_port { | |||
200 | struct net_device *bridge_dev; | 200 | struct net_device *bridge_dev; |
201 | struct rocker *rocker; | 201 | struct rocker *rocker; |
202 | unsigned int port_number; | 202 | unsigned int port_number; |
203 | u32 lport; | 203 | u32 pport; |
204 | __be16 internal_vlan_id; | 204 | __be16 internal_vlan_id; |
205 | int stp_state; | 205 | int stp_state; |
206 | u32 brport_flags; | 206 | u32 brport_flags; |
@@ -789,7 +789,30 @@ static u32 __pos_inc(u32 pos, size_t limit) | |||
789 | 789 | ||
790 | static int rocker_desc_err(struct rocker_desc_info *desc_info) | 790 | static int rocker_desc_err(struct rocker_desc_info *desc_info) |
791 | { | 791 | { |
792 | return -(desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN); | 792 | int err = desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN; |
793 | |||
794 | switch (err) { | ||
795 | case ROCKER_OK: | ||
796 | return 0; | ||
797 | case -ROCKER_ENOENT: | ||
798 | return -ENOENT; | ||
799 | case -ROCKER_ENXIO: | ||
800 | return -ENXIO; | ||
801 | case -ROCKER_ENOMEM: | ||
802 | return -ENOMEM; | ||
803 | case -ROCKER_EEXIST: | ||
804 | return -EEXIST; | ||
805 | case -ROCKER_EINVAL: | ||
806 | return -EINVAL; | ||
807 | case -ROCKER_EMSGSIZE: | ||
808 | return -EMSGSIZE; | ||
809 | case -ROCKER_ENOTSUP: | ||
810 | return -EOPNOTSUPP; | ||
811 | case -ROCKER_ENOBUFS: | ||
812 | return -ENOBUFS; | ||
813 | } | ||
814 | |||
815 | return -EINVAL; | ||
793 | } | 816 | } |
794 | 817 | ||
795 | static void rocker_desc_gen_clear(struct rocker_desc_info *desc_info) | 818 | static void rocker_desc_gen_clear(struct rocker_desc_info *desc_info) |
@@ -1257,9 +1280,9 @@ static void rocker_port_set_enable(struct rocker_port *rocker_port, bool enable) | |||
1257 | u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE); | 1280 | u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE); |
1258 | 1281 | ||
1259 | if (enable) | 1282 | if (enable) |
1260 | val |= 1ULL << rocker_port->lport; | 1283 | val |= 1ULL << rocker_port->pport; |
1261 | else | 1284 | else |
1262 | val &= ~(1ULL << rocker_port->lport); | 1285 | val &= ~(1ULL << rocker_port->pport); |
1263 | rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val); | 1286 | rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val); |
1264 | } | 1287 | } |
1265 | 1288 | ||
@@ -1312,11 +1335,11 @@ static int rocker_event_link_change(struct rocker *rocker, | |||
1312 | struct rocker_port *rocker_port; | 1335 | struct rocker_port *rocker_port; |
1313 | 1336 | ||
1314 | rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_LINK_CHANGED_MAX, info); | 1337 | rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_LINK_CHANGED_MAX, info); |
1315 | if (!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LPORT] || | 1338 | if (!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT] || |
1316 | !attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]) | 1339 | !attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]) |
1317 | return -EIO; | 1340 | return -EIO; |
1318 | port_number = | 1341 | port_number = |
1319 | rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LPORT]) - 1; | 1342 | rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT]) - 1; |
1320 | link_up = rocker_tlv_get_u8(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]); | 1343 | link_up = rocker_tlv_get_u8(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]); |
1321 | 1344 | ||
1322 | if (port_number >= rocker->port_count) | 1345 | if (port_number >= rocker->port_count) |
@@ -1353,12 +1376,12 @@ static int rocker_event_mac_vlan_seen(struct rocker *rocker, | |||
1353 | __be16 vlan_id; | 1376 | __be16 vlan_id; |
1354 | 1377 | ||
1355 | rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_MAC_VLAN_MAX, info); | 1378 | rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_MAC_VLAN_MAX, info); |
1356 | if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_LPORT] || | 1379 | if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT] || |
1357 | !attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC] || | 1380 | !attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC] || |
1358 | !attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]) | 1381 | !attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]) |
1359 | return -EIO; | 1382 | return -EIO; |
1360 | port_number = | 1383 | port_number = |
1361 | rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_MAC_VLAN_LPORT]) - 1; | 1384 | rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT]) - 1; |
1362 | addr = rocker_tlv_data(attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC]); | 1385 | addr = rocker_tlv_data(attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC]); |
1363 | vlan_id = rocker_tlv_get_be16(attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]); | 1386 | vlan_id = rocker_tlv_get_be16(attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]); |
1364 | 1387 | ||
@@ -1517,8 +1540,8 @@ rocker_cmd_get_port_settings_prep(struct rocker *rocker, | |||
1517 | cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); | 1540 | cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); |
1518 | if (!cmd_info) | 1541 | if (!cmd_info) |
1519 | return -EMSGSIZE; | 1542 | return -EMSGSIZE; |
1520 | if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LPORT, | 1543 | if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, |
1521 | rocker_port->lport)) | 1544 | rocker_port->pport)) |
1522 | return -EMSGSIZE; | 1545 | return -EMSGSIZE; |
1523 | rocker_tlv_nest_end(desc_info, cmd_info); | 1546 | rocker_tlv_nest_end(desc_info, cmd_info); |
1524 | return 0; | 1547 | return 0; |
@@ -1606,8 +1629,8 @@ rocker_cmd_set_port_settings_ethtool_prep(struct rocker *rocker, | |||
1606 | cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); | 1629 | cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); |
1607 | if (!cmd_info) | 1630 | if (!cmd_info) |
1608 | return -EMSGSIZE; | 1631 | return -EMSGSIZE; |
1609 | if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LPORT, | 1632 | if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, |
1610 | rocker_port->lport)) | 1633 | rocker_port->pport)) |
1611 | return -EMSGSIZE; | 1634 | return -EMSGSIZE; |
1612 | if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED, | 1635 | if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED, |
1613 | ethtool_cmd_speed(ecmd))) | 1636 | ethtool_cmd_speed(ecmd))) |
@@ -1637,8 +1660,8 @@ rocker_cmd_set_port_settings_macaddr_prep(struct rocker *rocker, | |||
1637 | cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); | 1660 | cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); |
1638 | if (!cmd_info) | 1661 | if (!cmd_info) |
1639 | return -EMSGSIZE; | 1662 | return -EMSGSIZE; |
1640 | if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LPORT, | 1663 | if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, |
1641 | rocker_port->lport)) | 1664 | rocker_port->pport)) |
1642 | return -EMSGSIZE; | 1665 | return -EMSGSIZE; |
1643 | if (rocker_tlv_put(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR, | 1666 | if (rocker_tlv_put(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR, |
1644 | ETH_ALEN, macaddr)) | 1667 | ETH_ALEN, macaddr)) |
@@ -1661,8 +1684,8 @@ rocker_cmd_set_port_learning_prep(struct rocker *rocker, | |||
1661 | cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); | 1684 | cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); |
1662 | if (!cmd_info) | 1685 | if (!cmd_info) |
1663 | return -EMSGSIZE; | 1686 | return -EMSGSIZE; |
1664 | if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LPORT, | 1687 | if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, |
1665 | rocker_port->lport)) | 1688 | rocker_port->pport)) |
1666 | return -EMSGSIZE; | 1689 | return -EMSGSIZE; |
1667 | if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING, | 1690 | if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING, |
1668 | !!(rocker_port->brport_flags & BR_LEARNING))) | 1691 | !!(rocker_port->brport_flags & BR_LEARNING))) |
@@ -1715,11 +1738,11 @@ static int rocker_port_set_learning(struct rocker_port *rocker_port) | |||
1715 | static int rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info, | 1738 | static int rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info, |
1716 | struct rocker_flow_tbl_entry *entry) | 1739 | struct rocker_flow_tbl_entry *entry) |
1717 | { | 1740 | { |
1718 | if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT, | 1741 | if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT, |
1719 | entry->key.ig_port.in_lport)) | 1742 | entry->key.ig_port.in_pport)) |
1720 | return -EMSGSIZE; | 1743 | return -EMSGSIZE; |
1721 | if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT_MASK, | 1744 | if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK, |
1722 | entry->key.ig_port.in_lport_mask)) | 1745 | entry->key.ig_port.in_pport_mask)) |
1723 | return -EMSGSIZE; | 1746 | return -EMSGSIZE; |
1724 | if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, | 1747 | if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, |
1725 | entry->key.ig_port.goto_tbl)) | 1748 | entry->key.ig_port.goto_tbl)) |
@@ -1731,8 +1754,8 @@ static int rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info, | |||
1731 | static int rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info, | 1754 | static int rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info, |
1732 | struct rocker_flow_tbl_entry *entry) | 1755 | struct rocker_flow_tbl_entry *entry) |
1733 | { | 1756 | { |
1734 | if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT, | 1757 | if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT, |
1735 | entry->key.vlan.in_lport)) | 1758 | entry->key.vlan.in_pport)) |
1736 | return -EMSGSIZE; | 1759 | return -EMSGSIZE; |
1737 | if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, | 1760 | if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, |
1738 | entry->key.vlan.vlan_id)) | 1761 | entry->key.vlan.vlan_id)) |
@@ -1754,11 +1777,11 @@ static int rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info, | |||
1754 | static int rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info, | 1777 | static int rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info, |
1755 | struct rocker_flow_tbl_entry *entry) | 1778 | struct rocker_flow_tbl_entry *entry) |
1756 | { | 1779 | { |
1757 | if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT, | 1780 | if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT, |
1758 | entry->key.term_mac.in_lport)) | 1781 | entry->key.term_mac.in_pport)) |
1759 | return -EMSGSIZE; | 1782 | return -EMSGSIZE; |
1760 | if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT_MASK, | 1783 | if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK, |
1761 | entry->key.term_mac.in_lport_mask)) | 1784 | entry->key.term_mac.in_pport_mask)) |
1762 | return -EMSGSIZE; | 1785 | return -EMSGSIZE; |
1763 | if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE, | 1786 | if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE, |
1764 | entry->key.term_mac.eth_type)) | 1787 | entry->key.term_mac.eth_type)) |
@@ -1845,11 +1868,11 @@ static int rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info, | |||
1845 | static int rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info, | 1868 | static int rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info, |
1846 | struct rocker_flow_tbl_entry *entry) | 1869 | struct rocker_flow_tbl_entry *entry) |
1847 | { | 1870 | { |
1848 | if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT, | 1871 | if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT, |
1849 | entry->key.acl.in_lport)) | 1872 | entry->key.acl.in_pport)) |
1850 | return -EMSGSIZE; | 1873 | return -EMSGSIZE; |
1851 | if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_LPORT_MASK, | 1874 | if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK, |
1852 | entry->key.acl.in_lport_mask)) | 1875 | entry->key.acl.in_pport_mask)) |
1853 | return -EMSGSIZE; | 1876 | return -EMSGSIZE; |
1854 | if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC, | 1877 | if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC, |
1855 | ETH_ALEN, entry->key.acl.eth_src)) | 1878 | ETH_ALEN, entry->key.acl.eth_src)) |
@@ -1993,7 +2016,7 @@ static int | |||
1993 | rocker_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info, | 2016 | rocker_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info, |
1994 | struct rocker_group_tbl_entry *entry) | 2017 | struct rocker_group_tbl_entry *entry) |
1995 | { | 2018 | { |
1996 | if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_LPORT, | 2019 | if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT, |
1997 | ROCKER_GROUP_PORT_GET(entry->group_id))) | 2020 | ROCKER_GROUP_PORT_GET(entry->group_id))) |
1998 | return -EMSGSIZE; | 2021 | return -EMSGSIZE; |
1999 | if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN, | 2022 | if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN, |
@@ -2311,7 +2334,7 @@ static int rocker_flow_tbl_do(struct rocker_port *rocker_port, | |||
2311 | } | 2334 | } |
2312 | 2335 | ||
2313 | static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port, | 2336 | static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port, |
2314 | int flags, u32 in_lport, u32 in_lport_mask, | 2337 | int flags, u32 in_pport, u32 in_pport_mask, |
2315 | enum rocker_of_dpa_table_id goto_tbl) | 2338 | enum rocker_of_dpa_table_id goto_tbl) |
2316 | { | 2339 | { |
2317 | struct rocker_flow_tbl_entry *entry; | 2340 | struct rocker_flow_tbl_entry *entry; |
@@ -2322,15 +2345,15 @@ static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port, | |||
2322 | 2345 | ||
2323 | entry->key.priority = ROCKER_PRIORITY_IG_PORT; | 2346 | entry->key.priority = ROCKER_PRIORITY_IG_PORT; |
2324 | entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT; | 2347 | entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT; |
2325 | entry->key.ig_port.in_lport = in_lport; | 2348 | entry->key.ig_port.in_pport = in_pport; |
2326 | entry->key.ig_port.in_lport_mask = in_lport_mask; | 2349 | entry->key.ig_port.in_pport_mask = in_pport_mask; |
2327 | entry->key.ig_port.goto_tbl = goto_tbl; | 2350 | entry->key.ig_port.goto_tbl = goto_tbl; |
2328 | 2351 | ||
2329 | return rocker_flow_tbl_do(rocker_port, flags, entry); | 2352 | return rocker_flow_tbl_do(rocker_port, flags, entry); |
2330 | } | 2353 | } |
2331 | 2354 | ||
2332 | static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port, | 2355 | static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port, |
2333 | int flags, u32 in_lport, | 2356 | int flags, u32 in_pport, |
2334 | __be16 vlan_id, __be16 vlan_id_mask, | 2357 | __be16 vlan_id, __be16 vlan_id_mask, |
2335 | enum rocker_of_dpa_table_id goto_tbl, | 2358 | enum rocker_of_dpa_table_id goto_tbl, |
2336 | bool untagged, __be16 new_vlan_id) | 2359 | bool untagged, __be16 new_vlan_id) |
@@ -2343,7 +2366,7 @@ static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port, | |||
2343 | 2366 | ||
2344 | entry->key.priority = ROCKER_PRIORITY_VLAN; | 2367 | entry->key.priority = ROCKER_PRIORITY_VLAN; |
2345 | entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN; | 2368 | entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN; |
2346 | entry->key.vlan.in_lport = in_lport; | 2369 | entry->key.vlan.in_pport = in_pport; |
2347 | entry->key.vlan.vlan_id = vlan_id; | 2370 | entry->key.vlan.vlan_id = vlan_id; |
2348 | entry->key.vlan.vlan_id_mask = vlan_id_mask; | 2371 | entry->key.vlan.vlan_id_mask = vlan_id_mask; |
2349 | entry->key.vlan.goto_tbl = goto_tbl; | 2372 | entry->key.vlan.goto_tbl = goto_tbl; |
@@ -2355,7 +2378,7 @@ static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port, | |||
2355 | } | 2378 | } |
2356 | 2379 | ||
2357 | static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port, | 2380 | static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port, |
2358 | u32 in_lport, u32 in_lport_mask, | 2381 | u32 in_pport, u32 in_pport_mask, |
2359 | __be16 eth_type, const u8 *eth_dst, | 2382 | __be16 eth_type, const u8 *eth_dst, |
2360 | const u8 *eth_dst_mask, __be16 vlan_id, | 2383 | const u8 *eth_dst_mask, __be16 vlan_id, |
2361 | __be16 vlan_id_mask, bool copy_to_cpu, | 2384 | __be16 vlan_id_mask, bool copy_to_cpu, |
@@ -2378,8 +2401,8 @@ static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port, | |||
2378 | } | 2401 | } |
2379 | 2402 | ||
2380 | entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC; | 2403 | entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC; |
2381 | entry->key.term_mac.in_lport = in_lport; | 2404 | entry->key.term_mac.in_pport = in_pport; |
2382 | entry->key.term_mac.in_lport_mask = in_lport_mask; | 2405 | entry->key.term_mac.in_pport_mask = in_pport_mask; |
2383 | entry->key.term_mac.eth_type = eth_type; | 2406 | entry->key.term_mac.eth_type = eth_type; |
2384 | ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst); | 2407 | ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst); |
2385 | ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask); | 2408 | ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask); |
@@ -2445,8 +2468,8 @@ static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port, | |||
2445 | } | 2468 | } |
2446 | 2469 | ||
2447 | static int rocker_flow_tbl_acl(struct rocker_port *rocker_port, | 2470 | static int rocker_flow_tbl_acl(struct rocker_port *rocker_port, |
2448 | int flags, u32 in_lport, | 2471 | int flags, u32 in_pport, |
2449 | u32 in_lport_mask, | 2472 | u32 in_pport_mask, |
2450 | const u8 *eth_src, const u8 *eth_src_mask, | 2473 | const u8 *eth_src, const u8 *eth_src_mask, |
2451 | const u8 *eth_dst, const u8 *eth_dst_mask, | 2474 | const u8 *eth_dst, const u8 *eth_dst_mask, |
2452 | __be16 eth_type, | 2475 | __be16 eth_type, |
@@ -2472,8 +2495,8 @@ static int rocker_flow_tbl_acl(struct rocker_port *rocker_port, | |||
2472 | 2495 | ||
2473 | entry->key.priority = priority; | 2496 | entry->key.priority = priority; |
2474 | entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; | 2497 | entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; |
2475 | entry->key.acl.in_lport = in_lport; | 2498 | entry->key.acl.in_pport = in_pport; |
2476 | entry->key.acl.in_lport_mask = in_lport_mask; | 2499 | entry->key.acl.in_pport_mask = in_pport_mask; |
2477 | 2500 | ||
2478 | if (eth_src) | 2501 | if (eth_src) |
2479 | ether_addr_copy(entry->key.acl.eth_src, eth_src); | 2502 | ether_addr_copy(entry->key.acl.eth_src, eth_src); |
@@ -2604,7 +2627,7 @@ static int rocker_group_tbl_do(struct rocker_port *rocker_port, | |||
2604 | 2627 | ||
2605 | static int rocker_group_l2_interface(struct rocker_port *rocker_port, | 2628 | static int rocker_group_l2_interface(struct rocker_port *rocker_port, |
2606 | int flags, __be16 vlan_id, | 2629 | int flags, __be16 vlan_id, |
2607 | u32 out_lport, int pop_vlan) | 2630 | u32 out_pport, int pop_vlan) |
2608 | { | 2631 | { |
2609 | struct rocker_group_tbl_entry *entry; | 2632 | struct rocker_group_tbl_entry *entry; |
2610 | 2633 | ||
@@ -2612,7 +2635,7 @@ static int rocker_group_l2_interface(struct rocker_port *rocker_port, | |||
2612 | if (!entry) | 2635 | if (!entry) |
2613 | return -ENOMEM; | 2636 | return -ENOMEM; |
2614 | 2637 | ||
2615 | entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_lport); | 2638 | entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport); |
2616 | entry->l2_interface.pop_vlan = pop_vlan; | 2639 | entry->l2_interface.pop_vlan = pop_vlan; |
2617 | 2640 | ||
2618 | return rocker_group_tbl_do(rocker_port, flags, entry); | 2641 | return rocker_group_tbl_do(rocker_port, flags, entry); |
@@ -2674,8 +2697,7 @@ static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port, | |||
2674 | continue; | 2697 | continue; |
2675 | if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) { | 2698 | if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) { |
2676 | group_ids[group_count++] = | 2699 | group_ids[group_count++] = |
2677 | ROCKER_GROUP_L2_INTERFACE(vlan_id, | 2700 | ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport); |
2678 | p->lport); | ||
2679 | } | 2701 | } |
2680 | } | 2702 | } |
2681 | 2703 | ||
@@ -2700,7 +2722,7 @@ static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port, | |||
2700 | struct rocker *rocker = rocker_port->rocker; | 2722 | struct rocker *rocker = rocker_port->rocker; |
2701 | struct rocker_port *p; | 2723 | struct rocker_port *p; |
2702 | bool adding = !(flags & ROCKER_OP_FLAG_REMOVE); | 2724 | bool adding = !(flags & ROCKER_OP_FLAG_REMOVE); |
2703 | u32 out_lport; | 2725 | u32 out_pport; |
2704 | int ref = 0; | 2726 | int ref = 0; |
2705 | int err; | 2727 | int err; |
2706 | int i; | 2728 | int i; |
@@ -2711,14 +2733,14 @@ static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port, | |||
2711 | 2733 | ||
2712 | if (rocker_port->stp_state == BR_STATE_LEARNING || | 2734 | if (rocker_port->stp_state == BR_STATE_LEARNING || |
2713 | rocker_port->stp_state == BR_STATE_FORWARDING) { | 2735 | rocker_port->stp_state == BR_STATE_FORWARDING) { |
2714 | out_lport = rocker_port->lport; | 2736 | out_pport = rocker_port->pport; |
2715 | err = rocker_group_l2_interface(rocker_port, flags, | 2737 | err = rocker_group_l2_interface(rocker_port, flags, |
2716 | vlan_id, out_lport, | 2738 | vlan_id, out_pport, |
2717 | pop_vlan); | 2739 | pop_vlan); |
2718 | if (err) { | 2740 | if (err) { |
2719 | netdev_err(rocker_port->dev, | 2741 | netdev_err(rocker_port->dev, |
2720 | "Error (%d) port VLAN l2 group for lport %d\n", | 2742 | "Error (%d) port VLAN l2 group for pport %d\n", |
2721 | err, out_lport); | 2743 | err, out_pport); |
2722 | return err; | 2744 | return err; |
2723 | } | 2745 | } |
2724 | } | 2746 | } |
@@ -2737,9 +2759,9 @@ static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port, | |||
2737 | if ((!adding || ref != 1) && (adding || ref != 0)) | 2759 | if ((!adding || ref != 1) && (adding || ref != 0)) |
2738 | return 0; | 2760 | return 0; |
2739 | 2761 | ||
2740 | out_lport = 0; | 2762 | out_pport = 0; |
2741 | err = rocker_group_l2_interface(rocker_port, flags, | 2763 | err = rocker_group_l2_interface(rocker_port, flags, |
2742 | vlan_id, out_lport, | 2764 | vlan_id, out_pport, |
2743 | pop_vlan); | 2765 | pop_vlan); |
2744 | if (err) { | 2766 | if (err) { |
2745 | netdev_err(rocker_port->dev, | 2767 | netdev_err(rocker_port->dev, |
@@ -2799,9 +2821,9 @@ static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port, | |||
2799 | int flags, struct rocker_ctrl *ctrl, | 2821 | int flags, struct rocker_ctrl *ctrl, |
2800 | __be16 vlan_id) | 2822 | __be16 vlan_id) |
2801 | { | 2823 | { |
2802 | u32 in_lport = rocker_port->lport; | 2824 | u32 in_pport = rocker_port->pport; |
2803 | u32 in_lport_mask = 0xffffffff; | 2825 | u32 in_pport_mask = 0xffffffff; |
2804 | u32 out_lport = 0; | 2826 | u32 out_pport = 0; |
2805 | u8 *eth_src = NULL; | 2827 | u8 *eth_src = NULL; |
2806 | u8 *eth_src_mask = NULL; | 2828 | u8 *eth_src_mask = NULL; |
2807 | __be16 vlan_id_mask = htons(0xffff); | 2829 | __be16 vlan_id_mask = htons(0xffff); |
@@ -2809,11 +2831,11 @@ static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port, | |||
2809 | u8 ip_proto_mask = 0; | 2831 | u8 ip_proto_mask = 0; |
2810 | u8 ip_tos = 0; | 2832 | u8 ip_tos = 0; |
2811 | u8 ip_tos_mask = 0; | 2833 | u8 ip_tos_mask = 0; |
2812 | u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_lport); | 2834 | u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport); |
2813 | int err; | 2835 | int err; |
2814 | 2836 | ||
2815 | err = rocker_flow_tbl_acl(rocker_port, flags, | 2837 | err = rocker_flow_tbl_acl(rocker_port, flags, |
2816 | in_lport, in_lport_mask, | 2838 | in_pport, in_pport_mask, |
2817 | eth_src, eth_src_mask, | 2839 | eth_src, eth_src_mask, |
2818 | ctrl->eth_dst, ctrl->eth_dst_mask, | 2840 | ctrl->eth_dst, ctrl->eth_dst_mask, |
2819 | ctrl->eth_type, | 2841 | ctrl->eth_type, |
@@ -2856,7 +2878,7 @@ static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port, | |||
2856 | int flags, struct rocker_ctrl *ctrl, | 2878 | int flags, struct rocker_ctrl *ctrl, |
2857 | __be16 vlan_id) | 2879 | __be16 vlan_id) |
2858 | { | 2880 | { |
2859 | u32 in_lport_mask = 0xffffffff; | 2881 | u32 in_pport_mask = 0xffffffff; |
2860 | __be16 vlan_id_mask = htons(0xffff); | 2882 | __be16 vlan_id_mask = htons(0xffff); |
2861 | int err; | 2883 | int err; |
2862 | 2884 | ||
@@ -2864,7 +2886,7 @@ static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port, | |||
2864 | vlan_id = rocker_port->internal_vlan_id; | 2886 | vlan_id = rocker_port->internal_vlan_id; |
2865 | 2887 | ||
2866 | err = rocker_flow_tbl_term_mac(rocker_port, | 2888 | err = rocker_flow_tbl_term_mac(rocker_port, |
2867 | rocker_port->lport, in_lport_mask, | 2889 | rocker_port->pport, in_pport_mask, |
2868 | ctrl->eth_type, ctrl->eth_dst, | 2890 | ctrl->eth_type, ctrl->eth_dst, |
2869 | ctrl->eth_dst_mask, vlan_id, | 2891 | ctrl->eth_dst_mask, vlan_id, |
2870 | vlan_id_mask, ctrl->copy_to_cpu, | 2892 | vlan_id_mask, ctrl->copy_to_cpu, |
@@ -2934,7 +2956,7 @@ static int rocker_port_vlan(struct rocker_port *rocker_port, int flags, | |||
2934 | { | 2956 | { |
2935 | enum rocker_of_dpa_table_id goto_tbl = | 2957 | enum rocker_of_dpa_table_id goto_tbl = |
2936 | ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC; | 2958 | ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC; |
2937 | u32 in_lport = rocker_port->lport; | 2959 | u32 in_pport = rocker_port->pport; |
2938 | __be16 vlan_id = htons(vid); | 2960 | __be16 vlan_id = htons(vid); |
2939 | __be16 vlan_id_mask = htons(0xffff); | 2961 | __be16 vlan_id_mask = htons(0xffff); |
2940 | __be16 internal_vlan_id; | 2962 | __be16 internal_vlan_id; |
@@ -2978,7 +3000,7 @@ static int rocker_port_vlan(struct rocker_port *rocker_port, int flags, | |||
2978 | } | 3000 | } |
2979 | 3001 | ||
2980 | err = rocker_flow_tbl_vlan(rocker_port, flags, | 3002 | err = rocker_flow_tbl_vlan(rocker_port, flags, |
2981 | in_lport, vlan_id, vlan_id_mask, | 3003 | in_pport, vlan_id, vlan_id_mask, |
2982 | goto_tbl, untagged, internal_vlan_id); | 3004 | goto_tbl, untagged, internal_vlan_id); |
2983 | if (err) | 3005 | if (err) |
2984 | netdev_err(rocker_port->dev, | 3006 | netdev_err(rocker_port->dev, |
@@ -2990,20 +3012,20 @@ static int rocker_port_vlan(struct rocker_port *rocker_port, int flags, | |||
2990 | static int rocker_port_ig_tbl(struct rocker_port *rocker_port, int flags) | 3012 | static int rocker_port_ig_tbl(struct rocker_port *rocker_port, int flags) |
2991 | { | 3013 | { |
2992 | enum rocker_of_dpa_table_id goto_tbl; | 3014 | enum rocker_of_dpa_table_id goto_tbl; |
2993 | u32 in_lport; | 3015 | u32 in_pport; |
2994 | u32 in_lport_mask; | 3016 | u32 in_pport_mask; |
2995 | int err; | 3017 | int err; |
2996 | 3018 | ||
2997 | /* Normal Ethernet Frames. Matches pkts from any local physical | 3019 | /* Normal Ethernet Frames. Matches pkts from any local physical |
2998 | * ports. Goto VLAN tbl. | 3020 | * ports. Goto VLAN tbl. |
2999 | */ | 3021 | */ |
3000 | 3022 | ||
3001 | in_lport = 0; | 3023 | in_pport = 0; |
3002 | in_lport_mask = 0xffff0000; | 3024 | in_pport_mask = 0xffff0000; |
3003 | goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN; | 3025 | goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN; |
3004 | 3026 | ||
3005 | err = rocker_flow_tbl_ig_port(rocker_port, flags, | 3027 | err = rocker_flow_tbl_ig_port(rocker_port, flags, |
3006 | in_lport, in_lport_mask, | 3028 | in_pport, in_pport_mask, |
3007 | goto_tbl); | 3029 | goto_tbl); |
3008 | if (err) | 3030 | if (err) |
3009 | netdev_err(rocker_port->dev, | 3031 | netdev_err(rocker_port->dev, |
@@ -3047,7 +3069,7 @@ static int rocker_port_fdb_learn(struct rocker_port *rocker_port, | |||
3047 | struct rocker_fdb_learn_work *lw; | 3069 | struct rocker_fdb_learn_work *lw; |
3048 | enum rocker_of_dpa_table_id goto_tbl = | 3070 | enum rocker_of_dpa_table_id goto_tbl = |
3049 | ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; | 3071 | ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; |
3050 | u32 out_lport = rocker_port->lport; | 3072 | u32 out_pport = rocker_port->pport; |
3051 | u32 tunnel_id = 0; | 3073 | u32 tunnel_id = 0; |
3052 | u32 group_id = ROCKER_GROUP_NONE; | 3074 | u32 group_id = ROCKER_GROUP_NONE; |
3053 | bool syncing = !!(rocker_port->brport_flags & BR_LEARNING_SYNC); | 3075 | bool syncing = !!(rocker_port->brport_flags & BR_LEARNING_SYNC); |
@@ -3055,7 +3077,7 @@ static int rocker_port_fdb_learn(struct rocker_port *rocker_port, | |||
3055 | int err; | 3077 | int err; |
3056 | 3078 | ||
3057 | if (rocker_port_is_bridged(rocker_port)) | 3079 | if (rocker_port_is_bridged(rocker_port)) |
3058 | group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_lport); | 3080 | group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport); |
3059 | 3081 | ||
3060 | if (!(flags & ROCKER_OP_FLAG_REFRESH)) { | 3082 | if (!(flags & ROCKER_OP_FLAG_REFRESH)) { |
3061 | err = rocker_flow_tbl_bridge(rocker_port, flags, addr, NULL, | 3083 | err = rocker_flow_tbl_bridge(rocker_port, flags, addr, NULL, |
@@ -3114,7 +3136,7 @@ static int rocker_port_fdb(struct rocker_port *rocker_port, | |||
3114 | return -ENOMEM; | 3136 | return -ENOMEM; |
3115 | 3137 | ||
3116 | fdb->learned = (flags & ROCKER_OP_FLAG_LEARNED); | 3138 | fdb->learned = (flags & ROCKER_OP_FLAG_LEARNED); |
3117 | fdb->key.lport = rocker_port->lport; | 3139 | fdb->key.pport = rocker_port->pport; |
3118 | ether_addr_copy(fdb->key.addr, addr); | 3140 | ether_addr_copy(fdb->key.addr, addr); |
3119 | fdb->key.vlan_id = vlan_id; | 3141 | fdb->key.vlan_id = vlan_id; |
3120 | fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key)); | 3142 | fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key)); |
@@ -3161,7 +3183,7 @@ static int rocker_port_fdb_flush(struct rocker_port *rocker_port) | |||
3161 | spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags); | 3183 | spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags); |
3162 | 3184 | ||
3163 | hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) { | 3185 | hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) { |
3164 | if (found->key.lport != rocker_port->lport) | 3186 | if (found->key.pport != rocker_port->pport) |
3165 | continue; | 3187 | continue; |
3166 | if (!found->learned) | 3188 | if (!found->learned) |
3167 | continue; | 3189 | continue; |
@@ -3182,7 +3204,7 @@ err_out: | |||
3182 | static int rocker_port_router_mac(struct rocker_port *rocker_port, | 3204 | static int rocker_port_router_mac(struct rocker_port *rocker_port, |
3183 | int flags, __be16 vlan_id) | 3205 | int flags, __be16 vlan_id) |
3184 | { | 3206 | { |
3185 | u32 in_lport_mask = 0xffffffff; | 3207 | u32 in_pport_mask = 0xffffffff; |
3186 | __be16 eth_type; | 3208 | __be16 eth_type; |
3187 | const u8 *dst_mac_mask = ff_mac; | 3209 | const u8 *dst_mac_mask = ff_mac; |
3188 | __be16 vlan_id_mask = htons(0xffff); | 3210 | __be16 vlan_id_mask = htons(0xffff); |
@@ -3194,7 +3216,7 @@ static int rocker_port_router_mac(struct rocker_port *rocker_port, | |||
3194 | 3216 | ||
3195 | eth_type = htons(ETH_P_IP); | 3217 | eth_type = htons(ETH_P_IP); |
3196 | err = rocker_flow_tbl_term_mac(rocker_port, | 3218 | err = rocker_flow_tbl_term_mac(rocker_port, |
3197 | rocker_port->lport, in_lport_mask, | 3219 | rocker_port->pport, in_pport_mask, |
3198 | eth_type, rocker_port->dev->dev_addr, | 3220 | eth_type, rocker_port->dev->dev_addr, |
3199 | dst_mac_mask, vlan_id, vlan_id_mask, | 3221 | dst_mac_mask, vlan_id, vlan_id_mask, |
3200 | copy_to_cpu, flags); | 3222 | copy_to_cpu, flags); |
@@ -3203,7 +3225,7 @@ static int rocker_port_router_mac(struct rocker_port *rocker_port, | |||
3203 | 3225 | ||
3204 | eth_type = htons(ETH_P_IPV6); | 3226 | eth_type = htons(ETH_P_IPV6); |
3205 | err = rocker_flow_tbl_term_mac(rocker_port, | 3227 | err = rocker_flow_tbl_term_mac(rocker_port, |
3206 | rocker_port->lport, in_lport_mask, | 3228 | rocker_port->pport, in_pport_mask, |
3207 | eth_type, rocker_port->dev->dev_addr, | 3229 | eth_type, rocker_port->dev->dev_addr, |
3208 | dst_mac_mask, vlan_id, vlan_id_mask, | 3230 | dst_mac_mask, vlan_id, vlan_id_mask, |
3209 | copy_to_cpu, flags); | 3231 | copy_to_cpu, flags); |
@@ -3214,7 +3236,7 @@ static int rocker_port_router_mac(struct rocker_port *rocker_port, | |||
3214 | static int rocker_port_fwding(struct rocker_port *rocker_port) | 3236 | static int rocker_port_fwding(struct rocker_port *rocker_port) |
3215 | { | 3237 | { |
3216 | bool pop_vlan; | 3238 | bool pop_vlan; |
3217 | u32 out_lport; | 3239 | u32 out_pport; |
3218 | __be16 vlan_id; | 3240 | __be16 vlan_id; |
3219 | u16 vid; | 3241 | u16 vid; |
3220 | int flags = ROCKER_OP_FLAG_NOWAIT; | 3242 | int flags = ROCKER_OP_FLAG_NOWAIT; |
@@ -3231,19 +3253,19 @@ static int rocker_port_fwding(struct rocker_port *rocker_port) | |||
3231 | rocker_port->stp_state != BR_STATE_FORWARDING) | 3253 | rocker_port->stp_state != BR_STATE_FORWARDING) |
3232 | flags |= ROCKER_OP_FLAG_REMOVE; | 3254 | flags |= ROCKER_OP_FLAG_REMOVE; |
3233 | 3255 | ||
3234 | out_lport = rocker_port->lport; | 3256 | out_pport = rocker_port->pport; |
3235 | for (vid = 1; vid < VLAN_N_VID; vid++) { | 3257 | for (vid = 1; vid < VLAN_N_VID; vid++) { |
3236 | if (!test_bit(vid, rocker_port->vlan_bitmap)) | 3258 | if (!test_bit(vid, rocker_port->vlan_bitmap)) |
3237 | continue; | 3259 | continue; |
3238 | vlan_id = htons(vid); | 3260 | vlan_id = htons(vid); |
3239 | pop_vlan = rocker_vlan_id_is_internal(vlan_id); | 3261 | pop_vlan = rocker_vlan_id_is_internal(vlan_id); |
3240 | err = rocker_group_l2_interface(rocker_port, flags, | 3262 | err = rocker_group_l2_interface(rocker_port, flags, |
3241 | vlan_id, out_lport, | 3263 | vlan_id, out_pport, |
3242 | pop_vlan); | 3264 | pop_vlan); |
3243 | if (err) { | 3265 | if (err) { |
3244 | netdev_err(rocker_port->dev, | 3266 | netdev_err(rocker_port->dev, |
3245 | "Error (%d) port VLAN l2 group for lport %d\n", | 3267 | "Error (%d) port VLAN l2 group for pport %d\n", |
3246 | err, out_lport); | 3268 | err, out_pport); |
3247 | return err; | 3269 | return err; |
3248 | } | 3270 | } |
3249 | } | 3271 | } |
@@ -3302,6 +3324,26 @@ static int rocker_port_stp_update(struct rocker_port *rocker_port, u8 state) | |||
3302 | return rocker_port_fwding(rocker_port); | 3324 | return rocker_port_fwding(rocker_port); |
3303 | } | 3325 | } |
3304 | 3326 | ||
3327 | static int rocker_port_fwd_enable(struct rocker_port *rocker_port) | ||
3328 | { | ||
3329 | if (rocker_port_is_bridged(rocker_port)) | ||
3330 | /* bridge STP will enable port */ | ||
3331 | return 0; | ||
3332 | |||
3333 | /* port is not bridged, so simulate going to FORWARDING state */ | ||
3334 | return rocker_port_stp_update(rocker_port, BR_STATE_FORWARDING); | ||
3335 | } | ||
3336 | |||
3337 | static int rocker_port_fwd_disable(struct rocker_port *rocker_port) | ||
3338 | { | ||
3339 | if (rocker_port_is_bridged(rocker_port)) | ||
3340 | /* bridge STP will disable port */ | ||
3341 | return 0; | ||
3342 | |||
3343 | /* port is not bridged, so simulate going to DISABLED state */ | ||
3344 | return rocker_port_stp_update(rocker_port, BR_STATE_DISABLED); | ||
3345 | } | ||
3346 | |||
3305 | static struct rocker_internal_vlan_tbl_entry * | 3347 | static struct rocker_internal_vlan_tbl_entry * |
3306 | rocker_internal_vlan_tbl_find(struct rocker *rocker, int ifindex) | 3348 | rocker_internal_vlan_tbl_find(struct rocker *rocker, int ifindex) |
3307 | { | 3349 | { |
@@ -3394,8 +3436,6 @@ not_found: | |||
3394 | static int rocker_port_open(struct net_device *dev) | 3436 | static int rocker_port_open(struct net_device *dev) |
3395 | { | 3437 | { |
3396 | struct rocker_port *rocker_port = netdev_priv(dev); | 3438 | struct rocker_port *rocker_port = netdev_priv(dev); |
3397 | u8 stp_state = rocker_port_is_bridged(rocker_port) ? | ||
3398 | BR_STATE_BLOCKING : BR_STATE_FORWARDING; | ||
3399 | int err; | 3439 | int err; |
3400 | 3440 | ||
3401 | err = rocker_port_dma_rings_init(rocker_port); | 3441 | err = rocker_port_dma_rings_init(rocker_port); |
@@ -3418,9 +3458,9 @@ static int rocker_port_open(struct net_device *dev) | |||
3418 | goto err_request_rx_irq; | 3458 | goto err_request_rx_irq; |
3419 | } | 3459 | } |
3420 | 3460 | ||
3421 | err = rocker_port_stp_update(rocker_port, stp_state); | 3461 | err = rocker_port_fwd_enable(rocker_port); |
3422 | if (err) | 3462 | if (err) |
3423 | goto err_stp_update; | 3463 | goto err_fwd_enable; |
3424 | 3464 | ||
3425 | napi_enable(&rocker_port->napi_tx); | 3465 | napi_enable(&rocker_port->napi_tx); |
3426 | napi_enable(&rocker_port->napi_rx); | 3466 | napi_enable(&rocker_port->napi_rx); |
@@ -3428,7 +3468,7 @@ static int rocker_port_open(struct net_device *dev) | |||
3428 | netif_start_queue(dev); | 3468 | netif_start_queue(dev); |
3429 | return 0; | 3469 | return 0; |
3430 | 3470 | ||
3431 | err_stp_update: | 3471 | err_fwd_enable: |
3432 | free_irq(rocker_msix_rx_vector(rocker_port), rocker_port); | 3472 | free_irq(rocker_msix_rx_vector(rocker_port), rocker_port); |
3433 | err_request_rx_irq: | 3473 | err_request_rx_irq: |
3434 | free_irq(rocker_msix_tx_vector(rocker_port), rocker_port); | 3474 | free_irq(rocker_msix_tx_vector(rocker_port), rocker_port); |
@@ -3445,7 +3485,7 @@ static int rocker_port_stop(struct net_device *dev) | |||
3445 | rocker_port_set_enable(rocker_port, false); | 3485 | rocker_port_set_enable(rocker_port, false); |
3446 | napi_disable(&rocker_port->napi_rx); | 3486 | napi_disable(&rocker_port->napi_rx); |
3447 | napi_disable(&rocker_port->napi_tx); | 3487 | napi_disable(&rocker_port->napi_tx); |
3448 | rocker_port_stp_update(rocker_port, BR_STATE_DISABLED); | 3488 | rocker_port_fwd_disable(rocker_port); |
3449 | free_irq(rocker_msix_rx_vector(rocker_port), rocker_port); | 3489 | free_irq(rocker_msix_rx_vector(rocker_port), rocker_port); |
3450 | free_irq(rocker_msix_tx_vector(rocker_port), rocker_port); | 3490 | free_irq(rocker_msix_tx_vector(rocker_port), rocker_port); |
3451 | rocker_port_dma_rings_fini(rocker_port); | 3491 | rocker_port_dma_rings_fini(rocker_port); |
@@ -3702,7 +3742,7 @@ static int rocker_port_fdb_dump(struct sk_buff *skb, | |||
3702 | 3742 | ||
3703 | spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags); | 3743 | spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags); |
3704 | hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) { | 3744 | hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) { |
3705 | if (found->key.lport != rocker_port->lport) | 3745 | if (found->key.pport != rocker_port->pport) |
3706 | continue; | 3746 | continue; |
3707 | if (idx < cb->args[0]) | 3747 | if (idx < cb->args[0]) |
3708 | goto skip; | 3748 | goto skip; |
@@ -3882,8 +3922,8 @@ rocker_cmd_get_port_stats_prep(struct rocker *rocker, | |||
3882 | if (!cmd_stats) | 3922 | if (!cmd_stats) |
3883 | return -EMSGSIZE; | 3923 | return -EMSGSIZE; |
3884 | 3924 | ||
3885 | if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_STATS_LPORT, | 3925 | if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_STATS_PPORT, |
3886 | rocker_port->lport)) | 3926 | rocker_port->pport)) |
3887 | return -EMSGSIZE; | 3927 | return -EMSGSIZE; |
3888 | 3928 | ||
3889 | rocker_tlv_nest_end(desc_info, cmd_stats); | 3929 | rocker_tlv_nest_end(desc_info, cmd_stats); |
@@ -3900,7 +3940,7 @@ rocker_cmd_get_port_stats_ethtool_proc(struct rocker *rocker, | |||
3900 | struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1]; | 3940 | struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1]; |
3901 | struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1]; | 3941 | struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1]; |
3902 | struct rocker_tlv *pattr; | 3942 | struct rocker_tlv *pattr; |
3903 | u32 lport; | 3943 | u32 pport; |
3904 | u64 *data = priv; | 3944 | u64 *data = priv; |
3905 | int i; | 3945 | int i; |
3906 | 3946 | ||
@@ -3912,11 +3952,11 @@ rocker_cmd_get_port_stats_ethtool_proc(struct rocker *rocker, | |||
3912 | rocker_tlv_parse_nested(stats_attrs, ROCKER_TLV_CMD_PORT_STATS_MAX, | 3952 | rocker_tlv_parse_nested(stats_attrs, ROCKER_TLV_CMD_PORT_STATS_MAX, |
3913 | attrs[ROCKER_TLV_CMD_INFO]); | 3953 | attrs[ROCKER_TLV_CMD_INFO]); |
3914 | 3954 | ||
3915 | if (!stats_attrs[ROCKER_TLV_CMD_PORT_STATS_LPORT]) | 3955 | if (!stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT]) |
3916 | return -EIO; | 3956 | return -EIO; |
3917 | 3957 | ||
3918 | lport = rocker_tlv_get_u32(stats_attrs[ROCKER_TLV_CMD_PORT_STATS_LPORT]); | 3958 | pport = rocker_tlv_get_u32(stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT]); |
3919 | if (lport != rocker_port->lport) | 3959 | if (pport != rocker_port->pport) |
3920 | return -EIO; | 3960 | return -EIO; |
3921 | 3961 | ||
3922 | for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) { | 3962 | for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) { |
@@ -4104,7 +4144,7 @@ static void rocker_carrier_init(struct rocker_port *rocker_port) | |||
4104 | u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS); | 4144 | u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS); |
4105 | bool link_up; | 4145 | bool link_up; |
4106 | 4146 | ||
4107 | link_up = link_status & (1 << rocker_port->lport); | 4147 | link_up = link_status & (1 << rocker_port->pport); |
4108 | if (link_up) | 4148 | if (link_up) |
4109 | netif_carrier_on(rocker_port->dev); | 4149 | netif_carrier_on(rocker_port->dev); |
4110 | else | 4150 | else |
@@ -4152,7 +4192,7 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number) | |||
4152 | rocker_port->dev = dev; | 4192 | rocker_port->dev = dev; |
4153 | rocker_port->rocker = rocker; | 4193 | rocker_port->rocker = rocker; |
4154 | rocker_port->port_number = port_number; | 4194 | rocker_port->port_number = port_number; |
4155 | rocker_port->lport = port_number + 1; | 4195 | rocker_port->pport = port_number + 1; |
4156 | rocker_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC; | 4196 | rocker_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC; |
4157 | 4197 | ||
4158 | rocker_port_dev_addr_init(rocker, rocker_port); | 4198 | rocker_port_dev_addr_init(rocker, rocker_port); |
@@ -4436,9 +4476,7 @@ static int rocker_port_bridge_join(struct rocker_port *rocker_port, | |||
4436 | rocker_port->internal_vlan_id = | 4476 | rocker_port->internal_vlan_id = |
4437 | rocker_port_internal_vlan_id_get(rocker_port, | 4477 | rocker_port_internal_vlan_id_get(rocker_port, |
4438 | bridge->ifindex); | 4478 | bridge->ifindex); |
4439 | err = rocker_port_vlan(rocker_port, 0, 0); | 4479 | return rocker_port_vlan(rocker_port, 0, 0); |
4440 | |||
4441 | return err; | ||
4442 | } | 4480 | } |
4443 | 4481 | ||
4444 | static int rocker_port_bridge_leave(struct rocker_port *rocker_port) | 4482 | static int rocker_port_bridge_leave(struct rocker_port *rocker_port) |
@@ -4458,6 +4496,11 @@ static int rocker_port_bridge_leave(struct rocker_port *rocker_port) | |||
4458 | rocker_port_internal_vlan_id_get(rocker_port, | 4496 | rocker_port_internal_vlan_id_get(rocker_port, |
4459 | rocker_port->dev->ifindex); | 4497 | rocker_port->dev->ifindex); |
4460 | err = rocker_port_vlan(rocker_port, 0, 0); | 4498 | err = rocker_port_vlan(rocker_port, 0, 0); |
4499 | if (err) | ||
4500 | return err; | ||
4501 | |||
4502 | if (rocker_port->dev->flags & IFF_UP) | ||
4503 | err = rocker_port_fwd_enable(rocker_port); | ||
4461 | 4504 | ||
4462 | return err; | 4505 | return err; |
4463 | } | 4506 | } |
diff --git a/drivers/net/ethernet/rocker/rocker.h b/drivers/net/ethernet/rocker/rocker.h index a5bc432feada..0a94b7c300be 100644 --- a/drivers/net/ethernet/rocker/rocker.h +++ b/drivers/net/ethernet/rocker/rocker.h | |||
@@ -14,6 +14,19 @@ | |||
14 | 14 | ||
15 | #include <linux/types.h> | 15 | #include <linux/types.h> |
16 | 16 | ||
17 | /* Return codes */ | ||
18 | enum { | ||
19 | ROCKER_OK = 0, | ||
20 | ROCKER_ENOENT = 2, | ||
21 | ROCKER_ENXIO = 6, | ||
22 | ROCKER_ENOMEM = 12, | ||
23 | ROCKER_EEXIST = 17, | ||
24 | ROCKER_EINVAL = 22, | ||
25 | ROCKER_EMSGSIZE = 90, | ||
26 | ROCKER_ENOTSUP = 95, | ||
27 | ROCKER_ENOBUFS = 105, | ||
28 | }; | ||
29 | |||
17 | #define PCI_VENDOR_ID_REDHAT 0x1b36 | 30 | #define PCI_VENDOR_ID_REDHAT 0x1b36 |
18 | #define PCI_DEVICE_ID_REDHAT_ROCKER 0x0006 | 31 | #define PCI_DEVICE_ID_REDHAT_ROCKER 0x0006 |
19 | 32 | ||
@@ -136,7 +149,7 @@ enum { | |||
136 | 149 | ||
137 | enum { | 150 | enum { |
138 | ROCKER_TLV_CMD_PORT_SETTINGS_UNSPEC, | 151 | ROCKER_TLV_CMD_PORT_SETTINGS_UNSPEC, |
139 | ROCKER_TLV_CMD_PORT_SETTINGS_LPORT, /* u32 */ | 152 | ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, /* u32 */ |
140 | ROCKER_TLV_CMD_PORT_SETTINGS_SPEED, /* u32 */ | 153 | ROCKER_TLV_CMD_PORT_SETTINGS_SPEED, /* u32 */ |
141 | ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX, /* u8 */ | 154 | ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX, /* u8 */ |
142 | ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG, /* u8 */ | 155 | ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG, /* u8 */ |
@@ -151,7 +164,7 @@ enum { | |||
151 | 164 | ||
152 | enum { | 165 | enum { |
153 | ROCKER_TLV_CMD_PORT_STATS_UNSPEC, | 166 | ROCKER_TLV_CMD_PORT_STATS_UNSPEC, |
154 | ROCKER_TLV_CMD_PORT_STATS_LPORT, /* u32 */ | 167 | ROCKER_TLV_CMD_PORT_STATS_PPORT, /* u32 */ |
155 | 168 | ||
156 | ROCKER_TLV_CMD_PORT_STATS_RX_PKTS, /* u64 */ | 169 | ROCKER_TLV_CMD_PORT_STATS_RX_PKTS, /* u64 */ |
157 | ROCKER_TLV_CMD_PORT_STATS_RX_BYTES, /* u64 */ | 170 | ROCKER_TLV_CMD_PORT_STATS_RX_BYTES, /* u64 */ |
@@ -191,7 +204,7 @@ enum { | |||
191 | 204 | ||
192 | enum { | 205 | enum { |
193 | ROCKER_TLV_EVENT_LINK_CHANGED_UNSPEC, | 206 | ROCKER_TLV_EVENT_LINK_CHANGED_UNSPEC, |
194 | ROCKER_TLV_EVENT_LINK_CHANGED_LPORT, /* u32 */ | 207 | ROCKER_TLV_EVENT_LINK_CHANGED_PPORT, /* u32 */ |
195 | ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP, /* u8 */ | 208 | ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP, /* u8 */ |
196 | 209 | ||
197 | __ROCKER_TLV_EVENT_LINK_CHANGED_MAX, | 210 | __ROCKER_TLV_EVENT_LINK_CHANGED_MAX, |
@@ -201,7 +214,7 @@ enum { | |||
201 | 214 | ||
202 | enum { | 215 | enum { |
203 | ROCKER_TLV_EVENT_MAC_VLAN_UNSPEC, | 216 | ROCKER_TLV_EVENT_MAC_VLAN_UNSPEC, |
204 | ROCKER_TLV_EVENT_MAC_VLAN_LPORT, /* u32 */ | 217 | ROCKER_TLV_EVENT_MAC_VLAN_PPORT, /* u32 */ |
205 | ROCKER_TLV_EVENT_MAC_VLAN_MAC, /* binary */ | 218 | ROCKER_TLV_EVENT_MAC_VLAN_MAC, /* binary */ |
206 | ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID, /* __be16 */ | 219 | ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID, /* __be16 */ |
207 | 220 | ||
@@ -275,9 +288,9 @@ enum { | |||
275 | ROCKER_TLV_OF_DPA_HARDTIME, /* u32 */ | 288 | ROCKER_TLV_OF_DPA_HARDTIME, /* u32 */ |
276 | ROCKER_TLV_OF_DPA_IDLETIME, /* u32 */ | 289 | ROCKER_TLV_OF_DPA_IDLETIME, /* u32 */ |
277 | ROCKER_TLV_OF_DPA_COOKIE, /* u64 */ | 290 | ROCKER_TLV_OF_DPA_COOKIE, /* u64 */ |
278 | ROCKER_TLV_OF_DPA_IN_LPORT, /* u32 */ | 291 | ROCKER_TLV_OF_DPA_IN_PPORT, /* u32 */ |
279 | ROCKER_TLV_OF_DPA_IN_LPORT_MASK, /* u32 */ | 292 | ROCKER_TLV_OF_DPA_IN_PPORT_MASK, /* u32 */ |
280 | ROCKER_TLV_OF_DPA_OUT_LPORT, /* u32 */ | 293 | ROCKER_TLV_OF_DPA_OUT_PPORT, /* u32 */ |
281 | ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, /* u16 */ | 294 | ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, /* u16 */ |
282 | ROCKER_TLV_OF_DPA_GROUP_ID, /* u32 */ | 295 | ROCKER_TLV_OF_DPA_GROUP_ID, /* u32 */ |
283 | ROCKER_TLV_OF_DPA_GROUP_ID_LOWER, /* u32 */ | 296 | ROCKER_TLV_OF_DPA_GROUP_ID_LOWER, /* u32 */ |
@@ -291,7 +304,7 @@ enum { | |||
291 | ROCKER_TLV_OF_DPA_NEW_VLAN_ID, /* __be16 */ | 304 | ROCKER_TLV_OF_DPA_NEW_VLAN_ID, /* __be16 */ |
292 | ROCKER_TLV_OF_DPA_NEW_VLAN_PCP, /* u8 */ | 305 | ROCKER_TLV_OF_DPA_NEW_VLAN_PCP, /* u8 */ |
293 | ROCKER_TLV_OF_DPA_TUNNEL_ID, /* u32 */ | 306 | ROCKER_TLV_OF_DPA_TUNNEL_ID, /* u32 */ |
294 | ROCKER_TLV_OF_DPA_TUN_LOG_LPORT, /* u32 */ | 307 | ROCKER_TLV_OF_DPA_TUNNEL_LPORT, /* u32 */ |
295 | ROCKER_TLV_OF_DPA_ETHERTYPE, /* __be16 */ | 308 | ROCKER_TLV_OF_DPA_ETHERTYPE, /* __be16 */ |
296 | ROCKER_TLV_OF_DPA_DST_MAC, /* binary */ | 309 | ROCKER_TLV_OF_DPA_DST_MAC, /* binary */ |
297 | ROCKER_TLV_OF_DPA_DST_MAC_MASK, /* binary */ | 310 | ROCKER_TLV_OF_DPA_DST_MAC_MASK, /* binary */ |
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index 2965c6ae7d6e..41047c9143d0 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c | |||
@@ -843,7 +843,7 @@ static int smsc911x_phy_loopbacktest(struct net_device *dev) | |||
843 | unsigned long flags; | 843 | unsigned long flags; |
844 | 844 | ||
845 | /* Initialise tx packet using broadcast destination address */ | 845 | /* Initialise tx packet using broadcast destination address */ |
846 | memset(pdata->loopback_tx_pkt, 0xff, ETH_ALEN); | 846 | eth_broadcast_addr(pdata->loopback_tx_pkt); |
847 | 847 | ||
848 | /* Use incrementing source address */ | 848 | /* Use incrementing source address */ |
849 | for (i = 6; i < 12; i++) | 849 | for (i = 6; i < 12; i++) |
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index 22e0cad1b4b5..401abf7254d3 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c | |||
@@ -1411,6 +1411,8 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1411 | if (unlikely(err < 0)) { | 1411 | if (unlikely(err < 0)) { |
1412 | netdev_info(dev, "TX trigger error %d\n", err); | 1412 | netdev_info(dev, "TX trigger error %d\n", err); |
1413 | d->hdr.state = VIO_DESC_FREE; | 1413 | d->hdr.state = VIO_DESC_FREE; |
1414 | skb = port->tx_bufs[txi].skb; | ||
1415 | port->tx_bufs[txi].skb = NULL; | ||
1414 | dev->stats.tx_carrier_errors++; | 1416 | dev->stats.tx_carrier_errors++; |
1415 | goto out_dropped; | 1417 | goto out_dropped; |
1416 | } | 1418 | } |
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index a31a8c3c8e7c..9f14d8b515c7 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c | |||
@@ -1320,7 +1320,7 @@ static struct netcp_addr *netcp_addr_add(struct netcp_intf *netcp, | |||
1320 | if (addr) | 1320 | if (addr) |
1321 | ether_addr_copy(naddr->addr, addr); | 1321 | ether_addr_copy(naddr->addr, addr); |
1322 | else | 1322 | else |
1323 | memset(naddr->addr, 0, ETH_ALEN); | 1323 | eth_zero_addr(naddr->addr); |
1324 | list_add_tail(&naddr->node, &netcp->addr_list); | 1324 | list_add_tail(&naddr->node, &netcp->addr_list); |
1325 | 1325 | ||
1326 | return naddr; | 1326 | return naddr; |
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c index 0a7f2e77557f..13214a6492ac 100644 --- a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c +++ b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c | |||
@@ -1167,7 +1167,7 @@ static int gelic_wl_set_ap(struct net_device *netdev, | |||
1167 | } else { | 1167 | } else { |
1168 | pr_debug("%s: clear bssid\n", __func__); | 1168 | pr_debug("%s: clear bssid\n", __func__); |
1169 | clear_bit(GELIC_WL_STAT_BSSID_SET, &wl->stat); | 1169 | clear_bit(GELIC_WL_STAT_BSSID_SET, &wl->stat); |
1170 | memset(wl->bssid, 0, ETH_ALEN); | 1170 | eth_zero_addr(wl->bssid); |
1171 | } | 1171 | } |
1172 | spin_unlock_irqrestore(&wl->lock, irqflag); | 1172 | spin_unlock_irqrestore(&wl->lock, irqflag); |
1173 | pr_debug("%s: ->\n", __func__); | 1173 | pr_debug("%s: ->\n", __func__); |
@@ -1189,7 +1189,7 @@ static int gelic_wl_get_ap(struct net_device *netdev, | |||
1189 | memcpy(data->ap_addr.sa_data, wl->active_bssid, | 1189 | memcpy(data->ap_addr.sa_data, wl->active_bssid, |
1190 | ETH_ALEN); | 1190 | ETH_ALEN); |
1191 | } else | 1191 | } else |
1192 | memset(data->ap_addr.sa_data, 0, ETH_ALEN); | 1192 | eth_zero_addr(data->ap_addr.sa_data); |
1193 | 1193 | ||
1194 | spin_unlock_irqrestore(&wl->lock, irqflag); | 1194 | spin_unlock_irqrestore(&wl->lock, irqflag); |
1195 | mutex_unlock(&wl->assoc_stat_lock); | 1195 | mutex_unlock(&wl->assoc_stat_lock); |
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c index 17e276651601..8fb807ea1caa 100644 --- a/drivers/net/ethernet/via/via-rhine.c +++ b/drivers/net/ethernet/via/via-rhine.c | |||
@@ -70,12 +70,14 @@ static const int multicast_filter_limit = 32; | |||
70 | /* Operational parameters that are set at compile time. */ | 70 | /* Operational parameters that are set at compile time. */ |
71 | 71 | ||
72 | /* Keep the ring sizes a power of two for compile efficiency. | 72 | /* Keep the ring sizes a power of two for compile efficiency. |
73 | The compiler will convert <unsigned>'%'<2^N> into a bit mask. | 73 | * The compiler will convert <unsigned>'%'<2^N> into a bit mask. |
74 | Making the Tx ring too large decreases the effectiveness of channel | 74 | * Making the Tx ring too large decreases the effectiveness of channel |
75 | bonding and packet priority. | 75 | * bonding and packet priority. |
76 | There are no ill effects from too-large receive rings. */ | 76 | * With BQL support, we can increase TX ring safely. |
77 | #define TX_RING_SIZE 16 | 77 | * There are no ill effects from too-large receive rings. |
78 | #define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */ | 78 | */ |
79 | #define TX_RING_SIZE 64 | ||
80 | #define TX_QUEUE_LEN (TX_RING_SIZE - 6) /* Limit ring entries actually used. */ | ||
79 | #define RX_RING_SIZE 64 | 81 | #define RX_RING_SIZE 64 |
80 | 82 | ||
81 | /* Operational parameters that usually are not changed. */ | 83 | /* Operational parameters that usually are not changed. */ |
@@ -1295,6 +1297,7 @@ static void alloc_tbufs(struct net_device* dev) | |||
1295 | } | 1297 | } |
1296 | rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma); | 1298 | rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma); |
1297 | 1299 | ||
1300 | netdev_reset_queue(dev); | ||
1298 | } | 1301 | } |
1299 | 1302 | ||
1300 | static void free_tbufs(struct net_device* dev) | 1303 | static void free_tbufs(struct net_device* dev) |
@@ -1795,6 +1798,7 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb, | |||
1795 | else | 1798 | else |
1796 | rp->tx_ring[entry].tx_status = 0; | 1799 | rp->tx_ring[entry].tx_status = 0; |
1797 | 1800 | ||
1801 | netdev_sent_queue(dev, skb->len); | ||
1798 | /* lock eth irq */ | 1802 | /* lock eth irq */ |
1799 | wmb(); | 1803 | wmb(); |
1800 | rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn); | 1804 | rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn); |
@@ -1863,6 +1867,8 @@ static void rhine_tx(struct net_device *dev) | |||
1863 | struct rhine_private *rp = netdev_priv(dev); | 1867 | struct rhine_private *rp = netdev_priv(dev); |
1864 | struct device *hwdev = dev->dev.parent; | 1868 | struct device *hwdev = dev->dev.parent; |
1865 | int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE; | 1869 | int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE; |
1870 | unsigned int pkts_compl = 0, bytes_compl = 0; | ||
1871 | struct sk_buff *skb; | ||
1866 | 1872 | ||
1867 | /* find and cleanup dirty tx descriptors */ | 1873 | /* find and cleanup dirty tx descriptors */ |
1868 | while (rp->dirty_tx != rp->cur_tx) { | 1874 | while (rp->dirty_tx != rp->cur_tx) { |
@@ -1871,6 +1877,7 @@ static void rhine_tx(struct net_device *dev) | |||
1871 | entry, txstatus); | 1877 | entry, txstatus); |
1872 | if (txstatus & DescOwn) | 1878 | if (txstatus & DescOwn) |
1873 | break; | 1879 | break; |
1880 | skb = rp->tx_skbuff[entry]; | ||
1874 | if (txstatus & 0x8000) { | 1881 | if (txstatus & 0x8000) { |
1875 | netif_dbg(rp, tx_done, dev, | 1882 | netif_dbg(rp, tx_done, dev, |
1876 | "Transmit error, Tx status %08x\n", txstatus); | 1883 | "Transmit error, Tx status %08x\n", txstatus); |
@@ -1899,7 +1906,7 @@ static void rhine_tx(struct net_device *dev) | |||
1899 | (txstatus >> 3) & 0xF, txstatus & 0xF); | 1906 | (txstatus >> 3) & 0xF, txstatus & 0xF); |
1900 | 1907 | ||
1901 | u64_stats_update_begin(&rp->tx_stats.syncp); | 1908 | u64_stats_update_begin(&rp->tx_stats.syncp); |
1902 | rp->tx_stats.bytes += rp->tx_skbuff[entry]->len; | 1909 | rp->tx_stats.bytes += skb->len; |
1903 | rp->tx_stats.packets++; | 1910 | rp->tx_stats.packets++; |
1904 | u64_stats_update_end(&rp->tx_stats.syncp); | 1911 | u64_stats_update_end(&rp->tx_stats.syncp); |
1905 | } | 1912 | } |
@@ -1907,13 +1914,17 @@ static void rhine_tx(struct net_device *dev) | |||
1907 | if (rp->tx_skbuff_dma[entry]) { | 1914 | if (rp->tx_skbuff_dma[entry]) { |
1908 | dma_unmap_single(hwdev, | 1915 | dma_unmap_single(hwdev, |
1909 | rp->tx_skbuff_dma[entry], | 1916 | rp->tx_skbuff_dma[entry], |
1910 | rp->tx_skbuff[entry]->len, | 1917 | skb->len, |
1911 | DMA_TO_DEVICE); | 1918 | DMA_TO_DEVICE); |
1912 | } | 1919 | } |
1913 | dev_consume_skb_any(rp->tx_skbuff[entry]); | 1920 | bytes_compl += skb->len; |
1921 | pkts_compl++; | ||
1922 | dev_consume_skb_any(skb); | ||
1914 | rp->tx_skbuff[entry] = NULL; | 1923 | rp->tx_skbuff[entry] = NULL; |
1915 | entry = (++rp->dirty_tx) % TX_RING_SIZE; | 1924 | entry = (++rp->dirty_tx) % TX_RING_SIZE; |
1916 | } | 1925 | } |
1926 | |||
1927 | netdev_completed_queue(dev, pkts_compl, bytes_compl); | ||
1917 | if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4) | 1928 | if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4) |
1918 | netif_wake_queue(dev); | 1929 | netif_wake_queue(dev); |
1919 | } | 1930 | } |
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c index 9e16a2819d48..5138407941cf 100644 --- a/drivers/net/ethernet/xscale/ixp4xx_eth.c +++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c | |||
@@ -954,7 +954,7 @@ static void eth_set_mcast_list(struct net_device *dev) | |||
954 | return; | 954 | return; |
955 | } | 955 | } |
956 | 956 | ||
957 | memset(diffs, 0, ETH_ALEN); | 957 | eth_zero_addr(diffs); |
958 | 958 | ||
959 | addr = NULL; | 959 | addr = NULL; |
960 | netdev_for_each_mc_addr(ha, dev) { | 960 | netdev_for_each_mc_addr(ha, dev) { |
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c index daca0dee88f3..7c4a4151ef0f 100644 --- a/drivers/net/hamradio/6pack.c +++ b/drivers/net/hamradio/6pack.c | |||
@@ -247,6 +247,9 @@ static netdev_tx_t sp_xmit(struct sk_buff *skb, struct net_device *dev) | |||
247 | { | 247 | { |
248 | struct sixpack *sp = netdev_priv(dev); | 248 | struct sixpack *sp = netdev_priv(dev); |
249 | 249 | ||
250 | if (skb->protocol == htons(ETH_P_IP)) | ||
251 | return ax25_ip_xmit(skb); | ||
252 | |||
250 | spin_lock_bh(&sp->lock); | 253 | spin_lock_bh(&sp->lock); |
251 | /* We were not busy, so we are now... :-) */ | 254 | /* We were not busy, so we are now... :-) */ |
252 | netif_stop_queue(dev); | 255 | netif_stop_queue(dev); |
@@ -284,18 +287,6 @@ static int sp_close(struct net_device *dev) | |||
284 | return 0; | 287 | return 0; |
285 | } | 288 | } |
286 | 289 | ||
287 | /* Return the frame type ID */ | ||
288 | static int sp_header(struct sk_buff *skb, struct net_device *dev, | ||
289 | unsigned short type, const void *daddr, | ||
290 | const void *saddr, unsigned len) | ||
291 | { | ||
292 | #ifdef CONFIG_INET | ||
293 | if (type != ETH_P_AX25) | ||
294 | return ax25_hard_header(skb, dev, type, daddr, saddr, len); | ||
295 | #endif | ||
296 | return 0; | ||
297 | } | ||
298 | |||
299 | static int sp_set_mac_address(struct net_device *dev, void *addr) | 290 | static int sp_set_mac_address(struct net_device *dev, void *addr) |
300 | { | 291 | { |
301 | struct sockaddr_ax25 *sa = addr; | 292 | struct sockaddr_ax25 *sa = addr; |
@@ -309,20 +300,6 @@ static int sp_set_mac_address(struct net_device *dev, void *addr) | |||
309 | return 0; | 300 | return 0; |
310 | } | 301 | } |
311 | 302 | ||
312 | static int sp_rebuild_header(struct sk_buff *skb) | ||
313 | { | ||
314 | #ifdef CONFIG_INET | ||
315 | return ax25_rebuild_header(skb); | ||
316 | #else | ||
317 | return 0; | ||
318 | #endif | ||
319 | } | ||
320 | |||
321 | static const struct header_ops sp_header_ops = { | ||
322 | .create = sp_header, | ||
323 | .rebuild = sp_rebuild_header, | ||
324 | }; | ||
325 | |||
326 | static const struct net_device_ops sp_netdev_ops = { | 303 | static const struct net_device_ops sp_netdev_ops = { |
327 | .ndo_open = sp_open_dev, | 304 | .ndo_open = sp_open_dev, |
328 | .ndo_stop = sp_close, | 305 | .ndo_stop = sp_close, |
@@ -337,7 +314,7 @@ static void sp_setup(struct net_device *dev) | |||
337 | dev->destructor = free_netdev; | 314 | dev->destructor = free_netdev; |
338 | dev->mtu = SIXP_MTU; | 315 | dev->mtu = SIXP_MTU; |
339 | dev->hard_header_len = AX25_MAX_HEADER_LEN; | 316 | dev->hard_header_len = AX25_MAX_HEADER_LEN; |
340 | dev->header_ops = &sp_header_ops; | 317 | dev->header_ops = &ax25_header_ops; |
341 | 318 | ||
342 | dev->addr_len = AX25_ADDR_LEN; | 319 | dev->addr_len = AX25_ADDR_LEN; |
343 | dev->type = ARPHRD_AX25; | 320 | dev->type = ARPHRD_AX25; |
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c index a98c153f371e..83c7cce0d172 100644 --- a/drivers/net/hamradio/baycom_epp.c +++ b/drivers/net/hamradio/baycom_epp.c | |||
@@ -772,6 +772,9 @@ static int baycom_send_packet(struct sk_buff *skb, struct net_device *dev) | |||
772 | { | 772 | { |
773 | struct baycom_state *bc = netdev_priv(dev); | 773 | struct baycom_state *bc = netdev_priv(dev); |
774 | 774 | ||
775 | if (skb->protocol == htons(ETH_P_IP)) | ||
776 | return ax25_ip_xmit(skb); | ||
777 | |||
775 | if (skb->data[0] != 0) { | 778 | if (skb->data[0] != 0) { |
776 | do_kiss_params(bc, skb->data, skb->len); | 779 | do_kiss_params(bc, skb->data, skb->len); |
777 | dev_kfree_skb(skb); | 780 | dev_kfree_skb(skb); |
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c index c2894e43840e..63ff08a26da8 100644 --- a/drivers/net/hamradio/bpqether.c +++ b/drivers/net/hamradio/bpqether.c | |||
@@ -251,6 +251,9 @@ static netdev_tx_t bpq_xmit(struct sk_buff *skb, struct net_device *dev) | |||
251 | struct net_device *orig_dev; | 251 | struct net_device *orig_dev; |
252 | int size; | 252 | int size; |
253 | 253 | ||
254 | if (skb->protocol == htons(ETH_P_IP)) | ||
255 | return ax25_ip_xmit(skb); | ||
256 | |||
254 | /* | 257 | /* |
255 | * Just to be *really* sure not to send anything if the interface | 258 | * Just to be *really* sure not to send anything if the interface |
256 | * is down, the ethernet device may have gone. | 259 | * is down, the ethernet device may have gone. |
diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c index 0fad408f24aa..c3d377770616 100644 --- a/drivers/net/hamradio/dmascc.c +++ b/drivers/net/hamradio/dmascc.c | |||
@@ -920,6 +920,9 @@ static int scc_send_packet(struct sk_buff *skb, struct net_device *dev) | |||
920 | unsigned long flags; | 920 | unsigned long flags; |
921 | int i; | 921 | int i; |
922 | 922 | ||
923 | if (skb->protocol == htons(ETH_P_IP)) | ||
924 | return ax25_ip_xmit(skb); | ||
925 | |||
923 | /* Temporarily stop the scheduler feeding us packets */ | 926 | /* Temporarily stop the scheduler feeding us packets */ |
924 | netif_stop_queue(dev); | 927 | netif_stop_queue(dev); |
925 | 928 | ||
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c index c67a27245072..49fe59b180a8 100644 --- a/drivers/net/hamradio/hdlcdrv.c +++ b/drivers/net/hamradio/hdlcdrv.c | |||
@@ -404,6 +404,9 @@ static netdev_tx_t hdlcdrv_send_packet(struct sk_buff *skb, | |||
404 | { | 404 | { |
405 | struct hdlcdrv_state *sm = netdev_priv(dev); | 405 | struct hdlcdrv_state *sm = netdev_priv(dev); |
406 | 406 | ||
407 | if (skb->protocol == htons(ETH_P_IP)) | ||
408 | return ax25_ip_xmit(skb); | ||
409 | |||
407 | if (skb->data[0] != 0) { | 410 | if (skb->data[0] != 0) { |
408 | do_kiss_params(sm, skb->data, skb->len); | 411 | do_kiss_params(sm, skb->data, skb->len); |
409 | dev_kfree_skb(skb); | 412 | dev_kfree_skb(skb); |
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c index f990bb1c3e02..17058c490b79 100644 --- a/drivers/net/hamradio/mkiss.c +++ b/drivers/net/hamradio/mkiss.c | |||
@@ -529,6 +529,9 @@ static netdev_tx_t ax_xmit(struct sk_buff *skb, struct net_device *dev) | |||
529 | { | 529 | { |
530 | struct mkiss *ax = netdev_priv(dev); | 530 | struct mkiss *ax = netdev_priv(dev); |
531 | 531 | ||
532 | if (skb->protocol == htons(ETH_P_IP)) | ||
533 | return ax25_ip_xmit(skb); | ||
534 | |||
532 | if (!netif_running(dev)) { | 535 | if (!netif_running(dev)) { |
533 | printk(KERN_ERR "mkiss: %s: xmit call when iface is down\n", dev->name); | 536 | printk(KERN_ERR "mkiss: %s: xmit call when iface is down\n", dev->name); |
534 | return NETDEV_TX_BUSY; | 537 | return NETDEV_TX_BUSY; |
@@ -573,32 +576,6 @@ static int ax_open_dev(struct net_device *dev) | |||
573 | return 0; | 576 | return 0; |
574 | } | 577 | } |
575 | 578 | ||
576 | #if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE) | ||
577 | |||
578 | /* Return the frame type ID */ | ||
579 | static int ax_header(struct sk_buff *skb, struct net_device *dev, | ||
580 | unsigned short type, const void *daddr, | ||
581 | const void *saddr, unsigned len) | ||
582 | { | ||
583 | #ifdef CONFIG_INET | ||
584 | if (type != ETH_P_AX25) | ||
585 | return ax25_hard_header(skb, dev, type, daddr, saddr, len); | ||
586 | #endif | ||
587 | return 0; | ||
588 | } | ||
589 | |||
590 | |||
591 | static int ax_rebuild_header(struct sk_buff *skb) | ||
592 | { | ||
593 | #ifdef CONFIG_INET | ||
594 | return ax25_rebuild_header(skb); | ||
595 | #else | ||
596 | return 0; | ||
597 | #endif | ||
598 | } | ||
599 | |||
600 | #endif /* CONFIG_{AX25,AX25_MODULE} */ | ||
601 | |||
602 | /* Open the low-level part of the AX25 channel. Easy! */ | 579 | /* Open the low-level part of the AX25 channel. Easy! */ |
603 | static int ax_open(struct net_device *dev) | 580 | static int ax_open(struct net_device *dev) |
604 | { | 581 | { |
@@ -662,11 +639,6 @@ static int ax_close(struct net_device *dev) | |||
662 | return 0; | 639 | return 0; |
663 | } | 640 | } |
664 | 641 | ||
665 | static const struct header_ops ax_header_ops = { | ||
666 | .create = ax_header, | ||
667 | .rebuild = ax_rebuild_header, | ||
668 | }; | ||
669 | |||
670 | static const struct net_device_ops ax_netdev_ops = { | 642 | static const struct net_device_ops ax_netdev_ops = { |
671 | .ndo_open = ax_open_dev, | 643 | .ndo_open = ax_open_dev, |
672 | .ndo_stop = ax_close, | 644 | .ndo_stop = ax_close, |
@@ -682,7 +654,7 @@ static void ax_setup(struct net_device *dev) | |||
682 | dev->addr_len = 0; | 654 | dev->addr_len = 0; |
683 | dev->type = ARPHRD_AX25; | 655 | dev->type = ARPHRD_AX25; |
684 | dev->tx_queue_len = 10; | 656 | dev->tx_queue_len = 10; |
685 | dev->header_ops = &ax_header_ops; | 657 | dev->header_ops = &ax25_header_ops; |
686 | dev->netdev_ops = &ax_netdev_ops; | 658 | dev->netdev_ops = &ax_netdev_ops; |
687 | 659 | ||
688 | 660 | ||
diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c index 57be9e0e98a6..ce88df33fe17 100644 --- a/drivers/net/hamradio/scc.c +++ b/drivers/net/hamradio/scc.c | |||
@@ -1639,6 +1639,9 @@ static netdev_tx_t scc_net_tx(struct sk_buff *skb, struct net_device *dev) | |||
1639 | unsigned long flags; | 1639 | unsigned long flags; |
1640 | char kisscmd; | 1640 | char kisscmd; |
1641 | 1641 | ||
1642 | if (skb->protocol == htons(ETH_P_IP)) | ||
1643 | return ax25_ip_xmit(skb); | ||
1644 | |||
1642 | if (skb->len > scc->stat.bufsize || skb->len < 2) { | 1645 | if (skb->len > scc->stat.bufsize || skb->len < 2) { |
1643 | scc->dev_stat.tx_dropped++; /* bogus frame */ | 1646 | scc->dev_stat.tx_dropped++; /* bogus frame */ |
1644 | dev_kfree_skb(skb); | 1647 | dev_kfree_skb(skb); |
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c index 717433cfb81d..1a4729c36aa4 100644 --- a/drivers/net/hamradio/yam.c +++ b/drivers/net/hamradio/yam.c | |||
@@ -597,6 +597,9 @@ static netdev_tx_t yam_send_packet(struct sk_buff *skb, | |||
597 | { | 597 | { |
598 | struct yam_port *yp = netdev_priv(dev); | 598 | struct yam_port *yp = netdev_priv(dev); |
599 | 599 | ||
600 | if (skb->protocol == htons(ETH_P_IP)) | ||
601 | return ax25_ip_xmit(skb); | ||
602 | |||
600 | skb_queue_tail(&yp->send_queue, skb); | 603 | skb_queue_tail(&yp->send_queue, skb); |
601 | dev->trans_start = jiffies; | 604 | dev->trans_start = jiffies; |
602 | return NETDEV_TX_OK; | 605 | return NETDEV_TX_OK; |
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 384ca4f4de4a..4815843a6019 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h | |||
@@ -634,6 +634,7 @@ struct netvsc_device { | |||
634 | 634 | ||
635 | struct vmbus_channel *chn_table[NR_CPUS]; | 635 | struct vmbus_channel *chn_table[NR_CPUS]; |
636 | u32 send_table[VRSS_SEND_TAB_SIZE]; | 636 | u32 send_table[VRSS_SEND_TAB_SIZE]; |
637 | u32 max_chn; | ||
637 | u32 num_chn; | 638 | u32 num_chn; |
638 | atomic_t queue_sends[NR_CPUS]; | 639 | atomic_t queue_sends[NR_CPUS]; |
639 | 640 | ||
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 15d82eda0baf..a06bd6614007 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c | |||
@@ -687,6 +687,19 @@ static void netvsc_get_drvinfo(struct net_device *net, | |||
687 | strlcpy(info->fw_version, "N/A", sizeof(info->fw_version)); | 687 | strlcpy(info->fw_version, "N/A", sizeof(info->fw_version)); |
688 | } | 688 | } |
689 | 689 | ||
690 | static void netvsc_get_channels(struct net_device *net, | ||
691 | struct ethtool_channels *channel) | ||
692 | { | ||
693 | struct net_device_context *net_device_ctx = netdev_priv(net); | ||
694 | struct hv_device *dev = net_device_ctx->device_ctx; | ||
695 | struct netvsc_device *nvdev = hv_get_drvdata(dev); | ||
696 | |||
697 | if (nvdev) { | ||
698 | channel->max_combined = nvdev->max_chn; | ||
699 | channel->combined_count = nvdev->num_chn; | ||
700 | } | ||
701 | } | ||
702 | |||
690 | static int netvsc_change_mtu(struct net_device *ndev, int mtu) | 703 | static int netvsc_change_mtu(struct net_device *ndev, int mtu) |
691 | { | 704 | { |
692 | struct net_device_context *ndevctx = netdev_priv(ndev); | 705 | struct net_device_context *ndevctx = netdev_priv(ndev); |
@@ -760,6 +773,7 @@ static void netvsc_poll_controller(struct net_device *net) | |||
760 | static const struct ethtool_ops ethtool_ops = { | 773 | static const struct ethtool_ops ethtool_ops = { |
761 | .get_drvinfo = netvsc_get_drvinfo, | 774 | .get_drvinfo = netvsc_get_drvinfo, |
762 | .get_link = ethtool_op_get_link, | 775 | .get_link = ethtool_op_get_link, |
776 | .get_channels = netvsc_get_channels, | ||
763 | }; | 777 | }; |
764 | 778 | ||
765 | static const struct net_device_ops device_ops = { | 779 | static const struct net_device_ops device_ops = { |
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 7816d98bdddc..ca81de04bc76 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c | |||
@@ -1027,6 +1027,7 @@ int rndis_filter_device_add(struct hv_device *dev, | |||
1027 | 1027 | ||
1028 | /* Initialize the rndis device */ | 1028 | /* Initialize the rndis device */ |
1029 | net_device = hv_get_drvdata(dev); | 1029 | net_device = hv_get_drvdata(dev); |
1030 | net_device->max_chn = 1; | ||
1030 | net_device->num_chn = 1; | 1031 | net_device->num_chn = 1; |
1031 | 1032 | ||
1032 | net_device->extension = rndis_device; | 1033 | net_device->extension = rndis_device; |
@@ -1094,6 +1095,7 @@ int rndis_filter_device_add(struct hv_device *dev, | |||
1094 | if (ret || rsscap.num_recv_que < 2) | 1095 | if (ret || rsscap.num_recv_que < 2) |
1095 | goto out; | 1096 | goto out; |
1096 | 1097 | ||
1098 | net_device->max_chn = rsscap.num_recv_que; | ||
1097 | net_device->num_chn = (num_online_cpus() < rsscap.num_recv_que) ? | 1099 | net_device->num_chn = (num_online_cpus() < rsscap.num_recv_que) ? |
1098 | num_online_cpus() : rsscap.num_recv_que; | 1100 | num_online_cpus() : rsscap.num_recv_que; |
1099 | if (net_device->num_chn == 1) | 1101 | if (net_device->num_chn == 1) |
@@ -1140,8 +1142,10 @@ int rndis_filter_device_add(struct hv_device *dev, | |||
1140 | ret = rndis_filter_set_rss_param(rndis_device, net_device->num_chn); | 1142 | ret = rndis_filter_set_rss_param(rndis_device, net_device->num_chn); |
1141 | 1143 | ||
1142 | out: | 1144 | out: |
1143 | if (ret) | 1145 | if (ret) { |
1146 | net_device->max_chn = 1; | ||
1144 | net_device->num_chn = 1; | 1147 | net_device->num_chn = 1; |
1148 | } | ||
1145 | return 0; /* return 0 because primary channel can be used alone */ | 1149 | return 0; /* return 0 because primary channel can be used alone */ |
1146 | 1150 | ||
1147 | err_dev_remv: | 1151 | err_dev_remv: |
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c index 7b051eacb7f1..1d438bc54189 100644 --- a/drivers/net/ieee802154/at86rf230.c +++ b/drivers/net/ieee802154/at86rf230.c | |||
@@ -46,8 +46,6 @@ struct at86rf2xx_chip_data { | |||
46 | u16 t_off_to_tx_on; | 46 | u16 t_off_to_tx_on; |
47 | u16 t_frame; | 47 | u16 t_frame; |
48 | u16 t_p_ack; | 48 | u16 t_p_ack; |
49 | /* completion timeout for tx in msecs */ | ||
50 | u16 t_tx_timeout; | ||
51 | int rssi_base_val; | 49 | int rssi_base_val; |
52 | 50 | ||
53 | int (*set_channel)(struct at86rf230_local *, u8, u8); | 51 | int (*set_channel)(struct at86rf230_local *, u8, u8); |
@@ -689,7 +687,7 @@ at86rf230_sync_state_change_complete(void *context) | |||
689 | static int | 687 | static int |
690 | at86rf230_sync_state_change(struct at86rf230_local *lp, unsigned int state) | 688 | at86rf230_sync_state_change(struct at86rf230_local *lp, unsigned int state) |
691 | { | 689 | { |
692 | int rc; | 690 | unsigned long rc; |
693 | 691 | ||
694 | at86rf230_async_state_change(lp, &lp->state, state, | 692 | at86rf230_async_state_change(lp, &lp->state, state, |
695 | at86rf230_sync_state_change_complete, | 693 | at86rf230_sync_state_change_complete, |
@@ -1281,7 +1279,6 @@ static struct at86rf2xx_chip_data at86rf233_data = { | |||
1281 | .t_off_to_tx_on = 80, | 1279 | .t_off_to_tx_on = 80, |
1282 | .t_frame = 4096, | 1280 | .t_frame = 4096, |
1283 | .t_p_ack = 545, | 1281 | .t_p_ack = 545, |
1284 | .t_tx_timeout = 2000, | ||
1285 | .rssi_base_val = -91, | 1282 | .rssi_base_val = -91, |
1286 | .set_channel = at86rf23x_set_channel, | 1283 | .set_channel = at86rf23x_set_channel, |
1287 | .get_desense_steps = at86rf23x_get_desens_steps | 1284 | .get_desense_steps = at86rf23x_get_desens_steps |
@@ -1295,7 +1292,6 @@ static struct at86rf2xx_chip_data at86rf231_data = { | |||
1295 | .t_off_to_tx_on = 110, | 1292 | .t_off_to_tx_on = 110, |
1296 | .t_frame = 4096, | 1293 | .t_frame = 4096, |
1297 | .t_p_ack = 545, | 1294 | .t_p_ack = 545, |
1298 | .t_tx_timeout = 2000, | ||
1299 | .rssi_base_val = -91, | 1295 | .rssi_base_val = -91, |
1300 | .set_channel = at86rf23x_set_channel, | 1296 | .set_channel = at86rf23x_set_channel, |
1301 | .get_desense_steps = at86rf23x_get_desens_steps | 1297 | .get_desense_steps = at86rf23x_get_desens_steps |
@@ -1309,13 +1305,12 @@ static struct at86rf2xx_chip_data at86rf212_data = { | |||
1309 | .t_off_to_tx_on = 200, | 1305 | .t_off_to_tx_on = 200, |
1310 | .t_frame = 4096, | 1306 | .t_frame = 4096, |
1311 | .t_p_ack = 545, | 1307 | .t_p_ack = 545, |
1312 | .t_tx_timeout = 2000, | ||
1313 | .rssi_base_val = -100, | 1308 | .rssi_base_val = -100, |
1314 | .set_channel = at86rf212_set_channel, | 1309 | .set_channel = at86rf212_set_channel, |
1315 | .get_desense_steps = at86rf212_get_desens_steps | 1310 | .get_desense_steps = at86rf212_get_desens_steps |
1316 | }; | 1311 | }; |
1317 | 1312 | ||
1318 | static int at86rf230_hw_init(struct at86rf230_local *lp) | 1313 | static int at86rf230_hw_init(struct at86rf230_local *lp, u8 xtal_trim) |
1319 | { | 1314 | { |
1320 | int rc, irq_type, irq_pol = IRQ_ACTIVE_HIGH; | 1315 | int rc, irq_type, irq_pol = IRQ_ACTIVE_HIGH; |
1321 | unsigned int dvdd; | 1316 | unsigned int dvdd; |
@@ -1326,7 +1321,12 @@ static int at86rf230_hw_init(struct at86rf230_local *lp) | |||
1326 | return rc; | 1321 | return rc; |
1327 | 1322 | ||
1328 | irq_type = irq_get_trigger_type(lp->spi->irq); | 1323 | irq_type = irq_get_trigger_type(lp->spi->irq); |
1329 | if (irq_type == IRQ_TYPE_EDGE_FALLING) | 1324 | if (irq_type == IRQ_TYPE_EDGE_RISING || |
1325 | irq_type == IRQ_TYPE_EDGE_FALLING) | ||
1326 | dev_warn(&lp->spi->dev, | ||
1327 | "Using edge triggered irq's are not recommended!\n"); | ||
1328 | if (irq_type == IRQ_TYPE_EDGE_FALLING || | ||
1329 | irq_type == IRQ_TYPE_LEVEL_LOW) | ||
1330 | irq_pol = IRQ_ACTIVE_LOW; | 1330 | irq_pol = IRQ_ACTIVE_LOW; |
1331 | 1331 | ||
1332 | rc = at86rf230_write_subreg(lp, SR_IRQ_POLARITY, irq_pol); | 1332 | rc = at86rf230_write_subreg(lp, SR_IRQ_POLARITY, irq_pol); |
@@ -1341,6 +1341,11 @@ static int at86rf230_hw_init(struct at86rf230_local *lp) | |||
1341 | if (rc) | 1341 | if (rc) |
1342 | return rc; | 1342 | return rc; |
1343 | 1343 | ||
1344 | /* reset values differs in at86rf231 and at86rf233 */ | ||
1345 | rc = at86rf230_write_subreg(lp, SR_IRQ_MASK_MODE, 0); | ||
1346 | if (rc) | ||
1347 | return rc; | ||
1348 | |||
1344 | get_random_bytes(csma_seed, ARRAY_SIZE(csma_seed)); | 1349 | get_random_bytes(csma_seed, ARRAY_SIZE(csma_seed)); |
1345 | rc = at86rf230_write_subreg(lp, SR_CSMA_SEED_0, csma_seed[0]); | 1350 | rc = at86rf230_write_subreg(lp, SR_CSMA_SEED_0, csma_seed[0]); |
1346 | if (rc) | 1351 | if (rc) |
@@ -1362,6 +1367,45 @@ static int at86rf230_hw_init(struct at86rf230_local *lp) | |||
1362 | usleep_range(lp->data->t_sleep_cycle, | 1367 | usleep_range(lp->data->t_sleep_cycle, |
1363 | lp->data->t_sleep_cycle + 100); | 1368 | lp->data->t_sleep_cycle + 100); |
1364 | 1369 | ||
1370 | /* xtal_trim value is calculated by: | ||
1371 | * CL = 0.5 * (CX + CTRIM + CPAR) | ||
1372 | * | ||
1373 | * whereas: | ||
1374 | * CL = capacitor of used crystal | ||
1375 | * CX = connected capacitors at xtal pins | ||
1376 | * CPAR = in all at86rf2xx datasheets this is a constant value 3 pF, | ||
1377 | * but this is different on each board setup. You need to fine | ||
1378 | * tuning this value via CTRIM. | ||
1379 | * CTRIM = variable capacitor setting. Resolution is 0.3 pF range is | ||
1380 | * 0 pF upto 4.5 pF. | ||
1381 | * | ||
1382 | * Examples: | ||
1383 | * atben transceiver: | ||
1384 | * | ||
1385 | * CL = 8 pF | ||
1386 | * CX = 12 pF | ||
1387 | * CPAR = 3 pF (We assume the magic constant from datasheet) | ||
1388 | * CTRIM = 0.9 pF | ||
1389 | * | ||
1390 | * (12+0.9+3)/2 = 7.95 which is nearly at 8 pF | ||
1391 | * | ||
1392 | * xtal_trim = 0x3 | ||
1393 | * | ||
1394 | * openlabs transceiver: | ||
1395 | * | ||
1396 | * CL = 16 pF | ||
1397 | * CX = 22 pF | ||
1398 | * CPAR = 3 pF (We assume the magic constant from datasheet) | ||
1399 | * CTRIM = 4.5 pF | ||
1400 | * | ||
1401 | * (22+4.5+3)/2 = 14.75 which is the nearest value to 16 pF | ||
1402 | * | ||
1403 | * xtal_trim = 0xf | ||
1404 | */ | ||
1405 | rc = at86rf230_write_subreg(lp, SR_XTAL_TRIM, xtal_trim); | ||
1406 | if (rc) | ||
1407 | return rc; | ||
1408 | |||
1365 | rc = at86rf230_read_subreg(lp, SR_DVDD_OK, &dvdd); | 1409 | rc = at86rf230_read_subreg(lp, SR_DVDD_OK, &dvdd); |
1366 | if (rc) | 1410 | if (rc) |
1367 | return rc; | 1411 | return rc; |
@@ -1377,24 +1421,30 @@ static int at86rf230_hw_init(struct at86rf230_local *lp) | |||
1377 | return at86rf230_write_subreg(lp, SR_SLOTTED_OPERATION, 0); | 1421 | return at86rf230_write_subreg(lp, SR_SLOTTED_OPERATION, 0); |
1378 | } | 1422 | } |
1379 | 1423 | ||
1380 | static struct at86rf230_platform_data * | 1424 | static int |
1381 | at86rf230_get_pdata(struct spi_device *spi) | 1425 | at86rf230_get_pdata(struct spi_device *spi, int *rstn, int *slp_tr, |
1426 | u8 *xtal_trim) | ||
1382 | { | 1427 | { |
1383 | struct at86rf230_platform_data *pdata; | 1428 | struct at86rf230_platform_data *pdata = spi->dev.platform_data; |
1429 | int ret; | ||
1384 | 1430 | ||
1385 | if (!IS_ENABLED(CONFIG_OF) || !spi->dev.of_node) | 1431 | if (!IS_ENABLED(CONFIG_OF) || !spi->dev.of_node) { |
1386 | return spi->dev.platform_data; | 1432 | if (!pdata) |
1433 | return -ENOENT; | ||
1387 | 1434 | ||
1388 | pdata = devm_kzalloc(&spi->dev, sizeof(*pdata), GFP_KERNEL); | 1435 | *rstn = pdata->rstn; |
1389 | if (!pdata) | 1436 | *slp_tr = pdata->slp_tr; |
1390 | goto done; | 1437 | *xtal_trim = pdata->xtal_trim; |
1438 | return 0; | ||
1439 | } | ||
1391 | 1440 | ||
1392 | pdata->rstn = of_get_named_gpio(spi->dev.of_node, "reset-gpio", 0); | 1441 | *rstn = of_get_named_gpio(spi->dev.of_node, "reset-gpio", 0); |
1393 | pdata->slp_tr = of_get_named_gpio(spi->dev.of_node, "sleep-gpio", 0); | 1442 | *slp_tr = of_get_named_gpio(spi->dev.of_node, "sleep-gpio", 0); |
1443 | ret = of_property_read_u8(spi->dev.of_node, "xtal-trim", xtal_trim); | ||
1444 | if (ret < 0 && ret != -EINVAL) | ||
1445 | return ret; | ||
1394 | 1446 | ||
1395 | spi->dev.platform_data = pdata; | 1447 | return 0; |
1396 | done: | ||
1397 | return pdata; | ||
1398 | } | 1448 | } |
1399 | 1449 | ||
1400 | static int | 1450 | static int |
@@ -1501,43 +1551,43 @@ at86rf230_setup_spi_messages(struct at86rf230_local *lp) | |||
1501 | 1551 | ||
1502 | static int at86rf230_probe(struct spi_device *spi) | 1552 | static int at86rf230_probe(struct spi_device *spi) |
1503 | { | 1553 | { |
1504 | struct at86rf230_platform_data *pdata; | ||
1505 | struct ieee802154_hw *hw; | 1554 | struct ieee802154_hw *hw; |
1506 | struct at86rf230_local *lp; | 1555 | struct at86rf230_local *lp; |
1507 | unsigned int status; | 1556 | unsigned int status; |
1508 | int rc, irq_type; | 1557 | int rc, irq_type, rstn, slp_tr; |
1558 | u8 xtal_trim; | ||
1509 | 1559 | ||
1510 | if (!spi->irq) { | 1560 | if (!spi->irq) { |
1511 | dev_err(&spi->dev, "no IRQ specified\n"); | 1561 | dev_err(&spi->dev, "no IRQ specified\n"); |
1512 | return -EINVAL; | 1562 | return -EINVAL; |
1513 | } | 1563 | } |
1514 | 1564 | ||
1515 | pdata = at86rf230_get_pdata(spi); | 1565 | rc = at86rf230_get_pdata(spi, &rstn, &slp_tr, &xtal_trim); |
1516 | if (!pdata) { | 1566 | if (rc < 0) { |
1517 | dev_err(&spi->dev, "no platform_data\n"); | 1567 | dev_err(&spi->dev, "failed to parse platform_data: %d\n", rc); |
1518 | return -EINVAL; | 1568 | return rc; |
1519 | } | 1569 | } |
1520 | 1570 | ||
1521 | if (gpio_is_valid(pdata->rstn)) { | 1571 | if (gpio_is_valid(rstn)) { |
1522 | rc = devm_gpio_request_one(&spi->dev, pdata->rstn, | 1572 | rc = devm_gpio_request_one(&spi->dev, rstn, |
1523 | GPIOF_OUT_INIT_HIGH, "rstn"); | 1573 | GPIOF_OUT_INIT_HIGH, "rstn"); |
1524 | if (rc) | 1574 | if (rc) |
1525 | return rc; | 1575 | return rc; |
1526 | } | 1576 | } |
1527 | 1577 | ||
1528 | if (gpio_is_valid(pdata->slp_tr)) { | 1578 | if (gpio_is_valid(slp_tr)) { |
1529 | rc = devm_gpio_request_one(&spi->dev, pdata->slp_tr, | 1579 | rc = devm_gpio_request_one(&spi->dev, slp_tr, |
1530 | GPIOF_OUT_INIT_LOW, "slp_tr"); | 1580 | GPIOF_OUT_INIT_LOW, "slp_tr"); |
1531 | if (rc) | 1581 | if (rc) |
1532 | return rc; | 1582 | return rc; |
1533 | } | 1583 | } |
1534 | 1584 | ||
1535 | /* Reset */ | 1585 | /* Reset */ |
1536 | if (gpio_is_valid(pdata->rstn)) { | 1586 | if (gpio_is_valid(rstn)) { |
1537 | udelay(1); | 1587 | udelay(1); |
1538 | gpio_set_value(pdata->rstn, 0); | 1588 | gpio_set_value(rstn, 0); |
1539 | udelay(1); | 1589 | udelay(1); |
1540 | gpio_set_value(pdata->rstn, 1); | 1590 | gpio_set_value(rstn, 1); |
1541 | usleep_range(120, 240); | 1591 | usleep_range(120, 240); |
1542 | } | 1592 | } |
1543 | 1593 | ||
@@ -1571,7 +1621,7 @@ static int at86rf230_probe(struct spi_device *spi) | |||
1571 | 1621 | ||
1572 | spi_set_drvdata(spi, lp); | 1622 | spi_set_drvdata(spi, lp); |
1573 | 1623 | ||
1574 | rc = at86rf230_hw_init(lp); | 1624 | rc = at86rf230_hw_init(lp, xtal_trim); |
1575 | if (rc) | 1625 | if (rc) |
1576 | goto free_dev; | 1626 | goto free_dev; |
1577 | 1627 | ||
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 4f4099d5603d..2950c3780230 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c | |||
@@ -336,7 +336,6 @@ static int ipvlan_hard_header(struct sk_buff *skb, struct net_device *dev, | |||
336 | 336 | ||
337 | static const struct header_ops ipvlan_header_ops = { | 337 | static const struct header_ops ipvlan_header_ops = { |
338 | .create = ipvlan_hard_header, | 338 | .create = ipvlan_hard_header, |
339 | .rebuild = eth_rebuild_header, | ||
340 | .parse = eth_header_parse, | 339 | .parse = eth_header_parse, |
341 | .cache = eth_header_cache, | 340 | .cache = eth_header_cache, |
342 | .cache_update = eth_header_cache_update, | 341 | .cache_update = eth_header_cache_update, |
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 1df38bdae2ee..b5e3320ca506 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c | |||
@@ -550,7 +550,6 @@ static int macvlan_hard_header(struct sk_buff *skb, struct net_device *dev, | |||
550 | 550 | ||
551 | static const struct header_ops macvlan_hard_header_ops = { | 551 | static const struct header_ops macvlan_hard_header_ops = { |
552 | .create = macvlan_hard_header, | 552 | .create = macvlan_hard_header, |
553 | .rebuild = eth_rebuild_header, | ||
554 | .parse = eth_header_parse, | 553 | .parse = eth_header_parse, |
555 | .cache = eth_header_cache, | 554 | .cache = eth_header_cache, |
556 | .cache_update = eth_header_cache_update, | 555 | .cache_update = eth_header_cache_update, |
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 27ecc5c4fa26..8362aef0c15e 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c | |||
@@ -1130,16 +1130,15 @@ static const struct file_operations macvtap_fops = { | |||
1130 | #endif | 1130 | #endif |
1131 | }; | 1131 | }; |
1132 | 1132 | ||
1133 | static int macvtap_sendmsg(struct kiocb *iocb, struct socket *sock, | 1133 | static int macvtap_sendmsg(struct socket *sock, struct msghdr *m, |
1134 | struct msghdr *m, size_t total_len) | 1134 | size_t total_len) |
1135 | { | 1135 | { |
1136 | struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock); | 1136 | struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock); |
1137 | return macvtap_get_user(q, m, &m->msg_iter, m->msg_flags & MSG_DONTWAIT); | 1137 | return macvtap_get_user(q, m, &m->msg_iter, m->msg_flags & MSG_DONTWAIT); |
1138 | } | 1138 | } |
1139 | 1139 | ||
1140 | static int macvtap_recvmsg(struct kiocb *iocb, struct socket *sock, | 1140 | static int macvtap_recvmsg(struct socket *sock, struct msghdr *m, |
1141 | struct msghdr *m, size_t total_len, | 1141 | size_t total_len, int flags) |
1142 | int flags) | ||
1143 | { | 1142 | { |
1144 | struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock); | 1143 | struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock); |
1145 | int ret; | 1144 | int ret; |
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c index ba2f5e710af1..15731d1db918 100644 --- a/drivers/net/netconsole.c +++ b/drivers/net/netconsole.c | |||
@@ -47,6 +47,7 @@ | |||
47 | #include <linux/netpoll.h> | 47 | #include <linux/netpoll.h> |
48 | #include <linux/inet.h> | 48 | #include <linux/inet.h> |
49 | #include <linux/configfs.h> | 49 | #include <linux/configfs.h> |
50 | #include <linux/etherdevice.h> | ||
50 | 51 | ||
51 | MODULE_AUTHOR("Maintainer: Matt Mackall <mpm@selenic.com>"); | 52 | MODULE_AUTHOR("Maintainer: Matt Mackall <mpm@selenic.com>"); |
52 | MODULE_DESCRIPTION("Console driver for network interfaces"); | 53 | MODULE_DESCRIPTION("Console driver for network interfaces"); |
@@ -185,7 +186,7 @@ static struct netconsole_target *alloc_param_target(char *target_config) | |||
185 | nt->np.local_port = 6665; | 186 | nt->np.local_port = 6665; |
186 | nt->np.remote_port = 6666; | 187 | nt->np.remote_port = 6666; |
187 | mutex_init(&nt->mutex); | 188 | mutex_init(&nt->mutex); |
188 | memset(nt->np.remote_mac, 0xff, ETH_ALEN); | 189 | eth_broadcast_addr(nt->np.remote_mac); |
189 | 190 | ||
190 | /* Parse parameters and setup netpoll */ | 191 | /* Parse parameters and setup netpoll */ |
191 | err = netpoll_parse_options(&nt->np, target_config); | 192 | err = netpoll_parse_options(&nt->np, target_config); |
@@ -604,7 +605,7 @@ static struct config_item *make_netconsole_target(struct config_group *group, | |||
604 | nt->np.local_port = 6665; | 605 | nt->np.local_port = 6665; |
605 | nt->np.remote_port = 6666; | 606 | nt->np.remote_port = 6666; |
606 | mutex_init(&nt->mutex); | 607 | mutex_init(&nt->mutex); |
607 | memset(nt->np.remote_mac, 0xff, ETH_ALEN); | 608 | eth_broadcast_addr(nt->np.remote_mac); |
608 | 609 | ||
609 | /* Initialize the config_item member */ | 610 | /* Initialize the config_item member */ |
610 | config_item_init_type_name(&nt->item, name, &netconsole_target_type); | 611 | config_item_init_type_name(&nt->item, name, &netconsole_target_type); |
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c index d2408a5e43a6..ff059e1d8ac6 100644 --- a/drivers/net/ppp/pppoe.c +++ b/drivers/net/ppp/pppoe.c | |||
@@ -455,6 +455,18 @@ out: | |||
455 | return NET_RX_DROP; | 455 | return NET_RX_DROP; |
456 | } | 456 | } |
457 | 457 | ||
458 | static void pppoe_unbind_sock_work(struct work_struct *work) | ||
459 | { | ||
460 | struct pppox_sock *po = container_of(work, struct pppox_sock, | ||
461 | proto.pppoe.padt_work); | ||
462 | struct sock *sk = sk_pppox(po); | ||
463 | |||
464 | lock_sock(sk); | ||
465 | pppox_unbind_sock(sk); | ||
466 | release_sock(sk); | ||
467 | sock_put(sk); | ||
468 | } | ||
469 | |||
458 | /************************************************************************ | 470 | /************************************************************************ |
459 | * | 471 | * |
460 | * Receive a PPPoE Discovery frame. | 472 | * Receive a PPPoE Discovery frame. |
@@ -500,7 +512,8 @@ static int pppoe_disc_rcv(struct sk_buff *skb, struct net_device *dev, | |||
500 | } | 512 | } |
501 | 513 | ||
502 | bh_unlock_sock(sk); | 514 | bh_unlock_sock(sk); |
503 | sock_put(sk); | 515 | if (!schedule_work(&po->proto.pppoe.padt_work)) |
516 | sock_put(sk); | ||
504 | } | 517 | } |
505 | 518 | ||
506 | abort: | 519 | abort: |
@@ -613,6 +626,8 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr, | |||
613 | 626 | ||
614 | lock_sock(sk); | 627 | lock_sock(sk); |
615 | 628 | ||
629 | INIT_WORK(&po->proto.pppoe.padt_work, pppoe_unbind_sock_work); | ||
630 | |||
616 | error = -EINVAL; | 631 | error = -EINVAL; |
617 | if (sp->sa_protocol != PX_PROTO_OE) | 632 | if (sp->sa_protocol != PX_PROTO_OE) |
618 | goto end; | 633 | goto end; |
@@ -820,8 +835,8 @@ static int pppoe_ioctl(struct socket *sock, unsigned int cmd, | |||
820 | return err; | 835 | return err; |
821 | } | 836 | } |
822 | 837 | ||
823 | static int pppoe_sendmsg(struct kiocb *iocb, struct socket *sock, | 838 | static int pppoe_sendmsg(struct socket *sock, struct msghdr *m, |
824 | struct msghdr *m, size_t total_len) | 839 | size_t total_len) |
825 | { | 840 | { |
826 | struct sk_buff *skb; | 841 | struct sk_buff *skb; |
827 | struct sock *sk = sock->sk; | 842 | struct sock *sk = sock->sk; |
@@ -962,8 +977,8 @@ static const struct ppp_channel_ops pppoe_chan_ops = { | |||
962 | .start_xmit = pppoe_xmit, | 977 | .start_xmit = pppoe_xmit, |
963 | }; | 978 | }; |
964 | 979 | ||
965 | static int pppoe_recvmsg(struct kiocb *iocb, struct socket *sock, | 980 | static int pppoe_recvmsg(struct socket *sock, struct msghdr *m, |
966 | struct msghdr *m, size_t total_len, int flags) | 981 | size_t total_len, int flags) |
967 | { | 982 | { |
968 | struct sock *sk = sock->sk; | 983 | struct sock *sk = sock->sk; |
969 | struct sk_buff *skb; | 984 | struct sk_buff *skb; |
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index f1ee71e22241..9d3366f7c9ad 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c | |||
@@ -1935,6 +1935,9 @@ static netdev_features_t team_fix_features(struct net_device *dev, | |||
1935 | mask); | 1935 | mask); |
1936 | } | 1936 | } |
1937 | rcu_read_unlock(); | 1937 | rcu_read_unlock(); |
1938 | |||
1939 | features = netdev_add_tso_features(features, mask); | ||
1940 | |||
1938 | return features; | 1941 | return features; |
1939 | } | 1942 | } |
1940 | 1943 | ||
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 857dca47bf80..b96b94cee760 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
@@ -1448,8 +1448,7 @@ static void tun_sock_write_space(struct sock *sk) | |||
1448 | kill_fasync(&tfile->fasync, SIGIO, POLL_OUT); | 1448 | kill_fasync(&tfile->fasync, SIGIO, POLL_OUT); |
1449 | } | 1449 | } |
1450 | 1450 | ||
1451 | static int tun_sendmsg(struct kiocb *iocb, struct socket *sock, | 1451 | static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len) |
1452 | struct msghdr *m, size_t total_len) | ||
1453 | { | 1452 | { |
1454 | int ret; | 1453 | int ret; |
1455 | struct tun_file *tfile = container_of(sock, struct tun_file, socket); | 1454 | struct tun_file *tfile = container_of(sock, struct tun_file, socket); |
@@ -1464,8 +1463,7 @@ static int tun_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
1464 | return ret; | 1463 | return ret; |
1465 | } | 1464 | } |
1466 | 1465 | ||
1467 | static int tun_recvmsg(struct kiocb *iocb, struct socket *sock, | 1466 | static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len, |
1468 | struct msghdr *m, size_t total_len, | ||
1469 | int flags) | 1467 | int flags) |
1470 | { | 1468 | { |
1471 | struct tun_file *tfile = container_of(sock, struct tun_file, socket); | 1469 | struct tun_file *tfile = container_of(sock, struct tun_file, socket); |
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c index 5c55f11572ba..724a9b50df7a 100644 --- a/drivers/net/usb/asix_common.c +++ b/drivers/net/usb/asix_common.c | |||
@@ -188,6 +188,8 @@ struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb, | |||
188 | memcpy(skb_tail_pointer(skb), &padbytes, sizeof(padbytes)); | 188 | memcpy(skb_tail_pointer(skb), &padbytes, sizeof(padbytes)); |
189 | skb_put(skb, sizeof(padbytes)); | 189 | skb_put(skb, sizeof(padbytes)); |
190 | } | 190 | } |
191 | |||
192 | usbnet_set_skb_tx_stats(skb, 1); | ||
191 | return skb; | 193 | return skb; |
192 | } | 194 | } |
193 | 195 | ||
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c index 8cfc3bb0c6a6..4e2b26a88b15 100644 --- a/drivers/net/usb/catc.c +++ b/drivers/net/usb/catc.c | |||
@@ -641,7 +641,7 @@ static void catc_set_multicast_list(struct net_device *netdev) | |||
641 | u8 broadcast[ETH_ALEN]; | 641 | u8 broadcast[ETH_ALEN]; |
642 | u8 rx = RxEnable | RxPolarity | RxMultiCast; | 642 | u8 rx = RxEnable | RxPolarity | RxMultiCast; |
643 | 643 | ||
644 | memset(broadcast, 0xff, ETH_ALEN); | 644 | eth_broadcast_addr(broadcast); |
645 | memset(catc->multicast, 0, 64); | 645 | memset(catc->multicast, 0, 64); |
646 | 646 | ||
647 | catc_multicast(broadcast, catc->multicast); | 647 | catc_multicast(broadcast, catc->multicast); |
@@ -880,7 +880,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id | |||
880 | 880 | ||
881 | dev_dbg(dev, "Filling the multicast list.\n"); | 881 | dev_dbg(dev, "Filling the multicast list.\n"); |
882 | 882 | ||
883 | memset(broadcast, 0xff, ETH_ALEN); | 883 | eth_broadcast_addr(broadcast); |
884 | catc_multicast(broadcast, catc->multicast); | 884 | catc_multicast(broadcast, catc->multicast); |
885 | catc_multicast(netdev->dev_addr, catc->multicast); | 885 | catc_multicast(netdev->dev_addr, catc->multicast); |
886 | catc_write_mem(catc, 0xfa80, catc->multicast, 64); | 886 | catc_write_mem(catc, 0xfa80, catc->multicast, 64); |
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c index 96fc8a5bde84..e4b7a47a825c 100644 --- a/drivers/net/usb/cdc_mbim.c +++ b/drivers/net/usb/cdc_mbim.c | |||
@@ -394,7 +394,7 @@ static struct sk_buff *cdc_mbim_process_dgram(struct usbnet *dev, u8 *buf, size_ | |||
394 | skb_put(skb, ETH_HLEN); | 394 | skb_put(skb, ETH_HLEN); |
395 | skb_reset_mac_header(skb); | 395 | skb_reset_mac_header(skb); |
396 | eth_hdr(skb)->h_proto = proto; | 396 | eth_hdr(skb)->h_proto = proto; |
397 | memset(eth_hdr(skb)->h_source, 0, ETH_ALEN); | 397 | eth_zero_addr(eth_hdr(skb)->h_source); |
398 | memcpy(eth_hdr(skb)->h_dest, dev->net->dev_addr, ETH_ALEN); | 398 | memcpy(eth_hdr(skb)->h_dest, dev->net->dev_addr, ETH_ALEN); |
399 | 399 | ||
400 | /* add datagram */ | 400 | /* add datagram */ |
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 80a844e0ae03..70cbea551139 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c | |||
@@ -1172,7 +1172,6 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign) | |||
1172 | 1172 | ||
1173 | /* return skb */ | 1173 | /* return skb */ |
1174 | ctx->tx_curr_skb = NULL; | 1174 | ctx->tx_curr_skb = NULL; |
1175 | dev->net->stats.tx_packets += ctx->tx_curr_frame_num; | ||
1176 | 1175 | ||
1177 | /* keep private stats: framing overhead and number of NTBs */ | 1176 | /* keep private stats: framing overhead and number of NTBs */ |
1178 | ctx->tx_overhead += skb_out->len - ctx->tx_curr_frame_payload; | 1177 | ctx->tx_overhead += skb_out->len - ctx->tx_curr_frame_payload; |
@@ -1184,6 +1183,8 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign) | |||
1184 | */ | 1183 | */ |
1185 | dev->net->stats.tx_bytes -= skb_out->len - ctx->tx_curr_frame_payload; | 1184 | dev->net->stats.tx_bytes -= skb_out->len - ctx->tx_curr_frame_payload; |
1186 | 1185 | ||
1186 | usbnet_set_skb_tx_stats(skb_out, n); | ||
1187 | |||
1187 | return skb_out; | 1188 | return skb_out; |
1188 | 1189 | ||
1189 | exit_no_skb: | 1190 | exit_no_skb: |
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index 778e91531fac..111d907e0c11 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c | |||
@@ -1477,6 +1477,7 @@ static void tiocmget_intr_callback(struct urb *urb) | |||
1477 | struct uart_icount *icount; | 1477 | struct uart_icount *icount; |
1478 | struct hso_serial_state_notification *serial_state_notification; | 1478 | struct hso_serial_state_notification *serial_state_notification; |
1479 | struct usb_device *usb; | 1479 | struct usb_device *usb; |
1480 | struct usb_interface *interface; | ||
1480 | int if_num; | 1481 | int if_num; |
1481 | 1482 | ||
1482 | /* Sanity checks */ | 1483 | /* Sanity checks */ |
@@ -1494,7 +1495,9 @@ static void tiocmget_intr_callback(struct urb *urb) | |||
1494 | BUG_ON((serial->parent->port_spec & HSO_PORT_MASK) != HSO_PORT_MODEM); | 1495 | BUG_ON((serial->parent->port_spec & HSO_PORT_MASK) != HSO_PORT_MODEM); |
1495 | 1496 | ||
1496 | usb = serial->parent->usb; | 1497 | usb = serial->parent->usb; |
1497 | if_num = serial->parent->interface->altsetting->desc.bInterfaceNumber; | 1498 | interface = serial->parent->interface; |
1499 | |||
1500 | if_num = interface->cur_altsetting->desc.bInterfaceNumber; | ||
1498 | 1501 | ||
1499 | /* wIndex should be the USB interface number of the port to which the | 1502 | /* wIndex should be the USB interface number of the port to which the |
1500 | * notification applies, which should always be the Modem port. | 1503 | * notification applies, which should always be the Modem port. |
@@ -1675,6 +1678,7 @@ static int hso_serial_tiocmset(struct tty_struct *tty, | |||
1675 | unsigned long flags; | 1678 | unsigned long flags; |
1676 | int if_num; | 1679 | int if_num; |
1677 | struct hso_serial *serial = tty->driver_data; | 1680 | struct hso_serial *serial = tty->driver_data; |
1681 | struct usb_interface *interface; | ||
1678 | 1682 | ||
1679 | /* sanity check */ | 1683 | /* sanity check */ |
1680 | if (!serial) { | 1684 | if (!serial) { |
@@ -1685,7 +1689,8 @@ static int hso_serial_tiocmset(struct tty_struct *tty, | |||
1685 | if ((serial->parent->port_spec & HSO_PORT_MASK) != HSO_PORT_MODEM) | 1689 | if ((serial->parent->port_spec & HSO_PORT_MASK) != HSO_PORT_MODEM) |
1686 | return -EINVAL; | 1690 | return -EINVAL; |
1687 | 1691 | ||
1688 | if_num = serial->parent->interface->altsetting->desc.bInterfaceNumber; | 1692 | interface = serial->parent->interface; |
1693 | if_num = interface->cur_altsetting->desc.bInterfaceNumber; | ||
1689 | 1694 | ||
1690 | spin_lock_irqsave(&serial->serial_lock, flags); | 1695 | spin_lock_irqsave(&serial->serial_lock, flags); |
1691 | if (set & TIOCM_RTS) | 1696 | if (set & TIOCM_RTS) |
@@ -2808,7 +2813,7 @@ static int hso_get_config_data(struct usb_interface *interface) | |||
2808 | { | 2813 | { |
2809 | struct usb_device *usbdev = interface_to_usbdev(interface); | 2814 | struct usb_device *usbdev = interface_to_usbdev(interface); |
2810 | u8 *config_data = kmalloc(17, GFP_KERNEL); | 2815 | u8 *config_data = kmalloc(17, GFP_KERNEL); |
2811 | u32 if_num = interface->altsetting->desc.bInterfaceNumber; | 2816 | u32 if_num = interface->cur_altsetting->desc.bInterfaceNumber; |
2812 | s32 result; | 2817 | s32 result; |
2813 | 2818 | ||
2814 | if (!config_data) | 2819 | if (!config_data) |
@@ -2886,7 +2891,7 @@ static int hso_probe(struct usb_interface *interface, | |||
2886 | return -ENODEV; | 2891 | return -ENODEV; |
2887 | } | 2892 | } |
2888 | 2893 | ||
2889 | if_num = interface->altsetting->desc.bInterfaceNumber; | 2894 | if_num = interface->cur_altsetting->desc.bInterfaceNumber; |
2890 | 2895 | ||
2891 | /* Get the interface/port specification from either driver_info or from | 2896 | /* Get the interface/port specification from either driver_info or from |
2892 | * the device itself */ | 2897 | * the device itself */ |
diff --git a/drivers/net/usb/lg-vl600.c b/drivers/net/usb/lg-vl600.c index 8f37efd2d2fb..5714107533bb 100644 --- a/drivers/net/usb/lg-vl600.c +++ b/drivers/net/usb/lg-vl600.c | |||
@@ -201,7 +201,7 @@ static int vl600_rx_fixup(struct usbnet *dev, struct sk_buff *skb) | |||
201 | &buf->data[sizeof(*ethhdr) + 0x12], | 201 | &buf->data[sizeof(*ethhdr) + 0x12], |
202 | ETH_ALEN); | 202 | ETH_ALEN); |
203 | } else { | 203 | } else { |
204 | memset(ethhdr->h_source, 0, ETH_ALEN); | 204 | eth_zero_addr(ethhdr->h_source); |
205 | memcpy(ethhdr->h_dest, dev->net->dev_addr, ETH_ALEN); | 205 | memcpy(ethhdr->h_dest, dev->net->dev_addr, ETH_ALEN); |
206 | 206 | ||
207 | /* Inbound IPv6 packets have an IPv4 ethertype (0x800) | 207 | /* Inbound IPv6 packets have an IPv4 ethertype (0x800) |
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 602dc6668c3a..f603f362504b 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c | |||
@@ -108,7 +108,7 @@ static int qmi_wwan_rx_fixup(struct usbnet *dev, struct sk_buff *skb) | |||
108 | skb_push(skb, ETH_HLEN); | 108 | skb_push(skb, ETH_HLEN); |
109 | skb_reset_mac_header(skb); | 109 | skb_reset_mac_header(skb); |
110 | eth_hdr(skb)->h_proto = proto; | 110 | eth_hdr(skb)->h_proto = proto; |
111 | memset(eth_hdr(skb)->h_source, 0, ETH_ALEN); | 111 | eth_zero_addr(eth_hdr(skb)->h_source); |
112 | fix_dest: | 112 | fix_dest: |
113 | memcpy(eth_hdr(skb)->h_dest, dev->net->dev_addr, ETH_ALEN); | 113 | memcpy(eth_hdr(skb)->h_dest, dev->net->dev_addr, ETH_ALEN); |
114 | return 1; | 114 | return 1; |
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 438fc6bcaef1..5065538dd03b 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c | |||
@@ -104,7 +104,8 @@ | |||
104 | #define USB_TX_AGG 0xd40a | 104 | #define USB_TX_AGG 0xd40a |
105 | #define USB_RX_BUF_TH 0xd40c | 105 | #define USB_RX_BUF_TH 0xd40c |
106 | #define USB_USB_TIMER 0xd428 | 106 | #define USB_USB_TIMER 0xd428 |
107 | #define USB_RX_EARLY_AGG 0xd42c | 107 | #define USB_RX_EARLY_TIMEOUT 0xd42c |
108 | #define USB_RX_EARLY_SIZE 0xd42e | ||
108 | #define USB_PM_CTRL_STATUS 0xd432 | 109 | #define USB_PM_CTRL_STATUS 0xd432 |
109 | #define USB_TX_DMA 0xd434 | 110 | #define USB_TX_DMA 0xd434 |
110 | #define USB_TOLERANCE 0xd490 | 111 | #define USB_TOLERANCE 0xd490 |
@@ -349,10 +350,10 @@ | |||
349 | /* USB_MISC_0 */ | 350 | /* USB_MISC_0 */ |
350 | #define PCUT_STATUS 0x0001 | 351 | #define PCUT_STATUS 0x0001 |
351 | 352 | ||
352 | /* USB_RX_EARLY_AGG */ | 353 | /* USB_RX_EARLY_TIMEOUT */ |
353 | #define EARLY_AGG_SUPPER 0x0e832981 | 354 | #define COALESCE_SUPER 85000U |
354 | #define EARLY_AGG_HIGH 0x0e837a12 | 355 | #define COALESCE_HIGH 250000U |
355 | #define EARLY_AGG_SLOW 0x0e83ffff | 356 | #define COALESCE_SLOW 524280U |
356 | 357 | ||
357 | /* USB_WDT11_CTRL */ | 358 | /* USB_WDT11_CTRL */ |
358 | #define TIMER11_EN 0x0001 | 359 | #define TIMER11_EN 0x0001 |
@@ -606,6 +607,7 @@ struct r8152 { | |||
606 | u32 saved_wolopts; | 607 | u32 saved_wolopts; |
607 | u32 msg_enable; | 608 | u32 msg_enable; |
608 | u32 tx_qlen; | 609 | u32 tx_qlen; |
610 | u32 coalesce; | ||
609 | u16 ocp_base; | 611 | u16 ocp_base; |
610 | u8 *intr_buff; | 612 | u8 *intr_buff; |
611 | u8 version; | 613 | u8 version; |
@@ -2142,28 +2144,19 @@ static int rtl8152_enable(struct r8152 *tp) | |||
2142 | return rtl_enable(tp); | 2144 | return rtl_enable(tp); |
2143 | } | 2145 | } |
2144 | 2146 | ||
2145 | static void r8153_set_rx_agg(struct r8152 *tp) | 2147 | static void r8153_set_rx_early_timeout(struct r8152 *tp) |
2146 | { | 2148 | { |
2147 | u8 speed; | 2149 | u32 ocp_data = tp->coalesce / 8; |
2148 | 2150 | ||
2149 | speed = rtl8152_get_speed(tp); | 2151 | ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_TIMEOUT, ocp_data); |
2150 | if (speed & _1000bps) { | 2152 | } |
2151 | if (tp->udev->speed == USB_SPEED_SUPER) { | 2153 | |
2152 | ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, | 2154 | static void r8153_set_rx_early_size(struct r8152 *tp) |
2153 | RX_THR_SUPPER); | 2155 | { |
2154 | ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_EARLY_AGG, | 2156 | u32 mtu = tp->netdev->mtu; |
2155 | EARLY_AGG_SUPPER); | 2157 | u32 ocp_data = (agg_buf_sz - mtu - VLAN_ETH_HLEN - VLAN_HLEN) / 4; |
2156 | } else { | 2158 | |
2157 | ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, | 2159 | ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_SIZE, ocp_data); |
2158 | RX_THR_HIGH); | ||
2159 | ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_EARLY_AGG, | ||
2160 | EARLY_AGG_HIGH); | ||
2161 | } | ||
2162 | } else { | ||
2163 | ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, RX_THR_SLOW); | ||
2164 | ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_EARLY_AGG, | ||
2165 | EARLY_AGG_SLOW); | ||
2166 | } | ||
2167 | } | 2160 | } |
2168 | 2161 | ||
2169 | static int rtl8153_enable(struct r8152 *tp) | 2162 | static int rtl8153_enable(struct r8152 *tp) |
@@ -2173,7 +2166,8 @@ static int rtl8153_enable(struct r8152 *tp) | |||
2173 | 2166 | ||
2174 | set_tx_qlen(tp); | 2167 | set_tx_qlen(tp); |
2175 | rtl_set_eee_plus(tp); | 2168 | rtl_set_eee_plus(tp); |
2176 | r8153_set_rx_agg(tp); | 2169 | r8153_set_rx_early_timeout(tp); |
2170 | r8153_set_rx_early_size(tp); | ||
2177 | 2171 | ||
2178 | return rtl_enable(tp); | 2172 | return rtl_enable(tp); |
2179 | } | 2173 | } |
@@ -3719,6 +3713,61 @@ out: | |||
3719 | return ret; | 3713 | return ret; |
3720 | } | 3714 | } |
3721 | 3715 | ||
3716 | static int rtl8152_get_coalesce(struct net_device *netdev, | ||
3717 | struct ethtool_coalesce *coalesce) | ||
3718 | { | ||
3719 | struct r8152 *tp = netdev_priv(netdev); | ||
3720 | |||
3721 | switch (tp->version) { | ||
3722 | case RTL_VER_01: | ||
3723 | case RTL_VER_02: | ||
3724 | return -EOPNOTSUPP; | ||
3725 | default: | ||
3726 | break; | ||
3727 | } | ||
3728 | |||
3729 | coalesce->rx_coalesce_usecs = tp->coalesce; | ||
3730 | |||
3731 | return 0; | ||
3732 | } | ||
3733 | |||
3734 | static int rtl8152_set_coalesce(struct net_device *netdev, | ||
3735 | struct ethtool_coalesce *coalesce) | ||
3736 | { | ||
3737 | struct r8152 *tp = netdev_priv(netdev); | ||
3738 | int ret; | ||
3739 | |||
3740 | switch (tp->version) { | ||
3741 | case RTL_VER_01: | ||
3742 | case RTL_VER_02: | ||
3743 | return -EOPNOTSUPP; | ||
3744 | default: | ||
3745 | break; | ||
3746 | } | ||
3747 | |||
3748 | if (coalesce->rx_coalesce_usecs > COALESCE_SLOW) | ||
3749 | return -EINVAL; | ||
3750 | |||
3751 | ret = usb_autopm_get_interface(tp->intf); | ||
3752 | if (ret < 0) | ||
3753 | return ret; | ||
3754 | |||
3755 | mutex_lock(&tp->control); | ||
3756 | |||
3757 | if (tp->coalesce != coalesce->rx_coalesce_usecs) { | ||
3758 | tp->coalesce = coalesce->rx_coalesce_usecs; | ||
3759 | |||
3760 | if (netif_running(tp->netdev) && netif_carrier_ok(netdev)) | ||
3761 | r8153_set_rx_early_timeout(tp); | ||
3762 | } | ||
3763 | |||
3764 | mutex_unlock(&tp->control); | ||
3765 | |||
3766 | usb_autopm_put_interface(tp->intf); | ||
3767 | |||
3768 | return ret; | ||
3769 | } | ||
3770 | |||
3722 | static struct ethtool_ops ops = { | 3771 | static struct ethtool_ops ops = { |
3723 | .get_drvinfo = rtl8152_get_drvinfo, | 3772 | .get_drvinfo = rtl8152_get_drvinfo, |
3724 | .get_settings = rtl8152_get_settings, | 3773 | .get_settings = rtl8152_get_settings, |
@@ -3732,6 +3781,8 @@ static struct ethtool_ops ops = { | |||
3732 | .get_strings = rtl8152_get_strings, | 3781 | .get_strings = rtl8152_get_strings, |
3733 | .get_sset_count = rtl8152_get_sset_count, | 3782 | .get_sset_count = rtl8152_get_sset_count, |
3734 | .get_ethtool_stats = rtl8152_get_ethtool_stats, | 3783 | .get_ethtool_stats = rtl8152_get_ethtool_stats, |
3784 | .get_coalesce = rtl8152_get_coalesce, | ||
3785 | .set_coalesce = rtl8152_set_coalesce, | ||
3735 | .get_eee = rtl_ethtool_get_eee, | 3786 | .get_eee = rtl_ethtool_get_eee, |
3736 | .set_eee = rtl_ethtool_set_eee, | 3787 | .set_eee = rtl_ethtool_set_eee, |
3737 | }; | 3788 | }; |
@@ -3783,6 +3834,7 @@ out: | |||
3783 | static int rtl8152_change_mtu(struct net_device *dev, int new_mtu) | 3834 | static int rtl8152_change_mtu(struct net_device *dev, int new_mtu) |
3784 | { | 3835 | { |
3785 | struct r8152 *tp = netdev_priv(dev); | 3836 | struct r8152 *tp = netdev_priv(dev); |
3837 | int ret; | ||
3786 | 3838 | ||
3787 | switch (tp->version) { | 3839 | switch (tp->version) { |
3788 | case RTL_VER_01: | 3840 | case RTL_VER_01: |
@@ -3795,9 +3847,22 @@ static int rtl8152_change_mtu(struct net_device *dev, int new_mtu) | |||
3795 | if (new_mtu < 68 || new_mtu > RTL8153_MAX_MTU) | 3847 | if (new_mtu < 68 || new_mtu > RTL8153_MAX_MTU) |
3796 | return -EINVAL; | 3848 | return -EINVAL; |
3797 | 3849 | ||
3850 | ret = usb_autopm_get_interface(tp->intf); | ||
3851 | if (ret < 0) | ||
3852 | return ret; | ||
3853 | |||
3854 | mutex_lock(&tp->control); | ||
3855 | |||
3798 | dev->mtu = new_mtu; | 3856 | dev->mtu = new_mtu; |
3799 | 3857 | ||
3800 | return 0; | 3858 | if (netif_running(dev) && netif_carrier_ok(dev)) |
3859 | r8153_set_rx_early_size(tp); | ||
3860 | |||
3861 | mutex_unlock(&tp->control); | ||
3862 | |||
3863 | usb_autopm_put_interface(tp->intf); | ||
3864 | |||
3865 | return ret; | ||
3801 | } | 3866 | } |
3802 | 3867 | ||
3803 | static const struct net_device_ops rtl8152_netdev_ops = { | 3868 | static const struct net_device_ops rtl8152_netdev_ops = { |
@@ -3966,6 +4031,18 @@ static int rtl8152_probe(struct usb_interface *intf, | |||
3966 | tp->mii.reg_num_mask = 0x1f; | 4031 | tp->mii.reg_num_mask = 0x1f; |
3967 | tp->mii.phy_id = R8152_PHY_ID; | 4032 | tp->mii.phy_id = R8152_PHY_ID; |
3968 | 4033 | ||
4034 | switch (udev->speed) { | ||
4035 | case USB_SPEED_SUPER: | ||
4036 | tp->coalesce = COALESCE_SUPER; | ||
4037 | break; | ||
4038 | case USB_SPEED_HIGH: | ||
4039 | tp->coalesce = COALESCE_HIGH; | ||
4040 | break; | ||
4041 | default: | ||
4042 | tp->coalesce = COALESCE_SLOW; | ||
4043 | break; | ||
4044 | } | ||
4045 | |||
3969 | intf->needs_remote_wakeup = 1; | 4046 | intf->needs_remote_wakeup = 1; |
3970 | 4047 | ||
3971 | tp->rtl_ops.init(tp); | 4048 | tp->rtl_ops.init(tp); |
diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c index b94a0fbb8b3b..7650cdc8fe6b 100644 --- a/drivers/net/usb/sr9800.c +++ b/drivers/net/usb/sr9800.c | |||
@@ -144,6 +144,7 @@ static struct sk_buff *sr_tx_fixup(struct usbnet *dev, struct sk_buff *skb, | |||
144 | skb_put(skb, sizeof(padbytes)); | 144 | skb_put(skb, sizeof(padbytes)); |
145 | } | 145 | } |
146 | 146 | ||
147 | usbnet_set_skb_tx_stats(skb, 1); | ||
147 | return skb; | 148 | return skb; |
148 | } | 149 | } |
149 | 150 | ||
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 449835f4331e..0f3ff285f6a1 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c | |||
@@ -1188,8 +1188,7 @@ static void tx_complete (struct urb *urb) | |||
1188 | struct usbnet *dev = entry->dev; | 1188 | struct usbnet *dev = entry->dev; |
1189 | 1189 | ||
1190 | if (urb->status == 0) { | 1190 | if (urb->status == 0) { |
1191 | if (!(dev->driver_info->flags & FLAG_MULTI_PACKET)) | 1191 | dev->net->stats.tx_packets += entry->packets; |
1192 | dev->net->stats.tx_packets++; | ||
1193 | dev->net->stats.tx_bytes += entry->length; | 1192 | dev->net->stats.tx_bytes += entry->length; |
1194 | } else { | 1193 | } else { |
1195 | dev->net->stats.tx_errors++; | 1194 | dev->net->stats.tx_errors++; |
@@ -1348,6 +1347,8 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb, | |||
1348 | urb->transfer_flags |= URB_ZERO_PACKET; | 1347 | urb->transfer_flags |= URB_ZERO_PACKET; |
1349 | } | 1348 | } |
1350 | entry->length = urb->transfer_buffer_length = length; | 1349 | entry->length = urb->transfer_buffer_length = length; |
1350 | if (!(info->flags & FLAG_MULTI_PACKET)) | ||
1351 | usbnet_set_skb_tx_stats(skb, 1); | ||
1351 | 1352 | ||
1352 | spin_lock_irqsave(&dev->txq.lock, flags); | 1353 | spin_lock_irqsave(&dev->txq.lock, flags); |
1353 | retval = usb_autopm_get_interface_async(dev->intf); | 1354 | retval = usb_autopm_get_interface_async(dev->intf); |
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 294214c15292..61c0840c448c 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c | |||
@@ -819,6 +819,7 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, | |||
819 | struct vmxnet3_adapter *adapter) | 819 | struct vmxnet3_adapter *adapter) |
820 | { | 820 | { |
821 | struct Vmxnet3_TxDataDesc *tdd; | 821 | struct Vmxnet3_TxDataDesc *tdd; |
822 | u8 protocol = 0; | ||
822 | 823 | ||
823 | if (ctx->mss) { /* TSO */ | 824 | if (ctx->mss) { /* TSO */ |
824 | ctx->eth_ip_hdr_size = skb_transport_offset(skb); | 825 | ctx->eth_ip_hdr_size = skb_transport_offset(skb); |
@@ -831,16 +832,25 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, | |||
831 | if (ctx->ipv4) { | 832 | if (ctx->ipv4) { |
832 | const struct iphdr *iph = ip_hdr(skb); | 833 | const struct iphdr *iph = ip_hdr(skb); |
833 | 834 | ||
834 | if (iph->protocol == IPPROTO_TCP) | 835 | protocol = iph->protocol; |
835 | ctx->l4_hdr_size = tcp_hdrlen(skb); | 836 | } else if (ctx->ipv6) { |
836 | else if (iph->protocol == IPPROTO_UDP) | 837 | const struct ipv6hdr *ipv6h = ipv6_hdr(skb); |
837 | ctx->l4_hdr_size = sizeof(struct udphdr); | 838 | |
838 | else | 839 | protocol = ipv6h->nexthdr; |
839 | ctx->l4_hdr_size = 0; | 840 | } |
840 | } else { | 841 | |
841 | /* for simplicity, don't copy L4 headers */ | 842 | switch (protocol) { |
843 | case IPPROTO_TCP: | ||
844 | ctx->l4_hdr_size = tcp_hdrlen(skb); | ||
845 | break; | ||
846 | case IPPROTO_UDP: | ||
847 | ctx->l4_hdr_size = sizeof(struct udphdr); | ||
848 | break; | ||
849 | default: | ||
842 | ctx->l4_hdr_size = 0; | 850 | ctx->l4_hdr_size = 0; |
851 | break; | ||
843 | } | 852 | } |
853 | |||
844 | ctx->copy_size = min(ctx->eth_ip_hdr_size + | 854 | ctx->copy_size = min(ctx->eth_ip_hdr_size + |
845 | ctx->l4_hdr_size, skb->len); | 855 | ctx->l4_hdr_size, skb->len); |
846 | } else { | 856 | } else { |
@@ -887,7 +897,7 @@ vmxnet3_prepare_tso(struct sk_buff *skb, | |||
887 | iph->check = 0; | 897 | iph->check = 0; |
888 | tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0, | 898 | tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0, |
889 | IPPROTO_TCP, 0); | 899 | IPPROTO_TCP, 0); |
890 | } else { | 900 | } else if (ctx->ipv6) { |
891 | struct ipv6hdr *iph = ipv6_hdr(skb); | 901 | struct ipv6hdr *iph = ipv6_hdr(skb); |
892 | 902 | ||
893 | tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0, | 903 | tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0, |
@@ -938,6 +948,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, | |||
938 | count = txd_estimate(skb); | 948 | count = txd_estimate(skb); |
939 | 949 | ||
940 | ctx.ipv4 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IP)); | 950 | ctx.ipv4 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IP)); |
951 | ctx.ipv6 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IPV6)); | ||
941 | 952 | ||
942 | ctx.mss = skb_shinfo(skb)->gso_size; | 953 | ctx.mss = skb_shinfo(skb)->gso_size; |
943 | if (ctx.mss) { | 954 | if (ctx.mss) { |
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h index cd71c77f78f2..6bb769ae7de9 100644 --- a/drivers/net/vmxnet3/vmxnet3_int.h +++ b/drivers/net/vmxnet3/vmxnet3_int.h | |||
@@ -69,10 +69,10 @@ | |||
69 | /* | 69 | /* |
70 | * Version numbers | 70 | * Version numbers |
71 | */ | 71 | */ |
72 | #define VMXNET3_DRIVER_VERSION_STRING "1.3.4.0-k" | 72 | #define VMXNET3_DRIVER_VERSION_STRING "1.3.5.0-k" |
73 | 73 | ||
74 | /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ | 74 | /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ |
75 | #define VMXNET3_DRIVER_VERSION_NUM 0x01030400 | 75 | #define VMXNET3_DRIVER_VERSION_NUM 0x01030500 |
76 | 76 | ||
77 | #if defined(CONFIG_PCI_MSI) | 77 | #if defined(CONFIG_PCI_MSI) |
78 | /* RSS only makes sense if MSI-X is supported. */ | 78 | /* RSS only makes sense if MSI-X is supported. */ |
@@ -211,6 +211,7 @@ struct vmxnet3_tq_driver_stats { | |||
211 | 211 | ||
212 | struct vmxnet3_tx_ctx { | 212 | struct vmxnet3_tx_ctx { |
213 | bool ipv4; | 213 | bool ipv4; |
214 | bool ipv6; | ||
214 | u16 mss; | 215 | u16 mss; |
215 | u32 eth_ip_hdr_size; /* only valid for pkts requesting tso or csum | 216 | u32 eth_ip_hdr_size; /* only valid for pkts requesting tso or csum |
216 | * offloading | 217 | * offloading |
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c index e71a2ce7a448..627443283e1d 100644 --- a/drivers/net/wireless/airo.c +++ b/drivers/net/wireless/airo.c | |||
@@ -2676,7 +2676,7 @@ static void wifi_setup(struct net_device *dev) | |||
2676 | dev->addr_len = ETH_ALEN; | 2676 | dev->addr_len = ETH_ALEN; |
2677 | dev->tx_queue_len = 100; | 2677 | dev->tx_queue_len = 100; |
2678 | 2678 | ||
2679 | memset(dev->broadcast,0xFF, ETH_ALEN); | 2679 | eth_broadcast_addr(dev->broadcast); |
2680 | 2680 | ||
2681 | dev->flags = IFF_BROADCAST|IFF_MULTICAST; | 2681 | dev->flags = IFF_BROADCAST|IFF_MULTICAST; |
2682 | } | 2682 | } |
@@ -3273,7 +3273,7 @@ static void airo_handle_link(struct airo_info *ai) | |||
3273 | } | 3273 | } |
3274 | 3274 | ||
3275 | /* Send event to user space */ | 3275 | /* Send event to user space */ |
3276 | memset(wrqu.ap_addr.sa_data, '\0', ETH_ALEN); | 3276 | eth_zero_addr(wrqu.ap_addr.sa_data); |
3277 | wrqu.ap_addr.sa_family = ARPHRD_ETHER; | 3277 | wrqu.ap_addr.sa_family = ARPHRD_ETHER; |
3278 | wireless_send_event(ai->dev, SIOCGIWAP, &wrqu, NULL); | 3278 | wireless_send_event(ai->dev, SIOCGIWAP, &wrqu, NULL); |
3279 | } | 3279 | } |
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c index da92bfa76b7c..49219c508963 100644 --- a/drivers/net/wireless/at76c50x-usb.c +++ b/drivers/net/wireless/at76c50x-usb.c | |||
@@ -1166,7 +1166,7 @@ static int at76_start_monitor(struct at76_priv *priv) | |||
1166 | int ret; | 1166 | int ret; |
1167 | 1167 | ||
1168 | memset(&scan, 0, sizeof(struct at76_req_scan)); | 1168 | memset(&scan, 0, sizeof(struct at76_req_scan)); |
1169 | memset(scan.bssid, 0xff, ETH_ALEN); | 1169 | eth_broadcast_addr(scan.bssid); |
1170 | 1170 | ||
1171 | scan.channel = priv->channel; | 1171 | scan.channel = priv->channel; |
1172 | scan.scan_type = SCAN_TYPE_PASSIVE; | 1172 | scan.scan_type = SCAN_TYPE_PASSIVE; |
@@ -1427,7 +1427,7 @@ static int at76_startup_device(struct at76_priv *priv) | |||
1427 | at76_wait_completion(priv, CMD_STARTUP); | 1427 | at76_wait_completion(priv, CMD_STARTUP); |
1428 | 1428 | ||
1429 | /* remove BSSID from previous run */ | 1429 | /* remove BSSID from previous run */ |
1430 | memset(priv->bssid, 0, ETH_ALEN); | 1430 | eth_zero_addr(priv->bssid); |
1431 | 1431 | ||
1432 | priv->scanning = false; | 1432 | priv->scanning = false; |
1433 | 1433 | ||
@@ -1973,7 +1973,7 @@ static int at76_hw_scan(struct ieee80211_hw *hw, | |||
1973 | ieee80211_stop_queues(hw); | 1973 | ieee80211_stop_queues(hw); |
1974 | 1974 | ||
1975 | memset(&scan, 0, sizeof(struct at76_req_scan)); | 1975 | memset(&scan, 0, sizeof(struct at76_req_scan)); |
1976 | memset(scan.bssid, 0xFF, ETH_ALEN); | 1976 | eth_broadcast_addr(scan.bssid); |
1977 | 1977 | ||
1978 | if (req->n_ssids) { | 1978 | if (req->n_ssids) { |
1979 | scan.scan_type = SCAN_TYPE_ACTIVE; | 1979 | scan.scan_type = SCAN_TYPE_ACTIVE; |
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index d6d2f0f00caa..6c364bb98924 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c | |||
@@ -1182,7 +1182,7 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif, | |||
1182 | if (is_zero_ether_addr(arvif->bssid)) | 1182 | if (is_zero_ether_addr(arvif->bssid)) |
1183 | return; | 1183 | return; |
1184 | 1184 | ||
1185 | memset(arvif->bssid, 0, ETH_ALEN); | 1185 | eth_zero_addr(arvif->bssid); |
1186 | 1186 | ||
1187 | return; | 1187 | return; |
1188 | } | 1188 | } |
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c index bc9cb356fa69..57a80e89822d 100644 --- a/drivers/net/wireless/ath/ath5k/base.c +++ b/drivers/net/wireless/ath/ath5k/base.c | |||
@@ -528,7 +528,7 @@ ath5k_update_bssid_mask_and_opmode(struct ath5k_hw *ah, | |||
528 | * together with the BSSID mask when matching addresses. | 528 | * together with the BSSID mask when matching addresses. |
529 | */ | 529 | */ |
530 | iter_data.hw_macaddr = common->macaddr; | 530 | iter_data.hw_macaddr = common->macaddr; |
531 | memset(&iter_data.mask, 0xff, ETH_ALEN); | 531 | eth_broadcast_addr(iter_data.mask); |
532 | iter_data.found_active = false; | 532 | iter_data.found_active = false; |
533 | iter_data.need_set_hw_addr = true; | 533 | iter_data.need_set_hw_addr = true; |
534 | iter_data.opmode = NL80211_IFTYPE_UNSPECIFIED; | 534 | iter_data.opmode = NL80211_IFTYPE_UNSPECIFIED; |
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c index 85da63a67faf..e2978037d858 100644 --- a/drivers/net/wireless/ath/ath6kl/cfg80211.c +++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c | |||
@@ -2033,7 +2033,7 @@ static int ath6kl_wow_sta(struct ath6kl *ar, struct ath6kl_vif *vif) | |||
2033 | int ret; | 2033 | int ret; |
2034 | 2034 | ||
2035 | /* Setup unicast pkt pattern */ | 2035 | /* Setup unicast pkt pattern */ |
2036 | memset(mac_mask, 0xff, ETH_ALEN); | 2036 | eth_broadcast_addr(mac_mask); |
2037 | ret = ath6kl_wmi_add_wow_pattern_cmd(ar->wmi, | 2037 | ret = ath6kl_wmi_add_wow_pattern_cmd(ar->wmi, |
2038 | vif->fw_vif_idx, WOW_LIST_ID, | 2038 | vif->fw_vif_idx, WOW_LIST_ID, |
2039 | ETH_ALEN, 0, ndev->dev_addr, | 2039 | ETH_ALEN, 0, ndev->dev_addr, |
diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c index b42ba46b5030..1af3fed5a72c 100644 --- a/drivers/net/wireless/ath/ath6kl/main.c +++ b/drivers/net/wireless/ath/ath6kl/main.c | |||
@@ -105,7 +105,7 @@ static void ath6kl_sta_cleanup(struct ath6kl *ar, u8 i) | |||
105 | 105 | ||
106 | memset(&ar->ap_stats.sta[sta->aid - 1], 0, | 106 | memset(&ar->ap_stats.sta[sta->aid - 1], 0, |
107 | sizeof(struct wmi_per_sta_stat)); | 107 | sizeof(struct wmi_per_sta_stat)); |
108 | memset(sta->mac, 0, ETH_ALEN); | 108 | eth_zero_addr(sta->mac); |
109 | memset(sta->wpa_ie, 0, ATH6KL_MAX_IE); | 109 | memset(sta->wpa_ie, 0, ATH6KL_MAX_IE); |
110 | sta->aid = 0; | 110 | sta->aid = 0; |
111 | sta->sta_flags = 0; | 111 | sta->sta_flags = 0; |
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c index 92d5a6c5a225..564923c0df87 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c | |||
@@ -149,7 +149,7 @@ static void ath9k_htc_set_mac_bssid_mask(struct ath9k_htc_priv *priv, | |||
149 | * when matching addresses. | 149 | * when matching addresses. |
150 | */ | 150 | */ |
151 | iter_data.hw_macaddr = NULL; | 151 | iter_data.hw_macaddr = NULL; |
152 | memset(&iter_data.mask, 0xff, ETH_ALEN); | 152 | eth_broadcast_addr(iter_data.mask); |
153 | 153 | ||
154 | if (vif) | 154 | if (vif) |
155 | ath9k_htc_bssid_iter(&iter_data, vif->addr, vif); | 155 | ath9k_htc_bssid_iter(&iter_data, vif->addr, vif); |
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 9ede991b8d76..93ed99a72542 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c | |||
@@ -994,7 +994,7 @@ void ath9k_calculate_iter_data(struct ath_softc *sc, | |||
994 | * BSSID mask when matching addresses. | 994 | * BSSID mask when matching addresses. |
995 | */ | 995 | */ |
996 | memset(iter_data, 0, sizeof(*iter_data)); | 996 | memset(iter_data, 0, sizeof(*iter_data)); |
997 | memset(&iter_data->mask, 0xff, ETH_ALEN); | 997 | eth_broadcast_addr(iter_data->mask); |
998 | iter_data->slottime = ATH9K_SLOT_TIME_9; | 998 | iter_data->slottime = ATH9K_SLOT_TIME_9; |
999 | 999 | ||
1000 | list_for_each_entry(avp, &ctx->vifs, list) | 1000 | list_for_each_entry(avp, &ctx->vifs, list) |
@@ -1139,7 +1139,7 @@ void ath9k_calculate_summary_state(struct ath_softc *sc, | |||
1139 | ctx->primary_sta = iter_data.primary_sta; | 1139 | ctx->primary_sta = iter_data.primary_sta; |
1140 | } else { | 1140 | } else { |
1141 | ctx->primary_sta = NULL; | 1141 | ctx->primary_sta = NULL; |
1142 | memset(common->curbssid, 0, ETH_ALEN); | 1142 | eth_zero_addr(common->curbssid); |
1143 | common->curaid = 0; | 1143 | common->curaid = 0; |
1144 | ath9k_hw_write_associd(sc->sc_ah); | 1144 | ath9k_hw_write_associd(sc->sc_ah); |
1145 | if (ath9k_hw_mci_is_enabled(sc->sc_ah)) | 1145 | if (ath9k_hw_mci_is_enabled(sc->sc_ah)) |
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c index 55db9f03eb2a..6a1f03c271c1 100644 --- a/drivers/net/wireless/atmel.c +++ b/drivers/net/wireless/atmel.c | |||
@@ -1004,7 +1004,7 @@ static void frag_rx_path(struct atmel_private *priv, | |||
1004 | atmel_copy_to_host(priv->dev, (void *)&netcrc, rx_packet_loc + msdu_size, 4); | 1004 | atmel_copy_to_host(priv->dev, (void *)&netcrc, rx_packet_loc + msdu_size, 4); |
1005 | if ((crc ^ 0xffffffff) != netcrc) { | 1005 | if ((crc ^ 0xffffffff) != netcrc) { |
1006 | priv->dev->stats.rx_crc_errors++; | 1006 | priv->dev->stats.rx_crc_errors++; |
1007 | memset(priv->frag_source, 0xff, ETH_ALEN); | 1007 | eth_broadcast_addr(priv->frag_source); |
1008 | } | 1008 | } |
1009 | } | 1009 | } |
1010 | 1010 | ||
@@ -1022,7 +1022,7 @@ static void frag_rx_path(struct atmel_private *priv, | |||
1022 | atmel_copy_to_host(priv->dev, (void *)&netcrc, rx_packet_loc + msdu_size, 4); | 1022 | atmel_copy_to_host(priv->dev, (void *)&netcrc, rx_packet_loc + msdu_size, 4); |
1023 | if ((crc ^ 0xffffffff) != netcrc) { | 1023 | if ((crc ^ 0xffffffff) != netcrc) { |
1024 | priv->dev->stats.rx_crc_errors++; | 1024 | priv->dev->stats.rx_crc_errors++; |
1025 | memset(priv->frag_source, 0xff, ETH_ALEN); | 1025 | eth_broadcast_addr(priv->frag_source); |
1026 | more_frags = 1; /* don't send broken assembly */ | 1026 | more_frags = 1; /* don't send broken assembly */ |
1027 | } | 1027 | } |
1028 | } | 1028 | } |
@@ -1031,7 +1031,7 @@ static void frag_rx_path(struct atmel_private *priv, | |||
1031 | priv->frag_no++; | 1031 | priv->frag_no++; |
1032 | 1032 | ||
1033 | if (!more_frags) { /* last one */ | 1033 | if (!more_frags) { /* last one */ |
1034 | memset(priv->frag_source, 0xff, ETH_ALEN); | 1034 | eth_broadcast_addr(priv->frag_source); |
1035 | if (!(skb = dev_alloc_skb(priv->frag_len + 14))) { | 1035 | if (!(skb = dev_alloc_skb(priv->frag_len + 14))) { |
1036 | priv->dev->stats.rx_dropped++; | 1036 | priv->dev->stats.rx_dropped++; |
1037 | } else { | 1037 | } else { |
@@ -1127,7 +1127,7 @@ static void rx_done_irq(struct atmel_private *priv) | |||
1127 | atmel_copy_to_host(priv->dev, (unsigned char *)&priv->rx_buf, rx_packet_loc + 24, msdu_size); | 1127 | atmel_copy_to_host(priv->dev, (unsigned char *)&priv->rx_buf, rx_packet_loc + 24, msdu_size); |
1128 | 1128 | ||
1129 | /* we use the same buffer for frag reassembly and control packets */ | 1129 | /* we use the same buffer for frag reassembly and control packets */ |
1130 | memset(priv->frag_source, 0xff, ETH_ALEN); | 1130 | eth_broadcast_addr(priv->frag_source); |
1131 | 1131 | ||
1132 | if (priv->do_rx_crc) { | 1132 | if (priv->do_rx_crc) { |
1133 | /* last 4 octets is crc */ | 1133 | /* last 4 octets is crc */ |
@@ -1379,7 +1379,7 @@ static int atmel_close(struct net_device *dev) | |||
1379 | wrqu.data.length = 0; | 1379 | wrqu.data.length = 0; |
1380 | wrqu.data.flags = 0; | 1380 | wrqu.data.flags = 0; |
1381 | wrqu.ap_addr.sa_family = ARPHRD_ETHER; | 1381 | wrqu.ap_addr.sa_family = ARPHRD_ETHER; |
1382 | memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN); | 1382 | eth_zero_addr(wrqu.ap_addr.sa_data); |
1383 | wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL); | 1383 | wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL); |
1384 | } | 1384 | } |
1385 | 1385 | ||
@@ -1555,7 +1555,7 @@ struct net_device *init_atmel_card(unsigned short irq, unsigned long port, | |||
1555 | priv->last_qual = jiffies; | 1555 | priv->last_qual = jiffies; |
1556 | priv->last_beacon_timestamp = 0; | 1556 | priv->last_beacon_timestamp = 0; |
1557 | memset(priv->frag_source, 0xff, sizeof(priv->frag_source)); | 1557 | memset(priv->frag_source, 0xff, sizeof(priv->frag_source)); |
1558 | memset(priv->BSSID, 0, ETH_ALEN); | 1558 | eth_zero_addr(priv->BSSID); |
1559 | priv->CurrentBSSID[0] = 0xFF; /* Initialize to something invalid.... */ | 1559 | priv->CurrentBSSID[0] = 0xFF; /* Initialize to something invalid.... */ |
1560 | priv->station_was_associated = 0; | 1560 | priv->station_was_associated = 0; |
1561 | 1561 | ||
@@ -2760,7 +2760,7 @@ static void atmel_scan(struct atmel_private *priv, int specific_ssid) | |||
2760 | u8 SSID_size; | 2760 | u8 SSID_size; |
2761 | } cmd; | 2761 | } cmd; |
2762 | 2762 | ||
2763 | memset(cmd.BSSID, 0xff, ETH_ALEN); | 2763 | eth_broadcast_addr(cmd.BSSID); |
2764 | 2764 | ||
2765 | if (priv->fast_scan) { | 2765 | if (priv->fast_scan) { |
2766 | cmd.SSID_size = priv->SSID_size; | 2766 | cmd.SSID_size = priv->SSID_size; |
@@ -4049,7 +4049,7 @@ static int reset_atmel_card(struct net_device *dev) | |||
4049 | wrqu.data.length = 0; | 4049 | wrqu.data.length = 0; |
4050 | wrqu.data.flags = 0; | 4050 | wrqu.data.flags = 0; |
4051 | wrqu.ap_addr.sa_family = ARPHRD_ETHER; | 4051 | wrqu.ap_addr.sa_family = ARPHRD_ETHER; |
4052 | memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN); | 4052 | eth_zero_addr(wrqu.ap_addr.sa_data); |
4053 | wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL); | 4053 | wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL); |
4054 | } | 4054 | } |
4055 | 4055 | ||
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c index ccbdb05b28cd..31c7e4d41a9a 100644 --- a/drivers/net/wireless/b43/main.c +++ b/drivers/net/wireless/b43/main.c | |||
@@ -4132,7 +4132,7 @@ static void b43_op_bss_info_changed(struct ieee80211_hw *hw, | |||
4132 | if (conf->bssid) | 4132 | if (conf->bssid) |
4133 | memcpy(wl->bssid, conf->bssid, ETH_ALEN); | 4133 | memcpy(wl->bssid, conf->bssid, ETH_ALEN); |
4134 | else | 4134 | else |
4135 | memset(wl->bssid, 0, ETH_ALEN); | 4135 | eth_zero_addr(wl->bssid); |
4136 | } | 4136 | } |
4137 | 4137 | ||
4138 | if (b43_status(dev) >= B43_STAT_INITIALIZED) { | 4138 | if (b43_status(dev) >= B43_STAT_INITIALIZED) { |
@@ -5051,7 +5051,7 @@ static void b43_op_remove_interface(struct ieee80211_hw *hw, | |||
5051 | wl->operating = false; | 5051 | wl->operating = false; |
5052 | 5052 | ||
5053 | b43_adjust_opmode(dev); | 5053 | b43_adjust_opmode(dev); |
5054 | memset(wl->mac_addr, 0, ETH_ALEN); | 5054 | eth_zero_addr(wl->mac_addr); |
5055 | b43_upload_card_macaddress(dev); | 5055 | b43_upload_card_macaddress(dev); |
5056 | 5056 | ||
5057 | mutex_unlock(&wl->mutex); | 5057 | mutex_unlock(&wl->mutex); |
@@ -5067,8 +5067,8 @@ static int b43_op_start(struct ieee80211_hw *hw) | |||
5067 | /* Kill all old instance specific information to make sure | 5067 | /* Kill all old instance specific information to make sure |
5068 | * the card won't use it in the short timeframe between start | 5068 | * the card won't use it in the short timeframe between start |
5069 | * and mac80211 reconfiguring it. */ | 5069 | * and mac80211 reconfiguring it. */ |
5070 | memset(wl->bssid, 0, ETH_ALEN); | 5070 | eth_zero_addr(wl->bssid); |
5071 | memset(wl->mac_addr, 0, ETH_ALEN); | 5071 | eth_zero_addr(wl->mac_addr); |
5072 | wl->filter_flags = 0; | 5072 | wl->filter_flags = 0; |
5073 | wl->radiotap_enabled = false; | 5073 | wl->radiotap_enabled = false; |
5074 | b43_qos_clear(wl); | 5074 | b43_qos_clear(wl); |
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c index 4e58c0069830..c77b7f59505c 100644 --- a/drivers/net/wireless/b43legacy/main.c +++ b/drivers/net/wireless/b43legacy/main.c | |||
@@ -2866,7 +2866,7 @@ static void b43legacy_op_bss_info_changed(struct ieee80211_hw *hw, | |||
2866 | if (conf->bssid) | 2866 | if (conf->bssid) |
2867 | memcpy(wl->bssid, conf->bssid, ETH_ALEN); | 2867 | memcpy(wl->bssid, conf->bssid, ETH_ALEN); |
2868 | else | 2868 | else |
2869 | memset(wl->bssid, 0, ETH_ALEN); | 2869 | eth_zero_addr(wl->bssid); |
2870 | } | 2870 | } |
2871 | 2871 | ||
2872 | if (b43legacy_status(dev) >= B43legacy_STAT_INITIALIZED) { | 2872 | if (b43legacy_status(dev) >= B43legacy_STAT_INITIALIZED) { |
@@ -3470,7 +3470,7 @@ static void b43legacy_op_remove_interface(struct ieee80211_hw *hw, | |||
3470 | 3470 | ||
3471 | spin_lock_irqsave(&wl->irq_lock, flags); | 3471 | spin_lock_irqsave(&wl->irq_lock, flags); |
3472 | b43legacy_adjust_opmode(dev); | 3472 | b43legacy_adjust_opmode(dev); |
3473 | memset(wl->mac_addr, 0, ETH_ALEN); | 3473 | eth_zero_addr(wl->mac_addr); |
3474 | b43legacy_upload_card_macaddress(dev); | 3474 | b43legacy_upload_card_macaddress(dev); |
3475 | spin_unlock_irqrestore(&wl->irq_lock, flags); | 3475 | spin_unlock_irqrestore(&wl->irq_lock, flags); |
3476 | 3476 | ||
@@ -3487,8 +3487,8 @@ static int b43legacy_op_start(struct ieee80211_hw *hw) | |||
3487 | /* Kill all old instance specific information to make sure | 3487 | /* Kill all old instance specific information to make sure |
3488 | * the card won't use it in the short timeframe between start | 3488 | * the card won't use it in the short timeframe between start |
3489 | * and mac80211 reconfiguring it. */ | 3489 | * and mac80211 reconfiguring it. */ |
3490 | memset(wl->bssid, 0, ETH_ALEN); | 3490 | eth_zero_addr(wl->bssid); |
3491 | memset(wl->mac_addr, 0, ETH_ALEN); | 3491 | eth_zero_addr(wl->mac_addr); |
3492 | wl->filter_flags = 0; | 3492 | wl->filter_flags = 0; |
3493 | wl->beacon0_uploaded = false; | 3493 | wl->beacon0_uploaded = false; |
3494 | wl->beacon1_uploaded = false; | 3494 | wl->beacon1_uploaded = false; |
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c index b59b8c6c42ab..06727a61b438 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c | |||
@@ -700,7 +700,7 @@ s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg, | |||
700 | /* Do a scan abort to stop the driver's scan engine */ | 700 | /* Do a scan abort to stop the driver's scan engine */ |
701 | brcmf_dbg(SCAN, "ABORT scan in firmware\n"); | 701 | brcmf_dbg(SCAN, "ABORT scan in firmware\n"); |
702 | memset(¶ms_le, 0, sizeof(params_le)); | 702 | memset(¶ms_le, 0, sizeof(params_le)); |
703 | memset(params_le.bssid, 0xFF, ETH_ALEN); | 703 | eth_broadcast_addr(params_le.bssid); |
704 | params_le.bss_type = DOT11_BSSTYPE_ANY; | 704 | params_le.bss_type = DOT11_BSSTYPE_ANY; |
705 | params_le.scan_type = 0; | 705 | params_le.scan_type = 0; |
706 | params_le.channel_num = cpu_to_le32(1); | 706 | params_le.channel_num = cpu_to_le32(1); |
@@ -866,7 +866,7 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg, | |||
866 | char *ptr; | 866 | char *ptr; |
867 | struct brcmf_ssid_le ssid_le; | 867 | struct brcmf_ssid_le ssid_le; |
868 | 868 | ||
869 | memset(params_le->bssid, 0xFF, ETH_ALEN); | 869 | eth_broadcast_addr(params_le->bssid); |
870 | params_le->bss_type = DOT11_BSSTYPE_ANY; | 870 | params_le->bss_type = DOT11_BSSTYPE_ANY; |
871 | params_le->scan_type = 0; | 871 | params_le->scan_type = 0; |
872 | params_le->channel_num = 0; | 872 | params_le->channel_num = 0; |
@@ -1375,8 +1375,8 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev, | |||
1375 | BRCMF_ASSOC_PARAMS_FIXED_SIZE; | 1375 | BRCMF_ASSOC_PARAMS_FIXED_SIZE; |
1376 | memcpy(profile->bssid, params->bssid, ETH_ALEN); | 1376 | memcpy(profile->bssid, params->bssid, ETH_ALEN); |
1377 | } else { | 1377 | } else { |
1378 | memset(join_params.params_le.bssid, 0xFF, ETH_ALEN); | 1378 | eth_broadcast_addr(join_params.params_le.bssid); |
1379 | memset(profile->bssid, 0, ETH_ALEN); | 1379 | eth_zero_addr(profile->bssid); |
1380 | } | 1380 | } |
1381 | 1381 | ||
1382 | /* Channel */ | 1382 | /* Channel */ |
@@ -1850,7 +1850,7 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev, | |||
1850 | if (sme->bssid) | 1850 | if (sme->bssid) |
1851 | memcpy(&ext_join_params->assoc_le.bssid, sme->bssid, ETH_ALEN); | 1851 | memcpy(&ext_join_params->assoc_le.bssid, sme->bssid, ETH_ALEN); |
1852 | else | 1852 | else |
1853 | memset(&ext_join_params->assoc_le.bssid, 0xFF, ETH_ALEN); | 1853 | eth_broadcast_addr(ext_join_params->assoc_le.bssid); |
1854 | 1854 | ||
1855 | if (cfg->channel) { | 1855 | if (cfg->channel) { |
1856 | ext_join_params->assoc_le.chanspec_num = cpu_to_le32(1); | 1856 | ext_join_params->assoc_le.chanspec_num = cpu_to_le32(1); |
@@ -1895,7 +1895,7 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev, | |||
1895 | if (sme->bssid) | 1895 | if (sme->bssid) |
1896 | memcpy(join_params.params_le.bssid, sme->bssid, ETH_ALEN); | 1896 | memcpy(join_params.params_le.bssid, sme->bssid, ETH_ALEN); |
1897 | else | 1897 | else |
1898 | memset(join_params.params_le.bssid, 0xFF, ETH_ALEN); | 1898 | eth_broadcast_addr(join_params.params_le.bssid); |
1899 | 1899 | ||
1900 | if (cfg->channel) { | 1900 | if (cfg->channel) { |
1901 | join_params.params_le.chanspec_list[0] = cpu_to_le16(chanspec); | 1901 | join_params.params_le.chanspec_list[0] = cpu_to_le16(chanspec); |
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/flowring.c b/drivers/net/wireless/brcm80211/brcmfmac/flowring.c index 910fbb561469..eb1325371d3a 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/flowring.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/flowring.c | |||
@@ -236,7 +236,7 @@ void brcmf_flowring_delete(struct brcmf_flowring *flow, u8 flowid) | |||
236 | brcmf_flowring_block(flow, flowid, false); | 236 | brcmf_flowring_block(flow, flowid, false); |
237 | hash_idx = ring->hash_id; | 237 | hash_idx = ring->hash_id; |
238 | flow->hash[hash_idx].ifidx = BRCMF_FLOWRING_INVALID_IFIDX; | 238 | flow->hash[hash_idx].ifidx = BRCMF_FLOWRING_INVALID_IFIDX; |
239 | memset(flow->hash[hash_idx].mac, 0, ETH_ALEN); | 239 | eth_zero_addr(flow->hash[hash_idx].mac); |
240 | flow->rings[flowid] = NULL; | 240 | flow->rings[flowid] = NULL; |
241 | 241 | ||
242 | skb = skb_dequeue(&ring->skblist); | 242 | skb = skb_dequeue(&ring->skblist); |
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c index effb48ebd864..98d82ec52de1 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c | |||
@@ -697,7 +697,7 @@ static s32 brcmf_p2p_escan(struct brcmf_p2p_info *p2p, u32 num_chans, | |||
697 | else | 697 | else |
698 | sparams->scan_type = 1; | 698 | sparams->scan_type = 1; |
699 | 699 | ||
700 | memset(&sparams->bssid, 0xFF, ETH_ALEN); | 700 | eth_broadcast_addr(sparams->bssid); |
701 | if (ssid.SSID_len) | 701 | if (ssid.SSID_len) |
702 | memcpy(sparams->ssid_le.SSID, ssid.SSID, ssid.SSID_len); | 702 | memcpy(sparams->ssid_le.SSID, ssid.SSID, ssid.SSID_len); |
703 | sparams->ssid_le.SSID_len = cpu_to_le32(ssid.SSID_len); | 703 | sparams->ssid_le.SSID_len = cpu_to_le32(ssid.SSID_len); |
diff --git a/drivers/net/wireless/cw1200/sta.c b/drivers/net/wireless/cw1200/sta.c index 4a47c7f8a246..89bc18cd6700 100644 --- a/drivers/net/wireless/cw1200/sta.c +++ b/drivers/net/wireless/cw1200/sta.c | |||
@@ -293,7 +293,7 @@ void cw1200_remove_interface(struct ieee80211_hw *dev, | |||
293 | } | 293 | } |
294 | priv->vif = NULL; | 294 | priv->vif = NULL; |
295 | priv->mode = NL80211_IFTYPE_MONITOR; | 295 | priv->mode = NL80211_IFTYPE_MONITOR; |
296 | memset(priv->mac_addr, 0, ETH_ALEN); | 296 | eth_zero_addr(priv->mac_addr); |
297 | memset(&priv->p2p_ps_modeinfo, 0, sizeof(priv->p2p_ps_modeinfo)); | 297 | memset(&priv->p2p_ps_modeinfo, 0, sizeof(priv->p2p_ps_modeinfo)); |
298 | cw1200_free_keys(priv); | 298 | cw1200_free_keys(priv); |
299 | cw1200_setup_mac(priv); | 299 | cw1200_setup_mac(priv); |
diff --git a/drivers/net/wireless/cw1200/txrx.c b/drivers/net/wireless/cw1200/txrx.c index 0bd541175ecd..d28bd49cb5fd 100644 --- a/drivers/net/wireless/cw1200/txrx.c +++ b/drivers/net/wireless/cw1200/txrx.c | |||
@@ -1429,7 +1429,7 @@ void cw1200_link_id_gc_work(struct work_struct *work) | |||
1429 | priv->link_id_map &= ~mask; | 1429 | priv->link_id_map &= ~mask; |
1430 | priv->sta_asleep_mask &= ~mask; | 1430 | priv->sta_asleep_mask &= ~mask; |
1431 | priv->pspoll_mask &= ~mask; | 1431 | priv->pspoll_mask &= ~mask; |
1432 | memset(map_link.mac_addr, 0, ETH_ALEN); | 1432 | eth_zero_addr(map_link.mac_addr); |
1433 | spin_unlock_bh(&priv->ps_state_lock); | 1433 | spin_unlock_bh(&priv->ps_state_lock); |
1434 | reset.link_id = i + 1; | 1434 | reset.link_id = i + 1; |
1435 | wsm_reset(priv, &reset); | 1435 | wsm_reset(priv, &reset); |
diff --git a/drivers/net/wireless/hostap/hostap_80211_tx.c b/drivers/net/wireless/hostap/hostap_80211_tx.c index 8bde77689469..055e11d353ca 100644 --- a/drivers/net/wireless/hostap/hostap_80211_tx.c +++ b/drivers/net/wireless/hostap/hostap_80211_tx.c | |||
@@ -174,8 +174,8 @@ netdev_tx_t hostap_data_start_xmit(struct sk_buff *skb, | |||
174 | /* send broadcast and multicast frames to broadcast RA, if | 174 | /* send broadcast and multicast frames to broadcast RA, if |
175 | * configured; otherwise, use unicast RA of the WDS link */ | 175 | * configured; otherwise, use unicast RA of the WDS link */ |
176 | if ((local->wds_type & HOSTAP_WDS_BROADCAST_RA) && | 176 | if ((local->wds_type & HOSTAP_WDS_BROADCAST_RA) && |
177 | skb->data[0] & 0x01) | 177 | is_multicast_ether_addr(skb->data)) |
178 | memset(&hdr.addr1, 0xff, ETH_ALEN); | 178 | eth_broadcast_addr(hdr.addr1); |
179 | else if (iface->type == HOSTAP_INTERFACE_WDS) | 179 | else if (iface->type == HOSTAP_INTERFACE_WDS) |
180 | memcpy(&hdr.addr1, iface->u.wds.remote_addr, | 180 | memcpy(&hdr.addr1, iface->u.wds.remote_addr, |
181 | ETH_ALEN); | 181 | ETH_ALEN); |
diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c index fd8d83dd4f62..c995ace153ee 100644 --- a/drivers/net/wireless/hostap/hostap_ap.c +++ b/drivers/net/wireless/hostap/hostap_ap.c | |||
@@ -309,7 +309,7 @@ void hostap_deauth_all_stas(struct net_device *dev, struct ap_data *ap, | |||
309 | int i; | 309 | int i; |
310 | 310 | ||
311 | PDEBUG(DEBUG_AP, "%s: Deauthenticate all stations\n", dev->name); | 311 | PDEBUG(DEBUG_AP, "%s: Deauthenticate all stations\n", dev->name); |
312 | memset(addr, 0xff, ETH_ALEN); | 312 | eth_broadcast_addr(addr); |
313 | 313 | ||
314 | resp = cpu_to_le16(WLAN_REASON_PREV_AUTH_NOT_VALID); | 314 | resp = cpu_to_le16(WLAN_REASON_PREV_AUTH_NOT_VALID); |
315 | 315 | ||
@@ -1015,8 +1015,8 @@ static void prism2_send_mgmt(struct net_device *dev, | |||
1015 | memcpy(hdr->addr3, dev->dev_addr, ETH_ALEN); /* SA */ | 1015 | memcpy(hdr->addr3, dev->dev_addr, ETH_ALEN); /* SA */ |
1016 | } else if (ieee80211_is_ctl(hdr->frame_control)) { | 1016 | } else if (ieee80211_is_ctl(hdr->frame_control)) { |
1017 | /* control:ACK does not have addr2 or addr3 */ | 1017 | /* control:ACK does not have addr2 or addr3 */ |
1018 | memset(hdr->addr2, 0, ETH_ALEN); | 1018 | eth_zero_addr(hdr->addr2); |
1019 | memset(hdr->addr3, 0, ETH_ALEN); | 1019 | eth_zero_addr(hdr->addr3); |
1020 | } else { | 1020 | } else { |
1021 | memcpy(hdr->addr2, dev->dev_addr, ETH_ALEN); /* SA */ | 1021 | memcpy(hdr->addr2, dev->dev_addr, ETH_ALEN); /* SA */ |
1022 | memcpy(hdr->addr3, dev->dev_addr, ETH_ALEN); /* BSSID */ | 1022 | memcpy(hdr->addr3, dev->dev_addr, ETH_ALEN); /* BSSID */ |
@@ -1601,7 +1601,7 @@ static void handle_assoc(local_info_t *local, struct sk_buff *skb, | |||
1601 | memcpy(prev_ap, pos, ETH_ALEN); | 1601 | memcpy(prev_ap, pos, ETH_ALEN); |
1602 | pos++; pos++; pos++; left -= 6; | 1602 | pos++; pos++; pos++; left -= 6; |
1603 | } else | 1603 | } else |
1604 | memset(prev_ap, 0, ETH_ALEN); | 1604 | eth_zero_addr(prev_ap); |
1605 | 1605 | ||
1606 | if (left >= 2) { | 1606 | if (left >= 2) { |
1607 | unsigned int ileft; | 1607 | unsigned int ileft; |
diff --git a/drivers/net/wireless/hostap/hostap_info.c b/drivers/net/wireless/hostap/hostap_info.c index de7c4ffec309..7635ac4f6679 100644 --- a/drivers/net/wireless/hostap/hostap_info.c +++ b/drivers/net/wireless/hostap/hostap_info.c | |||
@@ -442,7 +442,7 @@ static void handle_info_queue_linkstatus(local_info_t *local) | |||
442 | } else { | 442 | } else { |
443 | netif_carrier_off(local->dev); | 443 | netif_carrier_off(local->dev); |
444 | netif_carrier_off(local->ddev); | 444 | netif_carrier_off(local->ddev); |
445 | memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN); | 445 | eth_zero_addr(wrqu.ap_addr.sa_data); |
446 | } | 446 | } |
447 | wrqu.ap_addr.sa_family = ARPHRD_ETHER; | 447 | wrqu.ap_addr.sa_family = ARPHRD_ETHER; |
448 | 448 | ||
diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c index 52919ad42726..01de1a3bf94e 100644 --- a/drivers/net/wireless/hostap/hostap_main.c +++ b/drivers/net/wireless/hostap/hostap_main.c | |||
@@ -224,7 +224,7 @@ int prism2_wds_del(local_info_t *local, u8 *remote_addr, | |||
224 | 224 | ||
225 | if (selected) { | 225 | if (selected) { |
226 | if (do_not_remove) | 226 | if (do_not_remove) |
227 | memset(selected->u.wds.remote_addr, 0, ETH_ALEN); | 227 | eth_zero_addr(selected->u.wds.remote_addr); |
228 | else { | 228 | else { |
229 | hostap_remove_interface(selected->dev, rtnl_locked, 0); | 229 | hostap_remove_interface(selected->dev, rtnl_locked, 0); |
230 | local->wds_connections--; | 230 | local->wds_connections--; |
@@ -798,7 +798,6 @@ static void prism2_tx_timeout(struct net_device *dev) | |||
798 | 798 | ||
799 | const struct header_ops hostap_80211_ops = { | 799 | const struct header_ops hostap_80211_ops = { |
800 | .create = eth_header, | 800 | .create = eth_header, |
801 | .rebuild = eth_rebuild_header, | ||
802 | .cache = eth_header_cache, | 801 | .cache = eth_header_cache, |
803 | .cache_update = eth_header_cache_update, | 802 | .cache_update = eth_header_cache_update, |
804 | .parse = hostap_80211_header_parse, | 803 | .parse = hostap_80211_header_parse, |
@@ -1088,7 +1087,7 @@ int prism2_sta_deauth(local_info_t *local, u16 reason) | |||
1088 | 1087 | ||
1089 | ret = prism2_sta_send_mgmt(local, local->bssid, IEEE80211_STYPE_DEAUTH, | 1088 | ret = prism2_sta_send_mgmt(local, local->bssid, IEEE80211_STYPE_DEAUTH, |
1090 | (u8 *) &val, 2); | 1089 | (u8 *) &val, 2); |
1091 | memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN); | 1090 | eth_zero_addr(wrqu.ap_addr.sa_data); |
1092 | wireless_send_event(local->dev, SIOCGIWAP, &wrqu, NULL); | 1091 | wireless_send_event(local->dev, SIOCGIWAP, &wrqu, NULL); |
1093 | return ret; | 1092 | return ret; |
1094 | } | 1093 | } |
diff --git a/drivers/net/wireless/hostap/hostap_wlan.h b/drivers/net/wireless/hostap/hostap_wlan.h index 57904015380f..ca25283e1c92 100644 --- a/drivers/net/wireless/hostap/hostap_wlan.h +++ b/drivers/net/wireless/hostap/hostap_wlan.h | |||
@@ -4,6 +4,7 @@ | |||
4 | #include <linux/interrupt.h> | 4 | #include <linux/interrupt.h> |
5 | #include <linux/wireless.h> | 5 | #include <linux/wireless.h> |
6 | #include <linux/netdevice.h> | 6 | #include <linux/netdevice.h> |
7 | #include <linux/etherdevice.h> | ||
7 | #include <linux/mutex.h> | 8 | #include <linux/mutex.h> |
8 | #include <net/iw_handler.h> | 9 | #include <net/iw_handler.h> |
9 | #include <net/ieee80211_radiotap.h> | 10 | #include <net/ieee80211_radiotap.h> |
@@ -85,16 +86,16 @@ struct hfa384x_rx_frame { | |||
85 | /* 802.11 */ | 86 | /* 802.11 */ |
86 | __le16 frame_control; | 87 | __le16 frame_control; |
87 | __le16 duration_id; | 88 | __le16 duration_id; |
88 | u8 addr1[6]; | 89 | u8 addr1[ETH_ALEN]; |
89 | u8 addr2[6]; | 90 | u8 addr2[ETH_ALEN]; |
90 | u8 addr3[6]; | 91 | u8 addr3[ETH_ALEN]; |
91 | __le16 seq_ctrl; | 92 | __le16 seq_ctrl; |
92 | u8 addr4[6]; | 93 | u8 addr4[ETH_ALEN]; |
93 | __le16 data_len; | 94 | __le16 data_len; |
94 | 95 | ||
95 | /* 802.3 */ | 96 | /* 802.3 */ |
96 | u8 dst_addr[6]; | 97 | u8 dst_addr[ETH_ALEN]; |
97 | u8 src_addr[6]; | 98 | u8 src_addr[ETH_ALEN]; |
98 | __be16 len; | 99 | __be16 len; |
99 | 100 | ||
100 | /* followed by frame data; max 2304 bytes */ | 101 | /* followed by frame data; max 2304 bytes */ |
@@ -114,16 +115,16 @@ struct hfa384x_tx_frame { | |||
114 | /* 802.11 */ | 115 | /* 802.11 */ |
115 | __le16 frame_control; /* parts not used */ | 116 | __le16 frame_control; /* parts not used */ |
116 | __le16 duration_id; | 117 | __le16 duration_id; |
117 | u8 addr1[6]; | 118 | u8 addr1[ETH_ALEN]; |
118 | u8 addr2[6]; /* filled by firmware */ | 119 | u8 addr2[ETH_ALEN]; /* filled by firmware */ |
119 | u8 addr3[6]; | 120 | u8 addr3[ETH_ALEN]; |
120 | __le16 seq_ctrl; /* filled by firmware */ | 121 | __le16 seq_ctrl; /* filled by firmware */ |
121 | u8 addr4[6]; | 122 | u8 addr4[ETH_ALEN]; |
122 | __le16 data_len; | 123 | __le16 data_len; |
123 | 124 | ||
124 | /* 802.3 */ | 125 | /* 802.3 */ |
125 | u8 dst_addr[6]; | 126 | u8 dst_addr[ETH_ALEN]; |
126 | u8 src_addr[6]; | 127 | u8 src_addr[ETH_ALEN]; |
127 | __be16 len; | 128 | __be16 len; |
128 | 129 | ||
129 | /* followed by frame data; max 2304 bytes */ | 130 | /* followed by frame data; max 2304 bytes */ |
@@ -156,7 +157,7 @@ struct hfa384x_hostscan_request { | |||
156 | } __packed; | 157 | } __packed; |
157 | 158 | ||
158 | struct hfa384x_join_request { | 159 | struct hfa384x_join_request { |
159 | u8 bssid[6]; | 160 | u8 bssid[ETH_ALEN]; |
160 | __le16 channel; | 161 | __le16 channel; |
161 | } __packed; | 162 | } __packed; |
162 | 163 | ||
@@ -228,7 +229,7 @@ struct hfa384x_scan_result { | |||
228 | __le16 chid; | 229 | __le16 chid; |
229 | __le16 anl; | 230 | __le16 anl; |
230 | __le16 sl; | 231 | __le16 sl; |
231 | u8 bssid[6]; | 232 | u8 bssid[ETH_ALEN]; |
232 | __le16 beacon_interval; | 233 | __le16 beacon_interval; |
233 | __le16 capability; | 234 | __le16 capability; |
234 | __le16 ssid_len; | 235 | __le16 ssid_len; |
@@ -241,7 +242,7 @@ struct hfa384x_hostscan_result { | |||
241 | __le16 chid; | 242 | __le16 chid; |
242 | __le16 anl; | 243 | __le16 anl; |
243 | __le16 sl; | 244 | __le16 sl; |
244 | u8 bssid[6]; | 245 | u8 bssid[ETH_ALEN]; |
245 | __le16 beacon_interval; | 246 | __le16 beacon_interval; |
246 | __le16 capability; | 247 | __le16 capability; |
247 | __le16 ssid_len; | 248 | __le16 ssid_len; |
@@ -824,7 +825,7 @@ struct local_info { | |||
824 | #define PRISM2_INFO_PENDING_SCANRESULTS 1 | 825 | #define PRISM2_INFO_PENDING_SCANRESULTS 1 |
825 | int prev_link_status; /* previous received LinkStatus info */ | 826 | int prev_link_status; /* previous received LinkStatus info */ |
826 | int prev_linkstatus_connected; | 827 | int prev_linkstatus_connected; |
827 | u8 preferred_ap[6]; /* use this AP if possible */ | 828 | u8 preferred_ap[ETH_ALEN]; /* use this AP if possible */ |
828 | 829 | ||
829 | #ifdef PRISM2_CALLBACK | 830 | #ifdef PRISM2_CALLBACK |
830 | void *callback_data; /* Can be used in callbacks; e.g., allocate | 831 | void *callback_data; /* Can be used in callbacks; e.g., allocate |
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c index 6fabea0309dd..08eb229e7816 100644 --- a/drivers/net/wireless/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/ipw2x00/ipw2100.c | |||
@@ -2147,8 +2147,8 @@ static void isr_indicate_association_lost(struct ipw2100_priv *priv, u32 status) | |||
2147 | return; | 2147 | return; |
2148 | } | 2148 | } |
2149 | 2149 | ||
2150 | memset(priv->bssid, 0, ETH_ALEN); | 2150 | eth_zero_addr(priv->bssid); |
2151 | memset(priv->ieee->bssid, 0, ETH_ALEN); | 2151 | eth_zero_addr(priv->ieee->bssid); |
2152 | 2152 | ||
2153 | netif_carrier_off(priv->net_dev); | 2153 | netif_carrier_off(priv->net_dev); |
2154 | netif_stop_queue(priv->net_dev); | 2154 | netif_stop_queue(priv->net_dev); |
@@ -6956,7 +6956,7 @@ static int ipw2100_wx_get_wap(struct net_device *dev, | |||
6956 | wrqu->ap_addr.sa_family = ARPHRD_ETHER; | 6956 | wrqu->ap_addr.sa_family = ARPHRD_ETHER; |
6957 | memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN); | 6957 | memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN); |
6958 | } else | 6958 | } else |
6959 | memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN); | 6959 | eth_zero_addr(wrqu->ap_addr.sa_data); |
6960 | 6960 | ||
6961 | IPW_DEBUG_WX("Getting WAP BSSID: %pM\n", wrqu->ap_addr.sa_data); | 6961 | IPW_DEBUG_WX("Getting WAP BSSID: %pM\n", wrqu->ap_addr.sa_data); |
6962 | return 0; | 6962 | return 0; |
@@ -8300,7 +8300,7 @@ static void ipw2100_wx_event_work(struct work_struct *work) | |||
8300 | priv->status & STATUS_RF_KILL_MASK || | 8300 | priv->status & STATUS_RF_KILL_MASK || |
8301 | ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_AP_BSSID, | 8301 | ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_AP_BSSID, |
8302 | &priv->bssid, &len)) { | 8302 | &priv->bssid, &len)) { |
8303 | memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN); | 8303 | eth_zero_addr(wrqu.ap_addr.sa_data); |
8304 | } else { | 8304 | } else { |
8305 | /* We now have the BSSID, so can finish setting to the full | 8305 | /* We now have the BSSID, so can finish setting to the full |
8306 | * associated state */ | 8306 | * associated state */ |
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c index 67cad9b05ad8..39f3e6f5cbcd 100644 --- a/drivers/net/wireless/ipw2x00/ipw2200.c +++ b/drivers/net/wireless/ipw2x00/ipw2200.c | |||
@@ -1964,7 +1964,7 @@ static void notify_wx_assoc_event(struct ipw_priv *priv) | |||
1964 | if (priv->status & STATUS_ASSOCIATED) | 1964 | if (priv->status & STATUS_ASSOCIATED) |
1965 | memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN); | 1965 | memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN); |
1966 | else | 1966 | else |
1967 | memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN); | 1967 | eth_zero_addr(wrqu.ap_addr.sa_data); |
1968 | wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL); | 1968 | wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL); |
1969 | } | 1969 | } |
1970 | 1970 | ||
@@ -7400,7 +7400,7 @@ static int ipw_associate_network(struct ipw_priv *priv, | |||
7400 | memcpy(priv->assoc_request.bssid, network->bssid, ETH_ALEN); | 7400 | memcpy(priv->assoc_request.bssid, network->bssid, ETH_ALEN); |
7401 | 7401 | ||
7402 | if (priv->ieee->iw_mode == IW_MODE_ADHOC) { | 7402 | if (priv->ieee->iw_mode == IW_MODE_ADHOC) { |
7403 | memset(&priv->assoc_request.dest, 0xFF, ETH_ALEN); | 7403 | eth_broadcast_addr(priv->assoc_request.dest); |
7404 | priv->assoc_request.atim_window = cpu_to_le16(network->atim_window); | 7404 | priv->assoc_request.atim_window = cpu_to_le16(network->atim_window); |
7405 | } else { | 7405 | } else { |
7406 | memcpy(priv->assoc_request.dest, network->bssid, ETH_ALEN); | 7406 | memcpy(priv->assoc_request.dest, network->bssid, ETH_ALEN); |
@@ -8986,7 +8986,7 @@ static int ipw_wx_get_wap(struct net_device *dev, | |||
8986 | wrqu->ap_addr.sa_family = ARPHRD_ETHER; | 8986 | wrqu->ap_addr.sa_family = ARPHRD_ETHER; |
8987 | memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN); | 8987 | memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN); |
8988 | } else | 8988 | } else |
8989 | memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN); | 8989 | eth_zero_addr(wrqu->ap_addr.sa_data); |
8990 | 8990 | ||
8991 | IPW_DEBUG_WX("Getting WAP BSSID: %pM\n", | 8991 | IPW_DEBUG_WX("Getting WAP BSSID: %pM\n", |
8992 | wrqu->ap_addr.sa_data); | 8992 | wrqu->ap_addr.sa_data); |
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c index 2c4fa49686ef..887114582583 100644 --- a/drivers/net/wireless/iwlegacy/common.c +++ b/drivers/net/wireless/iwlegacy/common.c | |||
@@ -4634,7 +4634,7 @@ il_mac_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) | |||
4634 | il->vif = NULL; | 4634 | il->vif = NULL; |
4635 | il->iw_mode = NL80211_IFTYPE_UNSPECIFIED; | 4635 | il->iw_mode = NL80211_IFTYPE_UNSPECIFIED; |
4636 | il_teardown_interface(il, vif); | 4636 | il_teardown_interface(il, vif); |
4637 | memset(il->bssid, 0, ETH_ALEN); | 4637 | eth_zero_addr(il->bssid); |
4638 | 4638 | ||
4639 | D_MAC80211("leave\n"); | 4639 | D_MAC80211("leave\n"); |
4640 | mutex_unlock(&il->mutex); | 4640 | mutex_unlock(&il->mutex); |
diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c index 2620dd0c45f9..33bbdde0046f 100644 --- a/drivers/net/wireless/iwlwifi/mvm/power.c +++ b/drivers/net/wireless/iwlwifi/mvm/power.c | |||
@@ -66,6 +66,7 @@ | |||
66 | #include <linux/kernel.h> | 66 | #include <linux/kernel.h> |
67 | #include <linux/module.h> | 67 | #include <linux/module.h> |
68 | #include <linux/slab.h> | 68 | #include <linux/slab.h> |
69 | #include <linux/etherdevice.h> | ||
69 | 70 | ||
70 | #include <net/mac80211.h> | 71 | #include <net/mac80211.h> |
71 | 72 | ||
@@ -491,7 +492,7 @@ void iwl_mvm_power_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif) | |||
491 | 492 | ||
492 | if (memcmp(vif->bss_conf.bssid, mvmvif->uapsd_misbehaving_bssid, | 493 | if (memcmp(vif->bss_conf.bssid, mvmvif->uapsd_misbehaving_bssid, |
493 | ETH_ALEN)) | 494 | ETH_ALEN)) |
494 | memset(mvmvif->uapsd_misbehaving_bssid, 0, ETH_ALEN); | 495 | eth_zero_addr(mvmvif->uapsd_misbehaving_bssid); |
495 | } | 496 | } |
496 | 497 | ||
497 | static void iwl_mvm_power_uapsd_misbehav_ap_iterator(void *_data, u8 *mac, | 498 | static void iwl_mvm_power_uapsd_misbehav_ap_iterator(void *_data, u8 *mac, |
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c index 569b64ecc607..8079560f4965 100644 --- a/drivers/net/wireless/libertas/main.c +++ b/drivers/net/wireless/libertas/main.c | |||
@@ -667,7 +667,7 @@ static int lbs_setup_firmware(struct lbs_private *priv) | |||
667 | lbs_deb_enter(LBS_DEB_FW); | 667 | lbs_deb_enter(LBS_DEB_FW); |
668 | 668 | ||
669 | /* Read MAC address from firmware */ | 669 | /* Read MAC address from firmware */ |
670 | memset(priv->current_addr, 0xff, ETH_ALEN); | 670 | eth_broadcast_addr(priv->current_addr); |
671 | ret = lbs_update_hw_spec(priv); | 671 | ret = lbs_update_hw_spec(priv); |
672 | if (ret) | 672 | if (ret) |
673 | goto done; | 673 | goto done; |
@@ -871,7 +871,7 @@ static int lbs_init_adapter(struct lbs_private *priv) | |||
871 | 871 | ||
872 | lbs_deb_enter(LBS_DEB_MAIN); | 872 | lbs_deb_enter(LBS_DEB_MAIN); |
873 | 873 | ||
874 | memset(priv->current_addr, 0xff, ETH_ALEN); | 874 | eth_broadcast_addr(priv->current_addr); |
875 | 875 | ||
876 | priv->connect_status = LBS_DISCONNECTED; | 876 | priv->connect_status = LBS_DISCONNECTED; |
877 | priv->channel = DEFAULT_AD_HOC_CHANNEL; | 877 | priv->channel = DEFAULT_AD_HOC_CHANNEL; |
diff --git a/drivers/net/wireless/libertas_tf/main.c b/drivers/net/wireless/libertas_tf/main.c index 25c5acc78bd1..ed02e4bf2c26 100644 --- a/drivers/net/wireless/libertas_tf/main.c +++ b/drivers/net/wireless/libertas_tf/main.c | |||
@@ -152,7 +152,7 @@ static int lbtf_setup_firmware(struct lbtf_private *priv) | |||
152 | /* | 152 | /* |
153 | * Read priv address from HW | 153 | * Read priv address from HW |
154 | */ | 154 | */ |
155 | memset(priv->current_addr, 0xff, ETH_ALEN); | 155 | eth_broadcast_addr(priv->current_addr); |
156 | ret = lbtf_update_hw_spec(priv); | 156 | ret = lbtf_update_hw_spec(priv); |
157 | if (ret) { | 157 | if (ret) { |
158 | ret = -1; | 158 | ret = -1; |
@@ -199,7 +199,7 @@ out: | |||
199 | static int lbtf_init_adapter(struct lbtf_private *priv) | 199 | static int lbtf_init_adapter(struct lbtf_private *priv) |
200 | { | 200 | { |
201 | lbtf_deb_enter(LBTF_DEB_MAIN); | 201 | lbtf_deb_enter(LBTF_DEB_MAIN); |
202 | memset(priv->current_addr, 0xff, ETH_ALEN); | 202 | eth_broadcast_addr(priv->current_addr); |
203 | mutex_init(&priv->lock); | 203 | mutex_init(&priv->lock); |
204 | 204 | ||
205 | priv->vif = NULL; | 205 | priv->vif = NULL; |
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 8908be6dbc48..d56b7859a437 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c | |||
@@ -1911,7 +1911,7 @@ static void mac80211_hwsim_sw_scan_complete(struct ieee80211_hw *hw, | |||
1911 | 1911 | ||
1912 | printk(KERN_DEBUG "hwsim sw_scan_complete\n"); | 1912 | printk(KERN_DEBUG "hwsim sw_scan_complete\n"); |
1913 | hwsim->scanning = false; | 1913 | hwsim->scanning = false; |
1914 | memset(hwsim->scan_addr, 0, ETH_ALEN); | 1914 | eth_zero_addr(hwsim->scan_addr); |
1915 | 1915 | ||
1916 | mutex_unlock(&hwsim->mutex); | 1916 | mutex_unlock(&hwsim->mutex); |
1917 | } | 1917 | } |
@@ -2267,7 +2267,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, | |||
2267 | skb_queue_head_init(&data->pending); | 2267 | skb_queue_head_init(&data->pending); |
2268 | 2268 | ||
2269 | SET_IEEE80211_DEV(hw, data->dev); | 2269 | SET_IEEE80211_DEV(hw, data->dev); |
2270 | memset(addr, 0, ETH_ALEN); | 2270 | eth_zero_addr(addr); |
2271 | addr[0] = 0x02; | 2271 | addr[0] = 0x02; |
2272 | addr[3] = idx >> 8; | 2272 | addr[3] = idx >> 8; |
2273 | addr[4] = idx; | 2273 | addr[4] = idx; |
@@ -2600,7 +2600,7 @@ static void hwsim_mon_setup(struct net_device *dev) | |||
2600 | ether_setup(dev); | 2600 | ether_setup(dev); |
2601 | dev->tx_queue_len = 0; | 2601 | dev->tx_queue_len = 0; |
2602 | dev->type = ARPHRD_IEEE80211_RADIOTAP; | 2602 | dev->type = ARPHRD_IEEE80211_RADIOTAP; |
2603 | memset(dev->dev_addr, 0, ETH_ALEN); | 2603 | eth_zero_addr(dev->dev_addr); |
2604 | dev->dev_addr[0] = 0x12; | 2604 | dev->dev_addr[0] = 0x12; |
2605 | } | 2605 | } |
2606 | 2606 | ||
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c index 41c8e25df954..7c3ca2f50186 100644 --- a/drivers/net/wireless/mwifiex/cfg80211.c +++ b/drivers/net/wireless/mwifiex/cfg80211.c | |||
@@ -1563,7 +1563,7 @@ mwifiex_cfg80211_del_station(struct wiphy *wiphy, struct net_device *dev, | |||
1563 | 1563 | ||
1564 | wiphy_dbg(wiphy, "%s: mac address %pM\n", __func__, params->mac); | 1564 | wiphy_dbg(wiphy, "%s: mac address %pM\n", __func__, params->mac); |
1565 | 1565 | ||
1566 | memset(deauth_mac, 0, ETH_ALEN); | 1566 | eth_zero_addr(deauth_mac); |
1567 | 1567 | ||
1568 | spin_lock_irqsave(&priv->sta_list_spinlock, flags); | 1568 | spin_lock_irqsave(&priv->sta_list_spinlock, flags); |
1569 | sta_node = mwifiex_get_sta_entry(priv, params->mac); | 1569 | sta_node = mwifiex_get_sta_entry(priv, params->mac); |
@@ -1786,7 +1786,7 @@ mwifiex_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev, | |||
1786 | wiphy_dbg(wiphy, "info: successfully disconnected from %pM:" | 1786 | wiphy_dbg(wiphy, "info: successfully disconnected from %pM:" |
1787 | " reason code %d\n", priv->cfg_bssid, reason_code); | 1787 | " reason code %d\n", priv->cfg_bssid, reason_code); |
1788 | 1788 | ||
1789 | memset(priv->cfg_bssid, 0, ETH_ALEN); | 1789 | eth_zero_addr(priv->cfg_bssid); |
1790 | priv->hs2_enabled = false; | 1790 | priv->hs2_enabled = false; |
1791 | 1791 | ||
1792 | return 0; | 1792 | return 0; |
@@ -2046,7 +2046,7 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, | |||
2046 | dev_dbg(priv->adapter->dev, | 2046 | dev_dbg(priv->adapter->dev, |
2047 | "info: association to bssid %pM failed\n", | 2047 | "info: association to bssid %pM failed\n", |
2048 | priv->cfg_bssid); | 2048 | priv->cfg_bssid); |
2049 | memset(priv->cfg_bssid, 0, ETH_ALEN); | 2049 | eth_zero_addr(priv->cfg_bssid); |
2050 | 2050 | ||
2051 | if (ret > 0) | 2051 | if (ret > 0) |
2052 | cfg80211_connect_result(priv->netdev, priv->cfg_bssid, | 2052 | cfg80211_connect_result(priv->netdev, priv->cfg_bssid, |
@@ -2194,7 +2194,7 @@ mwifiex_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev) | |||
2194 | if (mwifiex_deauthenticate(priv, NULL)) | 2194 | if (mwifiex_deauthenticate(priv, NULL)) |
2195 | return -EFAULT; | 2195 | return -EFAULT; |
2196 | 2196 | ||
2197 | memset(priv->cfg_bssid, 0, ETH_ALEN); | 2197 | eth_zero_addr(priv->cfg_bssid); |
2198 | 2198 | ||
2199 | return 0; | 2199 | return 0; |
2200 | } | 2200 | } |
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c index b77ba743e1c4..0978b1cc58b6 100644 --- a/drivers/net/wireless/mwifiex/init.c +++ b/drivers/net/wireless/mwifiex/init.c | |||
@@ -76,7 +76,7 @@ int mwifiex_init_priv(struct mwifiex_private *priv) | |||
76 | u32 i; | 76 | u32 i; |
77 | 77 | ||
78 | priv->media_connected = false; | 78 | priv->media_connected = false; |
79 | memset(priv->curr_addr, 0xff, ETH_ALEN); | 79 | eth_broadcast_addr(priv->curr_addr); |
80 | 80 | ||
81 | priv->pkt_tx_ctrl = 0; | 81 | priv->pkt_tx_ctrl = 0; |
82 | priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED; | 82 | priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED; |
@@ -299,7 +299,7 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter) | |||
299 | adapter->ext_scan = false; | 299 | adapter->ext_scan = false; |
300 | adapter->key_api_major_ver = 0; | 300 | adapter->key_api_major_ver = 0; |
301 | adapter->key_api_minor_ver = 0; | 301 | adapter->key_api_minor_ver = 0; |
302 | memset(adapter->perm_addr, 0xff, ETH_ALEN); | 302 | eth_broadcast_addr(adapter->perm_addr); |
303 | adapter->iface_limit.sta_intf = MWIFIEX_MAX_STA_NUM; | 303 | adapter->iface_limit.sta_intf = MWIFIEX_MAX_STA_NUM; |
304 | adapter->iface_limit.uap_intf = MWIFIEX_MAX_UAP_NUM; | 304 | adapter->iface_limit.uap_intf = MWIFIEX_MAX_UAP_NUM; |
305 | adapter->iface_limit.p2p_intf = MWIFIEX_MAX_P2P_NUM; | 305 | adapter->iface_limit.p2p_intf = MWIFIEX_MAX_P2P_NUM; |
diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c index 80ffe7412496..64c4223a1e1e 100644 --- a/drivers/net/wireless/mwifiex/sta_event.c +++ b/drivers/net/wireless/mwifiex/sta_event.c | |||
@@ -135,7 +135,7 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code) | |||
135 | cfg80211_disconnected(priv->netdev, reason_code, NULL, 0, | 135 | cfg80211_disconnected(priv->netdev, reason_code, NULL, 0, |
136 | GFP_KERNEL); | 136 | GFP_KERNEL); |
137 | } | 137 | } |
138 | memset(priv->cfg_bssid, 0, ETH_ALEN); | 138 | eth_zero_addr(priv->cfg_bssid); |
139 | 139 | ||
140 | mwifiex_stop_net_dev_queue(priv->netdev, adapter); | 140 | mwifiex_stop_net_dev_queue(priv->netdev, adapter); |
141 | if (netif_carrier_ok(priv->netdev)) | 141 | if (netif_carrier_ok(priv->netdev)) |
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c index ef717acec8b7..0cd4f6bed9fc 100644 --- a/drivers/net/wireless/mwifiex/wmm.c +++ b/drivers/net/wireless/mwifiex/wmm.c | |||
@@ -730,7 +730,7 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv, | |||
730 | } else { | 730 | } else { |
731 | memcpy(ra, skb->data, ETH_ALEN); | 731 | memcpy(ra, skb->data, ETH_ALEN); |
732 | if (ra[0] & 0x01 || mwifiex_is_skb_mgmt_frame(skb)) | 732 | if (ra[0] & 0x01 || mwifiex_is_skb_mgmt_frame(skb)) |
733 | memset(ra, 0xff, ETH_ALEN); | 733 | eth_broadcast_addr(ra); |
734 | ra_list = mwifiex_wmm_get_queue_raptr(priv, tid_down, ra); | 734 | ra_list = mwifiex_wmm_get_queue_raptr(priv, tid_down, ra); |
735 | } | 735 | } |
736 | 736 | ||
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c index f9b1218c761a..95921167b53f 100644 --- a/drivers/net/wireless/mwl8k.c +++ b/drivers/net/wireless/mwl8k.c | |||
@@ -1277,7 +1277,7 @@ static inline void mwl8k_save_beacon(struct ieee80211_hw *hw, | |||
1277 | struct mwl8k_priv *priv = hw->priv; | 1277 | struct mwl8k_priv *priv = hw->priv; |
1278 | 1278 | ||
1279 | priv->capture_beacon = false; | 1279 | priv->capture_beacon = false; |
1280 | memset(priv->capture_bssid, 0, ETH_ALEN); | 1280 | eth_zero_addr(priv->capture_bssid); |
1281 | 1281 | ||
1282 | /* | 1282 | /* |
1283 | * Use GFP_ATOMIC as rxq_process is called from | 1283 | * Use GFP_ATOMIC as rxq_process is called from |
diff --git a/drivers/net/wireless/orinoco/wext.c b/drivers/net/wireless/orinoco/wext.c index 6abdaf0aa052..1d4dae422106 100644 --- a/drivers/net/wireless/orinoco/wext.c +++ b/drivers/net/wireless/orinoco/wext.c | |||
@@ -168,7 +168,7 @@ static int orinoco_ioctl_setwap(struct net_device *dev, | |||
168 | if (is_zero_ether_addr(ap_addr->sa_data) || | 168 | if (is_zero_ether_addr(ap_addr->sa_data) || |
169 | is_broadcast_ether_addr(ap_addr->sa_data)) { | 169 | is_broadcast_ether_addr(ap_addr->sa_data)) { |
170 | priv->bssid_fixed = 0; | 170 | priv->bssid_fixed = 0; |
171 | memset(priv->desired_bssid, 0, ETH_ALEN); | 171 | eth_zero_addr(priv->desired_bssid); |
172 | 172 | ||
173 | /* "off" means keep existing connection */ | 173 | /* "off" means keep existing connection */ |
174 | if (ap_addr->sa_data[0] == 0) { | 174 | if (ap_addr->sa_data[0] == 0) { |
diff --git a/drivers/net/wireless/p54/fwio.c b/drivers/net/wireless/p54/fwio.c index 5367d510b22d..275408eaf95e 100644 --- a/drivers/net/wireless/p54/fwio.c +++ b/drivers/net/wireless/p54/fwio.c | |||
@@ -671,7 +671,7 @@ int p54_upload_key(struct p54_common *priv, u8 algo, int slot, u8 idx, u8 len, | |||
671 | if (addr) | 671 | if (addr) |
672 | memcpy(rxkey->mac, addr, ETH_ALEN); | 672 | memcpy(rxkey->mac, addr, ETH_ALEN); |
673 | else | 673 | else |
674 | memset(rxkey->mac, ~0, ETH_ALEN); | 674 | eth_broadcast_addr(rxkey->mac); |
675 | 675 | ||
676 | switch (algo) { | 676 | switch (algo) { |
677 | case P54_CRYPTO_WEP: | 677 | case P54_CRYPTO_WEP: |
diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c index b9250d75d253..e79674f73dc5 100644 --- a/drivers/net/wireless/p54/main.c +++ b/drivers/net/wireless/p54/main.c | |||
@@ -182,7 +182,7 @@ static int p54_start(struct ieee80211_hw *dev) | |||
182 | if (err) | 182 | if (err) |
183 | goto out; | 183 | goto out; |
184 | 184 | ||
185 | memset(priv->bssid, ~0, ETH_ALEN); | 185 | eth_broadcast_addr(priv->bssid); |
186 | priv->mode = NL80211_IFTYPE_MONITOR; | 186 | priv->mode = NL80211_IFTYPE_MONITOR; |
187 | err = p54_setup_mac(priv); | 187 | err = p54_setup_mac(priv); |
188 | if (err) { | 188 | if (err) { |
@@ -274,8 +274,8 @@ static void p54_remove_interface(struct ieee80211_hw *dev, | |||
274 | wait_for_completion_interruptible_timeout(&priv->beacon_comp, HZ); | 274 | wait_for_completion_interruptible_timeout(&priv->beacon_comp, HZ); |
275 | } | 275 | } |
276 | priv->mode = NL80211_IFTYPE_MONITOR; | 276 | priv->mode = NL80211_IFTYPE_MONITOR; |
277 | memset(priv->mac_addr, 0, ETH_ALEN); | 277 | eth_zero_addr(priv->mac_addr); |
278 | memset(priv->bssid, 0, ETH_ALEN); | 278 | eth_zero_addr(priv->bssid); |
279 | p54_setup_mac(priv); | 279 | p54_setup_mac(priv); |
280 | mutex_unlock(&priv->conf_mutex); | 280 | mutex_unlock(&priv->conf_mutex); |
281 | } | 281 | } |
@@ -794,7 +794,7 @@ struct ieee80211_hw *p54_init_common(size_t priv_data_len) | |||
794 | init_completion(&priv->beacon_comp); | 794 | init_completion(&priv->beacon_comp); |
795 | INIT_DELAYED_WORK(&priv->work, p54_work); | 795 | INIT_DELAYED_WORK(&priv->work, p54_work); |
796 | 796 | ||
797 | memset(&priv->mc_maclist[0], ~0, ETH_ALEN); | 797 | eth_broadcast_addr(priv->mc_maclist[0]); |
798 | priv->curchan = NULL; | 798 | priv->curchan = NULL; |
799 | p54_reset_stats(priv); | 799 | p54_reset_stats(priv); |
800 | return dev; | 800 | return dev; |
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c index 8330fa33e50b..477f86354dc5 100644 --- a/drivers/net/wireless/ray_cs.c +++ b/drivers/net/wireless/ray_cs.c | |||
@@ -808,7 +808,7 @@ static int ray_dev_init(struct net_device *dev) | |||
808 | 808 | ||
809 | /* copy mac and broadcast addresses to linux device */ | 809 | /* copy mac and broadcast addresses to linux device */ |
810 | memcpy(dev->dev_addr, &local->sparm.b4.a_mac_addr, ADDRLEN); | 810 | memcpy(dev->dev_addr, &local->sparm.b4.a_mac_addr, ADDRLEN); |
811 | memset(dev->broadcast, 0xff, ETH_ALEN); | 811 | eth_broadcast_addr(dev->broadcast); |
812 | 812 | ||
813 | dev_dbg(&link->dev, "ray_dev_init ending\n"); | 813 | dev_dbg(&link->dev, "ray_dev_init ending\n"); |
814 | return 0; | 814 | return 0; |
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c index 60d44ce9c017..d72ff8e7125d 100644 --- a/drivers/net/wireless/rndis_wlan.c +++ b/drivers/net/wireless/rndis_wlan.c | |||
@@ -199,13 +199,13 @@ enum ndis_80211_pmkid_cand_list_flag_bits { | |||
199 | 199 | ||
200 | struct ndis_80211_auth_request { | 200 | struct ndis_80211_auth_request { |
201 | __le32 length; | 201 | __le32 length; |
202 | u8 bssid[6]; | 202 | u8 bssid[ETH_ALEN]; |
203 | u8 padding[2]; | 203 | u8 padding[2]; |
204 | __le32 flags; | 204 | __le32 flags; |
205 | } __packed; | 205 | } __packed; |
206 | 206 | ||
207 | struct ndis_80211_pmkid_candidate { | 207 | struct ndis_80211_pmkid_candidate { |
208 | u8 bssid[6]; | 208 | u8 bssid[ETH_ALEN]; |
209 | u8 padding[2]; | 209 | u8 padding[2]; |
210 | __le32 flags; | 210 | __le32 flags; |
211 | } __packed; | 211 | } __packed; |
@@ -248,7 +248,7 @@ struct ndis_80211_conf { | |||
248 | 248 | ||
249 | struct ndis_80211_bssid_ex { | 249 | struct ndis_80211_bssid_ex { |
250 | __le32 length; | 250 | __le32 length; |
251 | u8 mac[6]; | 251 | u8 mac[ETH_ALEN]; |
252 | u8 padding[2]; | 252 | u8 padding[2]; |
253 | struct ndis_80211_ssid ssid; | 253 | struct ndis_80211_ssid ssid; |
254 | __le32 privacy; | 254 | __le32 privacy; |
@@ -283,7 +283,7 @@ struct ndis_80211_key { | |||
283 | __le32 size; | 283 | __le32 size; |
284 | __le32 index; | 284 | __le32 index; |
285 | __le32 length; | 285 | __le32 length; |
286 | u8 bssid[6]; | 286 | u8 bssid[ETH_ALEN]; |
287 | u8 padding[6]; | 287 | u8 padding[6]; |
288 | u8 rsc[8]; | 288 | u8 rsc[8]; |
289 | u8 material[32]; | 289 | u8 material[32]; |
@@ -292,7 +292,7 @@ struct ndis_80211_key { | |||
292 | struct ndis_80211_remove_key { | 292 | struct ndis_80211_remove_key { |
293 | __le32 size; | 293 | __le32 size; |
294 | __le32 index; | 294 | __le32 index; |
295 | u8 bssid[6]; | 295 | u8 bssid[ETH_ALEN]; |
296 | u8 padding[2]; | 296 | u8 padding[2]; |
297 | } __packed; | 297 | } __packed; |
298 | 298 | ||
@@ -310,7 +310,7 @@ struct ndis_80211_assoc_info { | |||
310 | struct req_ie { | 310 | struct req_ie { |
311 | __le16 capa; | 311 | __le16 capa; |
312 | __le16 listen_interval; | 312 | __le16 listen_interval; |
313 | u8 cur_ap_address[6]; | 313 | u8 cur_ap_address[ETH_ALEN]; |
314 | } req_ie; | 314 | } req_ie; |
315 | __le32 req_ie_length; | 315 | __le32 req_ie_length; |
316 | __le32 offset_req_ies; | 316 | __le32 offset_req_ies; |
@@ -338,7 +338,7 @@ struct ndis_80211_capability { | |||
338 | } __packed; | 338 | } __packed; |
339 | 339 | ||
340 | struct ndis_80211_bssid_info { | 340 | struct ndis_80211_bssid_info { |
341 | u8 bssid[6]; | 341 | u8 bssid[ETH_ALEN]; |
342 | u8 pmkid[16]; | 342 | u8 pmkid[16]; |
343 | } __packed; | 343 | } __packed; |
344 | 344 | ||
@@ -1037,7 +1037,7 @@ static int get_bssid(struct usbnet *usbdev, u8 bssid[ETH_ALEN]) | |||
1037 | bssid, &len); | 1037 | bssid, &len); |
1038 | 1038 | ||
1039 | if (ret != 0) | 1039 | if (ret != 0) |
1040 | memset(bssid, 0, ETH_ALEN); | 1040 | eth_zero_addr(bssid); |
1041 | 1041 | ||
1042 | return ret; | 1042 | return ret; |
1043 | } | 1043 | } |
@@ -1391,7 +1391,7 @@ static int add_wep_key(struct usbnet *usbdev, const u8 *key, int key_len, | |||
1391 | priv->encr_keys[index].len = key_len; | 1391 | priv->encr_keys[index].len = key_len; |
1392 | priv->encr_keys[index].cipher = cipher; | 1392 | priv->encr_keys[index].cipher = cipher; |
1393 | memcpy(&priv->encr_keys[index].material, key, key_len); | 1393 | memcpy(&priv->encr_keys[index].material, key, key_len); |
1394 | memset(&priv->encr_keys[index].bssid, 0xff, ETH_ALEN); | 1394 | eth_broadcast_addr(priv->encr_keys[index].bssid); |
1395 | 1395 | ||
1396 | return 0; | 1396 | return 0; |
1397 | } | 1397 | } |
@@ -1466,7 +1466,7 @@ static int add_wpa_key(struct usbnet *usbdev, const u8 *key, int key_len, | |||
1466 | } else { | 1466 | } else { |
1467 | /* group key */ | 1467 | /* group key */ |
1468 | if (priv->infra_mode == NDIS_80211_INFRA_ADHOC) | 1468 | if (priv->infra_mode == NDIS_80211_INFRA_ADHOC) |
1469 | memset(ndis_key.bssid, 0xff, ETH_ALEN); | 1469 | eth_broadcast_addr(ndis_key.bssid); |
1470 | else | 1470 | else |
1471 | get_bssid(usbdev, ndis_key.bssid); | 1471 | get_bssid(usbdev, ndis_key.bssid); |
1472 | } | 1472 | } |
@@ -1486,7 +1486,7 @@ static int add_wpa_key(struct usbnet *usbdev, const u8 *key, int key_len, | |||
1486 | if (flags & NDIS_80211_ADDKEY_PAIRWISE_KEY) | 1486 | if (flags & NDIS_80211_ADDKEY_PAIRWISE_KEY) |
1487 | memcpy(&priv->encr_keys[index].bssid, ndis_key.bssid, ETH_ALEN); | 1487 | memcpy(&priv->encr_keys[index].bssid, ndis_key.bssid, ETH_ALEN); |
1488 | else | 1488 | else |
1489 | memset(&priv->encr_keys[index].bssid, 0xff, ETH_ALEN); | 1489 | eth_broadcast_addr(priv->encr_keys[index].bssid); |
1490 | 1490 | ||
1491 | if (flags & NDIS_80211_ADDKEY_TRANSMIT_KEY) | 1491 | if (flags & NDIS_80211_ADDKEY_TRANSMIT_KEY) |
1492 | priv->encr_tx_key_index = index; | 1492 | priv->encr_tx_key_index = index; |
@@ -2280,7 +2280,7 @@ static int rndis_disconnect(struct wiphy *wiphy, struct net_device *dev, | |||
2280 | netdev_dbg(usbdev->net, "cfg80211.disconnect(%d)\n", reason_code); | 2280 | netdev_dbg(usbdev->net, "cfg80211.disconnect(%d)\n", reason_code); |
2281 | 2281 | ||
2282 | priv->connected = false; | 2282 | priv->connected = false; |
2283 | memset(priv->bssid, 0, ETH_ALEN); | 2283 | eth_zero_addr(priv->bssid); |
2284 | 2284 | ||
2285 | return deauthenticate(usbdev); | 2285 | return deauthenticate(usbdev); |
2286 | } | 2286 | } |
@@ -2392,7 +2392,7 @@ static int rndis_leave_ibss(struct wiphy *wiphy, struct net_device *dev) | |||
2392 | netdev_dbg(usbdev->net, "cfg80211.leave_ibss()\n"); | 2392 | netdev_dbg(usbdev->net, "cfg80211.leave_ibss()\n"); |
2393 | 2393 | ||
2394 | priv->connected = false; | 2394 | priv->connected = false; |
2395 | memset(priv->bssid, 0, ETH_ALEN); | 2395 | eth_zero_addr(priv->bssid); |
2396 | 2396 | ||
2397 | return deauthenticate(usbdev); | 2397 | return deauthenticate(usbdev); |
2398 | } | 2398 | } |
@@ -2857,7 +2857,7 @@ static void rndis_wlan_do_link_down_work(struct usbnet *usbdev) | |||
2857 | 2857 | ||
2858 | if (priv->connected) { | 2858 | if (priv->connected) { |
2859 | priv->connected = false; | 2859 | priv->connected = false; |
2860 | memset(priv->bssid, 0, ETH_ALEN); | 2860 | eth_zero_addr(priv->bssid); |
2861 | 2861 | ||
2862 | deauthenticate(usbdev); | 2862 | deauthenticate(usbdev); |
2863 | 2863 | ||
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c index a31a12775f1a..3b3a88b53b11 100644 --- a/drivers/net/wireless/rtlwifi/core.c +++ b/drivers/net/wireless/rtlwifi/core.c | |||
@@ -195,7 +195,7 @@ static void rtl_op_stop(struct ieee80211_hw *hw) | |||
195 | if (!(support_remote_wakeup && | 195 | if (!(support_remote_wakeup && |
196 | rtlhal->enter_pnp_sleep)) { | 196 | rtlhal->enter_pnp_sleep)) { |
197 | mac->link_state = MAC80211_NOLINK; | 197 | mac->link_state = MAC80211_NOLINK; |
198 | memset(mac->bssid, 0, 6); | 198 | eth_zero_addr(mac->bssid); |
199 | mac->vendor = PEER_UNKNOWN; | 199 | mac->vendor = PEER_UNKNOWN; |
200 | 200 | ||
201 | /* reset sec info */ | 201 | /* reset sec info */ |
@@ -357,7 +357,7 @@ static void rtl_op_remove_interface(struct ieee80211_hw *hw, | |||
357 | mac->p2p = 0; | 357 | mac->p2p = 0; |
358 | mac->vif = NULL; | 358 | mac->vif = NULL; |
359 | mac->link_state = MAC80211_NOLINK; | 359 | mac->link_state = MAC80211_NOLINK; |
360 | memset(mac->bssid, 0, ETH_ALEN); | 360 | eth_zero_addr(mac->bssid); |
361 | mac->vendor = PEER_UNKNOWN; | 361 | mac->vendor = PEER_UNKNOWN; |
362 | mac->opmode = NL80211_IFTYPE_UNSPECIFIED; | 362 | mac->opmode = NL80211_IFTYPE_UNSPECIFIED; |
363 | rtlpriv->cfg->ops->set_network_type(hw, mac->opmode); | 363 | rtlpriv->cfg->ops->set_network_type(hw, mac->opmode); |
@@ -1157,7 +1157,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw, | |||
1157 | if (ppsc->p2p_ps_info.p2p_ps_mode > P2P_PS_NONE) | 1157 | if (ppsc->p2p_ps_info.p2p_ps_mode > P2P_PS_NONE) |
1158 | rtl_p2p_ps_cmd(hw, P2P_PS_DISABLE); | 1158 | rtl_p2p_ps_cmd(hw, P2P_PS_DISABLE); |
1159 | mac->link_state = MAC80211_NOLINK; | 1159 | mac->link_state = MAC80211_NOLINK; |
1160 | memset(mac->bssid, 0, ETH_ALEN); | 1160 | eth_zero_addr(mac->bssid); |
1161 | mac->vendor = PEER_UNKNOWN; | 1161 | mac->vendor = PEER_UNKNOWN; |
1162 | mac->mode = 0; | 1162 | mac->mode = 0; |
1163 | 1163 | ||
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c index d4ba009ac9aa..d1e9a13be910 100644 --- a/drivers/net/wireless/ti/wl1251/main.c +++ b/drivers/net/wireless/ti/wl1251/main.c | |||
@@ -468,7 +468,7 @@ static void wl1251_op_stop(struct ieee80211_hw *hw) | |||
468 | wl1251_tx_flush(wl); | 468 | wl1251_tx_flush(wl); |
469 | wl1251_power_off(wl); | 469 | wl1251_power_off(wl); |
470 | 470 | ||
471 | memset(wl->bssid, 0, ETH_ALEN); | 471 | eth_zero_addr(wl->bssid); |
472 | wl->listen_int = 1; | 472 | wl->listen_int = 1; |
473 | wl->bss_type = MAX_BSS_TYPE; | 473 | wl->bss_type = MAX_BSS_TYPE; |
474 | 474 | ||
@@ -547,7 +547,7 @@ static void wl1251_op_remove_interface(struct ieee80211_hw *hw, | |||
547 | mutex_lock(&wl->mutex); | 547 | mutex_lock(&wl->mutex); |
548 | wl1251_debug(DEBUG_MAC80211, "mac80211 remove interface"); | 548 | wl1251_debug(DEBUG_MAC80211, "mac80211 remove interface"); |
549 | wl->vif = NULL; | 549 | wl->vif = NULL; |
550 | memset(wl->bssid, 0, ETH_ALEN); | 550 | eth_zero_addr(wl->bssid); |
551 | mutex_unlock(&wl->mutex); | 551 | mutex_unlock(&wl->mutex); |
552 | } | 552 | } |
553 | 553 | ||
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c index c26fc2106e5b..68919f8d4310 100644 --- a/drivers/net/wireless/ti/wlcore/cmd.c +++ b/drivers/net/wireless/ti/wlcore/cmd.c | |||
@@ -367,7 +367,7 @@ void wl12xx_free_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid) | |||
367 | wl->links[*hlid].allocated_pkts = 0; | 367 | wl->links[*hlid].allocated_pkts = 0; |
368 | wl->links[*hlid].prev_freed_pkts = 0; | 368 | wl->links[*hlid].prev_freed_pkts = 0; |
369 | wl->links[*hlid].ba_bitmap = 0; | 369 | wl->links[*hlid].ba_bitmap = 0; |
370 | memset(wl->links[*hlid].addr, 0, ETH_ALEN); | 370 | eth_zero_addr(wl->links[*hlid].addr); |
371 | 371 | ||
372 | /* | 372 | /* |
373 | * At this point op_tx() will not add more packets to the queues. We | 373 | * At this point op_tx() will not add more packets to the queues. We |
@@ -1293,7 +1293,7 @@ int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, struct wl12xx_vif *wlvif) | |||
1293 | hdr->frame_control = cpu_to_le16(fc); | 1293 | hdr->frame_control = cpu_to_le16(fc); |
1294 | memcpy(hdr->addr1, vif->bss_conf.bssid, ETH_ALEN); | 1294 | memcpy(hdr->addr1, vif->bss_conf.bssid, ETH_ALEN); |
1295 | memcpy(hdr->addr2, vif->addr, ETH_ALEN); | 1295 | memcpy(hdr->addr2, vif->addr, ETH_ALEN); |
1296 | memset(hdr->addr3, 0xff, ETH_ALEN); | 1296 | eth_broadcast_addr(hdr->addr3); |
1297 | 1297 | ||
1298 | ret = wl1271_cmd_template_set(wl, wlvif->role_id, CMD_TEMPL_ARP_RSP, | 1298 | ret = wl1271_cmd_template_set(wl, wlvif->role_id, CMD_TEMPL_ARP_RSP, |
1299 | skb->data, skb->len, 0, | 1299 | skb->data, skb->len, 0, |
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index f38227afe099..4ae98e2ad719 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c | |||
@@ -438,7 +438,7 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid, | |||
438 | * stolen by an Ethernet bridge for STP purposes. | 438 | * stolen by an Ethernet bridge for STP purposes. |
439 | * (FE:FF:FF:FF:FF:FF) | 439 | * (FE:FF:FF:FF:FF:FF) |
440 | */ | 440 | */ |
441 | memset(dev->dev_addr, 0xFF, ETH_ALEN); | 441 | eth_broadcast_addr(dev->dev_addr); |
442 | dev->dev_addr[0] &= ~0x01; | 442 | dev->dev_addr[0] &= ~0x01; |
443 | 443 | ||
444 | netif_carrier_off(dev); | 444 | netif_carrier_off(dev); |
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig index f1b5111bbaba..b2837b1c70b7 100644 --- a/drivers/s390/net/Kconfig +++ b/drivers/s390/net/Kconfig | |||
@@ -57,17 +57,6 @@ config SMSGIUCV_EVENT | |||
57 | 57 | ||
58 | To compile as a module, choose M. The module name is "smsgiucv_app". | 58 | To compile as a module, choose M. The module name is "smsgiucv_app". |
59 | 59 | ||
60 | config CLAW | ||
61 | def_tristate m | ||
62 | prompt "CLAW device support" | ||
63 | depends on CCW && NETDEVICES | ||
64 | help | ||
65 | This driver supports channel attached CLAW devices. | ||
66 | CLAW is Common Link Access for Workstation. Common devices | ||
67 | that use CLAW are RS/6000s, Cisco Routers (CIP) and 3172 devices. | ||
68 | To compile as a module, choose M. The module name is claw. | ||
69 | To compile into the kernel, choose Y. | ||
70 | |||
71 | config QETH | 60 | config QETH |
72 | def_tristate y | 61 | def_tristate y |
73 | prompt "Gigabit Ethernet device support" | 62 | prompt "Gigabit Ethernet device support" |
@@ -106,6 +95,6 @@ config QETH_IPV6 | |||
106 | 95 | ||
107 | config CCWGROUP | 96 | config CCWGROUP |
108 | tristate | 97 | tristate |
109 | default (LCS || CTCM || QETH || CLAW) | 98 | default (LCS || CTCM || QETH) |
110 | 99 | ||
111 | endmenu | 100 | endmenu |
diff --git a/drivers/s390/net/Makefile b/drivers/s390/net/Makefile index d28f05d0c75a..c351b07603e0 100644 --- a/drivers/s390/net/Makefile +++ b/drivers/s390/net/Makefile | |||
@@ -8,7 +8,6 @@ obj-$(CONFIG_NETIUCV) += netiucv.o fsm.o | |||
8 | obj-$(CONFIG_SMSGIUCV) += smsgiucv.o | 8 | obj-$(CONFIG_SMSGIUCV) += smsgiucv.o |
9 | obj-$(CONFIG_SMSGIUCV_EVENT) += smsgiucv_app.o | 9 | obj-$(CONFIG_SMSGIUCV_EVENT) += smsgiucv_app.o |
10 | obj-$(CONFIG_LCS) += lcs.o | 10 | obj-$(CONFIG_LCS) += lcs.o |
11 | obj-$(CONFIG_CLAW) += claw.o | ||
12 | qeth-y += qeth_core_sys.o qeth_core_main.o qeth_core_mpc.o | 11 | qeth-y += qeth_core_sys.o qeth_core_main.o qeth_core_mpc.o |
13 | obj-$(CONFIG_QETH) += qeth.o | 12 | obj-$(CONFIG_QETH) += qeth.o |
14 | qeth_l2-y += qeth_l2_main.o qeth_l2_sys.o | 13 | qeth_l2-y += qeth_l2_main.o qeth_l2_sys.o |
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c deleted file mode 100644 index d609ca09aa94..000000000000 --- a/drivers/s390/net/claw.c +++ /dev/null | |||
@@ -1,3377 +0,0 @@ | |||
1 | /* | ||
2 | * ESCON CLAW network driver | ||
3 | * | ||
4 | * Linux for zSeries version | ||
5 | * Copyright IBM Corp. 2002, 2009 | ||
6 | * Author(s) Original code written by: | ||
7 | * Kazuo Iimura <iimura@jp.ibm.com> | ||
8 | * Rewritten by | ||
9 | * Andy Richter <richtera@us.ibm.com> | ||
10 | * Marc Price <mwprice@us.ibm.com> | ||
11 | * | ||
12 | * sysfs parms: | ||
13 | * group x.x.rrrr,x.x.wwww | ||
14 | * read_buffer nnnnnnn | ||
15 | * write_buffer nnnnnn | ||
16 | * host_name aaaaaaaa | ||
17 | * adapter_name aaaaaaaa | ||
18 | * api_type aaaaaaaa | ||
19 | * | ||
20 | * eg. | ||
21 | * group 0.0.0200 0.0.0201 | ||
22 | * read_buffer 25 | ||
23 | * write_buffer 20 | ||
24 | * host_name LINUX390 | ||
25 | * adapter_name RS6K | ||
26 | * api_type TCPIP | ||
27 | * | ||
28 | * where | ||
29 | * | ||
30 | * The device id is decided by the order entries | ||
31 | * are added to the group the first is claw0 the second claw1 | ||
32 | * up to CLAW_MAX_DEV | ||
33 | * | ||
34 | * rrrr - the first of 2 consecutive device addresses used for the | ||
35 | * CLAW protocol. | ||
36 | * The specified address is always used as the input (Read) | ||
37 | * channel and the next address is used as the output channel. | ||
38 | * | ||
39 | * wwww - the second of 2 consecutive device addresses used for | ||
40 | * the CLAW protocol. | ||
41 | * The specified address is always used as the output | ||
42 | * channel and the previous address is used as the input channel. | ||
43 | * | ||
44 | * read_buffer - specifies number of input buffers to allocate. | ||
45 | * write_buffer - specifies number of output buffers to allocate. | ||
46 | * host_name - host name | ||
47 | * adaptor_name - adaptor name | ||
48 | * api_type - API type TCPIP or API will be sent and expected | ||
49 | * as ws_name | ||
50 | * | ||
51 | * Note the following requirements: | ||
52 | * 1) host_name must match the configured adapter_name on the remote side | ||
53 | * 2) adaptor_name must match the configured host name on the remote side | ||
54 | * | ||
55 | * Change History | ||
56 | * 1.00 Initial release shipped | ||
57 | * 1.10 Changes for Buffer allocation | ||
58 | * 1.15 Changed for 2.6 Kernel No longer compiles on 2.4 or lower | ||
59 | * 1.25 Added Packing support | ||
60 | * 1.5 | ||
61 | */ | ||
62 | |||
63 | #define KMSG_COMPONENT "claw" | ||
64 | |||
65 | #include <asm/ccwdev.h> | ||
66 | #include <asm/ccwgroup.h> | ||
67 | #include <asm/debug.h> | ||
68 | #include <asm/idals.h> | ||
69 | #include <asm/io.h> | ||
70 | #include <linux/bitops.h> | ||
71 | #include <linux/ctype.h> | ||
72 | #include <linux/delay.h> | ||
73 | #include <linux/errno.h> | ||
74 | #include <linux/if_arp.h> | ||
75 | #include <linux/init.h> | ||
76 | #include <linux/interrupt.h> | ||
77 | #include <linux/ip.h> | ||
78 | #include <linux/kernel.h> | ||
79 | #include <linux/module.h> | ||
80 | #include <linux/netdevice.h> | ||
81 | #include <linux/etherdevice.h> | ||
82 | #include <linux/proc_fs.h> | ||
83 | #include <linux/sched.h> | ||
84 | #include <linux/signal.h> | ||
85 | #include <linux/skbuff.h> | ||
86 | #include <linux/slab.h> | ||
87 | #include <linux/string.h> | ||
88 | #include <linux/tcp.h> | ||
89 | #include <linux/timer.h> | ||
90 | #include <linux/types.h> | ||
91 | |||
92 | #include "claw.h" | ||
93 | |||
94 | /* | ||
95 | CLAW uses the s390dbf file system see claw_trace and claw_setup | ||
96 | */ | ||
97 | |||
98 | static char version[] __initdata = "CLAW driver"; | ||
99 | static char debug_buffer[255]; | ||
100 | /** | ||
101 | * Debug Facility Stuff | ||
102 | */ | ||
103 | static debug_info_t *claw_dbf_setup; | ||
104 | static debug_info_t *claw_dbf_trace; | ||
105 | |||
106 | /** | ||
107 | * CLAW Debug Facility functions | ||
108 | */ | ||
109 | static void | ||
110 | claw_unregister_debug_facility(void) | ||
111 | { | ||
112 | debug_unregister(claw_dbf_setup); | ||
113 | debug_unregister(claw_dbf_trace); | ||
114 | } | ||
115 | |||
116 | static int | ||
117 | claw_register_debug_facility(void) | ||
118 | { | ||
119 | claw_dbf_setup = debug_register("claw_setup", 2, 1, 8); | ||
120 | claw_dbf_trace = debug_register("claw_trace", 2, 2, 8); | ||
121 | if (claw_dbf_setup == NULL || claw_dbf_trace == NULL) { | ||
122 | claw_unregister_debug_facility(); | ||
123 | return -ENOMEM; | ||
124 | } | ||
125 | debug_register_view(claw_dbf_setup, &debug_hex_ascii_view); | ||
126 | debug_set_level(claw_dbf_setup, 2); | ||
127 | debug_register_view(claw_dbf_trace, &debug_hex_ascii_view); | ||
128 | debug_set_level(claw_dbf_trace, 2); | ||
129 | return 0; | ||
130 | } | ||
131 | |||
132 | static inline void | ||
133 | claw_set_busy(struct net_device *dev) | ||
134 | { | ||
135 | ((struct claw_privbk *)dev->ml_priv)->tbusy = 1; | ||
136 | } | ||
137 | |||
138 | static inline void | ||
139 | claw_clear_busy(struct net_device *dev) | ||
140 | { | ||
141 | clear_bit(0, &(((struct claw_privbk *) dev->ml_priv)->tbusy)); | ||
142 | netif_wake_queue(dev); | ||
143 | } | ||
144 | |||
145 | static inline int | ||
146 | claw_check_busy(struct net_device *dev) | ||
147 | { | ||
148 | return ((struct claw_privbk *) dev->ml_priv)->tbusy; | ||
149 | } | ||
150 | |||
151 | static inline void | ||
152 | claw_setbit_busy(int nr,struct net_device *dev) | ||
153 | { | ||
154 | netif_stop_queue(dev); | ||
155 | set_bit(nr, (void *)&(((struct claw_privbk *)dev->ml_priv)->tbusy)); | ||
156 | } | ||
157 | |||
158 | static inline void | ||
159 | claw_clearbit_busy(int nr,struct net_device *dev) | ||
160 | { | ||
161 | clear_bit(nr, (void *)&(((struct claw_privbk *)dev->ml_priv)->tbusy)); | ||
162 | netif_wake_queue(dev); | ||
163 | } | ||
164 | |||
165 | static inline int | ||
166 | claw_test_and_setbit_busy(int nr,struct net_device *dev) | ||
167 | { | ||
168 | netif_stop_queue(dev); | ||
169 | return test_and_set_bit(nr, | ||
170 | (void *)&(((struct claw_privbk *) dev->ml_priv)->tbusy)); | ||
171 | } | ||
172 | |||
173 | |||
174 | /* Functions for the DEV methods */ | ||
175 | |||
176 | static int claw_probe(struct ccwgroup_device *cgdev); | ||
177 | static void claw_remove_device(struct ccwgroup_device *cgdev); | ||
178 | static void claw_purge_skb_queue(struct sk_buff_head *q); | ||
179 | static int claw_new_device(struct ccwgroup_device *cgdev); | ||
180 | static int claw_shutdown_device(struct ccwgroup_device *cgdev); | ||
181 | static int claw_tx(struct sk_buff *skb, struct net_device *dev); | ||
182 | static int claw_change_mtu( struct net_device *dev, int new_mtu); | ||
183 | static int claw_open(struct net_device *dev); | ||
184 | static void claw_irq_handler(struct ccw_device *cdev, | ||
185 | unsigned long intparm, struct irb *irb); | ||
186 | static void claw_irq_tasklet ( unsigned long data ); | ||
187 | static int claw_release(struct net_device *dev); | ||
188 | static void claw_write_retry ( struct chbk * p_ch ); | ||
189 | static void claw_write_next ( struct chbk * p_ch ); | ||
190 | static void claw_timer ( struct chbk * p_ch ); | ||
191 | |||
192 | /* Functions */ | ||
193 | static int add_claw_reads(struct net_device *dev, | ||
194 | struct ccwbk* p_first, struct ccwbk* p_last); | ||
195 | static void ccw_check_return_code (struct ccw_device *cdev, int return_code); | ||
196 | static void ccw_check_unit_check (struct chbk * p_ch, unsigned char sense ); | ||
197 | static int find_link(struct net_device *dev, char *host_name, char *ws_name ); | ||
198 | static int claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid); | ||
199 | static int init_ccw_bk(struct net_device *dev); | ||
200 | static void probe_error( struct ccwgroup_device *cgdev); | ||
201 | static struct net_device_stats *claw_stats(struct net_device *dev); | ||
202 | static int pages_to_order_of_mag(int num_of_pages); | ||
203 | static struct sk_buff *claw_pack_skb(struct claw_privbk *privptr); | ||
204 | /* sysfs Functions */ | ||
205 | static ssize_t claw_hname_show(struct device *dev, | ||
206 | struct device_attribute *attr, char *buf); | ||
207 | static ssize_t claw_hname_write(struct device *dev, | ||
208 | struct device_attribute *attr, | ||
209 | const char *buf, size_t count); | ||
210 | static ssize_t claw_adname_show(struct device *dev, | ||
211 | struct device_attribute *attr, char *buf); | ||
212 | static ssize_t claw_adname_write(struct device *dev, | ||
213 | struct device_attribute *attr, | ||
214 | const char *buf, size_t count); | ||
215 | static ssize_t claw_apname_show(struct device *dev, | ||
216 | struct device_attribute *attr, char *buf); | ||
217 | static ssize_t claw_apname_write(struct device *dev, | ||
218 | struct device_attribute *attr, | ||
219 | const char *buf, size_t count); | ||
220 | static ssize_t claw_wbuff_show(struct device *dev, | ||
221 | struct device_attribute *attr, char *buf); | ||
222 | static ssize_t claw_wbuff_write(struct device *dev, | ||
223 | struct device_attribute *attr, | ||
224 | const char *buf, size_t count); | ||
225 | static ssize_t claw_rbuff_show(struct device *dev, | ||
226 | struct device_attribute *attr, char *buf); | ||
227 | static ssize_t claw_rbuff_write(struct device *dev, | ||
228 | struct device_attribute *attr, | ||
229 | const char *buf, size_t count); | ||
230 | |||
231 | /* Functions for System Validate */ | ||
232 | static int claw_process_control( struct net_device *dev, struct ccwbk * p_ccw); | ||
233 | static int claw_send_control(struct net_device *dev, __u8 type, __u8 link, | ||
234 | __u8 correlator, __u8 rc , char *local_name, char *remote_name); | ||
235 | static int claw_snd_conn_req(struct net_device *dev, __u8 link); | ||
236 | static int claw_snd_disc(struct net_device *dev, struct clawctl * p_ctl); | ||
237 | static int claw_snd_sys_validate_rsp(struct net_device *dev, | ||
238 | struct clawctl * p_ctl, __u32 return_code); | ||
239 | static int claw_strt_conn_req(struct net_device *dev ); | ||
240 | static void claw_strt_read(struct net_device *dev, int lock); | ||
241 | static void claw_strt_out_IO(struct net_device *dev); | ||
242 | static void claw_free_wrt_buf(struct net_device *dev); | ||
243 | |||
244 | /* Functions for unpack reads */ | ||
245 | static void unpack_read(struct net_device *dev); | ||
246 | |||
247 | static int claw_pm_prepare(struct ccwgroup_device *gdev) | ||
248 | { | ||
249 | return -EPERM; | ||
250 | } | ||
251 | |||
252 | /* the root device for claw group devices */ | ||
253 | static struct device *claw_root_dev; | ||
254 | |||
255 | /* ccwgroup table */ | ||
256 | |||
/*
 * ccwgroup driver: binds a pair of CCW subchannels (read + write)
 * into one CLAW group device and wires up its lifecycle callbacks.
 */
static struct ccwgroup_driver claw_group_driver = {
	.driver = {
		.owner	= THIS_MODULE,
		.name	= "claw",
	},
	.setup	     = claw_probe,		/* group device created */
	.remove      = claw_remove_device,	/* group device removed */
	.set_online  = claw_new_device,		/* bring interface up */
	.set_offline = claw_shutdown_device,	/* take interface down */
	.prepare     = claw_pm_prepare,		/* PM: always refused */
};
268 | |||
/*
 * CCW device IDs this driver claims: control unit type 0x3088,
 * model 0x61 is the CLAW channel adapter.
 */
static struct ccw_device_id claw_ids[] = {
	{CCW_DEVICE(0x3088, 0x61), .driver_info = claw_channel_type_claw},
	{},	/* terminating entry */
};
MODULE_DEVICE_TABLE(ccw, claw_ids);
274 | |||
/*
 * Low-level CCW driver for the individual subchannels; probe/remove
 * are delegated to the generic ccwgroup helpers so that the group
 * driver above owns the device lifecycle.
 */
static struct ccw_driver claw_ccw_driver = {
	.driver = {
		.owner	= THIS_MODULE,
		.name	= "claw",
	},
	.ids	= claw_ids,
	.probe	= ccwgroup_probe_ccwdev,
	.remove	= ccwgroup_remove_ccwdev,
	.int_class = IRQIO_CLW,		/* IRQ accounting class */
};
285 | |||
286 | static ssize_t claw_driver_group_store(struct device_driver *ddrv, | ||
287 | const char *buf, size_t count) | ||
288 | { | ||
289 | int err; | ||
290 | err = ccwgroup_create_dev(claw_root_dev, &claw_group_driver, 2, buf); | ||
291 | return err ? err : count; | ||
292 | } | ||
/* Write-only (0200) driver attribute: /sys/bus/ccwgroup/drivers/claw/group */
static DRIVER_ATTR(group, 0200, NULL, claw_driver_group_store);

/* Attribute list and group wrapping exposed to the driver core. */
static struct attribute *claw_drv_attrs[] = {
	&driver_attr_group.attr,
	NULL,
};
static struct attribute_group claw_drv_attr_group = {
	.attrs = claw_drv_attrs,
};
static const struct attribute_group *claw_drv_attr_groups[] = {
	&claw_drv_attr_group,
	NULL,
};
306 | |||
307 | /* | ||
308 | * Key functions | ||
309 | */ | ||
310 | |||
311 | /*-------------------------------------------------------------------* | ||
312 | * claw_tx * | ||
313 | *-------------------------------------------------------------------*/ | ||
314 | |||
315 | static int | ||
316 | claw_tx(struct sk_buff *skb, struct net_device *dev) | ||
317 | { | ||
318 | int rc; | ||
319 | struct claw_privbk *privptr = dev->ml_priv; | ||
320 | unsigned long saveflags; | ||
321 | struct chbk *p_ch; | ||
322 | |||
323 | CLAW_DBF_TEXT(4, trace, "claw_tx"); | ||
324 | p_ch = &privptr->channel[WRITE_CHANNEL]; | ||
325 | spin_lock_irqsave(get_ccwdev_lock(p_ch->cdev), saveflags); | ||
326 | rc=claw_hw_tx( skb, dev, 1 ); | ||
327 | spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags); | ||
328 | CLAW_DBF_TEXT_(4, trace, "clawtx%d", rc); | ||
329 | if (rc) | ||
330 | rc = NETDEV_TX_BUSY; | ||
331 | else | ||
332 | rc = NETDEV_TX_OK; | ||
333 | return rc; | ||
334 | } /* end of claw_tx */ | ||
335 | |||
336 | /*------------------------------------------------------------------* | ||
337 | * pack the collect queue into an skb and return it * | ||
338 | * If not packing just return the top skb from the queue * | ||
339 | *------------------------------------------------------------------*/ | ||
340 | |||
/*
 * Pack queued write skbs into one frame.
 *
 * Not packed mode: return the first skb from the write channel's
 * collect queue.  Packed mode: allocate a write_size skb, mark it
 * with the 'P' pack header tag, and copy in as many queued skbs as
 * fit (write_size minus 8 bytes).  Returns NULL if the queue is
 * empty or no packed frame could be built.
 *
 * NOTE(review): held_skb is passed to dev_kfree_skb_any() right
 * after the dequeue and is then still returned / copied from below,
 * and freed again inside the copy loop.  This is only safe if skbs
 * placed on the collect queue carry an extra reference (the
 * atomic_inc/dec of skb->users here suggests such a convention) —
 * verify against the enqueue path before changing anything.
 */
static struct sk_buff *
claw_pack_skb(struct claw_privbk *privptr)
{
	struct sk_buff *new_skb,*held_skb;
	struct chbk *p_ch = &privptr->channel[WRITE_CHANNEL];
	struct claw_env *p_env = privptr->p_env;
	int pkt_cnt,pk_ind,so_far;

	new_skb = NULL;		/* assume no dice */
	pkt_cnt = 0;
	CLAW_DBF_TEXT(4, trace, "PackSKBe");
	if (!skb_queue_empty(&p_ch->collect_queue)) {
		/* some data */
		held_skb = skb_dequeue(&p_ch->collect_queue);
		if (held_skb)
			/* NOTE(review): see ownership caveat above. */
			dev_kfree_skb_any(held_skb);
		else
			return NULL;
		if (p_env->packing != DO_PACKED)
			return held_skb;
		/* get a new SKB we will pack at least one */
		new_skb = dev_alloc_skb(p_env->write_size);
		if (new_skb == NULL) {
			/* allocation failed: put the skb back, keep a ref */
			atomic_inc(&held_skb->users);
			skb_queue_head(&p_ch->collect_queue,held_skb);
			return NULL;
		}
		/* we have packed packet and a place to put it */
		pk_ind = 1;
		so_far = 0;
		new_skb->cb[1] = 'P'; /* every skb on queue has pack header */
		while ((pk_ind) && (held_skb != NULL)) {
			/* keep 8 bytes of the frame for framing overhead */
			if (held_skb->len+so_far <= p_env->write_size-8) {
				memcpy(skb_put(new_skb,held_skb->len),
					held_skb->data,held_skb->len);
				privptr->stats.tx_packets++;
				so_far += held_skb->len;
				pkt_cnt++;
				dev_kfree_skb_any(held_skb);
				held_skb = skb_dequeue(&p_ch->collect_queue);
				if (held_skb)
					atomic_dec(&held_skb->users);
			} else {
				/* next skb does not fit: requeue and stop */
				pk_ind = 0;
				atomic_inc(&held_skb->users);
				skb_queue_head(&p_ch->collect_queue,held_skb);
			}
		}
	}
	CLAW_DBF_TEXT(4, trace, "PackSKBx");
	return new_skb;
}
393 | |||
394 | /*-------------------------------------------------------------------* | ||
395 | * claw_change_mtu * | ||
396 | * * | ||
397 | *-------------------------------------------------------------------*/ | ||
398 | |||
399 | static int | ||
400 | claw_change_mtu(struct net_device *dev, int new_mtu) | ||
401 | { | ||
402 | struct claw_privbk *privptr = dev->ml_priv; | ||
403 | int buff_size; | ||
404 | CLAW_DBF_TEXT(4, trace, "setmtu"); | ||
405 | buff_size = privptr->p_env->write_size; | ||
406 | if ((new_mtu < 60) || (new_mtu > buff_size)) { | ||
407 | return -EINVAL; | ||
408 | } | ||
409 | dev->mtu = new_mtu; | ||
410 | return 0; | ||
411 | } /* end of claw_change_mtu */ | ||
412 | |||
413 | |||
414 | /*-------------------------------------------------------------------* | ||
415 | * claw_open * | ||
416 | * * | ||
417 | *-------------------------------------------------------------------*/ | ||
418 | static int | ||
419 | claw_open(struct net_device *dev) | ||
420 | { | ||
421 | |||
422 | int rc; | ||
423 | int i; | ||
424 | unsigned long saveflags=0; | ||
425 | unsigned long parm; | ||
426 | struct claw_privbk *privptr; | ||
427 | DECLARE_WAITQUEUE(wait, current); | ||
428 | struct timer_list timer; | ||
429 | struct ccwbk *p_buf; | ||
430 | |||
431 | CLAW_DBF_TEXT(4, trace, "open"); | ||
432 | privptr = (struct claw_privbk *)dev->ml_priv; | ||
433 | /* allocate and initialize CCW blocks */ | ||
434 | if (privptr->buffs_alloc == 0) { | ||
435 | rc=init_ccw_bk(dev); | ||
436 | if (rc) { | ||
437 | CLAW_DBF_TEXT(2, trace, "openmem"); | ||
438 | return -ENOMEM; | ||
439 | } | ||
440 | } | ||
441 | privptr->system_validate_comp=0; | ||
442 | privptr->release_pend=0; | ||
443 | if(strncmp(privptr->p_env->api_type,WS_APPL_NAME_PACKED,6) == 0) { | ||
444 | privptr->p_env->read_size=DEF_PACK_BUFSIZE; | ||
445 | privptr->p_env->write_size=DEF_PACK_BUFSIZE; | ||
446 | privptr->p_env->packing=PACKING_ASK; | ||
447 | } else { | ||
448 | privptr->p_env->packing=0; | ||
449 | privptr->p_env->read_size=CLAW_FRAME_SIZE; | ||
450 | privptr->p_env->write_size=CLAW_FRAME_SIZE; | ||
451 | } | ||
452 | claw_set_busy(dev); | ||
453 | tasklet_init(&privptr->channel[READ_CHANNEL].tasklet, claw_irq_tasklet, | ||
454 | (unsigned long) &privptr->channel[READ_CHANNEL]); | ||
455 | for ( i = 0; i < 2; i++) { | ||
456 | CLAW_DBF_TEXT_(2, trace, "opn_ch%d", i); | ||
457 | init_waitqueue_head(&privptr->channel[i].wait); | ||
458 | /* skb_queue_head_init(&p_ch->io_queue); */ | ||
459 | if (i == WRITE_CHANNEL) | ||
460 | skb_queue_head_init( | ||
461 | &privptr->channel[WRITE_CHANNEL].collect_queue); | ||
462 | privptr->channel[i].flag_a = 0; | ||
463 | privptr->channel[i].IO_active = 0; | ||
464 | privptr->channel[i].flag &= ~CLAW_TIMER; | ||
465 | init_timer(&timer); | ||
466 | timer.function = (void *)claw_timer; | ||
467 | timer.data = (unsigned long)(&privptr->channel[i]); | ||
468 | timer.expires = jiffies + 15*HZ; | ||
469 | add_timer(&timer); | ||
470 | spin_lock_irqsave(get_ccwdev_lock( | ||
471 | privptr->channel[i].cdev), saveflags); | ||
472 | parm = (unsigned long) &privptr->channel[i]; | ||
473 | privptr->channel[i].claw_state = CLAW_START_HALT_IO; | ||
474 | rc = 0; | ||
475 | add_wait_queue(&privptr->channel[i].wait, &wait); | ||
476 | rc = ccw_device_halt( | ||
477 | (struct ccw_device *)privptr->channel[i].cdev,parm); | ||
478 | set_current_state(TASK_INTERRUPTIBLE); | ||
479 | spin_unlock_irqrestore( | ||
480 | get_ccwdev_lock(privptr->channel[i].cdev), saveflags); | ||
481 | schedule(); | ||
482 | remove_wait_queue(&privptr->channel[i].wait, &wait); | ||
483 | if(rc != 0) | ||
484 | ccw_check_return_code(privptr->channel[i].cdev, rc); | ||
485 | if((privptr->channel[i].flag & CLAW_TIMER) == 0x00) | ||
486 | del_timer(&timer); | ||
487 | } | ||
488 | if ((((privptr->channel[READ_CHANNEL].last_dstat | | ||
489 | privptr->channel[WRITE_CHANNEL].last_dstat) & | ||
490 | ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) || | ||
491 | (((privptr->channel[READ_CHANNEL].flag | | ||
492 | privptr->channel[WRITE_CHANNEL].flag) & CLAW_TIMER) != 0x00)) { | ||
493 | dev_info(&privptr->channel[READ_CHANNEL].cdev->dev, | ||
494 | "%s: remote side is not ready\n", dev->name); | ||
495 | CLAW_DBF_TEXT(2, trace, "notrdy"); | ||
496 | |||
497 | for ( i = 0; i < 2; i++) { | ||
498 | spin_lock_irqsave( | ||
499 | get_ccwdev_lock(privptr->channel[i].cdev), | ||
500 | saveflags); | ||
501 | parm = (unsigned long) &privptr->channel[i]; | ||
502 | privptr->channel[i].claw_state = CLAW_STOP; | ||
503 | rc = ccw_device_halt( | ||
504 | (struct ccw_device *)&privptr->channel[i].cdev, | ||
505 | parm); | ||
506 | spin_unlock_irqrestore( | ||
507 | get_ccwdev_lock(privptr->channel[i].cdev), | ||
508 | saveflags); | ||
509 | if (rc != 0) { | ||
510 | ccw_check_return_code( | ||
511 | privptr->channel[i].cdev, rc); | ||
512 | } | ||
513 | } | ||
514 | free_pages((unsigned long)privptr->p_buff_ccw, | ||
515 | (int)pages_to_order_of_mag(privptr->p_buff_ccw_num)); | ||
516 | if (privptr->p_env->read_size < PAGE_SIZE) { | ||
517 | free_pages((unsigned long)privptr->p_buff_read, | ||
518 | (int)pages_to_order_of_mag( | ||
519 | privptr->p_buff_read_num)); | ||
520 | } | ||
521 | else { | ||
522 | p_buf=privptr->p_read_active_first; | ||
523 | while (p_buf!=NULL) { | ||
524 | free_pages((unsigned long)p_buf->p_buffer, | ||
525 | (int)pages_to_order_of_mag( | ||
526 | privptr->p_buff_pages_perread )); | ||
527 | p_buf=p_buf->next; | ||
528 | } | ||
529 | } | ||
530 | if (privptr->p_env->write_size < PAGE_SIZE ) { | ||
531 | free_pages((unsigned long)privptr->p_buff_write, | ||
532 | (int)pages_to_order_of_mag( | ||
533 | privptr->p_buff_write_num)); | ||
534 | } | ||
535 | else { | ||
536 | p_buf=privptr->p_write_active_first; | ||
537 | while (p_buf!=NULL) { | ||
538 | free_pages((unsigned long)p_buf->p_buffer, | ||
539 | (int)pages_to_order_of_mag( | ||
540 | privptr->p_buff_pages_perwrite )); | ||
541 | p_buf=p_buf->next; | ||
542 | } | ||
543 | } | ||
544 | privptr->buffs_alloc = 0; | ||
545 | privptr->channel[READ_CHANNEL].flag = 0x00; | ||
546 | privptr->channel[WRITE_CHANNEL].flag = 0x00; | ||
547 | privptr->p_buff_ccw=NULL; | ||
548 | privptr->p_buff_read=NULL; | ||
549 | privptr->p_buff_write=NULL; | ||
550 | claw_clear_busy(dev); | ||
551 | CLAW_DBF_TEXT(2, trace, "open EIO"); | ||
552 | return -EIO; | ||
553 | } | ||
554 | |||
555 | /* Send SystemValidate command */ | ||
556 | |||
557 | claw_clear_busy(dev); | ||
558 | CLAW_DBF_TEXT(4, trace, "openok"); | ||
559 | return 0; | ||
560 | } /* end of claw_open */ | ||
561 | |||
562 | /*-------------------------------------------------------------------* | ||
563 | * * | ||
564 | * claw_irq_handler * | ||
565 | * * | ||
566 | *--------------------------------------------------------------------*/ | ||
/*
 * Interrupt handler for both CLAW subchannels.
 *
 * Rejects unsolicited interrupts, identifies which channel (read or
 * write) raised the interrupt, saves the IRB, and then drives the
 * per-channel state machine: CLAW_STOP (halt during release),
 * CLAW_START_HALT_IO (halt during open), CLAW_START_READ and
 * CLAW_START_WRITE (normal operation).
 */
static void
claw_irq_handler(struct ccw_device *cdev,
	unsigned long intparm, struct irb *irb)
{
	struct chbk *p_ch = NULL;
	struct claw_privbk *privptr = NULL;
	struct net_device *dev = NULL;
	struct claw_env *p_env;
	struct chbk *p_ch_r=NULL;

	CLAW_DBF_TEXT(4, trace, "clawirq");
	/* Bypass all 'unsolicited interrupts' */
	privptr = dev_get_drvdata(&cdev->dev);
	if (!privptr) {
		dev_warn(&cdev->dev, "An uninitialized CLAW device received an"
			" IRQ, c-%02x d-%02x\n",
			irb->scsw.cmd.cstat, irb->scsw.cmd.dstat);
		CLAW_DBF_TEXT(2, trace, "badirq");
		return;
	}

	/* Try to extract channel from driver data. */
	if (privptr->channel[READ_CHANNEL].cdev == cdev)
		p_ch = &privptr->channel[READ_CHANNEL];
	else if (privptr->channel[WRITE_CHANNEL].cdev == cdev)
		p_ch = &privptr->channel[WRITE_CHANNEL];
	else {
		dev_warn(&cdev->dev, "The device is not a CLAW device\n");
		CLAW_DBF_TEXT(2, trace, "badchan");
		return;
	}
	CLAW_DBF_TEXT_(4, trace, "IRQCH=%d", p_ch->flag);

	dev = (struct net_device *) (p_ch->ndev);
	p_env=privptr->p_env;

	/* Copy interruption response block. */
	memcpy(p_ch->irb, irb, sizeof(struct irb));

	/* Check for good subchannel return code, otherwise info message */
	/* (PCI alone is expected during reads and is not reported here) */
	if (irb->scsw.cmd.cstat && !(irb->scsw.cmd.cstat & SCHN_STAT_PCI)) {
		dev_info(&cdev->dev,
			"%s: subchannel check for device: %04x -"
			" Sch Stat %02x Dev Stat %02x CPA - %04x\n",
			dev->name, p_ch->devno,
			irb->scsw.cmd.cstat, irb->scsw.cmd.dstat,
			irb->scsw.cmd.cpa);
		CLAW_DBF_TEXT(2, trace, "chanchk");
		/* return; */
	}

	/* Check the reason-code of a unit check */
	if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
		ccw_check_unit_check(p_ch, irb->ecw[0]);

	/* State machine to bring the connection up, down and to restart */
	p_ch->last_dstat = irb->scsw.cmd.dstat;

	switch (p_ch->claw_state) {
	case CLAW_STOP:/* HALT_IO by claw_release (halt sequence) */
		/* wait for a final/pending status before waking the waiter */
		if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
		(p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
		(p_ch->irb->scsw.cmd.stctl ==
		(SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))))
			return;
		wake_up(&p_ch->wait);   /* wake up claw_release */
		CLAW_DBF_TEXT(4, trace, "stop");
		return;
	case CLAW_START_HALT_IO: /* HALT_IO issued by claw_open  */
		if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
		(p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
		(p_ch->irb->scsw.cmd.stctl ==
		(SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
			CLAW_DBF_TEXT(4, trace, "haltio");
			return;
		}
		if (p_ch->flag == CLAW_READ) {
			p_ch->claw_state = CLAW_START_READ;
			wake_up(&p_ch->wait); /* wake claw_open (READ)*/
		} else if (p_ch->flag == CLAW_WRITE) {
			p_ch->claw_state = CLAW_START_WRITE;
			/*      send SYSTEM_VALIDATE                    */
			claw_strt_read(dev, LOCK_NO);
			claw_send_control(dev,
				SYSTEM_VALIDATE_REQUEST,
				0, 0, 0,
				p_env->host_name,
				p_env->adapter_name);
		} else {
			dev_warn(&cdev->dev, "The CLAW device received"
				" an unexpected IRQ, "
				"c-%02x d-%02x\n",
				irb->scsw.cmd.cstat,
				irb->scsw.cmd.dstat);
			return;
		}
		CLAW_DBF_TEXT(4, trace, "haltio");
		return;
	case CLAW_START_READ:
		CLAW_DBF_TEXT(4, trace, "ReadIRQ");
		if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
			clear_bit(0, (void *)&p_ch->IO_active);
			/* sense 0x41/0x40/0x00: remote side went away */
			if ((p_ch->irb->ecw[0] & 0x41) == 0x41 ||
			    (p_ch->irb->ecw[0] & 0x40) == 0x40 ||
			    (p_ch->irb->ecw[0])        == 0) {
				privptr->stats.rx_errors++;
				dev_info(&cdev->dev,
					"%s: Restart is required after remote "
					"side recovers \n",
					dev->name);
			}
			CLAW_DBF_TEXT(4, trace, "notrdy");
			return;
		}
		/* PCI without device status: data arrived mid-transfer,
		 * schedule the read tasklet if it is not running yet */
		if ((p_ch->irb->scsw.cmd.cstat & SCHN_STAT_PCI) &&
			(p_ch->irb->scsw.cmd.dstat == 0)) {
			if (test_and_set_bit(CLAW_BH_ACTIVE,
				(void *)&p_ch->flag_a) == 0)
				tasklet_schedule(&p_ch->tasklet);
			else
				CLAW_DBF_TEXT(4, trace, "PCINoBH");
			CLAW_DBF_TEXT(4, trace, "PCI_read");
			return;
		}
		if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
		 (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
		 (p_ch->irb->scsw.cmd.stctl ==
		 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
			CLAW_DBF_TEXT(4, trace, "SPend_rd");
			return;
		}
		/* read channel program ended: run the unpack tasklet */
		clear_bit(0, (void *)&p_ch->IO_active);
		claw_clearbit_busy(TB_RETRY, dev);
		if (test_and_set_bit(CLAW_BH_ACTIVE,
			(void *)&p_ch->flag_a) == 0)
			tasklet_schedule(&p_ch->tasklet);
		else
			CLAW_DBF_TEXT(4, trace, "RdBHAct");
		CLAW_DBF_TEXT(4, trace, "RdIRQXit");
		return;
	case CLAW_START_WRITE:
		if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
			dev_info(&cdev->dev,
				"%s: Unit Check Occurred in "
				"write channel\n", dev->name);
			clear_bit(0, (void *)&p_ch->IO_active);
			/* sense 0x80: resetting event - retry in 10s */
			if (p_ch->irb->ecw[0] & 0x80) {
				dev_info(&cdev->dev,
					"%s: Resetting Event "
					"occurred:\n", dev->name);
				init_timer(&p_ch->timer);
				p_ch->timer.function =
					(void *)claw_write_retry;
				p_ch->timer.data = (unsigned long)p_ch;
				p_ch->timer.expires = jiffies + 10*HZ;
				add_timer(&p_ch->timer);
				dev_info(&cdev->dev,
					"%s: write connection "
					"restarting\n", dev->name);
			}
			CLAW_DBF_TEXT(4, trace, "rstrtwrt");
			return;
		}
		if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) {
			clear_bit(0, (void *)&p_ch->IO_active);
			dev_info(&cdev->dev,
				"%s: Unit Exception "
				"occurred in write channel\n",
				dev->name);
		}
		if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
		(p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
		(p_ch->irb->scsw.cmd.stctl ==
		(SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
			CLAW_DBF_TEXT(4, trace, "writeUE");
			return;
		}
		/* write completed: queue the next writes and kick the
		 * read bottom half in case replies are waiting */
		clear_bit(0, (void *)&p_ch->IO_active);
		if (claw_test_and_setbit_busy(TB_TX, dev) == 0) {
			claw_write_next(p_ch);
			claw_clearbit_busy(TB_TX, dev);
			claw_clear_busy(dev);
		}
		p_ch_r = (struct chbk *)&privptr->channel[READ_CHANNEL];
		if (test_and_set_bit(CLAW_BH_ACTIVE,
			(void *)&p_ch_r->flag_a) == 0)
			tasklet_schedule(&p_ch_r->tasklet);
		CLAW_DBF_TEXT(4, trace, "StWtExit");
		return;
	default:
		dev_warn(&cdev->dev,
			"The CLAW device for %s received an unexpected IRQ\n",
			 dev->name);
		CLAW_DBF_TEXT(2, trace, "badIRQ");
		return;
	}

}       /* end of claw_irq_handler */
765 | |||
766 | |||
767 | /*-------------------------------------------------------------------* | ||
768 | * claw_irq_tasklet * | ||
769 | * * | ||
770 | *--------------------------------------------------------------------*/ | ||
771 | static void | ||
772 | claw_irq_tasklet ( unsigned long data ) | ||
773 | { | ||
774 | struct chbk * p_ch; | ||
775 | struct net_device *dev; | ||
776 | |||
777 | p_ch = (struct chbk *) data; | ||
778 | dev = (struct net_device *)p_ch->ndev; | ||
779 | CLAW_DBF_TEXT(4, trace, "IRQtask"); | ||
780 | unpack_read(dev); | ||
781 | clear_bit(CLAW_BH_ACTIVE, (void *)&p_ch->flag_a); | ||
782 | CLAW_DBF_TEXT(4, trace, "TskletXt"); | ||
783 | return; | ||
784 | } /* end of claw_irq_bh */ | ||
785 | |||
786 | /*-------------------------------------------------------------------* | ||
787 | * claw_release * | ||
788 | * * | ||
789 | *--------------------------------------------------------------------*/ | ||
/*
 * Shut the interface down.
 *
 * Halts both subchannels (waiting for the IRQ handler to confirm the
 * CLAW_STOP state), drops any packed skb in flight, frees every CCW
 * and data buffer allocated by init_ccw_bk(), and resets the read
 * and write channel program chains so a later open starts clean.
 * Always returns 0.
 */
static int
claw_release(struct net_device *dev)
{
	int                rc;
	int                i;
	unsigned long      saveflags;
	unsigned long      parm;
	struct claw_privbk *privptr;
	DECLARE_WAITQUEUE(wait, current);
	struct ccwbk*             p_this_ccw;
	struct ccwbk*             p_buf;

	if (!dev)
		return 0;
	privptr = (struct claw_privbk *)dev->ml_priv;
	if (!privptr)
		return 0;
	CLAW_DBF_TEXT(4, trace, "release");
	privptr->release_pend=1;
	claw_setbit_busy(TB_STOP,dev);
	/* stop the write channel first, then the read channel */
	for ( i = 1; i >=0 ;  i--) {
		spin_lock_irqsave(
			get_ccwdev_lock(privptr->channel[i].cdev), saveflags);
	     /*   del_timer(&privptr->channel[READ_CHANNEL].timer);  */
		privptr->channel[i].claw_state = CLAW_STOP;
		privptr->channel[i].IO_active = 0;
		parm = (unsigned long) &privptr->channel[i];
		if (i == WRITE_CHANNEL)
			claw_purge_skb_queue(
				&privptr->channel[WRITE_CHANNEL].collect_queue);
		rc = ccw_device_halt (privptr->channel[i].cdev, parm);
		if (privptr->system_validate_comp==0x00)  /* never opened? */
			init_waitqueue_head(&privptr->channel[i].wait);
		add_wait_queue(&privptr->channel[i].wait, &wait);
		set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock_irqrestore(
			get_ccwdev_lock(privptr->channel[i].cdev), saveflags);
		schedule();	/* woken by the IRQ handler in CLAW_STOP */
		remove_wait_queue(&privptr->channel[i].wait, &wait);
		if (rc != 0) {
			ccw_check_return_code(privptr->channel[i].cdev, rc);
		}
	}
	if (privptr->pk_skb != NULL) {
		dev_kfree_skb_any(privptr->pk_skb);
		privptr->pk_skb = NULL;
	}
	if(privptr->buffs_alloc != 1) {
		CLAW_DBF_TEXT(4, trace, "none2fre");
		return 0;
	}
	CLAW_DBF_TEXT(4, trace, "freebufs");
	if (privptr->p_buff_ccw != NULL) {
		free_pages((unsigned long)privptr->p_buff_ccw,
			(int)pages_to_order_of_mag(privptr->p_buff_ccw_num));
	}
	CLAW_DBF_TEXT(4, trace, "freeread");
	/* small read buffers share one area; large ones are per block */
	if (privptr->p_env->read_size < PAGE_SIZE) {
		if (privptr->p_buff_read != NULL) {
			free_pages((unsigned long)privptr->p_buff_read,
			      (int)pages_to_order_of_mag(privptr->p_buff_read_num));
		}
	}
	else {
		p_buf=privptr->p_read_active_first;
		while (p_buf!=NULL) {
			free_pages((unsigned long)p_buf->p_buffer,
			      (int)pages_to_order_of_mag(
			      	privptr->p_buff_pages_perread ));
			p_buf=p_buf->next;
		}
	}
	CLAW_DBF_TEXT(4, trace, "freewrit");
	if (privptr->p_env->write_size < PAGE_SIZE ) {
		free_pages((unsigned long)privptr->p_buff_write,
		      (int)pages_to_order_of_mag(privptr->p_buff_write_num));
	}
	else {
		p_buf=privptr->p_write_active_first;
		while (p_buf!=NULL) {
			free_pages((unsigned long)p_buf->p_buffer,
			      (int)pages_to_order_of_mag(
			      	privptr->p_buff_pages_perwrite ));
			p_buf=p_buf->next;
		}
	}
	CLAW_DBF_TEXT(4, trace, "clearptr");
	privptr->buffs_alloc = 0;
	privptr->p_buff_ccw=NULL;
	privptr->p_buff_read=NULL;
	privptr->p_buff_write=NULL;
	privptr->system_validate_comp=0;
	privptr->release_pend=0;
	/* Remove any writes that were pending and reset all reads */
	p_this_ccw=privptr->p_read_active_first;
	while (p_this_ccw!=NULL) {
		p_this_ccw->header.length=0xffff;
		p_this_ccw->header.opcode=0xff;
		p_this_ccw->header.flag=0x00;
		p_this_ccw=p_this_ccw->next;
	}

	/* move pending writes back onto the free chain */
	while (privptr->p_write_active_first!=NULL) {
		p_this_ccw=privptr->p_write_active_first;
		p_this_ccw->header.flag=CLAW_PENDING;
		privptr->p_write_active_first=p_this_ccw->next;
		p_this_ccw->next=privptr->p_write_free_chain;
		privptr->p_write_free_chain=p_this_ccw;
		++privptr->write_free_count;
	}
	privptr->p_write_active_last=NULL;
	privptr->mtc_logical_link = -1;
	privptr->mtc_skipping = 1;
	privptr->mtc_offset=0;

	/* report unexpected ending device status from either channel */
	if (((privptr->channel[READ_CHANNEL].last_dstat |
		privptr->channel[WRITE_CHANNEL].last_dstat) &
		~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) {
		dev_warn(&privptr->channel[READ_CHANNEL].cdev->dev,
			"Deactivating %s completed with incorrect"
			" subchannel status "
			"(read %02x, write %02x)\n",
			dev->name,
			privptr->channel[READ_CHANNEL].last_dstat,
			privptr->channel[WRITE_CHANNEL].last_dstat);
		 CLAW_DBF_TEXT(2, trace, "badclose");
	}
	CLAW_DBF_TEXT(4, trace, "rlsexit");
	return 0;
}      /* end of claw_release     */
920 | |||
921 | /*-------------------------------------------------------------------* | ||
922 | * claw_write_retry * | ||
923 | * * | ||
924 | *--------------------------------------------------------------------*/ | ||
925 | |||
926 | static void | ||
927 | claw_write_retry ( struct chbk *p_ch ) | ||
928 | { | ||
929 | |||
930 | struct net_device *dev=p_ch->ndev; | ||
931 | |||
932 | CLAW_DBF_TEXT(4, trace, "w_retry"); | ||
933 | if (p_ch->claw_state == CLAW_STOP) { | ||
934 | return; | ||
935 | } | ||
936 | claw_strt_out_IO( dev ); | ||
937 | CLAW_DBF_TEXT(4, trace, "rtry_xit"); | ||
938 | return; | ||
939 | } /* end of claw_write_retry */ | ||
940 | |||
941 | |||
942 | /*-------------------------------------------------------------------* | ||
943 | * claw_write_next * | ||
944 | * * | ||
945 | *--------------------------------------------------------------------*/ | ||
946 | |||
947 | static void | ||
948 | claw_write_next ( struct chbk * p_ch ) | ||
949 | { | ||
950 | |||
951 | struct net_device *dev; | ||
952 | struct claw_privbk *privptr=NULL; | ||
953 | struct sk_buff *pk_skb; | ||
954 | |||
955 | CLAW_DBF_TEXT(4, trace, "claw_wrt"); | ||
956 | if (p_ch->claw_state == CLAW_STOP) | ||
957 | return; | ||
958 | dev = (struct net_device *) p_ch->ndev; | ||
959 | privptr = (struct claw_privbk *) dev->ml_priv; | ||
960 | claw_free_wrt_buf( dev ); | ||
961 | if ((privptr->write_free_count > 0) && | ||
962 | !skb_queue_empty(&p_ch->collect_queue)) { | ||
963 | pk_skb = claw_pack_skb(privptr); | ||
964 | while (pk_skb != NULL) { | ||
965 | claw_hw_tx(pk_skb, dev, 1); | ||
966 | if (privptr->write_free_count > 0) { | ||
967 | pk_skb = claw_pack_skb(privptr); | ||
968 | } else | ||
969 | pk_skb = NULL; | ||
970 | } | ||
971 | } | ||
972 | if (privptr->p_write_active_first!=NULL) { | ||
973 | claw_strt_out_IO(dev); | ||
974 | } | ||
975 | return; | ||
976 | } /* end of claw_write_next */ | ||
977 | |||
978 | /*-------------------------------------------------------------------* | ||
979 | * * | ||
980 | * claw_timer * | ||
981 | *--------------------------------------------------------------------*/ | ||
982 | |||
983 | static void | ||
984 | claw_timer ( struct chbk * p_ch ) | ||
985 | { | ||
986 | CLAW_DBF_TEXT(4, trace, "timer"); | ||
987 | p_ch->flag |= CLAW_TIMER; | ||
988 | wake_up(&p_ch->wait); | ||
989 | return; | ||
990 | } /* end of claw_timer */ | ||
991 | |||
992 | /* | ||
993 | * | ||
994 | * functions | ||
995 | */ | ||
996 | |||
997 | |||
998 | /*-------------------------------------------------------------------* | ||
999 | * * | ||
1000 | * pages_to_order_of_mag * | ||
1001 | * * | ||
1002 | * takes a number of pages from 1 to 512 and returns the * | ||
1003 | * log(num_pages)/log(2) get_free_pages() needs a base 2 order * | ||
1004 | * of magnitude get_free_pages() has an upper order of 9 * | ||
1005 | *--------------------------------------------------------------------*/ | ||
1006 | |||
1007 | static int | ||
1008 | pages_to_order_of_mag(int num_of_pages) | ||
1009 | { | ||
1010 | int order_of_mag=1; /* assume 2 pages */ | ||
1011 | int nump; | ||
1012 | |||
1013 | CLAW_DBF_TEXT_(5, trace, "pages%d", num_of_pages); | ||
1014 | if (num_of_pages == 1) {return 0; } /* magnitude of 0 = 1 page */ | ||
1015 | /* 512 pages = 2Meg on 4k page systems */ | ||
1016 | if (num_of_pages >= 512) {return 9; } | ||
1017 | /* we have two or more pages order is at least 1 */ | ||
1018 | for (nump=2 ;nump <= 512;nump*=2) { | ||
1019 | if (num_of_pages <= nump) | ||
1020 | break; | ||
1021 | order_of_mag +=1; | ||
1022 | } | ||
1023 | if (order_of_mag > 9) { order_of_mag = 9; } /* I know it's paranoid */ | ||
1024 | CLAW_DBF_TEXT_(5, trace, "mag%d", order_of_mag); | ||
1025 | return order_of_mag; | ||
1026 | } | ||
1027 | |||
1028 | /*-------------------------------------------------------------------* | ||
1029 | * * | ||
1030 | * add_claw_reads * | ||
1031 | * * | ||
1032 | *--------------------------------------------------------------------*/ | ||
1033 | static int | ||
1034 | add_claw_reads(struct net_device *dev, struct ccwbk* p_first, | ||
1035 | struct ccwbk* p_last) | ||
1036 | { | ||
1037 | struct claw_privbk *privptr; | ||
1038 | struct ccw1 temp_ccw; | ||
1039 | struct endccw * p_end; | ||
1040 | CLAW_DBF_TEXT(4, trace, "addreads"); | ||
1041 | privptr = dev->ml_priv; | ||
1042 | p_end = privptr->p_end_ccw; | ||
1043 | |||
1044 | /* first CCW and last CCW contains a new set of read channel programs | ||
1045 | * to apend the running channel programs | ||
1046 | */ | ||
1047 | if ( p_first==NULL) { | ||
1048 | CLAW_DBF_TEXT(4, trace, "addexit"); | ||
1049 | return 0; | ||
1050 | } | ||
1051 | |||
1052 | /* set up ending CCW sequence for this segment */ | ||
1053 | if (p_end->read1) { | ||
1054 | p_end->read1=0x00; /* second ending CCW is now active */ | ||
1055 | /* reset ending CCWs and setup TIC CCWs */ | ||
1056 | p_end->read2_nop2.cmd_code = CCW_CLAW_CMD_READFF; | ||
1057 | p_end->read2_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP; | ||
1058 | p_last->r_TIC_1.cda =(__u32)__pa(&p_end->read2_nop1); | ||
1059 | p_last->r_TIC_2.cda =(__u32)__pa(&p_end->read2_nop1); | ||
1060 | p_end->read2_nop2.cda=0; | ||
1061 | p_end->read2_nop2.count=1; | ||
1062 | } | ||
1063 | else { | ||
1064 | p_end->read1=0x01; /* first ending CCW is now active */ | ||
1065 | /* reset ending CCWs and setup TIC CCWs */ | ||
1066 | p_end->read1_nop2.cmd_code = CCW_CLAW_CMD_READFF; | ||
1067 | p_end->read1_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP; | ||
1068 | p_last->r_TIC_1.cda = (__u32)__pa(&p_end->read1_nop1); | ||
1069 | p_last->r_TIC_2.cda = (__u32)__pa(&p_end->read1_nop1); | ||
1070 | p_end->read1_nop2.cda=0; | ||
1071 | p_end->read1_nop2.count=1; | ||
1072 | } | ||
1073 | |||
1074 | if ( privptr-> p_read_active_first ==NULL ) { | ||
1075 | privptr->p_read_active_first = p_first; /* set new first */ | ||
1076 | privptr->p_read_active_last = p_last; /* set new last */ | ||
1077 | } | ||
1078 | else { | ||
1079 | |||
1080 | /* set up TIC ccw */ | ||
1081 | temp_ccw.cda= (__u32)__pa(&p_first->read); | ||
1082 | temp_ccw.count=0; | ||
1083 | temp_ccw.flags=0; | ||
1084 | temp_ccw.cmd_code = CCW_CLAW_CMD_TIC; | ||
1085 | |||
1086 | |||
1087 | if (p_end->read1) { | ||
1088 | |||
1089 | /* first set of CCW's is chained to the new read */ | ||
1090 | /* chain, so the second set is chained to the active chain. */ | ||
1091 | /* Therefore modify the second set to point to the new */ | ||
1092 | /* read chain set up TIC CCWs */ | ||
1093 | /* make sure we update the CCW so channel doesn't fetch it */ | ||
1094 | /* when it's only half done */ | ||
1095 | memcpy( &p_end->read2_nop2, &temp_ccw , | ||
1096 | sizeof(struct ccw1)); | ||
1097 | privptr->p_read_active_last->r_TIC_1.cda= | ||
1098 | (__u32)__pa(&p_first->read); | ||
1099 | privptr->p_read_active_last->r_TIC_2.cda= | ||
1100 | (__u32)__pa(&p_first->read); | ||
1101 | } | ||
1102 | else { | ||
1103 | /* make sure we update the CCW so channel doesn't */ | ||
1104 | /* fetch it when it is only half done */ | ||
1105 | memcpy( &p_end->read1_nop2, &temp_ccw , | ||
1106 | sizeof(struct ccw1)); | ||
1107 | privptr->p_read_active_last->r_TIC_1.cda= | ||
1108 | (__u32)__pa(&p_first->read); | ||
1109 | privptr->p_read_active_last->r_TIC_2.cda= | ||
1110 | (__u32)__pa(&p_first->read); | ||
1111 | } | ||
1112 | /* chain in new set of blocks */ | ||
1113 | privptr->p_read_active_last->next = p_first; | ||
1114 | privptr->p_read_active_last=p_last; | ||
1115 | } /* end of if ( privptr-> p_read_active_first ==NULL) */ | ||
1116 | CLAW_DBF_TEXT(4, trace, "addexit"); | ||
1117 | return 0; | ||
1118 | } /* end of add_claw_reads */ | ||
1119 | |||
1120 | /*-------------------------------------------------------------------* | ||
1121 | * ccw_check_return_code * | ||
1122 | * * | ||
1123 | *-------------------------------------------------------------------*/ | ||
1124 | |||
1125 | static void | ||
1126 | ccw_check_return_code(struct ccw_device *cdev, int return_code) | ||
1127 | { | ||
1128 | CLAW_DBF_TEXT(4, trace, "ccwret"); | ||
1129 | if (return_code != 0) { | ||
1130 | switch (return_code) { | ||
1131 | case -EBUSY: /* BUSY is a transient state no action needed */ | ||
1132 | break; | ||
1133 | case -ENODEV: | ||
1134 | dev_err(&cdev->dev, "The remote channel adapter is not" | ||
1135 | " available\n"); | ||
1136 | break; | ||
1137 | case -EINVAL: | ||
1138 | dev_err(&cdev->dev, | ||
1139 | "The status of the remote channel adapter" | ||
1140 | " is not valid\n"); | ||
1141 | break; | ||
1142 | default: | ||
1143 | dev_err(&cdev->dev, "The common device layer" | ||
1144 | " returned error code %d\n", | ||
1145 | return_code); | ||
1146 | } | ||
1147 | } | ||
1148 | CLAW_DBF_TEXT(4, trace, "ccwret"); | ||
1149 | } /* end of ccw_check_return_code */ | ||
1150 | |||
1151 | /*-------------------------------------------------------------------* | ||
1152 | * ccw_check_unit_check * | ||
1153 | *--------------------------------------------------------------------*/ | ||
1154 | |||
1155 | static void | ||
1156 | ccw_check_unit_check(struct chbk * p_ch, unsigned char sense ) | ||
1157 | { | ||
1158 | struct net_device *ndev = p_ch->ndev; | ||
1159 | struct device *dev = &p_ch->cdev->dev; | ||
1160 | |||
1161 | CLAW_DBF_TEXT(4, trace, "unitchek"); | ||
1162 | dev_warn(dev, "The communication peer of %s disconnected\n", | ||
1163 | ndev->name); | ||
1164 | |||
1165 | if (sense & 0x40) { | ||
1166 | if (sense & 0x01) { | ||
1167 | dev_warn(dev, "The remote channel adapter for" | ||
1168 | " %s has been reset\n", | ||
1169 | ndev->name); | ||
1170 | } | ||
1171 | } else if (sense & 0x20) { | ||
1172 | if (sense & 0x04) { | ||
1173 | dev_warn(dev, "A data streaming timeout occurred" | ||
1174 | " for %s\n", | ||
1175 | ndev->name); | ||
1176 | } else if (sense & 0x10) { | ||
1177 | dev_warn(dev, "The remote channel adapter for %s" | ||
1178 | " is faulty\n", | ||
1179 | ndev->name); | ||
1180 | } else { | ||
1181 | dev_warn(dev, "A data transfer parity error occurred" | ||
1182 | " for %s\n", | ||
1183 | ndev->name); | ||
1184 | } | ||
1185 | } else if (sense & 0x10) { | ||
1186 | dev_warn(dev, "A read data parity error occurred" | ||
1187 | " for %s\n", | ||
1188 | ndev->name); | ||
1189 | } | ||
1190 | |||
1191 | } /* end of ccw_check_unit_check */ | ||
1192 | |||
1193 | /*-------------------------------------------------------------------* | ||
1194 | * find_link * | ||
1195 | *--------------------------------------------------------------------*/ | ||
1196 | static int | ||
1197 | find_link(struct net_device *dev, char *host_name, char *ws_name ) | ||
1198 | { | ||
1199 | struct claw_privbk *privptr; | ||
1200 | struct claw_env *p_env; | ||
1201 | int rc=0; | ||
1202 | |||
1203 | CLAW_DBF_TEXT(2, setup, "findlink"); | ||
1204 | privptr = dev->ml_priv; | ||
1205 | p_env=privptr->p_env; | ||
1206 | switch (p_env->packing) | ||
1207 | { | ||
1208 | case PACKING_ASK: | ||
1209 | if ((memcmp(WS_APPL_NAME_PACKED, host_name, 8)!=0) || | ||
1210 | (memcmp(WS_APPL_NAME_PACKED, ws_name, 8)!=0 )) | ||
1211 | rc = EINVAL; | ||
1212 | break; | ||
1213 | case DO_PACKED: | ||
1214 | case PACK_SEND: | ||
1215 | if ((memcmp(WS_APPL_NAME_IP_NAME, host_name, 8)!=0) || | ||
1216 | (memcmp(WS_APPL_NAME_IP_NAME, ws_name, 8)!=0 )) | ||
1217 | rc = EINVAL; | ||
1218 | break; | ||
1219 | default: | ||
1220 | if ((memcmp(HOST_APPL_NAME, host_name, 8)!=0) || | ||
1221 | (memcmp(p_env->api_type , ws_name, 8)!=0)) | ||
1222 | rc = EINVAL; | ||
1223 | break; | ||
1224 | } | ||
1225 | |||
1226 | return rc; | ||
1227 | } /* end of find_link */ | ||
1228 | |||
1229 | /*-------------------------------------------------------------------* | ||
1230 | * claw_hw_tx * | ||
1231 | * * | ||
1232 | * * | ||
1233 | *-------------------------------------------------------------------*/ | ||
1234 | |||
1235 | static int | ||
1236 | claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid) | ||
1237 | { | ||
1238 | int rc=0; | ||
1239 | struct claw_privbk *privptr; | ||
1240 | struct ccwbk *p_this_ccw; | ||
1241 | struct ccwbk *p_first_ccw; | ||
1242 | struct ccwbk *p_last_ccw; | ||
1243 | __u32 numBuffers; | ||
1244 | signed long len_of_data; | ||
1245 | unsigned long bytesInThisBuffer; | ||
1246 | unsigned char *pDataAddress; | ||
1247 | struct endccw *pEnd; | ||
1248 | struct ccw1 tempCCW; | ||
1249 | struct claw_env *p_env; | ||
1250 | struct clawph *pk_head; | ||
1251 | struct chbk *ch; | ||
1252 | |||
1253 | CLAW_DBF_TEXT(4, trace, "hw_tx"); | ||
1254 | privptr = (struct claw_privbk *)(dev->ml_priv); | ||
1255 | p_env =privptr->p_env; | ||
1256 | claw_free_wrt_buf(dev); /* Clean up free chain if posible */ | ||
1257 | /* scan the write queue to free any completed write packets */ | ||
1258 | p_first_ccw=NULL; | ||
1259 | p_last_ccw=NULL; | ||
1260 | if ((p_env->packing >= PACK_SEND) && | ||
1261 | (skb->cb[1] != 'P')) { | ||
1262 | skb_push(skb,sizeof(struct clawph)); | ||
1263 | pk_head=(struct clawph *)skb->data; | ||
1264 | pk_head->len=skb->len-sizeof(struct clawph); | ||
1265 | if (pk_head->len%4) { | ||
1266 | pk_head->len+= 4-(pk_head->len%4); | ||
1267 | skb_pad(skb,4-(pk_head->len%4)); | ||
1268 | skb_put(skb,4-(pk_head->len%4)); | ||
1269 | } | ||
1270 | if (p_env->packing == DO_PACKED) | ||
1271 | pk_head->link_num = linkid; | ||
1272 | else | ||
1273 | pk_head->link_num = 0; | ||
1274 | pk_head->flag = 0x00; | ||
1275 | skb_pad(skb,4); | ||
1276 | skb->cb[1] = 'P'; | ||
1277 | } | ||
1278 | if (linkid == 0) { | ||
1279 | if (claw_check_busy(dev)) { | ||
1280 | if (privptr->write_free_count!=0) { | ||
1281 | claw_clear_busy(dev); | ||
1282 | } | ||
1283 | else { | ||
1284 | claw_strt_out_IO(dev ); | ||
1285 | claw_free_wrt_buf( dev ); | ||
1286 | if (privptr->write_free_count==0) { | ||
1287 | ch = &privptr->channel[WRITE_CHANNEL]; | ||
1288 | atomic_inc(&skb->users); | ||
1289 | skb_queue_tail(&ch->collect_queue, skb); | ||
1290 | goto Done; | ||
1291 | } | ||
1292 | else { | ||
1293 | claw_clear_busy(dev); | ||
1294 | } | ||
1295 | } | ||
1296 | } | ||
1297 | /* tx lock */ | ||
1298 | if (claw_test_and_setbit_busy(TB_TX,dev)) { /* set to busy */ | ||
1299 | ch = &privptr->channel[WRITE_CHANNEL]; | ||
1300 | atomic_inc(&skb->users); | ||
1301 | skb_queue_tail(&ch->collect_queue, skb); | ||
1302 | claw_strt_out_IO(dev ); | ||
1303 | rc=-EBUSY; | ||
1304 | goto Done2; | ||
1305 | } | ||
1306 | } | ||
1307 | /* See how many write buffers are required to hold this data */ | ||
1308 | numBuffers = DIV_ROUND_UP(skb->len, privptr->p_env->write_size); | ||
1309 | |||
1310 | /* If that number of buffers isn't available, give up for now */ | ||
1311 | if (privptr->write_free_count < numBuffers || | ||
1312 | privptr->p_write_free_chain == NULL ) { | ||
1313 | |||
1314 | claw_setbit_busy(TB_NOBUFFER,dev); | ||
1315 | ch = &privptr->channel[WRITE_CHANNEL]; | ||
1316 | atomic_inc(&skb->users); | ||
1317 | skb_queue_tail(&ch->collect_queue, skb); | ||
1318 | CLAW_DBF_TEXT(2, trace, "clawbusy"); | ||
1319 | goto Done2; | ||
1320 | } | ||
1321 | pDataAddress=skb->data; | ||
1322 | len_of_data=skb->len; | ||
1323 | |||
1324 | while (len_of_data > 0) { | ||
1325 | p_this_ccw=privptr->p_write_free_chain; /* get a block */ | ||
1326 | if (p_this_ccw == NULL) { /* lost the race */ | ||
1327 | ch = &privptr->channel[WRITE_CHANNEL]; | ||
1328 | atomic_inc(&skb->users); | ||
1329 | skb_queue_tail(&ch->collect_queue, skb); | ||
1330 | goto Done2; | ||
1331 | } | ||
1332 | privptr->p_write_free_chain=p_this_ccw->next; | ||
1333 | p_this_ccw->next=NULL; | ||
1334 | --privptr->write_free_count; /* -1 */ | ||
1335 | if (len_of_data >= privptr->p_env->write_size) | ||
1336 | bytesInThisBuffer = privptr->p_env->write_size; | ||
1337 | else | ||
1338 | bytesInThisBuffer = len_of_data; | ||
1339 | memcpy( p_this_ccw->p_buffer,pDataAddress, bytesInThisBuffer); | ||
1340 | len_of_data-=bytesInThisBuffer; | ||
1341 | pDataAddress+=(unsigned long)bytesInThisBuffer; | ||
1342 | /* setup write CCW */ | ||
1343 | p_this_ccw->write.cmd_code = (linkid * 8) +1; | ||
1344 | if (len_of_data>0) { | ||
1345 | p_this_ccw->write.cmd_code+=MORE_to_COME_FLAG; | ||
1346 | } | ||
1347 | p_this_ccw->write.count=bytesInThisBuffer; | ||
1348 | /* now add to end of this chain */ | ||
1349 | if (p_first_ccw==NULL) { | ||
1350 | p_first_ccw=p_this_ccw; | ||
1351 | } | ||
1352 | if (p_last_ccw!=NULL) { | ||
1353 | p_last_ccw->next=p_this_ccw; | ||
1354 | /* set up TIC ccws */ | ||
1355 | p_last_ccw->w_TIC_1.cda= | ||
1356 | (__u32)__pa(&p_this_ccw->write); | ||
1357 | } | ||
1358 | p_last_ccw=p_this_ccw; /* save new last block */ | ||
1359 | } | ||
1360 | |||
1361 | /* FirstCCW and LastCCW now contain a new set of write channel | ||
1362 | * programs to append to the running channel program | ||
1363 | */ | ||
1364 | |||
1365 | if (p_first_ccw!=NULL) { | ||
1366 | /* setup ending ccw sequence for this segment */ | ||
1367 | pEnd=privptr->p_end_ccw; | ||
1368 | if (pEnd->write1) { | ||
1369 | pEnd->write1=0x00; /* second end ccw is now active */ | ||
1370 | /* set up Tic CCWs */ | ||
1371 | p_last_ccw->w_TIC_1.cda= | ||
1372 | (__u32)__pa(&pEnd->write2_nop1); | ||
1373 | pEnd->write2_nop2.cmd_code = CCW_CLAW_CMD_READFF; | ||
1374 | pEnd->write2_nop2.flags = | ||
1375 | CCW_FLAG_SLI | CCW_FLAG_SKIP; | ||
1376 | pEnd->write2_nop2.cda=0; | ||
1377 | pEnd->write2_nop2.count=1; | ||
1378 | } | ||
1379 | else { /* end of if (pEnd->write1)*/ | ||
1380 | pEnd->write1=0x01; /* first end ccw is now active */ | ||
1381 | /* set up Tic CCWs */ | ||
1382 | p_last_ccw->w_TIC_1.cda= | ||
1383 | (__u32)__pa(&pEnd->write1_nop1); | ||
1384 | pEnd->write1_nop2.cmd_code = CCW_CLAW_CMD_READFF; | ||
1385 | pEnd->write1_nop2.flags = | ||
1386 | CCW_FLAG_SLI | CCW_FLAG_SKIP; | ||
1387 | pEnd->write1_nop2.cda=0; | ||
1388 | pEnd->write1_nop2.count=1; | ||
1389 | } /* end if if (pEnd->write1) */ | ||
1390 | |||
1391 | if (privptr->p_write_active_first==NULL ) { | ||
1392 | privptr->p_write_active_first=p_first_ccw; | ||
1393 | privptr->p_write_active_last=p_last_ccw; | ||
1394 | } | ||
1395 | else { | ||
1396 | /* set up Tic CCWs */ | ||
1397 | |||
1398 | tempCCW.cda=(__u32)__pa(&p_first_ccw->write); | ||
1399 | tempCCW.count=0; | ||
1400 | tempCCW.flags=0; | ||
1401 | tempCCW.cmd_code=CCW_CLAW_CMD_TIC; | ||
1402 | |||
1403 | if (pEnd->write1) { | ||
1404 | |||
1405 | /* | ||
1406 | * first set of ending CCW's is chained to the new write | ||
1407 | * chain, so the second set is chained to the active chain | ||
1408 | * Therefore modify the second set to point the new write chain. | ||
1409 | * make sure we update the CCW atomically | ||
1410 | * so channel does not fetch it when it's only half done | ||
1411 | */ | ||
1412 | memcpy( &pEnd->write2_nop2, &tempCCW , | ||
1413 | sizeof(struct ccw1)); | ||
1414 | privptr->p_write_active_last->w_TIC_1.cda= | ||
1415 | (__u32)__pa(&p_first_ccw->write); | ||
1416 | } | ||
1417 | else { | ||
1418 | |||
1419 | /*make sure we update the CCW atomically | ||
1420 | *so channel does not fetch it when it's only half done | ||
1421 | */ | ||
1422 | memcpy(&pEnd->write1_nop2, &tempCCW , | ||
1423 | sizeof(struct ccw1)); | ||
1424 | privptr->p_write_active_last->w_TIC_1.cda= | ||
1425 | (__u32)__pa(&p_first_ccw->write); | ||
1426 | |||
1427 | } /* end if if (pEnd->write1) */ | ||
1428 | |||
1429 | privptr->p_write_active_last->next=p_first_ccw; | ||
1430 | privptr->p_write_active_last=p_last_ccw; | ||
1431 | } | ||
1432 | |||
1433 | } /* endif (p_first_ccw!=NULL) */ | ||
1434 | dev_kfree_skb_any(skb); | ||
1435 | claw_strt_out_IO(dev ); | ||
1436 | /* if write free count is zero , set NOBUFFER */ | ||
1437 | if (privptr->write_free_count==0) { | ||
1438 | claw_setbit_busy(TB_NOBUFFER,dev); | ||
1439 | } | ||
1440 | Done2: | ||
1441 | claw_clearbit_busy(TB_TX,dev); | ||
1442 | Done: | ||
1443 | return(rc); | ||
1444 | } /* end of claw_hw_tx */ | ||
1445 | |||
1446 | /*-------------------------------------------------------------------* | ||
1447 | * * | ||
1448 | * init_ccw_bk * | ||
1449 | * * | ||
1450 | *--------------------------------------------------------------------*/ | ||
1451 | |||
1452 | static int | ||
1453 | init_ccw_bk(struct net_device *dev) | ||
1454 | { | ||
1455 | |||
1456 | __u32 ccw_blocks_required; | ||
1457 | __u32 ccw_blocks_perpage; | ||
1458 | __u32 ccw_pages_required; | ||
1459 | __u32 claw_reads_perpage=1; | ||
1460 | __u32 claw_read_pages; | ||
1461 | __u32 claw_writes_perpage=1; | ||
1462 | __u32 claw_write_pages; | ||
1463 | void *p_buff=NULL; | ||
1464 | struct ccwbk*p_free_chain; | ||
1465 | struct ccwbk*p_buf; | ||
1466 | struct ccwbk*p_last_CCWB; | ||
1467 | struct ccwbk*p_first_CCWB; | ||
1468 | struct endccw *p_endccw=NULL; | ||
1469 | addr_t real_address; | ||
1470 | struct claw_privbk *privptr = dev->ml_priv; | ||
1471 | struct clawh *pClawH=NULL; | ||
1472 | addr_t real_TIC_address; | ||
1473 | int i,j; | ||
1474 | CLAW_DBF_TEXT(4, trace, "init_ccw"); | ||
1475 | |||
1476 | /* initialize statistics field */ | ||
1477 | privptr->active_link_ID=0; | ||
1478 | /* initialize ccwbk pointers */ | ||
1479 | privptr->p_write_free_chain=NULL; /* pointer to free ccw chain*/ | ||
1480 | privptr->p_write_active_first=NULL; /* pointer to the first write ccw*/ | ||
1481 | privptr->p_write_active_last=NULL; /* pointer to the last write ccw*/ | ||
1482 | privptr->p_read_active_first=NULL; /* pointer to the first read ccw*/ | ||
1483 | privptr->p_read_active_last=NULL; /* pointer to the last read ccw */ | ||
1484 | privptr->p_end_ccw=NULL; /* pointer to ending ccw */ | ||
1485 | privptr->p_claw_signal_blk=NULL; /* pointer to signal block */ | ||
1486 | privptr->buffs_alloc = 0; | ||
1487 | memset(&privptr->end_ccw, 0x00, sizeof(struct endccw)); | ||
1488 | memset(&privptr->ctl_bk, 0x00, sizeof(struct clawctl)); | ||
1489 | /* initialize free write ccwbk counter */ | ||
1490 | privptr->write_free_count=0; /* number of free bufs on write chain */ | ||
1491 | p_last_CCWB = NULL; | ||
1492 | p_first_CCWB= NULL; | ||
1493 | /* | ||
1494 | * We need 1 CCW block for each read buffer, 1 for each | ||
1495 | * write buffer, plus 1 for ClawSignalBlock | ||
1496 | */ | ||
1497 | ccw_blocks_required = | ||
1498 | privptr->p_env->read_buffers+privptr->p_env->write_buffers+1; | ||
1499 | /* | ||
1500 | * compute number of CCW blocks that will fit in a page | ||
1501 | */ | ||
1502 | ccw_blocks_perpage= PAGE_SIZE / CCWBK_SIZE; | ||
1503 | ccw_pages_required= | ||
1504 | DIV_ROUND_UP(ccw_blocks_required, ccw_blocks_perpage); | ||
1505 | |||
1506 | /* | ||
1507 | * read and write sizes are set by 2 constants in claw.h | ||
1508 | * 4k and 32k. Unpacked values other than 4k are not going to | ||
1509 | * provide good performance. With packing buffers support 32k | ||
1510 | * buffers are used. | ||
1511 | */ | ||
1512 | if (privptr->p_env->read_size < PAGE_SIZE) { | ||
1513 | claw_reads_perpage = PAGE_SIZE / privptr->p_env->read_size; | ||
1514 | claw_read_pages = DIV_ROUND_UP(privptr->p_env->read_buffers, | ||
1515 | claw_reads_perpage); | ||
1516 | } | ||
1517 | else { /* > or equal */ | ||
1518 | privptr->p_buff_pages_perread = | ||
1519 | DIV_ROUND_UP(privptr->p_env->read_size, PAGE_SIZE); | ||
1520 | claw_read_pages = privptr->p_env->read_buffers * | ||
1521 | privptr->p_buff_pages_perread; | ||
1522 | } | ||
1523 | if (privptr->p_env->write_size < PAGE_SIZE) { | ||
1524 | claw_writes_perpage = | ||
1525 | PAGE_SIZE / privptr->p_env->write_size; | ||
1526 | claw_write_pages = DIV_ROUND_UP(privptr->p_env->write_buffers, | ||
1527 | claw_writes_perpage); | ||
1528 | |||
1529 | } | ||
1530 | else { /* > or equal */ | ||
1531 | privptr->p_buff_pages_perwrite = | ||
1532 | DIV_ROUND_UP(privptr->p_env->read_size, PAGE_SIZE); | ||
1533 | claw_write_pages = privptr->p_env->write_buffers * | ||
1534 | privptr->p_buff_pages_perwrite; | ||
1535 | } | ||
1536 | /* | ||
1537 | * allocate ccw_pages_required | ||
1538 | */ | ||
1539 | if (privptr->p_buff_ccw==NULL) { | ||
1540 | privptr->p_buff_ccw= | ||
1541 | (void *)__get_free_pages(__GFP_DMA, | ||
1542 | (int)pages_to_order_of_mag(ccw_pages_required )); | ||
1543 | if (privptr->p_buff_ccw==NULL) { | ||
1544 | return -ENOMEM; | ||
1545 | } | ||
1546 | privptr->p_buff_ccw_num=ccw_pages_required; | ||
1547 | } | ||
1548 | memset(privptr->p_buff_ccw, 0x00, | ||
1549 | privptr->p_buff_ccw_num * PAGE_SIZE); | ||
1550 | |||
1551 | /* | ||
1552 | * obtain ending ccw block address | ||
1553 | * | ||
1554 | */ | ||
1555 | privptr->p_end_ccw = (struct endccw *)&privptr->end_ccw; | ||
1556 | real_address = (__u32)__pa(privptr->p_end_ccw); | ||
1557 | /* Initialize ending CCW block */ | ||
1558 | p_endccw=privptr->p_end_ccw; | ||
1559 | p_endccw->real=real_address; | ||
1560 | p_endccw->write1=0x00; | ||
1561 | p_endccw->read1=0x00; | ||
1562 | |||
1563 | /* write1_nop1 */ | ||
1564 | p_endccw->write1_nop1.cmd_code = CCW_CLAW_CMD_NOP; | ||
1565 | p_endccw->write1_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC; | ||
1566 | p_endccw->write1_nop1.count = 1; | ||
1567 | p_endccw->write1_nop1.cda = 0; | ||
1568 | |||
1569 | /* write1_nop2 */ | ||
1570 | p_endccw->write1_nop2.cmd_code = CCW_CLAW_CMD_READFF; | ||
1571 | p_endccw->write1_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP; | ||
1572 | p_endccw->write1_nop2.count = 1; | ||
1573 | p_endccw->write1_nop2.cda = 0; | ||
1574 | |||
1575 | /* write2_nop1 */ | ||
1576 | p_endccw->write2_nop1.cmd_code = CCW_CLAW_CMD_NOP; | ||
1577 | p_endccw->write2_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC; | ||
1578 | p_endccw->write2_nop1.count = 1; | ||
1579 | p_endccw->write2_nop1.cda = 0; | ||
1580 | |||
1581 | /* write2_nop2 */ | ||
1582 | p_endccw->write2_nop2.cmd_code = CCW_CLAW_CMD_READFF; | ||
1583 | p_endccw->write2_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP; | ||
1584 | p_endccw->write2_nop2.count = 1; | ||
1585 | p_endccw->write2_nop2.cda = 0; | ||
1586 | |||
1587 | /* read1_nop1 */ | ||
1588 | p_endccw->read1_nop1.cmd_code = CCW_CLAW_CMD_NOP; | ||
1589 | p_endccw->read1_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC; | ||
1590 | p_endccw->read1_nop1.count = 1; | ||
1591 | p_endccw->read1_nop1.cda = 0; | ||
1592 | |||
1593 | /* read1_nop2 */ | ||
1594 | p_endccw->read1_nop2.cmd_code = CCW_CLAW_CMD_READFF; | ||
1595 | p_endccw->read1_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP; | ||
1596 | p_endccw->read1_nop2.count = 1; | ||
1597 | p_endccw->read1_nop2.cda = 0; | ||
1598 | |||
1599 | /* read2_nop1 */ | ||
1600 | p_endccw->read2_nop1.cmd_code = CCW_CLAW_CMD_NOP; | ||
1601 | p_endccw->read2_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC; | ||
1602 | p_endccw->read2_nop1.count = 1; | ||
1603 | p_endccw->read2_nop1.cda = 0; | ||
1604 | |||
1605 | /* read2_nop2 */ | ||
1606 | p_endccw->read2_nop2.cmd_code = CCW_CLAW_CMD_READFF; | ||
1607 | p_endccw->read2_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP; | ||
1608 | p_endccw->read2_nop2.count = 1; | ||
1609 | p_endccw->read2_nop2.cda = 0; | ||
1610 | |||
1611 | /* | ||
1612 | * Build a chain of CCWs | ||
1613 | * | ||
1614 | */ | ||
1615 | p_buff=privptr->p_buff_ccw; | ||
1616 | |||
1617 | p_free_chain=NULL; | ||
1618 | for (i=0 ; i < ccw_pages_required; i++ ) { | ||
1619 | real_address = (__u32)__pa(p_buff); | ||
1620 | p_buf=p_buff; | ||
1621 | for (j=0 ; j < ccw_blocks_perpage ; j++) { | ||
1622 | p_buf->next = p_free_chain; | ||
1623 | p_free_chain = p_buf; | ||
1624 | p_buf->real=(__u32)__pa(p_buf); | ||
1625 | ++p_buf; | ||
1626 | } | ||
1627 | p_buff+=PAGE_SIZE; | ||
1628 | } | ||
1629 | /* | ||
1630 | * Initialize ClawSignalBlock | ||
1631 | * | ||
1632 | */ | ||
1633 | if (privptr->p_claw_signal_blk==NULL) { | ||
1634 | privptr->p_claw_signal_blk=p_free_chain; | ||
1635 | p_free_chain=p_free_chain->next; | ||
1636 | pClawH=(struct clawh *)privptr->p_claw_signal_blk; | ||
1637 | pClawH->length=0xffff; | ||
1638 | pClawH->opcode=0xff; | ||
1639 | pClawH->flag=CLAW_BUSY; | ||
1640 | } | ||
1641 | |||
1642 | /* | ||
1643 | * allocate write_pages_required and add to free chain | ||
1644 | */ | ||
1645 | if (privptr->p_buff_write==NULL) { | ||
1646 | if (privptr->p_env->write_size < PAGE_SIZE) { | ||
1647 | privptr->p_buff_write= | ||
1648 | (void *)__get_free_pages(__GFP_DMA, | ||
1649 | (int)pages_to_order_of_mag(claw_write_pages )); | ||
1650 | if (privptr->p_buff_write==NULL) { | ||
1651 | privptr->p_buff_ccw=NULL; | ||
1652 | return -ENOMEM; | ||
1653 | } | ||
1654 | /* | ||
1655 | * Build CLAW write free chain | ||
1656 | * | ||
1657 | */ | ||
1658 | |||
1659 | memset(privptr->p_buff_write, 0x00, | ||
1660 | ccw_pages_required * PAGE_SIZE); | ||
1661 | privptr->p_write_free_chain=NULL; | ||
1662 | |||
1663 | p_buff=privptr->p_buff_write; | ||
1664 | |||
1665 | for (i=0 ; i< privptr->p_env->write_buffers ; i++) { | ||
1666 | p_buf = p_free_chain; /* get a CCW */ | ||
1667 | p_free_chain = p_buf->next; | ||
1668 | p_buf->next =privptr->p_write_free_chain; | ||
1669 | privptr->p_write_free_chain = p_buf; | ||
1670 | p_buf-> p_buffer = (struct clawbuf *)p_buff; | ||
1671 | p_buf-> write.cda = (__u32)__pa(p_buff); | ||
1672 | p_buf-> write.flags = CCW_FLAG_SLI | CCW_FLAG_CC; | ||
1673 | p_buf-> w_read_FF.cmd_code = CCW_CLAW_CMD_READFF; | ||
1674 | p_buf-> w_read_FF.flags = CCW_FLAG_SLI | CCW_FLAG_CC; | ||
1675 | p_buf-> w_read_FF.count = 1; | ||
1676 | p_buf-> w_read_FF.cda = | ||
1677 | (__u32)__pa(&p_buf-> header.flag); | ||
1678 | p_buf-> w_TIC_1.cmd_code = CCW_CLAW_CMD_TIC; | ||
1679 | p_buf-> w_TIC_1.flags = 0; | ||
1680 | p_buf-> w_TIC_1.count = 0; | ||
1681 | |||
1682 | if (((unsigned long)p_buff + | ||
1683 | privptr->p_env->write_size) >= | ||
1684 | ((unsigned long)(p_buff+2* | ||
1685 | (privptr->p_env->write_size) - 1) & PAGE_MASK)) { | ||
1686 | p_buff = p_buff+privptr->p_env->write_size; | ||
1687 | } | ||
1688 | } | ||
1689 | } | ||
1690 | else /* Buffers are => PAGE_SIZE. 1 buff per get_free_pages */ | ||
1691 | { | ||
1692 | privptr->p_write_free_chain=NULL; | ||
1693 | for (i = 0; i< privptr->p_env->write_buffers ; i++) { | ||
1694 | p_buff=(void *)__get_free_pages(__GFP_DMA, | ||
1695 | (int)pages_to_order_of_mag( | ||
1696 | privptr->p_buff_pages_perwrite) ); | ||
1697 | if (p_buff==NULL) { | ||
1698 | free_pages((unsigned long)privptr->p_buff_ccw, | ||
1699 | (int)pages_to_order_of_mag( | ||
1700 | privptr->p_buff_ccw_num)); | ||
1701 | privptr->p_buff_ccw=NULL; | ||
1702 | p_buf=privptr->p_buff_write; | ||
1703 | while (p_buf!=NULL) { | ||
1704 | free_pages((unsigned long) | ||
1705 | p_buf->p_buffer, | ||
1706 | (int)pages_to_order_of_mag( | ||
1707 | privptr->p_buff_pages_perwrite)); | ||
1708 | p_buf=p_buf->next; | ||
1709 | } | ||
1710 | return -ENOMEM; | ||
1711 | } /* Error on get_pages */ | ||
1712 | memset(p_buff, 0x00, privptr->p_env->write_size ); | ||
1713 | p_buf = p_free_chain; | ||
1714 | p_free_chain = p_buf->next; | ||
1715 | p_buf->next = privptr->p_write_free_chain; | ||
1716 | privptr->p_write_free_chain = p_buf; | ||
1717 | privptr->p_buff_write = p_buf; | ||
1718 | p_buf->p_buffer=(struct clawbuf *)p_buff; | ||
1719 | p_buf-> write.cda = (__u32)__pa(p_buff); | ||
1720 | p_buf-> write.flags = CCW_FLAG_SLI | CCW_FLAG_CC; | ||
1721 | p_buf-> w_read_FF.cmd_code = CCW_CLAW_CMD_READFF; | ||
1722 | p_buf-> w_read_FF.flags = CCW_FLAG_SLI | CCW_FLAG_CC; | ||
1723 | p_buf-> w_read_FF.count = 1; | ||
1724 | p_buf-> w_read_FF.cda = | ||
1725 | (__u32)__pa(&p_buf-> header.flag); | ||
1726 | p_buf-> w_TIC_1.cmd_code = CCW_CLAW_CMD_TIC; | ||
1727 | p_buf-> w_TIC_1.flags = 0; | ||
1728 | p_buf-> w_TIC_1.count = 0; | ||
1729 | } /* for all write_buffers */ | ||
1730 | |||
1731 | } /* else buffers are PAGE_SIZE or bigger */ | ||
1732 | |||
1733 | } | ||
1734 | privptr->p_buff_write_num=claw_write_pages; | ||
1735 | privptr->write_free_count=privptr->p_env->write_buffers; | ||
1736 | |||
1737 | |||
1738 | /* | ||
1739 | * allocate read_pages_required and chain to free chain | ||
1740 | */ | ||
1741 | if (privptr->p_buff_read==NULL) { | ||
1742 | if (privptr->p_env->read_size < PAGE_SIZE) { | ||
1743 | privptr->p_buff_read= | ||
1744 | (void *)__get_free_pages(__GFP_DMA, | ||
1745 | (int)pages_to_order_of_mag(claw_read_pages) ); | ||
1746 | if (privptr->p_buff_read==NULL) { | ||
1747 | free_pages((unsigned long)privptr->p_buff_ccw, | ||
1748 | (int)pages_to_order_of_mag( | ||
1749 | privptr->p_buff_ccw_num)); | ||
1750 | /* free the write pages size is < page size */ | ||
1751 | free_pages((unsigned long)privptr->p_buff_write, | ||
1752 | (int)pages_to_order_of_mag( | ||
1753 | privptr->p_buff_write_num)); | ||
1754 | privptr->p_buff_ccw=NULL; | ||
1755 | privptr->p_buff_write=NULL; | ||
1756 | return -ENOMEM; | ||
1757 | } | ||
1758 | memset(privptr->p_buff_read, 0x00, claw_read_pages * PAGE_SIZE); | ||
1759 | privptr->p_buff_read_num=claw_read_pages; | ||
1760 | /* | ||
1761 | * Build CLAW read free chain | ||
1762 | * | ||
1763 | */ | ||
1764 | p_buff=privptr->p_buff_read; | ||
1765 | for (i=0 ; i< privptr->p_env->read_buffers ; i++) { | ||
1766 | p_buf = p_free_chain; | ||
1767 | p_free_chain = p_buf->next; | ||
1768 | |||
1769 | if (p_last_CCWB==NULL) { | ||
1770 | p_buf->next=NULL; | ||
1771 | real_TIC_address=0; | ||
1772 | p_last_CCWB=p_buf; | ||
1773 | } | ||
1774 | else { | ||
1775 | p_buf->next=p_first_CCWB; | ||
1776 | real_TIC_address= | ||
1777 | (__u32)__pa(&p_first_CCWB -> read ); | ||
1778 | } | ||
1779 | |||
1780 | p_first_CCWB=p_buf; | ||
1781 | |||
1782 | p_buf->p_buffer=(struct clawbuf *)p_buff; | ||
1783 | /* initialize read command */ | ||
1784 | p_buf-> read.cmd_code = CCW_CLAW_CMD_READ; | ||
1785 | p_buf-> read.cda = (__u32)__pa(p_buff); | ||
1786 | p_buf-> read.flags = CCW_FLAG_SLI | CCW_FLAG_CC; | ||
1787 | p_buf-> read.count = privptr->p_env->read_size; | ||
1788 | |||
1789 | /* initialize read_h command */ | ||
1790 | p_buf-> read_h.cmd_code = CCW_CLAW_CMD_READHEADER; | ||
1791 | p_buf-> read_h.cda = | ||
1792 | (__u32)__pa(&(p_buf->header)); | ||
1793 | p_buf-> read_h.flags = CCW_FLAG_SLI | CCW_FLAG_CC; | ||
1794 | p_buf-> read_h.count = sizeof(struct clawh); | ||
1795 | |||
1796 | /* initialize Signal command */ | ||
1797 | p_buf-> signal.cmd_code = CCW_CLAW_CMD_SIGNAL_SMOD; | ||
1798 | p_buf-> signal.cda = | ||
1799 | (__u32)__pa(&(pClawH->flag)); | ||
1800 | p_buf-> signal.flags = CCW_FLAG_SLI | CCW_FLAG_CC; | ||
1801 | p_buf-> signal.count = 1; | ||
1802 | |||
1803 | /* initialize r_TIC_1 command */ | ||
1804 | p_buf-> r_TIC_1.cmd_code = CCW_CLAW_CMD_TIC; | ||
1805 | p_buf-> r_TIC_1.cda = (__u32)real_TIC_address; | ||
1806 | p_buf-> r_TIC_1.flags = 0; | ||
1807 | p_buf-> r_TIC_1.count = 0; | ||
1808 | |||
1809 | /* initialize r_read_FF command */ | ||
1810 | p_buf-> r_read_FF.cmd_code = CCW_CLAW_CMD_READFF; | ||
1811 | p_buf-> r_read_FF.cda = | ||
1812 | (__u32)__pa(&(pClawH->flag)); | ||
1813 | p_buf-> r_read_FF.flags = | ||
1814 | CCW_FLAG_SLI | CCW_FLAG_CC | CCW_FLAG_PCI; | ||
1815 | p_buf-> r_read_FF.count = 1; | ||
1816 | |||
1817 | /* initialize r_TIC_2 */ | ||
1818 | memcpy(&p_buf->r_TIC_2, | ||
1819 | &p_buf->r_TIC_1, sizeof(struct ccw1)); | ||
1820 | |||
1821 | /* initialize Header */ | ||
1822 | p_buf->header.length=0xffff; | ||
1823 | p_buf->header.opcode=0xff; | ||
1824 | p_buf->header.flag=CLAW_PENDING; | ||
1825 | |||
1826 | if (((unsigned long)p_buff+privptr->p_env->read_size) >= | ||
1827 | ((unsigned long)(p_buff+2*(privptr->p_env->read_size) | ||
1828 | -1) | ||
1829 | & PAGE_MASK)) { | ||
1830 | p_buff= p_buff+privptr->p_env->read_size; | ||
1831 | } | ||
1832 | else { | ||
1833 | p_buff= | ||
1834 | (void *)((unsigned long) | ||
1835 | (p_buff+2*(privptr->p_env->read_size)-1) | ||
1836 | & PAGE_MASK) ; | ||
1837 | } | ||
1838 | } /* for read_buffers */ | ||
1839 | } /* read_size < PAGE_SIZE */ | ||
1840 | else { /* read Size >= PAGE_SIZE */ | ||
1841 | for (i=0 ; i< privptr->p_env->read_buffers ; i++) { | ||
1842 | p_buff = (void *)__get_free_pages(__GFP_DMA, | ||
1843 | (int)pages_to_order_of_mag( | ||
1844 | privptr->p_buff_pages_perread)); | ||
1845 | if (p_buff==NULL) { | ||
1846 | free_pages((unsigned long)privptr->p_buff_ccw, | ||
1847 | (int)pages_to_order_of_mag(privptr-> | ||
1848 | p_buff_ccw_num)); | ||
1849 | /* free the write pages */ | ||
1850 | p_buf=privptr->p_buff_write; | ||
1851 | while (p_buf!=NULL) { | ||
1852 | free_pages( | ||
1853 | (unsigned long)p_buf->p_buffer, | ||
1854 | (int)pages_to_order_of_mag( | ||
1855 | privptr->p_buff_pages_perwrite)); | ||
1856 | p_buf=p_buf->next; | ||
1857 | } | ||
1858 | /* free any read pages already alloc */ | ||
1859 | p_buf=privptr->p_buff_read; | ||
1860 | while (p_buf!=NULL) { | ||
1861 | free_pages( | ||
1862 | (unsigned long)p_buf->p_buffer, | ||
1863 | (int)pages_to_order_of_mag( | ||
1864 | privptr->p_buff_pages_perread)); | ||
1865 | p_buf=p_buf->next; | ||
1866 | } | ||
1867 | privptr->p_buff_ccw=NULL; | ||
1868 | privptr->p_buff_write=NULL; | ||
1869 | return -ENOMEM; | ||
1870 | } | ||
1871 | memset(p_buff, 0x00, privptr->p_env->read_size); | ||
1872 | p_buf = p_free_chain; | ||
1873 | privptr->p_buff_read = p_buf; | ||
1874 | p_free_chain = p_buf->next; | ||
1875 | |||
1876 | if (p_last_CCWB==NULL) { | ||
1877 | p_buf->next=NULL; | ||
1878 | real_TIC_address=0; | ||
1879 | p_last_CCWB=p_buf; | ||
1880 | } | ||
1881 | else { | ||
1882 | p_buf->next=p_first_CCWB; | ||
1883 | real_TIC_address= | ||
1884 | (addr_t)__pa( | ||
1885 | &p_first_CCWB -> read ); | ||
1886 | } | ||
1887 | |||
1888 | p_first_CCWB=p_buf; | ||
1889 | /* save buff address */ | ||
1890 | p_buf->p_buffer=(struct clawbuf *)p_buff; | ||
1891 | /* initialize read command */ | ||
1892 | p_buf-> read.cmd_code = CCW_CLAW_CMD_READ; | ||
1893 | p_buf-> read.cda = (__u32)__pa(p_buff); | ||
1894 | p_buf-> read.flags = CCW_FLAG_SLI | CCW_FLAG_CC; | ||
1895 | p_buf-> read.count = privptr->p_env->read_size; | ||
1896 | |||
1897 | /* initialize read_h command */ | ||
1898 | p_buf-> read_h.cmd_code = CCW_CLAW_CMD_READHEADER; | ||
1899 | p_buf-> read_h.cda = | ||
1900 | (__u32)__pa(&(p_buf->header)); | ||
1901 | p_buf-> read_h.flags = CCW_FLAG_SLI | CCW_FLAG_CC; | ||
1902 | p_buf-> read_h.count = sizeof(struct clawh); | ||
1903 | |||
1904 | /* initialize Signal command */ | ||
1905 | p_buf-> signal.cmd_code = CCW_CLAW_CMD_SIGNAL_SMOD; | ||
1906 | p_buf-> signal.cda = | ||
1907 | (__u32)__pa(&(pClawH->flag)); | ||
1908 | p_buf-> signal.flags = CCW_FLAG_SLI | CCW_FLAG_CC; | ||
1909 | p_buf-> signal.count = 1; | ||
1910 | |||
1911 | /* initialize r_TIC_1 command */ | ||
1912 | p_buf-> r_TIC_1.cmd_code = CCW_CLAW_CMD_TIC; | ||
1913 | p_buf-> r_TIC_1.cda = (__u32)real_TIC_address; | ||
1914 | p_buf-> r_TIC_1.flags = 0; | ||
1915 | p_buf-> r_TIC_1.count = 0; | ||
1916 | |||
1917 | /* initialize r_read_FF command */ | ||
1918 | p_buf-> r_read_FF.cmd_code = CCW_CLAW_CMD_READFF; | ||
1919 | p_buf-> r_read_FF.cda = | ||
1920 | (__u32)__pa(&(pClawH->flag)); | ||
1921 | p_buf-> r_read_FF.flags = | ||
1922 | CCW_FLAG_SLI | CCW_FLAG_CC | CCW_FLAG_PCI; | ||
1923 | p_buf-> r_read_FF.count = 1; | ||
1924 | |||
1925 | /* initialize r_TIC_2 */ | ||
1926 | memcpy(&p_buf->r_TIC_2, &p_buf->r_TIC_1, | ||
1927 | sizeof(struct ccw1)); | ||
1928 | |||
1929 | /* initialize Header */ | ||
1930 | p_buf->header.length=0xffff; | ||
1931 | p_buf->header.opcode=0xff; | ||
1932 | p_buf->header.flag=CLAW_PENDING; | ||
1933 | |||
1934 | } /* For read_buffers */ | ||
1935 | } /* read_size >= PAGE_SIZE */ | ||
1936 | } /* pBuffread = NULL */ | ||
1937 | add_claw_reads( dev ,p_first_CCWB , p_last_CCWB); | ||
1938 | privptr->buffs_alloc = 1; | ||
1939 | |||
1940 | return 0; | ||
1941 | } /* end of init_ccw_bk */ | ||
1942 | |||
1943 | /*-------------------------------------------------------------------* | ||
1944 | * * | ||
1945 | * probe_error * | ||
1946 | * * | ||
1947 | *--------------------------------------------------------------------*/ | ||
1948 | |||
1949 | static void | ||
1950 | probe_error( struct ccwgroup_device *cgdev) | ||
1951 | { | ||
1952 | struct claw_privbk *privptr; | ||
1953 | |||
1954 | CLAW_DBF_TEXT(4, trace, "proberr"); | ||
1955 | privptr = dev_get_drvdata(&cgdev->dev); | ||
1956 | if (privptr != NULL) { | ||
1957 | dev_set_drvdata(&cgdev->dev, NULL); | ||
1958 | kfree(privptr->p_env); | ||
1959 | kfree(privptr->p_mtc_envelope); | ||
1960 | kfree(privptr); | ||
1961 | } | ||
1962 | } /* probe_error */ | ||
1963 | |||
1964 | /*-------------------------------------------------------------------* | ||
1965 | * claw_process_control * | ||
1966 | * * | ||
1967 | * * | ||
1968 | *--------------------------------------------------------------------*/ | ||
1969 | |||
/*
 * claw_process_control - handle one inbound CLAW control packet.
 *
 * @dev:   net device the packet arrived on.
 * @p_ccw: CCW block whose p_buffer holds the control record.
 *
 * Copies the control record out of the read buffer (skipping the 4-byte
 * pack header when packing is active), then dispatches on the command
 * code: system validation request/response, connection request/
 * response/confirm, disconnect, and error.  Returns 0 in all paths
 * except a rejected CONNECTION_RESPONSE, which returns 1.
 */
static int
claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
{

	struct clawbuf *p_buf;
	struct clawctl  ctlbk;
	struct clawctl *p_ctlbk;
	char    temp_host_name[8];
	char    temp_ws_name[8];
	struct claw_privbk *privptr;
	struct claw_env *p_env;
	struct sysval *p_sysval;
	struct conncmd *p_connect=NULL;
	int rc;
	struct chbk *p_ch = NULL;
	struct device *tdev;
	CLAW_DBF_TEXT(2, setup, "clw_cntl");
	udelay(1000);  /* Wait a ms for the control packets to
	*catch up to each other */
	privptr = dev->ml_priv;
	p_env=privptr->p_env;
	tdev = &privptr->channel[READ_CHANNEL].cdev->dev;
	/* local copies of the configured names, used for peer validation */
	memcpy( &temp_host_name, p_env->host_name, 8);
	memcpy( &temp_ws_name, p_env->adapter_name , 8);
	dev_info(tdev, "%s: CLAW device %.8s: "
		"Received Control Packet\n",
		dev->name, temp_ws_name);
	/* device is being released - drop the packet */
	if (privptr->release_pend==1) {
		return 0;
	}
	p_buf=p_ccw->p_buffer;
	p_ctlbk=&ctlbk;
	if (p_env->packing == DO_PACKED) { /* packing in progress?*/
		/* control record sits after the 4-byte pack header */
		memcpy(p_ctlbk, &p_buf->buffer[4], sizeof(struct clawctl));
	} else {
		memcpy(p_ctlbk, p_buf, sizeof(struct clawctl));
	}
	switch (p_ctlbk->command)
	{
	case SYSTEM_VALIDATE_REQUEST:
		/* peer asks us to validate names, versions, frame sizes */
		if (p_ctlbk->version != CLAW_VERSION_ID) {
			claw_snd_sys_validate_rsp(dev, p_ctlbk,
				CLAW_RC_WRONG_VERSION);
			dev_warn(tdev, "The communication peer of %s"
				" uses an incorrect API version %d\n",
				dev->name, p_ctlbk->version);
		}
		p_sysval = (struct sysval *)&(p_ctlbk->data);
		dev_info(tdev, "%s: Recv Sys Validate Request: "
			"Vers=%d,link_id=%d,Corr=%d,WS name=%.8s,"
			"Host name=%.8s\n",
			dev->name, p_ctlbk->version,
			p_ctlbk->linkid,
			p_ctlbk->correlator,
			p_sysval->WS_name,
			p_sysval->host_name);
		/* host name in the request must match our configured one */
		if (memcmp(temp_host_name, p_sysval->host_name, 8)) {
			claw_snd_sys_validate_rsp(dev, p_ctlbk,
				CLAW_RC_NAME_MISMATCH);
			CLAW_DBF_TEXT(2, setup, "HSTBAD");
			CLAW_DBF_TEXT_(2, setup, "%s", p_sysval->host_name);
			CLAW_DBF_TEXT_(2, setup, "%s", temp_host_name);
			dev_warn(tdev,
				"Host name %s for %s does not match the"
				" remote adapter name %s\n",
				p_sysval->host_name,
				dev->name,
				temp_host_name);
		}
		/* same check for the workstation (adapter) name */
		if (memcmp(temp_ws_name, p_sysval->WS_name, 8)) {
			claw_snd_sys_validate_rsp(dev, p_ctlbk,
				CLAW_RC_NAME_MISMATCH);
			CLAW_DBF_TEXT(2, setup, "WSNBAD");
			CLAW_DBF_TEXT_(2, setup, "%s", p_sysval->WS_name);
			CLAW_DBF_TEXT_(2, setup, "%s", temp_ws_name);
			dev_warn(tdev, "Adapter name %s for %s does not match"
				" the remote host name %s\n",
				p_sysval->WS_name,
				dev->name,
				temp_ws_name);
		}
		/* frame-size checks only apply when packing is off */
		if ((p_sysval->write_frame_size < p_env->write_size) &&
		    (p_env->packing == 0)) {
			claw_snd_sys_validate_rsp(dev, p_ctlbk,
				CLAW_RC_HOST_RCV_TOO_SMALL);
			dev_warn(tdev,
				"The local write buffer is smaller than the"
				" remote read buffer\n");
			CLAW_DBF_TEXT(2, setup, "wrtszbad");
		}
		if ((p_sysval->read_frame_size < p_env->read_size) &&
		    (p_env->packing == 0)) {
			claw_snd_sys_validate_rsp(dev, p_ctlbk,
				CLAW_RC_HOST_RCV_TOO_SMALL);
			dev_warn(tdev,
				"The local read buffer is smaller than the"
				" remote write buffer\n");
			CLAW_DBF_TEXT(2, setup, "rdsizbad");
		}
		/* NOTE(review): a final rc=0 response is sent even after a
		 * mismatch above triggered a non-zero response - presumably
		 * intentional protocol behavior; confirm against CLAW spec */
		claw_snd_sys_validate_rsp(dev, p_ctlbk, 0);
		dev_info(tdev,
			"CLAW device %.8s: System validate"
			" completed.\n", temp_ws_name);
		dev_info(tdev,
			"%s: sys Validate Rsize:%d Wsize:%d\n",
			dev->name, p_sysval->read_frame_size,
			p_sysval->write_frame_size);
		privptr->system_validate_comp = 1;
		if (strncmp(p_env->api_type, WS_APPL_NAME_PACKED, 6) == 0)
			p_env->packing = PACKING_ASK;
		claw_strt_conn_req(dev);
		break;
	case SYSTEM_VALIDATE_RESPONSE:
		/* peer's verdict on our earlier validate request */
		p_sysval = (struct sysval *)&(p_ctlbk->data);
		dev_info(tdev,
			"Settings for %s validated (version=%d, "
			"remote device=%d, rc=%d, adapter name=%.8s, "
			"host name=%.8s)\n",
			dev->name,
			p_ctlbk->version,
			p_ctlbk->correlator,
			p_ctlbk->rc,
			p_sysval->WS_name,
			p_sysval->host_name);
		switch (p_ctlbk->rc) {
		case 0:
			dev_info(tdev, "%s: CLAW device "
				"%.8s: System validate completed.\n",
				dev->name, temp_ws_name);
			if (privptr->system_validate_comp == 0)
				claw_strt_conn_req(dev);
			privptr->system_validate_comp = 1;
			break;
		case CLAW_RC_NAME_MISMATCH:
			dev_warn(tdev, "Validating %s failed because of"
				" a host or adapter name mismatch\n",
				dev->name);
			break;
		case CLAW_RC_WRONG_VERSION:
			dev_warn(tdev, "Validating %s failed because of a"
				" version conflict\n",
				dev->name);
			break;
		case CLAW_RC_HOST_RCV_TOO_SMALL:
			dev_warn(tdev, "Validating %s failed because of a"
				" frame size conflict\n",
				dev->name);
			break;
		default:
			dev_warn(tdev, "The communication peer of %s rejected"
				" the connection\n",
				dev->name);
			break;
		}
		break;

	case CONNECTION_REQUEST:
		/* peer wants to open a logical link */
		p_connect = (struct conncmd *)&(p_ctlbk->data);
		dev_info(tdev, "%s: Recv Conn Req: Vers=%d,link_id=%d,"
			"Corr=%d,HOST appl=%.8s,WS appl=%.8s\n",
			dev->name,
			p_ctlbk->version,
			p_ctlbk->linkid,
			p_ctlbk->correlator,
			p_connect->host_name,
			p_connect->WS_name);
		/* reject if a link is already active */
		if (privptr->active_link_ID != 0) {
			claw_snd_disc(dev, p_ctlbk);
			dev_info(tdev, "%s rejected a connection request"
				" because it is already active\n",
				dev->name);
		}
		/* only a single link (id 1) is supported */
		if (p_ctlbk->linkid != 1) {
			claw_snd_disc(dev, p_ctlbk);
			dev_info(tdev, "%s rejected a request to open multiple"
				" connections\n",
				dev->name);
		}
		rc = find_link(dev, p_connect->host_name, p_connect->WS_name);
		if (rc != 0) {
			claw_snd_disc(dev, p_ctlbk);
			dev_info(tdev, "%s rejected a connection request"
				" because of a type mismatch\n",
				dev->name);
		}
		/* NOTE(review): the confirm is sent even after one of the
		 * rejections above issued a disconnect - verify intended */
		claw_send_control(dev,
			CONNECTION_CONFIRM, p_ctlbk->linkid,
			p_ctlbk->correlator,
			0, p_connect->host_name,
			p_connect->WS_name);
		if (p_env->packing == PACKING_ASK) {
			p_env->packing = PACK_SEND;
			claw_snd_conn_req(dev, 0);
		}
		dev_info(tdev, "%s: CLAW device %.8s: Connection "
			"completed link_id=%d.\n",
			dev->name, temp_ws_name,
			p_ctlbk->linkid);
		privptr->active_link_ID = p_ctlbk->linkid;
		p_ch = &privptr->channel[WRITE_CHANNEL];
		wake_up(&p_ch->wait); /* wake up claw_open ( WRITE) */
		break;
	case CONNECTION_RESPONSE:
		/* peer's answer to our connection request */
		p_connect = (struct conncmd *)&(p_ctlbk->data);
		dev_info(tdev, "%s: Recv Conn Resp: Vers=%d,link_id=%d,"
			"Corr=%d,RC=%d,Host appl=%.8s, WS appl=%.8s\n",
			dev->name,
			p_ctlbk->version,
			p_ctlbk->linkid,
			p_ctlbk->correlator,
			p_ctlbk->rc,
			p_connect->host_name,
			p_connect->WS_name);

		if (p_ctlbk->rc != 0) {
			dev_warn(tdev, "The communication peer of %s rejected"
				" a connection request\n",
				dev->name);
			return 1;
		}
		rc = find_link(dev,
			p_connect->host_name, p_connect->WS_name);
		if (rc != 0) {
			claw_snd_disc(dev, p_ctlbk);
			dev_warn(tdev, "The communication peer of %s"
				" rejected a connection "
				"request because of a type mismatch\n",
				dev->name);
		}
		/* should be until CONNECTION_CONFIRM */
		/* negative link id marks "accepted but not yet confirmed" */
		privptr->active_link_ID = -(p_ctlbk->linkid);
		break;
	case CONNECTION_CONFIRM:
		/* final handshake step for a link we initiated */
		p_connect = (struct conncmd *)&(p_ctlbk->data);
		dev_info(tdev,
			"%s: Recv Conn Confirm:Vers=%d,link_id=%d,"
			"Corr=%d,Host appl=%.8s,WS appl=%.8s\n",
			dev->name,
			p_ctlbk->version,
			p_ctlbk->linkid,
			p_ctlbk->correlator,
			p_connect->host_name,
			p_connect->WS_name);
		if (p_ctlbk->linkid == -(privptr->active_link_ID)) {
			privptr->active_link_ID = p_ctlbk->linkid;
			if (p_env->packing > PACKING_ASK) {
				dev_info(tdev,
				"%s: Confirmed Now packing\n", dev->name);
				p_env->packing = DO_PACKED;
			}
			p_ch = &privptr->channel[WRITE_CHANNEL];
			wake_up(&p_ch->wait);
		} else {
			dev_warn(tdev, "Activating %s failed because of"
				" an incorrect link ID=%d\n",
				dev->name, p_ctlbk->linkid);
			claw_snd_disc(dev, p_ctlbk);
		}
		break;
	case DISCONNECT:
		dev_info(tdev, "%s: Disconnect: "
			"Vers=%d,link_id=%d,Corr=%d\n",
			dev->name, p_ctlbk->version,
			p_ctlbk->linkid, p_ctlbk->correlator);
		/* link 2 disconnect while negotiating packing means the
		 * peer accepted packed mode on link 1 */
		if ((p_ctlbk->linkid == 2) &&
		    (p_env->packing == PACK_SEND)) {
			privptr->active_link_ID = 1;
			p_env->packing = DO_PACKED;
		} else
			privptr->active_link_ID = 0;
		break;
	case CLAW_ERROR:
		dev_warn(tdev, "The communication peer of %s failed\n",
			dev->name);
		break;
	default:
		dev_warn(tdev, "The communication peer of %s sent"
			" an unknown command code\n",
			dev->name);
		break;
	}

	return 0;
}       /*    end of claw_process_control    */
2254 | |||
2255 | |||
2256 | /*-------------------------------------------------------------------* | ||
2257 | * claw_send_control * | ||
2258 | * * | ||
2259 | *--------------------------------------------------------------------*/ | ||
2260 | |||
/*
 * claw_send_control - build and transmit one CLAW control record.
 *
 * @dev:         net device to send on.
 * @type:        control command code (SYSTEM_VALIDATE_*, CONNECTION_*, ...).
 * @link:        logical link id to place in the record.
 * @correlator:  correlator echoed back to the peer.
 * @rc:          return code field of the record.
 * @local_name:  8-byte local (host) name copied into the record.
 * @remote_name: 8-byte remote (workstation) name copied into the record.
 *
 * Fills the per-device control block, populates the command-specific
 * payload (sysval or conncmd view of the same data area), copies the
 * record into a freshly allocated skb and hands it to claw_hw_tx().
 * Returns 0 on success or -ENOMEM if no skb could be allocated.
 */
static int
claw_send_control(struct net_device *dev, __u8 type, __u8 link,
	 __u8 correlator, __u8 rc, char *local_name, char *remote_name)
{
	struct claw_privbk 		*privptr;
	struct clawctl                  *p_ctl;
	struct sysval                   *p_sysval;
	struct conncmd                  *p_connect;
	struct sk_buff 			*skb;

	CLAW_DBF_TEXT(2, setup, "sndcntl");
	privptr = dev->ml_priv;
	p_ctl=(struct clawctl *)&privptr->ctl_bk;

	p_ctl->command=type;
	p_ctl->version=CLAW_VERSION_ID;
	p_ctl->linkid=link;
	p_ctl->correlator=correlator;
	p_ctl->rc=rc;

	/* two overlapping views of the same payload area; which one is
	 * meaningful depends on the command below */
	p_sysval=(struct sysval *)&p_ctl->data;
	p_connect=(struct conncmd *)&p_ctl->data;

	switch (p_ctl->command) {
	case SYSTEM_VALIDATE_REQUEST:
	case SYSTEM_VALIDATE_RESPONSE:
		memcpy(&p_sysval->host_name, local_name, 8);
		memcpy(&p_sysval->WS_name, remote_name, 8);
		if (privptr->p_env->packing > 0) {
			/* packed mode advertises the fixed pack buffer size */
			p_sysval->read_frame_size = DEF_PACK_BUFSIZE;
			p_sysval->write_frame_size = DEF_PACK_BUFSIZE;
		} else {
			/* how big is the biggest group of packets */
			p_sysval->read_frame_size =
				privptr->p_env->read_size;
			p_sysval->write_frame_size =
				privptr->p_env->write_size;
		}
		memset(&p_sysval->reserved, 0x00, 4);
		break;
	case CONNECTION_REQUEST:
	case CONNECTION_RESPONSE:
	case CONNECTION_CONFIRM:
	case DISCONNECT:
		memcpy(&p_sysval->host_name, local_name, 8);
		memcpy(&p_sysval->WS_name, remote_name, 8);
		if (privptr->p_env->packing > 0) {
			/* How big is the biggest packet */
			p_connect->reserved1[0]=CLAW_FRAME_SIZE;
			p_connect->reserved1[1]=CLAW_FRAME_SIZE;
		} else {
			memset(&p_connect->reserved1, 0x00, 4);
			memset(&p_connect->reserved2, 0x00, 4);
		}
		break;
	default:
		break;
	}

	/*      write Control Record to the device   */


	skb = dev_alloc_skb(sizeof(struct clawctl));
	if (!skb) {
		return -ENOMEM;
	}
	memcpy(skb_put(skb, sizeof(struct clawctl)),
		p_ctl, sizeof(struct clawctl));
	/* control records on a packing link are sent with the pack flag */
	if (privptr->p_env->packing >= PACK_SEND)
		claw_hw_tx(skb, dev, 1);
	else
		claw_hw_tx(skb, dev, 0);
	return 0;
}       /*   end of claw_send_control  */
2335 | |||
2336 | /*-------------------------------------------------------------------* | ||
2337 | * claw_snd_conn_req * | ||
2338 | * * | ||
2339 | *--------------------------------------------------------------------*/ | ||
2340 | static int | ||
2341 | claw_snd_conn_req(struct net_device *dev, __u8 link) | ||
2342 | { | ||
2343 | int rc; | ||
2344 | struct claw_privbk *privptr = dev->ml_priv; | ||
2345 | struct clawctl *p_ctl; | ||
2346 | |||
2347 | CLAW_DBF_TEXT(2, setup, "snd_conn"); | ||
2348 | rc = 1; | ||
2349 | p_ctl=(struct clawctl *)&privptr->ctl_bk; | ||
2350 | p_ctl->linkid = link; | ||
2351 | if ( privptr->system_validate_comp==0x00 ) { | ||
2352 | return rc; | ||
2353 | } | ||
2354 | if (privptr->p_env->packing == PACKING_ASK ) | ||
2355 | rc=claw_send_control(dev, CONNECTION_REQUEST,0,0,0, | ||
2356 | WS_APPL_NAME_PACKED, WS_APPL_NAME_PACKED); | ||
2357 | if (privptr->p_env->packing == PACK_SEND) { | ||
2358 | rc=claw_send_control(dev, CONNECTION_REQUEST,0,0,0, | ||
2359 | WS_APPL_NAME_IP_NAME, WS_APPL_NAME_IP_NAME); | ||
2360 | } | ||
2361 | if (privptr->p_env->packing == 0) | ||
2362 | rc=claw_send_control(dev, CONNECTION_REQUEST,0,0,0, | ||
2363 | HOST_APPL_NAME, privptr->p_env->api_type); | ||
2364 | return rc; | ||
2365 | |||
2366 | } /* end of claw_snd_conn_req */ | ||
2367 | |||
2368 | |||
2369 | /*-------------------------------------------------------------------* | ||
2370 | * claw_snd_disc * | ||
2371 | * * | ||
2372 | *--------------------------------------------------------------------*/ | ||
2373 | |||
2374 | static int | ||
2375 | claw_snd_disc(struct net_device *dev, struct clawctl * p_ctl) | ||
2376 | { | ||
2377 | int rc; | ||
2378 | struct conncmd * p_connect; | ||
2379 | |||
2380 | CLAW_DBF_TEXT(2, setup, "snd_dsc"); | ||
2381 | p_connect=(struct conncmd *)&p_ctl->data; | ||
2382 | |||
2383 | rc=claw_send_control(dev, DISCONNECT, p_ctl->linkid, | ||
2384 | p_ctl->correlator, 0, | ||
2385 | p_connect->host_name, p_connect->WS_name); | ||
2386 | return rc; | ||
2387 | } /* end of claw_snd_disc */ | ||
2388 | |||
2389 | |||
2390 | /*-------------------------------------------------------------------* | ||
2391 | * claw_snd_sys_validate_rsp * | ||
2392 | * * | ||
2393 | *--------------------------------------------------------------------*/ | ||
2394 | |||
2395 | static int | ||
2396 | claw_snd_sys_validate_rsp(struct net_device *dev, | ||
2397 | struct clawctl *p_ctl, __u32 return_code) | ||
2398 | { | ||
2399 | struct claw_env * p_env; | ||
2400 | struct claw_privbk *privptr; | ||
2401 | int rc; | ||
2402 | |||
2403 | CLAW_DBF_TEXT(2, setup, "chkresp"); | ||
2404 | privptr = dev->ml_priv; | ||
2405 | p_env=privptr->p_env; | ||
2406 | rc=claw_send_control(dev, SYSTEM_VALIDATE_RESPONSE, | ||
2407 | p_ctl->linkid, | ||
2408 | p_ctl->correlator, | ||
2409 | return_code, | ||
2410 | p_env->host_name, | ||
2411 | p_env->adapter_name ); | ||
2412 | return rc; | ||
2413 | } /* end of claw_snd_sys_validate_rsp */ | ||
2414 | |||
2415 | /*-------------------------------------------------------------------* | ||
2416 | * claw_strt_conn_req * | ||
2417 | * * | ||
2418 | *--------------------------------------------------------------------*/ | ||
2419 | |||
2420 | static int | ||
2421 | claw_strt_conn_req(struct net_device *dev ) | ||
2422 | { | ||
2423 | int rc; | ||
2424 | |||
2425 | CLAW_DBF_TEXT(2, setup, "conn_req"); | ||
2426 | rc=claw_snd_conn_req(dev, 1); | ||
2427 | return rc; | ||
2428 | } /* end of claw_strt_conn_req */ | ||
2429 | |||
2430 | |||
2431 | |||
2432 | /*-------------------------------------------------------------------* | ||
2433 | * claw_stats * | ||
2434 | *-------------------------------------------------------------------*/ | ||
2435 | |||
2436 | static struct | ||
2437 | net_device_stats *claw_stats(struct net_device *dev) | ||
2438 | { | ||
2439 | struct claw_privbk *privptr; | ||
2440 | |||
2441 | CLAW_DBF_TEXT(4, trace, "stats"); | ||
2442 | privptr = dev->ml_priv; | ||
2443 | return &privptr->stats; | ||
2444 | } /* end of claw_stats */ | ||
2445 | |||
2446 | |||
2447 | /*-------------------------------------------------------------------* | ||
2448 | * unpack_read * | ||
2449 | * * | ||
2450 | *--------------------------------------------------------------------*/ | ||
/*
 * unpack_read - process all completed read CCW blocks for @dev.
 *
 * Walks the active read queue until the first still-pending block.
 * For each completed frame it either (a) hands link-0 frames to
 * claw_process_control(), (b) skips frames belonging to an oversized
 * multi-frame message being discarded, or (c) copies the payload
 * (possibly several packed sub-packets per frame) into the MTC
 * envelope and, once the final frame of a message arrives, delivers
 * it to the stack via netif_rx().  Drained CCW blocks are collected
 * and re-queued with add_claw_reads(), then the read channel is
 * restarted.
 */
static void
unpack_read(struct net_device *dev )
{
	struct sk_buff *skb;
	struct claw_privbk *privptr;
	struct claw_env    *p_env;
	struct ccwbk 	*p_this_ccw;
	struct ccwbk 	*p_first_ccw;
	struct ccwbk 	*p_last_ccw;
	struct clawph 	*p_packh;
	void		*p_packd;
	struct clawctl 	*p_ctlrec=NULL;
	struct device	*p_dev;

	__u32	len_of_data;
	__u32	pack_off;
	__u8	link_num;
	__u8 	mtc_this_frm=0;
	__u32	bytes_to_mov;
	int	i=0;
	int     p=0;

	CLAW_DBF_TEXT(4, trace, "unpkread");
	p_first_ccw=NULL;
	p_last_ccw=NULL;
	p_packh=NULL;
	p_packd=NULL;
	privptr = dev->ml_priv;

	p_dev = &privptr->channel[READ_CHANNEL].cdev->dev;
	p_env = privptr->p_env;
	p_this_ccw=privptr->p_read_active_first;
	/* process completed blocks; stop at the first still-pending one */
	while (p_this_ccw!=NULL && p_this_ccw->header.flag!=CLAW_PENDING) {
		pack_off = 0;
		p = 0;
		p_this_ccw->header.flag=CLAW_PENDING;
		privptr->p_read_active_first=p_this_ccw->next;
		p_this_ccw->next=NULL;
		p_packh = (struct clawph *)p_this_ccw->p_buffer;
		if ((p_env->packing == PACK_SEND) &&
		    (p_packh->len == 32)           &&
		    (p_packh->link_num == 0)) {   /* is it a packed ctl rec? */
			p_packh++;  /* peek past pack header */
			p_ctlrec = (struct clawctl *)p_packh;
			p_packh--;  /* un peek */
			if ((p_ctlrec->command == CONNECTION_RESPONSE) ||
		            (p_ctlrec->command == CONNECTION_CONFIRM))
				p_env->packing = DO_PACKED;
		}
		/* link number comes from the pack header when packing,
		 * otherwise it is encoded in the CCW opcode */
		if (p_env->packing == DO_PACKED)
			link_num=p_packh->link_num;
		else
			link_num=p_this_ccw->header.opcode / 8;
		if ((p_this_ccw->header.opcode & MORE_to_COME_FLAG)!=0) {
			mtc_this_frm=1;
			/* intermediate frames of an MTC message must be
			 * full read_size frames */
			if (p_this_ccw->header.length!=
				privptr->p_env->read_size ) {
				dev_warn(p_dev,
					"The communication peer of %s"
					" sent a faulty"
					" frame of length %02x\n",
					dev->name, p_this_ccw->header.length);
			}
		}

		if (privptr->mtc_skipping) {
			/*
			 *	We're in the mode of skipping past a
			 *	multi-frame message
			 *	that we can't process for some reason or other.
			 *	The first frame without the More-To-Come flag is
			 *	the last frame of the skipped message.
			 */
			/*  in case of More-To-Come not set in this frame */
			if (mtc_this_frm==0) {
				privptr->mtc_skipping=0; /* Ok, the end */
				privptr->mtc_logical_link=-1;
			}
			goto NextFrame;
		}

		/* link 0 carries control records, not data */
		if (link_num==0) {
			claw_process_control(dev, p_this_ccw);
			CLAW_DBF_TEXT(4, trace, "UnpkCntl");
			goto NextFrame;
		}
unpack_next:
		if (p_env->packing == DO_PACKED) {
			if (pack_off > p_env->read_size)
				goto NextFrame;
			p_packd = p_this_ccw->p_buffer+pack_off;
			p_packh = (struct clawph *) p_packd;
			if ((p_packh->len == 0) || /* done with this frame? */
			    (p_packh->flag != 0))
				goto NextFrame;
			bytes_to_mov = p_packh->len;
			pack_off += bytes_to_mov+sizeof(struct clawph);
			p++;
		} else {
			bytes_to_mov=p_this_ccw->header.length;
		}
		if (privptr->mtc_logical_link<0) {

		/*
		 *  if More-To-Come is set in this frame then we don't know
		 *  length of entire message, and hence have to allocate
		 *  large buffer */

		/*      We are starting a new envelope  */
		privptr->mtc_offset=0;
			privptr->mtc_logical_link=link_num;
		}

		/* message would overflow the envelope - count it as a
		 * frame error and drop it */
		if (bytes_to_mov > (MAX_ENVELOPE_SIZE- privptr->mtc_offset) ) {
			/*      error     */
			privptr->stats.rx_frame_errors++;
			goto NextFrame;
		}
		if (p_env->packing == DO_PACKED) {
			memcpy( privptr->p_mtc_envelope+ privptr->mtc_offset,
				p_packd+sizeof(struct clawph), bytes_to_mov);

		} else	{
			memcpy( privptr->p_mtc_envelope+ privptr->mtc_offset,
				p_this_ccw->p_buffer, bytes_to_mov);
		}
		if (mtc_this_frm==0) {
			/* last frame of the message - deliver it upstream */
			len_of_data=privptr->mtc_offset+bytes_to_mov;
			skb=dev_alloc_skb(len_of_data);
			if (skb) {
				memcpy(skb_put(skb,len_of_data),
					privptr->p_mtc_envelope,
					len_of_data);
				skb->dev=dev;
				skb_reset_mac_header(skb);
				skb->protocol=htons(ETH_P_IP);
				skb->ip_summed=CHECKSUM_UNNECESSARY;
				privptr->stats.rx_packets++;
				privptr->stats.rx_bytes+=len_of_data;
				netif_rx(skb);
			}
			else {
				dev_info(p_dev, "Allocating a buffer for"
					" incoming data failed\n");
				privptr->stats.rx_dropped++;
			}
			privptr->mtc_offset=0;
			privptr->mtc_logical_link=-1;
		}
		else {
			/* more frames to come - just accumulate */
			privptr->mtc_offset+=bytes_to_mov;
		}
		if (p_env->packing == DO_PACKED)
			goto unpack_next;
NextFrame:
		/*
		 * Remove ThisCCWblock from active read queue, and add it
		 * to queue of free blocks to be reused.
		 */
		i++;
		p_this_ccw->header.length=0xffff;
		p_this_ccw->header.opcode=0xff;
		/*
		 *      add this one to the free queue for later reuse
		 */
		if (p_first_ccw==NULL) {
			p_first_ccw = p_this_ccw;
		}
		else {
			p_last_ccw->next = p_this_ccw;
		}
		p_last_ccw = p_this_ccw;
		/*
		 *       chain to next block on active read queue
		 */
		p_this_ccw = privptr->p_read_active_first;
		CLAW_DBF_TEXT_(4, trace, "rxpkt %d", p);
	} /* end of while */

	/*      check validity                  */

	CLAW_DBF_TEXT_(4, trace, "rxfrm %d", i);
	add_claw_reads(dev, p_first_ccw, p_last_ccw);
	claw_strt_read(dev, LOCK_YES);
	return;
}     /*  end of unpack_read   */
2637 | |||
2638 | /*-------------------------------------------------------------------* | ||
2639 | * claw_strt_read * | ||
2640 | * * | ||
2641 | *--------------------------------------------------------------------*/ | ||
/*
 * claw_strt_read - (re)start I/O on the read channel.
 *
 * @dev:  net device whose read channel is to be started.
 * @lock: LOCK_YES to take the ccw device lock here; otherwise the
 *        caller is assumed to already hold it.
 *
 * Sets the signal block flag to CLAW_BUSY if either queue still has a
 * non-pending block, then starts the read CCW chain unless the
 * channel's IO_active bit was already set (in which case I/O is
 * already running and nothing is done).
 */
static void
claw_strt_read (struct net_device *dev, int lock )
{
	int        rc = 0;
	__u32      parm;
	unsigned long  saveflags = 0;
	struct claw_privbk *privptr = dev->ml_priv;
	struct ccwbk*p_ccwbk;
	struct chbk *p_ch;
	struct clawh *p_clawh;
	p_ch = &privptr->channel[READ_CHANNEL];

	CLAW_DBF_TEXT(4, trace, "StRdNter");
	p_clawh=(struct clawh *)privptr->p_claw_signal_blk;
	p_clawh->flag=CLAW_IDLE;    /* 0x00 */

	/* mark busy while either queue still has work in flight */
	if ((privptr->p_write_active_first!=NULL &&
	     privptr->p_write_active_first->header.flag!=CLAW_PENDING) ||
	    (privptr->p_read_active_first!=NULL &&
	     privptr->p_read_active_first->header.flag!=CLAW_PENDING )) {
		p_clawh->flag=CLAW_BUSY;    /* 0xff */
	}
	if (lock==LOCK_YES) {
		spin_lock_irqsave(get_ccwdev_lock(p_ch->cdev), saveflags);
	}
	/* only start I/O if the channel is not already active */
	if (test_and_set_bit(0, (void *)&p_ch->IO_active) == 0) {
		CLAW_DBF_TEXT(4, trace, "HotRead");
		p_ccwbk=privptr->p_read_active_first;
		parm = (unsigned long) p_ch;
		rc = ccw_device_start (p_ch->cdev, &p_ccwbk->read, parm,
				       0xff, 0);
		if (rc != 0) {
			ccw_check_return_code(p_ch->cdev, rc);
		}
	}
	else {
		CLAW_DBF_TEXT(2, trace, "ReadAct");
	}

	if (lock==LOCK_YES) {
		spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags);
	}
	CLAW_DBF_TEXT(4, trace, "StRdExit");
	return;
}       /*    end of claw_strt_read    */
2687 | |||
2688 | /*-------------------------------------------------------------------* | ||
2689 | * claw_strt_out_IO * | ||
2690 | * * | ||
2691 | *--------------------------------------------------------------------*/ | ||
2692 | |||
/*
 * claw_strt_out_IO - start I/O on the write channel.
 *
 * Starts the first CCW on the active write queue unless the device is
 * stopped, the queue is empty, or the channel's IO_active bit was
 * already set (I/O already running).  Callers are expected to hold
 * the appropriate ccw device lock - TODO confirm; no locking is done
 * here.
 */
static void
claw_strt_out_IO( struct net_device *dev )
{
	int             	rc = 0;
	unsigned long   	parm;
	struct claw_privbk 	*privptr;
	struct chbk     	*p_ch;
	struct ccwbk   	*p_first_ccw;

	if (!dev) {
		return;
	}
	privptr = (struct claw_privbk *)dev->ml_priv;
	p_ch = &privptr->channel[WRITE_CHANNEL];

	CLAW_DBF_TEXT(4, trace, "strt_io");
	p_first_ccw=privptr->p_write_active_first;

	/* nothing to do while stopped or with an empty write queue */
	if (p_ch->claw_state == CLAW_STOP)
		return;
	if (p_first_ccw == NULL) {
		return;
	}
	/* only start I/O if the channel is not already active */
	if (test_and_set_bit(0, (void *)&p_ch->IO_active) == 0) {
		parm = (unsigned long) p_ch;
		CLAW_DBF_TEXT(2, trace, "StWrtIO");
		rc = ccw_device_start(p_ch->cdev, &p_first_ccw->write, parm,
				      0xff, 0);
		if (rc != 0) {
			ccw_check_return_code(p_ch->cdev, rc);
		}
	}
	/* NOTE(review): trans_start is updated even when the channel was
	 * already active or the start failed - confirm intended */
	dev->trans_start = jiffies;
	return;
}       /*    end of claw_strt_out_IO    */
2728 | |||
2729 | /*-------------------------------------------------------------------* | ||
2730 | * Free write buffers * | ||
2731 | * * | ||
2732 | *--------------------------------------------------------------------*/ | ||
2733 | |||
2734 | static void | ||
2735 | claw_free_wrt_buf( struct net_device *dev ) | ||
2736 | { | ||
2737 | |||
2738 | struct claw_privbk *privptr = (struct claw_privbk *)dev->ml_priv; | ||
2739 | struct ccwbk*p_this_ccw; | ||
2740 | struct ccwbk*p_next_ccw; | ||
2741 | |||
2742 | CLAW_DBF_TEXT(4, trace, "freewrtb"); | ||
2743 | /* scan the write queue to free any completed write packets */ | ||
2744 | p_this_ccw=privptr->p_write_active_first; | ||
2745 | while ( (p_this_ccw!=NULL) && (p_this_ccw->header.flag!=CLAW_PENDING)) | ||
2746 | { | ||
2747 | p_next_ccw = p_this_ccw->next; | ||
2748 | if (((p_next_ccw!=NULL) && | ||
2749 | (p_next_ccw->header.flag!=CLAW_PENDING)) || | ||
2750 | ((p_this_ccw == privptr->p_write_active_last) && | ||
2751 | (p_this_ccw->header.flag!=CLAW_PENDING))) { | ||
2752 | /* The next CCW is OK or this is */ | ||
2753 | /* the last CCW...free it @A1A */ | ||
2754 | privptr->p_write_active_first=p_this_ccw->next; | ||
2755 | p_this_ccw->header.flag=CLAW_PENDING; | ||
2756 | p_this_ccw->next=privptr->p_write_free_chain; | ||
2757 | privptr->p_write_free_chain=p_this_ccw; | ||
2758 | ++privptr->write_free_count; | ||
2759 | privptr->stats.tx_bytes+= p_this_ccw->write.count; | ||
2760 | p_this_ccw=privptr->p_write_active_first; | ||
2761 | privptr->stats.tx_packets++; | ||
2762 | } | ||
2763 | else { | ||
2764 | break; | ||
2765 | } | ||
2766 | } | ||
2767 | if (privptr->write_free_count!=0) { | ||
2768 | claw_clearbit_busy(TB_NOBUFFER,dev); | ||
2769 | } | ||
2770 | /* whole chain removed? */ | ||
2771 | if (privptr->p_write_active_first==NULL) { | ||
2772 | privptr->p_write_active_last=NULL; | ||
2773 | } | ||
2774 | CLAW_DBF_TEXT_(4, trace, "FWC=%d", privptr->write_free_count); | ||
2775 | return; | ||
2776 | } | ||
2777 | |||
2778 | /*-------------------------------------------------------------------* | ||
2779 | * claw free netdevice * | ||
2780 | * * | ||
2781 | *--------------------------------------------------------------------*/ | ||
2782 | static void | ||
2783 | claw_free_netdevice(struct net_device * dev, int free_dev) | ||
2784 | { | ||
2785 | struct claw_privbk *privptr; | ||
2786 | |||
2787 | CLAW_DBF_TEXT(2, setup, "free_dev"); | ||
2788 | if (!dev) | ||
2789 | return; | ||
2790 | CLAW_DBF_TEXT_(2, setup, "%s", dev->name); | ||
2791 | privptr = dev->ml_priv; | ||
2792 | if (dev->flags & IFF_RUNNING) | ||
2793 | claw_release(dev); | ||
2794 | if (privptr) { | ||
2795 | privptr->channel[READ_CHANNEL].ndev = NULL; /* say it's free */ | ||
2796 | } | ||
2797 | dev->ml_priv = NULL; | ||
2798 | #ifdef MODULE | ||
2799 | if (free_dev) { | ||
2800 | free_netdev(dev); | ||
2801 | } | ||
2802 | #endif | ||
2803 | CLAW_DBF_TEXT(2, setup, "free_ok"); | ||
2804 | } | ||
2805 | |||
/**
 * Claw init netdevice
 * Initialize everything of the net device except the name and the
 * channel structs.
 */
/* net_device callbacks shared by every CLAW interface; installed on
 * each device by claw_init_netdevice() below. */
static const struct net_device_ops claw_netdev_ops = {
	.ndo_open = claw_open,
	.ndo_stop = claw_release,
	.ndo_get_stats = claw_stats,
	.ndo_start_xmit = claw_tx,
	.ndo_change_mtu = claw_change_mtu,
};
2818 | |||
2819 | static void | ||
2820 | claw_init_netdevice(struct net_device * dev) | ||
2821 | { | ||
2822 | CLAW_DBF_TEXT(2, setup, "init_dev"); | ||
2823 | CLAW_DBF_TEXT_(2, setup, "%s", dev->name); | ||
2824 | dev->mtu = CLAW_DEFAULT_MTU_SIZE; | ||
2825 | dev->hard_header_len = 0; | ||
2826 | dev->addr_len = 0; | ||
2827 | dev->type = ARPHRD_SLIP; | ||
2828 | dev->tx_queue_len = 1300; | ||
2829 | dev->flags = IFF_POINTOPOINT | IFF_NOARP; | ||
2830 | dev->netdev_ops = &claw_netdev_ops; | ||
2831 | CLAW_DBF_TEXT(2, setup, "initok"); | ||
2832 | return; | ||
2833 | } | ||
2834 | |||
2835 | /** | ||
2836 | * Init a new channel in the privptr->channel[i]. | ||
2837 | * | ||
2838 | * @param cdev The ccw_device to be added. | ||
2839 | * | ||
2840 | * @return 0 on success, !0 on error. | ||
2841 | */ | ||
2842 | static int | ||
2843 | add_channel(struct ccw_device *cdev,int i,struct claw_privbk *privptr) | ||
2844 | { | ||
2845 | struct chbk *p_ch; | ||
2846 | struct ccw_dev_id dev_id; | ||
2847 | |||
2848 | CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cdev->dev)); | ||
2849 | privptr->channel[i].flag = i+1; /* Read is 1 Write is 2 */ | ||
2850 | p_ch = &privptr->channel[i]; | ||
2851 | p_ch->cdev = cdev; | ||
2852 | snprintf(p_ch->id, CLAW_ID_SIZE, "cl-%s", dev_name(&cdev->dev)); | ||
2853 | ccw_device_get_id(cdev, &dev_id); | ||
2854 | p_ch->devno = dev_id.devno; | ||
2855 | if ((p_ch->irb = kzalloc(sizeof (struct irb),GFP_KERNEL)) == NULL) { | ||
2856 | return -ENOMEM; | ||
2857 | } | ||
2858 | return 0; | ||
2859 | } | ||
2860 | |||
2861 | |||
/**
 *
 * Setup an interface: create the channels, set the subchannels
 * online, allocate and register the net_device, and allocate the
 * CCW buffer chains.
 *
 * @param cgdev  Device to be setup.
 *
 * @returns 0 on success, !0 on failure (always -ENODEV to the caller).
 */
static int
claw_new_device(struct ccwgroup_device *cgdev)
{
	struct claw_privbk *privptr;
	struct claw_env *p_env;
	struct net_device *dev;
	int ret;
	struct ccw_dev_id dev_id;

	dev_info(&cgdev->dev, "add for %s\n",
		dev_name(&cgdev->cdev[READ_CHANNEL]->dev));
	CLAW_DBF_TEXT(2, setup, "new_dev");
	privptr = dev_get_drvdata(&cgdev->dev);
	/* Mirror the group's private data onto both subchannel devices
	 * (even when NULL) so the per-cdev state is consistent before
	 * the NULL check bails out. */
	dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, privptr);
	dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, privptr);
	if (!privptr)
		return -ENODEV;
	p_env = privptr->p_env;
	/* Record the device numbers of the read and write subchannels. */
	ccw_device_get_id(cgdev->cdev[READ_CHANNEL], &dev_id);
	p_env->devno[READ_CHANNEL] = dev_id.devno;
	ccw_device_get_id(cgdev->cdev[WRITE_CHANNEL], &dev_id);
	p_env->devno[WRITE_CHANNEL] = dev_id.devno;
	/* Initialize both channel control blocks (allocates the irb). */
	ret = add_channel(cgdev->cdev[0],0,privptr);
	if (ret == 0)
		ret = add_channel(cgdev->cdev[1],1,privptr);
	if (ret != 0) {
		dev_warn(&cgdev->dev, "Creating a CLAW group device"
			" failed with error code %d\n", ret);
		goto out;
	}
	ret = ccw_device_set_online(cgdev->cdev[READ_CHANNEL]);
	if (ret != 0) {
		dev_warn(&cgdev->dev,
			"Setting the read subchannel online"
			" failed with error code %d\n", ret);
		goto out;
	}
	ret = ccw_device_set_online(cgdev->cdev[WRITE_CHANNEL]);
	if (ret != 0) {
		dev_warn(&cgdev->dev,
			"Setting the write subchannel online "
			"failed with error code %d\n", ret);
		goto out;
	}
	/* No private area requested; state is attached via ml_priv. */
	dev = alloc_netdev(0, "claw%d", NET_NAME_UNKNOWN, claw_init_netdevice);
	if (!dev) {
		dev_warn(&cgdev->dev,
			"Activating the CLAW device failed\n");
		goto out;
	}
	dev->ml_priv = privptr;
	dev_set_drvdata(&cgdev->dev, privptr);
	dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, privptr);
	dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, privptr);
	/* sysfs magic */
	SET_NETDEV_DEV(dev, &cgdev->dev);
	if (register_netdev(dev) != 0) {
		claw_free_netdevice(dev, 1);
		CLAW_DBF_TEXT(2, trace, "regfail");
		goto out;
	}
	dev->flags &=~IFF_RUNNING;
	/* Allocate the read/write CCW buffer chains once per device. */
	if (privptr->buffs_alloc == 0) {
		ret=init_ccw_bk(dev);
		if (ret !=0) {
			unregister_netdev(dev);
			claw_free_netdevice(dev,1);
			CLAW_DBF_TEXT(2, trace, "ccwmem");
			goto out;
		}
	}
	/* Both channels share the single net_device. */
	privptr->channel[READ_CHANNEL].ndev = dev;
	privptr->channel[WRITE_CHANNEL].ndev = dev;
	privptr->p_env->ndev = dev;

	dev_info(&cgdev->dev, "%s:readsize=%d writesize=%d "
		"readbuffer=%d writebuffer=%d read=0x%04x write=0x%04x\n",
		dev->name, p_env->read_size,
		p_env->write_size, p_env->read_buffers,
		p_env->write_buffers, p_env->devno[READ_CHANNEL],
		p_env->devno[WRITE_CHANNEL]);
	dev_info(&cgdev->dev, "%s:host_name:%.8s, adapter_name "
		":%.8s api_type: %.8s\n",
		dev->name, p_env->host_name,
		p_env->adapter_name , p_env->api_type);
	return 0;
out:
	/* Common error path: force both subchannels offline again; the
	 * caller only ever sees -ENODEV regardless of the failure. */
	ccw_device_set_offline(cgdev->cdev[1]);
	ccw_device_set_offline(cgdev->cdev[0]);
	return -ENODEV;
}
2961 | |||
2962 | static void | ||
2963 | claw_purge_skb_queue(struct sk_buff_head *q) | ||
2964 | { | ||
2965 | struct sk_buff *skb; | ||
2966 | |||
2967 | CLAW_DBF_TEXT(4, trace, "purgque"); | ||
2968 | while ((skb = skb_dequeue(q))) { | ||
2969 | atomic_dec(&skb->users); | ||
2970 | dev_kfree_skb_any(skb); | ||
2971 | } | ||
2972 | } | ||
2973 | |||
/**
 * Shutdown an interface: close and unregister the attached
 * net_device (if any) and set both subchannels offline.
 *
 * @param cgdev  Device to be shut down.
 *
 * @returns 0 on success, !0 on failure.
 */
static int
claw_shutdown_device(struct ccwgroup_device *cgdev)
{
	struct claw_privbk *priv;
	struct net_device *ndev;
	int ret = 0;

	CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev));
	priv = dev_get_drvdata(&cgdev->dev);
	if (!priv)
		return -ENODEV;
	ndev = priv->channel[READ_CHANNEL].ndev;
	if (ndev) {
		/* Close the device */
		dev_info(&cgdev->dev, "%s: shutting down\n",
			ndev->name);
		if (ndev->flags & IFF_RUNNING)
			ret = claw_release(ndev);
		ndev->flags &=~IFF_RUNNING;
		unregister_netdev(ndev);
		ndev->ml_priv = NULL;  /* cgdev data, not ndev's to free */
		claw_free_netdevice(ndev, 1);
		/* Both channels shared the same ndev — clear every ref. */
		priv->channel[READ_CHANNEL].ndev = NULL;
		priv->channel[WRITE_CHANNEL].ndev = NULL;
		priv->p_env->ndev = NULL;
	}
	ccw_device_set_offline(cgdev->cdev[1]);
	ccw_device_set_offline(cgdev->cdev[0]);
	return ret;
}
3011 | |||
3012 | static void | ||
3013 | claw_remove_device(struct ccwgroup_device *cgdev) | ||
3014 | { | ||
3015 | struct claw_privbk *priv; | ||
3016 | |||
3017 | CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev)); | ||
3018 | priv = dev_get_drvdata(&cgdev->dev); | ||
3019 | dev_info(&cgdev->dev, " will be removed.\n"); | ||
3020 | if (cgdev->state == CCWGROUP_ONLINE) | ||
3021 | claw_shutdown_device(cgdev); | ||
3022 | kfree(priv->p_mtc_envelope); | ||
3023 | priv->p_mtc_envelope=NULL; | ||
3024 | kfree(priv->p_env); | ||
3025 | priv->p_env=NULL; | ||
3026 | kfree(priv->channel[0].irb); | ||
3027 | priv->channel[0].irb=NULL; | ||
3028 | kfree(priv->channel[1].irb); | ||
3029 | priv->channel[1].irb=NULL; | ||
3030 | kfree(priv); | ||
3031 | dev_set_drvdata(&cgdev->dev, NULL); | ||
3032 | dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, NULL); | ||
3033 | dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, NULL); | ||
3034 | put_device(&cgdev->dev); | ||
3035 | |||
3036 | return; | ||
3037 | } | ||
3038 | |||
3039 | |||
3040 | /* | ||
3041 | * sysfs attributes | ||
3042 | */ | ||
3043 | static ssize_t | ||
3044 | claw_hname_show(struct device *dev, struct device_attribute *attr, char *buf) | ||
3045 | { | ||
3046 | struct claw_privbk *priv; | ||
3047 | struct claw_env * p_env; | ||
3048 | |||
3049 | priv = dev_get_drvdata(dev); | ||
3050 | if (!priv) | ||
3051 | return -ENODEV; | ||
3052 | p_env = priv->p_env; | ||
3053 | return sprintf(buf, "%s\n",p_env->host_name); | ||
3054 | } | ||
3055 | |||
3056 | static ssize_t | ||
3057 | claw_hname_write(struct device *dev, struct device_attribute *attr, | ||
3058 | const char *buf, size_t count) | ||
3059 | { | ||
3060 | struct claw_privbk *priv; | ||
3061 | struct claw_env * p_env; | ||
3062 | |||
3063 | priv = dev_get_drvdata(dev); | ||
3064 | if (!priv) | ||
3065 | return -ENODEV; | ||
3066 | p_env = priv->p_env; | ||
3067 | if (count > MAX_NAME_LEN+1) | ||
3068 | return -EINVAL; | ||
3069 | memset(p_env->host_name, 0x20, MAX_NAME_LEN); | ||
3070 | strncpy(p_env->host_name,buf, count); | ||
3071 | p_env->host_name[count-1] = 0x20; /* clear extra 0x0a */ | ||
3072 | p_env->host_name[MAX_NAME_LEN] = 0x00; | ||
3073 | CLAW_DBF_TEXT(2, setup, "HstnSet"); | ||
3074 | CLAW_DBF_TEXT_(2, setup, "%s", p_env->host_name); | ||
3075 | |||
3076 | return count; | ||
3077 | } | ||
3078 | |||
3079 | static DEVICE_ATTR(host_name, 0644, claw_hname_show, claw_hname_write); | ||
3080 | |||
3081 | static ssize_t | ||
3082 | claw_adname_show(struct device *dev, struct device_attribute *attr, char *buf) | ||
3083 | { | ||
3084 | struct claw_privbk *priv; | ||
3085 | struct claw_env * p_env; | ||
3086 | |||
3087 | priv = dev_get_drvdata(dev); | ||
3088 | if (!priv) | ||
3089 | return -ENODEV; | ||
3090 | p_env = priv->p_env; | ||
3091 | return sprintf(buf, "%s\n", p_env->adapter_name); | ||
3092 | } | ||
3093 | |||
3094 | static ssize_t | ||
3095 | claw_adname_write(struct device *dev, struct device_attribute *attr, | ||
3096 | const char *buf, size_t count) | ||
3097 | { | ||
3098 | struct claw_privbk *priv; | ||
3099 | struct claw_env * p_env; | ||
3100 | |||
3101 | priv = dev_get_drvdata(dev); | ||
3102 | if (!priv) | ||
3103 | return -ENODEV; | ||
3104 | p_env = priv->p_env; | ||
3105 | if (count > MAX_NAME_LEN+1) | ||
3106 | return -EINVAL; | ||
3107 | memset(p_env->adapter_name, 0x20, MAX_NAME_LEN); | ||
3108 | strncpy(p_env->adapter_name,buf, count); | ||
3109 | p_env->adapter_name[count-1] = 0x20; /* clear extra 0x0a */ | ||
3110 | p_env->adapter_name[MAX_NAME_LEN] = 0x00; | ||
3111 | CLAW_DBF_TEXT(2, setup, "AdnSet"); | ||
3112 | CLAW_DBF_TEXT_(2, setup, "%s", p_env->adapter_name); | ||
3113 | |||
3114 | return count; | ||
3115 | } | ||
3116 | |||
3117 | static DEVICE_ATTR(adapter_name, 0644, claw_adname_show, claw_adname_write); | ||
3118 | |||
3119 | static ssize_t | ||
3120 | claw_apname_show(struct device *dev, struct device_attribute *attr, char *buf) | ||
3121 | { | ||
3122 | struct claw_privbk *priv; | ||
3123 | struct claw_env * p_env; | ||
3124 | |||
3125 | priv = dev_get_drvdata(dev); | ||
3126 | if (!priv) | ||
3127 | return -ENODEV; | ||
3128 | p_env = priv->p_env; | ||
3129 | return sprintf(buf, "%s\n", | ||
3130 | p_env->api_type); | ||
3131 | } | ||
3132 | |||
3133 | static ssize_t | ||
3134 | claw_apname_write(struct device *dev, struct device_attribute *attr, | ||
3135 | const char *buf, size_t count) | ||
3136 | { | ||
3137 | struct claw_privbk *priv; | ||
3138 | struct claw_env * p_env; | ||
3139 | |||
3140 | priv = dev_get_drvdata(dev); | ||
3141 | if (!priv) | ||
3142 | return -ENODEV; | ||
3143 | p_env = priv->p_env; | ||
3144 | if (count > MAX_NAME_LEN+1) | ||
3145 | return -EINVAL; | ||
3146 | memset(p_env->api_type, 0x20, MAX_NAME_LEN); | ||
3147 | strncpy(p_env->api_type,buf, count); | ||
3148 | p_env->api_type[count-1] = 0x20; /* we get a loose 0x0a */ | ||
3149 | p_env->api_type[MAX_NAME_LEN] = 0x00; | ||
3150 | if(strncmp(p_env->api_type,WS_APPL_NAME_PACKED,6) == 0) { | ||
3151 | p_env->read_size=DEF_PACK_BUFSIZE; | ||
3152 | p_env->write_size=DEF_PACK_BUFSIZE; | ||
3153 | p_env->packing=PACKING_ASK; | ||
3154 | CLAW_DBF_TEXT(2, setup, "PACKING"); | ||
3155 | } | ||
3156 | else { | ||
3157 | p_env->packing=0; | ||
3158 | p_env->read_size=CLAW_FRAME_SIZE; | ||
3159 | p_env->write_size=CLAW_FRAME_SIZE; | ||
3160 | CLAW_DBF_TEXT(2, setup, "ApiSet"); | ||
3161 | } | ||
3162 | CLAW_DBF_TEXT_(2, setup, "%s", p_env->api_type); | ||
3163 | return count; | ||
3164 | } | ||
3165 | |||
3166 | static DEVICE_ATTR(api_type, 0644, claw_apname_show, claw_apname_write); | ||
3167 | |||
3168 | static ssize_t | ||
3169 | claw_wbuff_show(struct device *dev, struct device_attribute *attr, char *buf) | ||
3170 | { | ||
3171 | struct claw_privbk *priv; | ||
3172 | struct claw_env * p_env; | ||
3173 | |||
3174 | priv = dev_get_drvdata(dev); | ||
3175 | if (!priv) | ||
3176 | return -ENODEV; | ||
3177 | p_env = priv->p_env; | ||
3178 | return sprintf(buf, "%d\n", p_env->write_buffers); | ||
3179 | } | ||
3180 | |||
3181 | static ssize_t | ||
3182 | claw_wbuff_write(struct device *dev, struct device_attribute *attr, | ||
3183 | const char *buf, size_t count) | ||
3184 | { | ||
3185 | struct claw_privbk *priv; | ||
3186 | struct claw_env * p_env; | ||
3187 | int nnn,max; | ||
3188 | |||
3189 | priv = dev_get_drvdata(dev); | ||
3190 | if (!priv) | ||
3191 | return -ENODEV; | ||
3192 | p_env = priv->p_env; | ||
3193 | sscanf(buf, "%i", &nnn); | ||
3194 | if (p_env->packing) { | ||
3195 | max = 64; | ||
3196 | } | ||
3197 | else { | ||
3198 | max = 512; | ||
3199 | } | ||
3200 | if ((nnn > max ) || (nnn < 2)) | ||
3201 | return -EINVAL; | ||
3202 | p_env->write_buffers = nnn; | ||
3203 | CLAW_DBF_TEXT(2, setup, "Wbufset"); | ||
3204 | CLAW_DBF_TEXT_(2, setup, "WB=%d", p_env->write_buffers); | ||
3205 | return count; | ||
3206 | } | ||
3207 | |||
3208 | static DEVICE_ATTR(write_buffer, 0644, claw_wbuff_show, claw_wbuff_write); | ||
3209 | |||
3210 | static ssize_t | ||
3211 | claw_rbuff_show(struct device *dev, struct device_attribute *attr, char *buf) | ||
3212 | { | ||
3213 | struct claw_privbk *priv; | ||
3214 | struct claw_env * p_env; | ||
3215 | |||
3216 | priv = dev_get_drvdata(dev); | ||
3217 | if (!priv) | ||
3218 | return -ENODEV; | ||
3219 | p_env = priv->p_env; | ||
3220 | return sprintf(buf, "%d\n", p_env->read_buffers); | ||
3221 | } | ||
3222 | |||
3223 | static ssize_t | ||
3224 | claw_rbuff_write(struct device *dev, struct device_attribute *attr, | ||
3225 | const char *buf, size_t count) | ||
3226 | { | ||
3227 | struct claw_privbk *priv; | ||
3228 | struct claw_env *p_env; | ||
3229 | int nnn,max; | ||
3230 | |||
3231 | priv = dev_get_drvdata(dev); | ||
3232 | if (!priv) | ||
3233 | return -ENODEV; | ||
3234 | p_env = priv->p_env; | ||
3235 | sscanf(buf, "%i", &nnn); | ||
3236 | if (p_env->packing) { | ||
3237 | max = 64; | ||
3238 | } | ||
3239 | else { | ||
3240 | max = 512; | ||
3241 | } | ||
3242 | if ((nnn > max ) || (nnn < 2)) | ||
3243 | return -EINVAL; | ||
3244 | p_env->read_buffers = nnn; | ||
3245 | CLAW_DBF_TEXT(2, setup, "Rbufset"); | ||
3246 | CLAW_DBF_TEXT_(2, setup, "RB=%d", p_env->read_buffers); | ||
3247 | return count; | ||
3248 | } | ||
3249 | static DEVICE_ATTR(read_buffer, 0644, claw_rbuff_show, claw_rbuff_write); | ||
3250 | |||
/* sysfs attributes exported for every CLAW group device. */
static struct attribute *claw_attr[] = {
	&dev_attr_read_buffer.attr,
	&dev_attr_write_buffer.attr,
	&dev_attr_adapter_name.attr,
	&dev_attr_api_type.attr,
	&dev_attr_host_name.attr,
	NULL,
};
static struct attribute_group claw_attr_group = {
	.attrs = claw_attr,
};
static const struct attribute_group *claw_attr_groups[] = {
	&claw_attr_group,
	NULL,
};
/* Device type installed in claw_probe(); attaches the attribute
 * groups to the device automatically. */
static const struct device_type claw_devtype = {
	.name = "claw",
	.groups = claw_attr_groups,
};
3270 | |||
/*----------------------------------------------------------------*
 *   claw_probe                                                   *
 *      this function is called for each CLAW device: it takes a  *
 *      device reference, allocates the private data and fills in *
 *      the default environment.                                  *
 *----------------------------------------------------------------*/
static int claw_probe(struct ccwgroup_device *cgdev)
{
	struct claw_privbk *privptr = NULL;

	CLAW_DBF_TEXT(2, setup, "probe");
	/* Hold a reference on the group device for the lifetime of the
	 * private data; dropped again in claw_remove_device(). */
	if (!get_device(&cgdev->dev))
		return -ENODEV;
	privptr = kzalloc(sizeof(struct claw_privbk), GFP_KERNEL);
	/* drvdata is set before the NULL check so a failed probe
	 * leaves a consistent (NULL) pointer behind. */
	dev_set_drvdata(&cgdev->dev, privptr);
	if (privptr == NULL) {
		probe_error(cgdev);
		put_device(&cgdev->dev);
		CLAW_DBF_TEXT_(2, setup, "probex%d", -ENOMEM);
		return -ENOMEM;
	}
	privptr->p_mtc_envelope = kzalloc(MAX_ENVELOPE_SIZE, GFP_KERNEL);
	privptr->p_env = kzalloc(sizeof(struct claw_env), GFP_KERNEL);
	if ((privptr->p_mtc_envelope == NULL) || (privptr->p_env == NULL)) {
		/* probe_error() (defined elsewhere in this file) is
		 * expected to release the partial allocations. */
		probe_error(cgdev);
		put_device(&cgdev->dev);
		CLAW_DBF_TEXT_(2, setup, "probex%d", -ENOMEM);
		return -ENOMEM;
	}
	/* Default environment: names unset, 5 buffers each way,
	 * unpacked full-size frames. */
	memcpy(privptr->p_env->adapter_name, WS_NAME_NOT_DEF, 8);
	memcpy(privptr->p_env->host_name, WS_NAME_NOT_DEF, 8);
	memcpy(privptr->p_env->api_type, WS_NAME_NOT_DEF, 8);
	privptr->p_env->packing = 0;
	privptr->p_env->write_buffers = 5;
	privptr->p_env->read_buffers = 5;
	privptr->p_env->read_size = CLAW_FRAME_SIZE;
	privptr->p_env->write_size = CLAW_FRAME_SIZE;
	privptr->p_env->p_priv = privptr;
	/* Both subchannels share the one interrupt handler. */
	cgdev->cdev[0]->handler = claw_irq_handler;
	cgdev->cdev[1]->handler = claw_irq_handler;
	cgdev->dev.type = &claw_devtype;
	CLAW_DBF_TEXT(2, setup, "prbext 0");

	return 0;
}  /* end of claw_probe */
3314 | |||
3315 | /*--------------------------------------------------------------------* | ||
3316 | * claw_init and cleanup * | ||
3317 | *---------------------------------------------------------------------*/ | ||
3318 | |||
/* Module unload: tear down in the exact reverse order of claw_init()
 * — keep this the mirror image of the init sequence. */
static void __exit claw_cleanup(void)
{
	ccwgroup_driver_unregister(&claw_group_driver);
	ccw_driver_unregister(&claw_ccw_driver);
	root_device_unregister(claw_root_dev);
	claw_unregister_debug_facility();
	pr_info("Driver unloaded\n");
}
3327 | |||
/**
 * Initialize module.
 * This is called just after the module is loaded: registers the
 * debug facility, the root device, the ccw driver and the ccwgroup
 * driver, unwinding in reverse order on any failure.
 *
 * @return 0 on success, !0 on error.
 */
static int __init claw_init(void)
{
	int ret = 0;

	pr_info("Loading %s\n", version);
	ret = claw_register_debug_facility();
	if (ret) {
		pr_err("Registering with the S/390 debug feature"
			" failed with error code %d\n", ret);
		goto out_err;
	}
	CLAW_DBF_TEXT(2, setup, "init_mod");
	claw_root_dev = root_device_register("claw");
	ret = PTR_ERR_OR_ZERO(claw_root_dev);
	if (ret)
		goto register_err;
	ret = ccw_driver_register(&claw_ccw_driver);
	if (ret)
		goto ccw_err;
	claw_group_driver.driver.groups = claw_drv_attr_groups;
	ret = ccwgroup_driver_register(&claw_group_driver);
	if (ret)
		goto ccwgroup_err;
	return 0;

	/* Error unwind: each label undoes the steps that succeeded
	 * before the failing one, in reverse order. */
ccwgroup_err:
	ccw_driver_unregister(&claw_ccw_driver);
ccw_err:
	root_device_unregister(claw_root_dev);
register_err:
	CLAW_DBF_TEXT(2, setup, "init_bad");
	claw_unregister_debug_facility();
out_err:
	pr_err("Initializing the claw device driver failed\n");
	return ret;
}
3370 | |||
3371 | module_init(claw_init); | ||
3372 | module_exit(claw_cleanup); | ||
3373 | |||
3374 | MODULE_AUTHOR("Andy Richter <richtera@us.ibm.com>"); | ||
3375 | MODULE_DESCRIPTION("Linux for System z CLAW Driver\n" \ | ||
3376 | "Copyright IBM Corp. 2000, 2008\n"); | ||
3377 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/s390/net/claw.h b/drivers/s390/net/claw.h deleted file mode 100644 index 3339b9b607b3..000000000000 --- a/drivers/s390/net/claw.h +++ /dev/null | |||
@@ -1,348 +0,0 @@ | |||
1 | /******************************************************* | ||
2 | * Define constants * | ||
3 | * * | ||
4 | ********************************************************/ | ||
5 | |||
6 | /*-----------------------------------------------------* | ||
7 | * CCW command codes for CLAW protocol * | ||
8 | *------------------------------------------------------*/ | ||
9 | |||
10 | #define CCW_CLAW_CMD_WRITE 0x01 /* write - not including link */ | ||
11 | #define CCW_CLAW_CMD_READ 0x02 /* read */ | ||
12 | #define CCW_CLAW_CMD_NOP 0x03 /* NOP */ | ||
13 | #define CCW_CLAW_CMD_SENSE 0x04 /* Sense */ | ||
14 | #define CCW_CLAW_CMD_SIGNAL_SMOD 0x05 /* Signal Status Modifier */ | ||
15 | #define CCW_CLAW_CMD_TIC 0x08 /* TIC */ | ||
16 | #define CCW_CLAW_CMD_READHEADER 0x12 /* read header data */ | ||
17 | #define CCW_CLAW_CMD_READFF 0x22 /* read an FF */ | ||
18 | #define CCW_CLAW_CMD_SENSEID 0xe4 /* Sense ID */ | ||
19 | |||
20 | |||
21 | /*-----------------------------------------------------* | ||
22 | * CLAW Unique constants * | ||
23 | *------------------------------------------------------*/ | ||
24 | |||
25 | #define MORE_to_COME_FLAG 0x04 /* OR with write CCW in case of m-t-c */ | ||
26 | #define CLAW_IDLE 0x00 /* flag to indicate CLAW is idle */ | ||
27 | #define CLAW_BUSY 0xff /* flag to indicate CLAW is busy */ | ||
28 | #define CLAW_PENDING 0x00 /* flag to indicate i/o is pending */ | ||
29 | #define CLAW_COMPLETE 0xff /* flag to indicate i/o completed */ | ||
30 | |||
31 | /*-----------------------------------------------------* | ||
32 | * CLAW control command code * | ||
33 | *------------------------------------------------------*/ | ||
34 | |||
35 | #define SYSTEM_VALIDATE_REQUEST 0x01 /* System Validate request */ | ||
36 | #define SYSTEM_VALIDATE_RESPONSE 0x02 /* System Validate response */ | ||
37 | #define CONNECTION_REQUEST 0x21 /* Connection request */ | ||
38 | #define CONNECTION_RESPONSE 0x22 /* Connection response */ | ||
39 | #define CONNECTION_CONFIRM 0x23 /* Connection confirm */ | ||
40 | #define DISCONNECT 0x24 /* Disconnect */ | ||
41 | #define CLAW_ERROR 0x41 /* CLAW error message */ | ||
42 | #define CLAW_VERSION_ID 2 /* CLAW version ID */ | ||
43 | |||
44 | /*-----------------------------------------------------* | ||
45 | * CLAW adater sense bytes * | ||
46 | *------------------------------------------------------*/ | ||
47 | |||
48 | #define CLAW_ADAPTER_SENSE_BYTE 0x41 /* Stop command issued to adapter */ | ||
49 | |||
50 | /*-----------------------------------------------------* | ||
51 | * CLAW control command return codes * | ||
52 | *------------------------------------------------------*/ | ||
53 | |||
54 | #define CLAW_RC_NAME_MISMATCH 166 /* names do not match */ | ||
55 | #define CLAW_RC_WRONG_VERSION 167 /* wrong CLAW version number */ | ||
56 | #define CLAW_RC_HOST_RCV_TOO_SMALL 180 /* Host maximum receive is */ | ||
57 | /* less than Linux on zSeries*/ | ||
58 | /* transmit size */ | ||
59 | |||
60 | /*-----------------------------------------------------* | ||
61 | * CLAW Constants application name * | ||
62 | *------------------------------------------------------*/ | ||
63 | |||
64 | #define HOST_APPL_NAME "TCPIP " | ||
65 | #define WS_APPL_NAME_IP_LINK "TCPIP " | ||
66 | #define WS_APPL_NAME_IP_NAME "IP " | ||
67 | #define WS_APPL_NAME_API_LINK "API " | ||
68 | #define WS_APPL_NAME_PACKED "PACKED " | ||
69 | #define WS_NAME_NOT_DEF "NOT_DEF " | ||
70 | #define PACKING_ASK 1 | ||
71 | #define PACK_SEND 2 | ||
72 | #define DO_PACKED 3 | ||
73 | |||
74 | #define MAX_ENVELOPE_SIZE 65536 | ||
75 | #define CLAW_DEFAULT_MTU_SIZE 4096 | ||
76 | #define DEF_PACK_BUFSIZE 32768 | ||
77 | #define READ_CHANNEL 0 | ||
78 | #define WRITE_CHANNEL 1 | ||
79 | |||
/* Busy-flag bit numbers — see claw_setbit_busy()/claw_clearbit_busy(). */
#define TB_TX                   0          /* sk buffer handling in process  */
#define TB_STOP                 1          /* network device stop in process */
#define TB_RETRY                2          /* retry in process               */
#define TB_NOBUFFER             3          /* no buffer on free queue        */
#define CLAW_MAX_LINK_ID        1
#define CLAW_MAX_DEV            256        /* max claw devices               */
#define MAX_NAME_LEN            8          /* host name, adapter name length */
#define CLAW_FRAME_SIZE         4096
/* Fix: parenthesize the expression macro — the bare `20+3` mis-expands
 * in arithmetic contexts such as `2 * CLAW_ID_SIZE`. */
#define CLAW_ID_SIZE            (20 + 3)
89 | |||
90 | /* state machine codes used in claw_irq_handler */ | ||
91 | |||
92 | #define CLAW_STOP 0 | ||
93 | #define CLAW_START_HALT_IO 1 | ||
94 | #define CLAW_START_SENSEID 2 | ||
95 | #define CLAW_START_READ 3 | ||
96 | #define CLAW_START_WRITE 4 | ||
97 | |||
/*-----------------------------------------------------*
*     Lock flag                                        *
*------------------------------------------------------*/
/* NOTE: LOCK_YES is 0 and LOCK_NO is 1 — these are opaque selector
 * values passed to helpers such as claw_strt_read(); do not treat
 * them as booleans. */
#define LOCK_YES 0
#define LOCK_NO  1
103 | |||
/*-----------------------------------------------------*
*     DBF Debug macros                                 *
*------------------------------------------------------*/
/* Log a literal string to the claw_dbf_<name> debug area at @level. */
#define CLAW_DBF_TEXT(level, name, text) \
	do { \
		debug_text_event(claw_dbf_##name, level, text); \
	} while (0)

/* Log a hex dump of @len bytes at @addr. */
#define CLAW_DBF_HEX(level,name,addr,len) \
	do { \
		debug_event(claw_dbf_##name,level,(void*)(addr),len); \
	} while (0)

/* printf-style logging; only formats when @level is enabled.
 * NOTE(review): formats into the shared 'debug_buffer' — presumably
 * serialized elsewhere; confirm locking of debug_buffer. */
#define CLAW_DBF_TEXT_(level,name,text...) \
	do { \
		if (debug_level_enabled(claw_dbf_##name, level)) { \
			sprintf(debug_buffer, text); \
			debug_text_event(claw_dbf_##name, level, \
						debug_buffer); \
		} \
	} while (0)
125 | |||
/**
 * Enum for classifying detected devices.
 * NOTE(review): consumers are not visible in this chunk — presumably
 * used by the channel-detection/probe code.
 */
enum claw_channel_types {
	/* Device is not a channel */
	claw_channel_type_none,

	/* Device is a CLAW channel device */
	claw_channel_type_claw
};
136 | |||
137 | |||
138 | /******************************************************* | ||
139 | * Define Control Blocks * | ||
140 | * * | ||
141 | ********************************************************/ | ||
142 | |||
/*------------------------------------------------------*/
/*     CLAW header                                      */
/*------------------------------------------------------*/
/* On-the-wire CLAW frame header — field sizes and order are part of
 * the protocol; do not change. */
struct clawh {
	__u16  length;     /* length of data read by preceding read CCW */
	__u8   opcode;     /* equivalent read CCW */
	__u8   flag;       /* flag of FF to indicate read was completed */
};
152 | |||
/*------------------------------------------------------*/
/*     CLAW Packing header   4 bytes                    */
/*------------------------------------------------------*/
/* NOTE(review): presumably prefixes each packed sub-frame when
 * packing is negotiated (PACKING_ASK/PACK_SEND/DO_PACKED) — confirm
 * against the packing code.  Wire format: keep exactly 4 bytes. */
struct clawph {
	__u16 len;         /* Length of Packed Data Area   */
	__u8  flag;        /* Reserved not used            */
	__u8  link_num;    /* Link ID                      */
};
161 | |||
162 | /*------------------------------------------------------*/ | ||
163 | /* CLAW Ending struct ccwbk */ | ||
164 | /*------------------------------------------------------*/ | ||
/* Chain-terminating block: pairs of NOP CCWs used to end the two write
 * and two read CCW programs, plus per-direction "active" flags.  Layout
 * is consumed by the channel subsystem -- do not reorder fields. */
165 | struct endccw { | ||
166 | __u32 real; /* real address of this block */ | ||
167 | __u8 write1; /* write 1 is active */ | ||
168 | __u8 read1; /* read 1 is active */ | ||
169 | __u16 reserved; /* reserved for future use */ | ||
170 | struct ccw1 write1_nop1; | ||
171 | struct ccw1 write1_nop2; | ||
172 | struct ccw1 write2_nop1; | ||
173 | struct ccw1 write2_nop2; | ||
174 | struct ccw1 read1_nop1; | ||
175 | struct ccw1 read1_nop2; | ||
176 | struct ccw1 read2_nop1; | ||
177 | struct ccw1 read2_nop2; | ||
178 | }; | ||
179 | |||
180 | /*------------------------------------------------------*/ | ||
181 | /* CLAW struct ccwbk */ | ||
182 | /*------------------------------------------------------*/ | ||
/* One element of a singly linked CCW chain: bookkeeping pointers plus
 * the CCWs driving a single buffer in either direction.  The ccw1
 * members are executed by the channel subsystem -- do not reorder. */
183 | struct ccwbk { | ||
184 | void *next; /* pointer to next ccw block */ | ||
185 | __u32 real; /* real address of this ccw */ | ||
186 | void *p_buffer; /* virtual address of data */ | ||
187 | struct clawh header; /* claw header */ | ||
188 | struct ccw1 write; /* write CCW */ | ||
189 | struct ccw1 w_read_FF; /* read FF */ | ||
190 | struct ccw1 w_TIC_1; /* TIC */ | ||
191 | struct ccw1 read; /* read CCW */ | ||
192 | struct ccw1 read_h; /* read header */ | ||
193 | struct ccw1 signal; /* signal SMOD */ | ||
194 | struct ccw1 r_TIC_1; /* TIC1 */ | ||
195 | struct ccw1 r_read_FF; /* read FF */ | ||
196 | struct ccw1 r_TIC_2; /* TIC2 */ | ||
197 | }; | ||
198 | |||
199 | /*------------------------------------------------------*/ | ||
200 | /* CLAW control block */ | ||
201 | /*------------------------------------------------------*/ | ||
/* 32-byte CLAW control message exchanged with the peer (e.g. system
 * validate, connect); 'data' holds the command-specific payload such as
 * struct sysval or struct conncmd.  Wire format -- do not reorder. */
202 | struct clawctl { | ||
203 | __u8 command; /* control command */ | ||
204 | __u8 version; /* CLAW protocol version */ | ||
205 | __u8 linkid; /* link ID */ | ||
206 | __u8 correlator; /* correlator */ | ||
207 | __u8 rc; /* return code */ | ||
208 | __u8 reserved1; /* reserved */ | ||
209 | __u8 reserved2; /* reserved */ | ||
210 | __u8 reserved3; /* reserved */ | ||
211 | __u8 data[24]; /* command specific fields */ | ||
212 | }; | ||
213 | |||
214 | /*------------------------------------------------------*/ | ||
215 | /* Data for SYSTEMVALIDATE command */ | ||
216 | /*------------------------------------------------------*/ | ||
/* Payload of the SYSTEMVALIDATE control command: names of both ends
 * plus the negotiated read/write frame sizes.  Wire format. */
217 | struct sysval { | ||
218 | char WS_name[8]; /* Workstation System name */ | ||
219 | char host_name[8]; /* Host system name */ | ||
220 | __u16 read_frame_size; /* read frame size */ | ||
221 | __u16 write_frame_size; /* write frame size */ | ||
222 | __u8 reserved[4]; /* reserved */ | ||
223 | }; | ||
224 | |||
225 | /*------------------------------------------------------*/ | ||
226 | /* Data for Connect command */ | ||
227 | /*------------------------------------------------------*/ | ||
/* Payload of the CONNECT control command: application names of both
 * ends.  Wire format -- do not reorder fields. */
228 | struct conncmd { | ||
229 | char WS_name[8]; /* Workstation application name */ | ||
230 | char host_name[8]; /* Host application name */ | ||
231 | __u16 reserved1[2]; /* reserved (old comment said "read frame size" -- apparent copy/paste from struct sysval) */ | ||
232 | __u8 reserved2[4]; /* reserved */ | ||
233 | }; | ||
234 | |||
235 | /*------------------------------------------------------*/ | ||
236 | /* Data for CLAW error */ | ||
237 | /*------------------------------------------------------*/ | ||
/* Payload of a CLAW error control message; all 24 bytes are currently
 * reserved/unused by this driver.  Wire format. */
238 | struct clawwerror { | ||
239 | char reserved1[8]; /* reserved */ | ||
240 | char reserved2[8]; /* reserved */ | ||
241 | char reserved3[8]; /* reserved */ | ||
242 | }; | ||
243 | |||
244 | /*------------------------------------------------------*/ | ||
245 | /* Data buffer for CLAW */ | ||
246 | /*------------------------------------------------------*/ | ||
/* Fixed-size data envelope buffer (MAX_ENVELOPE_SIZE bytes) used for
 * CLAW frame data. */
247 | struct clawbuf { | ||
248 | char buffer[MAX_ENVELOPE_SIZE]; /* data buffer */ | ||
249 | }; | ||
250 | |||
251 | /*------------------------------------------------------*/ | ||
252 | /* Channel control block for read and write channel */ | ||
253 | /*------------------------------------------------------*/ | ||
254 | |||
/* Per-channel control block; one instance each for the read and the
 * write channel (see 'flag' bits CLAW_READ / CLAW_WRITE below). */
255 | struct chbk { | ||
256 | unsigned int devno; /* channel device number */ | ||
257 | int irq; /* subchannel/interrupt identifier */ | ||
258 | char id[CLAW_ID_SIZE]; /* bus id string of the channel */ | ||
259 | __u32 IO_active; /* nonzero while channel I/O is in flight */ | ||
260 | __u8 claw_state; /* current CLAW protocol state of this channel */ | ||
261 | struct irb *irb; /* last interruption response block */ | ||
262 | struct ccw_device *cdev; /* pointer to the channel device */ | ||
263 | struct net_device *ndev; /* owning network device */ | ||
264 | wait_queue_head_t wait; /* waiters for channel state changes */ | ||
265 | struct tasklet_struct tasklet; /* bottom-half processing */ | ||
266 | struct timer_list timer; /* I/O timeout timer (see CLAW_TIMER) */ | ||
267 | unsigned long flag_a; /* atomic flags */ | ||
268 | #define CLAW_BH_ACTIVE 0 | ||
269 | unsigned long flag_b; /* atomic flags */ | ||
270 | #define CLAW_WRITE_ACTIVE 0 | ||
271 | __u8 last_dstat; /* device status from the last interrupt */ | ||
272 | __u8 flag; /* channel role/state bits, see below */ | ||
273 | struct sk_buff_head collect_queue; /* skbs queued while the write channel is busy */ | ||
274 | spinlock_t collect_lock; /* protects collect_queue */ | ||
275 | #define CLAW_WRITE 0x02 /* - Set if this is a write channel */ | ||
276 | #define CLAW_READ 0x01 /* - Set if this is a read channel */ | ||
277 | #define CLAW_TIMER 0x80 /* - Set if timer made the wake_up */ | ||
278 | }; | ||
279 | |||
280 | /*--------------------------------------------------------------* | ||
281 | * CLAW environment block * | ||
282 | *---------------------------------------------------------------*/ | ||
283 | |||
/* Per-device configuration ("environment"): names, buffer counts and
 * sizes, and mode flags, plus back-pointers to the private data and the
 * net device. */
284 | struct claw_env { | ||
285 | unsigned int devno[2]; /* device number */ | ||
286 | char host_name[9]; /* Host name */ | ||
287 | char adapter_name [9]; /* adapter name */ | ||
288 | char api_type[9]; /* TCPIP, API or PACKED */ | ||
289 | void *p_priv; /* privptr */ | ||
290 | __u16 read_buffers; /* read buffer number */ | ||
291 | __u16 write_buffers; /* write buffer number */ | ||
292 | __u16 read_size; /* read buffer size */ | ||
293 | __u16 write_size; /* write buffer size */ | ||
294 | __u16 dev_id; /* device ident */ | ||
295 | __u8 packing; /* are we packing? */ | ||
296 | __u8 in_use; /* device active flag */ | ||
297 | struct net_device *ndev; /* backward ptr to the net dev*/ | ||
298 | }; | ||
299 | |||
300 | /*--------------------------------------------------------------* | ||
301 | * CLAW main control block * | ||
302 | *---------------------------------------------------------------*/ | ||
303 | |||
/* Main per-device private control block: CCW/buffer pools, the active
 * and free write chains, the active read chain, both channel control
 * blocks, statistics, and connection state. */
304 | struct claw_privbk { | ||
305 | void *p_buff_ccw; /* pool of ccwbk blocks */ | ||
306 | __u32 p_buff_ccw_num; /* number of ccwbk blocks in the pool */ | ||
307 | void *p_buff_read; /* read data buffer area */ | ||
308 | __u32 p_buff_read_num; /* number of read buffers */ | ||
309 | __u32 p_buff_pages_perread; /* pages backing each read buffer */ | ||
310 | void *p_buff_write; /* write data buffer area */ | ||
311 | __u32 p_buff_write_num; /* number of write buffers */ | ||
312 | __u32 p_buff_pages_perwrite; /* pages backing each write buffer */ | ||
313 | long active_link_ID; /* Active logical link ID */ | ||
314 | struct ccwbk *p_write_free_chain; /* pointer to free ccw chain */ | ||
315 | struct ccwbk *p_write_active_first; /* ptr to the first write ccw */ | ||
316 | struct ccwbk *p_write_active_last; /* ptr to the last write ccw */ | ||
317 | struct ccwbk *p_read_active_first; /* ptr to the first read ccw */ | ||
318 | struct ccwbk *p_read_active_last; /* ptr to the last read ccw */ | ||
319 | struct endccw *p_end_ccw; /*ptr to ending ccw */ | ||
320 | struct ccwbk *p_claw_signal_blk; /* ptr to signal block */ | ||
321 | __u32 write_free_count; /* number of free bufs for write */ | ||
322 | struct net_device_stats stats; /* device status */ | ||
323 | struct chbk channel[2]; /* Channel control blocks */ | ||
324 | __u8 mtc_skipping; /* mtc_*: state for frames spanning buffers -- presumably "more to come" reassembly; confirm against claw.c */ | ||
325 | int mtc_offset; /* current offset into the reassembly envelope */ | ||
326 | int mtc_logical_link; /* logical link the split frame belongs to */ | ||
327 | void *p_mtc_envelope; /* reassembly buffer for split frames */ | ||
328 | struct sk_buff *pk_skb; /* packing buffer */ | ||
329 | int pk_cnt; /* number of sub-frames packed into pk_skb */ | ||
330 | struct clawctl ctl_bk; /* scratch control-message block */ | ||
331 | struct claw_env *p_env; /* configuration environment */ | ||
332 | __u8 system_validate_comp; /* system-validate handshake completed */ | ||
333 | __u8 release_pend; /* release of the connection is pending */ | ||
334 | __u8 checksum_received_ip_pkts; /* checksum received IP packets flag */ | ||
335 | __u8 buffs_alloc; /* buffers have been allocated */ | ||
336 | struct endccw end_ccw; /* embedded chain-terminator block */ | ||
337 | unsigned long tbusy; /* transmit-busy flag */ | ||
338 | |||
339 | }; | ||
340 | |||
341 | |||
342 | /************************************************************/ | ||
343 | /* define global constants */ | ||
344 | /************************************************************/ | ||
345 | |||
/* Size in bytes of one CCW chain element, used when carving the
 * p_buff_ccw pool into individual struct ccwbk blocks. */
346 | #define CCWBK_SIZE sizeof(struct ccwbk) | ||
347 | |||
348 | |||
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 642c77c76b84..3466d3cb7647 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c | |||
@@ -4218,7 +4218,7 @@ void qeth_setadp_promisc_mode(struct qeth_card *card) | |||
4218 | QETH_CARD_TEXT_(card, 4, "mode:%x", mode); | 4218 | QETH_CARD_TEXT_(card, 4, "mode:%x", mode); |
4219 | 4219 | ||
4220 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE, | 4220 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE, |
4221 | sizeof(struct qeth_ipacmd_setadpparms)); | 4221 | sizeof(struct qeth_ipacmd_setadpparms_hdr) + 8); |
4222 | if (!iob) | 4222 | if (!iob) |
4223 | return; | 4223 | return; |
4224 | cmd = (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE); | 4224 | cmd = (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE); |
@@ -4290,7 +4290,8 @@ int qeth_setadpparms_change_macaddr(struct qeth_card *card) | |||
4290 | QETH_CARD_TEXT(card, 4, "chgmac"); | 4290 | QETH_CARD_TEXT(card, 4, "chgmac"); |
4291 | 4291 | ||
4292 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS, | 4292 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS, |
4293 | sizeof(struct qeth_ipacmd_setadpparms)); | 4293 | sizeof(struct qeth_ipacmd_setadpparms_hdr) + |
4294 | sizeof(struct qeth_change_addr)); | ||
4294 | if (!iob) | 4295 | if (!iob) |
4295 | return -ENOMEM; | 4296 | return -ENOMEM; |
4296 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 4297 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 2bbfc25e582c..18f05bff8826 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c | |||
@@ -390,7 +390,7 @@ static void handle_tx(struct vhost_net *net) | |||
390 | ubufs = NULL; | 390 | ubufs = NULL; |
391 | } | 391 | } |
392 | /* TODO: Check specific error and bomb out unless ENOBUFS? */ | 392 | /* TODO: Check specific error and bomb out unless ENOBUFS? */ |
393 | err = sock->ops->sendmsg(NULL, sock, &msg, len); | 393 | err = sock->ops->sendmsg(sock, &msg, len); |
394 | if (unlikely(err < 0)) { | 394 | if (unlikely(err < 0)) { |
395 | if (zcopy_used) { | 395 | if (zcopy_used) { |
396 | vhost_net_ubuf_put(ubufs); | 396 | vhost_net_ubuf_put(ubufs); |
@@ -566,7 +566,7 @@ static void handle_rx(struct vhost_net *net) | |||
566 | /* On overrun, truncate and discard */ | 566 | /* On overrun, truncate and discard */ |
567 | if (unlikely(headcount > UIO_MAXIOV)) { | 567 | if (unlikely(headcount > UIO_MAXIOV)) { |
568 | iov_iter_init(&msg.msg_iter, READ, vq->iov, 1, 1); | 568 | iov_iter_init(&msg.msg_iter, READ, vq->iov, 1, 1); |
569 | err = sock->ops->recvmsg(NULL, sock, &msg, | 569 | err = sock->ops->recvmsg(sock, &msg, |
570 | 1, MSG_DONTWAIT | MSG_TRUNC); | 570 | 1, MSG_DONTWAIT | MSG_TRUNC); |
571 | pr_debug("Discarded rx packet: len %zd\n", sock_len); | 571 | pr_debug("Discarded rx packet: len %zd\n", sock_len); |
572 | continue; | 572 | continue; |
@@ -592,7 +592,7 @@ static void handle_rx(struct vhost_net *net) | |||
592 | */ | 592 | */ |
593 | iov_iter_advance(&msg.msg_iter, vhost_hlen); | 593 | iov_iter_advance(&msg.msg_iter, vhost_hlen); |
594 | } | 594 | } |
595 | err = sock->ops->recvmsg(NULL, sock, &msg, | 595 | err = sock->ops->recvmsg(sock, &msg, |
596 | sock_len, MSG_DONTWAIT | MSG_TRUNC); | 596 | sock_len, MSG_DONTWAIT | MSG_TRUNC); |
597 | /* Userspace might have consumed the packet meanwhile: | 597 | /* Userspace might have consumed the packet meanwhile: |
598 | * it's not supposed to do this usually, but might be hard | 598 | * it's not supposed to do this usually, but might be hard |