author    David S. Miller <davem@davemloft.net>    2010-11-17 12:56:04 -0500
committer David S. Miller <davem@davemloft.net>    2010-11-17 12:56:04 -0500
commit    a1082bfe7d2d88d9626f5542dda9c0781394e91f (patch)
tree      5bf2c1084bc9ed223a35d1bcd1f494185c1d48e0 /drivers
parent    9d1e5e40d6cac4bf7008e04c202d71918455ca11 (diff)
parent    147b2c8cb4f3e16aafc87096365a913d01ee3a21 (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/jkirsher/net-next-2.6
Diffstat (limited to 'drivers')
43 files changed, 2571 insertions, 1264 deletions
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index 7236f1a53ba0..9333921010cc 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -74,6 +74,9 @@ static bool e1000_check_mng_mode_82574(struct e1000_hw *hw);
 static s32 e1000_led_on_82574(struct e1000_hw *hw);
 static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw);
 static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw);
+static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw);
+static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw);
+static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw);
 
 /**
  * e1000_init_phy_params_82571 - Init PHY func ptrs.
@@ -107,6 +110,8 @@ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
 	case e1000_82574:
 	case e1000_82583:
 		phy->type = e1000_phy_bm;
+		phy->ops.acquire = e1000_get_hw_semaphore_82574;
+		phy->ops.release = e1000_put_hw_semaphore_82574;
 		break;
 	default:
 		return -E1000_ERR_PHY;
@@ -200,6 +205,17 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
 		break;
 	}
 
+	/* Function Pointers */
+	switch (hw->mac.type) {
+	case e1000_82574:
+	case e1000_82583:
+		nvm->ops.acquire = e1000_get_hw_semaphore_82574;
+		nvm->ops.release = e1000_put_hw_semaphore_82574;
+		break;
+	default:
+		break;
+	}
+
 	return 0;
 }
 
@@ -542,6 +558,94 @@ static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw)
 	swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
 	ew32(SWSM, swsm);
 }
+/**
+ * e1000_get_hw_semaphore_82573 - Acquire hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the HW semaphore during reset.
+ *
+ **/
+static s32 e1000_get_hw_semaphore_82573(struct e1000_hw *hw)
+{
+	u32 extcnf_ctrl;
+	s32 ret_val = 0;
+	s32 i = 0;
+
+	extcnf_ctrl = er32(EXTCNF_CTRL);
+	extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
+	do {
+		ew32(EXTCNF_CTRL, extcnf_ctrl);
+		extcnf_ctrl = er32(EXTCNF_CTRL);
+
+		if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP)
+			break;
+
+		extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
+
+		msleep(2);
+		i++;
+	} while (i < MDIO_OWNERSHIP_TIMEOUT);
+
+	if (i == MDIO_OWNERSHIP_TIMEOUT) {
+		/* Release semaphores */
+		e1000_put_hw_semaphore_82573(hw);
+		e_dbg("Driver can't access the PHY\n");
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_put_hw_semaphore_82573 - Release hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Release hardware semaphore used during reset.
+ *
+ **/
+static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw)
+{
+	u32 extcnf_ctrl;
+
+	extcnf_ctrl = er32(EXTCNF_CTRL);
+	extcnf_ctrl &= ~E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
+	ew32(EXTCNF_CTRL, extcnf_ctrl);
+}
+
+static DEFINE_MUTEX(swflag_mutex);
+
+/**
+ * e1000_get_hw_semaphore_82574 - Acquire hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the HW semaphore to access the PHY or NVM.
+ *
+ **/
+static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw)
+{
+	s32 ret_val;
+
+	mutex_lock(&swflag_mutex);
+	ret_val = e1000_get_hw_semaphore_82573(hw);
+	if (ret_val)
+		mutex_unlock(&swflag_mutex);
+	return ret_val;
+}
+
+/**
+ * e1000_put_hw_semaphore_82574 - Release hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Release hardware semaphore used to access the PHY or NVM
+ *
+ **/
+static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw)
+{
+	e1000_put_hw_semaphore_82573(hw);
+	mutex_unlock(&swflag_mutex);
+}
 
 /**
  * e1000_acquire_nvm_82571 - Request for access to the EEPROM
@@ -562,8 +666,6 @@ static s32 e1000_acquire_nvm_82571(struct e1000_hw *hw)
 
 	switch (hw->mac.type) {
 	case e1000_82573:
-	case e1000_82574:
-	case e1000_82583:
 		break;
 	default:
 		ret_val = e1000e_acquire_nvm(hw);
@@ -853,9 +955,8 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active)
 **/
 static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
 {
-	u32 ctrl, extcnf_ctrl, ctrl_ext, icr;
+	u32 ctrl, ctrl_ext, icr;
 	s32 ret_val;
-	u16 i = 0;
 
 	/*
 	 * Prevent the PCI-E bus from sticking if there is no TLP connection
@@ -880,33 +981,33 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
 	 */
 	switch (hw->mac.type) {
 	case e1000_82573:
+		ret_val = e1000_get_hw_semaphore_82573(hw);
+		break;
 	case e1000_82574:
 	case e1000_82583:
-		extcnf_ctrl = er32(EXTCNF_CTRL);
-		extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
-
-		do {
-			ew32(EXTCNF_CTRL, extcnf_ctrl);
-			extcnf_ctrl = er32(EXTCNF_CTRL);
-
-			if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP)
-				break;
-
-			extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
-
-			msleep(2);
-			i++;
-		} while (i < MDIO_OWNERSHIP_TIMEOUT);
+		ret_val = e1000_get_hw_semaphore_82574(hw);
 		break;
 	default:
 		break;
 	}
+	if (ret_val)
+		e_dbg("Cannot acquire MDIO ownership\n");
 
 	ctrl = er32(CTRL);
 
 	e_dbg("Issuing a global reset to MAC\n");
 	ew32(CTRL, ctrl | E1000_CTRL_RST);
 
+	/* Must release MDIO ownership and mutex after MAC reset. */
+	switch (hw->mac.type) {
+	case e1000_82574:
+	case e1000_82583:
+		e1000_put_hw_semaphore_82574(hw);
+		break;
+	default:
+		break;
+	}
+
 	if (hw->nvm.type == e1000_nvm_flash_hw) {
 		udelay(10);
 		ctrl_ext = er32(CTRL_EXT);
@@ -1431,8 +1532,10 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
 			 * auto-negotiation in the TXCW register and disable
 			 * forced link in the Device Control register in an
 			 * attempt to auto-negotiate with our link partner.
+			 * If the partner code word is null, stop forcing
+			 * and restart auto negotiation.
 			 */
-			if (rxcw & E1000_RXCW_C) {
+			if ((rxcw & E1000_RXCW_C) || !(rxcw & E1000_RXCW_CW)) {
 				/* Enable autoneg, and unforce link up */
 				ew32(TXCW, mac->txcw);
 				ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
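
The 82574/82583 locking added above is a two-level pattern: a kernel mutex serializes software contexts, and the EXTCNF_CTRL MDIO ownership bit arbitrates with hardware/firmware; note that the mutex is dropped on the failure path, so a caller never returns holding the lock. A minimal sketch of that pattern, with hypothetical names (my_hw, my_hw_take_ownership) standing in for the driver's register helpers:

    /* Sketch only: a mutex serializes software contexts, then a hardware
     * ownership bit arbitrates with firmware. e1000e implements this as
     * e1000_get_hw_semaphore_82574()/e1000_put_hw_semaphore_82574(). */
    static DEFINE_MUTEX(my_swflag_mutex);

    static int my_acquire(struct my_hw *hw)
    {
            mutex_lock(&my_swflag_mutex);
            if (my_hw_take_ownership(hw) < 0) {     /* e.g. poll EXTCNF_CTRL */
                    mutex_unlock(&my_swflag_mutex); /* never return locked */
                    return -EBUSY;
            }
            return 0;
    }

    static void my_release(struct my_hw *hw)
    {
            my_hw_drop_ownership(hw);
            mutex_unlock(&my_swflag_mutex);
    }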
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index d3f7a9c3f973..016ea383145a 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -516,6 +516,7 @@
 #define E1000_TXCW_ANE        0x80000000 /* Auto-neg enable */
 
 /* Receive Configuration Word */
+#define E1000_RXCW_CW         0x0000ffff /* RxConfigWord mask */
 #define E1000_RXCW_IV         0x08000000 /* Receive config invalid */
 #define E1000_RXCW_C          0x20000000 /* Receive config */
 #define E1000_RXCW_SYNCH      0x40000000 /* Receive config synch */
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index a6d54e460001..9b3f0a996b00 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -5465,6 +5465,36 @@ static void e1000_shutdown(struct pci_dev *pdev)
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
+
+static irqreturn_t e1000_intr_msix(int irq, void *data)
+{
+	struct net_device *netdev = data;
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	int vector, msix_irq;
+
+	if (adapter->msix_entries) {
+		vector = 0;
+		msix_irq = adapter->msix_entries[vector].vector;
+		disable_irq(msix_irq);
+		e1000_intr_msix_rx(msix_irq, netdev);
+		enable_irq(msix_irq);
+
+		vector++;
+		msix_irq = adapter->msix_entries[vector].vector;
+		disable_irq(msix_irq);
+		e1000_intr_msix_tx(msix_irq, netdev);
+		enable_irq(msix_irq);
+
+		vector++;
+		msix_irq = adapter->msix_entries[vector].vector;
+		disable_irq(msix_irq);
+		e1000_msix_other(msix_irq, netdev);
+		enable_irq(msix_irq);
+	}
+
+	return IRQ_HANDLED;
+}
+
 /*
  * Polling 'interrupt' - used by things like netconsole to send skbs
  * without having to re-enable interrupts. It's not called while
@@ -5474,10 +5504,21 @@ static void e1000_netpoll(struct net_device *netdev)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 
-	disable_irq(adapter->pdev->irq);
-	e1000_intr(adapter->pdev->irq, netdev);
-
-	enable_irq(adapter->pdev->irq);
+	switch (adapter->int_mode) {
+	case E1000E_INT_MODE_MSIX:
+		e1000_intr_msix(adapter->pdev->irq, netdev);
+		break;
+	case E1000E_INT_MODE_MSI:
+		disable_irq(adapter->pdev->irq);
+		e1000_intr_msi(adapter->pdev->irq, netdev);
+		enable_irq(adapter->pdev->irq);
+		break;
+	default: /* E1000E_INT_MODE_LEGACY */
+		disable_irq(adapter->pdev->irq);
+		e1000_intr(adapter->pdev->irq, netdev);
+		enable_irq(adapter->pdev->irq);
+		break;
+	}
 }
 #endif
 
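Netpoll users such as netconsole call the driver's poll_controller with normal interrupt delivery unavailable, so each vector's handler has to be invoked synchronously with its own IRQ masked; that is why the MSI-X branch above walks every msix_entries[] vector instead of just pdev->irq. A hedged sketch of the general shape of such a callback (driver names here are illustrative; e1000e's real version is the e1000_netpoll() just patched):

    /* Illustrative ndo_poll_controller for an MSI-X driver: mask each
     * vector's IRQ, run its handler inline, then unmask it again. */
    static void mydrv_netpoll(struct net_device *netdev)
    {
            struct mydrv_adapter *ad = netdev_priv(netdev);
            int i;

            for (i = 0; i < ad->num_vectors; i++) {
                    int irq = ad->msix_entries[i].vector;

                    disable_irq(irq);
                    ad->vec_handler[i](irq, netdev); /* hypothetical table */
                    enable_irq(irq);
            }
    }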
diff --git a/drivers/net/igbvf/Makefile b/drivers/net/igbvf/Makefile
index c2f150d8f2d9..0fa3db3dd8b6 100644
--- a/drivers/net/igbvf/Makefile
+++ b/drivers/net/igbvf/Makefile
@@ -1,7 +1,7 @@
 ################################################################################
 #
 # Intel(R) 82576 Virtual Function Linux driver
-# Copyright(c) 2009 Intel Corporation.
+# Copyright(c) 2009 - 2010 Intel Corporation.
 #
 # This program is free software; you can redistribute it and/or modify it
 # under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/igbvf/defines.h b/drivers/net/igbvf/defines.h
index 88a47537518a..79f2604673fe 100644
--- a/drivers/net/igbvf/defines.h
+++ b/drivers/net/igbvf/defines.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) 82576 Virtual Function Linux driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/igbvf/ethtool.c b/drivers/net/igbvf/ethtool.c
index 33add708bcbe..abb3606928fb 100644
--- a/drivers/net/igbvf/ethtool.c
+++ b/drivers/net/igbvf/ethtool.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) 82576 Virtual Function Linux driver
-  Copyright(c) 2009 Intel Corporation.
+  Copyright(c) 2009 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/igbvf/igbvf.h b/drivers/net/igbvf/igbvf.h
index debeee2dc717..9d4d63e536d4 100644
--- a/drivers/net/igbvf/igbvf.h
+++ b/drivers/net/igbvf/igbvf.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) 82576 Virtual Function Linux driver
-  Copyright(c) 2009 Intel Corporation.
+  Copyright(c) 2009 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -126,7 +126,6 @@ struct igbvf_buffer {
 			unsigned int page_offset;
 		};
 	};
-	struct page *page;
 };
 
 union igbvf_desc {
diff --git a/drivers/net/igbvf/mbx.c b/drivers/net/igbvf/mbx.c
index 819a8ec901dc..3d6f4cc3998a 100644
--- a/drivers/net/igbvf/mbx.c
+++ b/drivers/net/igbvf/mbx.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) 82576 Virtual Function Linux driver
-  Copyright(c) 2009 Intel Corporation.
+  Copyright(c) 2009 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/igbvf/mbx.h b/drivers/net/igbvf/mbx.h
index 4938609dbfb5..c2883c45d477 100644
--- a/drivers/net/igbvf/mbx.h
+++ b/drivers/net/igbvf/mbx.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) 82576 Virtual Function Linux driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index 28af019c97bb..4c998b7726da 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) 82576 Virtual Function Linux driver
-  Copyright(c) 2009 Intel Corporation.
+  Copyright(c) 2009 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -44,12 +44,13 @@
 
 #include "igbvf.h"
 
-#define DRV_VERSION "1.0.0-k0"
+#define DRV_VERSION "1.0.8-k0"
 char igbvf_driver_name[] = "igbvf";
 const char igbvf_driver_version[] = DRV_VERSION;
 static const char igbvf_driver_string[] =
 		  "Intel(R) Virtual Function Network Driver";
-static const char igbvf_copyright[] = "Copyright (c) 2009 Intel Corporation.";
+static const char igbvf_copyright[] =
+		  "Copyright (c) 2009 - 2010 Intel Corporation.";
 
 static int igbvf_poll(struct napi_struct *napi, int budget);
 static void igbvf_reset(struct igbvf_adapter *);
@@ -1851,8 +1852,6 @@ static void igbvf_watchdog_task(struct work_struct *work)
 
 	if (link) {
 		if (!netif_carrier_ok(netdev)) {
-			bool txb2b = 1;
-
 			mac->ops.get_link_up_info(&adapter->hw,
 			                          &adapter->link_speed,
 			                          &adapter->link_duplex);
@@ -1862,11 +1861,9 @@ static void igbvf_watchdog_task(struct work_struct *work)
 			adapter->tx_timeout_factor = 1;
 			switch (adapter->link_speed) {
 			case SPEED_10:
-				txb2b = 0;
 				adapter->tx_timeout_factor = 16;
 				break;
 			case SPEED_100:
-				txb2b = 0;
 				/* maybe add some timeout factor ? */
 				break;
 			}
diff --git a/drivers/net/igbvf/regs.h b/drivers/net/igbvf/regs.h
index b9e24ed70d0a..77e18d3d6b15 100644
--- a/drivers/net/igbvf/regs.h
+++ b/drivers/net/igbvf/regs.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) 82576 Virtual Function Linux driver
-  Copyright(c) 2009 Intel Corporation.
+  Copyright(c) 2009 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/igbvf/vf.c b/drivers/net/igbvf/vf.c
index a9a61efa964c..0cc13c6ed418 100644
--- a/drivers/net/igbvf/vf.c
+++ b/drivers/net/igbvf/vf.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) 82576 Virtual Function Linux driver
-  Copyright(c) 2009 Intel Corporation.
+  Copyright(c) 2009 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/igbvf/vf.h b/drivers/net/igbvf/vf.h
index 1e8ce3741a67..c36ea21f17fa 100644
--- a/drivers/net/igbvf/vf.h
+++ b/drivers/net/igbvf/vf.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) 82576 Virtual Function Linux driver
-  Copyright(c) 2009 Intel Corporation.
+  Copyright(c) 2009 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile
index 8f81efb49169..7d7387fbdecd 100644
--- a/drivers/net/ixgbe/Makefile
+++ b/drivers/net/ixgbe/Makefile
@@ -34,7 +34,7 @@ obj-$(CONFIG_IXGBE) += ixgbe.o
 
 ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
               ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
-              ixgbe_mbx.o
+              ixgbe_mbx.o ixgbe_x540.o
 
 ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
                              ixgbe_dcb_82599.o ixgbe_dcb_nl.o
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index ed8703cfffb7..3ae30b8cb7d6 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -61,10 +61,8 @@
 #define IXGBE_MIN_RXD 64
 
 /* flow control */
-#define IXGBE_DEFAULT_FCRTL 0x10000
 #define IXGBE_MIN_FCRTL 0x40
 #define IXGBE_MAX_FCRTL 0x7FF80
-#define IXGBE_DEFAULT_FCRTH 0x20000
 #define IXGBE_MIN_FCRTH 0x600
 #define IXGBE_MAX_FCRTH 0x7FFF0
 #define IXGBE_DEFAULT_FCPAUSE 0xFFFF
@@ -130,7 +128,9 @@ struct ixgbe_tx_buffer {
 	unsigned long time_stamp;
 	u16 length;
 	u16 next_to_watch;
-	u16 mapped_as_page;
+	unsigned int bytecount;
+	u16 gso_segs;
+	u8 mapped_as_page;
 };
 
 struct ixgbe_rx_buffer {
@@ -146,12 +146,56 @@ struct ixgbe_queue_stats {
 	u64 bytes;
 };
 
+struct ixgbe_tx_queue_stats {
+	u64 restart_queue;
+	u64 tx_busy;
+	u64 completed;
+	u64 tx_done_old;
+};
+
+struct ixgbe_rx_queue_stats {
+	u64 rsc_count;
+	u64 rsc_flush;
+	u64 non_eop_descs;
+	u64 alloc_rx_page_failed;
+	u64 alloc_rx_buff_failed;
+};
+
+enum ixbge_ring_state_t {
+	__IXGBE_TX_FDIR_INIT_DONE,
+	__IXGBE_TX_DETECT_HANG,
+	__IXGBE_HANG_CHECK_ARMED,
+	__IXGBE_RX_PS_ENABLED,
+	__IXGBE_RX_RSC_ENABLED,
+};
+
+#define ring_is_ps_enabled(ring) \
+	test_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
+#define set_ring_ps_enabled(ring) \
+	set_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
+#define clear_ring_ps_enabled(ring) \
+	clear_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
+#define check_for_tx_hang(ring) \
+	test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
+#define set_check_for_tx_hang(ring) \
+	set_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
+#define clear_check_for_tx_hang(ring) \
+	clear_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
+#define ring_is_rsc_enabled(ring) \
+	test_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
+#define set_ring_rsc_enabled(ring) \
+	set_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
+#define clear_ring_rsc_enabled(ring) \
+	clear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
 struct ixgbe_ring {
 	void *desc;			/* descriptor ring memory */
+	struct device *dev;		/* device for DMA mapping */
+	struct net_device *netdev;	/* netdev ring belongs to */
 	union {
 		struct ixgbe_tx_buffer *tx_buffer_info;
 		struct ixgbe_rx_buffer *rx_buffer_info;
 	};
+	unsigned long state;
 	u8 atr_sample_rate;
 	u8 atr_count;
 	u16 count;			/* amount of descriptors */
@@ -160,38 +204,30 @@ struct ixgbe_ring {
 	u16 next_to_clean;
 
 	u8 queue_index;			/* needed for multiqueue queue management */
-
-#define IXGBE_RING_RX_PS_ENABLED	(u8)(1)
-	u8 flags;			/* per ring feature flags */
-	u16 head;
-	u16 tail;
-
-	unsigned int total_bytes;
-	unsigned int total_packets;
-
-#ifdef CONFIG_IXGBE_DCA
-	/* cpu for tx queue */
-	int cpu;
-#endif
-
-	u16 work_limit;			/* max work per interrupt */
-	u16 reg_idx;			/* holds the special value that gets
+	u8 reg_idx;			/* holds the special value that gets
 					 * the hardware register offset
 					 * associated with this ring, which is
 					 * different for DCB and RSS modes
 					 */
 
+	u16 work_limit;			/* max work per interrupt */
+
+	u8 __iomem *tail;
+
+	unsigned int total_bytes;
+	unsigned int total_packets;
+
 	struct ixgbe_queue_stats stats;
 	struct u64_stats_sync syncp;
+	union {
+		struct ixgbe_tx_queue_stats tx_stats;
+		struct ixgbe_rx_queue_stats rx_stats;
+	};
 	int numa_node;
-	unsigned long reinit_state;
-	u64 rsc_count;			/* stat for coalesced packets */
-	u64 rsc_flush;			/* stats for flushed packets */
-	u32 restart_queue;		/* track tx queue restarts */
-	u32 non_eop_descs;		/* track hardware descriptor chaining */
-
 	unsigned int size;		/* length in bytes */
 	dma_addr_t dma;			/* phys. address of descriptor ring */
+	struct rcu_head rcu;
+	struct ixgbe_q_vector *q_vector; /* back-pointer to host q_vector */
 } ____cacheline_internodealigned_in_smp;
 
 enum ixgbe_ring_f_enum {
@@ -237,6 +273,9 @@ struct ixgbe_q_vector {
 	unsigned int v_idx; /* index of q_vector within array, also used for
 	                     * finding the bit in EICR and friends that
 	                     * represents the vector for this ring */
+#ifdef CONFIG_IXGBE_DCA
+	int cpu;	    /* CPU for DCA */
+#endif
 	struct napi_struct napi;
 	DECLARE_BITMAP(rxr_idx, MAX_RX_QUEUES); /* Rx ring indices */
 	DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES); /* Tx ring indices */
@@ -246,6 +285,7 @@ struct ixgbe_q_vector {
 	u8 rx_itr;
 	u32 eitr;
 	cpumask_var_t affinity_mask;
+	char name[IFNAMSIZ + 9];
 };
 
 /* Helper macros to switch between ints/sec and what the register uses.
@@ -294,7 +334,6 @@ struct ixgbe_adapter {
 	u16 bd_number;
 	struct work_struct reset_task;
 	struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
-	char name[MAX_MSIX_COUNT][IFNAMSIZ + 9];
 	struct ixgbe_dcb_config dcb_cfg;
 	struct ixgbe_dcb_config temp_dcb_cfg;
 	u8 dcb_set_bitmap;
@@ -417,6 +456,7 @@ struct ixgbe_adapter {
 	int node;
 	struct work_struct check_overtemp_task;
 	u32 interrupt_event;
+	char lsc_int_name[IFNAMSIZ + 9];
 
 	/* SR-IOV */
 	DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
@@ -428,17 +468,25 @@ enum ixbge_state_t {
 	__IXGBE_TESTING,
 	__IXGBE_RESETTING,
 	__IXGBE_DOWN,
-	__IXGBE_FDIR_INIT_DONE,
 	__IXGBE_SFP_MODULE_NOT_FOUND
 };
 
+struct ixgbe_rsc_cb {
+	dma_addr_t dma;
+	u16 skb_cnt;
+	bool delay_unmap;
+};
+#define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb)
+
 enum ixgbe_boards {
 	board_82598,
 	board_82599,
+	board_X540,
 };
 
 extern struct ixgbe_info ixgbe_82598_info;
 extern struct ixgbe_info ixgbe_82599_info;
+extern struct ixgbe_info ixgbe_X540_info;
 #ifdef CONFIG_IXGBE_DCB
 extern const struct dcbnl_rtnl_ops dcbnl_ops;
 extern int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
@@ -454,26 +502,24 @@ extern void ixgbe_down(struct ixgbe_adapter *adapter);
 extern void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
 extern void ixgbe_reset(struct ixgbe_adapter *adapter);
 extern void ixgbe_set_ethtool_ops(struct net_device *netdev);
-extern int ixgbe_setup_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
-extern int ixgbe_setup_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
-extern void ixgbe_free_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
-extern void ixgbe_free_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
+extern int ixgbe_setup_rx_resources(struct ixgbe_ring *);
+extern int ixgbe_setup_tx_resources(struct ixgbe_ring *);
+extern void ixgbe_free_rx_resources(struct ixgbe_ring *);
+extern void ixgbe_free_tx_resources(struct ixgbe_ring *);
 extern void ixgbe_configure_rx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
 extern void ixgbe_configure_tx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
 extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
 extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
 extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
 extern netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *,
-					 struct net_device *,
 					 struct ixgbe_adapter *,
 					 struct ixgbe_ring *);
-extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *,
+extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
 					     struct ixgbe_tx_buffer *);
-extern void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
-				   struct ixgbe_ring *rx_ring,
-				   int cleaned_count);
+extern void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
 extern void ixgbe_write_eitr(struct ixgbe_q_vector *);
 extern int ethtool_ioctl(struct ifreq *ifr);
+extern u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 index);
 extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
 extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc);
 extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc);
@@ -498,6 +544,10 @@ extern s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input,
 					 u16 flex_byte);
 extern s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input,
 				      u8 l4type);
+extern void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
+				   struct ixgbe_ring *ring);
+extern void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter,
+			       struct ixgbe_ring *ring);
 extern void ixgbe_set_rx_mode(struct net_device *netdev);
 #ifdef IXGBE_FCOE
 extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
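
A side note on the ring rework above: the single u8 flags byte (IXGBE_RING_RX_PS_ENABLED) becomes an unsigned long state word manipulated only through test_bit/set_bit/clear_bit wrappers, so flag updates from different contexts are atomic with respect to one another. Roughly how the new accessors read at a call site (the function and the policy/setup helpers are illustrative, not taken from this patch):

    /* Illustrative call site for the ring-state accessors added above. */
    static void my_setup_ring(struct ixgbe_adapter *adapter,
                              struct ixgbe_ring *ring)
    {
            if (my_wants_packet_split(adapter))    /* hypothetical policy */
                    set_ring_ps_enabled(ring);     /* set_bit on ring->state */
            else
                    clear_ring_ps_enabled(ring);

            if (ring_is_ps_enabled(ring))          /* test_bit on ring->state */
                    my_configure_packet_split(ring);
    }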
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c
index 9c02d6014cc4..d0f1d9d2c416 100644
--- a/drivers/net/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ixgbe/ixgbe_82598.c
@@ -38,9 +38,6 @@
 #define IXGBE_82598_MC_TBL_SIZE 128
 #define IXGBE_82598_VFT_TBL_SIZE 128
 
-static s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw,
-                                                    ixgbe_link_speed *speed,
-                                                    bool *autoneg);
 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
                                          ixgbe_link_speed speed,
                                          bool autoneg,
@@ -156,7 +153,7 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
 	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
 		mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
 		mac->ops.get_link_capabilities =
-			&ixgbe_get_copper_link_capabilities_82598;
+			&ixgbe_get_copper_link_capabilities_generic;
 	}
 
 	switch (hw->phy.type) {
@@ -274,37 +271,6 @@ static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
 }
 
 /**
- * ixgbe_get_copper_link_capabilities_82598 - Determines link capabilities
- * @hw: pointer to hardware structure
- * @speed: pointer to link speed
- * @autoneg: boolean auto-negotiation value
- *
- * Determines the link capabilities by reading the AUTOC register.
- **/
-static s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw,
-                                                    ixgbe_link_speed *speed,
-                                                    bool *autoneg)
-{
-	s32 status = IXGBE_ERR_LINK_SETUP;
-	u16 speed_ability;
-
-	*speed = 0;
-	*autoneg = true;
-
-	status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD,
-	                              &speed_ability);
-
-	if (status == 0) {
-		if (speed_ability & MDIO_SPEED_10G)
-			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
-		if (speed_ability & MDIO_PMA_SPEED_1000)
-			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
-	}
-
-	return status;
-}
-
-/**
  * ixgbe_get_media_type_82598 - Determines media type
  * @hw: pointer to hardware structure
  *
@@ -357,6 +323,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
 	u32 fctrl_reg;
 	u32 rmcs_reg;
 	u32 reg;
+	u32 rx_pba_size;
 	u32 link_speed = 0;
 	bool link_up;
 
@@ -459,16 +426,18 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
 
 	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
 	if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
-		if (hw->fc.send_xon) {
-			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
-			                (hw->fc.low_water | IXGBE_FCRTL_XONE));
-		} else {
-			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
-			                hw->fc.low_water);
-		}
+		rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num));
+		rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
+
+		reg = (rx_pba_size - hw->fc.low_water) << 6;
+		if (hw->fc.send_xon)
+			reg |= IXGBE_FCRTL_XONE;
+		IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), reg);
+
+		reg = (rx_pba_size - hw->fc.high_water) << 10;
+		reg |= IXGBE_FCRTH_FCEN;
 
-		IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num),
-		                (hw->fc.high_water | IXGBE_FCRTH_FCEN));
+		IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), reg);
 	}
 
 	/* Configure pause time (2 TCs per register) */
@@ -1222,6 +1191,7 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
 static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
 	.init_params = &ixgbe_init_eeprom_params_generic,
 	.read = &ixgbe_read_eerd_generic,
+	.calc_checksum = &ixgbe_calc_eeprom_checksum_generic,
 	.validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
 	.update_checksum = &ixgbe_update_eeprom_checksum_generic,
 };
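
Worth noting in the flow-control hunk above: instead of programming fixed IXGBE_DEFAULT_FCRTL/FCRTH values (the defines removed from ixgbe.h earlier on this page), the watermarks are now derived from the actual Rx packet buffer size, with FCRTL = (rx_pba_size - low_water) << 6 and FCRTH = (rx_pba_size - high_water) << 10 on 82598. A hedged sketch of that arithmetic as standalone helpers (helper names are illustrative; the register bits are the ones used in the patch):

    /* Illustrative helpers mirroring the 82598 threshold math above. */
    static u32 my_fcrtl_val(u32 rx_pba_size, u32 low_water, bool send_xon)
    {
            u32 reg = (rx_pba_size - low_water) << 6;

            if (send_xon)
                    reg |= IXGBE_FCRTL_XONE;        /* enable XON frames */
            return reg;
    }

    static u32 my_fcrth_val(u32 rx_pba_size, u32 high_water)
    {
            return ((rx_pba_size - high_water) << 10) | IXGBE_FCRTH_FCEN;
    }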
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index 0bd8fbb5bfd0..e34643eef162 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -56,9 +56,6 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
                                       ixgbe_link_speed speed,
                                       bool autoneg,
                                       bool autoneg_wait_to_complete);
-static s32 ixgbe_get_copper_link_capabilities_82599(struct ixgbe_hw *hw,
-                                                    ixgbe_link_speed *speed,
-                                                    bool *autoneg);
 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
                                          ixgbe_link_speed speed,
                                          bool autoneg,
@@ -174,7 +171,7 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
 	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
 		mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
 		mac->ops.get_link_capabilities =
-			&ixgbe_get_copper_link_capabilities_82599;
+			&ixgbe_get_copper_link_capabilities_generic;
 	}
 
 	/* Set necessary function pointers based on phy type */
@@ -184,6 +181,10 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
 		phy->ops.get_firmware_version =
 			&ixgbe_get_phy_firmware_version_tnx;
 		break;
+	case ixgbe_phy_aq:
+		phy->ops.get_firmware_version =
+			&ixgbe_get_phy_firmware_version_generic;
+		break;
 	default:
 		break;
 	}
@@ -290,37 +291,6 @@ out:
 }
 
 /**
- * ixgbe_get_copper_link_capabilities_82599 - Determines link capabilities
- * @hw: pointer to hardware structure
- * @speed: pointer to link speed
- * @autoneg: boolean auto-negotiation value
- *
- * Determines the link capabilities by reading the AUTOC register.
- **/
-static s32 ixgbe_get_copper_link_capabilities_82599(struct ixgbe_hw *hw,
-                                                    ixgbe_link_speed *speed,
-                                                    bool *autoneg)
-{
-	s32 status = IXGBE_ERR_LINK_SETUP;
-	u16 speed_ability;
-
-	*speed = 0;
-	*autoneg = true;
-
-	status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD,
-	                              &speed_ability);
-
-	if (status == 0) {
-		if (speed_ability & MDIO_SPEED_10G)
-			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
-		if (speed_ability & MDIO_PMA_SPEED_1000)
-			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
-	}
-
-	return status;
-}
-
-/**
  * ixgbe_get_media_type_82599 - Get media type
  * @hw: pointer to hardware structure
  *
@@ -332,7 +302,8 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
 
 	/* Detect if there is a copper PHY attached. */
 	if (hw->phy.type == ixgbe_phy_cu_unknown ||
-	    hw->phy.type == ixgbe_phy_tn) {
+	    hw->phy.type == ixgbe_phy_tn ||
+	    hw->phy.type == ixgbe_phy_aq) {
 		media_type = ixgbe_media_type_copper;
 		goto out;
 	}
@@ -1924,6 +1895,7 @@ static u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
 	hw->phy.ops.identify(hw);
 
 	if (hw->phy.type == ixgbe_phy_tn ||
+	    hw->phy.type == ixgbe_phy_aq ||
 	    hw->phy.type == ixgbe_phy_cu_unknown) {
 		hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD,
 		                     &ext_ability);
@@ -2125,51 +2097,6 @@ fw_version_out:
 	return status;
 }
 
-/**
- * ixgbe_get_wwn_prefix_82599 - Get alternative WWNN/WWPN prefix from
- * the EEPROM
- * @hw: pointer to hardware structure
- * @wwnn_prefix: the alternative WWNN prefix
- * @wwpn_prefix: the alternative WWPN prefix
- *
- * This function will read the EEPROM from the alternative SAN MAC address
- * block to check the support for the alternative WWNN/WWPN prefix support.
- **/
-static s32 ixgbe_get_wwn_prefix_82599(struct ixgbe_hw *hw, u16 *wwnn_prefix,
-                                      u16 *wwpn_prefix)
-{
-	u16 offset, caps;
-	u16 alt_san_mac_blk_offset;
-
-	/* clear output first */
-	*wwnn_prefix = 0xFFFF;
-	*wwpn_prefix = 0xFFFF;
-
-	/* check if alternative SAN MAC is supported */
-	hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR,
-	                    &alt_san_mac_blk_offset);
-
-	if ((alt_san_mac_blk_offset == 0) ||
-	    (alt_san_mac_blk_offset == 0xFFFF))
-		goto wwn_prefix_out;
-
-	/* check capability in alternative san mac address block */
-	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
-	hw->eeprom.ops.read(hw, offset, &caps);
-	if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
-		goto wwn_prefix_out;
-
-	/* get the corresponding prefix for WWNN/WWPN */
-	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
-	hw->eeprom.ops.read(hw, offset, wwnn_prefix);
-
-	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
-	hw->eeprom.ops.read(hw, offset, wwpn_prefix);
-
-wwn_prefix_out:
-	return 0;
-}
-
 static struct ixgbe_mac_operations mac_ops_82599 = {
 	.init_hw = &ixgbe_init_hw_generic,
 	.reset_hw = &ixgbe_reset_hw_82599,
@@ -2181,7 +2108,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
 	.get_mac_addr = &ixgbe_get_mac_addr_generic,
 	.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic,
 	.get_device_caps = &ixgbe_get_device_caps_82599,
-	.get_wwn_prefix = &ixgbe_get_wwn_prefix_82599,
+	.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic,
 	.stop_adapter = &ixgbe_stop_adapter_generic,
 	.get_bus_info = &ixgbe_get_bus_info_generic,
 	.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie,
@@ -2214,6 +2141,7 @@ static struct ixgbe_eeprom_operations eeprom_ops_82599 = {
 	.init_params = &ixgbe_init_eeprom_params_generic,
 	.read = &ixgbe_read_eerd_generic,
 	.write = &ixgbe_write_eeprom_generic,
+	.calc_checksum = &ixgbe_calc_eeprom_checksum_generic,
 	.validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
 	.update_checksum = &ixgbe_update_eeprom_checksum_generic,
 };
@@ -2240,5 +2168,5 @@ struct ixgbe_info ixgbe_82599_info = {
 	.mac_ops = &mac_ops_82599,
 	.eeprom_ops = &eeprom_ops_82599,
 	.phy_ops = &phy_ops_82599,
-	.mbx_ops = &mbx_ops_82599,
+	.mbx_ops = &mbx_ops_generic,
 };
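
The ixgbe_common.c hunks below un-static ixgbe_calc_eeprom_checksum() and route callers through the per-device eeprom ops table, so the shared validate/update paths can pick up a device-specific checksum routine; the X540 support added elsewhere in this merge is the evident motivation. A sketch of the dispatch pattern, with abridged hypothetical types and error code:

    /* Sketch of ops-table dispatch; my_hw/my_eeprom_ops are abridged
     * stand-ins for the driver's structures, not its real layout. */
    struct my_eeprom_ops {
            s32 (*read)(struct my_hw *hw, u16 offset, u16 *data);
            u16 (*calc_checksum)(struct my_hw *hw);
    };

    static s32 my_validate_checksum(struct my_hw *hw)
    {
            u16 calculated = hw->eeprom.ops.calc_checksum(hw);
            u16 stored;

            hw->eeprom.ops.read(hw, MY_EEPROM_CHECKSUM_OFFSET, &stored);
            return (stored == calculated) ? 0 : -MY_ERR_EEPROM_CHECKSUM;
    }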
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c index e3eca1316389..56052570cac5 100644 --- a/drivers/net/ixgbe/ixgbe_common.c +++ b/drivers/net/ixgbe/ixgbe_common.c | |||
@@ -45,14 +45,12 @@ static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count); | |||
45 | static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); | 45 | static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); |
46 | static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); | 46 | static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); |
47 | static void ixgbe_release_eeprom(struct ixgbe_hw *hw); | 47 | static void ixgbe_release_eeprom(struct ixgbe_hw *hw); |
48 | static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw); | ||
49 | 48 | ||
50 | static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index); | 49 | static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index); |
51 | static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index); | 50 | static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index); |
52 | static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr); | 51 | static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr); |
53 | static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq); | 52 | static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq); |
54 | static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num); | 53 | static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num); |
55 | static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg); | ||
56 | 54 | ||
57 | /** | 55 | /** |
58 | * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx | 56 | * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx |
@@ -638,7 +636,7 @@ out: | |||
638 | * Polls the status bit (bit 1) of the EERD or EEWR to determine when the | 636 | * Polls the status bit (bit 1) of the EERD or EEWR to determine when the |
639 | * read or write is done respectively. | 637 | * read or write is done respectively. |
640 | **/ | 638 | **/ |
641 | static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg) | 639 | s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg) |
642 | { | 640 | { |
643 | u32 i; | 641 | u32 i; |
644 | u32 reg; | 642 | u32 reg; |
@@ -1009,7 +1007,7 @@ static void ixgbe_release_eeprom(struct ixgbe_hw *hw) | |||
1009 | * ixgbe_calc_eeprom_checksum - Calculates and returns the checksum | 1007 | * ixgbe_calc_eeprom_checksum - Calculates and returns the checksum |
1010 | * @hw: pointer to hardware structure | 1008 | * @hw: pointer to hardware structure |
1011 | **/ | 1009 | **/ |
1012 | static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw) | 1010 | u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw) |
1013 | { | 1011 | { |
1014 | u16 i; | 1012 | u16 i; |
1015 | u16 j; | 1013 | u16 j; |
@@ -1072,7 +1070,7 @@ s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, | |||
1072 | status = hw->eeprom.ops.read(hw, 0, &checksum); | 1070 | status = hw->eeprom.ops.read(hw, 0, &checksum); |
1073 | 1071 | ||
1074 | if (status == 0) { | 1072 | if (status == 0) { |
1075 | checksum = ixgbe_calc_eeprom_checksum(hw); | 1073 | checksum = hw->eeprom.ops.calc_checksum(hw); |
1076 | 1074 | ||
1077 | hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum); | 1075 | hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum); |
1078 | 1076 | ||
@@ -1110,7 +1108,7 @@ s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw) | |||
1110 | status = hw->eeprom.ops.read(hw, 0, &checksum); | 1108 | status = hw->eeprom.ops.read(hw, 0, &checksum); |
1111 | 1109 | ||
1112 | if (status == 0) { | 1110 | if (status == 0) { |
1113 | checksum = ixgbe_calc_eeprom_checksum(hw); | 1111 | checksum = hw->eeprom.ops.calc_checksum(hw); |
1114 | status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, | 1112 | status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, |
1115 | checksum); | 1113 | checksum); |
1116 | } else { | 1114 | } else { |
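With the checksum helper renamed to ixgbe_calc_eeprom_checksum_generic and exported, both call sites above dispatch through hw->eeprom.ops.calc_checksum instead of a direct call, so a MAC-specific implementation can be slotted in at init time. A reduced model of that ops-table pattern (struct layout illustrative, not the driver's exact one):

#include <stdio.h>

struct hw;				/* forward declaration */

struct eeprom_ops {
	unsigned short (*calc_checksum)(struct hw *hw);
};

struct hw {
	struct eeprom_ops ops;
};

static unsigned short calc_checksum_generic(struct hw *hw)
{
	(void)hw;
	return 0x1234;			/* stand-in for the real word sum */
}

/* the generic validate path only ever uses the pointer */
static int validate_checksum(struct hw *hw)
{
	return hw->ops.calc_checksum(hw) == 0x1234 ? 0 : -1;
}

int main(void)
{
	struct hw hw = { .ops = { .calc_checksum = calc_checksum_generic } };

	printf("validate: %d\n", validate_checksum(&hw));
	return 0;
}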
@@ -1595,6 +1593,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num) | |||
1595 | u32 mflcn_reg, fccfg_reg; | 1593 | u32 mflcn_reg, fccfg_reg; |
1596 | u32 reg; | 1594 | u32 reg; |
1597 | u32 rx_pba_size; | 1595 | u32 rx_pba_size; |
1596 | u32 fcrtl, fcrth; | ||
1598 | 1597 | ||
1599 | #ifdef CONFIG_DCB | 1598 | #ifdef CONFIG_DCB |
1600 | if (hw->fc.requested_mode == ixgbe_fc_pfc) | 1599 | if (hw->fc.requested_mode == ixgbe_fc_pfc) |
@@ -1671,41 +1670,21 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num) | |||
1671 | IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg); | 1670 | IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg); |
1672 | IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg); | 1671 | IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg); |
1673 | 1672 | ||
1674 | reg = IXGBE_READ_REG(hw, IXGBE_MTQC); | 1673 | rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num)); |
1675 | /* Thresholds are different for link flow control when in DCB mode */ | 1674 | rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT; |
1676 | if (reg & IXGBE_MTQC_RT_ENA) { | ||
1677 | rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num)); | ||
1678 | 1675 | ||
1679 | /* Always disable XON for LFC when in DCB mode */ | 1676 | fcrth = (rx_pba_size - hw->fc.high_water) << 10; |
1680 | reg = (rx_pba_size >> 5) & 0xFFE0; | 1677 | fcrtl = (rx_pba_size - hw->fc.low_water) << 10; |
1681 | IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num), reg); | ||
1682 | 1678 | ||
1683 | reg = (rx_pba_size >> 2) & 0xFFE0; | 1679 | if (hw->fc.current_mode & ixgbe_fc_tx_pause) { |
1684 | if (hw->fc.current_mode & ixgbe_fc_tx_pause) | 1680 | fcrth |= IXGBE_FCRTH_FCEN; |
1685 | reg |= IXGBE_FCRTH_FCEN; | 1681 | if (hw->fc.send_xon) |
1686 | IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num), reg); | 1682 | fcrtl |= IXGBE_FCRTL_XONE; |
1687 | } else { | ||
1688 | /* | ||
1689 | * Set up and enable Rx high/low water mark thresholds, | ||
1690 | * enable XON. | ||
1691 | */ | ||
1692 | if (hw->fc.current_mode & ixgbe_fc_tx_pause) { | ||
1693 | if (hw->fc.send_xon) { | ||
1694 | IXGBE_WRITE_REG(hw, | ||
1695 | IXGBE_FCRTL_82599(packetbuf_num), | ||
1696 | (hw->fc.low_water | | ||
1697 | IXGBE_FCRTL_XONE)); | ||
1698 | } else { | ||
1699 | IXGBE_WRITE_REG(hw, | ||
1700 | IXGBE_FCRTL_82599(packetbuf_num), | ||
1701 | hw->fc.low_water); | ||
1702 | } | ||
1703 | |||
1704 | IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num), | ||
1705 | (hw->fc.high_water | IXGBE_FCRTH_FCEN)); | ||
1706 | } | ||
1707 | } | 1683 | } |
1708 | 1684 | ||
1685 | IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num), fcrth); | ||
1686 | IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num), fcrtl); | ||
1687 | |||
1709 | /* Configure pause time (2 TCs per register) */ | 1688 | /* Configure pause time (2 TCs per register) */ |
1710 | reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2)); | 1689 | reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2)); |
1711 | if ((packetbuf_num & 1) == 0) | 1690 | if ((packetbuf_num & 1) == 0) |
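The rewrite above collapses the DCB/legacy split into one path: RXPBSIZE is scaled down by IXGBE_RXPBSIZE_SHIFT to the packet-buffer size, and FCRTH/FCRTL are then expressed as a distance below the top of that buffer, shifted back up to bytes. Worked through with illustrative numbers (a shift of 10, i.e. KB units, is an assumption here):

#include <stdint.h>
#include <stdio.h>

#define RXPBSIZE_SHIFT	10	/* assumed: bytes -> KB */

int main(void)
{
	uint32_t rxpbsize = 512u << RXPBSIZE_SHIFT;	/* 512 KB Rx buffer */
	uint32_t high_water = 32, low_water = 64;	/* KB below the top */

	uint32_t rx_pba_size = rxpbsize >> RXPBSIZE_SHIFT;
	uint32_t fcrth = (rx_pba_size - high_water) << 10;	/* bytes */
	uint32_t fcrtl = (rx_pba_size - low_water) << 10;

	/* XOFF is sent at the higher fill level, XON at the lower one */
	printf("FCRTH=%u FCRTL=%u\n", fcrth, fcrtl);	/* 491520 458752 */
	return 0;
}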
@@ -2705,3 +2684,48 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, | |||
2705 | 2684 | ||
2706 | return 0; | 2685 | return 0; |
2707 | } | 2686 | } |
2687 | |||
2688 | /** | ||
2689 | * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from | ||
2690 | * the EEPROM | ||
2691 | * @hw: pointer to hardware structure | ||
2692 | * @wwnn_prefix: the alternative WWNN prefix | ||
2693 | * @wwpn_prefix: the alternative WWPN prefix | ||
2694 | * | ||
2695 | * This function reads the alternative SAN MAC address block in the EEPROM | ||
2696 | * to check for alternative WWNN/WWPN prefix support. | ||
2697 | **/ | ||
2698 | s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix, | ||
2699 | u16 *wwpn_prefix) | ||
2700 | { | ||
2701 | u16 offset, caps; | ||
2702 | u16 alt_san_mac_blk_offset; | ||
2703 | |||
2704 | /* clear output first */ | ||
2705 | *wwnn_prefix = 0xFFFF; | ||
2706 | *wwpn_prefix = 0xFFFF; | ||
2707 | |||
2708 | /* check if alternative SAN MAC is supported */ | ||
2709 | hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR, | ||
2710 | &alt_san_mac_blk_offset); | ||
2711 | |||
2712 | if ((alt_san_mac_blk_offset == 0) || | ||
2713 | (alt_san_mac_blk_offset == 0xFFFF)) | ||
2714 | goto wwn_prefix_out; | ||
2715 | |||
2716 | /* check capability in alternative san mac address block */ | ||
2717 | offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET; | ||
2718 | hw->eeprom.ops.read(hw, offset, &caps); | ||
2719 | if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN)) | ||
2720 | goto wwn_prefix_out; | ||
2721 | |||
2722 | /* get the corresponding prefix for WWNN/WWPN */ | ||
2723 | offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET; | ||
2724 | hw->eeprom.ops.read(hw, offset, wwnn_prefix); | ||
2725 | |||
2726 | offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET; | ||
2727 | hw->eeprom.ops.read(hw, offset, wwpn_prefix); | ||
2728 | |||
2729 | wwn_prefix_out: | ||
2730 | return 0; | ||
2731 | } | ||
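The new helper follows the driver's usual EEPROM pointer-block walk: read the block pointer, bail out on the 0/0xFFFF sentinels, check a capability word, then pull the two prefix words. A minimal caller sketch — setup_fcoe_wwn() is hypothetical, and 0xFFFF remains the "not supported" value the outputs are initialized to:

u16 wwnn_prefix = 0xFFFF, wwpn_prefix = 0xFFFF;

ixgbe_get_wwn_prefix_generic(hw, &wwnn_prefix, &wwpn_prefix);
if (wwnn_prefix != 0xFFFF && wwpn_prefix != 0xFFFF)
	/* prefixes present: combine with the SAN MAC to form WWNs */
	setup_fcoe_wwn(hw, wwnn_prefix, wwpn_prefix);	/* hypothetical */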
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h index 424c223437dc..341ca514a281 100644 --- a/drivers/net/ixgbe/ixgbe_common.h +++ b/drivers/net/ixgbe/ixgbe_common.h | |||
@@ -49,9 +49,11 @@ s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data); | |||
49 | s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data); | 49 | s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data); |
50 | s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, | 50 | s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, |
51 | u16 *data); | 51 | u16 *data); |
52 | u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw); | ||
52 | s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, | 53 | s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, |
53 | u16 *checksum_val); | 54 | u16 *checksum_val); |
54 | s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw); | 55 | s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw); |
56 | s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg); | ||
55 | 57 | ||
56 | s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, | 58 | s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, |
57 | u32 enable_addr); | 59 | u32 enable_addr); |
@@ -81,7 +83,8 @@ s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw); | |||
81 | s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, | 83 | s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, |
82 | ixgbe_link_speed *speed, | 84 | ixgbe_link_speed *speed, |
83 | bool *link_up, bool link_up_wait_to_complete); | 85 | bool *link_up, bool link_up_wait_to_complete); |
84 | 86 | s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix, | |
87 | u16 *wwpn_prefix); | ||
85 | s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index); | 88 | s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index); |
86 | s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index); | 89 | s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index); |
87 | 90 | ||
diff --git a/drivers/net/ixgbe/ixgbe_dcb.c b/drivers/net/ixgbe/ixgbe_dcb.c index 0d44c6470ca3..d16c260c1f50 100644 --- a/drivers/net/ixgbe/ixgbe_dcb.c +++ b/drivers/net/ixgbe/ixgbe_dcb.c | |||
@@ -42,7 +42,8 @@ | |||
42 | * It should be called only after the rules are checked by | 42 | * It should be called only after the rules are checked by |
43 | * ixgbe_dcb_check_config(). | 43 | * ixgbe_dcb_check_config(). |
44 | */ | 44 | */ |
45 | s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *dcb_config, | 45 | s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw, |
46 | struct ixgbe_dcb_config *dcb_config, | ||
46 | int max_frame, u8 direction) | 47 | int max_frame, u8 direction) |
47 | { | 48 | { |
48 | struct tc_bw_alloc *p; | 49 | struct tc_bw_alloc *p; |
@@ -124,7 +125,8 @@ s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *dcb_config, | |||
124 | * credit may not be enough to send out a TSO | 125 | * credit may not be enough to send out a TSO |
125 | * packet in descriptor plane arbitration. | 126 | * packet in descriptor plane arbitration. |
126 | */ | 127 | */ |
127 | if (credit_max && | 128 | if ((hw->mac.type == ixgbe_mac_82598EB) && |
129 | credit_max && | ||
128 | (credit_max < MINIMUM_CREDIT_FOR_TSO)) | 130 | (credit_max < MINIMUM_CREDIT_FOR_TSO)) |
129 | credit_max = MINIMUM_CREDIT_FOR_TSO; | 131 | credit_max = MINIMUM_CREDIT_FOR_TSO; |
130 | 132 | ||
@@ -150,10 +152,17 @@ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw, | |||
150 | struct ixgbe_dcb_config *dcb_config) | 152 | struct ixgbe_dcb_config *dcb_config) |
151 | { | 153 | { |
152 | s32 ret = 0; | 154 | s32 ret = 0; |
153 | if (hw->mac.type == ixgbe_mac_82598EB) | 155 | switch (hw->mac.type) { |
156 | case ixgbe_mac_82598EB: | ||
154 | ret = ixgbe_dcb_hw_config_82598(hw, dcb_config); | 157 | ret = ixgbe_dcb_hw_config_82598(hw, dcb_config); |
155 | else if (hw->mac.type == ixgbe_mac_82599EB) | 158 | break; |
159 | case ixgbe_mac_82599EB: | ||
160 | case ixgbe_mac_X540: | ||
156 | ret = ixgbe_dcb_hw_config_82599(hw, dcb_config); | 161 | ret = ixgbe_dcb_hw_config_82599(hw, dcb_config); |
162 | break; | ||
163 | default: | ||
164 | break; | ||
165 | } | ||
157 | return ret; | 166 | return ret; |
158 | } | 167 | } |
159 | 168 | ||
diff --git a/drivers/net/ixgbe/ixgbe_dcb.h b/drivers/net/ixgbe/ixgbe_dcb.h index 0208a87b129e..1cfe38ee1644 100644 --- a/drivers/net/ixgbe/ixgbe_dcb.h +++ b/drivers/net/ixgbe/ixgbe_dcb.h | |||
@@ -150,7 +150,8 @@ struct ixgbe_dcb_config { | |||
150 | /* DCB driver APIs */ | 150 | /* DCB driver APIs */ |
151 | 151 | ||
152 | /* DCB credits calculation */ | 152 | /* DCB credits calculation */ |
153 | s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *, int, u8); | 153 | s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *, |
154 | struct ixgbe_dcb_config *, int, u8); | ||
154 | 155 | ||
155 | /* DCB hw initialization */ | 156 | /* DCB hw initialization */ |
156 | s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *); | 157 | s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *); |
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ixgbe/ixgbe_dcb_82598.c index 50288bcadc59..9a5e89c12e05 100644 --- a/drivers/net/ixgbe/ixgbe_dcb_82598.c +++ b/drivers/net/ixgbe/ixgbe_dcb_82598.c | |||
@@ -256,21 +256,17 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, | |||
256 | * for each traffic class. | 256 | * for each traffic class. |
257 | */ | 257 | */ |
258 | for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { | 258 | for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { |
259 | if (dcb_config->rx_pba_cfg == pba_equal) { | 259 | rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)); |
260 | rx_pba_size = IXGBE_RXPBSIZE_64KB; | 260 | rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT; |
261 | } else { | 261 | reg = (rx_pba_size - hw->fc.low_water) << 10; |
262 | rx_pba_size = (i < 4) ? IXGBE_RXPBSIZE_80KB | ||
263 | : IXGBE_RXPBSIZE_48KB; | ||
264 | } | ||
265 | 262 | ||
266 | reg = ((rx_pba_size >> 5) & 0xFFF0); | ||
267 | if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx || | 263 | if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx || |
268 | dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full) | 264 | dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full) |
269 | reg |= IXGBE_FCRTL_XONE; | 265 | reg |= IXGBE_FCRTL_XONE; |
270 | 266 | ||
271 | IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), reg); | 267 | IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), reg); |
272 | 268 | ||
273 | reg = ((rx_pba_size >> 2) & 0xFFF0); | 269 | reg = (rx_pba_size - hw->fc.high_water) << 10; |
274 | if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx || | 270 | if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx || |
275 | dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full) | 271 | dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full) |
276 | reg |= IXGBE_FCRTH_FCEN; | 272 | reg |= IXGBE_FCRTH_FCEN; |
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ixgbe/ixgbe_dcb_82599.c index 05f224715073..374e1f74d0f5 100644 --- a/drivers/net/ixgbe/ixgbe_dcb_82599.c +++ b/drivers/net/ixgbe/ixgbe_dcb_82599.c | |||
@@ -251,19 +251,17 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, | |||
251 | 251 | ||
252 | /* Configure PFC Tx thresholds per TC */ | 252 | /* Configure PFC Tx thresholds per TC */ |
253 | for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { | 253 | for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { |
254 | if (dcb_config->rx_pba_cfg == pba_equal) | 254 | rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)); |
255 | rx_pba_size = IXGBE_RXPBSIZE_64KB; | 255 | rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT; |
256 | else | 256 | |
257 | rx_pba_size = (i < 4) ? IXGBE_RXPBSIZE_80KB | 257 | reg = (rx_pba_size - hw->fc.low_water) << 10; |
258 | : IXGBE_RXPBSIZE_48KB; | ||
259 | 258 | ||
260 | reg = ((rx_pba_size >> 5) & 0xFFE0); | ||
261 | if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full || | 259 | if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full || |
262 | dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx) | 260 | dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx) |
263 | reg |= IXGBE_FCRTL_XONE; | 261 | reg |= IXGBE_FCRTL_XONE; |
264 | IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), reg); | 262 | IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), reg); |
265 | 263 | ||
266 | reg = ((rx_pba_size >> 2) & 0xFFE0); | 264 | reg = (rx_pba_size - hw->fc.high_water) << 10; |
267 | if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full || | 265 | if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full || |
268 | dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx) | 266 | dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx) |
269 | reg |= IXGBE_FCRTH_FCEN; | 267 | reg |= IXGBE_FCRTH_FCEN; |
diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c index b53b465e24af..bf566e8a455e 100644 --- a/drivers/net/ixgbe/ixgbe_dcb_nl.c +++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c | |||
@@ -130,15 +130,21 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state) | |||
130 | netdev->netdev_ops->ndo_stop(netdev); | 130 | netdev->netdev_ops->ndo_stop(netdev); |
131 | ixgbe_clear_interrupt_scheme(adapter); | 131 | ixgbe_clear_interrupt_scheme(adapter); |
132 | 132 | ||
133 | if (adapter->hw.mac.type == ixgbe_mac_82598EB) { | 133 | adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; |
134 | switch (adapter->hw.mac.type) { | ||
135 | case ixgbe_mac_82598EB: | ||
134 | adapter->last_lfc_mode = adapter->hw.fc.current_mode; | 136 | adapter->last_lfc_mode = adapter->hw.fc.current_mode; |
135 | adapter->hw.fc.requested_mode = ixgbe_fc_none; | 137 | adapter->hw.fc.requested_mode = ixgbe_fc_none; |
136 | } | 138 | break; |
137 | adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; | 139 | case ixgbe_mac_82599EB: |
138 | if (adapter->hw.mac.type == ixgbe_mac_82599EB) { | 140 | case ixgbe_mac_X540: |
139 | adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; | 141 | adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; |
140 | adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; | 142 | adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; |
143 | break; | ||
144 | default: | ||
145 | break; | ||
141 | } | 146 | } |
147 | |||
142 | adapter->flags |= IXGBE_FLAG_DCB_ENABLED; | 148 | adapter->flags |= IXGBE_FLAG_DCB_ENABLED; |
143 | ixgbe_init_interrupt_scheme(adapter); | 149 | ixgbe_init_interrupt_scheme(adapter); |
144 | if (netif_running(netdev)) | 150 | if (netif_running(netdev)) |
@@ -155,8 +161,14 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state) | |||
155 | adapter->dcb_cfg.pfc_mode_enable = false; | 161 | adapter->dcb_cfg.pfc_mode_enable = false; |
156 | adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; | 162 | adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; |
157 | adapter->flags |= IXGBE_FLAG_RSS_ENABLED; | 163 | adapter->flags |= IXGBE_FLAG_RSS_ENABLED; |
158 | if (adapter->hw.mac.type == ixgbe_mac_82599EB) | 164 | switch (adapter->hw.mac.type) { |
165 | case ixgbe_mac_82599EB: | ||
166 | case ixgbe_mac_X540: | ||
159 | adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; | 167 | adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; |
168 | break; | ||
169 | default: | ||
170 | break; | ||
171 | } | ||
160 | 172 | ||
161 | ixgbe_init_interrupt_scheme(adapter); | 173 | ixgbe_init_interrupt_scheme(adapter); |
162 | if (netif_running(netdev)) | 174 | if (netif_running(netdev)) |
@@ -178,9 +190,14 @@ static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev, | |||
178 | for (i = 0; i < netdev->addr_len; i++) | 190 | for (i = 0; i < netdev->addr_len; i++) |
179 | perm_addr[i] = adapter->hw.mac.perm_addr[i]; | 191 | perm_addr[i] = adapter->hw.mac.perm_addr[i]; |
180 | 192 | ||
181 | if (adapter->hw.mac.type == ixgbe_mac_82599EB) { | 193 | switch (adapter->hw.mac.type) { |
194 | case ixgbe_mac_82599EB: | ||
195 | case ixgbe_mac_X540: | ||
182 | for (j = 0; j < netdev->addr_len; j++, i++) | 196 | for (j = 0; j < netdev->addr_len; j++, i++) |
183 | perm_addr[i] = adapter->hw.mac.san_addr[j]; | 197 | perm_addr[i] = adapter->hw.mac.san_addr[j]; |
198 | break; | ||
199 | default: | ||
200 | break; | ||
184 | } | 201 | } |
185 | } | 202 | } |
186 | 203 | ||
@@ -366,15 +383,29 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) | |||
366 | } | 383 | } |
367 | 384 | ||
368 | if (adapter->dcb_cfg.pfc_mode_enable) { | 385 | if (adapter->dcb_cfg.pfc_mode_enable) { |
369 | if ((adapter->hw.mac.type != ixgbe_mac_82598EB) && | 386 | switch (adapter->hw.mac.type) { |
370 | (adapter->hw.fc.current_mode != ixgbe_fc_pfc)) | 387 | case ixgbe_mac_82599EB: |
371 | adapter->last_lfc_mode = adapter->hw.fc.current_mode; | 388 | case ixgbe_mac_X540: |
389 | if (adapter->hw.fc.current_mode != ixgbe_fc_pfc) | ||
390 | adapter->last_lfc_mode = | ||
391 | adapter->hw.fc.current_mode; | ||
392 | break; | ||
393 | default: | ||
394 | break; | ||
395 | } | ||
372 | adapter->hw.fc.requested_mode = ixgbe_fc_pfc; | 396 | adapter->hw.fc.requested_mode = ixgbe_fc_pfc; |
373 | } else { | 397 | } else { |
374 | if (adapter->hw.mac.type != ixgbe_mac_82598EB) | 398 | switch (adapter->hw.mac.type) { |
375 | adapter->hw.fc.requested_mode = adapter->last_lfc_mode; | 399 | case ixgbe_mac_82598EB: |
376 | else | ||
377 | adapter->hw.fc.requested_mode = ixgbe_fc_none; | 400 | adapter->hw.fc.requested_mode = ixgbe_fc_none; |
401 | break; | ||
402 | case ixgbe_mac_82599EB: | ||
403 | case ixgbe_mac_X540: | ||
404 | adapter->hw.fc.requested_mode = adapter->last_lfc_mode; | ||
405 | break; | ||
406 | default: | ||
407 | break; | ||
408 | } | ||
378 | } | 409 | } |
379 | 410 | ||
380 | if (adapter->dcb_set_bitmap & BIT_RESETLINK) { | 411 | if (adapter->dcb_set_bitmap & BIT_RESETLINK) { |
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c index 3dc731c22ff2..f9b58394fbb6 100644 --- a/drivers/net/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ixgbe/ixgbe_ethtool.c | |||
@@ -185,6 +185,16 @@ static int ixgbe_get_settings(struct net_device *netdev, | |||
185 | ADVERTISED_FIBRE); | 185 | ADVERTISED_FIBRE); |
186 | ecmd->port = PORT_FIBRE; | 186 | ecmd->port = PORT_FIBRE; |
187 | ecmd->autoneg = AUTONEG_DISABLE; | 187 | ecmd->autoneg = AUTONEG_DISABLE; |
188 | } else if ((hw->device_id == IXGBE_DEV_ID_82599_COMBO_BACKPLANE) || | ||
189 | (hw->device_id == IXGBE_DEV_ID_82599_KX4_MEZZ)) { | ||
190 | ecmd->supported |= (SUPPORTED_1000baseT_Full | | ||
191 | SUPPORTED_Autoneg | | ||
192 | SUPPORTED_FIBRE); | ||
193 | ecmd->advertising = (ADVERTISED_10000baseT_Full | | ||
194 | ADVERTISED_1000baseT_Full | | ||
195 | ADVERTISED_Autoneg | | ||
196 | ADVERTISED_FIBRE); | ||
197 | ecmd->port = PORT_FIBRE; | ||
188 | } else { | 198 | } else { |
189 | ecmd->supported |= (SUPPORTED_1000baseT_Full | | 199 | ecmd->supported |= (SUPPORTED_1000baseT_Full | |
190 | SUPPORTED_FIBRE); | 200 | SUPPORTED_FIBRE); |
@@ -204,6 +214,7 @@ static int ixgbe_get_settings(struct net_device *netdev, | |||
204 | /* Get PHY type */ | 214 | /* Get PHY type */ |
205 | switch (adapter->hw.phy.type) { | 215 | switch (adapter->hw.phy.type) { |
206 | case ixgbe_phy_tn: | 216 | case ixgbe_phy_tn: |
217 | case ixgbe_phy_aq: | ||
207 | case ixgbe_phy_cu_unknown: | 218 | case ixgbe_phy_cu_unknown: |
208 | /* Copper 10G-BASET */ | 219 | /* Copper 10G-BASET */ |
209 | ecmd->port = PORT_TP; | 220 | ecmd->port = PORT_TP; |
@@ -332,13 +343,6 @@ static void ixgbe_get_pauseparam(struct net_device *netdev, | |||
332 | else | 343 | else |
333 | pause->autoneg = 1; | 344 | pause->autoneg = 1; |
334 | 345 | ||
335 | #ifdef CONFIG_DCB | ||
336 | if (hw->fc.current_mode == ixgbe_fc_pfc) { | ||
337 | pause->rx_pause = 0; | ||
338 | pause->tx_pause = 0; | ||
339 | } | ||
340 | |||
341 | #endif | ||
342 | if (hw->fc.current_mode == ixgbe_fc_rx_pause) { | 346 | if (hw->fc.current_mode == ixgbe_fc_rx_pause) { |
343 | pause->rx_pause = 1; | 347 | pause->rx_pause = 1; |
344 | } else if (hw->fc.current_mode == ixgbe_fc_tx_pause) { | 348 | } else if (hw->fc.current_mode == ixgbe_fc_tx_pause) { |
@@ -346,6 +350,11 @@ static void ixgbe_get_pauseparam(struct net_device *netdev, | |||
346 | } else if (hw->fc.current_mode == ixgbe_fc_full) { | 350 | } else if (hw->fc.current_mode == ixgbe_fc_full) { |
347 | pause->rx_pause = 1; | 351 | pause->rx_pause = 1; |
348 | pause->tx_pause = 1; | 352 | pause->tx_pause = 1; |
353 | #ifdef CONFIG_DCB | ||
354 | } else if (hw->fc.current_mode == ixgbe_fc_pfc) { | ||
355 | pause->rx_pause = 0; | ||
356 | pause->tx_pause = 0; | ||
357 | #endif | ||
349 | } | 358 | } |
350 | } | 359 | } |
351 | 360 | ||
@@ -363,7 +372,6 @@ static int ixgbe_set_pauseparam(struct net_device *netdev, | |||
363 | return -EINVAL; | 372 | return -EINVAL; |
364 | 373 | ||
365 | #endif | 374 | #endif |
366 | |||
367 | fc = hw->fc; | 375 | fc = hw->fc; |
368 | 376 | ||
369 | if (pause->autoneg != AUTONEG_ENABLE) | 377 | if (pause->autoneg != AUTONEG_ENABLE) |
@@ -412,11 +420,6 @@ static int ixgbe_set_rx_csum(struct net_device *netdev, u32 data) | |||
412 | else | 420 | else |
413 | adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED; | 421 | adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED; |
414 | 422 | ||
415 | if (netif_running(netdev)) | ||
416 | ixgbe_reinit_locked(adapter); | ||
417 | else | ||
418 | ixgbe_reset(adapter); | ||
419 | |||
420 | return 0; | 423 | return 0; |
421 | } | 424 | } |
422 | 425 | ||
@@ -428,16 +431,21 @@ static u32 ixgbe_get_tx_csum(struct net_device *netdev) | |||
428 | static int ixgbe_set_tx_csum(struct net_device *netdev, u32 data) | 431 | static int ixgbe_set_tx_csum(struct net_device *netdev, u32 data) |
429 | { | 432 | { |
430 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | 433 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
434 | u32 feature_list; | ||
431 | 435 | ||
432 | if (data) { | 436 | feature_list = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); |
433 | netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); | 437 | switch (adapter->hw.mac.type) { |
434 | if (adapter->hw.mac.type == ixgbe_mac_82599EB) | 438 | case ixgbe_mac_82599EB: |
435 | netdev->features |= NETIF_F_SCTP_CSUM; | 439 | case ixgbe_mac_X540: |
436 | } else { | 440 | feature_list |= NETIF_F_SCTP_CSUM; |
437 | netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); | 441 | break; |
438 | if (adapter->hw.mac.type == ixgbe_mac_82599EB) | 442 | default: |
439 | netdev->features &= ~NETIF_F_SCTP_CSUM; | 443 | break; |
440 | } | 444 | } |
445 | if (data) | ||
446 | netdev->features |= feature_list; | ||
447 | else | ||
448 | netdev->features &= ~feature_list; | ||
441 | 449 | ||
442 | return 0; | 450 | return 0; |
443 | } | 451 | } |
@@ -530,10 +538,20 @@ static void ixgbe_get_regs(struct net_device *netdev, | |||
530 | regs_buff[32] = IXGBE_READ_REG(hw, IXGBE_FCTTV(1)); | 538 | regs_buff[32] = IXGBE_READ_REG(hw, IXGBE_FCTTV(1)); |
531 | regs_buff[33] = IXGBE_READ_REG(hw, IXGBE_FCTTV(2)); | 539 | regs_buff[33] = IXGBE_READ_REG(hw, IXGBE_FCTTV(2)); |
532 | regs_buff[34] = IXGBE_READ_REG(hw, IXGBE_FCTTV(3)); | 540 | regs_buff[34] = IXGBE_READ_REG(hw, IXGBE_FCTTV(3)); |
533 | for (i = 0; i < 8; i++) | 541 | for (i = 0; i < 8; i++) { |
534 | regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i)); | 542 | switch (hw->mac.type) { |
535 | for (i = 0; i < 8; i++) | 543 | case ixgbe_mac_82598EB: |
536 | regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i)); | 544 | regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i)); |
545 | regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i)); | ||
546 | break; | ||
547 | case ixgbe_mac_82599EB: | ||
548 | regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i)); | ||
549 | regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i)); | ||
550 | break; | ||
551 | default: | ||
552 | break; | ||
553 | } | ||
554 | } | ||
537 | regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV); | 555 | regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV); |
538 | regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS); | 556 | regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS); |
539 | 557 | ||
@@ -615,6 +633,7 @@ static void ixgbe_get_regs(struct net_device *netdev, | |||
615 | regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM); | 633 | regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM); |
616 | regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0)); | 634 | regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0)); |
617 | 635 | ||
636 | /* DCB */ | ||
618 | regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS); | 637 | regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS); |
619 | regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS); | 638 | regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS); |
620 | regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS); | 639 | regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS); |
@@ -905,13 +924,11 @@ static int ixgbe_set_ringparam(struct net_device *netdev, | |||
905 | memcpy(&temp_tx_ring[i], adapter->tx_ring[i], | 924 | memcpy(&temp_tx_ring[i], adapter->tx_ring[i], |
906 | sizeof(struct ixgbe_ring)); | 925 | sizeof(struct ixgbe_ring)); |
907 | temp_tx_ring[i].count = new_tx_count; | 926 | temp_tx_ring[i].count = new_tx_count; |
908 | err = ixgbe_setup_tx_resources(adapter, | 927 | err = ixgbe_setup_tx_resources(&temp_tx_ring[i]); |
909 | &temp_tx_ring[i]); | ||
910 | if (err) { | 928 | if (err) { |
911 | while (i) { | 929 | while (i) { |
912 | i--; | 930 | i--; |
913 | ixgbe_free_tx_resources(adapter, | 931 | ixgbe_free_tx_resources(&temp_tx_ring[i]); |
914 | &temp_tx_ring[i]); | ||
915 | } | 932 | } |
916 | goto clear_reset; | 933 | goto clear_reset; |
917 | } | 934 | } |
@@ -930,13 +947,11 @@ static int ixgbe_set_ringparam(struct net_device *netdev, | |||
930 | memcpy(&temp_rx_ring[i], adapter->rx_ring[i], | 947 | memcpy(&temp_rx_ring[i], adapter->rx_ring[i], |
931 | sizeof(struct ixgbe_ring)); | 948 | sizeof(struct ixgbe_ring)); |
932 | temp_rx_ring[i].count = new_rx_count; | 949 | temp_rx_ring[i].count = new_rx_count; |
933 | err = ixgbe_setup_rx_resources(adapter, | 950 | err = ixgbe_setup_rx_resources(&temp_rx_ring[i]); |
934 | &temp_rx_ring[i]); | ||
935 | if (err) { | 951 | if (err) { |
936 | while (i) { | 952 | while (i) { |
937 | i--; | 953 | i--; |
938 | ixgbe_free_rx_resources(adapter, | 954 | ixgbe_free_rx_resources(&temp_rx_ring[i]); |
939 | &temp_rx_ring[i]); | ||
940 | } | 955 | } |
941 | goto err_setup; | 956 | goto err_setup; |
942 | } | 957 | } |
@@ -951,8 +966,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev, | |||
951 | /* tx */ | 966 | /* tx */ |
952 | if (new_tx_count != adapter->tx_ring_count) { | 967 | if (new_tx_count != adapter->tx_ring_count) { |
953 | for (i = 0; i < adapter->num_tx_queues; i++) { | 968 | for (i = 0; i < adapter->num_tx_queues; i++) { |
954 | ixgbe_free_tx_resources(adapter, | 969 | ixgbe_free_tx_resources(adapter->tx_ring[i]); |
955 | adapter->tx_ring[i]); | ||
956 | memcpy(adapter->tx_ring[i], &temp_tx_ring[i], | 970 | memcpy(adapter->tx_ring[i], &temp_tx_ring[i], |
957 | sizeof(struct ixgbe_ring)); | 971 | sizeof(struct ixgbe_ring)); |
958 | } | 972 | } |
@@ -962,8 +976,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev, | |||
962 | /* rx */ | 976 | /* rx */ |
963 | if (new_rx_count != adapter->rx_ring_count) { | 977 | if (new_rx_count != adapter->rx_ring_count) { |
964 | for (i = 0; i < adapter->num_rx_queues; i++) { | 978 | for (i = 0; i < adapter->num_rx_queues; i++) { |
965 | ixgbe_free_rx_resources(adapter, | 979 | ixgbe_free_rx_resources(adapter->rx_ring[i]); |
966 | adapter->rx_ring[i]); | ||
967 | memcpy(adapter->rx_ring[i], &temp_rx_ring[i], | 980 | memcpy(adapter->rx_ring[i], &temp_rx_ring[i], |
968 | sizeof(struct ixgbe_ring)); | 981 | sizeof(struct ixgbe_ring)); |
969 | } | 982 | } |
@@ -1237,12 +1250,20 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data) | |||
1237 | u32 value, before, after; | 1250 | u32 value, before, after; |
1238 | u32 i, toggle; | 1251 | u32 i, toggle; |
1239 | 1252 | ||
1240 | if (adapter->hw.mac.type == ixgbe_mac_82599EB) { | 1253 | switch (adapter->hw.mac.type) { |
1241 | toggle = 0x7FFFF30F; | 1254 | case ixgbe_mac_82598EB: |
1242 | test = reg_test_82599; | ||
1243 | } else { | ||
1244 | toggle = 0x7FFFF3FF; | 1255 | toggle = 0x7FFFF3FF; |
1245 | test = reg_test_82598; | 1256 | test = reg_test_82598; |
1257 | break; | ||
1258 | case ixgbe_mac_82599EB: | ||
1259 | case ixgbe_mac_X540: | ||
1260 | toggle = 0x7FFFF30F; | ||
1261 | test = reg_test_82599; | ||
1262 | break; | ||
1263 | default: | ||
1264 | *data = 1; | ||
1265 | return 1; | ||
1266 | break; | ||
1246 | } | 1267 | } |
1247 | 1268 | ||
1248 | /* | 1269 | /* |
@@ -1460,16 +1481,21 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter) | |||
1460 | reg_ctl &= ~IXGBE_TXDCTL_ENABLE; | 1481 | reg_ctl &= ~IXGBE_TXDCTL_ENABLE; |
1461 | IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl); | 1482 | IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl); |
1462 | 1483 | ||
1463 | if (hw->mac.type == ixgbe_mac_82599EB) { | 1484 | switch (hw->mac.type) { |
1485 | case ixgbe_mac_82599EB: | ||
1486 | case ixgbe_mac_X540: | ||
1464 | reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); | 1487 | reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); |
1465 | reg_ctl &= ~IXGBE_DMATXCTL_TE; | 1488 | reg_ctl &= ~IXGBE_DMATXCTL_TE; |
1466 | IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl); | 1489 | IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl); |
1490 | break; | ||
1491 | default: | ||
1492 | break; | ||
1467 | } | 1493 | } |
1468 | 1494 | ||
1469 | ixgbe_reset(adapter); | 1495 | ixgbe_reset(adapter); |
1470 | 1496 | ||
1471 | ixgbe_free_tx_resources(adapter, &adapter->test_tx_ring); | 1497 | ixgbe_free_tx_resources(&adapter->test_tx_ring); |
1472 | ixgbe_free_rx_resources(adapter, &adapter->test_rx_ring); | 1498 | ixgbe_free_rx_resources(&adapter->test_rx_ring); |
1473 | } | 1499 | } |
1474 | 1500 | ||
1475 | static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) | 1501 | static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) |
@@ -1483,17 +1509,24 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) | |||
1483 | /* Setup Tx descriptor ring and Tx buffers */ | 1509 | /* Setup Tx descriptor ring and Tx buffers */ |
1484 | tx_ring->count = IXGBE_DEFAULT_TXD; | 1510 | tx_ring->count = IXGBE_DEFAULT_TXD; |
1485 | tx_ring->queue_index = 0; | 1511 | tx_ring->queue_index = 0; |
1512 | tx_ring->dev = &adapter->pdev->dev; | ||
1513 | tx_ring->netdev = adapter->netdev; | ||
1486 | tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx; | 1514 | tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx; |
1487 | tx_ring->numa_node = adapter->node; | 1515 | tx_ring->numa_node = adapter->node; |
1488 | 1516 | ||
1489 | err = ixgbe_setup_tx_resources(adapter, tx_ring); | 1517 | err = ixgbe_setup_tx_resources(tx_ring); |
1490 | if (err) | 1518 | if (err) |
1491 | return 1; | 1519 | return 1; |
1492 | 1520 | ||
1493 | if (adapter->hw.mac.type == ixgbe_mac_82599EB) { | 1521 | switch (adapter->hw.mac.type) { |
1522 | case ixgbe_mac_82599EB: | ||
1523 | case ixgbe_mac_X540: | ||
1494 | reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL); | 1524 | reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL); |
1495 | reg_data |= IXGBE_DMATXCTL_TE; | 1525 | reg_data |= IXGBE_DMATXCTL_TE; |
1496 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data); | 1526 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data); |
1527 | break; | ||
1528 | default: | ||
1529 | break; | ||
1497 | } | 1530 | } |
1498 | 1531 | ||
1499 | ixgbe_configure_tx_ring(adapter, tx_ring); | 1532 | ixgbe_configure_tx_ring(adapter, tx_ring); |
@@ -1501,11 +1534,13 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) | |||
1501 | /* Setup Rx Descriptor ring and Rx buffers */ | 1534 | /* Setup Rx Descriptor ring and Rx buffers */ |
1502 | rx_ring->count = IXGBE_DEFAULT_RXD; | 1535 | rx_ring->count = IXGBE_DEFAULT_RXD; |
1503 | rx_ring->queue_index = 0; | 1536 | rx_ring->queue_index = 0; |
1537 | rx_ring->dev = &adapter->pdev->dev; | ||
1538 | rx_ring->netdev = adapter->netdev; | ||
1504 | rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx; | 1539 | rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx; |
1505 | rx_ring->rx_buf_len = IXGBE_RXBUFFER_2048; | 1540 | rx_ring->rx_buf_len = IXGBE_RXBUFFER_2048; |
1506 | rx_ring->numa_node = adapter->node; | 1541 | rx_ring->numa_node = adapter->node; |
1507 | 1542 | ||
1508 | err = ixgbe_setup_rx_resources(adapter, rx_ring); | 1543 | err = ixgbe_setup_rx_resources(rx_ring); |
1509 | if (err) { | 1544 | if (err) { |
1510 | ret_val = 4; | 1545 | ret_val = 4; |
1511 | goto err_nomem; | 1546 | goto err_nomem; |
@@ -1604,8 +1639,7 @@ static int ixgbe_check_lbtest_frame(struct sk_buff *skb, | |||
1604 | return 13; | 1639 | return 13; |
1605 | } | 1640 | } |
1606 | 1641 | ||
1607 | static u16 ixgbe_clean_test_rings(struct ixgbe_adapter *adapter, | 1642 | static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring, |
1608 | struct ixgbe_ring *rx_ring, | ||
1609 | struct ixgbe_ring *tx_ring, | 1643 | struct ixgbe_ring *tx_ring, |
1610 | unsigned int size) | 1644 | unsigned int size) |
1611 | { | 1645 | { |
@@ -1627,7 +1661,7 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_adapter *adapter, | |||
1627 | rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc]; | 1661 | rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc]; |
1628 | 1662 | ||
1629 | /* unmap Rx buffer, will be remapped by alloc_rx_buffers */ | 1663 | /* unmap Rx buffer, will be remapped by alloc_rx_buffers */ |
1630 | dma_unmap_single(&adapter->pdev->dev, | 1664 | dma_unmap_single(rx_ring->dev, |
1631 | rx_buffer_info->dma, | 1665 | rx_buffer_info->dma, |
1632 | bufsz, | 1666 | bufsz, |
1633 | DMA_FROM_DEVICE); | 1667 | DMA_FROM_DEVICE); |
@@ -1639,7 +1673,7 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_adapter *adapter, | |||
1639 | 1673 | ||
1640 | /* unmap buffer on Tx side */ | 1674 | /* unmap buffer on Tx side */ |
1641 | tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc]; | 1675 | tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc]; |
1642 | ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info); | 1676 | ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); |
1643 | 1677 | ||
1644 | /* increment Rx/Tx next to clean counters */ | 1678 | /* increment Rx/Tx next to clean counters */ |
1645 | rx_ntc++; | 1679 | rx_ntc++; |
@@ -1655,7 +1689,7 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_adapter *adapter, | |||
1655 | } | 1689 | } |
1656 | 1690 | ||
1657 | /* re-map buffers to ring, store next to clean values */ | 1691 | /* re-map buffers to ring, store next to clean values */ |
1658 | ixgbe_alloc_rx_buffers(adapter, rx_ring, count); | 1692 | ixgbe_alloc_rx_buffers(rx_ring, count); |
1659 | rx_ring->next_to_clean = rx_ntc; | 1693 | rx_ring->next_to_clean = rx_ntc; |
1660 | tx_ring->next_to_clean = tx_ntc; | 1694 | tx_ring->next_to_clean = tx_ntc; |
1661 | 1695 | ||
@@ -1699,7 +1733,6 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter) | |||
1699 | for (i = 0; i < 64; i++) { | 1733 | for (i = 0; i < 64; i++) { |
1700 | skb_get(skb); | 1734 | skb_get(skb); |
1701 | tx_ret_val = ixgbe_xmit_frame_ring(skb, | 1735 | tx_ret_val = ixgbe_xmit_frame_ring(skb, |
1702 | adapter->netdev, | ||
1703 | adapter, | 1736 | adapter, |
1704 | tx_ring); | 1737 | tx_ring); |
1705 | if (tx_ret_val == NETDEV_TX_OK) | 1738 | if (tx_ret_val == NETDEV_TX_OK) |
@@ -1714,8 +1747,7 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter) | |||
1714 | /* allow 200 milliseconds for packets to go from Tx to Rx */ | 1747 | /* allow 200 milliseconds for packets to go from Tx to Rx */ |
1715 | msleep(200); | 1748 | msleep(200); |
1716 | 1749 | ||
1717 | good_cnt = ixgbe_clean_test_rings(adapter, rx_ring, | 1750 | good_cnt = ixgbe_clean_test_rings(rx_ring, tx_ring, size); |
1718 | tx_ring, size); | ||
1719 | if (good_cnt != 64) { | 1751 | if (good_cnt != 64) { |
1720 | ret_val = 13; | 1752 | ret_val = 13; |
1721 | break; | 1753 | break; |
@@ -1848,6 +1880,13 @@ static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter, | |||
1848 | int retval = 1; | 1880 | int retval = 1; |
1849 | 1881 | ||
1850 | switch(hw->device_id) { | 1882 | switch(hw->device_id) { |
1883 | case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: | ||
1884 | /* All except this subdevice support WOL */ | ||
1885 | if (hw->subsystem_device_id == | ||
1886 | IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) { | ||
1887 | wol->supported = 0; | ||
1888 | break; | ||
1889 | } | ||
1851 | case IXGBE_DEV_ID_82599_KX4: | 1890 | case IXGBE_DEV_ID_82599_KX4: |
1852 | retval = 0; | 1891 | retval = 0; |
1853 | break; | 1892 | break; |
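Note the deliberate fall-through in the WOL exclusion above: the combo-backplane case only breaks out early for the one KR mezzanine subdevice, leaving it excluded from WOL, while every other subdevice drops into the KX4 case's retval = 0. Reduced to a runnable toy with made-up IDs:

#include <stdio.h>

static int wol_excluded(unsigned int dev, unsigned int subdev)
{
	int retval = 1;			/* excluded unless whitelisted */

	switch (dev) {
	case 0x10F8:			/* hypothetical combo backplane */
		if (subdev == 0x000C)	/* the lone no-WOL mezz card */
			break;		/* stays excluded */
		/* fall through: behaves like KX4 */
	case 0x10F7:			/* hypothetical KX4 */
		retval = 0;
		break;
	default:
		break;
	}
	return retval;
}

int main(void)
{
	printf("%d %d %d\n",
	       wol_excluded(0x10F8, 0x000C),	/* 1: excluded */
	       wol_excluded(0x10F8, 0x0001),	/* 0: falls through */
	       wol_excluded(0x10F7, 0x0000));	/* 0: plain KX4 */
	return 0;
}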
@@ -1985,6 +2024,41 @@ static int ixgbe_get_coalesce(struct net_device *netdev, | |||
1985 | return 0; | 2024 | return 0; |
1986 | } | 2025 | } |
1987 | 2026 | ||
2027 | /* | ||
2028 | * This function must be called before setting the new value of | ||
2029 | * rx_itr_setting | ||
2030 | */ | ||
2031 | static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter, | ||
2032 | struct ethtool_coalesce *ec) | ||
2033 | { | ||
2034 | struct net_device *netdev = adapter->netdev; | ||
2035 | |||
2036 | if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) | ||
2037 | return false; | ||
2038 | |||
2039 | /* if interrupt rate is too high then disable RSC */ | ||
2040 | if (ec->rx_coalesce_usecs != 1 && | ||
2041 | ec->rx_coalesce_usecs <= 1000000/IXGBE_MAX_RSC_INT_RATE) { | ||
2042 | if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { | ||
2043 | e_info(probe, "rx-usecs set too low, " | ||
2044 | "disabling RSC\n"); | ||
2045 | adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED; | ||
2046 | return true; | ||
2047 | } | ||
2048 | } else { | ||
2049 | /* check the feature flag value and enable RSC if necessary */ | ||
2050 | if ((netdev->features & NETIF_F_LRO) && | ||
2051 | !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) { | ||
2052 | e_info(probe, "rx-usecs set to %d, " | ||
2053 | "re-enabling RSC\n", | ||
2054 | ec->rx_coalesce_usecs); | ||
2055 | adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; | ||
2056 | return true; | ||
2057 | } | ||
2058 | } | ||
2059 | return false; | ||
2060 | } | ||
2061 | |||
1988 | static int ixgbe_set_coalesce(struct net_device *netdev, | 2062 | static int ixgbe_set_coalesce(struct net_device *netdev, |
1989 | struct ethtool_coalesce *ec) | 2063 | struct ethtool_coalesce *ec) |
1990 | { | 2064 | { |
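ixgbe_update_rsc() centralizes the enable/disable decision that ixgbe_set_coalesce() previously open-coded in its itr == 0 branch. The gate is plain rate arithmetic: any rx-usecs value other than the dynamic value 1 that is at or below 1000000/IXGBE_MAX_RSC_INT_RATE implies an interrupt rate too high for RSC. A back-of-envelope check, assuming IXGBE_MAX_RSC_INT_RATE is 162760 as defined elsewhere in this driver generation:

#include <stdio.h>

#define IXGBE_MAX_RSC_INT_RATE	162760	/* assumption for the sketch */

int main(void)
{
	unsigned int rx_usecs = 4;		/* ethtool -C rx-usecs 4 */
	unsigned int rate = 1000000 / rx_usecs;	/* 250000 ints/sec */

	/* 1000000/162760 == 6, so usecs <= 6 (and != 1) disables RSC */
	printf("%s RSC\n",
	       rate > IXGBE_MAX_RSC_INT_RATE ? "disable" : "keep");
	return 0;
}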
@@ -2002,17 +2076,14 @@ static int ixgbe_set_coalesce(struct net_device *netdev, | |||
2002 | adapter->tx_ring[0]->work_limit = ec->tx_max_coalesced_frames_irq; | 2076 | adapter->tx_ring[0]->work_limit = ec->tx_max_coalesced_frames_irq; |
2003 | 2077 | ||
2004 | if (ec->rx_coalesce_usecs > 1) { | 2078 | if (ec->rx_coalesce_usecs > 1) { |
2005 | u32 max_int; | ||
2006 | if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) | ||
2007 | max_int = IXGBE_MAX_RSC_INT_RATE; | ||
2008 | else | ||
2009 | max_int = IXGBE_MAX_INT_RATE; | ||
2010 | |||
2011 | /* check the limits */ | 2079 | /* check the limits */ |
2012 | if ((1000000/ec->rx_coalesce_usecs > max_int) || | 2080 | if ((1000000/ec->rx_coalesce_usecs > IXGBE_MAX_INT_RATE) || |
2013 | (1000000/ec->rx_coalesce_usecs < IXGBE_MIN_INT_RATE)) | 2081 | (1000000/ec->rx_coalesce_usecs < IXGBE_MIN_INT_RATE)) |
2014 | return -EINVAL; | 2082 | return -EINVAL; |
2015 | 2083 | ||
2084 | /* check the old value and enable RSC if necessary */ | ||
2085 | need_reset = ixgbe_update_rsc(adapter, ec); | ||
2086 | |||
2016 | /* store the value in ints/second */ | 2087 | /* store the value in ints/second */ |
2017 | adapter->rx_eitr_param = 1000000/ec->rx_coalesce_usecs; | 2088 | adapter->rx_eitr_param = 1000000/ec->rx_coalesce_usecs; |
2018 | 2089 | ||
@@ -2021,32 +2092,21 @@ static int ixgbe_set_coalesce(struct net_device *netdev, | |||
2021 | /* clear the lower bit as its used for dynamic state */ | 2092 | /* clear the lower bit as its used for dynamic state */ |
2022 | adapter->rx_itr_setting &= ~1; | 2093 | adapter->rx_itr_setting &= ~1; |
2023 | } else if (ec->rx_coalesce_usecs == 1) { | 2094 | } else if (ec->rx_coalesce_usecs == 1) { |
2095 | /* check the old value and enable RSC if necessary */ | ||
2096 | need_reset = ixgbe_update_rsc(adapter, ec); | ||
2097 | |||
2024 | /* 1 means dynamic mode */ | 2098 | /* 1 means dynamic mode */ |
2025 | adapter->rx_eitr_param = 20000; | 2099 | adapter->rx_eitr_param = 20000; |
2026 | adapter->rx_itr_setting = 1; | 2100 | adapter->rx_itr_setting = 1; |
2027 | } else { | 2101 | } else { |
2102 | /* check the old value and enable RSC if necessary */ | ||
2103 | need_reset = ixgbe_update_rsc(adapter, ec); | ||
2028 | /* | 2104 | /* |
2029 | * any other value means disable eitr, which is best | 2105 | * any other value means disable eitr, which is best |
2030 | * served by setting the interrupt rate very high | 2106 | * served by setting the interrupt rate very high |
2031 | */ | 2107 | */ |
2032 | adapter->rx_eitr_param = IXGBE_MAX_INT_RATE; | 2108 | adapter->rx_eitr_param = IXGBE_MAX_INT_RATE; |
2033 | adapter->rx_itr_setting = 0; | 2109 | adapter->rx_itr_setting = 0; |
2034 | |||
2035 | /* | ||
2036 | * if hardware RSC is enabled, disable it when | ||
2037 | * setting low latency mode, to avoid errata, assuming | ||
2038 | * that when the user set low latency mode they want | ||
2039 | * it at the cost of anything else | ||
2040 | */ | ||
2041 | if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { | ||
2042 | adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED; | ||
2043 | if (netdev->features & NETIF_F_LRO) { | ||
2044 | netdev->features &= ~NETIF_F_LRO; | ||
2045 | e_info(probe, "rx-usecs set to 0, " | ||
2046 | "disabling RSC\n"); | ||
2047 | } | ||
2048 | need_reset = true; | ||
2049 | } | ||
2050 | } | 2110 | } |
2051 | 2111 | ||
2052 | if (ec->tx_coalesce_usecs > 1) { | 2112 | if (ec->tx_coalesce_usecs > 1) { |
@@ -2133,28 +2193,39 @@ static int ixgbe_set_flags(struct net_device *netdev, u32 data) | |||
2133 | return rc; | 2193 | return rc; |
2134 | 2194 | ||
2135 | /* if state changes we need to update adapter->flags and reset */ | 2195 | /* if state changes we need to update adapter->flags and reset */ |
2136 | if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) { | 2196 | if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) && |
2137 | /* | 2197 | (!!(data & ETH_FLAG_LRO) != |
2138 | * cast both to bool and verify if they are set the same | 2198 | !!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) { |
2139 | * but only enable RSC if itr is non-zero, as | 2199 | if ((data & ETH_FLAG_LRO) && |
2140 | * itr=0 and RSC are mutually exclusive | 2200 | (!adapter->rx_itr_setting || |
2141 | */ | 2201 | (adapter->rx_itr_setting > IXGBE_MAX_RSC_INT_RATE))) { |
2142 | if (((!!(data & ETH_FLAG_LRO)) != | 2202 | e_info(probe, "rx-usecs set too low, " |
2143 | (!!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) && | 2203 | "not enabling RSC.\n"); |
2144 | adapter->rx_itr_setting) { | 2204 | } else { |
2145 | adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED; | 2205 | adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED; |
2146 | switch (adapter->hw.mac.type) { | 2206 | switch (adapter->hw.mac.type) { |
2147 | case ixgbe_mac_82599EB: | 2207 | case ixgbe_mac_82599EB: |
2148 | need_reset = true; | 2208 | need_reset = true; |
2149 | break; | 2209 | break; |
2210 | case ixgbe_mac_X540: { | ||
2211 | int i; | ||
2212 | for (i = 0; i < adapter->num_rx_queues; i++) { | ||
2213 | struct ixgbe_ring *ring = | ||
2214 | adapter->rx_ring[i]; | ||
2215 | if (adapter->flags2 & | ||
2216 | IXGBE_FLAG2_RSC_ENABLED) { | ||
2217 | ixgbe_configure_rscctl(adapter, | ||
2218 | ring); | ||
2219 | } else { | ||
2220 | ixgbe_clear_rscctl(adapter, | ||
2221 | ring); | ||
2222 | } | ||
2223 | } | ||
2224 | } | ||
2225 | break; | ||
2150 | default: | 2226 | default: |
2151 | break; | 2227 | break; |
2152 | } | 2228 | } |
2153 | } else if (!adapter->rx_itr_setting) { | ||
2154 | netdev->features &= ~NETIF_F_LRO; | ||
2155 | if (data & ETH_FLAG_LRO) | ||
2156 | e_info(probe, "rx-usecs set to 0, " | ||
2157 | "LRO/RSC cannot be enabled.\n"); | ||
2158 | } | 2229 | } |
2159 | } | 2230 | } |
2160 | 2231 | ||
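The rewritten guard in ixgbe_set_flags compares the requested LRO bit against the driver's RSC flag using !! normalization, which is what makes two bits of different weight comparable as booleans. In isolation (both constants are assumptions for the sketch):

#include <stdio.h>

#define ETH_FLAG_LRO	0x8000u	/* assumed bit position */
#define RSC_ENABLED	0x0002u	/* deliberately a different weight */

int main(void)
{
	unsigned int data = ETH_FLAG_LRO, flags2 = 0;

	/* !! collapses each test to 0 or 1 before the comparison */
	if (!!(data & ETH_FLAG_LRO) != !!(flags2 & RSC_ENABLED))
		printf("state change: toggle RSC\n");
	return 0;
}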
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c index 05efa6a8ce8e..6342d4859790 100644 --- a/drivers/net/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ixgbe/ixgbe_fcoe.c | |||
@@ -68,7 +68,7 @@ static inline bool ixgbe_rx_is_fcoe(union ixgbe_adv_rx_desc *rx_desc) | |||
68 | static inline void ixgbe_fcoe_clear_ddp(struct ixgbe_fcoe_ddp *ddp) | 68 | static inline void ixgbe_fcoe_clear_ddp(struct ixgbe_fcoe_ddp *ddp) |
69 | { | 69 | { |
70 | ddp->len = 0; | 70 | ddp->len = 0; |
71 | ddp->err = 0; | 71 | ddp->err = 1; |
72 | ddp->udl = NULL; | 72 | ddp->udl = NULL; |
73 | ddp->udp = 0UL; | 73 | ddp->udp = 0UL; |
74 | ddp->sgl = NULL; | 74 | ddp->sgl = NULL; |
@@ -92,6 +92,7 @@ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid) | |||
92 | struct ixgbe_fcoe *fcoe; | 92 | struct ixgbe_fcoe *fcoe; |
93 | struct ixgbe_adapter *adapter; | 93 | struct ixgbe_adapter *adapter; |
94 | struct ixgbe_fcoe_ddp *ddp; | 94 | struct ixgbe_fcoe_ddp *ddp; |
95 | u32 fcbuff; | ||
95 | 96 | ||
96 | if (!netdev) | 97 | if (!netdev) |
97 | goto out_ddp_put; | 98 | goto out_ddp_put; |
@@ -115,7 +116,14 @@ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid) | |||
115 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCBUFF, 0); | 116 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCBUFF, 0); |
116 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW, | 117 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW, |
117 | (xid | IXGBE_FCDMARW_WE)); | 118 | (xid | IXGBE_FCDMARW_WE)); |
119 | |||
120 | /* guaranteed to be invalidated after 100us */ | ||
121 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW, | ||
122 | (xid | IXGBE_FCDMARW_RE)); | ||
123 | fcbuff = IXGBE_READ_REG(&adapter->hw, IXGBE_FCBUFF); | ||
118 | spin_unlock_bh(&fcoe->lock); | 124 | spin_unlock_bh(&fcoe->lock); |
125 | if (fcbuff & IXGBE_FCBUFF_VALID) | ||
126 | udelay(100); | ||
119 | } | 127 | } |
120 | if (ddp->sgl) | 128 | if (ddp->sgl) |
121 | pci_unmap_sg(adapter->pdev, ddp->sgl, ddp->sgc, | 129 | pci_unmap_sg(adapter->pdev, ddp->sgl, ddp->sgc, |
@@ -168,6 +176,11 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, | |||
168 | return 0; | 176 | return 0; |
169 | } | 177 | } |
170 | 178 | ||
179 | /* no DDP if we are already down or resetting */ | ||
180 | if (test_bit(__IXGBE_DOWN, &adapter->state) || | ||
181 | test_bit(__IXGBE_RESETTING, &adapter->state)) | ||
182 | return 0; | ||
183 | |||
171 | fcoe = &adapter->fcoe; | 184 | fcoe = &adapter->fcoe; |
172 | if (!fcoe->pool) { | 185 | if (!fcoe->pool) { |
173 | e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid); | 186 | e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid); |
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index fbad4d819608..5409af3da06c 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c | |||
@@ -59,6 +59,7 @@ static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation."; | |||
59 | static const struct ixgbe_info *ixgbe_info_tbl[] = { | 59 | static const struct ixgbe_info *ixgbe_info_tbl[] = { |
60 | [board_82598] = &ixgbe_82598_info, | 60 | [board_82598] = &ixgbe_82598_info, |
61 | [board_82599] = &ixgbe_82599_info, | 61 | [board_82599] = &ixgbe_82599_info, |
62 | [board_X540] = &ixgbe_X540_info, | ||
62 | }; | 63 | }; |
63 | 64 | ||
64 | /* ixgbe_pci_tbl - PCI Device ID Table | 65 | /* ixgbe_pci_tbl - PCI Device ID Table |
@@ -112,6 +113,8 @@ static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = { | |||
112 | board_82599 }, | 113 | board_82599 }, |
113 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), | 114 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), |
114 | board_82599 }, | 115 | board_82599 }, |
116 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), | ||
117 | board_82599 }, | ||
115 | 118 | ||
116 | /* required last entry */ | 119 | /* required last entry */ |
117 | {0, } | 120 | {0, } |
@@ -560,6 +563,7 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction, | |||
560 | IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar); | 563 | IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar); |
561 | break; | 564 | break; |
562 | case ixgbe_mac_82599EB: | 565 | case ixgbe_mac_82599EB: |
566 | case ixgbe_mac_X540: | ||
563 | if (direction == -1) { | 567 | if (direction == -1) { |
564 | /* other causes */ | 568 | /* other causes */ |
565 | msix_vector |= IXGBE_IVAR_ALLOC_VAL; | 569 | msix_vector |= IXGBE_IVAR_ALLOC_VAL; |
@@ -589,29 +593,34 @@ static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, | |||
589 | { | 593 | { |
590 | u32 mask; | 594 | u32 mask; |
591 | 595 | ||
592 | if (adapter->hw.mac.type == ixgbe_mac_82598EB) { | 596 | switch (adapter->hw.mac.type) { |
597 | case ixgbe_mac_82598EB: | ||
593 | mask = (IXGBE_EIMS_RTX_QUEUE & qmask); | 598 | mask = (IXGBE_EIMS_RTX_QUEUE & qmask); |
594 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask); | 599 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask); |
595 | } else { | 600 | break; |
601 | case ixgbe_mac_82599EB: | ||
602 | case ixgbe_mac_X540: | ||
596 | mask = (qmask & 0xFFFFFFFF); | 603 | mask = (qmask & 0xFFFFFFFF); |
597 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask); | 604 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask); |
598 | mask = (qmask >> 32); | 605 | mask = (qmask >> 32); |
599 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask); | 606 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask); |
607 | break; | ||
608 | default: | ||
609 | break; | ||
600 | } | 610 | } |
601 | } | 611 | } |
602 | 612 | ||
603 | void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter, | 613 | void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring, |
604 | struct ixgbe_tx_buffer | 614 | struct ixgbe_tx_buffer *tx_buffer_info) |
605 | *tx_buffer_info) | ||
606 | { | 615 | { |
607 | if (tx_buffer_info->dma) { | 616 | if (tx_buffer_info->dma) { |
608 | if (tx_buffer_info->mapped_as_page) | 617 | if (tx_buffer_info->mapped_as_page) |
609 | dma_unmap_page(&adapter->pdev->dev, | 618 | dma_unmap_page(tx_ring->dev, |
610 | tx_buffer_info->dma, | 619 | tx_buffer_info->dma, |
611 | tx_buffer_info->length, | 620 | tx_buffer_info->length, |
612 | DMA_TO_DEVICE); | 621 | DMA_TO_DEVICE); |
613 | else | 622 | else |
614 | dma_unmap_single(&adapter->pdev->dev, | 623 | dma_unmap_single(tx_ring->dev, |
615 | tx_buffer_info->dma, | 624 | tx_buffer_info->dma, |
616 | tx_buffer_info->length, | 625 | tx_buffer_info->length, |
617 | DMA_TO_DEVICE); | 626 | DMA_TO_DEVICE); |
@@ -626,92 +635,166 @@ void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter, | |||
626 | } | 635 | } |
627 | 636 | ||
628 | /** | 637 | /** |
629 | * ixgbe_tx_xon_state - check the tx ring xon state | 638 | * ixgbe_dcb_txq_to_tc - convert a reg index to a traffic class |
630 | * @adapter: the ixgbe adapter | 639 | * @adapter: driver private struct |
631 | * @tx_ring: the corresponding tx_ring | 640 | * @reg_idx: reg idx of queue to query (0-127) |
632 | * | 641 | * |
633 | * If not in DCB mode, checks TFCS.TXOFF, otherwise, find out the | 642 | * Helper function to determine the traffic index for a paticular |
634 | * corresponding TC of this tx_ring when checking TFCS. | 643 | * register index. |
635 | * | 644 | * |
636 | * | 645 | * Returns : a TC index in the range 0-7 (8 TCs) or 0-3 (4 TCs) |
637 | */ | 646 | */ |
638 | static inline bool ixgbe_tx_xon_state(struct ixgbe_adapter *adapter, | 647 | u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 reg_idx) |
639 | struct ixgbe_ring *tx_ring) | ||
640 | { | 648 | { |
641 | u32 txoff = IXGBE_TFCS_TXOFF; | 649 | int tc = -1; |
650 | int dcb_i = adapter->ring_feature[RING_F_DCB].indices; | ||
642 | 651 | ||
643 | #ifdef CONFIG_IXGBE_DCB | 652 | /* if DCB is not enabled the queues have no TC */ |
644 | if (adapter->dcb_cfg.pfc_mode_enable) { | 653 | if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) |
645 | int tc; | 654 | return tc; |
646 | int reg_idx = tx_ring->reg_idx; | 655 | |
647 | int dcb_i = adapter->ring_feature[RING_F_DCB].indices; | 656 | /* check valid range */ |
657 | if (reg_idx >= adapter->hw.mac.max_tx_queues) | ||
658 | return tc; | ||
659 | |||
660 | switch (adapter->hw.mac.type) { | ||
661 | case ixgbe_mac_82598EB: | ||
662 | tc = reg_idx >> 2; | ||
663 | break; | ||
664 | default: | ||
665 | if (dcb_i != 4 && dcb_i != 8) | ||
666 | break; | ||
667 | |||
668 | /* if VMDq is enabled the lowest order bits determine TC */ | ||
669 | if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED | | ||
670 | IXGBE_FLAG_VMDQ_ENABLED)) { | ||
671 | tc = reg_idx & (dcb_i - 1); | ||
672 | break; | ||
673 | } | ||
674 | |||
675 | /* | ||
676 | * Convert the reg_idx into the correct TC. This bitmask | ||
677 | * targets the last full 32 ring traffic class and assigns | ||
678 | * it a value of 1. From there the rest of the rings are | ||
679 | * based on shifting the mask further up to include the | ||
680 | * reg_idx / 16 and then reg_idx / 8. It assumes dcb_i | ||
681 | * will only ever be 8 or 4 and that reg_idx will never | ||
682 | * be greater than 128. The code without the power of 2 | ||
683 | * optimizations would be: | ||
684 | * (((reg_idx % 32) + 32) * dcb_i) >> (9 - reg_idx / 32) | ||
685 | */ | ||
686 | tc = ((reg_idx & 0X1F) + 0x20) * dcb_i; | ||
687 | tc >>= 9 - (reg_idx >> 5); | ||
688 | } | ||
689 | |||
690 | return tc; | ||
691 | } | ||
648 | 692 | ||
649 | switch (adapter->hw.mac.type) { | 693 | static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter) |
694 | { | ||
695 | struct ixgbe_hw *hw = &adapter->hw; | ||
696 | struct ixgbe_hw_stats *hwstats = &adapter->stats; | ||
697 | u32 data = 0; | ||
698 | u32 xoff[8] = {0}; | ||
699 | int i; | ||
700 | |||
701 | if ((hw->fc.current_mode == ixgbe_fc_full) || | ||
702 | (hw->fc.current_mode == ixgbe_fc_rx_pause)) { | ||
703 | switch (hw->mac.type) { | ||
650 | case ixgbe_mac_82598EB: | 704 | case ixgbe_mac_82598EB: |
651 | tc = reg_idx >> 2; | 705 | data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); |
652 | txoff = IXGBE_TFCS_TXOFF0; | ||
653 | break; | 706 | break; |
654 | case ixgbe_mac_82599EB: | 707 | default: |
655 | tc = 0; | 708 | data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); |
656 | txoff = IXGBE_TFCS_TXOFF; | 709 | } |
657 | if (dcb_i == 8) { | 710 | hwstats->lxoffrxc += data; |
658 | /* TC0, TC1 */ | 711 | |
659 | tc = reg_idx >> 5; | 712 | /* refill credits (no tx hang) if we received xoff */ |
660 | if (tc == 2) /* TC2, TC3 */ | 713 | if (!data) |
661 | tc += (reg_idx - 64) >> 4; | 714 | return; |
662 | else if (tc == 3) /* TC4, TC5, TC6, TC7 */ | 715 | |
663 | tc += 1 + ((reg_idx - 96) >> 3); | 716 | for (i = 0; i < adapter->num_tx_queues; i++) |
664 | } else if (dcb_i == 4) { | 717 | clear_bit(__IXGBE_HANG_CHECK_ARMED, |
665 | /* TC0, TC1 */ | 718 | &adapter->tx_ring[i]->state); |
666 | tc = reg_idx >> 6; | 719 | return; |
667 | if (tc == 1) { | 720 | } else if (!(adapter->dcb_cfg.pfc_mode_enable)) |
668 | tc += (reg_idx - 64) >> 5; | 721 | return; |
669 | if (tc == 2) /* TC2, TC3 */ | 722 | |
670 | tc += (reg_idx - 96) >> 4; | 723 | /* update stats for each tc, only valid with PFC enabled */ |
671 | } | 724 | for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) { |
672 | } | 725 | switch (hw->mac.type) { |
726 | case ixgbe_mac_82598EB: | ||
727 | xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); | ||
673 | break; | 728 | break; |
674 | default: | 729 | default: |
675 | tc = 0; | 730 | xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i)); |
676 | } | 731 | } |
677 | txoff <<= tc; | 732 | hwstats->pxoffrxc[i] += xoff[i]; |
733 | } | ||
734 | |||
735 | /* disarm tx queues that have received xoff frames */ | ||
736 | for (i = 0; i < adapter->num_tx_queues; i++) { | ||
737 | struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; | ||
738 | u32 tc = ixgbe_dcb_txq_to_tc(adapter, tx_ring->reg_idx); | ||
739 | |||
740 | if (xoff[tc]) | ||
741 | clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state); | ||
678 | } | 742 | } |
679 | #endif | ||
680 | return IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & txoff; | ||
681 | } | 743 | } |
682 | 744 | ||
683 | static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter, | 745 | static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring) |
684 | struct ixgbe_ring *tx_ring, | ||
685 | unsigned int eop) | ||
686 | { | 746 | { |
747 | return ring->tx_stats.completed; | ||
748 | } | ||
749 | |||
750 | static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring) | ||
751 | { | ||
752 | struct ixgbe_adapter *adapter = netdev_priv(ring->netdev); | ||
687 | struct ixgbe_hw *hw = &adapter->hw; | 753 | struct ixgbe_hw *hw = &adapter->hw; |
688 | 754 | ||
689 | /* Detect a transmit hang in hardware, this serializes the | 755 | u32 head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx)); |
690 | * check with the clearing of time_stamp and movement of eop */ | 756 | u32 tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx)); |
691 | adapter->detect_tx_hung = false; | 757 | |
692 | if (tx_ring->tx_buffer_info[eop].time_stamp && | 758 | if (head != tail) |
693 | time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) && | 759 | return (head < tail) ? |
694 | ixgbe_tx_xon_state(adapter, tx_ring)) { | 760 | tail - head : (tail + ring->count - head); |
695 | /* detected Tx unit hang */ | 761 | |
696 | union ixgbe_adv_tx_desc *tx_desc; | 762 | return 0; |
697 | tx_desc = IXGBE_TX_DESC_ADV(tx_ring, eop); | 763 | } |
698 | e_err(drv, "Detected Tx Unit Hang\n" | 764 | |
699 | " Tx Queue <%d>\n" | 765 | static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring) |
700 | " TDH, TDT <%x>, <%x>\n" | 766 | { |
701 | " next_to_use <%x>\n" | 767 | u32 tx_done = ixgbe_get_tx_completed(tx_ring); |
702 | " next_to_clean <%x>\n" | 768 | u32 tx_done_old = tx_ring->tx_stats.tx_done_old; |
703 | "tx_buffer_info[next_to_clean]\n" | 769 | u32 tx_pending = ixgbe_get_tx_pending(tx_ring); |
704 | " time_stamp <%lx>\n" | 770 | bool ret = false; |
705 | " jiffies <%lx>\n", | 771 | |
706 | tx_ring->queue_index, | 772 | clear_check_for_tx_hang(tx_ring); |
707 | IXGBE_READ_REG(hw, tx_ring->head), | 773 | |
708 | IXGBE_READ_REG(hw, tx_ring->tail), | 774 | /* |
709 | tx_ring->next_to_use, eop, | 775 | * Check for a hung queue, but be thorough. This verifies |
710 | tx_ring->tx_buffer_info[eop].time_stamp, jiffies); | 776 | * that a transmit has been completed since the previous |
711 | return true; | 777 | * check AND there is at least one packet pending. The |
778 | * ARMED bit is set to indicate a potential hang. The | ||
779 | * bit is cleared if a pause frame is received to remove | ||
780 | * false hang detection due to PFC or 802.3x frames. By | ||
781 | * requiring this to fail twice we avoid races with | ||
782 | * pfc clearing the ARMED bit and conditions where we | ||
783 | * run the check_tx_hang logic with a transmit completion | ||
784 | * pending but without time to complete it yet. | ||
785 | */ | ||
786 | if ((tx_done_old == tx_done) && tx_pending) { | ||
787 | /* make sure it is true for two checks in a row */ | ||
788 | ret = test_and_set_bit(__IXGBE_HANG_CHECK_ARMED, | ||
789 | &tx_ring->state); | ||
790 | } else { | ||
791 | /* update completed stats and continue */ | ||
792 | tx_ring->tx_stats.tx_done_old = tx_done; | ||
793 | /* reset the countdown */ | ||
794 | clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state); | ||
712 | } | 795 | } |
713 | 796 | ||
714 | return false; | 797 | return ret; |
715 | } | 798 | } |
716 | 799 | ||
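The two-strike behavior relies on test_and_set_bit() returning the bit's previous value; here is the pattern in isolation, with hypothetical names (a sketch, not driver code):

        /* Report a hang only when two consecutive checks observe zero
         * completions while descriptors are still pending. */
        static bool two_strike_hang_check(unsigned long *state, u64 done,
                                          u64 *done_old, u64 pending)
        {
                if (done == *done_old && pending)
                        /* first strike arms the bit, the second reports */
                        return test_and_set_bit(0, state);

                *done_old = done;       /* forward progress: restart countdown */
                clear_bit(0, state);
                return false;
        }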
717 | #define IXGBE_MAX_TXD_PWR 14 | 800 | #define IXGBE_MAX_TXD_PWR 14 |
@@ -734,11 +817,10 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, | |||
734 | struct ixgbe_ring *tx_ring) | 817 | struct ixgbe_ring *tx_ring) |
735 | { | 818 | { |
736 | struct ixgbe_adapter *adapter = q_vector->adapter; | 819 | struct ixgbe_adapter *adapter = q_vector->adapter; |
737 | struct net_device *netdev = adapter->netdev; | ||
738 | union ixgbe_adv_tx_desc *tx_desc, *eop_desc; | 820 | union ixgbe_adv_tx_desc *tx_desc, *eop_desc; |
739 | struct ixgbe_tx_buffer *tx_buffer_info; | 821 | struct ixgbe_tx_buffer *tx_buffer_info; |
740 | unsigned int i, eop, count = 0; | ||
741 | unsigned int total_bytes = 0, total_packets = 0; | 822 | unsigned int total_bytes = 0, total_packets = 0; |
823 | u16 i, eop, count = 0; | ||
742 | 824 | ||
743 | i = tx_ring->next_to_clean; | 825 | i = tx_ring->next_to_clean; |
744 | eop = tx_ring->tx_buffer_info[i].next_to_watch; | 826 | eop = tx_ring->tx_buffer_info[i].next_to_watch; |
@@ -749,148 +831,182 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, | |||
749 | bool cleaned = false; | 831 | bool cleaned = false; |
750 | rmb(); /* read buffer_info after eop_desc */ | 832 | rmb(); /* read buffer_info after eop_desc */ |
751 | for ( ; !cleaned; count++) { | 833 | for ( ; !cleaned; count++) { |
752 | struct sk_buff *skb; | ||
753 | tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i); | 834 | tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i); |
754 | tx_buffer_info = &tx_ring->tx_buffer_info[i]; | 835 | tx_buffer_info = &tx_ring->tx_buffer_info[i]; |
755 | cleaned = (i == eop); | ||
756 | skb = tx_buffer_info->skb; | ||
757 | |||
758 | if (cleaned && skb) { | ||
759 | unsigned int segs, bytecount; | ||
760 | unsigned int hlen = skb_headlen(skb); | ||
761 | |||
762 | /* gso_segs is currently only valid for tcp */ | ||
763 | segs = skb_shinfo(skb)->gso_segs ?: 1; | ||
764 | #ifdef IXGBE_FCOE | ||
765 | /* adjust for FCoE Sequence Offload */ | ||
766 | if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) | ||
767 | && skb_is_gso(skb) | ||
768 | && vlan_get_protocol(skb) == | ||
769 | htons(ETH_P_FCOE)) { | ||
770 | hlen = skb_transport_offset(skb) + | ||
771 | sizeof(struct fc_frame_header) + | ||
772 | sizeof(struct fcoe_crc_eof); | ||
773 | segs = DIV_ROUND_UP(skb->len - hlen, | ||
774 | skb_shinfo(skb)->gso_size); | ||
775 | } | ||
776 | #endif /* IXGBE_FCOE */ | ||
777 | /* multiply data chunks by size of headers */ | ||
778 | bytecount = ((segs - 1) * hlen) + skb->len; | ||
779 | total_packets += segs; | ||
780 | total_bytes += bytecount; | ||
781 | } | ||
782 | |||
783 | ixgbe_unmap_and_free_tx_resource(adapter, | ||
784 | tx_buffer_info); | ||
785 | 836 | ||
786 | tx_desc->wb.status = 0; | 837 | tx_desc->wb.status = 0; |
838 | cleaned = (i == eop); | ||
787 | 839 | ||
788 | i++; | 840 | i++; |
789 | if (i == tx_ring->count) | 841 | if (i == tx_ring->count) |
790 | i = 0; | 842 | i = 0; |
843 | |||
844 | if (cleaned && tx_buffer_info->skb) { | ||
845 | total_bytes += tx_buffer_info->bytecount; | ||
846 | total_packets += tx_buffer_info->gso_segs; | ||
847 | } | ||
848 | |||
849 | ixgbe_unmap_and_free_tx_resource(tx_ring, | ||
850 | tx_buffer_info); | ||
791 | } | 851 | } |
792 | 852 | ||
853 | tx_ring->tx_stats.completed++; | ||
793 | eop = tx_ring->tx_buffer_info[i].next_to_watch; | 854 | eop = tx_ring->tx_buffer_info[i].next_to_watch; |
794 | eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop); | 855 | eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop); |
795 | } | 856 | } |
796 | 857 | ||
797 | tx_ring->next_to_clean = i; | 858 | tx_ring->next_to_clean = i; |
859 | tx_ring->total_bytes += total_bytes; | ||
860 | tx_ring->total_packets += total_packets; | ||
861 | u64_stats_update_begin(&tx_ring->syncp); | ||
862 | tx_ring->stats.packets += total_packets; | ||
863 | tx_ring->stats.bytes += total_bytes; | ||
864 | u64_stats_update_end(&tx_ring->syncp); | ||
865 | |||
866 | if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) { | ||
867 | /* schedule immediate reset if we believe we hung */ | ||
868 | struct ixgbe_hw *hw = &adapter->hw; | ||
869 | tx_desc = IXGBE_TX_DESC_ADV(tx_ring, eop); | ||
870 | e_err(drv, "Detected Tx Unit Hang\n" | ||
871 | " Tx Queue <%d>\n" | ||
872 | " TDH, TDT <%x>, <%x>\n" | ||
873 | " next_to_use <%x>\n" | ||
874 | " next_to_clean <%x>\n" | ||
875 | "tx_buffer_info[next_to_clean]\n" | ||
876 | " time_stamp <%lx>\n" | ||
877 | " jiffies <%lx>\n", | ||
878 | tx_ring->queue_index, | ||
879 | IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)), | ||
880 | IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)), | ||
881 | tx_ring->next_to_use, eop, | ||
882 | tx_ring->tx_buffer_info[eop].time_stamp, jiffies); | ||
883 | |||
884 | netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); | ||
885 | |||
886 | e_info(probe, | ||
887 | "tx hang %d detected on queue %d, resetting adapter\n", | ||
888 | adapter->tx_timeout_count + 1, tx_ring->queue_index); | ||
889 | |||
890 | /* schedule immediate reset if we believe we hung */ | ||
891 | ixgbe_tx_timeout(adapter->netdev); | ||
892 | |||
893 | /* the adapter is about to reset, no point in enabling stuff */ | ||
894 | return true; | ||
895 | } | ||
798 | 896 | ||
799 | #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) | 897 | #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) |
800 | if (unlikely(count && netif_carrier_ok(netdev) && | 898 | if (unlikely(count && netif_carrier_ok(tx_ring->netdev) && |
801 | (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { | 899 | (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { |
802 | /* Make sure that anybody stopping the queue after this | 900 | /* Make sure that anybody stopping the queue after this |
803 | * sees the new next_to_clean. | 901 | * sees the new next_to_clean. |
804 | */ | 902 | */ |
805 | smp_mb(); | 903 | smp_mb(); |
806 | if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) && | 904 | if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index) && |
807 | !test_bit(__IXGBE_DOWN, &adapter->state)) { | 905 | !test_bit(__IXGBE_DOWN, &adapter->state)) { |
808 | netif_wake_subqueue(netdev, tx_ring->queue_index); | 906 | netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index); |
809 | ++tx_ring->restart_queue; | 907 | ++tx_ring->tx_stats.restart_queue; |
810 | } | ||
811 | } | ||
812 | |||
813 | if (adapter->detect_tx_hung) { | ||
814 | if (ixgbe_check_tx_hang(adapter, tx_ring, i)) { | ||
815 | /* schedule immediate reset if we believe we hung */ | ||
816 | e_info(probe, "tx hang %d detected, resetting " | ||
817 | "adapter\n", adapter->tx_timeout_count + 1); | ||
818 | ixgbe_tx_timeout(adapter->netdev); | ||
819 | } | 908 | } |
820 | } | 909 | } |
821 | 910 | ||
822 | /* re-arm the interrupt */ | ||
823 | if (count >= tx_ring->work_limit) | ||
824 | ixgbe_irq_rearm_queues(adapter, ((u64)1 << q_vector->v_idx)); | ||
825 | |||
826 | tx_ring->total_bytes += total_bytes; | ||
827 | tx_ring->total_packets += total_packets; | ||
828 | u64_stats_update_begin(&tx_ring->syncp); | ||
829 | tx_ring->stats.packets += total_packets; | ||
830 | tx_ring->stats.bytes += total_bytes; | ||
831 | u64_stats_update_end(&tx_ring->syncp); | ||
832 | return count < tx_ring->work_limit; | 911 | return count < tx_ring->work_limit; |
833 | } | 912 | } |
834 | 913 | ||
835 | #ifdef CONFIG_IXGBE_DCA | 914 | #ifdef CONFIG_IXGBE_DCA |
836 | static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter, | 915 | static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter, |
837 | struct ixgbe_ring *rx_ring) | 916 | struct ixgbe_ring *rx_ring, |
917 | int cpu) | ||
838 | { | 918 | { |
919 | struct ixgbe_hw *hw = &adapter->hw; | ||
839 | u32 rxctrl; | 920 | u32 rxctrl; |
840 | int cpu = get_cpu(); | 921 | u8 reg_idx = rx_ring->reg_idx; |
841 | int q = rx_ring->reg_idx; | 922 | |
842 | 923 | rxctrl = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(reg_idx)); | |
843 | if (rx_ring->cpu != cpu) { | 924 | switch (hw->mac.type) { |
844 | rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q)); | 925 | case ixgbe_mac_82598EB: |
845 | if (adapter->hw.mac.type == ixgbe_mac_82598EB) { | 926 | rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK; |
846 | rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK; | 927 | rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); |
847 | rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); | 928 | break; |
848 | } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) { | 929 | case ixgbe_mac_82599EB: |
849 | rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599; | 930 | case ixgbe_mac_X540: |
850 | rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) << | 931 | rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599; |
851 | IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599); | 932 | rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) << |
852 | } | 933 | IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599); |
853 | rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN; | 934 | break; |
854 | rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN; | 935 | default: |
855 | rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN); | 936 | break; |
856 | rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN | | ||
857 | IXGBE_DCA_RXCTRL_DESC_HSRO_EN); | ||
858 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl); | ||
859 | rx_ring->cpu = cpu; | ||
860 | } | 937 | } |
861 | put_cpu(); | 938 | rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN; |
939 | rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN; | ||
940 | rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN); | ||
941 | rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN | | ||
942 | IXGBE_DCA_RXCTRL_DESC_HSRO_EN); | ||
943 | IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl); | ||
862 | } | 944 | } |
863 | 945 | ||
864 | static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter, | 946 | static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter, |
865 | struct ixgbe_ring *tx_ring) | 947 | struct ixgbe_ring *tx_ring, |
948 | int cpu) | ||
866 | { | 949 | { |
950 | struct ixgbe_hw *hw = &adapter->hw; | ||
867 | u32 txctrl; | 951 | u32 txctrl; |
952 | u8 reg_idx = tx_ring->reg_idx; | ||
953 | |||
954 | switch (hw->mac.type) { | ||
955 | case ixgbe_mac_82598EB: | ||
956 | txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(reg_idx)); | ||
957 | txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK; | ||
958 | txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); | ||
959 | txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN; | ||
960 | txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN; | ||
961 | IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(reg_idx), txctrl); | ||
962 | break; | ||
963 | case ixgbe_mac_82599EB: | ||
964 | case ixgbe_mac_X540: | ||
965 | txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx)); | ||
966 | txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599; | ||
967 | txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) << | ||
968 | IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599); | ||
969 | txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN; | ||
970 | txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN; | ||
971 | IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx), txctrl); | ||
972 | break; | ||
973 | default: | ||
974 | break; | ||
975 | } | ||
976 | } | ||
977 | |||
978 | static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector) | ||
979 | { | ||
980 | struct ixgbe_adapter *adapter = q_vector->adapter; | ||
868 | int cpu = get_cpu(); | 981 | int cpu = get_cpu(); |
869 | int q = tx_ring->reg_idx; | 982 | long r_idx; |
870 | struct ixgbe_hw *hw = &adapter->hw; | 983 | int i; |
871 | 984 | ||
872 | if (tx_ring->cpu != cpu) { | 985 | if (q_vector->cpu == cpu) |
873 | if (adapter->hw.mac.type == ixgbe_mac_82598EB) { | 986 | goto out_no_update; |
874 | txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(q)); | 987 | |
875 | txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK; | 988 | r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); |
876 | txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); | 989 | for (i = 0; i < q_vector->txr_count; i++) { |
877 | txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN; | 990 | ixgbe_update_tx_dca(adapter, adapter->tx_ring[r_idx], cpu); |
878 | IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(q), txctrl); | 991 | r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, |
879 | } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) { | 992 | r_idx + 1); |
880 | txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(q)); | 993 | } |
881 | txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599; | 994 | |
882 | txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) << | 995 | r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); |
883 | IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599); | 996 | for (i = 0; i < q_vector->rxr_count; i++) { |
884 | txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN; | 997 | ixgbe_update_rx_dca(adapter, adapter->rx_ring[r_idx], cpu); |
885 | IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(q), txctrl); | 998 | r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, |
886 | } | 999 | r_idx + 1); |
887 | tx_ring->cpu = cpu; | ||
888 | } | 1000 | } |
1001 | |||
1002 | q_vector->cpu = cpu; | ||
1003 | out_no_update: | ||
889 | put_cpu(); | 1004 | put_cpu(); |
890 | } | 1005 | } |
891 | 1006 | ||
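The ring walk in ixgbe_update_dca() is the standard set-bit traversal of a bitmap; the driver bounds the loop by txr_count/rxr_count rather than the bitmap size, but the shape is the same (sketch; visit() is a hypothetical callback):

        /* Visit every set bit in 'ring_bits' exactly once (illustrative only). */
        static void for_each_set_ring(const unsigned long *ring_bits,
                                      unsigned long nbits,
                                      void (*visit)(unsigned long idx))
        {
                unsigned long r_idx = find_first_bit(ring_bits, nbits);

                while (r_idx < nbits) {
                        visit(r_idx);
                        r_idx = find_next_bit(ring_bits, nbits, r_idx + 1);
                }
        }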
892 | static void ixgbe_setup_dca(struct ixgbe_adapter *adapter) | 1007 | static void ixgbe_setup_dca(struct ixgbe_adapter *adapter) |
893 | { | 1008 | { |
1009 | int num_q_vectors; | ||
894 | int i; | 1010 | int i; |
895 | 1011 | ||
896 | if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED)) | 1012 | if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED)) |
@@ -899,22 +1015,25 @@ static void ixgbe_setup_dca(struct ixgbe_adapter *adapter) | |||
899 | /* always use CB2 mode, difference is masked in the CB driver */ | 1015 | /* always use CB2 mode, difference is masked in the CB driver */ |
900 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2); | 1016 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2); |
901 | 1017 | ||
902 | for (i = 0; i < adapter->num_tx_queues; i++) { | 1018 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) |
903 | adapter->tx_ring[i]->cpu = -1; | 1019 | num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; |
904 | ixgbe_update_tx_dca(adapter, adapter->tx_ring[i]); | 1020 | else |
905 | } | 1021 | num_q_vectors = 1; |
906 | for (i = 0; i < adapter->num_rx_queues; i++) { | 1022 | |
907 | adapter->rx_ring[i]->cpu = -1; | 1023 | for (i = 0; i < num_q_vectors; i++) { |
908 | ixgbe_update_rx_dca(adapter, adapter->rx_ring[i]); | 1024 | adapter->q_vector[i]->cpu = -1; |
1025 | ixgbe_update_dca(adapter->q_vector[i]); | ||
909 | } | 1026 | } |
910 | } | 1027 | } |
911 | 1028 | ||
912 | static int __ixgbe_notify_dca(struct device *dev, void *data) | 1029 | static int __ixgbe_notify_dca(struct device *dev, void *data) |
913 | { | 1030 | { |
914 | struct net_device *netdev = dev_get_drvdata(dev); | 1031 | struct ixgbe_adapter *adapter = dev_get_drvdata(dev); |
915 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | ||
916 | unsigned long event = *(unsigned long *)data; | 1032 | unsigned long event = *(unsigned long *)data; |
917 | 1033 | ||
1034 | if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED)) | ||
1035 | return 0; | ||
1036 | |||
918 | switch (event) { | 1037 | switch (event) { |
919 | case DCA_PROVIDER_ADD: | 1038 | case DCA_PROVIDER_ADD: |
920 | /* if we're already enabled, don't do it again */ | 1039 | /* if we're already enabled, don't do it again */ |
@@ -1013,8 +1132,7 @@ static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter, | |||
1013 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 1132 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
1014 | } | 1133 | } |
1015 | 1134 | ||
1016 | static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw, | 1135 | static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val) |
1017 | struct ixgbe_ring *rx_ring, u32 val) | ||
1018 | { | 1136 | { |
1019 | /* | 1137 | /* |
1020 | * Force memory writes to complete before letting h/w | 1138 | * Force memory writes to complete before letting h/w |
@@ -1023,72 +1141,81 @@ static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw, | |||
1023 | * such as IA-64). | 1141 | * such as IA-64). |
1024 | */ | 1142 | */ |
1025 | wmb(); | 1143 | wmb(); |
1026 | IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->reg_idx), val); | 1144 | writel(val, rx_ring->tail); |
1027 | } | 1145 | } |
1028 | 1146 | ||
1029 | /** | 1147 | /** |
1030 | * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split | 1148 | * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split |
1031 | * @adapter: address of board private structure | 1149 | * @rx_ring: ring to place buffers on |
1150 | * @cleaned_count: number of buffers to replace | ||
1032 | **/ | 1151 | **/ |
1033 | void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, | 1152 | void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count) |
1034 | struct ixgbe_ring *rx_ring, | ||
1035 | int cleaned_count) | ||
1036 | { | 1153 | { |
1037 | struct net_device *netdev = adapter->netdev; | ||
1038 | struct pci_dev *pdev = adapter->pdev; | ||
1039 | union ixgbe_adv_rx_desc *rx_desc; | 1154 | union ixgbe_adv_rx_desc *rx_desc; |
1040 | struct ixgbe_rx_buffer *bi; | 1155 | struct ixgbe_rx_buffer *bi; |
1041 | unsigned int i; | 1156 | struct sk_buff *skb; |
1042 | unsigned int bufsz = rx_ring->rx_buf_len; | 1157 | u16 i = rx_ring->next_to_use; |
1043 | 1158 | ||
1044 | i = rx_ring->next_to_use; | 1159 | /* do nothing if no valid netdev defined */ |
1045 | bi = &rx_ring->rx_buffer_info[i]; | 1160 | if (!rx_ring->netdev) |
1161 | return; | ||
1046 | 1162 | ||
1047 | while (cleaned_count--) { | 1163 | while (cleaned_count--) { |
1048 | rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i); | 1164 | rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i); |
1165 | bi = &rx_ring->rx_buffer_info[i]; | ||
1166 | skb = bi->skb; | ||
1049 | 1167 | ||
1050 | if (!bi->page_dma && | 1168 | if (!skb) { |
1051 | (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)) { | 1169 | skb = netdev_alloc_skb_ip_align(rx_ring->netdev, |
1052 | if (!bi->page) { | 1170 | rx_ring->rx_buf_len); |
1053 | bi->page = netdev_alloc_page(netdev); | ||
1054 | if (!bi->page) { | ||
1055 | adapter->alloc_rx_page_failed++; | ||
1056 | goto no_buffers; | ||
1057 | } | ||
1058 | bi->page_offset = 0; | ||
1059 | } else { | ||
1060 | /* use a half page if we're re-using */ | ||
1061 | bi->page_offset ^= (PAGE_SIZE / 2); | ||
1062 | } | ||
1063 | |||
1064 | bi->page_dma = dma_map_page(&pdev->dev, bi->page, | ||
1065 | bi->page_offset, | ||
1066 | (PAGE_SIZE / 2), | ||
1067 | DMA_FROM_DEVICE); | ||
1068 | } | ||
1069 | |||
1070 | if (!bi->skb) { | ||
1071 | struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev, | ||
1072 | bufsz); | ||
1073 | bi->skb = skb; | ||
1074 | |||
1075 | if (!skb) { | 1171 | if (!skb) { |
1076 | adapter->alloc_rx_buff_failed++; | 1172 | rx_ring->rx_stats.alloc_rx_buff_failed++; |
1077 | goto no_buffers; | 1173 | goto no_buffers; |
1078 | } | 1174 | } |
1079 | /* initialize queue mapping */ | 1175 | /* initialize queue mapping */ |
1080 | skb_record_rx_queue(skb, rx_ring->queue_index); | 1176 | skb_record_rx_queue(skb, rx_ring->queue_index); |
1177 | bi->skb = skb; | ||
1081 | } | 1178 | } |
1082 | 1179 | ||
1083 | if (!bi->dma) { | 1180 | if (!bi->dma) { |
1084 | bi->dma = dma_map_single(&pdev->dev, | 1181 | bi->dma = dma_map_single(rx_ring->dev, |
1085 | bi->skb->data, | 1182 | skb->data, |
1086 | rx_ring->rx_buf_len, | 1183 | rx_ring->rx_buf_len, |
1087 | DMA_FROM_DEVICE); | 1184 | DMA_FROM_DEVICE); |
1185 | if (dma_mapping_error(rx_ring->dev, bi->dma)) { | ||
1186 | rx_ring->rx_stats.alloc_rx_buff_failed++; | ||
1187 | bi->dma = 0; | ||
1188 | goto no_buffers; | ||
1189 | } | ||
1088 | } | 1190 | } |
1089 | /* Refresh the desc even if buffer_addrs didn't change because | 1191 | |
1090 | * each write-back erases this info. */ | 1192 | if (ring_is_ps_enabled(rx_ring)) { |
1091 | if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) { | 1193 | if (!bi->page) { |
1194 | bi->page = netdev_alloc_page(rx_ring->netdev); | ||
1195 | if (!bi->page) { | ||
1196 | rx_ring->rx_stats.alloc_rx_page_failed++; | ||
1197 | goto no_buffers; | ||
1198 | } | ||
1199 | } | ||
1200 | |||
1201 | if (!bi->page_dma) { | ||
1202 | /* use a half page if we're re-using */ | ||
1203 | bi->page_offset ^= PAGE_SIZE / 2; | ||
1204 | bi->page_dma = dma_map_page(rx_ring->dev, | ||
1205 | bi->page, | ||
1206 | bi->page_offset, | ||
1207 | PAGE_SIZE / 2, | ||
1208 | DMA_FROM_DEVICE); | ||
1209 | if (dma_mapping_error(rx_ring->dev, | ||
1210 | bi->page_dma)) { | ||
1211 | rx_ring->rx_stats.alloc_rx_page_failed++; | ||
1212 | bi->page_dma = 0; | ||
1213 | goto no_buffers; | ||
1214 | } | ||
1215 | } | ||
1216 | |||
1217 | /* Refresh the desc even if buffer_addrs didn't change | ||
1218 | * because each write-back erases this info. */ | ||
1092 | rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma); | 1219 | rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma); |
1093 | rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); | 1220 | rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); |
1094 | } else { | 1221 | } else { |
@@ -1099,56 +1226,48 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, | |||
1099 | i++; | 1226 | i++; |
1100 | if (i == rx_ring->count) | 1227 | if (i == rx_ring->count) |
1101 | i = 0; | 1228 | i = 0; |
1102 | bi = &rx_ring->rx_buffer_info[i]; | ||
1103 | } | 1229 | } |
1104 | 1230 | ||
1105 | no_buffers: | 1231 | no_buffers: |
1106 | if (rx_ring->next_to_use != i) { | 1232 | if (rx_ring->next_to_use != i) { |
1107 | rx_ring->next_to_use = i; | 1233 | rx_ring->next_to_use = i; |
1108 | if (i-- == 0) | 1234 | ixgbe_release_rx_desc(rx_ring, i); |
1109 | i = (rx_ring->count - 1); | ||
1110 | |||
1111 | ixgbe_release_rx_desc(&adapter->hw, rx_ring, i); | ||
1112 | } | 1235 | } |
1113 | } | 1236 | } |
1114 | 1237 | ||
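The hunk above adds dma_mapping_error() checks after each map; the recovery pattern in isolation (sketch; helper name and error code are illustrative) is to leave the handle zeroed so the next refill pass retries the mapping instead of handing hardware a bad address:

        static int map_rx_buffer(struct device *dev, void *data,
                                 size_t len, dma_addr_t *dma)
        {
                *dma = dma_map_single(dev, data, len, DMA_FROM_DEVICE);
                if (dma_mapping_error(dev, *dma)) {
                        *dma = 0;       /* zeroed handle => retried next refill */
                        return -ENOMEM;
                }
                return 0;
        }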
1115 | static inline u16 ixgbe_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc) | 1238 | static inline u16 ixgbe_get_hlen(union ixgbe_adv_rx_desc *rx_desc) |
1116 | { | 1239 | { |
1117 | return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info; | 1240 | /* HW will not DMA in data larger than the given buffer, even if it |
1118 | } | 1241 | * parses the header (NFS, notably) to be larger. In that case, it |
1119 | 1242 | * fills the header buffer and spills the rest into the page. | |
1120 | static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc) | 1243 | */ |
1121 | { | 1244 | u16 hdr_info = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info); |
1122 | return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; | 1245 | u16 hlen = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >> |
1123 | } | 1246 | IXGBE_RXDADV_HDRBUFLEN_SHIFT; |
1124 | 1247 | if (hlen > IXGBE_RX_HDR_SIZE) | |
1125 | static inline u32 ixgbe_get_rsc_count(union ixgbe_adv_rx_desc *rx_desc) | 1248 | hlen = IXGBE_RX_HDR_SIZE; |
1126 | { | 1249 | return hlen; |
1127 | return (le32_to_cpu(rx_desc->wb.lower.lo_dword.data) & | ||
1128 | IXGBE_RXDADV_RSCCNT_MASK) >> | ||
1129 | IXGBE_RXDADV_RSCCNT_SHIFT; | ||
1130 | } | 1250 | } |
1131 | 1251 | ||
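A worked example of the extraction in ixgbe_get_hlen(), assuming the usual definitions IXGBE_RXDADV_HDRBUFLEN_MASK == 0x7FE0 and IXGBE_RXDADV_HDRBUFLEN_SHIFT == 5 (verify against ixgbe_type.h):

        /* hdr_info = 0x0840:
         *   (0x0840 & 0x7FE0) >> 5 = 0x42 = 66 bytes of header,
         * i.e. hardware DMA'd a 66-byte header into the header buffer and
         * spilled any remaining payload into the half page; the result is
         * then capped at IXGBE_RX_HDR_SIZE.
         */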
1132 | /** | 1252 | /** |
1133 | * ixgbe_transform_rsc_queue - change rsc queue into a full packet | 1253 | * ixgbe_transform_rsc_queue - change rsc queue into a full packet |
1134 | * @skb: pointer to the last skb in the rsc queue | 1254 | * @skb: pointer to the last skb in the rsc queue |
1135 | * @count: pointer to number of packets coalesced in this context | ||
1136 | * | 1255 | * |
1137 | * This function changes a queue full of hw rsc buffers into a completed | 1256 | * This function changes a queue full of hw rsc buffers into a completed |
1138 | * packet. It uses the ->prev pointers to find the first packet and then | 1257 | * packet. It uses the ->prev pointers to find the first packet and then |
1139 | * turns it into the frag list owner. | 1258 | * turns it into the frag list owner. |
1140 | **/ | 1259 | **/ |
1141 | static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb, | 1260 | static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb) |
1142 | u64 *count) | ||
1143 | { | 1261 | { |
1144 | unsigned int frag_list_size = 0; | 1262 | unsigned int frag_list_size = 0; |
1263 | unsigned int skb_cnt = 1; | ||
1145 | 1264 | ||
1146 | while (skb->prev) { | 1265 | while (skb->prev) { |
1147 | struct sk_buff *prev = skb->prev; | 1266 | struct sk_buff *prev = skb->prev; |
1148 | frag_list_size += skb->len; | 1267 | frag_list_size += skb->len; |
1149 | skb->prev = NULL; | 1268 | skb->prev = NULL; |
1150 | skb = prev; | 1269 | skb = prev; |
1151 | *count += 1; | 1270 | skb_cnt++; |
1152 | } | 1271 | } |
1153 | 1272 | ||
1154 | skb_shinfo(skb)->frag_list = skb->next; | 1273 | skb_shinfo(skb)->frag_list = skb->next; |
@@ -1156,68 +1275,59 @@ static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb, | |||
1156 | skb->len += frag_list_size; | 1275 | skb->len += frag_list_size; |
1157 | skb->data_len += frag_list_size; | 1276 | skb->data_len += frag_list_size; |
1158 | skb->truesize += frag_list_size; | 1277 | skb->truesize += frag_list_size; |
1278 | IXGBE_RSC_CB(skb)->skb_cnt = skb_cnt; | ||
1279 | |||
1159 | return skb; | 1280 | return skb; |
1160 | } | 1281 | } |
1161 | 1282 | ||
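A before/after picture of the transformation (illustrative diagram only):

        /* entering with skbN, the skb of the final (EOP) descriptor:
         *
         *   before:  skb0 <-prev- skb1 <-prev- ... <-prev- skbN
         *   after:   skb0, with skb_shinfo(skb0)->frag_list = skb1 -> ... -> skbN
         *
         * skb0->len/data_len/truesize grow by the bytes held in skb1..skbN,
         * and IXGBE_RSC_CB(skb0)->skb_cnt records all N + 1 buffers so the
         * rsc_count statistics stay accurate in the non-packet-split case.
         */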
1162 | struct ixgbe_rsc_cb { | 1283 | static inline bool ixgbe_get_rsc_state(union ixgbe_adv_rx_desc *rx_desc) |
1163 | dma_addr_t dma; | 1284 | { |
1164 | bool delay_unmap; | 1285 | return !!(le32_to_cpu(rx_desc->wb.lower.lo_dword.data) & |
1165 | }; | 1286 | IXGBE_RXDADV_RSCCNT_MASK); |
1166 | 1287 | } | |
1167 | #define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb) | ||
1168 | 1288 | ||
1169 | static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, | 1289 | static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, |
1170 | struct ixgbe_ring *rx_ring, | 1290 | struct ixgbe_ring *rx_ring, |
1171 | int *work_done, int work_to_do) | 1291 | int *work_done, int work_to_do) |
1172 | { | 1292 | { |
1173 | struct ixgbe_adapter *adapter = q_vector->adapter; | 1293 | struct ixgbe_adapter *adapter = q_vector->adapter; |
1174 | struct pci_dev *pdev = adapter->pdev; | ||
1175 | union ixgbe_adv_rx_desc *rx_desc, *next_rxd; | 1294 | union ixgbe_adv_rx_desc *rx_desc, *next_rxd; |
1176 | struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer; | 1295 | struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer; |
1177 | struct sk_buff *skb; | 1296 | struct sk_buff *skb; |
1178 | unsigned int i, rsc_count = 0; | ||
1179 | u32 len, staterr; | ||
1180 | u16 hdr_info; | ||
1181 | bool cleaned = false; | ||
1182 | int cleaned_count = 0; | ||
1183 | unsigned int total_rx_bytes = 0, total_rx_packets = 0; | 1297 | unsigned int total_rx_bytes = 0, total_rx_packets = 0; |
1298 | const int current_node = numa_node_id(); | ||
1184 | #ifdef IXGBE_FCOE | 1299 | #ifdef IXGBE_FCOE |
1185 | int ddp_bytes = 0; | 1300 | int ddp_bytes = 0; |
1186 | #endif /* IXGBE_FCOE */ | 1301 | #endif /* IXGBE_FCOE */ |
1302 | u32 staterr; | ||
1303 | u16 i; | ||
1304 | u16 cleaned_count = 0; | ||
1305 | bool pkt_is_rsc = false; | ||
1187 | 1306 | ||
1188 | i = rx_ring->next_to_clean; | 1307 | i = rx_ring->next_to_clean; |
1189 | rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i); | 1308 | rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i); |
1190 | staterr = le32_to_cpu(rx_desc->wb.upper.status_error); | 1309 | staterr = le32_to_cpu(rx_desc->wb.upper.status_error); |
1191 | rx_buffer_info = &rx_ring->rx_buffer_info[i]; | ||
1192 | 1310 | ||
1193 | while (staterr & IXGBE_RXD_STAT_DD) { | 1311 | while (staterr & IXGBE_RXD_STAT_DD) { |
1194 | u32 upper_len = 0; | 1312 | u32 upper_len = 0; |
1195 | if (*work_done >= work_to_do) | ||
1196 | break; | ||
1197 | (*work_done)++; | ||
1198 | 1313 | ||
1199 | rmb(); /* read descriptor and rx_buffer_info after status DD */ | 1314 | rmb(); /* read descriptor and rx_buffer_info after status DD */ |
1200 | if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) { | ||
1201 | hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc)); | ||
1202 | len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >> | ||
1203 | IXGBE_RXDADV_HDRBUFLEN_SHIFT; | ||
1204 | upper_len = le16_to_cpu(rx_desc->wb.upper.length); | ||
1205 | if ((len > IXGBE_RX_HDR_SIZE) || | ||
1206 | (upper_len && !(hdr_info & IXGBE_RXDADV_SPH))) | ||
1207 | len = IXGBE_RX_HDR_SIZE; | ||
1208 | } else { | ||
1209 | len = le16_to_cpu(rx_desc->wb.upper.length); | ||
1210 | } | ||
1211 | 1315 | ||
1212 | cleaned = true; | 1316 | rx_buffer_info = &rx_ring->rx_buffer_info[i]; |
1317 | |||
1213 | skb = rx_buffer_info->skb; | 1318 | skb = rx_buffer_info->skb; |
1214 | prefetch(skb->data); | ||
1215 | rx_buffer_info->skb = NULL; | 1319 | rx_buffer_info->skb = NULL; |
1320 | prefetch(skb->data); | ||
1321 | |||
1322 | if (ring_is_rsc_enabled(rx_ring)) | ||
1323 | pkt_is_rsc = ixgbe_get_rsc_state(rx_desc); | ||
1216 | 1324 | ||
1325 | /* if this is an skb from a previous receive, DMA will be 0 */ | ||
1217 | if (rx_buffer_info->dma) { | 1326 | if (rx_buffer_info->dma) { |
1218 | if ((adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) && | 1327 | u16 hlen; |
1219 | (!(staterr & IXGBE_RXD_STAT_EOP)) && | 1328 | if (pkt_is_rsc && |
1220 | (!(skb->prev))) { | 1329 | !(staterr & IXGBE_RXD_STAT_EOP) && |
1330 | !skb->prev) { | ||
1221 | /* | 1331 | /* |
1222 | * When HWRSC is enabled, delay unmapping | 1332 | * When HWRSC is enabled, delay unmapping |
1223 | * of the first packet. It carries the | 1333 | * of the first packet. It carries the |
@@ -1228,29 +1338,42 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, | |||
1228 | IXGBE_RSC_CB(skb)->delay_unmap = true; | 1338 | IXGBE_RSC_CB(skb)->delay_unmap = true; |
1229 | IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma; | 1339 | IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma; |
1230 | } else { | 1340 | } else { |
1231 | dma_unmap_single(&pdev->dev, | 1341 | dma_unmap_single(rx_ring->dev, |
1232 | rx_buffer_info->dma, | 1342 | rx_buffer_info->dma, |
1233 | rx_ring->rx_buf_len, | 1343 | rx_ring->rx_buf_len, |
1234 | DMA_FROM_DEVICE); | 1344 | DMA_FROM_DEVICE); |
1235 | } | 1345 | } |
1236 | rx_buffer_info->dma = 0; | 1346 | rx_buffer_info->dma = 0; |
1237 | skb_put(skb, len); | 1347 | |
1348 | if (ring_is_ps_enabled(rx_ring)) { | ||
1349 | hlen = ixgbe_get_hlen(rx_desc); | ||
1350 | upper_len = le16_to_cpu(rx_desc->wb.upper.length); | ||
1351 | } else { | ||
1352 | hlen = le16_to_cpu(rx_desc->wb.upper.length); | ||
1353 | } | ||
1354 | |||
1355 | skb_put(skb, hlen); | ||
1356 | } else { | ||
1357 | /* assume packet split since header is unmapped */ | ||
1358 | upper_len = le16_to_cpu(rx_desc->wb.upper.length); | ||
1238 | } | 1359 | } |
1239 | 1360 | ||
1240 | if (upper_len) { | 1361 | if (upper_len) { |
1241 | dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma, | 1362 | dma_unmap_page(rx_ring->dev, |
1242 | PAGE_SIZE / 2, DMA_FROM_DEVICE); | 1363 | rx_buffer_info->page_dma, |
1364 | PAGE_SIZE / 2, | ||
1365 | DMA_FROM_DEVICE); | ||
1243 | rx_buffer_info->page_dma = 0; | 1366 | rx_buffer_info->page_dma = 0; |
1244 | skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, | 1367 | skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, |
1245 | rx_buffer_info->page, | 1368 | rx_buffer_info->page, |
1246 | rx_buffer_info->page_offset, | 1369 | rx_buffer_info->page_offset, |
1247 | upper_len); | 1370 | upper_len); |
1248 | 1371 | ||
1249 | if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) || | 1372 | if ((page_count(rx_buffer_info->page) == 1) && |
1250 | (page_count(rx_buffer_info->page) != 1)) | 1373 | (page_to_nid(rx_buffer_info->page) == current_node)) |
1251 | rx_buffer_info->page = NULL; | ||
1252 | else | ||
1253 | get_page(rx_buffer_info->page); | 1374 | get_page(rx_buffer_info->page); |
1375 | else | ||
1376 | rx_buffer_info->page = NULL; | ||
1254 | 1377 | ||
1255 | skb->len += upper_len; | 1378 | skb->len += upper_len; |
1256 | skb->data_len += upper_len; | 1379 | skb->data_len += upper_len; |
@@ -1265,10 +1388,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, | |||
1265 | prefetch(next_rxd); | 1388 | prefetch(next_rxd); |
1266 | cleaned_count++; | 1389 | cleaned_count++; |
1267 | 1390 | ||
1268 | if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) | 1391 | if (pkt_is_rsc) { |
1269 | rsc_count = ixgbe_get_rsc_count(rx_desc); | ||
1270 | |||
1271 | if (rsc_count) { | ||
1272 | u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >> | 1392 | u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >> |
1273 | IXGBE_RXDADV_NEXTP_SHIFT; | 1393 | IXGBE_RXDADV_NEXTP_SHIFT; |
1274 | next_buffer = &rx_ring->rx_buffer_info[nextp]; | 1394 | next_buffer = &rx_ring->rx_buffer_info[nextp]; |
@@ -1276,32 +1396,8 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, | |||
1276 | next_buffer = &rx_ring->rx_buffer_info[i]; | 1396 | next_buffer = &rx_ring->rx_buffer_info[i]; |
1277 | } | 1397 | } |
1278 | 1398 | ||
1279 | if (staterr & IXGBE_RXD_STAT_EOP) { | 1399 | if (!(staterr & IXGBE_RXD_STAT_EOP)) { |
1280 | if (skb->prev) | 1400 | if (ring_is_ps_enabled(rx_ring)) { |
1281 | skb = ixgbe_transform_rsc_queue(skb, | ||
1282 | &(rx_ring->rsc_count)); | ||
1283 | if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { | ||
1284 | if (IXGBE_RSC_CB(skb)->delay_unmap) { | ||
1285 | dma_unmap_single(&pdev->dev, | ||
1286 | IXGBE_RSC_CB(skb)->dma, | ||
1287 | rx_ring->rx_buf_len, | ||
1288 | DMA_FROM_DEVICE); | ||
1289 | IXGBE_RSC_CB(skb)->dma = 0; | ||
1290 | IXGBE_RSC_CB(skb)->delay_unmap = false; | ||
1291 | } | ||
1292 | if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) | ||
1293 | rx_ring->rsc_count += | ||
1294 | skb_shinfo(skb)->nr_frags; | ||
1295 | else | ||
1296 | rx_ring->rsc_count++; | ||
1297 | rx_ring->rsc_flush++; | ||
1298 | } | ||
1299 | u64_stats_update_begin(&rx_ring->syncp); | ||
1300 | rx_ring->stats.packets++; | ||
1301 | rx_ring->stats.bytes += skb->len; | ||
1302 | u64_stats_update_end(&rx_ring->syncp); | ||
1303 | } else { | ||
1304 | if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) { | ||
1305 | rx_buffer_info->skb = next_buffer->skb; | 1401 | rx_buffer_info->skb = next_buffer->skb; |
1306 | rx_buffer_info->dma = next_buffer->dma; | 1402 | rx_buffer_info->dma = next_buffer->dma; |
1307 | next_buffer->skb = skb; | 1403 | next_buffer->skb = skb; |
@@ -1310,12 +1406,45 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, | |||
1310 | skb->next = next_buffer->skb; | 1406 | skb->next = next_buffer->skb; |
1311 | skb->next->prev = skb; | 1407 | skb->next->prev = skb; |
1312 | } | 1408 | } |
1313 | rx_ring->non_eop_descs++; | 1409 | rx_ring->rx_stats.non_eop_descs++; |
1314 | goto next_desc; | 1410 | goto next_desc; |
1315 | } | 1411 | } |
1316 | 1412 | ||
1413 | if (skb->prev) { | ||
1414 | skb = ixgbe_transform_rsc_queue(skb); | ||
1415 | /* if we got here without RSC the packet is invalid */ | ||
1416 | if (!pkt_is_rsc) { | ||
1417 | __pskb_trim(skb, 0); | ||
1418 | rx_buffer_info->skb = skb; | ||
1419 | goto next_desc; | ||
1420 | } | ||
1421 | } | ||
1422 | |||
1423 | if (ring_is_rsc_enabled(rx_ring)) { | ||
1424 | if (IXGBE_RSC_CB(skb)->delay_unmap) { | ||
1425 | dma_unmap_single(rx_ring->dev, | ||
1426 | IXGBE_RSC_CB(skb)->dma, | ||
1427 | rx_ring->rx_buf_len, | ||
1428 | DMA_FROM_DEVICE); | ||
1429 | IXGBE_RSC_CB(skb)->dma = 0; | ||
1430 | IXGBE_RSC_CB(skb)->delay_unmap = false; | ||
1431 | } | ||
1432 | } | ||
1433 | if (pkt_is_rsc) { | ||
1434 | if (ring_is_ps_enabled(rx_ring)) | ||
1435 | rx_ring->rx_stats.rsc_count += | ||
1436 | skb_shinfo(skb)->nr_frags; | ||
1437 | else | ||
1438 | rx_ring->rx_stats.rsc_count += | ||
1439 | IXGBE_RSC_CB(skb)->skb_cnt; | ||
1440 | rx_ring->rx_stats.rsc_flush++; | ||
1441 | } | ||
1442 | |||
1443 | /* ERR_MASK will only have valid bits if EOP set */ | ||
1317 | if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) { | 1444 | if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) { |
1318 | dev_kfree_skb_irq(skb); | 1445 | /* trim packet back to size 0 and recycle it */ |
1446 | __pskb_trim(skb, 0); | ||
1447 | rx_buffer_info->skb = skb; | ||
1319 | goto next_desc; | 1448 | goto next_desc; |
1320 | } | 1449 | } |
1321 | 1450 | ||
@@ -1325,7 +1454,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, | |||
1325 | total_rx_bytes += skb->len; | 1454 | total_rx_bytes += skb->len; |
1326 | total_rx_packets++; | 1455 | total_rx_packets++; |
1327 | 1456 | ||
1328 | skb->protocol = eth_type_trans(skb, adapter->netdev); | 1457 | skb->protocol = eth_type_trans(skb, rx_ring->netdev); |
1329 | #ifdef IXGBE_FCOE | 1458 | #ifdef IXGBE_FCOE |
1330 | /* if ddp, not passing to ULD unless for FCP_RSP or error */ | 1459 | /* if ddp, not passing to ULD unless for FCP_RSP or error */ |
1331 | if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { | 1460 | if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { |
@@ -1339,16 +1468,18 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, | |||
1339 | next_desc: | 1468 | next_desc: |
1340 | rx_desc->wb.upper.status_error = 0; | 1469 | rx_desc->wb.upper.status_error = 0; |
1341 | 1470 | ||
1471 | (*work_done)++; | ||
1472 | if (*work_done >= work_to_do) | ||
1473 | break; | ||
1474 | |||
1342 | /* return some buffers to hardware, one at a time is too slow */ | 1475 | /* return some buffers to hardware, one at a time is too slow */ |
1343 | if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) { | 1476 | if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) { |
1344 | ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count); | 1477 | ixgbe_alloc_rx_buffers(rx_ring, cleaned_count); |
1345 | cleaned_count = 0; | 1478 | cleaned_count = 0; |
1346 | } | 1479 | } |
1347 | 1480 | ||
1348 | /* use prefetched values */ | 1481 | /* use prefetched values */ |
1349 | rx_desc = next_rxd; | 1482 | rx_desc = next_rxd; |
1350 | rx_buffer_info = &rx_ring->rx_buffer_info[i]; | ||
1351 | |||
1352 | staterr = le32_to_cpu(rx_desc->wb.upper.status_error); | 1483 | staterr = le32_to_cpu(rx_desc->wb.upper.status_error); |
1353 | } | 1484 | } |
1354 | 1485 | ||
@@ -1356,14 +1487,14 @@ next_desc: | |||
1356 | cleaned_count = IXGBE_DESC_UNUSED(rx_ring); | 1487 | cleaned_count = IXGBE_DESC_UNUSED(rx_ring); |
1357 | 1488 | ||
1358 | if (cleaned_count) | 1489 | if (cleaned_count) |
1359 | ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count); | 1490 | ixgbe_alloc_rx_buffers(rx_ring, cleaned_count); |
1360 | 1491 | ||
1361 | #ifdef IXGBE_FCOE | 1492 | #ifdef IXGBE_FCOE |
1362 | /* include DDPed FCoE data */ | 1493 | /* include DDPed FCoE data */ |
1363 | if (ddp_bytes > 0) { | 1494 | if (ddp_bytes > 0) { |
1364 | unsigned int mss; | 1495 | unsigned int mss; |
1365 | 1496 | ||
1366 | mss = adapter->netdev->mtu - sizeof(struct fcoe_hdr) - | 1497 | mss = rx_ring->netdev->mtu - sizeof(struct fcoe_hdr) - |
1367 | sizeof(struct fc_frame_header) - | 1498 | sizeof(struct fc_frame_header) - |
1368 | sizeof(struct fcoe_crc_eof); | 1499 | sizeof(struct fcoe_crc_eof); |
1369 | if (mss > 512) | 1500 | if (mss > 512) |
@@ -1375,8 +1506,10 @@ next_desc: | |||
1375 | 1506 | ||
1376 | rx_ring->total_packets += total_rx_packets; | 1507 | rx_ring->total_packets += total_rx_packets; |
1377 | rx_ring->total_bytes += total_rx_bytes; | 1508 | rx_ring->total_bytes += total_rx_bytes; |
1378 | 1509 | u64_stats_update_begin(&rx_ring->syncp); | |
1379 | return cleaned; | 1510 | rx_ring->stats.packets += total_rx_packets; |
1511 | rx_ring->stats.bytes += total_rx_bytes; | ||
1512 | u64_stats_update_end(&rx_ring->syncp); | ||
1380 | } | 1513 | } |
1381 | 1514 | ||
1382 | static int ixgbe_clean_rxonly(struct napi_struct *, int); | 1515 | static int ixgbe_clean_rxonly(struct napi_struct *, int); |
@@ -1390,7 +1523,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *, int); | |||
1390 | static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) | 1523 | static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) |
1391 | { | 1524 | { |
1392 | struct ixgbe_q_vector *q_vector; | 1525 | struct ixgbe_q_vector *q_vector; |
1393 | int i, j, q_vectors, v_idx, r_idx; | 1526 | int i, q_vectors, v_idx, r_idx; |
1394 | u32 mask; | 1527 | u32 mask; |
1395 | 1528 | ||
1396 | q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; | 1529 | q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; |
@@ -1406,8 +1539,8 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) | |||
1406 | adapter->num_rx_queues); | 1539 | adapter->num_rx_queues); |
1407 | 1540 | ||
1408 | for (i = 0; i < q_vector->rxr_count; i++) { | 1541 | for (i = 0; i < q_vector->rxr_count; i++) { |
1409 | j = adapter->rx_ring[r_idx]->reg_idx; | 1542 | u8 reg_idx = adapter->rx_ring[r_idx]->reg_idx; |
1410 | ixgbe_set_ivar(adapter, 0, j, v_idx); | 1543 | ixgbe_set_ivar(adapter, 0, reg_idx, v_idx); |
1411 | r_idx = find_next_bit(q_vector->rxr_idx, | 1544 | r_idx = find_next_bit(q_vector->rxr_idx, |
1412 | adapter->num_rx_queues, | 1545 | adapter->num_rx_queues, |
1413 | r_idx + 1); | 1546 | r_idx + 1); |
@@ -1416,8 +1549,8 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) | |||
1416 | adapter->num_tx_queues); | 1549 | adapter->num_tx_queues); |
1417 | 1550 | ||
1418 | for (i = 0; i < q_vector->txr_count; i++) { | 1551 | for (i = 0; i < q_vector->txr_count; i++) { |
1419 | j = adapter->tx_ring[r_idx]->reg_idx; | 1552 | u8 reg_idx = adapter->tx_ring[r_idx]->reg_idx; |
1420 | ixgbe_set_ivar(adapter, 1, j, v_idx); | 1553 | ixgbe_set_ivar(adapter, 1, reg_idx, v_idx); |
1421 | r_idx = find_next_bit(q_vector->txr_idx, | 1554 | r_idx = find_next_bit(q_vector->txr_idx, |
1422 | adapter->num_tx_queues, | 1555 | adapter->num_tx_queues, |
1423 | r_idx + 1); | 1556 | r_idx + 1); |
@@ -1448,11 +1581,19 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) | |||
1448 | } | 1581 | } |
1449 | } | 1582 | } |
1450 | 1583 | ||
1451 | if (adapter->hw.mac.type == ixgbe_mac_82598EB) | 1584 | switch (adapter->hw.mac.type) { |
1585 | case ixgbe_mac_82598EB: | ||
1452 | ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX, | 1586 | ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX, |
1453 | v_idx); | 1587 | v_idx); |
1454 | else if (adapter->hw.mac.type == ixgbe_mac_82599EB) | 1588 | break; |
1589 | case ixgbe_mac_82599EB: | ||
1590 | case ixgbe_mac_X540: | ||
1455 | ixgbe_set_ivar(adapter, -1, 1, v_idx); | 1591 | ixgbe_set_ivar(adapter, -1, 1, v_idx); |
1592 | break; | ||
1593 | |||
1594 | default: | ||
1595 | break; | ||
1596 | } | ||
1456 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950); | 1597 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950); |
1457 | 1598 | ||
1458 | /* set up to autoclear timer, and the vectors */ | 1599 | /* set up to autoclear timer, and the vectors */ |
@@ -1548,12 +1689,15 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector) | |||
1548 | int v_idx = q_vector->v_idx; | 1689 | int v_idx = q_vector->v_idx; |
1549 | u32 itr_reg = EITR_INTS_PER_SEC_TO_REG(q_vector->eitr); | 1690 | u32 itr_reg = EITR_INTS_PER_SEC_TO_REG(q_vector->eitr); |
1550 | 1691 | ||
1551 | if (adapter->hw.mac.type == ixgbe_mac_82598EB) { | 1692 | switch (adapter->hw.mac.type) { |
1693 | case ixgbe_mac_82598EB: | ||
1552 | /* must write high and low 16 bits to reset counter */ | 1694 | /* must write high and low 16 bits to reset counter */ |
1553 | itr_reg |= (itr_reg << 16); | 1695 | itr_reg |= (itr_reg << 16); |
1554 | } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) { | 1696 | break; |
1697 | case ixgbe_mac_82599EB: | ||
1698 | case ixgbe_mac_X540: | ||
1555 | /* | 1699 | /* |
1556 | * 82599 can support a value of zero, so allow it for | 1700 | * 82599 and X540 can support a value of zero, so allow it for |
1557 | * max interrupt rate, but there is an errata where it can | 1701 | * max interrupt rate, but there is an erratum where it |
1558 | * not be zero with RSC | 1702 | * cannot be zero with RSC |
1559 | */ | 1703 | */ |
@@ -1566,6 +1710,9 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector) | |||
1566 | * immediate assertion of the interrupt | 1710 | * immediate assertion of the interrupt |
1567 | */ | 1711 | */ |
1568 | itr_reg |= IXGBE_EITR_CNT_WDIS; | 1712 | itr_reg |= IXGBE_EITR_CNT_WDIS; |
1713 | break; | ||
1714 | default: | ||
1715 | break; | ||
1569 | } | 1716 | } |
1570 | IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg); | 1717 | IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg); |
1571 | } | 1718 | } |
@@ -1573,14 +1720,13 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector) | |||
1573 | static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector) | 1720 | static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector) |
1574 | { | 1721 | { |
1575 | struct ixgbe_adapter *adapter = q_vector->adapter; | 1722 | struct ixgbe_adapter *adapter = q_vector->adapter; |
1723 | int i, r_idx; | ||
1576 | u32 new_itr; | 1724 | u32 new_itr; |
1577 | u8 current_itr, ret_itr; | 1725 | u8 current_itr, ret_itr; |
1578 | int i, r_idx; | ||
1579 | struct ixgbe_ring *rx_ring, *tx_ring; | ||
1580 | 1726 | ||
1581 | r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); | 1727 | r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); |
1582 | for (i = 0; i < q_vector->txr_count; i++) { | 1728 | for (i = 0; i < q_vector->txr_count; i++) { |
1583 | tx_ring = adapter->tx_ring[r_idx]; | 1729 | struct ixgbe_ring *tx_ring = adapter->tx_ring[r_idx]; |
1584 | ret_itr = ixgbe_update_itr(adapter, q_vector->eitr, | 1730 | ret_itr = ixgbe_update_itr(adapter, q_vector->eitr, |
1585 | q_vector->tx_itr, | 1731 | q_vector->tx_itr, |
1586 | tx_ring->total_packets, | 1732 | tx_ring->total_packets, |
@@ -1595,7 +1741,7 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector) | |||
1595 | 1741 | ||
1596 | r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); | 1742 | r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); |
1597 | for (i = 0; i < q_vector->rxr_count; i++) { | 1743 | for (i = 0; i < q_vector->rxr_count; i++) { |
1598 | rx_ring = adapter->rx_ring[r_idx]; | 1744 | struct ixgbe_ring *rx_ring = adapter->rx_ring[r_idx]; |
1599 | ret_itr = ixgbe_update_itr(adapter, q_vector->eitr, | 1745 | ret_itr = ixgbe_update_itr(adapter, q_vector->eitr, |
1600 | q_vector->rx_itr, | 1746 | q_vector->rx_itr, |
1601 | rx_ring->total_packets, | 1747 | rx_ring->total_packets, |
@@ -1626,7 +1772,7 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector) | |||
1626 | 1772 | ||
1627 | if (new_itr != q_vector->eitr) { | 1773 | if (new_itr != q_vector->eitr) { |
1628 | /* do an exponential smoothing */ | 1774 | /* do an exponential smoothing */ |
1629 | new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100); | 1775 | new_itr = ((q_vector->eitr * 9) + new_itr)/10; |
1630 | 1776 | ||
1631 | /* save the algorithm value here, not the smoothed one */ | 1777 | /* save the algorithm value here, not the smoothed one */ |
1632 | q_vector->eitr = new_itr; | 1778 | q_vector->eitr = new_itr; |
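The replacement line computes the same 90/10 exponential moving average with a single integer division; a worked example (hypothetical values):

        /* old eitr = 8000 ints/s, measured new_itr = 20000:
         *   old form: (8000 * 90)/100 + (20000 * 10)/100 = 7200 + 2000 = 9200
         *   new form: (8000 * 9 + 20000)/10 = 92000/10             = 9200
         * Same result here, but the new form truncates only once, so repeated
         * updates accumulate less rounding error than two truncating divisions.
         */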
@@ -1694,17 +1840,18 @@ static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr) | |||
1694 | { | 1840 | { |
1695 | struct ixgbe_hw *hw = &adapter->hw; | 1841 | struct ixgbe_hw *hw = &adapter->hw; |
1696 | 1842 | ||
1843 | if (eicr & IXGBE_EICR_GPI_SDP2) { | ||
1844 | /* Clear the interrupt */ | ||
1845 | IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2); | ||
1846 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) | ||
1847 | schedule_work(&adapter->sfp_config_module_task); | ||
1848 | } | ||
1849 | |||
1697 | if (eicr & IXGBE_EICR_GPI_SDP1) { | 1850 | if (eicr & IXGBE_EICR_GPI_SDP1) { |
1698 | /* Clear the interrupt */ | 1851 | /* Clear the interrupt */ |
1699 | IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1); | 1852 | IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1); |
1700 | schedule_work(&adapter->multispeed_fiber_task); | 1853 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) |
1701 | } else if (eicr & IXGBE_EICR_GPI_SDP2) { | 1854 | schedule_work(&adapter->multispeed_fiber_task); |
1702 | /* Clear the interrupt */ | ||
1703 | IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2); | ||
1704 | schedule_work(&adapter->sfp_config_module_task); | ||
1705 | } else { | ||
1706 | /* Interrupt isn't for us... */ | ||
1707 | return; | ||
1708 | } | 1855 | } |
1709 | } | 1856 | } |
1710 | 1857 | ||
@@ -1744,16 +1891,9 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data) | |||
1744 | if (eicr & IXGBE_EICR_MAILBOX) | 1891 | if (eicr & IXGBE_EICR_MAILBOX) |
1745 | ixgbe_msg_task(adapter); | 1892 | ixgbe_msg_task(adapter); |
1746 | 1893 | ||
1747 | if (hw->mac.type == ixgbe_mac_82598EB) | 1894 | switch (hw->mac.type) { |
1748 | ixgbe_check_fan_failure(adapter, eicr); | 1895 | case ixgbe_mac_82599EB: |
1749 | 1896 | case ixgbe_mac_X540: | |
1750 | if (hw->mac.type == ixgbe_mac_82599EB) { | ||
1751 | ixgbe_check_sfp_event(adapter, eicr); | ||
1752 | adapter->interrupt_event = eicr; | ||
1753 | if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) && | ||
1754 | ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) | ||
1755 | schedule_work(&adapter->check_overtemp_task); | ||
1756 | |||
1757 | /* Handle Flow Director Full threshold interrupt */ | 1897 | /* Handle Flow Director Full threshold interrupt */ |
1758 | if (eicr & IXGBE_EICR_FLOW_DIR) { | 1898 | if (eicr & IXGBE_EICR_FLOW_DIR) { |
1759 | int i; | 1899 | int i; |
@@ -1763,12 +1903,24 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data) | |||
1763 | for (i = 0; i < adapter->num_tx_queues; i++) { | 1903 | for (i = 0; i < adapter->num_tx_queues; i++) { |
1764 | struct ixgbe_ring *tx_ring = | 1904 | struct ixgbe_ring *tx_ring = |
1765 | adapter->tx_ring[i]; | 1905 | adapter->tx_ring[i]; |
1766 | if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE, | 1906 | if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE, |
1767 | &tx_ring->reinit_state)) | 1907 | &tx_ring->state)) |
1768 | schedule_work(&adapter->fdir_reinit_task); | 1908 | schedule_work(&adapter->fdir_reinit_task); |
1769 | } | 1909 | } |
1770 | } | 1910 | } |
1911 | ixgbe_check_sfp_event(adapter, eicr); | ||
1912 | if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) && | ||
1913 | ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) { | ||
1914 | adapter->interrupt_event = eicr; | ||
1915 | schedule_work(&adapter->check_overtemp_task); | ||
1916 | } | ||
1917 | break; | ||
1918 | default: | ||
1919 | break; | ||
1771 | } | 1920 | } |
1921 | |||
1922 | ixgbe_check_fan_failure(adapter, eicr); | ||
1923 | |||
1772 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) | 1924 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) |
1773 | IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER); | 1925 | IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER); |
1774 | 1926 | ||
@@ -1779,15 +1931,24 @@ static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter, | |||
1779 | u64 qmask) | 1931 | u64 qmask) |
1780 | { | 1932 | { |
1781 | u32 mask; | 1933 | u32 mask; |
1934 | struct ixgbe_hw *hw = &adapter->hw; | ||
1782 | 1935 | ||
1783 | if (adapter->hw.mac.type == ixgbe_mac_82598EB) { | 1936 | switch (hw->mac.type) { |
1937 | case ixgbe_mac_82598EB: | ||
1784 | mask = (IXGBE_EIMS_RTX_QUEUE & qmask); | 1938 | mask = (IXGBE_EIMS_RTX_QUEUE & qmask); |
1785 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); | 1939 | IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask); |
1786 | } else { | 1940 | break; |
1941 | case ixgbe_mac_82599EB: | ||
1942 | case ixgbe_mac_X540: | ||
1787 | mask = (qmask & 0xFFFFFFFF); | 1943 | mask = (qmask & 0xFFFFFFFF); |
1788 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(0), mask); | 1944 | if (mask) |
1945 | IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); | ||
1789 | mask = (qmask >> 32); | 1946 | mask = (qmask >> 32); |
1790 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1), mask); | 1947 | if (mask) |
1948 | IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); | ||
1949 | break; | ||
1950 | default: | ||
1951 | break; | ||
1791 | } | 1952 | } |
1792 | /* skip the flush */ | 1953 | /* skip the flush */ |
1793 | } | 1954 | } |
@@ -1796,15 +1957,24 @@ static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter, | |||
1796 | u64 qmask) | 1957 | u64 qmask) |
1797 | { | 1958 | { |
1798 | u32 mask; | 1959 | u32 mask; |
1960 | struct ixgbe_hw *hw = &adapter->hw; | ||
1799 | 1961 | ||
1800 | if (adapter->hw.mac.type == ixgbe_mac_82598EB) { | 1962 | switch (hw->mac.type) { |
1963 | case ixgbe_mac_82598EB: | ||
1801 | mask = (IXGBE_EIMS_RTX_QUEUE & qmask); | 1964 | mask = (IXGBE_EIMS_RTX_QUEUE & qmask); |
1802 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, mask); | 1965 | IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask); |
1803 | } else { | 1966 | break; |
1967 | case ixgbe_mac_82599EB: | ||
1968 | case ixgbe_mac_X540: | ||
1804 | mask = (qmask & 0xFFFFFFFF); | 1969 | mask = (qmask & 0xFFFFFFFF); |
1805 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), mask); | 1970 | if (mask) |
1971 | IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask); | ||
1806 | mask = (qmask >> 32); | 1972 | mask = (qmask >> 32); |
1807 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), mask); | 1973 | if (mask) |
1974 | IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask); | ||
1975 | break; | ||
1976 | default: | ||
1977 | break; | ||
1808 | } | 1978 | } |
1809 | /* skip the flush */ | 1979 | /* skip the flush */ |
1810 | } | 1980 | } |
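
The two helpers above now split one 64-bit queue mask across the paired 32-bit EIMS_EX/EIMC_EX registers and skip a register write whenever that half of the mask is empty. A minimal standalone sketch of just that split, with plain variables standing in for the hardware registers (names illustrative, not driver API):

    #include <stdint.h>
    #include <stdio.h>

    /* Model: eims_ex[0] covers queues 0-31, eims_ex[1] covers 32-63. */
    static uint32_t eims_ex[2];

    static void enable_queues(uint64_t qmask)
    {
        uint32_t mask = (uint32_t)(qmask & 0xFFFFFFFF);
        if (mask)                 /* skip the MMIO write if nothing to set */
            eims_ex[0] |= mask;
        mask = (uint32_t)(qmask >> 32);
        if (mask)
            eims_ex[1] |= mask;
    }

    int main(void)
    {
        enable_queues((1ULL << 5) | (1ULL << 40));   /* queues 5 and 40 */
        /* expect EIMS_EX(0)=00000020 EIMS_EX(1)=00000100 */
        printf("EIMS_EX(0)=%08x EIMS_EX(1)=%08x\n",
               (unsigned)eims_ex[0], (unsigned)eims_ex[1]);
        return 0;
    }
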
@@ -1847,8 +2017,13 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data) | |||
1847 | int r_idx; | 2017 | int r_idx; |
1848 | int i; | 2018 | int i; |
1849 | 2019 | ||
2020 | #ifdef CONFIG_IXGBE_DCA | ||
2021 | if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) | ||
2022 | ixgbe_update_dca(q_vector); | ||
2023 | #endif | ||
2024 | |||
1850 | r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); | 2025 | r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); |
1851 | for (i = 0; i < q_vector->rxr_count; i++) { | 2026 | for (i = 0; i < q_vector->rxr_count; i++) { |
1852 | rx_ring = adapter->rx_ring[r_idx]; | 2027 | rx_ring = adapter->rx_ring[r_idx]; |
1853 | rx_ring->total_bytes = 0; | 2028 | rx_ring->total_bytes = 0; |
1854 | rx_ring->total_packets = 0; | 2029 | rx_ring->total_packets = 0; |
@@ -1859,7 +2034,6 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data) | |||
1859 | if (!q_vector->rxr_count) | 2034 | if (!q_vector->rxr_count) |
1860 | return IRQ_HANDLED; | 2035 | return IRQ_HANDLED; |
1861 | 2036 | ||
1862 | /* disable interrupts on this vector only */ | ||
1863 | /* EIAM disabled interrupts (on this vector) for us */ | 2037 | /* EIAM disabled interrupts (on this vector) for us */ |
1864 | napi_schedule(&q_vector->napi); | 2038 | napi_schedule(&q_vector->napi); |
1865 | 2039 | ||
@@ -1918,13 +2092,14 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget) | |||
1918 | int work_done = 0; | 2092 | int work_done = 0; |
1919 | long r_idx; | 2093 | long r_idx; |
1920 | 2094 | ||
1921 | r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); | ||
1922 | rx_ring = adapter->rx_ring[r_idx]; | ||
1923 | #ifdef CONFIG_IXGBE_DCA | 2095 | #ifdef CONFIG_IXGBE_DCA |
1924 | if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) | 2096 | if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) |
1925 | ixgbe_update_rx_dca(adapter, rx_ring); | 2097 | ixgbe_update_dca(q_vector); |
1926 | #endif | 2098 | #endif |
1927 | 2099 | ||
2100 | r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); | ||
2101 | rx_ring = adapter->rx_ring[r_idx]; | ||
2102 | |||
1928 | ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget); | 2103 | ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget); |
1929 | 2104 | ||
1930 | /* If all Rx work done, exit the polling mode */ | 2105 | /* If all Rx work done, exit the polling mode */ |
@@ -1958,13 +2133,14 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget) | |||
1958 | long r_idx; | 2133 | long r_idx; |
1959 | bool tx_clean_complete = true; | 2134 | bool tx_clean_complete = true; |
1960 | 2135 | ||
2136 | #ifdef CONFIG_IXGBE_DCA | ||
2137 | if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) | ||
2138 | ixgbe_update_dca(q_vector); | ||
2139 | #endif | ||
2140 | |||
1961 | r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); | 2141 | r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); |
1962 | for (i = 0; i < q_vector->txr_count; i++) { | 2142 | for (i = 0; i < q_vector->txr_count; i++) { |
1963 | ring = adapter->tx_ring[r_idx]; | 2143 | ring = adapter->tx_ring[r_idx]; |
1964 | #ifdef CONFIG_IXGBE_DCA | ||
1965 | if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) | ||
1966 | ixgbe_update_tx_dca(adapter, ring); | ||
1967 | #endif | ||
1968 | tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring); | 2144 | tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring); |
1969 | r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, | 2145 | r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, |
1970 | r_idx + 1); | 2146 | r_idx + 1); |
@@ -1977,10 +2153,6 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget) | |||
1977 | r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); | 2153 | r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); |
1978 | for (i = 0; i < q_vector->rxr_count; i++) { | 2154 | for (i = 0; i < q_vector->rxr_count; i++) { |
1979 | ring = adapter->rx_ring[r_idx]; | 2155 | ring = adapter->rx_ring[r_idx]; |
1980 | #ifdef CONFIG_IXGBE_DCA | ||
1981 | if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) | ||
1982 | ixgbe_update_rx_dca(adapter, ring); | ||
1983 | #endif | ||
1984 | ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget); | 2156 | ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget); |
1985 | r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, | 2157 | r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, |
1986 | r_idx + 1); | 2158 | r_idx + 1); |
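
The handler hunks above hoist DCA retargeting out of the per-ring loops: each interrupt now makes a single ixgbe_update_dca(q_vector) call instead of one ixgbe_update_tx_dca()/ixgbe_update_rx_dca() call per ring. ixgbe_update_dca() itself is not shown in these hunks, so the sketch below is only a guess at its shape, assuming it walks every ring the vector owns (all types and names are a pared-down model):

    #include <stdio.h>

    #define MAX_RINGS 4

    struct ring { int id; };
    struct q_vector {
        struct ring *tx[MAX_RINGS], *rx[MAX_RINGS];
        int txr_count, rxr_count;
    };

    /* stand-in for the per-ring DCA retarget the old code called inline */
    static void retarget(const char *dir, struct ring *r)
    {
        printf("retarget %s ring %d to current CPU\n", dir, r->id);
    }

    /* hypothetical ixgbe_update_dca(): one call covers the whole vector */
    static void update_dca(struct q_vector *q)
    {
        for (int i = 0; i < q->txr_count; i++)
            retarget("tx", q->tx[i]);
        for (int i = 0; i < q->rxr_count; i++)
            retarget("rx", q->rx[i]);
    }

    int main(void)
    {
        struct ring t0 = { 0 }, r0 = { 1 };
        struct q_vector q = { .tx = { &t0 }, .rx = { &r0 },
                              .txr_count = 1, .rxr_count = 1 };
        update_dca(&q);    /* handlers now make exactly one DCA call */
        return 0;
    }
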
@@ -2019,13 +2191,14 @@ static int ixgbe_clean_txonly(struct napi_struct *napi, int budget) | |||
2019 | int work_done = 0; | 2191 | int work_done = 0; |
2020 | long r_idx; | 2192 | long r_idx; |
2021 | 2193 | ||
2022 | r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); | ||
2023 | tx_ring = adapter->tx_ring[r_idx]; | ||
2024 | #ifdef CONFIG_IXGBE_DCA | 2194 | #ifdef CONFIG_IXGBE_DCA |
2025 | if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) | 2195 | if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) |
2026 | ixgbe_update_tx_dca(adapter, tx_ring); | 2196 | ixgbe_update_dca(q_vector); |
2027 | #endif | 2197 | #endif |
2028 | 2198 | ||
2199 | r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); | ||
2200 | tx_ring = adapter->tx_ring[r_idx]; | ||
2201 | |||
2029 | if (!ixgbe_clean_tx_irq(q_vector, tx_ring)) | 2202 | if (!ixgbe_clean_tx_irq(q_vector, tx_ring)) |
2030 | work_done = budget; | 2203 | work_done = budget; |
2031 | 2204 | ||
@@ -2046,24 +2219,27 @@ static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx, | |||
2046 | int r_idx) | 2219 | int r_idx) |
2047 | { | 2220 | { |
2048 | struct ixgbe_q_vector *q_vector = a->q_vector[v_idx]; | 2221 | struct ixgbe_q_vector *q_vector = a->q_vector[v_idx]; |
2222 | struct ixgbe_ring *rx_ring = a->rx_ring[r_idx]; | ||
2049 | 2223 | ||
2050 | set_bit(r_idx, q_vector->rxr_idx); | 2224 | set_bit(r_idx, q_vector->rxr_idx); |
2051 | q_vector->rxr_count++; | 2225 | q_vector->rxr_count++; |
2226 | rx_ring->q_vector = q_vector; | ||
2052 | } | 2227 | } |
2053 | 2228 | ||
2054 | static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx, | 2229 | static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx, |
2055 | int t_idx) | 2230 | int t_idx) |
2056 | { | 2231 | { |
2057 | struct ixgbe_q_vector *q_vector = a->q_vector[v_idx]; | 2232 | struct ixgbe_q_vector *q_vector = a->q_vector[v_idx]; |
2233 | struct ixgbe_ring *tx_ring = a->tx_ring[t_idx]; | ||
2058 | 2234 | ||
2059 | set_bit(t_idx, q_vector->txr_idx); | 2235 | set_bit(t_idx, q_vector->txr_idx); |
2060 | q_vector->txr_count++; | 2236 | q_vector->txr_count++; |
2237 | tx_ring->q_vector = q_vector; | ||
2061 | } | 2238 | } |
2062 | 2239 | ||
2063 | /** | 2240 | /** |
2064 | * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors | 2241 | * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors |
2065 | * @adapter: board private structure to initialize | 2242 | * @adapter: board private structure to initialize |
2066 | * @vectors: allotted vector count for descriptor rings | ||
2067 | * | 2243 | * |
2068 | * This function maps descriptor rings to the queue-specific vectors | 2244 | * This function maps descriptor rings to the queue-specific vectors |
2069 | * we were allotted through the MSI-X enabling code. Ideally, we'd have | 2245 | * we were allotted through the MSI-X enabling code. Ideally, we'd have |
@@ -2071,9 +2247,9 @@ static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx, | |||
2071 | * group the rings as "efficiently" as possible. You would add new | 2247 | * group the rings as "efficiently" as possible. You would add new |
2072 | * mapping configurations in here. | 2248 | * mapping configurations in here. |
2073 | **/ | 2249 | **/ |
2074 | static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter, | 2250 | static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter) |
2075 | int vectors) | ||
2076 | { | 2251 | { |
2252 | int q_vectors; | ||
2077 | int v_start = 0; | 2253 | int v_start = 0; |
2078 | int rxr_idx = 0, txr_idx = 0; | 2254 | int rxr_idx = 0, txr_idx = 0; |
2079 | int rxr_remaining = adapter->num_rx_queues; | 2255 | int rxr_remaining = adapter->num_rx_queues; |
@@ -2086,11 +2262,13 @@ static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter, | |||
2086 | if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) | 2262 | if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) |
2087 | goto out; | 2263 | goto out; |
2088 | 2264 | ||
2265 | q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; | ||
2266 | |||
2089 | /* | 2267 | /* |
2090 | * The ideal configuration... | 2268 | * The ideal configuration... |
2091 | * We have enough vectors to map one per queue. | 2269 | * We have enough vectors to map one per queue. |
2092 | */ | 2270 | */ |
2093 | if (vectors == adapter->num_rx_queues + adapter->num_tx_queues) { | 2271 | if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) { |
2094 | for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++) | 2272 | for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++) |
2095 | map_vector_to_rxq(adapter, v_start, rxr_idx); | 2273 | map_vector_to_rxq(adapter, v_start, rxr_idx); |
2096 | 2274 | ||
@@ -2106,23 +2284,20 @@ static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter, | |||
2106 | * multiple queues per vector. | 2284 | * multiple queues per vector. |
2107 | */ | 2285 | */ |
2108 | /* Re-adjusting *qpv takes care of the remainder. */ | 2286 | /* Re-adjusting *qpv takes care of the remainder. */ |
2109 | for (i = v_start; i < vectors; i++) { | 2287 | for (i = v_start; i < q_vectors; i++) { |
2110 | rqpv = DIV_ROUND_UP(rxr_remaining, vectors - i); | 2288 | rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i); |
2111 | for (j = 0; j < rqpv; j++) { | 2289 | for (j = 0; j < rqpv; j++) { |
2112 | map_vector_to_rxq(adapter, i, rxr_idx); | 2290 | map_vector_to_rxq(adapter, i, rxr_idx); |
2113 | rxr_idx++; | 2291 | rxr_idx++; |
2114 | rxr_remaining--; | 2292 | rxr_remaining--; |
2115 | } | 2293 | } |
2116 | } | 2294 | tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i); |
2117 | for (i = v_start; i < vectors; i++) { | ||
2118 | tqpv = DIV_ROUND_UP(txr_remaining, vectors - i); | ||
2119 | for (j = 0; j < tqpv; j++) { | 2295 | for (j = 0; j < tqpv; j++) { |
2120 | map_vector_to_txq(adapter, i, txr_idx); | 2296 | map_vector_to_txq(adapter, i, txr_idx); |
2121 | txr_idx++; | 2297 | txr_idx++; |
2122 | txr_remaining--; | 2298 | txr_remaining--; |
2123 | } | 2299 | } |
2124 | } | 2300 | } |
2125 | |||
2126 | out: | 2301 | out: |
2127 | return err; | 2302 | return err; |
2128 | } | 2303 | } |
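
With the vector count now derived inside the function, the mapping loop spreads leftover rings by recomputing DIV_ROUND_UP(remaining, vectors_left) on every pass, and the Tx share is computed in the same loop rather than a second one. A runnable arithmetic check of how recomputing the quotient absorbs the remainder (the queue counts are made up for illustration):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        int q_vectors = 4, rxr_remaining = 10;

        /* 10 rings over 4 vectors lands as 3/3/2/2: never off by more
         * than one ring between any two vectors */
        for (int i = 0; i < q_vectors; i++) {
            int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
            printf("vector %d gets %d rx ring(s)\n", i, rqpv);
            rxr_remaining -= rqpv;
        }
        return 0;
    }
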
@@ -2144,30 +2319,36 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter) | |||
2144 | /* Decrement for Other and TCP Timer vectors */ | 2319 | /* Decrement for Other and TCP Timer vectors */ |
2145 | q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; | 2320 | q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; |
2146 | 2321 | ||
2147 | /* Map the Tx/Rx rings to the vectors we were allotted. */ | 2322 | err = ixgbe_map_rings_to_vectors(adapter); |
2148 | err = ixgbe_map_rings_to_vectors(adapter, q_vectors); | ||
2149 | if (err) | 2323 | if (err) |
2150 | goto out; | 2324 | return err; |
2151 | 2325 | ||
2152 | #define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \ | 2326 | #define SET_HANDLER(_v) (((_v)->rxr_count && (_v)->txr_count) \ |
2153 | (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \ | 2327 | ? &ixgbe_msix_clean_many : \ |
2154 | &ixgbe_msix_clean_many) | 2328 | (_v)->rxr_count ? &ixgbe_msix_clean_rx : \ |
2329 | (_v)->txr_count ? &ixgbe_msix_clean_tx : \ | ||
2330 | NULL) | ||
2155 | for (vector = 0; vector < q_vectors; vector++) { | 2331 | for (vector = 0; vector < q_vectors; vector++) { |
2156 | handler = SET_HANDLER(adapter->q_vector[vector]); | 2332 | struct ixgbe_q_vector *q_vector = adapter->q_vector[vector]; |
2333 | handler = SET_HANDLER(q_vector); | ||
2157 | 2334 | ||
2158 | if (handler == &ixgbe_msix_clean_rx) { | 2335 | if (handler == &ixgbe_msix_clean_rx) { |
2159 | sprintf(adapter->name[vector], "%s-%s-%d", | 2336 | sprintf(q_vector->name, "%s-%s-%d", |
2160 | netdev->name, "rx", ri++); | 2337 | netdev->name, "rx", ri++); |
2161 | } else if (handler == &ixgbe_msix_clean_tx) { | 2338 | } else if (handler == &ixgbe_msix_clean_tx) { |
2162 | sprintf(adapter->name[vector], "%s-%s-%d", | 2339 | sprintf(q_vector->name, "%s-%s-%d", |
2163 | netdev->name, "tx", ti++); | 2340 | netdev->name, "tx", ti++); |
2164 | } else | 2341 | } else if (handler == &ixgbe_msix_clean_many) { |
2165 | sprintf(adapter->name[vector], "%s-%s-%d", | 2342 | sprintf(q_vector->name, "%s-%s-%d", |
2166 | netdev->name, "TxRx", vector); | 2343 | netdev->name, "TxRx", ri++); |
2167 | 2344 | ti++; | |
2345 | } else { | ||
2346 | /* skip this unused q_vector */ | ||
2347 | continue; | ||
2348 | } | ||
2168 | err = request_irq(adapter->msix_entries[vector].vector, | 2349 | err = request_irq(adapter->msix_entries[vector].vector, |
2169 | handler, 0, adapter->name[vector], | 2350 | handler, 0, q_vector->name, |
2170 | adapter->q_vector[vector]); | 2351 | q_vector); |
2171 | if (err) { | 2352 | if (err) { |
2172 | e_err(probe, "request_irq failed for MSIX interrupt " | 2353 | e_err(probe, "request_irq failed for MSIX interrupt " |
2173 | "Error: %d\n", err); | 2354 | "Error: %d\n", err); |
@@ -2175,9 +2356,9 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter) | |||
2175 | } | 2356 | } |
2176 | } | 2357 | } |
2177 | 2358 | ||
2178 | sprintf(adapter->name[vector], "%s:lsc", netdev->name); | 2359 | sprintf(adapter->lsc_int_name, "%s:lsc", netdev->name); |
2179 | err = request_irq(adapter->msix_entries[vector].vector, | 2360 | err = request_irq(adapter->msix_entries[vector].vector, |
2180 | ixgbe_msix_lsc, 0, adapter->name[vector], netdev); | 2361 | ixgbe_msix_lsc, 0, adapter->lsc_int_name, netdev); |
2181 | if (err) { | 2362 | if (err) { |
2182 | e_err(probe, "request_irq for msix_lsc failed: %d\n", err); | 2363 | e_err(probe, "request_irq for msix_lsc failed: %d\n", err); |
2183 | goto free_queue_irqs; | 2364 | goto free_queue_irqs; |
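
The reworked SET_HANDLER() macro tests the mixed Tx/Rx case first and yields NULL for a vector that owns no rings at all, which the loop then skips without calling request_irq(). A standalone sketch of that selection order (handlers reduced to stubs; the struct is a minimal model):

    #include <stdio.h>

    typedef void (*irq_handler_t)(void);

    static void clean_rx(void)   { puts("rx");   }
    static void clean_tx(void)   { puts("tx");   }
    static void clean_many(void) { puts("txrx"); }

    struct q_vector { int rxr_count, txr_count; };

    /* same shape as SET_HANDLER(): mixed, rx-only, tx-only, else NULL */
    static irq_handler_t pick_handler(const struct q_vector *v)
    {
        return (v->rxr_count && v->txr_count) ? clean_many :
               v->rxr_count ? clean_rx :
               v->txr_count ? clean_tx :
               NULL;
    }

    int main(void)
    {
        struct q_vector vs[] = { {1, 1}, {1, 0}, {0, 1}, {0, 0} };
        for (int i = 0; i < 4; i++) {
            irq_handler_t h = pick_handler(&vs[i]);
            if (!h)           /* unused vector: no request_irq() */
                continue;
            h();
        }
        return 0;
    }
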
@@ -2193,17 +2374,16 @@ free_queue_irqs: | |||
2193 | pci_disable_msix(adapter->pdev); | 2374 | pci_disable_msix(adapter->pdev); |
2194 | kfree(adapter->msix_entries); | 2375 | kfree(adapter->msix_entries); |
2195 | adapter->msix_entries = NULL; | 2376 | adapter->msix_entries = NULL; |
2196 | out: | ||
2197 | return err; | 2377 | return err; |
2198 | } | 2378 | } |
2199 | 2379 | ||
2200 | static void ixgbe_set_itr(struct ixgbe_adapter *adapter) | 2380 | static void ixgbe_set_itr(struct ixgbe_adapter *adapter) |
2201 | { | 2381 | { |
2202 | struct ixgbe_q_vector *q_vector = adapter->q_vector[0]; | 2382 | struct ixgbe_q_vector *q_vector = adapter->q_vector[0]; |
2203 | u8 current_itr; | ||
2204 | u32 new_itr = q_vector->eitr; | ||
2205 | struct ixgbe_ring *rx_ring = adapter->rx_ring[0]; | 2383 | struct ixgbe_ring *rx_ring = adapter->rx_ring[0]; |
2206 | struct ixgbe_ring *tx_ring = adapter->tx_ring[0]; | 2384 | struct ixgbe_ring *tx_ring = adapter->tx_ring[0]; |
2385 | u32 new_itr = q_vector->eitr; | ||
2386 | u8 current_itr; | ||
2207 | 2387 | ||
2208 | q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr, | 2388 | q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr, |
2209 | q_vector->tx_itr, | 2389 | q_vector->tx_itr, |
@@ -2233,9 +2413,9 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter) | |||
2233 | 2413 | ||
2234 | if (new_itr != q_vector->eitr) { | 2414 | if (new_itr != q_vector->eitr) { |
2235 | /* do an exponential smoothing */ | 2415 | /* do an exponential smoothing */ |
2236 | new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100); | 2416 | new_itr = ((q_vector->eitr * 9) + new_itr)/10; |
2237 | 2417 | ||
2238 | /* save the algorithm value here, not the smoothed one */ | 2418 | /* save the algorithm value here */ |
2239 | q_vector->eitr = new_itr; | 2419 | q_vector->eitr = new_itr; |
2240 | 2420 | ||
2241 | ixgbe_write_eitr(q_vector); | 2421 | ixgbe_write_eitr(q_vector); |
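
The smoothing rewrite above keeps the same 90/10 exponential weighting but folds it into a single division, so the low-weight term is no longer truncated away on its own. A quick arithmetic comparison (the sample values are picked for illustration):

    #include <stdio.h>

    int main(void)
    {
        unsigned int eitr = 956, target = 8;

        /* old form: two truncating divisions, small targets vanish */
        unsigned int a = (eitr * 90) / 100 + (target * 10) / 100;
        /* new form: one division, same weights */
        unsigned int b = ((eitr * 9) + target) / 10;

        printf("old=%u new=%u\n", a, b);    /* prints old=860 new=861 */
        return 0;
    }
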
@@ -2256,12 +2436,17 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues, | |||
2256 | mask |= IXGBE_EIMS_GPI_SDP0; | 2436 | mask |= IXGBE_EIMS_GPI_SDP0; |
2257 | if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) | 2437 | if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) |
2258 | mask |= IXGBE_EIMS_GPI_SDP1; | 2438 | mask |= IXGBE_EIMS_GPI_SDP1; |
2259 | if (adapter->hw.mac.type == ixgbe_mac_82599EB) { | 2439 | switch (adapter->hw.mac.type) { |
2440 | case ixgbe_mac_82599EB: | ||
2441 | case ixgbe_mac_X540: | ||
2260 | mask |= IXGBE_EIMS_ECC; | 2442 | mask |= IXGBE_EIMS_ECC; |
2261 | mask |= IXGBE_EIMS_GPI_SDP1; | 2443 | mask |= IXGBE_EIMS_GPI_SDP1; |
2262 | mask |= IXGBE_EIMS_GPI_SDP2; | 2444 | mask |= IXGBE_EIMS_GPI_SDP2; |
2263 | if (adapter->num_vfs) | 2445 | if (adapter->num_vfs) |
2264 | mask |= IXGBE_EIMS_MAILBOX; | 2446 | mask |= IXGBE_EIMS_MAILBOX; |
2447 | break; | ||
2448 | default: | ||
2449 | break; | ||
2265 | } | 2450 | } |
2266 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || | 2451 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || |
2267 | adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) | 2452 | adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) |
@@ -2317,13 +2502,21 @@ static irqreturn_t ixgbe_intr(int irq, void *data) | |||
2317 | if (eicr & IXGBE_EICR_LSC) | 2502 | if (eicr & IXGBE_EICR_LSC) |
2318 | ixgbe_check_lsc(adapter); | 2503 | ixgbe_check_lsc(adapter); |
2319 | 2504 | ||
2320 | if (hw->mac.type == ixgbe_mac_82599EB) | 2505 | switch (hw->mac.type) { |
2506 | case ixgbe_mac_82599EB: | ||
2507 | case ixgbe_mac_X540: | ||
2321 | ixgbe_check_sfp_event(adapter, eicr); | 2508 | ixgbe_check_sfp_event(adapter, eicr); |
2509 | if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) && | ||
2510 | ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) { | ||
2511 | adapter->interrupt_event = eicr; | ||
2512 | schedule_work(&adapter->check_overtemp_task); | ||
2513 | } | ||
2514 | break; | ||
2515 | default: | ||
2516 | break; | ||
2517 | } | ||
2322 | 2518 | ||
2323 | ixgbe_check_fan_failure(adapter, eicr); | 2519 | ixgbe_check_fan_failure(adapter, eicr); |
2324 | if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) && | ||
2325 | ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) | ||
2326 | schedule_work(&adapter->check_overtemp_task); | ||
2327 | 2520 | ||
2328 | if (napi_schedule_prep(&(q_vector->napi))) { | 2521 | if (napi_schedule_prep(&(q_vector->napi))) { |
2329 | adapter->tx_ring[0]->total_packets = 0; | 2522 | adapter->tx_ring[0]->total_packets = 0; |
@@ -2416,14 +2609,20 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter) | |||
2416 | **/ | 2609 | **/ |
2417 | static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter) | 2610 | static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter) |
2418 | { | 2611 | { |
2419 | if (adapter->hw.mac.type == ixgbe_mac_82598EB) { | 2612 | switch (adapter->hw.mac.type) { |
2613 | case ixgbe_mac_82598EB: | ||
2420 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0); | 2614 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0); |
2421 | } else { | 2615 | break; |
2616 | case ixgbe_mac_82599EB: | ||
2617 | case ixgbe_mac_X540: | ||
2422 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000); | 2618 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000); |
2423 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0); | 2619 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0); |
2424 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0); | 2620 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0); |
2425 | if (adapter->num_vfs > 32) | 2621 | if (adapter->num_vfs > 32) |
2426 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0); | 2622 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0); |
2623 | break; | ||
2624 | default: | ||
2625 | break; | ||
2427 | } | 2626 | } |
2428 | IXGBE_WRITE_FLUSH(&adapter->hw); | 2627 | IXGBE_WRITE_FLUSH(&adapter->hw); |
2429 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { | 2628 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { |
@@ -2469,7 +2668,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, | |||
2469 | u64 tdba = ring->dma; | 2668 | u64 tdba = ring->dma; |
2470 | int wait_loop = 10; | 2669 | int wait_loop = 10; |
2471 | u32 txdctl; | 2670 | u32 txdctl; |
2472 | u16 reg_idx = ring->reg_idx; | 2671 | u8 reg_idx = ring->reg_idx; |
2473 | 2672 | ||
2474 | /* disable queue to avoid issues while updating state */ | 2673 | /* disable queue to avoid issues while updating state */ |
2475 | txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx)); | 2674 | txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx)); |
@@ -2484,8 +2683,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, | |||
2484 | ring->count * sizeof(union ixgbe_adv_tx_desc)); | 2683 | ring->count * sizeof(union ixgbe_adv_tx_desc)); |
2485 | IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0); | 2684 | IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0); |
2486 | IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0); | 2685 | IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0); |
2487 | ring->head = IXGBE_TDH(reg_idx); | 2686 | ring->tail = hw->hw_addr + IXGBE_TDT(reg_idx); |
2488 | ring->tail = IXGBE_TDT(reg_idx); | ||
2489 | 2687 | ||
2490 | /* configure fetching thresholds */ | 2688 | /* configure fetching thresholds */ |
2491 | if (adapter->rx_itr_setting == 0) { | 2689 | if (adapter->rx_itr_setting == 0) { |
@@ -2501,7 +2699,16 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, | |||
2501 | } | 2699 | } |
2502 | 2700 | ||
2503 | /* reinitialize flow director state */ | 2701 | /* reinitialize flow director state */ |

2504 | set_bit(__IXGBE_FDIR_INIT_DONE, &ring->reinit_state); | 2702 | if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) && |
2703 | adapter->atr_sample_rate) { | ||
2704 | ring->atr_sample_rate = adapter->atr_sample_rate; | ||
2705 | ring->atr_count = 0; | ||
2706 | set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state); | ||
2707 | } else { | ||
2708 | ring->atr_sample_rate = 0; | ||
2709 | } | ||
2710 | |||
2711 | clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state); | ||
2505 | 2712 | ||
2506 | /* enable queue */ | 2713 | /* enable queue */ |
2507 | txdctl |= IXGBE_TXDCTL_ENABLE; | 2714 | txdctl |= IXGBE_TXDCTL_ENABLE; |
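
Dropping ring->head and storing a fully resolved tail address means the hot path can post descriptors with a single writel() to ring->tail instead of recomputing hw_addr plus the register offset each time. A userspace model of that pattern (the MMIO array and the offset macro are stand-ins, not the real BAR layout):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t mmio[64];              /* models the mapped BAR */
    #define TDT_REG(idx) (24 + (idx))      /* offset purely illustrative */

    struct ring {
        volatile uint32_t *tail;           /* resolved once at setup */
        uint16_t next_to_use;
    };

    int main(void)
    {
        struct ring tx = { .tail = &mmio[TDT_REG(0)] };

        /* hot path: bump the tail with a plain store, no re-lookup */
        tx.next_to_use = 42;
        *tx.tail = tx.next_to_use;         /* models writel(v, ring->tail) */

        printf("TDT=%u\n", (unsigned)mmio[TDT_REG(0)]);
        return 0;
    }
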
@@ -2592,16 +2799,22 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, | |||
2592 | struct ixgbe_ring *rx_ring) | 2799 | struct ixgbe_ring *rx_ring) |
2593 | { | 2800 | { |
2594 | u32 srrctl; | 2801 | u32 srrctl; |
2595 | int index; | 2802 | u8 reg_idx = rx_ring->reg_idx; |
2596 | struct ixgbe_ring_feature *feature = adapter->ring_feature; | ||
2597 | 2803 | ||
2598 | index = rx_ring->reg_idx; | 2804 | switch (adapter->hw.mac.type) { |
2599 | if (adapter->hw.mac.type == ixgbe_mac_82598EB) { | 2805 | case ixgbe_mac_82598EB: { |
2600 | unsigned long mask; | 2806 | struct ixgbe_ring_feature *feature = adapter->ring_feature; |
2601 | mask = (unsigned long) feature[RING_F_RSS].mask; | 2807 | const int mask = feature[RING_F_RSS].mask; |
2602 | index = index & mask; | 2808 | reg_idx = reg_idx & mask; |
2809 | } | ||
2810 | break; | ||
2811 | case ixgbe_mac_82599EB: | ||
2812 | case ixgbe_mac_X540: | ||
2813 | default: | ||
2814 | break; | ||
2603 | } | 2815 | } |
2604 | srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(index)); | 2816 | |
2817 | srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx)); | ||
2605 | 2818 | ||
2606 | srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; | 2819 | srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; |
2607 | srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK; | 2820 | srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK; |
@@ -2611,7 +2824,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, | |||
2611 | srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) & | 2824 | srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) & |
2612 | IXGBE_SRRCTL_BSIZEHDR_MASK; | 2825 | IXGBE_SRRCTL_BSIZEHDR_MASK; |
2613 | 2826 | ||
2614 | if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) { | 2827 | if (ring_is_ps_enabled(rx_ring)) { |
2615 | #if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER | 2828 | #if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER |
2616 | srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; | 2829 | srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; |
2617 | #else | 2830 | #else |
@@ -2624,7 +2837,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, | |||
2624 | srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; | 2837 | srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; |
2625 | } | 2838 | } |
2626 | 2839 | ||
2627 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl); | 2840 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx), srrctl); |
2628 | } | 2841 | } |
2629 | 2842 | ||
2630 | static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) | 2843 | static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) |
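
In the SRRCTL hunk above, only 82598 still folds the ring's reg_idx with the RSS feature mask before selecting the register; 82599 and X540 use the index as-is. A small demonstration of the folding itself, which keeps higher ring indices inside the masked register range (the mask value is illustrative, not read from hardware):

    #include <stdio.h>

    int main(void)
    {
        const int rss_mask = 0xF;    /* hypothetical RING_F_RSS mask */

        for (int reg_idx = 0; reg_idx < 32; reg_idx += 9)
            printf("ring %2d -> SRRCTL(%d)\n", reg_idx, reg_idx & rss_mask);
        return 0;
    }
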
@@ -2694,19 +2907,36 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) | |||
2694 | } | 2907 | } |
2695 | 2908 | ||
2696 | /** | 2909 | /** |
2910 | * ixgbe_clear_rscctl - disable RSC for the indicated ring | ||
2911 | * @adapter: address of board private structure | ||
2912 | * @ring: structure containing ring specific data | ||
2913 | **/ | ||
2914 | void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter, | ||
2915 | struct ixgbe_ring *ring) | ||
2916 | { | ||
2917 | struct ixgbe_hw *hw = &adapter->hw; | ||
2918 | u32 rscctrl; | ||
2919 | u8 reg_idx = ring->reg_idx; | ||
2920 | |||
2921 | rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx)); | ||
2922 | rscctrl &= ~IXGBE_RSCCTL_RSCEN; | ||
2923 | IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl); | ||
2924 | } | ||
2925 | |||
2926 | /** | ||
2697 | * ixgbe_configure_rscctl - enable RSC for the indicated ring | 2927 | * ixgbe_configure_rscctl - enable RSC for the indicated ring |
2698 | * @adapter: address of board private structure | 2928 | * @adapter: address of board private structure |
2699 | * @ring: structure containing ring specific data | 2929 | * @ring: structure containing ring specific data |
2700 | **/ | 2930 | **/ |
2701 | static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, | 2931 | void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, |
2702 | struct ixgbe_ring *ring) | 2932 | struct ixgbe_ring *ring) |
2703 | { | 2933 | { |
2704 | struct ixgbe_hw *hw = &adapter->hw; | 2934 | struct ixgbe_hw *hw = &adapter->hw; |
2705 | u32 rscctrl; | 2935 | u32 rscctrl; |
2706 | int rx_buf_len; | 2936 | int rx_buf_len; |
2707 | u16 reg_idx = ring->reg_idx; | 2937 | u8 reg_idx = ring->reg_idx; |
2708 | 2938 | ||
2709 | if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) | 2939 | if (!ring_is_rsc_enabled(ring)) |
2710 | return; | 2940 | return; |
2711 | 2941 | ||
2712 | rx_buf_len = ring->rx_buf_len; | 2942 | rx_buf_len = ring->rx_buf_len; |
@@ -2717,7 +2947,7 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, | |||
2717 | * total size of max desc * buf_len is not greater | 2947 | * total size of max desc * buf_len is not greater |
2718 | * than 65535 | 2948 | * than 65535 |
2719 | */ | 2949 | */ |
2720 | if (ring->flags & IXGBE_RING_RX_PS_ENABLED) { | 2950 | if (ring_is_ps_enabled(ring)) { |
2721 | #if (MAX_SKB_FRAGS > 16) | 2951 | #if (MAX_SKB_FRAGS > 16) |
2722 | rscctrl |= IXGBE_RSCCTL_MAXDESC_16; | 2952 | rscctrl |= IXGBE_RSCCTL_MAXDESC_16; |
2723 | #elif (MAX_SKB_FRAGS > 8) | 2953 | #elif (MAX_SKB_FRAGS > 8) |
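
The comment above pins the RSC constraint: descriptors per coalesced receive times buffer length must stay at or below 65535. The hardware offers fixed MAXDESC encodings (16, 8, 4, 1) chosen here by MAX_SKB_FRAGS in packet-split mode and by buffer length otherwise; the loop below only demonstrates the bound, not the driver's actual selection logic:

    #include <stdio.h>

    int main(void)
    {
        int buf_lens[] = { 2048, 4096, 8192, 16384 };

        for (int i = 0; i < 4; i++) {
            int max_desc = 16;
            /* halve until max_desc * buf_len fits in 16 bits */
            while (max_desc * buf_lens[i] > 65535)
                max_desc /= 2;
            printf("buf_len %5d -> at most %2d descriptors\n",
                   buf_lens[i], max_desc);
        }
        return 0;
    }
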
@@ -2770,9 +3000,9 @@ static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter, | |||
2770 | struct ixgbe_ring *ring) | 3000 | struct ixgbe_ring *ring) |
2771 | { | 3001 | { |
2772 | struct ixgbe_hw *hw = &adapter->hw; | 3002 | struct ixgbe_hw *hw = &adapter->hw; |
2773 | int reg_idx = ring->reg_idx; | ||
2774 | int wait_loop = IXGBE_MAX_RX_DESC_POLL; | 3003 | int wait_loop = IXGBE_MAX_RX_DESC_POLL; |
2775 | u32 rxdctl; | 3004 | u32 rxdctl; |
3005 | u8 reg_idx = ring->reg_idx; | ||
2776 | 3006 | ||
2777 | /* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */ | 3007 | /* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */ |
2778 | if (hw->mac.type == ixgbe_mac_82598EB && | 3008 | if (hw->mac.type == ixgbe_mac_82598EB && |
@@ -2796,7 +3026,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, | |||
2796 | struct ixgbe_hw *hw = &adapter->hw; | 3026 | struct ixgbe_hw *hw = &adapter->hw; |
2797 | u64 rdba = ring->dma; | 3027 | u64 rdba = ring->dma; |
2798 | u32 rxdctl; | 3028 | u32 rxdctl; |
2799 | u16 reg_idx = ring->reg_idx; | 3029 | u8 reg_idx = ring->reg_idx; |
2800 | 3030 | ||
2801 | /* disable queue to avoid issues while updating state */ | 3031 | /* disable queue to avoid issues while updating state */ |
2802 | rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); | 3032 | rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); |
@@ -2810,8 +3040,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, | |||
2810 | ring->count * sizeof(union ixgbe_adv_rx_desc)); | 3040 | ring->count * sizeof(union ixgbe_adv_rx_desc)); |
2811 | IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0); | 3041 | IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0); |
2812 | IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0); | 3042 | IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0); |
2813 | ring->head = IXGBE_RDH(reg_idx); | 3043 | ring->tail = hw->hw_addr + IXGBE_RDT(reg_idx); |
2814 | ring->tail = IXGBE_RDT(reg_idx); | ||
2815 | 3044 | ||
2816 | ixgbe_configure_srrctl(adapter, ring); | 3045 | ixgbe_configure_srrctl(adapter, ring); |
2817 | ixgbe_configure_rscctl(adapter, ring); | 3046 | ixgbe_configure_rscctl(adapter, ring); |
@@ -2833,7 +3062,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, | |||
2833 | IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); | 3062 | IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); |
2834 | 3063 | ||
2835 | ixgbe_rx_desc_queue_enable(adapter, ring); | 3064 | ixgbe_rx_desc_queue_enable(adapter, ring); |
2836 | ixgbe_alloc_rx_buffers(adapter, ring, IXGBE_DESC_UNUSED(ring)); | 3065 | ixgbe_alloc_rx_buffers(ring, IXGBE_DESC_UNUSED(ring)); |
2837 | } | 3066 | } |
2838 | 3067 | ||
2839 | static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter) | 3068 | static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter) |
@@ -2956,24 +3185,32 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter) | |||
2956 | rx_ring->rx_buf_len = rx_buf_len; | 3185 | rx_ring->rx_buf_len = rx_buf_len; |
2957 | 3186 | ||
2958 | if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) | 3187 | if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) |
2959 | rx_ring->flags |= IXGBE_RING_RX_PS_ENABLED; | 3188 | set_ring_ps_enabled(rx_ring); |
3189 | else | ||
3190 | clear_ring_ps_enabled(rx_ring); | ||
3191 | |||
3192 | if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) | ||
3193 | set_ring_rsc_enabled(rx_ring); | ||
2960 | else | 3194 | else |
2961 | rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED; | 3195 | clear_ring_rsc_enabled(rx_ring); |
2962 | 3196 | ||
2963 | #ifdef IXGBE_FCOE | 3197 | #ifdef IXGBE_FCOE |
2964 | if (netdev->features & NETIF_F_FCOE_MTU) { | 3198 | if (netdev->features & NETIF_F_FCOE_MTU) { |
2965 | struct ixgbe_ring_feature *f; | 3199 | struct ixgbe_ring_feature *f; |
2966 | f = &adapter->ring_feature[RING_F_FCOE]; | 3200 | f = &adapter->ring_feature[RING_F_FCOE]; |
2967 | if ((i >= f->mask) && (i < f->mask + f->indices)) { | 3201 | if ((i >= f->mask) && (i < f->mask + f->indices)) { |
2968 | rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED; | 3202 | clear_ring_ps_enabled(rx_ring); |
2969 | if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE) | 3203 | if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE) |
2970 | rx_ring->rx_buf_len = | 3204 | rx_ring->rx_buf_len = |
2971 | IXGBE_FCOE_JUMBO_FRAME_SIZE; | 3205 | IXGBE_FCOE_JUMBO_FRAME_SIZE; |
3206 | } else if (!ring_is_rsc_enabled(rx_ring) && | ||
3207 | !ring_is_ps_enabled(rx_ring)) { | ||
3208 | rx_ring->rx_buf_len = | ||
3209 | IXGBE_FCOE_JUMBO_FRAME_SIZE; | ||
2972 | } | 3210 | } |
2973 | } | 3211 | } |
2974 | #endif /* IXGBE_FCOE */ | 3212 | #endif /* IXGBE_FCOE */ |
2975 | } | 3213 | } |
2976 | |||
2977 | } | 3214 | } |
2978 | 3215 | ||
2979 | static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter) | 3216 | static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter) |
@@ -2996,6 +3233,7 @@ static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter) | |||
2996 | rdrxctl |= IXGBE_RDRXCTL_MVMEN; | 3233 | rdrxctl |= IXGBE_RDRXCTL_MVMEN; |
2997 | break; | 3234 | break; |
2998 | case ixgbe_mac_82599EB: | 3235 | case ixgbe_mac_82599EB: |
3236 | case ixgbe_mac_X540: | ||
2999 | /* Disable RSC for ACK packets */ | 3237 | /* Disable RSC for ACK packets */ |
3000 | IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, | 3238 | IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, |
3001 | (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU))); | 3239 | (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU))); |
@@ -3123,6 +3361,7 @@ static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter) | |||
3123 | IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); | 3361 | IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); |
3124 | break; | 3362 | break; |
3125 | case ixgbe_mac_82599EB: | 3363 | case ixgbe_mac_82599EB: |
3364 | case ixgbe_mac_X540: | ||
3126 | for (i = 0; i < adapter->num_rx_queues; i++) { | 3365 | for (i = 0; i < adapter->num_rx_queues; i++) { |
3127 | j = adapter->rx_ring[i]->reg_idx; | 3366 | j = adapter->rx_ring[i]->reg_idx; |
3128 | vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); | 3367 | vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); |
@@ -3152,6 +3391,7 @@ static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter) | |||
3152 | IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); | 3391 | IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); |
3153 | break; | 3392 | break; |
3154 | case ixgbe_mac_82599EB: | 3393 | case ixgbe_mac_82599EB: |
3394 | case ixgbe_mac_X540: | ||
3155 | for (i = 0; i < adapter->num_rx_queues; i++) { | 3395 | for (i = 0; i < adapter->num_rx_queues; i++) { |
3156 | j = adapter->rx_ring[i]->reg_idx; | 3396 | j = adapter->rx_ring[i]->reg_idx; |
3157 | vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); | 3397 | vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); |
@@ -3349,8 +3589,6 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter) | |||
3349 | { | 3589 | { |
3350 | struct ixgbe_hw *hw = &adapter->hw; | 3590 | struct ixgbe_hw *hw = &adapter->hw; |
3351 | int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN; | 3591 | int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN; |
3352 | u32 txdctl; | ||
3353 | int i, j; | ||
3354 | 3592 | ||
3355 | if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) { | 3593 | if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) { |
3356 | if (hw->mac.type == ixgbe_mac_82598EB) | 3594 | if (hw->mac.type == ixgbe_mac_82598EB) |
@@ -3366,25 +3604,18 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter) | |||
3366 | max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE); | 3604 | max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE); |
3367 | #endif | 3605 | #endif |
3368 | 3606 | ||
3369 | ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, max_frame, | 3607 | ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame, |
3370 | DCB_TX_CONFIG); | 3608 | DCB_TX_CONFIG); |
3371 | ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, max_frame, | 3609 | ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame, |
3372 | DCB_RX_CONFIG); | 3610 | DCB_RX_CONFIG); |
3373 | 3611 | ||
3374 | /* reconfigure the hardware */ | ||
3375 | ixgbe_dcb_hw_config(&adapter->hw, &adapter->dcb_cfg); | ||
3376 | |||
3377 | for (i = 0; i < adapter->num_tx_queues; i++) { | ||
3378 | j = adapter->tx_ring[i]->reg_idx; | ||
3379 | txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j)); | ||
3380 | /* PThresh workaround for Tx hang with DFP enabled. */ | ||
3381 | txdctl |= 32; | ||
3382 | IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl); | ||
3383 | } | ||
3384 | /* Enable VLAN tag insert/strip */ | 3612 | /* Enable VLAN tag insert/strip */ |
3385 | adapter->netdev->features |= NETIF_F_HW_VLAN_RX; | 3613 | adapter->netdev->features |= NETIF_F_HW_VLAN_RX; |
3386 | 3614 | ||
3387 | hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true); | 3615 | hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true); |
3616 | |||
3617 | /* reconfigure the hardware */ | ||
3618 | ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg); | ||
3388 | } | 3619 | } |
3389 | 3620 | ||
3390 | #endif | 3621 | #endif |
@@ -3516,8 +3747,9 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter) | |||
3516 | case ixgbe_mac_82598EB: | 3747 | case ixgbe_mac_82598EB: |
3517 | IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); | 3748 | IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); |
3518 | break; | 3749 | break; |
3519 | default: | ||
3520 | case ixgbe_mac_82599EB: | 3750 | case ixgbe_mac_82599EB: |
3751 | case ixgbe_mac_X540: | ||
3752 | default: | ||
3521 | IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); | 3753 | IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); |
3522 | IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); | 3754 | IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); |
3523 | break; | 3755 | break; |
@@ -3562,12 +3794,20 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter) | |||
3562 | ixgbe_configure_msi_and_legacy(adapter); | 3794 | ixgbe_configure_msi_and_legacy(adapter); |
3563 | 3795 | ||
3564 | /* enable the optics */ | 3796 | /* enable the optics */ |
3565 | if (hw->phy.multispeed_fiber) | 3797 | if (hw->phy.multispeed_fiber && hw->mac.ops.enable_tx_laser) |
3566 | hw->mac.ops.enable_tx_laser(hw); | 3798 | hw->mac.ops.enable_tx_laser(hw); |
3567 | 3799 | ||
3568 | clear_bit(__IXGBE_DOWN, &adapter->state); | 3800 | clear_bit(__IXGBE_DOWN, &adapter->state); |
3569 | ixgbe_napi_enable_all(adapter); | 3801 | ixgbe_napi_enable_all(adapter); |
3570 | 3802 | ||
3803 | if (ixgbe_is_sfp(hw)) { | ||
3804 | ixgbe_sfp_link_config(adapter); | ||
3805 | } else { | ||
3806 | err = ixgbe_non_sfp_link_config(hw); | ||
3807 | if (err) | ||
3808 | e_err(probe, "link_config FAILED %d\n", err); | ||
3809 | } | ||
3810 | |||
3571 | /* clear any pending interrupts, may auto mask */ | 3811 | /* clear any pending interrupts, may auto mask */ |
3572 | IXGBE_READ_REG(hw, IXGBE_EICR); | 3812 | IXGBE_READ_REG(hw, IXGBE_EICR); |
3573 | ixgbe_irq_enable(adapter, true, true); | 3813 | ixgbe_irq_enable(adapter, true, true); |
@@ -3590,26 +3830,8 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter) | |||
3590 | * If we're not hot-pluggable SFP+, we just need to configure link | 3830 | * If we're not hot-pluggable SFP+, we just need to configure link |
3591 | * and bring it up. | 3831 | * and bring it up. |
3592 | */ | 3832 | */ |
3593 | if (hw->phy.type == ixgbe_phy_unknown) { | 3833 | if (hw->phy.type == ixgbe_phy_unknown) |
3594 | err = hw->phy.ops.identify(hw); | 3834 | schedule_work(&adapter->sfp_config_module_task); |
3595 | if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { | ||
3596 | /* | ||
3597 | * Take the device down and schedule the sfp tasklet | ||
3598 | * which will unregister_netdev and log it. | ||
3599 | */ | ||
3600 | ixgbe_down(adapter); | ||
3601 | schedule_work(&adapter->sfp_config_module_task); | ||
3602 | return err; | ||
3603 | } | ||
3604 | } | ||
3605 | |||
3606 | if (ixgbe_is_sfp(hw)) { | ||
3607 | ixgbe_sfp_link_config(adapter); | ||
3608 | } else { | ||
3609 | err = ixgbe_non_sfp_link_config(hw); | ||
3610 | if (err) | ||
3611 | e_err(probe, "link_config FAILED %d\n", err); | ||
3612 | } | ||
3613 | 3835 | ||
3614 | /* enable transmits */ | 3836 | /* enable transmits */ |
3615 | netif_tx_start_all_queues(adapter->netdev); | 3837 | netif_tx_start_all_queues(adapter->netdev); |
@@ -3687,15 +3909,13 @@ void ixgbe_reset(struct ixgbe_adapter *adapter) | |||
3687 | 3909 | ||
3688 | /** | 3910 | /** |
3689 | * ixgbe_clean_rx_ring - Free Rx Buffers per Queue | 3911 | * ixgbe_clean_rx_ring - Free Rx Buffers per Queue |
3690 | * @adapter: board private structure | ||
3691 | * @rx_ring: ring to free buffers from | 3912 | * @rx_ring: ring to free buffers from |
3692 | **/ | 3913 | **/ |
3693 | static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter, | 3914 | static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring) |
3694 | struct ixgbe_ring *rx_ring) | ||
3695 | { | 3915 | { |
3696 | struct pci_dev *pdev = adapter->pdev; | 3916 | struct device *dev = rx_ring->dev; |
3697 | unsigned long size; | 3917 | unsigned long size; |
3698 | unsigned int i; | 3918 | u16 i; |
3699 | 3919 | ||
3700 | /* ring already cleared, nothing to do */ | 3920 | /* ring already cleared, nothing to do */ |
3701 | if (!rx_ring->rx_buffer_info) | 3921 | if (!rx_ring->rx_buffer_info) |
@@ -3707,7 +3927,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter, | |||
3707 | 3927 | ||
3708 | rx_buffer_info = &rx_ring->rx_buffer_info[i]; | 3928 | rx_buffer_info = &rx_ring->rx_buffer_info[i]; |
3709 | if (rx_buffer_info->dma) { | 3929 | if (rx_buffer_info->dma) { |
3710 | dma_unmap_single(&pdev->dev, rx_buffer_info->dma, | 3930 | dma_unmap_single(rx_ring->dev, rx_buffer_info->dma, |
3711 | rx_ring->rx_buf_len, | 3931 | rx_ring->rx_buf_len, |
3712 | DMA_FROM_DEVICE); | 3932 | DMA_FROM_DEVICE); |
3713 | rx_buffer_info->dma = 0; | 3933 | rx_buffer_info->dma = 0; |
@@ -3718,7 +3938,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter, | |||
3718 | do { | 3938 | do { |
3719 | struct sk_buff *this = skb; | 3939 | struct sk_buff *this = skb; |
3720 | if (IXGBE_RSC_CB(this)->delay_unmap) { | 3940 | if (IXGBE_RSC_CB(this)->delay_unmap) { |
3721 | dma_unmap_single(&pdev->dev, | 3941 | dma_unmap_single(dev, |
3722 | IXGBE_RSC_CB(this)->dma, | 3942 | IXGBE_RSC_CB(this)->dma, |
3723 | rx_ring->rx_buf_len, | 3943 | rx_ring->rx_buf_len, |
3724 | DMA_FROM_DEVICE); | 3944 | DMA_FROM_DEVICE); |
@@ -3732,7 +3952,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter, | |||
3732 | if (!rx_buffer_info->page) | 3952 | if (!rx_buffer_info->page) |
3733 | continue; | 3953 | continue; |
3734 | if (rx_buffer_info->page_dma) { | 3954 | if (rx_buffer_info->page_dma) { |
3735 | dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma, | 3955 | dma_unmap_page(dev, rx_buffer_info->page_dma, |
3736 | PAGE_SIZE / 2, DMA_FROM_DEVICE); | 3956 | PAGE_SIZE / 2, DMA_FROM_DEVICE); |
3737 | rx_buffer_info->page_dma = 0; | 3957 | rx_buffer_info->page_dma = 0; |
3738 | } | 3958 | } |
@@ -3749,24 +3969,17 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter, | |||
3749 | 3969 | ||
3750 | rx_ring->next_to_clean = 0; | 3970 | rx_ring->next_to_clean = 0; |
3751 | rx_ring->next_to_use = 0; | 3971 | rx_ring->next_to_use = 0; |
3752 | |||
3753 | if (rx_ring->head) | ||
3754 | writel(0, adapter->hw.hw_addr + rx_ring->head); | ||
3755 | if (rx_ring->tail) | ||
3756 | writel(0, adapter->hw.hw_addr + rx_ring->tail); | ||
3757 | } | 3972 | } |
3758 | 3973 | ||
3759 | /** | 3974 | /** |
3760 | * ixgbe_clean_tx_ring - Free Tx Buffers | 3975 | * ixgbe_clean_tx_ring - Free Tx Buffers |
3761 | * @adapter: board private structure | ||
3762 | * @tx_ring: ring to be cleaned | 3976 | * @tx_ring: ring to be cleaned |
3763 | **/ | 3977 | **/ |
3764 | static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter, | 3978 | static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring) |
3765 | struct ixgbe_ring *tx_ring) | ||
3766 | { | 3979 | { |
3767 | struct ixgbe_tx_buffer *tx_buffer_info; | 3980 | struct ixgbe_tx_buffer *tx_buffer_info; |
3768 | unsigned long size; | 3981 | unsigned long size; |
3769 | unsigned int i; | 3982 | u16 i; |
3770 | 3983 | ||
3771 | /* ring already cleared, nothing to do */ | 3984 | /* ring already cleared, nothing to do */ |
3772 | if (!tx_ring->tx_buffer_info) | 3985 | if (!tx_ring->tx_buffer_info) |
@@ -3775,7 +3988,7 @@ static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter, | |||
3775 | /* Free all the Tx ring sk_buffs */ | 3988 | /* Free all the Tx ring sk_buffs */ |
3776 | for (i = 0; i < tx_ring->count; i++) { | 3989 | for (i = 0; i < tx_ring->count; i++) { |
3777 | tx_buffer_info = &tx_ring->tx_buffer_info[i]; | 3990 | tx_buffer_info = &tx_ring->tx_buffer_info[i]; |
3778 | ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info); | 3991 | ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); |
3779 | } | 3992 | } |
3780 | 3993 | ||
3781 | size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; | 3994 | size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; |
@@ -3786,11 +3999,6 @@ static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter, | |||
3786 | 3999 | ||
3787 | tx_ring->next_to_use = 0; | 4000 | tx_ring->next_to_use = 0; |
3788 | tx_ring->next_to_clean = 0; | 4001 | tx_ring->next_to_clean = 0; |
3789 | |||
3790 | if (tx_ring->head) | ||
3791 | writel(0, adapter->hw.hw_addr + tx_ring->head); | ||
3792 | if (tx_ring->tail) | ||
3793 | writel(0, adapter->hw.hw_addr + tx_ring->tail); | ||
3794 | } | 4002 | } |
3795 | 4003 | ||
3796 | /** | 4004 | /** |
@@ -3802,7 +4010,7 @@ static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter) | |||
3802 | int i; | 4010 | int i; |
3803 | 4011 | ||
3804 | for (i = 0; i < adapter->num_rx_queues; i++) | 4012 | for (i = 0; i < adapter->num_rx_queues; i++) |
3805 | ixgbe_clean_rx_ring(adapter, adapter->rx_ring[i]); | 4013 | ixgbe_clean_rx_ring(adapter->rx_ring[i]); |
3806 | } | 4014 | } |
3807 | 4015 | ||
3808 | /** | 4016 | /** |
@@ -3814,7 +4022,7 @@ static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter) | |||
3814 | int i; | 4022 | int i; |
3815 | 4023 | ||
3816 | for (i = 0; i < adapter->num_tx_queues; i++) | 4024 | for (i = 0; i < adapter->num_tx_queues; i++) |
3817 | ixgbe_clean_tx_ring(adapter, adapter->tx_ring[i]); | 4025 | ixgbe_clean_tx_ring(adapter->tx_ring[i]); |
3818 | } | 4026 | } |
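
The cleanup helpers above now take only the ring because the ring has become self-describing: it carries the DMA device that was previously reached through adapter->pdev. A pared-down model of that ring-centric shape (types reduced to the bare minimum; these are not the driver's definitions):

    #include <stdio.h>

    struct device { const char *name; };

    struct ring {
        struct device *dev;    /* was reached via adapter->pdev */
        int count;
    };

    static void clean_ring(struct ring *r)
    {
        /* dma_unmap_single(r->dev, ...) would sit here in the driver */
        printf("cleaning %d entries against %s\n", r->count, r->dev->name);
    }

    int main(void)
    {
        struct device pci = { "0000:01:00.0" };
        struct ring rx = { .dev = &pci, .count = 512 };

        clean_ring(&rx);    /* one argument: the ring knows its device */
        return 0;
    }
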
3819 | 4027 | ||
3820 | void ixgbe_down(struct ixgbe_adapter *adapter) | 4028 | void ixgbe_down(struct ixgbe_adapter *adapter) |
@@ -3823,7 +4031,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter) | |||
3823 | struct ixgbe_hw *hw = &adapter->hw; | 4031 | struct ixgbe_hw *hw = &adapter->hw; |
3824 | u32 rxctrl; | 4032 | u32 rxctrl; |
3825 | u32 txdctl; | 4033 | u32 txdctl; |
3826 | int i, j; | 4034 | int i; |
3827 | int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; | 4035 | int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; |
3828 | 4036 | ||
3829 | /* signal that we are down to the interrupt handler */ | 4037 | /* signal that we are down to the interrupt handler */ |
@@ -3881,19 +4089,25 @@ void ixgbe_down(struct ixgbe_adapter *adapter) | |||
3881 | 4089 | ||
3882 | /* disable transmits in the hardware now that interrupts are off */ | 4090 | /* disable transmits in the hardware now that interrupts are off */ |
3883 | for (i = 0; i < adapter->num_tx_queues; i++) { | 4091 | for (i = 0; i < adapter->num_tx_queues; i++) { |
3884 | j = adapter->tx_ring[i]->reg_idx; | 4092 | u8 reg_idx = adapter->tx_ring[i]->reg_idx; |
3885 | txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j)); | 4093 | txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx)); |
3886 | IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), | 4094 | IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), |
3887 | (txdctl & ~IXGBE_TXDCTL_ENABLE)); | 4095 | (txdctl & ~IXGBE_TXDCTL_ENABLE)); |
3888 | } | 4096 | } |
3889 | /* Disable the Tx DMA engine on 82599 */ | 4097 | /* Disable the Tx DMA engine on 82599 */ |
3890 | if (hw->mac.type == ixgbe_mac_82599EB) | 4098 | switch (hw->mac.type) { |
4099 | case ixgbe_mac_82599EB: | ||
4100 | case ixgbe_mac_X540: | ||
3891 | IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, | 4101 | IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, |
3892 | (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) & | 4102 | (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) & |
3893 | ~IXGBE_DMATXCTL_TE)); | 4103 | ~IXGBE_DMATXCTL_TE)); |
4104 | break; | ||
4105 | default: | ||
4106 | break; | ||
4107 | } | ||
3894 | 4108 | ||
3895 | /* power down the optics */ | 4109 | /* power down the optics */ |
3896 | if (hw->phy.multispeed_fiber) | 4110 | if (hw->phy.multispeed_fiber && hw->mac.ops.disable_tx_laser) |
3897 | hw->mac.ops.disable_tx_laser(hw); | 4111 | hw->mac.ops.disable_tx_laser(hw); |
3898 | 4112 | ||
3899 | /* clear n-tuple filters that are cached */ | 4113 | /* clear n-tuple filters that are cached */ |
@@ -3925,10 +4139,8 @@ static int ixgbe_poll(struct napi_struct *napi, int budget) | |||
3925 | int tx_clean_complete, work_done = 0; | 4139 | int tx_clean_complete, work_done = 0; |
3926 | 4140 | ||
3927 | #ifdef CONFIG_IXGBE_DCA | 4141 | #ifdef CONFIG_IXGBE_DCA |
3928 | if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { | 4142 | if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) |
3929 | ixgbe_update_tx_dca(adapter, adapter->tx_ring[0]); | 4143 | ixgbe_update_dca(q_vector); |
3930 | ixgbe_update_rx_dca(adapter, adapter->rx_ring[0]); | ||
3931 | } | ||
3932 | #endif | 4144 | #endif |
3933 | 4145 | ||
3934 | tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring[0]); | 4146 | tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring[0]); |
@@ -3956,6 +4168,8 @@ static void ixgbe_tx_timeout(struct net_device *netdev) | |||
3956 | { | 4168 | { |
3957 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | 4169 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
3958 | 4170 | ||
4171 | adapter->tx_timeout_count++; | ||
4172 | |||
3959 | /* Do the reset outside of interrupt context */ | 4173 | /* Do the reset outside of interrupt context */ |
3960 | schedule_work(&adapter->reset_task); | 4174 | schedule_work(&adapter->reset_task); |
3961 | } | 4175 | } |
@@ -3970,8 +4184,6 @@ static void ixgbe_reset_task(struct work_struct *work) | |||
3970 | test_bit(__IXGBE_RESETTING, &adapter->state)) | 4184 | test_bit(__IXGBE_RESETTING, &adapter->state)) |
3971 | return; | 4185 | return; |
3972 | 4186 | ||
3973 | adapter->tx_timeout_count++; | ||
3974 | |||
3975 | ixgbe_dump(adapter); | 4187 | ixgbe_dump(adapter); |
3976 | netdev_err(adapter->netdev, "Reset adapter\n"); | 4188 | netdev_err(adapter->netdev, "Reset adapter\n"); |
3977 | ixgbe_reinit_locked(adapter); | 4189 | ixgbe_reinit_locked(adapter); |
@@ -4221,19 +4433,16 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, | |||
4221 | static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter) | 4433 | static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter) |
4222 | { | 4434 | { |
4223 | int i; | 4435 | int i; |
4224 | bool ret = false; | ||
4225 | 4436 | ||
4226 | if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { | 4437 | if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) |
4227 | for (i = 0; i < adapter->num_rx_queues; i++) | 4438 | return false; |
4228 | adapter->rx_ring[i]->reg_idx = i; | ||
4229 | for (i = 0; i < adapter->num_tx_queues; i++) | ||
4230 | adapter->tx_ring[i]->reg_idx = i; | ||
4231 | ret = true; | ||
4232 | } else { | ||
4233 | ret = false; | ||
4234 | } | ||
4235 | 4439 | ||
4236 | return ret; | 4440 | for (i = 0; i < adapter->num_rx_queues; i++) |
4441 | adapter->rx_ring[i]->reg_idx = i; | ||
4442 | for (i = 0; i < adapter->num_tx_queues; i++) | ||
4443 | adapter->tx_ring[i]->reg_idx = i; | ||
4444 | |||
4445 | return true; | ||
4237 | } | 4446 | } |
4238 | 4447 | ||
4239 | #ifdef CONFIG_IXGBE_DCB | 4448 | #ifdef CONFIG_IXGBE_DCB |
@@ -4250,71 +4459,67 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter) | |||
4250 | bool ret = false; | 4459 | bool ret = false; |
4251 | int dcb_i = adapter->ring_feature[RING_F_DCB].indices; | 4460 | int dcb_i = adapter->ring_feature[RING_F_DCB].indices; |
4252 | 4461 | ||
4253 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { | 4462 | if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) |
4254 | if (adapter->hw.mac.type == ixgbe_mac_82598EB) { | 4463 | return false; |
4255 | /* the number of queues is assumed to be symmetric */ | ||
4256 | for (i = 0; i < dcb_i; i++) { | ||
4257 | adapter->rx_ring[i]->reg_idx = i << 3; | ||
4258 | adapter->tx_ring[i]->reg_idx = i << 2; | ||
4259 | } | ||
4260 | ret = true; | ||
4261 | } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) { | ||
4262 | if (dcb_i == 8) { | ||
4263 | /* | ||
4264 | * Tx TC0 starts at: descriptor queue 0 | ||
4265 | * Tx TC1 starts at: descriptor queue 32 | ||
4266 | * Tx TC2 starts at: descriptor queue 64 | ||
4267 | * Tx TC3 starts at: descriptor queue 80 | ||
4268 | * Tx TC4 starts at: descriptor queue 96 | ||
4269 | * Tx TC5 starts at: descriptor queue 104 | ||
4270 | * Tx TC6 starts at: descriptor queue 112 | ||
4271 | * Tx TC7 starts at: descriptor queue 120 | ||
4272 | * | ||
4273 | * Rx TC0-TC7 are offset by 16 queues each | ||
4274 | */ | ||
4275 | for (i = 0; i < 3; i++) { | ||
4276 | adapter->tx_ring[i]->reg_idx = i << 5; | ||
4277 | adapter->rx_ring[i]->reg_idx = i << 4; | ||
4278 | } | ||
4279 | for ( ; i < 5; i++) { | ||
4280 | adapter->tx_ring[i]->reg_idx = | ||
4281 | ((i + 2) << 4); | ||
4282 | adapter->rx_ring[i]->reg_idx = i << 4; | ||
4283 | } | ||
4284 | for ( ; i < dcb_i; i++) { | ||
4285 | adapter->tx_ring[i]->reg_idx = | ||
4286 | ((i + 8) << 3); | ||
4287 | adapter->rx_ring[i]->reg_idx = i << 4; | ||
4288 | } | ||
4289 | 4464 | ||
4290 | ret = true; | 4465 | /* the number of queues is assumed to be symmetric */ |
4291 | } else if (dcb_i == 4) { | 4466 | switch (adapter->hw.mac.type) { |
4292 | /* | 4467 | case ixgbe_mac_82598EB: |
4293 | * Tx TC0 starts at: descriptor queue 0 | 4468 | for (i = 0; i < dcb_i; i++) { |
4294 | * Tx TC1 starts at: descriptor queue 64 | 4469 | adapter->rx_ring[i]->reg_idx = i << 3; |
4295 | * Tx TC2 starts at: descriptor queue 96 | 4470 | adapter->tx_ring[i]->reg_idx = i << 2; |
4296 | * Tx TC3 starts at: descriptor queue 112 | 4471 | } |
4297 | * | 4472 | ret = true; |
4298 | * Rx TC0-TC3 are offset by 32 queues each | 4473 | break; |
4299 | */ | 4474 | case ixgbe_mac_82599EB: |
4300 | adapter->tx_ring[0]->reg_idx = 0; | 4475 | case ixgbe_mac_X540: |
4301 | adapter->tx_ring[1]->reg_idx = 64; | 4476 | if (dcb_i == 8) { |
4302 | adapter->tx_ring[2]->reg_idx = 96; | 4477 | /* |
4303 | adapter->tx_ring[3]->reg_idx = 112; | 4478 | * Tx TC0 starts at: descriptor queue 0 |
4304 | for (i = 0 ; i < dcb_i; i++) | 4479 | * Tx TC1 starts at: descriptor queue 32 |
4305 | adapter->rx_ring[i]->reg_idx = i << 5; | 4480 | * Tx TC2 starts at: descriptor queue 64 |
4306 | 4481 | * Tx TC3 starts at: descriptor queue 80 | |
4307 | ret = true; | 4482 | * Tx TC4 starts at: descriptor queue 96 |
4308 | } else { | 4483 | * Tx TC5 starts at: descriptor queue 104 |
4309 | ret = false; | 4484 | * Tx TC6 starts at: descriptor queue 112 |
4485 | * Tx TC7 starts at: descriptor queue 120 | ||
4486 | * | ||
4487 | * Rx TC0-TC7 are offset by 16 queues each | ||
4488 | */ | ||
4489 | for (i = 0; i < 3; i++) { | ||
4490 | adapter->tx_ring[i]->reg_idx = i << 5; | ||
4491 | adapter->rx_ring[i]->reg_idx = i << 4; | ||
4310 | } | 4492 | } |
4311 | } else { | 4493 | for ( ; i < 5; i++) { |
4312 | ret = false; | 4494 | adapter->tx_ring[i]->reg_idx = ((i + 2) << 4); |
4495 | adapter->rx_ring[i]->reg_idx = i << 4; | ||
4496 | } | ||
4497 | for ( ; i < dcb_i; i++) { | ||
4498 | adapter->tx_ring[i]->reg_idx = ((i + 8) << 3); | ||
4499 | adapter->rx_ring[i]->reg_idx = i << 4; | ||
4500 | } | ||
4501 | ret = true; | ||
4502 | } else if (dcb_i == 4) { | ||
4503 | /* | ||
4504 | * Tx TC0 starts at: descriptor queue 0 | ||
4505 | * Tx TC1 starts at: descriptor queue 64 | ||
4506 | * Tx TC2 starts at: descriptor queue 96 | ||
4507 | * Tx TC3 starts at: descriptor queue 112 | ||
4508 | * | ||
4509 | * Rx TC0-TC3 are offset by 32 queues each | ||
4510 | */ | ||
4511 | adapter->tx_ring[0]->reg_idx = 0; | ||
4512 | adapter->tx_ring[1]->reg_idx = 64; | ||
4513 | adapter->tx_ring[2]->reg_idx = 96; | ||
4514 | adapter->tx_ring[3]->reg_idx = 112; | ||
4515 | for (i = 0 ; i < dcb_i; i++) | ||
4516 | adapter->rx_ring[i]->reg_idx = i << 5; | ||
4517 | ret = true; | ||
4313 | } | 4518 | } |
4314 | } else { | 4519 | break; |
4315 | ret = false; | 4520 | default: |
4521 | break; | ||
4316 | } | 4522 | } |
4317 | |||
4318 | return ret; | 4523 | return ret; |
4319 | } | 4524 | } |
4320 | #endif | 4525 | #endif |
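The three shift expressions in the 8-TC branch are the whole Tx layout: i << 5 spaces TC0-TC2 32 queues apart, (i + 2) << 4 places TC3 and TC4 at 80 and 96, and (i + 8) << 3 packs TC5-TC7 8 queues apart. A stand-alone C model (illustrative only, not driver code) reproduces the start queues listed in the comment:

    /* Model of the 82599 8-TC Tx reg_idx math from ixgbe_cache_ring_dcb();
     * prints 0, 32, 64, 80, 96, 104, 112, 120. */
    #include <stdio.h>

    int main(void)
    {
        int i;

        for (i = 0; i < 8; i++) {
            int reg_idx;

            if (i < 3)                  /* TC0-TC2: 32 queues apart */
                reg_idx = i << 5;
            else if (i < 5)             /* TC3-TC4: 16 queues apart */
                reg_idx = (i + 2) << 4;
            else                        /* TC5-TC7: 8 queues apart */
                reg_idx = (i + 8) << 3;
            printf("Tx TC%d starts at descriptor queue %d\n", i, reg_idx);
        }
        return 0;
    }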
@@ -4354,55 +4559,55 @@ static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter) | |||
4354 | */ | 4559 | */ |
4355 | static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter) | 4560 | static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter) |
4356 | { | 4561 | { |
4357 | int i, fcoe_rx_i = 0, fcoe_tx_i = 0; | ||
4358 | bool ret = false; | ||
4359 | struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; | 4562 | struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; |
4563 | int i; | ||
4564 | u8 fcoe_rx_i = 0, fcoe_tx_i = 0; | ||
4565 | |||
4566 | if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) | ||
4567 | return false; | ||
4360 | 4568 | ||
4361 | if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { | ||
4362 | #ifdef CONFIG_IXGBE_DCB | 4569 | #ifdef CONFIG_IXGBE_DCB |
4363 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { | 4570 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { |
4364 | struct ixgbe_fcoe *fcoe = &adapter->fcoe; | 4571 | struct ixgbe_fcoe *fcoe = &adapter->fcoe; |
4365 | 4572 | ||
4366 | ixgbe_cache_ring_dcb(adapter); | 4573 | ixgbe_cache_ring_dcb(adapter); |
4367 | /* find out queues in TC for FCoE */ | 4574 | /* find out queues in TC for FCoE */ |
4368 | fcoe_rx_i = adapter->rx_ring[fcoe->tc]->reg_idx + 1; | 4575 | fcoe_rx_i = adapter->rx_ring[fcoe->tc]->reg_idx + 1; |
4369 | fcoe_tx_i = adapter->tx_ring[fcoe->tc]->reg_idx + 1; | 4576 | fcoe_tx_i = adapter->tx_ring[fcoe->tc]->reg_idx + 1; |
4370 | /* | 4577 | /* |
4371 | * In 82599, the number of Tx queues for each traffic | 4578 | * In 82599, the number of Tx queues for each traffic |
4372 | * class for both 8-TC and 4-TC modes are: | 4579 | * class for both 8-TC and 4-TC modes are: |
4373 | * TCs : TC0 TC1 TC2 TC3 TC4 TC5 TC6 TC7 | 4580 | * TCs : TC0 TC1 TC2 TC3 TC4 TC5 TC6 TC7 |
4374 | * 8 TCs: 32 32 16 16 8 8 8 8 | 4581 | * 8 TCs: 32 32 16 16 8 8 8 8 |
4375 | * 4 TCs: 64 64 32 32 | 4582 | * 4 TCs: 64 64 32 32 |
4376 | * We have max 8 queues for FCoE, where 8 is the | 4583 | * We have max 8 queues for FCoE, where 8 is the |
4377 | * FCoE redirection table size. If TC for FCoE is | 4584 | * FCoE redirection table size. If TC for FCoE is |
4378 | * less than or equal to TC3, we have enough queues | 4585 | * less than or equal to TC3, we have enough queues |
4379 | * to add max of 8 queues for FCoE, so we start FCoE | 4586 | * to add max of 8 queues for FCoE, so we start FCoE |
4380 | * tx descriptor from the next one, i.e., reg_idx + 1. | 4587 | * Tx queue from the next one, i.e., reg_idx + 1. |
4381 | * If TC for FCoE is above TC3, implying 8 TC mode, | 4588 | * If TC for FCoE is above TC3, implying 8 TC mode, |
4382 | * and we need 8 for FCoE, we have to take all queues | 4589 | * and we need 8 for FCoE, we have to take all queues |
4383 | * in that traffic class for FCoE. | 4590 | * in that traffic class for FCoE. |
4384 | */ | 4591 | */ |
4385 | if ((f->indices == IXGBE_FCRETA_SIZE) && (fcoe->tc > 3)) | 4592 | if ((f->indices == IXGBE_FCRETA_SIZE) && (fcoe->tc > 3)) |
4386 | fcoe_tx_i--; | 4593 | fcoe_tx_i--; |
4387 | } | 4594 | } |
4388 | #endif /* CONFIG_IXGBE_DCB */ | 4595 | #endif /* CONFIG_IXGBE_DCB */ |
4389 | if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { | 4596 | if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { |
4390 | if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) || | 4597 | if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) || |
4391 | (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) | 4598 | (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) |
4392 | ixgbe_cache_ring_fdir(adapter); | 4599 | ixgbe_cache_ring_fdir(adapter); |
4393 | else | 4600 | else |
4394 | ixgbe_cache_ring_rss(adapter); | 4601 | ixgbe_cache_ring_rss(adapter); |
4395 | 4602 | ||
4396 | fcoe_rx_i = f->mask; | 4603 | fcoe_rx_i = f->mask; |
4397 | fcoe_tx_i = f->mask; | 4604 | fcoe_tx_i = f->mask; |
4398 | } | ||
4399 | for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) { | ||
4400 | adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i; | ||
4401 | adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i; | ||
4402 | } | ||
4403 | ret = true; | ||
4404 | } | 4605 | } |
4405 | return ret; | 4606 | for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) { |
4607 | adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i; | ||
4608 | adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i; | ||
4609 | } | ||
4610 | return true; | ||
4406 | } | 4611 | } |
4407 | 4612 | ||
4408 | #endif /* IXGBE_FCOE */ | 4613 | #endif /* IXGBE_FCOE */ |
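The comment block in ixgbe_cache_ring_fcoe() is the key to the fcoe_tx_i decrement: above TC3 a traffic class owns only 8 Tx queues, exactly the size of the FCoE redirection table, so starting at reg_idx + 1 would leave room for just 7. A sketch of the resulting placement (assumes the 8-TC Tx layout above and a redirection table of 8 entries, the value behind IXGBE_FCRETA_SIZE):

    #include <stdio.h>

    #define FCRETA_SIZE 8           /* FCoE redirection table size */

    static int tc_tx_base(int tc)   /* 8-TC Tx start queues, as above */
    {
        if (tc < 3)
            return tc << 5;
        if (tc < 5)
            return (tc + 2) << 4;
        return (tc + 8) << 3;
    }

    int main(void)
    {
        int tc;

        for (tc = 0; tc < 8; tc++) {
            int fcoe_tx_i = tc_tx_base(tc) + 1;

            /* TC4-TC7 have only 8 Tx queues, so all of them must be
             * taken for FCoE: step back to the TC base queue. */
            if (tc > 3)
                fcoe_tx_i--;
            printf("FCoE in TC%d: Tx queues %d..%d\n",
                   tc, fcoe_tx_i, fcoe_tx_i + FCRETA_SIZE - 1);
        }
        return 0;
    }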
@@ -4471,65 +4676,55 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) | |||
4471 | **/ | 4676 | **/ |
4472 | static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter) | 4677 | static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter) |
4473 | { | 4678 | { |
4474 | int i; | 4679 | int rx = 0, tx = 0, nid = adapter->node; |
4475 | int orig_node = adapter->node; | ||
4476 | 4680 | ||
4477 | for (i = 0; i < adapter->num_tx_queues; i++) { | 4681 | if (nid < 0 || !node_online(nid)) |
4478 | struct ixgbe_ring *ring = adapter->tx_ring[i]; | 4682 | nid = first_online_node; |
4479 | if (orig_node == -1) { | 4683 | |
4480 | int cur_node = next_online_node(adapter->node); | 4684 | for (; tx < adapter->num_tx_queues; tx++) { |
4481 | if (cur_node == MAX_NUMNODES) | 4685 | struct ixgbe_ring *ring; |
4482 | cur_node = first_online_node; | 4686 | |
4483 | adapter->node = cur_node; | 4687 | ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, nid); |
4484 | } | ||
4485 | ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL, | ||
4486 | adapter->node); | ||
4487 | if (!ring) | 4688 | if (!ring) |
4488 | ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL); | 4689 | ring = kzalloc(sizeof(*ring), GFP_KERNEL); |
4489 | if (!ring) | 4690 | if (!ring) |
4490 | goto err_tx_ring_allocation; | 4691 | goto err_allocation; |
4491 | ring->count = adapter->tx_ring_count; | 4692 | ring->count = adapter->tx_ring_count; |
4492 | ring->queue_index = i; | 4693 | ring->queue_index = tx; |
4493 | ring->numa_node = adapter->node; | 4694 | ring->numa_node = nid; |
4695 | ring->dev = &adapter->pdev->dev; | ||
4696 | ring->netdev = adapter->netdev; | ||
4494 | 4697 | ||
4495 | adapter->tx_ring[i] = ring; | 4698 | adapter->tx_ring[tx] = ring; |
4496 | } | 4699 | } |
4497 | 4700 | ||
4498 | /* Restore the adapter's original node */ | 4701 | for (; rx < adapter->num_rx_queues; rx++) { |
4499 | adapter->node = orig_node; | 4702 | struct ixgbe_ring *ring; |
4500 | 4703 | ||
4501 | for (i = 0; i < adapter->num_rx_queues; i++) { | 4704 | ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, nid); |
4502 | struct ixgbe_ring *ring = adapter->rx_ring[i]; | ||
4503 | if (orig_node == -1) { | ||
4504 | int cur_node = next_online_node(adapter->node); | ||
4505 | if (cur_node == MAX_NUMNODES) | ||
4506 | cur_node = first_online_node; | ||
4507 | adapter->node = cur_node; | ||
4508 | } | ||
4509 | ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL, | ||
4510 | adapter->node); | ||
4511 | if (!ring) | 4705 | if (!ring) |
4512 | ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL); | 4706 | ring = kzalloc(sizeof(*ring), GFP_KERNEL); |
4513 | if (!ring) | 4707 | if (!ring) |
4514 | goto err_rx_ring_allocation; | 4708 | goto err_allocation; |
4515 | ring->count = adapter->rx_ring_count; | 4709 | ring->count = adapter->rx_ring_count; |
4516 | ring->queue_index = i; | 4710 | ring->queue_index = rx; |
4517 | ring->numa_node = adapter->node; | 4711 | ring->numa_node = nid; |
4712 | ring->dev = &adapter->pdev->dev; | ||
4713 | ring->netdev = adapter->netdev; | ||
4518 | 4714 | ||
4519 | adapter->rx_ring[i] = ring; | 4715 | adapter->rx_ring[rx] = ring; |
4520 | } | 4716 | } |
4521 | 4717 | ||
4522 | /* Restore the adapter's original node */ | ||
4523 | adapter->node = orig_node; | ||
4524 | |||
4525 | ixgbe_cache_ring_register(adapter); | 4718 | ixgbe_cache_ring_register(adapter); |
4526 | 4719 | ||
4527 | return 0; | 4720 | return 0; |
4528 | 4721 | ||
4529 | err_rx_ring_allocation: | 4722 | err_allocation: |
4530 | for (i = 0; i < adapter->num_tx_queues; i++) | 4723 | while (tx) |
4531 | kfree(adapter->tx_ring[i]); | 4724 | kfree(adapter->tx_ring[--tx]); |
4532 | err_tx_ring_allocation: | 4725 | |
4726 | while (rx) | ||
4727 | kfree(adapter->rx_ring[--rx]); | ||
4533 | return -ENOMEM; | 4728 | return -ENOMEM; |
4534 | } | 4729 | } |
4535 | 4730 | ||
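Two things change shape in ixgbe_alloc_queues(): the node is validated once up front (falling back to first_online_node) instead of round-robining adapter->node inside the loop, and the error path unwinds exactly the rings that were allocated, in reverse. The allocate-with-fallback-and-unwind pattern, as a userspace sketch (plain calloc stands in for kzalloc_node()/kzalloc(); the per-node failure is purely structural here):

    #include <stdlib.h>

    struct ring { int count; int queue_index; };

    /* Stand-in for kzalloc_node() with the any-node kzalloc() fallback. */
    static struct ring *ring_alloc(int nid)
    {
        struct ring *r = (nid >= 0) ? calloc(1, sizeof(*r)) : NULL;

        if (!r)
            r = calloc(1, sizeof(*r));      /* fall back to any node */
        return r;
    }

    static int alloc_queues(struct ring **rings, int num, int nid)
    {
        int i = 0;

        for (; i < num; i++) {
            rings[i] = ring_alloc(nid);
            if (!rings[i])
                goto err_allocation;
            rings[i]->queue_index = i;
        }
        return 0;

    err_allocation:
        while (i)                           /* free in reverse, like the patch */
            free(rings[--i]);
        return -1;                          /* -ENOMEM in the driver */
    }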
@@ -4751,6 +4946,11 @@ err_set_interrupt: | |||
4751 | return err; | 4946 | return err; |
4752 | } | 4947 | } |
4753 | 4948 | ||
4949 | static void ring_free_rcu(struct rcu_head *head) | ||
4950 | { | ||
4951 | kfree(container_of(head, struct ixgbe_ring, rcu)); | ||
4952 | } | ||
4953 | |||
4754 | /** | 4954 | /** |
4755 | * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings | 4955 | * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings |
4756 | * @adapter: board private structure to clear interrupt scheme on | 4956 | * @adapter: board private structure to clear interrupt scheme on |
@@ -4767,7 +4967,12 @@ void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter) | |||
4767 | adapter->tx_ring[i] = NULL; | 4967 | adapter->tx_ring[i] = NULL; |
4768 | } | 4968 | } |
4769 | for (i = 0; i < adapter->num_rx_queues; i++) { | 4969 | for (i = 0; i < adapter->num_rx_queues; i++) { |
4770 | kfree(adapter->rx_ring[i]); | 4970 | struct ixgbe_ring *ring = adapter->rx_ring[i]; |
4971 | |||
4972 | /* ixgbe_get_stats64() might access this ring; we must wait | ||
4973 | * a grace period before freeing it. | ||
4974 | */ | ||
4975 | call_rcu(&ring->rcu, ring_free_rcu); | ||
4771 | adapter->rx_ring[i] = NULL; | 4976 | adapter->rx_ring[i] = NULL; |
4772 | } | 4977 | } |
4773 | 4978 | ||
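ring_free_rcu() receives only the embedded rcu_head; container_of() walks back to the enclosing ixgbe_ring so the whole object can be kfree()d once the grace period has elapsed and the lockless readers in ixgbe_get_stats64() (further down in this patch) can no longer hold a pointer to it. The offset arithmetic behind container_of(), as a stand-alone C model with mocked-up types:

    #include <stdio.h>
    #include <stddef.h>

    struct rcu_head { void *next; void (*func)(struct rcu_head *); };
    struct ring     { int count; struct rcu_head rcu; };

    /* Same trick as the kernel macro: subtract the member's offset. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    static void ring_free_rcu(struct rcu_head *head)
    {
        struct ring *ring = container_of(head, struct ring, rcu);

        printf("freeing ring with count %d\n", ring->count);
    }

    int main(void)
    {
        struct ring r = { .count = 512 };

        /* In the driver this callback runs from call_rcu() after a
         * grace period; here it is simply invoked directly. */
        ring_free_rcu(&r.rcu);
        return 0;
    }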
@@ -4844,6 +5049,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) | |||
4844 | int j; | 5049 | int j; |
4845 | struct tc_configuration *tc; | 5050 | struct tc_configuration *tc; |
4846 | #endif | 5051 | #endif |
5052 | int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN; | ||
4847 | 5053 | ||
4848 | /* PCI config space info */ | 5054 | /* PCI config space info */ |
4849 | 5055 | ||
@@ -4858,11 +5064,14 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) | |||
4858 | adapter->ring_feature[RING_F_RSS].indices = rss; | 5064 | adapter->ring_feature[RING_F_RSS].indices = rss; |
4859 | adapter->flags |= IXGBE_FLAG_RSS_ENABLED; | 5065 | adapter->flags |= IXGBE_FLAG_RSS_ENABLED; |
4860 | adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES; | 5066 | adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES; |
4861 | if (hw->mac.type == ixgbe_mac_82598EB) { | 5067 | switch (hw->mac.type) { |
5068 | case ixgbe_mac_82598EB: | ||
4862 | if (hw->device_id == IXGBE_DEV_ID_82598AT) | 5069 | if (hw->device_id == IXGBE_DEV_ID_82598AT) |
4863 | adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE; | 5070 | adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE; |
4864 | adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598; | 5071 | adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598; |
4865 | } else if (hw->mac.type == ixgbe_mac_82599EB) { | 5072 | break; |
5073 | case ixgbe_mac_82599EB: | ||
5074 | case ixgbe_mac_X540: | ||
4866 | adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599; | 5075 | adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599; |
4867 | adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; | 5076 | adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; |
4868 | adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; | 5077 | adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; |
@@ -4891,6 +5100,9 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) | |||
4891 | adapter->fcoe.up = IXGBE_FCOE_DEFTC; | 5100 | adapter->fcoe.up = IXGBE_FCOE_DEFTC; |
4892 | #endif | 5101 | #endif |
4893 | #endif /* IXGBE_FCOE */ | 5102 | #endif /* IXGBE_FCOE */ |
5103 | break; | ||
5104 | default: | ||
5105 | break; | ||
4894 | } | 5106 | } |
4895 | 5107 | ||
4896 | #ifdef CONFIG_IXGBE_DCB | 5108 | #ifdef CONFIG_IXGBE_DCB |
@@ -4920,8 +5132,8 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) | |||
4920 | #ifdef CONFIG_DCB | 5132 | #ifdef CONFIG_DCB |
4921 | adapter->last_lfc_mode = hw->fc.current_mode; | 5133 | adapter->last_lfc_mode = hw->fc.current_mode; |
4922 | #endif | 5134 | #endif |
4923 | hw->fc.high_water = IXGBE_DEFAULT_FCRTH; | 5135 | hw->fc.high_water = FC_HIGH_WATER(max_frame); |
4924 | hw->fc.low_water = IXGBE_DEFAULT_FCRTL; | 5136 | hw->fc.low_water = FC_LOW_WATER(max_frame); |
4925 | hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE; | 5137 | hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE; |
4926 | hw->fc.send_xon = true; | 5138 | hw->fc.send_xon = true; |
4927 | hw->fc.disable_fc_autoneg = false; | 5139 | hw->fc.disable_fc_autoneg = false; |
@@ -4959,15 +5171,13 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) | |||
4959 | 5171 | ||
4960 | /** | 5172 | /** |
4961 | * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors) | 5173 | * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors) |
4962 | * @adapter: board private structure | ||
4963 | * @tx_ring: tx descriptor ring (for a specific queue) to setup | 5174 | * @tx_ring: tx descriptor ring (for a specific queue) to setup |
4964 | * | 5175 | * |
4965 | * Return 0 on success, negative on failure | 5176 | * Return 0 on success, negative on failure |
4966 | **/ | 5177 | **/ |
4967 | int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter, | 5178 | int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring) |
4968 | struct ixgbe_ring *tx_ring) | ||
4969 | { | 5179 | { |
4970 | struct pci_dev *pdev = adapter->pdev; | 5180 | struct device *dev = tx_ring->dev; |
4971 | int size; | 5181 | int size; |
4972 | 5182 | ||
4973 | size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; | 5183 | size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; |
@@ -4982,7 +5192,7 @@ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter, | |||
4982 | tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); | 5192 | tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); |
4983 | tx_ring->size = ALIGN(tx_ring->size, 4096); | 5193 | tx_ring->size = ALIGN(tx_ring->size, 4096); |
4984 | 5194 | ||
4985 | tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size, | 5195 | tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, |
4986 | &tx_ring->dma, GFP_KERNEL); | 5196 | &tx_ring->dma, GFP_KERNEL); |
4987 | if (!tx_ring->desc) | 5197 | if (!tx_ring->desc) |
4988 | goto err; | 5198 | goto err; |
@@ -4995,7 +5205,7 @@ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter, | |||
4995 | err: | 5205 | err: |
4996 | vfree(tx_ring->tx_buffer_info); | 5206 | vfree(tx_ring->tx_buffer_info); |
4997 | tx_ring->tx_buffer_info = NULL; | 5207 | tx_ring->tx_buffer_info = NULL; |
4998 | e_err(probe, "Unable to allocate memory for the Tx descriptor ring\n"); | 5208 | dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n"); |
4999 | return -ENOMEM; | 5209 | return -ENOMEM; |
5000 | } | 5210 | } |
5001 | 5211 | ||
@@ -5014,7 +5224,7 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter) | |||
5014 | int i, err = 0; | 5224 | int i, err = 0; |
5015 | 5225 | ||
5016 | for (i = 0; i < adapter->num_tx_queues; i++) { | 5226 | for (i = 0; i < adapter->num_tx_queues; i++) { |
5017 | err = ixgbe_setup_tx_resources(adapter, adapter->tx_ring[i]); | 5227 | err = ixgbe_setup_tx_resources(adapter->tx_ring[i]); |
5018 | if (!err) | 5228 | if (!err) |
5019 | continue; | 5229 | continue; |
5020 | e_err(probe, "Allocation for Tx Queue %u failed\n", i); | 5230 | e_err(probe, "Allocation for Tx Queue %u failed\n", i); |
@@ -5026,48 +5236,41 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter) | |||
5026 | 5236 | ||
5027 | /** | 5237 | /** |
5028 | * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors) | 5238 | * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors) |
5029 | * @adapter: board private structure | ||
5030 | * @rx_ring: rx descriptor ring (for a specific queue) to setup | 5239 | * @rx_ring: rx descriptor ring (for a specific queue) to setup |
5031 | * | 5240 | * |
5032 | * Returns 0 on success, negative on failure | 5241 | * Returns 0 on success, negative on failure |
5033 | **/ | 5242 | **/ |
5034 | int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, | 5243 | int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring) |
5035 | struct ixgbe_ring *rx_ring) | ||
5036 | { | 5244 | { |
5037 | struct pci_dev *pdev = adapter->pdev; | 5245 | struct device *dev = rx_ring->dev; |
5038 | int size; | 5246 | int size; |
5039 | 5247 | ||
5040 | size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; | 5248 | size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; |
5041 | rx_ring->rx_buffer_info = vmalloc_node(size, adapter->node); | 5249 | rx_ring->rx_buffer_info = vmalloc_node(size, rx_ring->numa_node); |
5042 | if (!rx_ring->rx_buffer_info) | 5250 | if (!rx_ring->rx_buffer_info) |
5043 | rx_ring->rx_buffer_info = vmalloc(size); | 5251 | rx_ring->rx_buffer_info = vmalloc(size); |
5044 | if (!rx_ring->rx_buffer_info) { | 5252 | if (!rx_ring->rx_buffer_info) |
5045 | e_err(probe, "vmalloc allocation failed for the Rx " | 5253 | goto err; |
5046 | "descriptor ring\n"); | ||
5047 | goto alloc_failed; | ||
5048 | } | ||
5049 | memset(rx_ring->rx_buffer_info, 0, size); | 5254 | memset(rx_ring->rx_buffer_info, 0, size); |
5050 | 5255 | ||
5051 | /* Round up to nearest 4K */ | 5256 | /* Round up to nearest 4K */ |
5052 | rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); | 5257 | rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); |
5053 | rx_ring->size = ALIGN(rx_ring->size, 4096); | 5258 | rx_ring->size = ALIGN(rx_ring->size, 4096); |
5054 | 5259 | ||
5055 | rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, | 5260 | rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, |
5056 | &rx_ring->dma, GFP_KERNEL); | 5261 | &rx_ring->dma, GFP_KERNEL); |
5057 | 5262 | ||
5058 | if (!rx_ring->desc) { | 5263 | if (!rx_ring->desc) |
5059 | e_err(probe, "Memory allocation failed for the Rx " | 5264 | goto err; |
5060 | "descriptor ring\n"); | ||
5061 | vfree(rx_ring->rx_buffer_info); | ||
5062 | goto alloc_failed; | ||
5063 | } | ||
5064 | 5265 | ||
5065 | rx_ring->next_to_clean = 0; | 5266 | rx_ring->next_to_clean = 0; |
5066 | rx_ring->next_to_use = 0; | 5267 | rx_ring->next_to_use = 0; |
5067 | 5268 | ||
5068 | return 0; | 5269 | return 0; |
5069 | 5270 | err: | |
5070 | alloc_failed: | 5271 | vfree(rx_ring->rx_buffer_info); |
5272 | rx_ring->rx_buffer_info = NULL; | ||
5273 | dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n"); | ||
5071 | return -ENOMEM; | 5274 | return -ENOMEM; |
5072 | } | 5275 | } |
5073 | 5276 | ||
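Both setup paths size the descriptor array as count * sizeof(descriptor) and round up to a 4 KiB boundary before handing it to dma_alloc_coherent(). The rounding is the usual power-of-two mask; a quick model (16-byte advanced descriptors assumed):

    #include <stdio.h>

    /* Same rounding the kernel's ALIGN() performs for power-of-two a. */
    #define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

    int main(void)
    {
        unsigned long desc = 16;                    /* descriptor size */
        unsigned long size = 512 * desc;            /* 8192 bytes */

        printf("%lu -> %lu\n", size, ALIGN_UP(size, 4096));  /* 8192 */
        size = 1000 * desc;                         /* 16000 bytes */
        printf("%lu -> %lu\n", size, ALIGN_UP(size, 4096));  /* 16384 */
        return 0;
    }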
@@ -5081,13 +5284,12 @@ alloc_failed: | |||
5081 | * | 5284 | * |
5082 | * Return 0 on success, negative on failure | 5285 | * Return 0 on success, negative on failure |
5083 | **/ | 5286 | **/ |
5084 | |||
5085 | static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter) | 5287 | static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter) |
5086 | { | 5288 | { |
5087 | int i, err = 0; | 5289 | int i, err = 0; |
5088 | 5290 | ||
5089 | for (i = 0; i < adapter->num_rx_queues; i++) { | 5291 | for (i = 0; i < adapter->num_rx_queues; i++) { |
5090 | err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]); | 5292 | err = ixgbe_setup_rx_resources(adapter->rx_ring[i]); |
5091 | if (!err) | 5293 | if (!err) |
5092 | continue; | 5294 | continue; |
5093 | e_err(probe, "Allocation for Rx Queue %u failed\n", i); | 5295 | e_err(probe, "Allocation for Rx Queue %u failed\n", i); |
@@ -5099,23 +5301,23 @@ static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter) | |||
5099 | 5301 | ||
5100 | /** | 5302 | /** |
5101 | * ixgbe_free_tx_resources - Free Tx Resources per Queue | 5303 | * ixgbe_free_tx_resources - Free Tx Resources per Queue |
5102 | * @adapter: board private structure | ||
5103 | * @tx_ring: Tx descriptor ring for a specific queue | 5304 | * @tx_ring: Tx descriptor ring for a specific queue |
5104 | * | 5305 | * |
5105 | * Free all transmit software resources | 5306 | * Free all transmit software resources |
5106 | **/ | 5307 | **/ |
5107 | void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter, | 5308 | void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring) |
5108 | struct ixgbe_ring *tx_ring) | ||
5109 | { | 5309 | { |
5110 | struct pci_dev *pdev = adapter->pdev; | 5310 | ixgbe_clean_tx_ring(tx_ring); |
5111 | |||
5112 | ixgbe_clean_tx_ring(adapter, tx_ring); | ||
5113 | 5311 | ||
5114 | vfree(tx_ring->tx_buffer_info); | 5312 | vfree(tx_ring->tx_buffer_info); |
5115 | tx_ring->tx_buffer_info = NULL; | 5313 | tx_ring->tx_buffer_info = NULL; |
5116 | 5314 | ||
5117 | dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, | 5315 | /* if not set, then don't free */ |
5118 | tx_ring->dma); | 5316 | if (!tx_ring->desc) |
5317 | return; | ||
5318 | |||
5319 | dma_free_coherent(tx_ring->dev, tx_ring->size, | ||
5320 | tx_ring->desc, tx_ring->dma); | ||
5119 | 5321 | ||
5120 | tx_ring->desc = NULL; | 5322 | tx_ring->desc = NULL; |
5121 | } | 5323 | } |
@@ -5132,28 +5334,28 @@ static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter) | |||
5132 | 5334 | ||
5133 | for (i = 0; i < adapter->num_tx_queues; i++) | 5335 | for (i = 0; i < adapter->num_tx_queues; i++) |
5134 | if (adapter->tx_ring[i]->desc) | 5336 | if (adapter->tx_ring[i]->desc) |
5135 | ixgbe_free_tx_resources(adapter, adapter->tx_ring[i]); | 5337 | ixgbe_free_tx_resources(adapter->tx_ring[i]); |
5136 | } | 5338 | } |
5137 | 5339 | ||
5138 | /** | 5340 | /** |
5139 | * ixgbe_free_rx_resources - Free Rx Resources | 5341 | * ixgbe_free_rx_resources - Free Rx Resources |
5140 | * @adapter: board private structure | ||
5141 | * @rx_ring: ring to clean the resources from | 5342 | * @rx_ring: ring to clean the resources from |
5142 | * | 5343 | * |
5143 | * Free all receive software resources | 5344 | * Free all receive software resources |
5144 | **/ | 5345 | **/ |
5145 | void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter, | 5346 | void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring) |
5146 | struct ixgbe_ring *rx_ring) | ||
5147 | { | 5347 | { |
5148 | struct pci_dev *pdev = adapter->pdev; | 5348 | ixgbe_clean_rx_ring(rx_ring); |
5149 | |||
5150 | ixgbe_clean_rx_ring(adapter, rx_ring); | ||
5151 | 5349 | ||
5152 | vfree(rx_ring->rx_buffer_info); | 5350 | vfree(rx_ring->rx_buffer_info); |
5153 | rx_ring->rx_buffer_info = NULL; | 5351 | rx_ring->rx_buffer_info = NULL; |
5154 | 5352 | ||
5155 | dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, | 5353 | /* if not set, then don't free */ |
5156 | rx_ring->dma); | 5354 | if (!rx_ring->desc) |
5355 | return; | ||
5356 | |||
5357 | dma_free_coherent(rx_ring->dev, rx_ring->size, | ||
5358 | rx_ring->desc, rx_ring->dma); | ||
5157 | 5359 | ||
5158 | rx_ring->desc = NULL; | 5360 | rx_ring->desc = NULL; |
5159 | } | 5361 | } |
@@ -5170,7 +5372,7 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter) | |||
5170 | 5372 | ||
5171 | for (i = 0; i < adapter->num_rx_queues; i++) | 5373 | for (i = 0; i < adapter->num_rx_queues; i++) |
5172 | if (adapter->rx_ring[i]->desc) | 5374 | if (adapter->rx_ring[i]->desc) |
5173 | ixgbe_free_rx_resources(adapter, adapter->rx_ring[i]); | 5375 | ixgbe_free_rx_resources(adapter->rx_ring[i]); |
5174 | } | 5376 | } |
5175 | 5377 | ||
5176 | /** | 5378 | /** |
@@ -5183,6 +5385,7 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter) | |||
5183 | static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) | 5385 | static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) |
5184 | { | 5386 | { |
5185 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | 5387 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
5388 | struct ixgbe_hw *hw = &adapter->hw; | ||
5186 | int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; | 5389 | int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; |
5187 | 5390 | ||
5188 | /* MTU < 68 is an error and causes problems on some kernels */ | 5391 | /* MTU < 68 is an error and causes problems on some kernels */ |
@@ -5193,6 +5396,9 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) | |||
5193 | /* must set new MTU before calling down or up */ | 5396 | /* must set new MTU before calling down or up */ |
5194 | netdev->mtu = new_mtu; | 5397 | netdev->mtu = new_mtu; |
5195 | 5398 | ||
5399 | hw->fc.high_water = FC_HIGH_WATER(max_frame); | ||
5400 | hw->fc.low_water = FC_LOW_WATER(max_frame); | ||
5401 | |||
5196 | if (netif_running(netdev)) | 5402 | if (netif_running(netdev)) |
5197 | ixgbe_reinit_locked(adapter); | 5403 | ixgbe_reinit_locked(adapter); |
5198 | 5404 | ||
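This hunk and the ixgbe_sw_init() change earlier in the patch feed the same quantity into FC_HIGH_WATER()/FC_LOW_WATER(): the maximum on-wire frame derived from the MTU, so the pause thresholds now track frame size instead of the old fixed register defaults. The frame accounting itself is just header plus FCS on top of the MTU:

    #include <stdio.h>

    #define ETH_HLEN    14      /* Ethernet header */
    #define ETH_FCS_LEN  4      /* frame check sequence */

    int main(void)
    {
        int new_mtu = 1500;     /* default Ethernet MTU */
        int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

        printf("max_frame = %d\n", max_frame);      /* 1518 */
        return 0;
    }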
@@ -5288,8 +5494,8 @@ static int ixgbe_close(struct net_device *netdev) | |||
5288 | #ifdef CONFIG_PM | 5494 | #ifdef CONFIG_PM |
5289 | static int ixgbe_resume(struct pci_dev *pdev) | 5495 | static int ixgbe_resume(struct pci_dev *pdev) |
5290 | { | 5496 | { |
5291 | struct net_device *netdev = pci_get_drvdata(pdev); | 5497 | struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); |
5292 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | 5498 | struct net_device *netdev = adapter->netdev; |
5293 | u32 err; | 5499 | u32 err; |
5294 | 5500 | ||
5295 | pci_set_power_state(pdev, PCI_D0); | 5501 | pci_set_power_state(pdev, PCI_D0); |
@@ -5320,7 +5526,7 @@ static int ixgbe_resume(struct pci_dev *pdev) | |||
5320 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); | 5526 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); |
5321 | 5527 | ||
5322 | if (netif_running(netdev)) { | 5528 | if (netif_running(netdev)) { |
5323 | err = ixgbe_open(adapter->netdev); | 5529 | err = ixgbe_open(netdev); |
5324 | if (err) | 5530 | if (err) |
5325 | return err; | 5531 | return err; |
5326 | } | 5532 | } |
@@ -5333,8 +5539,8 @@ static int ixgbe_resume(struct pci_dev *pdev) | |||
5333 | 5539 | ||
5334 | static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) | 5540 | static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) |
5335 | { | 5541 | { |
5336 | struct net_device *netdev = pci_get_drvdata(pdev); | 5542 | struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); |
5337 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | 5543 | struct net_device *netdev = adapter->netdev; |
5338 | struct ixgbe_hw *hw = &adapter->hw; | 5544 | struct ixgbe_hw *hw = &adapter->hw; |
5339 | u32 ctrl, fctrl; | 5545 | u32 ctrl, fctrl; |
5340 | u32 wufc = adapter->wol; | 5546 | u32 wufc = adapter->wol; |
@@ -5351,6 +5557,8 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) | |||
5351 | ixgbe_free_all_rx_resources(adapter); | 5557 | ixgbe_free_all_rx_resources(adapter); |
5352 | } | 5558 | } |
5353 | 5559 | ||
5560 | ixgbe_clear_interrupt_scheme(adapter); | ||
5561 | |||
5354 | #ifdef CONFIG_PM | 5562 | #ifdef CONFIG_PM |
5355 | retval = pci_save_state(pdev); | 5563 | retval = pci_save_state(pdev); |
5356 | if (retval) | 5564 | if (retval) |
@@ -5377,15 +5585,20 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) | |||
5377 | IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0); | 5585 | IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0); |
5378 | } | 5586 | } |
5379 | 5587 | ||
5380 | if (wufc && hw->mac.type == ixgbe_mac_82599EB) | 5588 | switch (hw->mac.type) { |
5381 | pci_wake_from_d3(pdev, true); | 5589 | case ixgbe_mac_82598EB: |
5382 | else | ||
5383 | pci_wake_from_d3(pdev, false); | 5590 | pci_wake_from_d3(pdev, false); |
5591 | break; | ||
5592 | case ixgbe_mac_82599EB: | ||
5593 | case ixgbe_mac_X540: | ||
5594 | pci_wake_from_d3(pdev, !!wufc); | ||
5595 | break; | ||
5596 | default: | ||
5597 | break; | ||
5598 | } | ||
5384 | 5599 | ||
5385 | *enable_wake = !!wufc; | 5600 | *enable_wake = !!wufc; |
5386 | 5601 | ||
5387 | ixgbe_clear_interrupt_scheme(adapter); | ||
5388 | |||
5389 | ixgbe_release_hw_control(adapter); | 5602 | ixgbe_release_hw_control(adapter); |
5390 | 5603 | ||
5391 | pci_disable_device(pdev); | 5604 | pci_disable_device(pdev); |
@@ -5434,10 +5647,12 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) | |||
5434 | { | 5647 | { |
5435 | struct net_device *netdev = adapter->netdev; | 5648 | struct net_device *netdev = adapter->netdev; |
5436 | struct ixgbe_hw *hw = &adapter->hw; | 5649 | struct ixgbe_hw *hw = &adapter->hw; |
5650 | struct ixgbe_hw_stats *hwstats = &adapter->stats; | ||
5437 | u64 total_mpc = 0; | 5651 | u64 total_mpc = 0; |
5438 | u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot; | 5652 | u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot; |
5439 | u64 non_eop_descs = 0, restart_queue = 0; | 5653 | u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0; |
5440 | struct ixgbe_hw_stats *hwstats = &adapter->stats; | 5654 | u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0; |
5655 | u64 bytes = 0, packets = 0; | ||
5441 | 5656 | ||
5442 | if (test_bit(__IXGBE_DOWN, &adapter->state) || | 5657 | if (test_bit(__IXGBE_DOWN, &adapter->state) || |
5443 | test_bit(__IXGBE_RESETTING, &adapter->state)) | 5658 | test_bit(__IXGBE_RESETTING, &adapter->state)) |
@@ -5450,21 +5665,41 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) | |||
5450 | adapter->hw_rx_no_dma_resources += | 5665 | adapter->hw_rx_no_dma_resources += |
5451 | IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); | 5666 | IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); |
5452 | for (i = 0; i < adapter->num_rx_queues; i++) { | 5667 | for (i = 0; i < adapter->num_rx_queues; i++) { |
5453 | rsc_count += adapter->rx_ring[i]->rsc_count; | 5668 | rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count; |
5454 | rsc_flush += adapter->rx_ring[i]->rsc_flush; | 5669 | rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush; |
5455 | } | 5670 | } |
5456 | adapter->rsc_total_count = rsc_count; | 5671 | adapter->rsc_total_count = rsc_count; |
5457 | adapter->rsc_total_flush = rsc_flush; | 5672 | adapter->rsc_total_flush = rsc_flush; |
5458 | } | 5673 | } |
5459 | 5674 | ||
5675 | for (i = 0; i < adapter->num_rx_queues; i++) { | ||
5676 | struct ixgbe_ring *rx_ring = adapter->rx_ring[i]; | ||
5677 | non_eop_descs += rx_ring->rx_stats.non_eop_descs; | ||
5678 | alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed; | ||
5679 | alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed; | ||
5680 | bytes += rx_ring->stats.bytes; | ||
5681 | packets += rx_ring->stats.packets; | ||
5682 | } | ||
5683 | adapter->non_eop_descs = non_eop_descs; | ||
5684 | adapter->alloc_rx_page_failed = alloc_rx_page_failed; | ||
5685 | adapter->alloc_rx_buff_failed = alloc_rx_buff_failed; | ||
5686 | netdev->stats.rx_bytes = bytes; | ||
5687 | netdev->stats.rx_packets = packets; | ||
5688 | |||
5689 | bytes = 0; | ||
5690 | packets = 0; | ||
5460 | /* gather some stats to the adapter struct that are per queue */ | 5691 | /* gather some stats to the adapter struct that are per queue */ |
5461 | for (i = 0; i < adapter->num_tx_queues; i++) | 5692 | for (i = 0; i < adapter->num_tx_queues; i++) { |
5462 | restart_queue += adapter->tx_ring[i]->restart_queue; | 5693 | struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; |
5694 | restart_queue += tx_ring->tx_stats.restart_queue; | ||
5695 | tx_busy += tx_ring->tx_stats.tx_busy; | ||
5696 | bytes += tx_ring->stats.bytes; | ||
5697 | packets += tx_ring->stats.packets; | ||
5698 | } | ||
5463 | adapter->restart_queue = restart_queue; | 5699 | adapter->restart_queue = restart_queue; |
5464 | 5700 | adapter->tx_busy = tx_busy; | |
5465 | for (i = 0; i < adapter->num_rx_queues; i++) | 5701 | netdev->stats.tx_bytes = bytes; |
5466 | non_eop_descs += adapter->rx_ring[i]->non_eop_descs; | 5702 | netdev->stats.tx_packets = packets; |
5467 | adapter->non_eop_descs = non_eop_descs; | ||
5468 | 5703 | ||
5469 | hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); | 5704 | hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); |
5470 | for (i = 0; i < 8; i++) { | 5705 | for (i = 0; i < 8; i++) { |
@@ -5479,17 +5714,18 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) | |||
5479 | hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i)); | 5714 | hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i)); |
5480 | hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i)); | 5715 | hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i)); |
5481 | hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i)); | 5716 | hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i)); |
5482 | if (hw->mac.type == ixgbe_mac_82599EB) { | 5717 | switch (hw->mac.type) { |
5483 | hwstats->pxonrxc[i] += | 5718 | case ixgbe_mac_82598EB: |
5484 | IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); | ||
5485 | hwstats->pxoffrxc[i] += | ||
5486 | IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i)); | ||
5487 | hwstats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); | ||
5488 | } else { | ||
5489 | hwstats->pxonrxc[i] += | 5719 | hwstats->pxonrxc[i] += |
5490 | IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); | 5720 | IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); |
5491 | hwstats->pxoffrxc[i] += | 5721 | break; |
5492 | IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); | 5722 | case ixgbe_mac_82599EB: |
5723 | case ixgbe_mac_X540: | ||
5724 | hwstats->pxonrxc[i] += | ||
5725 | IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); | ||
5726 | break; | ||
5727 | default: | ||
5728 | break; | ||
5493 | } | 5729 | } |
5494 | hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); | 5730 | hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); |
5495 | hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); | 5731 | hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); |
@@ -5498,21 +5734,25 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) | |||
5498 | /* work around hardware counting issue */ | 5734 | /* work around hardware counting issue */ |
5499 | hwstats->gprc -= missed_rx; | 5735 | hwstats->gprc -= missed_rx; |
5500 | 5736 | ||
5737 | ixgbe_update_xoff_received(adapter); | ||
5738 | |||
5501 | /* 82598 hardware only has a 32 bit counter in the high register */ | 5739 | /* 82598 hardware only has a 32 bit counter in the high register */ |
5502 | if (hw->mac.type == ixgbe_mac_82599EB) { | 5740 | switch (hw->mac.type) { |
5503 | u64 tmp; | 5741 | case ixgbe_mac_82598EB: |
5742 | hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); | ||
5743 | hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH); | ||
5744 | hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH); | ||
5745 | hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH); | ||
5746 | break; | ||
5747 | case ixgbe_mac_82599EB: | ||
5748 | case ixgbe_mac_X540: | ||
5504 | hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL); | 5749 | hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL); |
5505 | tmp = IXGBE_READ_REG(hw, IXGBE_GORCH) & 0xF; | 5750 | IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */ |
5506 | /* 4 high bits of GORC */ | ||
5507 | hwstats->gorc += (tmp << 32); | ||
5508 | hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL); | 5751 | hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL); |
5509 | tmp = IXGBE_READ_REG(hw, IXGBE_GOTCH) & 0xF; | 5752 | IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */ |
5510 | /* 4 high bits of GOTC */ | ||
5511 | hwstats->gotc += (tmp << 32); | ||
5512 | hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL); | 5753 | hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL); |
5513 | IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */ | 5754 | IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */ |
5514 | hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); | 5755 | hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); |
5515 | hwstats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); | ||
5516 | hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); | 5756 | hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); |
5517 | hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS); | 5757 | hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS); |
5518 | #ifdef IXGBE_FCOE | 5758 | #ifdef IXGBE_FCOE |
@@ -5523,12 +5763,9 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) | |||
5523 | hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); | 5763 | hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); |
5524 | hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); | 5764 | hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); |
5525 | #endif /* IXGBE_FCOE */ | 5765 | #endif /* IXGBE_FCOE */ |
5526 | } else { | 5766 | break; |
5527 | hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); | 5767 | default: |
5528 | hwstats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); | 5768 | break; |
5529 | hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH); | ||
5530 | hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH); | ||
5531 | hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH); | ||
5532 | } | 5769 | } |
5533 | bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); | 5770 | bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); |
5534 | hwstats->bprc += bprc; | 5771 | hwstats->bprc += bprc; |
@@ -5701,8 +5938,8 @@ static void ixgbe_fdir_reinit_task(struct work_struct *work) | |||
5701 | 5938 | ||
5702 | if (ixgbe_reinit_fdir_tables_82599(hw) == 0) { | 5939 | if (ixgbe_reinit_fdir_tables_82599(hw) == 0) { |
5703 | for (i = 0; i < adapter->num_tx_queues; i++) | 5940 | for (i = 0; i < adapter->num_tx_queues; i++) |
5704 | set_bit(__IXGBE_FDIR_INIT_DONE, | 5941 | set_bit(__IXGBE_TX_FDIR_INIT_DONE, |
5705 | &(adapter->tx_ring[i]->reinit_state)); | 5942 | &(adapter->tx_ring[i]->state)); |
5706 | } else { | 5943 | } else { |
5707 | e_err(probe, "failed to finish FDIR re-initialization, " | 5944 | e_err(probe, "failed to finish FDIR re-initialization, " |
5708 | "ignored adding FDIR ATR filters\n"); | 5945 | "ignored adding FDIR ATR filters\n"); |
@@ -5764,17 +6001,27 @@ static void ixgbe_watchdog_task(struct work_struct *work) | |||
5764 | if (!netif_carrier_ok(netdev)) { | 6001 | if (!netif_carrier_ok(netdev)) { |
5765 | bool flow_rx, flow_tx; | 6002 | bool flow_rx, flow_tx; |
5766 | 6003 | ||
5767 | if (hw->mac.type == ixgbe_mac_82599EB) { | 6004 | switch (hw->mac.type) { |
5768 | u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN); | 6005 | case ixgbe_mac_82598EB: { |
5769 | u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG); | ||
5770 | flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE); | ||
5771 | flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X); | ||
5772 | } else { | ||
5773 | u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL); | 6006 | u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL); |
5774 | u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS); | 6007 | u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS); |
5775 | flow_rx = !!(frctl & IXGBE_FCTRL_RFCE); | 6008 | flow_rx = !!(frctl & IXGBE_FCTRL_RFCE); |
5776 | flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X); | 6009 | flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X); |
5777 | } | 6010 | } |
6011 | break; | ||
6012 | case ixgbe_mac_82599EB: | ||
6013 | case ixgbe_mac_X540: { | ||
6014 | u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN); | ||
6015 | u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG); | ||
6016 | flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE); | ||
6017 | flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X); | ||
6018 | } | ||
6019 | break; | ||
6020 | default: | ||
6021 | flow_tx = false; | ||
6022 | flow_rx = false; | ||
6023 | break; | ||
6024 | } | ||
5778 | 6025 | ||
5779 | e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", | 6026 | e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", |
5780 | (link_speed == IXGBE_LINK_SPEED_10GB_FULL ? | 6027 | (link_speed == IXGBE_LINK_SPEED_10GB_FULL ? |
@@ -5788,7 +6035,10 @@ static void ixgbe_watchdog_task(struct work_struct *work) | |||
5788 | netif_carrier_on(netdev); | 6035 | netif_carrier_on(netdev); |
5789 | } else { | 6036 | } else { |
5790 | /* Force detection of hung controller */ | 6037 | /* Force detection of hung controller */ |
5791 | adapter->detect_tx_hung = true; | 6038 | for (i = 0; i < adapter->num_tx_queues; i++) { |
6039 | tx_ring = adapter->tx_ring[i]; | ||
6040 | set_check_for_tx_hang(tx_ring); | ||
6041 | } | ||
5792 | } | 6042 | } |
5793 | } else { | 6043 | } else { |
5794 | adapter->link_up = false; | 6044 | adapter->link_up = false; |
@@ -6000,15 +6250,17 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter, | |||
6000 | static int ixgbe_tx_map(struct ixgbe_adapter *adapter, | 6250 | static int ixgbe_tx_map(struct ixgbe_adapter *adapter, |
6001 | struct ixgbe_ring *tx_ring, | 6251 | struct ixgbe_ring *tx_ring, |
6002 | struct sk_buff *skb, u32 tx_flags, | 6252 | struct sk_buff *skb, u32 tx_flags, |
6003 | unsigned int first) | 6253 | unsigned int first, const u8 hdr_len) |
6004 | { | 6254 | { |
6005 | struct pci_dev *pdev = adapter->pdev; | 6255 | struct device *dev = tx_ring->dev; |
6006 | struct ixgbe_tx_buffer *tx_buffer_info; | 6256 | struct ixgbe_tx_buffer *tx_buffer_info; |
6007 | unsigned int len; | 6257 | unsigned int len; |
6008 | unsigned int total = skb->len; | 6258 | unsigned int total = skb->len; |
6009 | unsigned int offset = 0, size, count = 0, i; | 6259 | unsigned int offset = 0, size, count = 0, i; |
6010 | unsigned int nr_frags = skb_shinfo(skb)->nr_frags; | 6260 | unsigned int nr_frags = skb_shinfo(skb)->nr_frags; |
6011 | unsigned int f; | 6261 | unsigned int f; |
6262 | unsigned int bytecount = skb->len; | ||
6263 | u16 gso_segs = 1; | ||
6012 | 6264 | ||
6013 | i = tx_ring->next_to_use; | 6265 | i = tx_ring->next_to_use; |
6014 | 6266 | ||
@@ -6023,10 +6275,10 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter, | |||
6023 | 6275 | ||
6024 | tx_buffer_info->length = size; | 6276 | tx_buffer_info->length = size; |
6025 | tx_buffer_info->mapped_as_page = false; | 6277 | tx_buffer_info->mapped_as_page = false; |
6026 | tx_buffer_info->dma = dma_map_single(&pdev->dev, | 6278 | tx_buffer_info->dma = dma_map_single(dev, |
6027 | skb->data + offset, | 6279 | skb->data + offset, |
6028 | size, DMA_TO_DEVICE); | 6280 | size, DMA_TO_DEVICE); |
6029 | if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma)) | 6281 | if (dma_mapping_error(dev, tx_buffer_info->dma)) |
6030 | goto dma_error; | 6282 | goto dma_error; |
6031 | tx_buffer_info->time_stamp = jiffies; | 6283 | tx_buffer_info->time_stamp = jiffies; |
6032 | tx_buffer_info->next_to_watch = i; | 6284 | tx_buffer_info->next_to_watch = i; |
@@ -6059,12 +6311,12 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter, | |||
6059 | size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD); | 6311 | size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD); |
6060 | 6312 | ||
6061 | tx_buffer_info->length = size; | 6313 | tx_buffer_info->length = size; |
6062 | tx_buffer_info->dma = dma_map_page(&adapter->pdev->dev, | 6314 | tx_buffer_info->dma = dma_map_page(dev, |
6063 | frag->page, | 6315 | frag->page, |
6064 | offset, size, | 6316 | offset, size, |
6065 | DMA_TO_DEVICE); | 6317 | DMA_TO_DEVICE); |
6066 | tx_buffer_info->mapped_as_page = true; | 6318 | tx_buffer_info->mapped_as_page = true; |
6067 | if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma)) | 6319 | if (dma_mapping_error(dev, tx_buffer_info->dma)) |
6068 | goto dma_error; | 6320 | goto dma_error; |
6069 | tx_buffer_info->time_stamp = jiffies; | 6321 | tx_buffer_info->time_stamp = jiffies; |
6070 | tx_buffer_info->next_to_watch = i; | 6322 | tx_buffer_info->next_to_watch = i; |
@@ -6078,6 +6330,19 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter, | |||
6078 | break; | 6330 | break; |
6079 | } | 6331 | } |
6080 | 6332 | ||
6333 | if (tx_flags & IXGBE_TX_FLAGS_TSO) | ||
6334 | gso_segs = skb_shinfo(skb)->gso_segs; | ||
6335 | #ifdef IXGBE_FCOE | ||
6336 | /* adjust for FCoE Sequence Offload */ | ||
6337 | else if (tx_flags & IXGBE_TX_FLAGS_FSO) | ||
6338 | gso_segs = DIV_ROUND_UP(skb->len - hdr_len, | ||
6339 | skb_shinfo(skb)->gso_size); | ||
6340 | #endif /* IXGBE_FCOE */ | ||
6341 | bytecount += (gso_segs - 1) * hdr_len; | ||
6342 | |||
6343 | /* multiply data chunks by size of headers */ | ||
6344 | tx_ring->tx_buffer_info[i].bytecount = bytecount; | ||
6345 | tx_ring->tx_buffer_info[i].gso_segs = gso_segs; | ||
6081 | tx_ring->tx_buffer_info[i].skb = skb; | 6346 | tx_ring->tx_buffer_info[i].skb = skb; |
6082 | tx_ring->tx_buffer_info[first].next_to_watch = i; | 6347 | tx_ring->tx_buffer_info[first].next_to_watch = i; |
6083 | 6348 | ||
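The new bytecount/gso_segs bookkeeping charges the protocol headers once per wire segment instead of once per skb: a TSO frame of L bytes carrying H header bytes in n segments really transmits L + (n - 1) * H bytes. Worked numbers (the FCoE branch derives n the same way DIV_ROUND_UP does; the values here are only an example):

    #include <stdio.h>

    int main(void)
    {
        unsigned int skb_len  = 65226;  /* headers + TSO payload */
        unsigned int hdr_len  = 66;     /* eth + ip + tcp w/ options */
        unsigned int gso_size = 1448;   /* payload bytes per segment */
        unsigned int gso_segs =
            (skb_len - hdr_len + gso_size - 1) / gso_size;      /* 45 */
        unsigned int bytecount = skb_len + (gso_segs - 1) * hdr_len;

        printf("segs=%u, wire bytes=%u\n", gso_segs, bytecount); /* 68130 */
        return 0;
    }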
@@ -6099,14 +6364,13 @@ dma_error: | |||
6099 | i += tx_ring->count; | 6364 | i += tx_ring->count; |
6100 | i--; | 6365 | i--; |
6101 | tx_buffer_info = &tx_ring->tx_buffer_info[i]; | 6366 | tx_buffer_info = &tx_ring->tx_buffer_info[i]; |
6102 | ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info); | 6367 | ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); |
6103 | } | 6368 | } |
6104 | 6369 | ||
6105 | return 0; | 6370 | return 0; |
6106 | } | 6371 | } |
6107 | 6372 | ||
6108 | static void ixgbe_tx_queue(struct ixgbe_adapter *adapter, | 6373 | static void ixgbe_tx_queue(struct ixgbe_ring *tx_ring, |
6109 | struct ixgbe_ring *tx_ring, | ||
6110 | int tx_flags, int count, u32 paylen, u8 hdr_len) | 6374 | int tx_flags, int count, u32 paylen, u8 hdr_len) |
6111 | { | 6375 | { |
6112 | union ixgbe_adv_tx_desc *tx_desc = NULL; | 6376 | union ixgbe_adv_tx_desc *tx_desc = NULL; |
@@ -6171,60 +6435,46 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter, | |||
6171 | wmb(); | 6435 | wmb(); |
6172 | 6436 | ||
6173 | tx_ring->next_to_use = i; | 6437 | tx_ring->next_to_use = i; |
6174 | writel(i, adapter->hw.hw_addr + tx_ring->tail); | 6438 | writel(i, tx_ring->tail); |
6175 | } | 6439 | } |
6176 | 6440 | ||
6177 | static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb, | 6441 | static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb, |
6178 | int queue, u32 tx_flags, __be16 protocol) | 6442 | u8 queue, u32 tx_flags, __be16 protocol) |
6179 | { | 6443 | { |
6180 | struct ixgbe_atr_input atr_input; | 6444 | struct ixgbe_atr_input atr_input; |
6181 | struct tcphdr *th; | ||
6182 | struct iphdr *iph = ip_hdr(skb); | 6445 | struct iphdr *iph = ip_hdr(skb); |
6183 | struct ethhdr *eth = (struct ethhdr *)skb->data; | 6446 | struct ethhdr *eth = (struct ethhdr *)skb->data; |
6184 | u16 vlan_id, src_port, dst_port, flex_bytes; | 6447 | struct tcphdr *th; |
6185 | u32 src_ipv4_addr, dst_ipv4_addr; | 6448 | u16 vlan_id; |
6186 | u8 l4type = 0; | ||
6187 | 6449 | ||
6188 | /* Right now, we support IPv4 only */ | 6450 | /* Right now, we support IPv4 w/ TCP only */ |
6189 | if (protocol != htons(ETH_P_IP)) | 6451 | if (protocol != htons(ETH_P_IP) || |
6190 | return; | 6452 | iph->protocol != IPPROTO_TCP) |
6191 | /* check if we're UDP or TCP */ | ||
6192 | if (iph->protocol == IPPROTO_TCP) { | ||
6193 | th = tcp_hdr(skb); | ||
6194 | src_port = th->source; | ||
6195 | dst_port = th->dest; | ||
6196 | l4type |= IXGBE_ATR_L4TYPE_TCP; | ||
6197 | /* l4type IPv4 type is 0, no need to assign */ | ||
6198 | } else { | ||
6199 | /* Unsupported L4 header, just bail here */ | ||
6200 | return; | 6453 | return; |
6201 | } | ||
6202 | 6454 | ||
6203 | memset(&atr_input, 0, sizeof(struct ixgbe_atr_input)); | 6455 | memset(&atr_input, 0, sizeof(struct ixgbe_atr_input)); |
6204 | 6456 | ||
6205 | vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >> | 6457 | vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >> |
6206 | IXGBE_TX_FLAGS_VLAN_SHIFT; | 6458 | IXGBE_TX_FLAGS_VLAN_SHIFT; |
6207 | src_ipv4_addr = iph->saddr; | 6459 | |
6208 | dst_ipv4_addr = iph->daddr; | 6460 | th = tcp_hdr(skb); |
6209 | flex_bytes = eth->h_proto; | ||
6210 | 6461 | ||
6211 | ixgbe_atr_set_vlan_id_82599(&atr_input, vlan_id); | 6462 | ixgbe_atr_set_vlan_id_82599(&atr_input, vlan_id); |
6212 | ixgbe_atr_set_src_port_82599(&atr_input, dst_port); | 6463 | ixgbe_atr_set_src_port_82599(&atr_input, th->dest); |
6213 | ixgbe_atr_set_dst_port_82599(&atr_input, src_port); | 6464 | ixgbe_atr_set_dst_port_82599(&atr_input, th->source); |
6214 | ixgbe_atr_set_flex_byte_82599(&atr_input, flex_bytes); | 6465 | ixgbe_atr_set_flex_byte_82599(&atr_input, eth->h_proto); |
6215 | ixgbe_atr_set_l4type_82599(&atr_input, l4type); | 6466 | ixgbe_atr_set_l4type_82599(&atr_input, IXGBE_ATR_L4TYPE_TCP); |
6216 | /* src and dst are inverted, think how the receiver sees them */ | 6467 | /* src and dst are inverted, think how the receiver sees them */ |
6217 | ixgbe_atr_set_src_ipv4_82599(&atr_input, dst_ipv4_addr); | 6468 | ixgbe_atr_set_src_ipv4_82599(&atr_input, iph->daddr); |
6218 | ixgbe_atr_set_dst_ipv4_82599(&atr_input, src_ipv4_addr); | 6469 | ixgbe_atr_set_dst_ipv4_82599(&atr_input, iph->saddr); |
6219 | 6470 | ||
6220 | /* This assumes the Rx queue and Tx queue are bound to the same CPU */ | 6471 | /* This assumes the Rx queue and Tx queue are bound to the same CPU */ |
6221 | ixgbe_fdir_add_signature_filter_82599(&adapter->hw, &atr_input, queue); | 6472 | ixgbe_fdir_add_signature_filter_82599(&adapter->hw, &atr_input, queue); |
6222 | } | 6473 | } |
6223 | 6474 | ||
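The "src and dst are inverted" comment is the heart of the simplified ixgbe_atr(): the signature filter is matched against received packets, so the transmit 4-tuple has to be programmed mirrored for the return traffic of the flow to land on the sending CPU's queue. As a tiny model (hypothetical struct, not driver code):

    /* For a Tx flow (saddr:sport -> daddr:dport), the replies we want
     * steered arrive as (daddr:dport -> saddr:sport), which is what
     * the hardware filter must match. */
    struct tuple {
        unsigned int   saddr, daddr;
        unsigned short sport, dport;
    };

    static struct tuple mirror_for_rx(struct tuple tx)
    {
        struct tuple rx;

        rx.saddr = tx.daddr;    /* their source is our destination */
        rx.daddr = tx.saddr;
        rx.sport = tx.dport;
        rx.dport = tx.sport;
        return rx;
    }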
6224 | static int __ixgbe_maybe_stop_tx(struct net_device *netdev, | 6475 | static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size) |
6225 | struct ixgbe_ring *tx_ring, int size) | ||
6226 | { | 6476 | { |
6227 | netif_stop_subqueue(netdev, tx_ring->queue_index); | 6477 | netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); |
6228 | /* Herbert's original patch had: | 6478 | /* Herbert's original patch had: |
6229 | * smp_mb__after_netif_stop_queue(); | 6479 | * smp_mb__after_netif_stop_queue(); |
6230 | * but since that doesn't exist yet, just open code it. */ | 6480 | * but since that doesn't exist yet, just open code it. */ |
@@ -6236,17 +6486,16 @@ static int __ixgbe_maybe_stop_tx(struct net_device *netdev, | |||
6236 | return -EBUSY; | 6486 | return -EBUSY; |
6237 | 6487 | ||
6238 | /* A reprieve! - use start_queue because it doesn't call schedule */ | 6488 | /* A reprieve! - use start_queue because it doesn't call schedule */ |
6239 | netif_start_subqueue(netdev, tx_ring->queue_index); | 6489 | netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); |
6240 | ++tx_ring->restart_queue; | 6490 | ++tx_ring->tx_stats.restart_queue; |
6241 | return 0; | 6491 | return 0; |
6242 | } | 6492 | } |
6243 | 6493 | ||
6244 | static int ixgbe_maybe_stop_tx(struct net_device *netdev, | 6494 | static int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size) |
6245 | struct ixgbe_ring *tx_ring, int size) | ||
6246 | { | 6495 | { |
6247 | if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size)) | 6496 | if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size)) |
6248 | return 0; | 6497 | return 0; |
6249 | return __ixgbe_maybe_stop_tx(netdev, tx_ring, size); | 6498 | return __ixgbe_maybe_stop_tx(tx_ring, size); |
6250 | } | 6499 | } |
6251 | 6500 | ||
6252 | static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) | 6501 | static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) |
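__ixgbe_maybe_stop_tx() open-codes a classic producer/consumer handshake: stop the queue first, issue a full barrier, then re-check the free descriptor count, because the Tx cleanup path may have freed descriptors between the caller's first check and the stop. A C11 model of the same shape (all names are stand-ins):

    #include <stdatomic.h>

    static atomic_int  unused_descs;    /* incremented by the Tx cleaner */
    static atomic_bool queue_stopped;

    static int maybe_stop_tx(int needed)
    {
        if (atomic_load(&unused_descs) >= needed)
            return 0;

        atomic_store(&queue_stopped, 1);            /* stop first */
        atomic_thread_fence(memory_order_seq_cst);  /* ~ smp_mb() */

        /* The cleaner may have run between the check and the stop;
         * re-check so the queue never stalls with room available. */
        if (atomic_load(&unused_descs) < needed)
            return -1;                              /* ~ -EBUSY */

        atomic_store(&queue_stopped, 0);            /* a reprieve */
        return 0;
    }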
@@ -6291,10 +6540,11 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) | |||
6291 | return skb_tx_hash(dev, skb); | 6540 | return skb_tx_hash(dev, skb); |
6292 | } | 6541 | } |
6293 | 6542 | ||
6294 | netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev, | 6543 | netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, |
6295 | struct ixgbe_adapter *adapter, | 6544 | struct ixgbe_adapter *adapter, |
6296 | struct ixgbe_ring *tx_ring) | 6545 | struct ixgbe_ring *tx_ring) |
6297 | { | 6546 | { |
6547 | struct net_device *netdev = tx_ring->netdev; | ||
6298 | struct netdev_queue *txq; | 6548 | struct netdev_queue *txq; |
6299 | unsigned int first; | 6549 | unsigned int first; |
6300 | unsigned int tx_flags = 0; | 6550 | unsigned int tx_flags = 0; |
@@ -6352,8 +6602,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev | |||
6352 | for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) | 6602 | for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) |
6353 | count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); | 6603 | count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); |
6354 | 6604 | ||
6355 | if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) { | 6605 | if (ixgbe_maybe_stop_tx(tx_ring, count)) { |
6356 | adapter->tx_busy++; | 6606 | tx_ring->tx_stats.tx_busy++; |
6357 | return NETDEV_TX_BUSY; | 6607 | return NETDEV_TX_BUSY; |
6358 | } | 6608 | } |
6359 | 6609 | ||
@@ -6387,14 +6637,14 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev | |||
6387 | tx_flags |= IXGBE_TX_FLAGS_CSUM; | 6637 | tx_flags |= IXGBE_TX_FLAGS_CSUM; |
6388 | } | 6638 | } |
6389 | 6639 | ||
6390 | count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first); | 6640 | count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first, hdr_len); |
6391 | if (count) { | 6641 | if (count) { |
6392 | /* add the ATR filter if ATR is on */ | 6642 | /* add the ATR filter if ATR is on */ |
6393 | if (tx_ring->atr_sample_rate) { | 6643 | if (tx_ring->atr_sample_rate) { |
6394 | ++tx_ring->atr_count; | 6644 | ++tx_ring->atr_count; |
6395 | if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) && | 6645 | if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) && |
6396 | test_bit(__IXGBE_FDIR_INIT_DONE, | 6646 | test_bit(__IXGBE_TX_FDIR_INIT_DONE, |
6397 | &tx_ring->reinit_state)) { | 6647 | &tx_ring->state)) { |
6398 | ixgbe_atr(adapter, skb, tx_ring->queue_index, | 6648 | ixgbe_atr(adapter, skb, tx_ring->queue_index, |
6399 | tx_flags, protocol); | 6649 | tx_flags, protocol); |
6400 | tx_ring->atr_count = 0; | 6650 | tx_ring->atr_count = 0; |
@@ -6403,9 +6653,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev | |||
6403 | txq = netdev_get_tx_queue(netdev, tx_ring->queue_index); | 6653 | txq = netdev_get_tx_queue(netdev, tx_ring->queue_index); |
6404 | txq->tx_bytes += skb->len; | 6654 | txq->tx_bytes += skb->len; |
6405 | txq->tx_packets++; | 6655 | txq->tx_packets++; |
6406 | ixgbe_tx_queue(adapter, tx_ring, tx_flags, count, skb->len, | 6656 | ixgbe_tx_queue(tx_ring, tx_flags, count, skb->len, hdr_len); |
6407 | hdr_len); | 6657 | ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED); |
6408 | ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED); | ||
6409 | 6658 | ||
6410 | } else { | 6659 | } else { |
6411 | dev_kfree_skb_any(skb); | 6660 | dev_kfree_skb_any(skb); |
@@ -6422,7 +6671,7 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netd | |||
6422 | struct ixgbe_ring *tx_ring; | 6671 | struct ixgbe_ring *tx_ring; |
6423 | 6672 | ||
6424 | tx_ring = adapter->tx_ring[skb->queue_mapping]; | 6673 | tx_ring = adapter->tx_ring[skb->queue_mapping]; |
6425 | return ixgbe_xmit_frame_ring(skb, netdev, adapter, tx_ring); | 6674 | return ixgbe_xmit_frame_ring(skb, adapter, tx_ring); |
6426 | } | 6675 | } |
6427 | 6676 | ||
6428 | /** | 6677 | /** |
@@ -6563,20 +6812,23 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev, | |||
6563 | 6812 | ||
6564 | /* accurate rx/tx bytes/packets stats */ | 6813 | /* accurate rx/tx bytes/packets stats */ |
6565 | dev_txq_stats_fold(netdev, stats); | 6814 | dev_txq_stats_fold(netdev, stats); |
6815 | rcu_read_lock(); | ||
6566 | for (i = 0; i < adapter->num_rx_queues; i++) { | 6816 | for (i = 0; i < adapter->num_rx_queues; i++) { |
6567 | struct ixgbe_ring *ring = adapter->rx_ring[i]; | 6817 | struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]); |
6568 | u64 bytes, packets; | 6818 | u64 bytes, packets; |
6569 | unsigned int start; | 6819 | unsigned int start; |
6570 | 6820 | ||
6571 | do { | 6821 | if (ring) { |
6572 | start = u64_stats_fetch_begin_bh(&ring->syncp); | 6822 | do { |
6573 | packets = ring->stats.packets; | 6823 | start = u64_stats_fetch_begin_bh(&ring->syncp); |
6574 | bytes = ring->stats.bytes; | 6824 | packets = ring->stats.packets; |
6575 | } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); | 6825 | bytes = ring->stats.bytes; |
6576 | stats->rx_packets += packets; | 6826 | } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); |
6577 | stats->rx_bytes += bytes; | 6827 | stats->rx_packets += packets; |
6828 | stats->rx_bytes += bytes; | ||
6829 | } | ||
6578 | } | 6830 | } |
6579 | 6831 | rcu_read_unlock(); | |
6580 | /* following stats updated by ixgbe_watchdog_task() */ | 6832 | /* following stats updated by ixgbe_watchdog_task() */ |
6581 | stats->multicast = netdev->stats.multicast; | 6833 | stats->multicast = netdev->stats.multicast; |
6582 | stats->rx_errors = netdev->stats.rx_errors; | 6834 | stats->rx_errors = netdev->stats.rx_errors; |
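The reworked stats path combines two lockless schemes: RCU protects the ring pointer itself (hence ACCESS_ONCE() plus the NULL check, pairing with the call_rcu() free earlier in this patch), while the u64_stats seqcount makes the 64-bit bytes/packets pair read consistently on 32-bit hosts. The retry loop, modelled in C11 with stand-in types (not the kernel helpers):

    #include <stdatomic.h>

    struct ring_stats {
        atomic_uint seq;                /* odd while a writer is mid-update */
        unsigned long long bytes, packets;
    };

    static void read_stats(struct ring_stats *s,
                           unsigned long long *bytes,
                           unsigned long long *packets)
    {
        unsigned int start;

        do {
            start = atomic_load(&s->seq);
            *bytes = s->bytes;
            *packets = s->packets;
            /* retry if a writer was active or slipped in between */
        } while ((start & 1) || atomic_load(&s->seq) != start);
    }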
@@ -6758,8 +7010,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
6758 | 7010 | ||
6759 | SET_NETDEV_DEV(netdev, &pdev->dev); | 7011 | SET_NETDEV_DEV(netdev, &pdev->dev); |
6760 | 7012 | ||
6761 | pci_set_drvdata(pdev, netdev); | ||
6762 | adapter = netdev_priv(netdev); | 7013 | adapter = netdev_priv(netdev); |
7014 | pci_set_drvdata(pdev, adapter); | ||
6763 | 7015 | ||
6764 | adapter->netdev = netdev; | 7016 | adapter->netdev = netdev; |
6765 | adapter->pdev = pdev; | 7017 | adapter->pdev = pdev; |
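From this point on the driver keeps the adapter, not the net_device, in the PCI driver-data slot, so every callback that receives only a pci_dev must use the reversed two-step lookup. A sketch of the new idiom (the old one was netdev = pci_get_drvdata(pdev); adapter = netdev_priv(netdev)):

	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

The conversion has to be driver-wide to be safe, which is why the remove, error-recovery, and ixgbe_sriov.c hunks below all switch in the same patch.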
@@ -6832,8 +7084,14 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
6832 | goto err_sw_init; | 7084 | goto err_sw_init; |
6833 | 7085 | ||
6834 | /* Make it possible for the adapter to be woken up via WOL */ | 7086 | /* Make it possible for the adapter to be woken up via WOL */ |
6835 | if (adapter->hw.mac.type == ixgbe_mac_82599EB) | 7087 | switch (adapter->hw.mac.type) { |
7088 | case ixgbe_mac_82599EB: | ||
7089 | case ixgbe_mac_X540: | ||
6836 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); | 7090 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); |
7091 | break; | ||
7092 | default: | ||
7093 | break; | ||
7094 | } | ||
6837 | 7095 | ||
6838 | /* | 7096 | /* |
6839 | * If there is a fan on this device and it has failed, log the | 7097 | * If there is a fan on this device and it has failed, log the |
@@ -6942,7 +7200,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
6942 | } | 7200 | } |
6943 | 7201 | ||
6944 | /* power down the optics */ | 7202 | /* power down the optics */ |
6945 | if (hw->phy.multispeed_fiber) | 7203 | if (hw->phy.multispeed_fiber && hw->mac.ops.disable_tx_laser) |
6946 | hw->mac.ops.disable_tx_laser(hw); | 7204 | hw->mac.ops.disable_tx_laser(hw); |
6947 | 7205 | ||
6948 | init_timer(&adapter->watchdog_timer); | 7206 | init_timer(&adapter->watchdog_timer); |
@@ -6957,6 +7215,13 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
6957 | goto err_sw_init; | 7215 | goto err_sw_init; |
6958 | 7216 | ||
6959 | switch (pdev->device) { | 7217 | switch (pdev->device) { |
7218 | case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: | ||
7219 | /* All except this subdevice support WOL */ | ||
7220 | if (pdev->subsystem_device == | ||
7221 | IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) { | ||
7222 | adapter->wol = 0; | ||
7223 | break; | ||
7224 | } | ||
6960 | case IXGBE_DEV_ID_82599_KX4: | 7225 | case IXGBE_DEV_ID_82599_KX4: |
6961 | adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX | | 7226 | adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX | |
6962 | IXGBE_WUFC_MC | IXGBE_WUFC_BC); | 7227 | IXGBE_WUFC_MC | IXGBE_WUFC_BC); |
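The new case relies on deliberate switch fall-through: the combo-backplane entry breaks out early only for the one KX4/KR mezzanine subdevice that cannot wake, and every other subdevice falls through into the KX4 case to receive the standard wake-up filters. A condensed, self-contained sketch of that control flow (the default arm is simplified from the surrounding function):

	switch (pdev->device) {
	case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
		if (pdev->subsystem_device == IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) {
			adapter->wol = 0;	/* this one board has no WOL */
			break;
		}
		/* fall through: all other subdevices behave like the KX4 */
	case IXGBE_DEV_ID_82599_KX4:
		adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
				IXGBE_WUFC_MC | IXGBE_WUFC_BC);
		break;
	default:
		adapter->wol = 0;
		break;
	}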
@@ -7082,8 +7347,8 @@ err_dma: | |||
7082 | **/ | 7347 | **/ |
7083 | static void __devexit ixgbe_remove(struct pci_dev *pdev) | 7348 | static void __devexit ixgbe_remove(struct pci_dev *pdev) |
7084 | { | 7349 | { |
7085 | struct net_device *netdev = pci_get_drvdata(pdev); | 7350 | struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); |
7086 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | 7351 | struct net_device *netdev = adapter->netdev; |
7087 | 7352 | ||
7088 | set_bit(__IXGBE_DOWN, &adapter->state); | 7353 | set_bit(__IXGBE_DOWN, &adapter->state); |
7089 | /* clear the module not found bit to make sure the worker won't | 7354 | /* clear the module not found bit to make sure the worker won't |
@@ -7153,8 +7418,8 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev) | |||
7153 | static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, | 7418 | static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, |
7154 | pci_channel_state_t state) | 7419 | pci_channel_state_t state) |
7155 | { | 7420 | { |
7156 | struct net_device *netdev = pci_get_drvdata(pdev); | 7421 | struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); |
7157 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | 7422 | struct net_device *netdev = adapter->netdev; |
7158 | 7423 | ||
7159 | netif_device_detach(netdev); | 7424 | netif_device_detach(netdev); |
7160 | 7425 | ||
@@ -7177,8 +7442,7 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, | |||
7177 | */ | 7442 | */ |
7178 | static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev) | 7443 | static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev) |
7179 | { | 7444 | { |
7180 | struct net_device *netdev = pci_get_drvdata(pdev); | 7445 | struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); |
7181 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | ||
7182 | pci_ers_result_t result; | 7446 | pci_ers_result_t result; |
7183 | int err; | 7447 | int err; |
7184 | 7448 | ||
@@ -7216,8 +7480,8 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev) | |||
7216 | */ | 7480 | */ |
7217 | static void ixgbe_io_resume(struct pci_dev *pdev) | 7481 | static void ixgbe_io_resume(struct pci_dev *pdev) |
7218 | { | 7482 | { |
7219 | struct net_device *netdev = pci_get_drvdata(pdev); | 7483 | struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); |
7220 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | 7484 | struct net_device *netdev = adapter->netdev; |
7221 | 7485 | ||
7222 | if (netif_running(netdev)) { | 7486 | if (netif_running(netdev)) { |
7223 | if (ixgbe_up(adapter)) { | 7487 | if (ixgbe_up(adapter)) { |
@@ -7282,6 +7546,7 @@ static void __exit ixgbe_exit_module(void) | |||
7282 | dca_unregister_notify(&dca_notifier); | 7546 | dca_unregister_notify(&dca_notifier); |
7283 | #endif | 7547 | #endif |
7284 | pci_unregister_driver(&ixgbe_driver); | 7548 | pci_unregister_driver(&ixgbe_driver); |
7549 | rcu_barrier(); /* Wait for completion of call_rcu()'s */ | ||
7285 | } | 7550 | } |
7286 | 7551 | ||
7287 | #ifdef CONFIG_IXGBE_DCA | 7552 | #ifdef CONFIG_IXGBE_DCA |
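Because the stats path above now dereferences rings under RCU, ring teardown is expected to free them with call_rcu(), and a module must not finish unloading while such callbacks are still queued against its text. rcu_barrier() waits for the queued callbacks themselves to run, which synchronize_rcu() (readers only) does not guarantee. The generic pattern, with example_driver and example_exit_module as placeholder names:

	static void __exit example_exit_module(void)
	{
		pci_unregister_driver(&example_driver);	/* no new call_rcu() after this */
		rcu_barrier();	/* drain every call_rcu() callback still pending */
	}
	module_exit(example_exit_module);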
diff --git a/drivers/net/ixgbe/ixgbe_mbx.c b/drivers/net/ixgbe/ixgbe_mbx.c index 471f0f2cdb98..027c628c3aae 100644 --- a/drivers/net/ixgbe/ixgbe_mbx.c +++ b/drivers/net/ixgbe/ixgbe_mbx.c | |||
@@ -319,8 +319,14 @@ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number) | |||
319 | u32 vflre = 0; | 319 | u32 vflre = 0; |
320 | s32 ret_val = IXGBE_ERR_MBX; | 320 | s32 ret_val = IXGBE_ERR_MBX; |
321 | 321 | ||
322 | if (hw->mac.type == ixgbe_mac_82599EB) | 322 | switch (hw->mac.type) { |
323 | case ixgbe_mac_82599EB: | ||
324 | case ixgbe_mac_X540: | ||
323 | vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset)); | 325 | vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset)); |
326 | break; | ||
327 | default: | ||
328 | break; | ||
329 | } | ||
324 | 330 | ||
325 | if (vflre & (1 << vf_shift)) { | 331 | if (vflre & (1 << vf_shift)) { |
326 | ret_val = 0; | 332 | ret_val = 0; |
@@ -439,22 +445,26 @@ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw) | |||
439 | { | 445 | { |
440 | struct ixgbe_mbx_info *mbx = &hw->mbx; | 446 | struct ixgbe_mbx_info *mbx = &hw->mbx; |
441 | 447 | ||
442 | if (hw->mac.type != ixgbe_mac_82599EB) | 448 | switch (hw->mac.type) { |
443 | return; | 449 | case ixgbe_mac_82599EB: |
444 | 450 | case ixgbe_mac_X540: | |
445 | mbx->timeout = 0; | 451 | mbx->timeout = 0; |
446 | mbx->usec_delay = 0; | 452 | mbx->usec_delay = 0; |
447 | 453 | ||
448 | mbx->size = IXGBE_VFMAILBOX_SIZE; | 454 | mbx->size = IXGBE_VFMAILBOX_SIZE; |
449 | 455 | ||
450 | mbx->stats.msgs_tx = 0; | 456 | mbx->stats.msgs_tx = 0; |
451 | mbx->stats.msgs_rx = 0; | 457 | mbx->stats.msgs_rx = 0; |
452 | mbx->stats.reqs = 0; | 458 | mbx->stats.reqs = 0; |
453 | mbx->stats.acks = 0; | 459 | mbx->stats.acks = 0; |
454 | mbx->stats.rsts = 0; | 460 | mbx->stats.rsts = 0; |
461 | break; | ||
462 | default: | ||
463 | break; | ||
464 | } | ||
455 | } | 465 | } |
456 | 466 | ||
457 | struct ixgbe_mbx_operations mbx_ops_82599 = { | 467 | struct ixgbe_mbx_operations mbx_ops_generic = { |
458 | .read = ixgbe_read_mbx_pf, | 468 | .read = ixgbe_read_mbx_pf, |
459 | .write = ixgbe_write_mbx_pf, | 469 | .write = ixgbe_write_mbx_pf, |
460 | .read_posted = ixgbe_read_posted_mbx, | 470 | .read_posted = ixgbe_read_posted_mbx, |
diff --git a/drivers/net/ixgbe/ixgbe_mbx.h b/drivers/net/ixgbe/ixgbe_mbx.h index 7e0d08ff5b53..3df9b1590218 100644 --- a/drivers/net/ixgbe/ixgbe_mbx.h +++ b/drivers/net/ixgbe/ixgbe_mbx.h | |||
@@ -88,6 +88,6 @@ s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16); | |||
88 | s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16); | 88 | s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16); |
89 | void ixgbe_init_mbx_params_pf(struct ixgbe_hw *); | 89 | void ixgbe_init_mbx_params_pf(struct ixgbe_hw *); |
90 | 90 | ||
91 | extern struct ixgbe_mbx_operations mbx_ops_82599; | 91 | extern struct ixgbe_mbx_operations mbx_ops_generic; |
92 | 92 | ||
93 | #endif /* _IXGBE_MBX_H_ */ | 93 | #endif /* _IXGBE_MBX_H_ */ |
diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c index 6c0d42e33f21..c445fbce56ee 100644 --- a/drivers/net/ixgbe/ixgbe_phy.c +++ b/drivers/net/ixgbe/ixgbe_phy.c | |||
@@ -115,6 +115,9 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id) | |||
115 | case TN1010_PHY_ID: | 115 | case TN1010_PHY_ID: |
116 | phy_type = ixgbe_phy_tn; | 116 | phy_type = ixgbe_phy_tn; |
117 | break; | 117 | break; |
118 | case AQ1202_PHY_ID: | ||
119 | phy_type = ixgbe_phy_aq; | ||
120 | break; | ||
118 | case QT2022_PHY_ID: | 121 | case QT2022_PHY_ID: |
119 | phy_type = ixgbe_phy_qt; | 122 | phy_type = ixgbe_phy_qt; |
120 | break; | 123 | break; |
@@ -425,6 +428,39 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, | |||
425 | } | 428 | } |
426 | 429 | ||
427 | /** | 430 | /** |
431 | * ixgbe_get_copper_link_capabilities_generic - Determines link capabilities | ||
432 | * @hw: pointer to hardware structure | ||
433 | * @speed: pointer to link speed | ||
434 | * @autoneg: boolean auto-negotiation value | ||
435 | * | ||
436 | * Determines the link capabilities by reading the PHY speed-ability register. | ||
437 | */ | ||
438 | s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, | ||
439 | ixgbe_link_speed *speed, | ||
440 | bool *autoneg) | ||
441 | { | ||
442 | s32 status = IXGBE_ERR_LINK_SETUP; | ||
443 | u16 speed_ability; | ||
444 | |||
445 | *speed = 0; | ||
446 | *autoneg = true; | ||
447 | |||
448 | status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD, | ||
449 | &speed_ability); | ||
450 | |||
451 | if (status == 0) { | ||
452 | if (speed_ability & MDIO_SPEED_10G) | ||
453 | *speed |= IXGBE_LINK_SPEED_10GB_FULL; | ||
454 | if (speed_ability & MDIO_PMA_SPEED_1000) | ||
455 | *speed |= IXGBE_LINK_SPEED_1GB_FULL; | ||
456 | if (speed_ability & MDIO_PMA_SPEED_100) | ||
457 | *speed |= IXGBE_LINK_SPEED_100_FULL; | ||
458 | } | ||
459 | |||
460 | return status; | ||
461 | } | ||
462 | |||
463 | /** | ||
428 | * ixgbe_reset_phy_nl - Performs a PHY reset | 464 | * ixgbe_reset_phy_nl - Performs a PHY reset |
429 | * @hw: pointer to hardware structure | 465 | * @hw: pointer to hardware structure |
430 | **/ | 466 | **/ |
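ixgbe_get_copper_link_capabilities_generic() converts the PHY's speed-ability bits into the driver's link-speed mask. An illustrative caller, not taken from the patch, that advertises whatever the PHY reported:

	ixgbe_link_speed speed;
	bool autoneg;

	if (ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg) == 0 &&
	    (speed & IXGBE_LINK_SPEED_10GB_FULL))
		hw->mac.ops.setup_link(hw, speed, autoneg, true);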
@@ -1378,6 +1414,22 @@ s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, | |||
1378 | } | 1414 | } |
1379 | 1415 | ||
1380 | /** | 1416 | /** |
1417 | * ixgbe_get_phy_firmware_version_generic - Gets the PHY Firmware Version | ||
1418 | * @hw: pointer to hardware structure | ||
1419 | * @firmware_version: pointer to the PHY Firmware Version | ||
1420 | **/ | ||
1421 | s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, | ||
1422 | u16 *firmware_version) | ||
1423 | { | ||
1424 | s32 status = 0; | ||
1425 | |||
1426 | status = hw->phy.ops.read_reg(hw, AQ_FW_REV, MDIO_MMD_VEND1, | ||
1427 | firmware_version); | ||
1428 | |||
1429 | return status; | ||
1430 | } | ||
1431 | |||
1432 | /** | ||
1381 | * ixgbe_tn_check_overtemp - Checks if an overtemp occurred. | 1433 | * ixgbe_tn_check_overtemp - Checks if an overtemp occurred. |
1382 | * @hw: pointer to hardware structure | 1434 | * @hw: pointer to hardware structure |
1383 | * | 1435 | * |
diff --git a/drivers/net/ixgbe/ixgbe_phy.h b/drivers/net/ixgbe/ixgbe_phy.h index fb3898f12fc5..e2c6b7eac641 100644 --- a/drivers/net/ixgbe/ixgbe_phy.h +++ b/drivers/net/ixgbe/ixgbe_phy.h | |||
@@ -96,6 +96,9 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, | |||
96 | ixgbe_link_speed speed, | 96 | ixgbe_link_speed speed, |
97 | bool autoneg, | 97 | bool autoneg, |
98 | bool autoneg_wait_to_complete); | 98 | bool autoneg_wait_to_complete); |
99 | s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, | ||
100 | ixgbe_link_speed *speed, | ||
101 | bool *autoneg); | ||
99 | 102 | ||
100 | /* PHY specific */ | 103 | /* PHY specific */ |
101 | s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, | 104 | s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, |
@@ -103,6 +106,8 @@ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, | |||
103 | bool *link_up); | 106 | bool *link_up); |
104 | s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, | 107 | s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, |
105 | u16 *firmware_version); | 108 | u16 *firmware_version); |
109 | s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, | ||
110 | u16 *firmware_version); | ||
106 | 111 | ||
107 | s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw); | 112 | s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw); |
108 | s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw); | 113 | s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw); |
diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c index 93f40bcf683c..6e3e94b5a5f6 100644 --- a/drivers/net/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ixgbe/ixgbe_sriov.c | |||
@@ -178,8 +178,7 @@ static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter, | |||
178 | int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask) | 178 | int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask) |
179 | { | 179 | { |
180 | unsigned char vf_mac_addr[6]; | 180 | unsigned char vf_mac_addr[6]; |
181 | struct net_device *netdev = pci_get_drvdata(pdev); | 181 | struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); |
182 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | ||
183 | unsigned int vfn = (event_mask & 0x3f); | 182 | unsigned int vfn = (event_mask & 0x3f); |
184 | 183 | ||
185 | bool enable = ((event_mask & 0x10000000U) != 0); | 184 | bool enable = ((event_mask & 0x10000000U) != 0); |
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h index d3cc6ce7c973..42c607339a62 100644 --- a/drivers/net/ixgbe/ixgbe_type.h +++ b/drivers/net/ixgbe/ixgbe_type.h | |||
@@ -57,6 +57,8 @@ | |||
57 | #define IXGBE_DEV_ID_82599_SFP_EM 0x1507 | 57 | #define IXGBE_DEV_ID_82599_SFP_EM 0x1507 |
58 | #define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC | 58 | #define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC |
59 | #define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8 | 59 | #define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8 |
60 | #define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ 0x000C | ||
61 | #define IXGBE_DEV_ID_X540T 0x1528 | ||
60 | 62 | ||
61 | /* General Registers */ | 63 | /* General Registers */ |
62 | #define IXGBE_CTRL 0x00000 | 64 | #define IXGBE_CTRL 0x00000 |
@@ -994,8 +996,10 @@ | |||
994 | /* PHY IDs*/ | 996 | /* PHY IDs*/ |
995 | #define TN1010_PHY_ID 0x00A19410 | 997 | #define TN1010_PHY_ID 0x00A19410 |
996 | #define TNX_FW_REV 0xB | 998 | #define TNX_FW_REV 0xB |
999 | #define AQ1202_PHY_ID 0x03A1B440 | ||
997 | #define QT2022_PHY_ID 0x0043A400 | 1000 | #define QT2022_PHY_ID 0x0043A400 |
998 | #define ATH_PHY_ID 0x03429050 | 1001 | #define ATH_PHY_ID 0x03429050 |
1002 | #define AQ_FW_REV 0x20 | ||
999 | 1003 | ||
1000 | /* PHY Types */ | 1004 | /* PHY Types */ |
1001 | #define IXGBE_M88E1145_E_PHY_ID 0x01410CD0 | 1005 | #define IXGBE_M88E1145_E_PHY_ID 0x01410CD0 |
@@ -1491,6 +1495,7 @@ | |||
1491 | #define IXGBE_EEC_PRES 0x00000100 /* EEPROM Present */ | 1495 | #define IXGBE_EEC_PRES 0x00000100 /* EEPROM Present */ |
1492 | #define IXGBE_EEC_ARD 0x00000200 /* EEPROM Auto Read Done */ | 1496 | #define IXGBE_EEC_ARD 0x00000200 /* EEPROM Auto Read Done */ |
1493 | #define IXGBE_EEC_FLUP 0x00800000 /* Flash update command */ | 1497 | #define IXGBE_EEC_FLUP 0x00800000 /* Flash update command */ |
1498 | #define IXGBE_EEC_SEC1VAL 0x02000000 /* Sector 1 Valid */ | ||
1494 | #define IXGBE_EEC_FLUDONE 0x04000000 /* Flash update done */ | 1499 | #define IXGBE_EEC_FLUDONE 0x04000000 /* Flash update done */ |
1495 | /* EEPROM Addressing bits based on type (0-small, 1-large) */ | 1500 | /* EEPROM Addressing bits based on type (0-small, 1-large) */ |
1496 | #define IXGBE_EEC_ADDR_SIZE 0x00000400 | 1501 | #define IXGBE_EEC_ADDR_SIZE 0x00000400 |
@@ -1505,7 +1510,9 @@ | |||
1505 | #define IXGBE_EEPROM_SUM 0xBABA | 1510 | #define IXGBE_EEPROM_SUM 0xBABA |
1506 | #define IXGBE_PCIE_ANALOG_PTR 0x03 | 1511 | #define IXGBE_PCIE_ANALOG_PTR 0x03 |
1507 | #define IXGBE_ATLAS0_CONFIG_PTR 0x04 | 1512 | #define IXGBE_ATLAS0_CONFIG_PTR 0x04 |
1513 | #define IXGBE_PHY_PTR 0x04 | ||
1508 | #define IXGBE_ATLAS1_CONFIG_PTR 0x05 | 1514 | #define IXGBE_ATLAS1_CONFIG_PTR 0x05 |
1515 | #define IXGBE_OPTION_ROM_PTR 0x05 | ||
1509 | #define IXGBE_PCIE_GENERAL_PTR 0x06 | 1516 | #define IXGBE_PCIE_GENERAL_PTR 0x06 |
1510 | #define IXGBE_PCIE_CONFIG0_PTR 0x07 | 1517 | #define IXGBE_PCIE_CONFIG0_PTR 0x07 |
1511 | #define IXGBE_PCIE_CONFIG1_PTR 0x08 | 1518 | #define IXGBE_PCIE_CONFIG1_PTR 0x08 |
@@ -2113,6 +2120,14 @@ typedef u32 ixgbe_physical_layer; | |||
2113 | #define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x1000 | 2120 | #define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x1000 |
2114 | #define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x2000 | 2121 | #define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x2000 |
2115 | 2122 | ||
2123 | /* Flow Control Macros */ | ||
2124 | #define PAUSE_RTT 8 | ||
2125 | #define PAUSE_MTU(MTU) ((MTU + 1024 - 1) / 1024) | ||
2126 | |||
2127 | #define FC_HIGH_WATER(MTU) ((((PAUSE_RTT + PAUSE_MTU(MTU)) * 144) + 99) / 100 +\ | ||
2128 | PAUSE_MTU(MTU)) | ||
2129 | #define FC_LOW_WATER(MTU) (2 * (2 * PAUSE_MTU(MTU) + PAUSE_RTT)) | ||
2130 | |||
2116 | /* Software ATR hash keys */ | 2131 | /* Software ATR hash keys */ |
2117 | #define IXGBE_ATR_BUCKET_HASH_KEY 0xE214AD3D | 2132 | #define IXGBE_ATR_BUCKET_HASH_KEY 0xE214AD3D |
2118 | #define IXGBE_ATR_SIGNATURE_HASH_KEY 0x14364D17 | 2133 | #define IXGBE_ATR_SIGNATURE_HASH_KEY 0x14364D17 |
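Both macros work in 1 KB units of packet buffer: PAUSE_MTU() rounds the MTU up to whole kilobytes, PAUSE_RTT allows 8 KB for the pause round trip, and the "+ 99) / 100" is an integer-arithmetic ceiling of the 1.44 scaling factor. Worked through for a standard 1500-byte MTU (my arithmetic, not part of the patch):

	PAUSE_MTU(1500)     = (1500 + 1023) / 1024             = 2
	FC_HIGH_WATER(1500) = (((8 + 2) * 144) + 99) / 100 + 2 = 15 + 2 = 17
	FC_LOW_WATER(1500)  = 2 * (2 * 2 + 8)                  = 24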
@@ -2164,6 +2179,7 @@ struct ixgbe_atr_input_masks { | |||
2164 | enum ixgbe_eeprom_type { | 2179 | enum ixgbe_eeprom_type { |
2165 | ixgbe_eeprom_uninitialized = 0, | 2180 | ixgbe_eeprom_uninitialized = 0, |
2166 | ixgbe_eeprom_spi, | 2181 | ixgbe_eeprom_spi, |
2182 | ixgbe_flash, | ||
2167 | ixgbe_eeprom_none /* No NVM support */ | 2183 | ixgbe_eeprom_none /* No NVM support */ |
2168 | }; | 2184 | }; |
2169 | 2185 | ||
@@ -2171,12 +2187,14 @@ enum ixgbe_mac_type { | |||
2171 | ixgbe_mac_unknown = 0, | 2187 | ixgbe_mac_unknown = 0, |
2172 | ixgbe_mac_82598EB, | 2188 | ixgbe_mac_82598EB, |
2173 | ixgbe_mac_82599EB, | 2189 | ixgbe_mac_82599EB, |
2190 | ixgbe_mac_X540, | ||
2174 | ixgbe_num_macs | 2191 | ixgbe_num_macs |
2175 | }; | 2192 | }; |
2176 | 2193 | ||
2177 | enum ixgbe_phy_type { | 2194 | enum ixgbe_phy_type { |
2178 | ixgbe_phy_unknown = 0, | 2195 | ixgbe_phy_unknown = 0, |
2179 | ixgbe_phy_tn, | 2196 | ixgbe_phy_tn, |
2197 | ixgbe_phy_aq, | ||
2180 | ixgbe_phy_cu_unknown, | 2198 | ixgbe_phy_cu_unknown, |
2181 | ixgbe_phy_qt, | 2199 | ixgbe_phy_qt, |
2182 | ixgbe_phy_xaui, | 2200 | ixgbe_phy_xaui, |
@@ -2405,6 +2423,7 @@ struct ixgbe_eeprom_operations { | |||
2405 | s32 (*write)(struct ixgbe_hw *, u16, u16); | 2423 | s32 (*write)(struct ixgbe_hw *, u16, u16); |
2406 | s32 (*validate_checksum)(struct ixgbe_hw *, u16 *); | 2424 | s32 (*validate_checksum)(struct ixgbe_hw *, u16 *); |
2407 | s32 (*update_checksum)(struct ixgbe_hw *); | 2425 | s32 (*update_checksum)(struct ixgbe_hw *); |
2426 | u16 (*calc_checksum)(struct ixgbe_hw *); | ||
2408 | }; | 2427 | }; |
2409 | 2428 | ||
2410 | struct ixgbe_mac_operations { | 2429 | struct ixgbe_mac_operations { |
@@ -2574,6 +2593,7 @@ struct ixgbe_hw { | |||
2574 | u16 subsystem_vendor_id; | 2593 | u16 subsystem_vendor_id; |
2575 | u8 revision_id; | 2594 | u8 revision_id; |
2576 | bool adapter_stopped; | 2595 | bool adapter_stopped; |
2596 | bool force_full_reset; | ||
2577 | }; | 2597 | }; |
2578 | 2598 | ||
2579 | struct ixgbe_info { | 2599 | struct ixgbe_info { |
diff --git a/drivers/net/ixgbe/ixgbe_x540.c b/drivers/net/ixgbe/ixgbe_x540.c new file mode 100644 index 000000000000..9649fa727e31 --- /dev/null +++ b/drivers/net/ixgbe/ixgbe_x540.c | |||
@@ -0,0 +1,722 @@ | |||
1 | /******************************************************************************* | ||
2 | |||
3 | Intel 10 Gigabit PCI Express Linux driver | ||
4 | Copyright(c) 1999 - 2010 Intel Corporation. | ||
5 | |||
6 | This program is free software; you can redistribute it and/or modify it | ||
7 | under the terms and conditions of the GNU General Public License, | ||
8 | version 2, as published by the Free Software Foundation. | ||
9 | |||
10 | This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | more details. | ||
14 | |||
15 | You should have received a copy of the GNU General Public License along with | ||
16 | this program; if not, write to the Free Software Foundation, Inc., | ||
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | |||
19 | The full GNU General Public License is included in this distribution in | ||
20 | the file called "COPYING". | ||
21 | |||
22 | Contact Information: | ||
23 | e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> | ||
24 | Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
25 | |||
26 | *******************************************************************************/ | ||
27 | |||
28 | #include <linux/pci.h> | ||
29 | #include <linux/delay.h> | ||
30 | #include <linux/sched.h> | ||
31 | |||
32 | #include "ixgbe.h" | ||
33 | #include "ixgbe_phy.h" | ||
34 | #include "ixgbe_mbx.h" | ||
35 | |||
36 | #define IXGBE_X540_MAX_TX_QUEUES 128 | ||
37 | #define IXGBE_X540_MAX_RX_QUEUES 128 | ||
38 | #define IXGBE_X540_RAR_ENTRIES 128 | ||
39 | #define IXGBE_X540_MC_TBL_SIZE 128 | ||
40 | #define IXGBE_X540_VFT_TBL_SIZE 128 | ||
41 | |||
42 | static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw); | ||
43 | static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw); | ||
44 | static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask); | ||
45 | static void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask); | ||
46 | static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw); | ||
47 | static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw); | ||
48 | |||
49 | static enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw) | ||
50 | { | ||
51 | return ixgbe_media_type_copper; | ||
52 | } | ||
53 | |||
54 | static s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw) | ||
55 | { | ||
56 | struct ixgbe_mac_info *mac = &hw->mac; | ||
57 | |||
58 | /* Call PHY identify routine to get the phy type */ | ||
59 | ixgbe_identify_phy_generic(hw); | ||
60 | |||
61 | mac->mcft_size = IXGBE_X540_MC_TBL_SIZE; | ||
62 | mac->vft_size = IXGBE_X540_VFT_TBL_SIZE; | ||
63 | mac->num_rar_entries = IXGBE_X540_RAR_ENTRIES; | ||
64 | mac->max_rx_queues = IXGBE_X540_MAX_RX_QUEUES; | ||
65 | mac->max_tx_queues = IXGBE_X540_MAX_TX_QUEUES; | ||
66 | mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); | ||
67 | |||
68 | return 0; | ||
69 | } | ||
70 | |||
71 | /** | ||
72 | * ixgbe_setup_mac_link_X540 - Set the auto advertised capabilities | ||
73 | * @hw: pointer to hardware structure | ||
74 | * @speed: new link speed | ||
75 | * @autoneg: true if autonegotiation enabled | ||
76 | * @autoneg_wait_to_complete: true when waiting for completion is needed | ||
77 | **/ | ||
78 | static s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, | ||
79 | ixgbe_link_speed speed, bool autoneg, | ||
80 | bool autoneg_wait_to_complete) | ||
81 | { | ||
82 | return hw->phy.ops.setup_link_speed(hw, speed, autoneg, | ||
83 | autoneg_wait_to_complete); | ||
84 | } | ||
85 | |||
86 | /** | ||
87 | * ixgbe_reset_hw_X540 - Perform hardware reset | ||
88 | * @hw: pointer to hardware structure | ||
89 | * | ||
90 | * Resets the hardware by resetting the transmit and receive units, masks | ||
91 | * and clears all interrupts, performs a PHY reset, and performs a link (MAC) | ||
92 | * reset. | ||
93 | **/ | ||
94 | static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw) | ||
95 | { | ||
96 | ixgbe_link_speed link_speed; | ||
97 | s32 status = 0; | ||
98 | u32 ctrl; | ||
99 | u32 ctrl_ext; | ||
100 | u32 reset_bit; | ||
101 | u32 i; | ||
102 | u32 autoc; | ||
103 | u32 autoc2; | ||
104 | bool link_up = false; | ||
105 | |||
106 | /* Call adapter stop to disable tx/rx and clear interrupts */ | ||
107 | hw->mac.ops.stop_adapter(hw); | ||
108 | |||
109 | /* | ||
110 | * Prevent the PCI-E bus from hanging by disabling PCI-E master | ||
111 | * access and verify no pending requests before reset | ||
112 | */ | ||
113 | status = ixgbe_disable_pcie_master(hw); | ||
114 | if (status != 0) { | ||
115 | status = IXGBE_ERR_MASTER_REQUESTS_PENDING; | ||
116 | hw_dbg(hw, "PCI-E Master disable polling has failed.\n"); | ||
117 | } | ||
118 | |||
119 | /* | ||
120 | * Issue global reset to the MAC. Needs to be SW reset if link is up. | ||
121 | * If link reset is used when link is up, it might reset the PHY when | ||
122 | * mng is using it. If link is down or the flag to force full link | ||
123 | * reset is set, then perform link reset. | ||
124 | */ | ||
125 | if (hw->force_full_reset) { | ||
126 | reset_bit = IXGBE_CTRL_LNK_RST; | ||
127 | } else { | ||
128 | hw->mac.ops.check_link(hw, &link_speed, &link_up, false); | ||
129 | if (!link_up) | ||
130 | reset_bit = IXGBE_CTRL_LNK_RST; | ||
131 | else | ||
132 | reset_bit = IXGBE_CTRL_RST; | ||
133 | } | ||
134 | |||
135 | ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); | ||
136 | IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | reset_bit)); | ||
137 | IXGBE_WRITE_FLUSH(hw); | ||
138 | |||
139 | /* Poll for reset bit to self-clear indicating reset is complete */ | ||
140 | for (i = 0; i < 10; i++) { | ||
141 | udelay(1); | ||
142 | ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); | ||
143 | if (!(ctrl & reset_bit)) | ||
144 | break; | ||
145 | } | ||
146 | if (ctrl & reset_bit) { | ||
147 | status = IXGBE_ERR_RESET_FAILED; | ||
148 | hw_dbg(hw, "Reset polling failed to complete.\n"); | ||
149 | } | ||
150 | |||
151 | /* Clear PF Reset Done bit so PF/VF Mail Ops can work */ | ||
152 | ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); | ||
153 | ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; | ||
154 | IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); | ||
155 | |||
156 | msleep(50); | ||
157 | |||
158 | /* Set the Rx packet buffer size. */ | ||
159 | IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), 384 << IXGBE_RXPBSIZE_SHIFT); | ||
160 | |||
161 | /* Store the permanent mac address */ | ||
162 | hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); | ||
163 | |||
164 | /* | ||
165 | * Store the original AUTOC/AUTOC2 values if they have not been | ||
166 | * stored off yet. Otherwise restore the stored original | ||
167 | * values since the reset operation sets back to defaults. | ||
168 | */ | ||
169 | autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); | ||
170 | autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); | ||
171 | if (hw->mac.orig_link_settings_stored == false) { | ||
172 | hw->mac.orig_autoc = autoc; | ||
173 | hw->mac.orig_autoc2 = autoc2; | ||
174 | hw->mac.orig_link_settings_stored = true; | ||
175 | } else { | ||
176 | if (autoc != hw->mac.orig_autoc) | ||
177 | IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc | | ||
178 | IXGBE_AUTOC_AN_RESTART)); | ||
179 | |||
180 | if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) != | ||
181 | (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) { | ||
182 | autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK; | ||
183 | autoc2 |= (hw->mac.orig_autoc2 & | ||
184 | IXGBE_AUTOC2_UPPER_MASK); | ||
185 | IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2); | ||
186 | } | ||
187 | } | ||
188 | |||
189 | /* | ||
190 | * Store MAC address from RAR0, clear receive address registers, and | ||
191 | * clear the multicast table. Also reset num_rar_entries to 128, | ||
192 | * since we modify this value when programming the SAN MAC address. | ||
193 | */ | ||
194 | hw->mac.num_rar_entries = 128; | ||
195 | hw->mac.ops.init_rx_addrs(hw); | ||
196 | |||
197 | /* Store the permanent mac address */ | ||
198 | hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); | ||
199 | |||
200 | /* Store the permanent SAN mac address */ | ||
201 | hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr); | ||
202 | |||
203 | /* Add the SAN MAC address to the RAR only if it's a valid address */ | ||
204 | if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) { | ||
205 | hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1, | ||
206 | hw->mac.san_addr, 0, IXGBE_RAH_AV); | ||
207 | |||
208 | /* Reserve the last RAR for the SAN MAC address */ | ||
209 | hw->mac.num_rar_entries--; | ||
210 | } | ||
211 | |||
212 | /* Store the alternative WWNN/WWPN prefix */ | ||
213 | hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix, | ||
214 | &hw->mac.wwpn_prefix); | ||
215 | |||
216 | return status; | ||
217 | } | ||
218 | |||
219 | /** | ||
220 | * ixgbe_get_supported_physical_layer_X540 - Returns physical layer type | ||
221 | * @hw: pointer to hardware structure | ||
222 | * | ||
223 | * Determines physical layer capabilities of the current configuration. | ||
224 | **/ | ||
225 | static u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw) | ||
226 | { | ||
227 | u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; | ||
228 | u16 ext_ability = 0; | ||
229 | |||
230 | hw->phy.ops.identify(hw); | ||
231 | |||
232 | hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD, | ||
233 | &ext_ability); | ||
234 | if (ext_ability & MDIO_PMA_EXTABLE_10GBT) | ||
235 | physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; | ||
236 | if (ext_ability & MDIO_PMA_EXTABLE_1000BT) | ||
237 | physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; | ||
238 | if (ext_ability & MDIO_PMA_EXTABLE_100BTX) | ||
239 | physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX; | ||
240 | |||
241 | return physical_layer; | ||
242 | } | ||
243 | |||
244 | /** | ||
245 | * ixgbe_init_eeprom_params_X540 - Initialize EEPROM params | ||
246 | * @hw: pointer to hardware structure | ||
247 | **/ | ||
248 | static s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw) | ||
249 | { | ||
250 | struct ixgbe_eeprom_info *eeprom = &hw->eeprom; | ||
251 | u32 eec; | ||
252 | u16 eeprom_size; | ||
253 | |||
254 | if (eeprom->type == ixgbe_eeprom_uninitialized) { | ||
255 | eeprom->semaphore_delay = 10; | ||
256 | eeprom->type = ixgbe_flash; | ||
257 | |||
258 | eec = IXGBE_READ_REG(hw, IXGBE_EEC); | ||
259 | eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> | ||
260 | IXGBE_EEC_SIZE_SHIFT); | ||
261 | eeprom->word_size = 1 << (eeprom_size + | ||
262 | IXGBE_EEPROM_WORD_SIZE_SHIFT); | ||
263 | |||
264 | hw_dbg(hw, "Eeprom params: type = %d, size = %d\n", | ||
265 | eeprom->type, eeprom->word_size); | ||
266 | } | ||
267 | |||
268 | return 0; | ||
269 | } | ||
270 | |||
271 | /** | ||
272 | * ixgbe_read_eerd_X540 - Read EEPROM word using EERD | ||
273 | * @hw: pointer to hardware structure | ||
274 | * @offset: offset of word in the EEPROM to read | ||
275 | * @data: word read from the EEPROM | ||
276 | **/ | ||
277 | static s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data) | ||
278 | { | ||
279 | s32 status; | ||
280 | |||
281 | if (ixgbe_acquire_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM) == 0) | ||
282 | status = ixgbe_read_eerd_generic(hw, offset, data); | ||
283 | else | ||
284 | status = IXGBE_ERR_SWFW_SYNC; | ||
285 | |||
286 | ixgbe_release_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM); | ||
287 | return status; | ||
288 | } | ||
289 | |||
290 | /** | ||
291 | * ixgbe_write_eewr_X540 - Write EEPROM word using EEWR | ||
292 | * @hw: pointer to hardware structure | ||
293 | * @offset: offset of word in the EEPROM to write | ||
294 | * @data: word to write to the EEPROM | ||
295 | * | ||
296 | * Writes a 16-bit word to the EEPROM using the EEWR register. | ||
297 | **/ | ||
298 | static s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data) | ||
299 | { | ||
300 | u32 eewr; | ||
301 | s32 status; | ||
302 | |||
303 | hw->eeprom.ops.init_params(hw); | ||
304 | |||
305 | if (offset >= hw->eeprom.word_size) { | ||
306 | status = IXGBE_ERR_EEPROM; | ||
307 | goto out; | ||
308 | } | ||
309 | |||
310 | eewr = (offset << IXGBE_EEPROM_RW_ADDR_SHIFT) | | ||
311 | (data << IXGBE_EEPROM_RW_REG_DATA) | | ||
312 | IXGBE_EEPROM_RW_REG_START; | ||
313 | |||
314 | if (ixgbe_acquire_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM) == 0) { | ||
315 | status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE); | ||
316 | if (status != 0) { | ||
317 | hw_dbg(hw, "Eeprom write EEWR timed out\n"); | ||
318 | goto out; | ||
319 | } | ||
320 | |||
321 | IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr); | ||
322 | |||
323 | status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE); | ||
324 | if (status != 0) { | ||
325 | hw_dbg(hw, "Eeprom write EEWR timed out\n"); | ||
326 | goto out; | ||
327 | } | ||
328 | } else { | ||
329 | status = IXGBE_ERR_SWFW_SYNC; | ||
330 | } | ||
331 | |||
332 | out: | ||
333 | ixgbe_release_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM); | ||
334 | return status; | ||
335 | } | ||
336 | |||
337 | /** | ||
338 | * ixgbe_calc_eeprom_checksum_X540 - Calculates and returns the checksum | ||
339 | * @hw: pointer to hardware structure | ||
340 | **/ | ||
341 | static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) | ||
342 | { | ||
343 | u16 i; | ||
344 | u16 j; | ||
345 | u16 checksum = 0; | ||
346 | u16 length = 0; | ||
347 | u16 pointer = 0; | ||
348 | u16 word = 0; | ||
349 | |||
350 | /* Include 0x0-0x3E in the checksum; word 0x3F holds the checksum itself */ | ||
351 | for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) { | ||
352 | if (hw->eeprom.ops.read(hw, i, &word) != 0) { | ||
353 | hw_dbg(hw, "EEPROM read failed\n"); | ||
354 | break; | ||
355 | } | ||
356 | checksum += word; | ||
357 | } | ||
358 | |||
359 | /* | ||
360 | * Include all data from pointers 0x3, 0x6-0xE. This excludes the | ||
361 | * FW, PHY module, and PCIe Expansion/Option ROM pointers. | ||
362 | */ | ||
363 | for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) { | ||
364 | if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR) | ||
365 | continue; | ||
366 | |||
367 | if (hw->eeprom.ops.read(hw, i, &pointer) != 0) { | ||
368 | hw_dbg(hw, "EEPROM read failed\n"); | ||
369 | break; | ||
370 | } | ||
371 | |||
372 | /* Skip pointer section if the pointer is invalid. */ | ||
373 | if (pointer == 0xFFFF || pointer == 0 || | ||
374 | pointer >= hw->eeprom.word_size) | ||
375 | continue; | ||
376 | |||
377 | if (hw->eeprom.ops.read(hw, pointer, &length) != 0) { | ||
378 | hw_dbg(hw, "EEPROM read failed\n"); | ||
379 | break; | ||
380 | } | ||
381 | |||
382 | /* Skip pointer section if length is invalid. */ | ||
383 | if (length == 0xFFFF || length == 0 || | ||
384 | (pointer + length) >= hw->eeprom.word_size) | ||
385 | continue; | ||
386 | |||
387 | for (j = pointer+1; j <= pointer+length; j++) { | ||
388 | if (hw->eeprom.ops.read(hw, j, &word) != 0) { | ||
389 | hw_dbg(hw, "EEPROM read failed\n"); | ||
390 | break; | ||
391 | } | ||
392 | checksum += word; | ||
393 | } | ||
394 | } | ||
395 | |||
396 | checksum = (u16)IXGBE_EEPROM_SUM - checksum; | ||
397 | |||
398 | return checksum; | ||
399 | } | ||
400 | |||
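The X540 routine differs from the generic checksum only in coverage: it skips the FW, PHY-module, and option-ROM pointer sections, presumably because those regions can change independently of the software-managed words. The stored invariant is unchanged, shown here with an invented running sum:

	sum of covered words = 0x1234                    (hypothetical)
	checksum word        = 0xBABA - 0x1234 = 0xA886  (IXGBE_EEPROM_SUM - sum)
	validation           = 0x1234 + 0xA886 = 0xBABA  (matches IXGBE_EEPROM_SUM: image good)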
401 | /** | ||
402 | * ixgbe_update_eeprom_checksum_X540 - Updates the EEPROM checksum and flash | ||
403 | * @hw: pointer to hardware structure | ||
404 | * | ||
405 | * After writing the EEPROM shadow RAM via the EEWR register, software | ||
406 | * recalculates the checksum, updates the EEPROM, and instructs the hardware | ||
407 | * to update the flash. | ||
408 | **/ | ||
409 | static s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw) | ||
410 | { | ||
411 | s32 status; | ||
412 | |||
413 | status = ixgbe_update_eeprom_checksum_generic(hw); | ||
414 | |||
415 | if (status == 0) | ||
416 | status = ixgbe_update_flash_X540(hw); | ||
417 | |||
418 | return status; | ||
419 | } | ||
420 | |||
421 | /** | ||
422 | * ixgbe_update_flash_X540 - Instruct HW to copy EEPROM to Flash device | ||
423 | * @hw: pointer to hardware structure | ||
424 | * | ||
425 | * Set FLUP (bit 23) of the EEC register to instruct Hardware to copy | ||
426 | * EEPROM from shadow RAM to the flash device. | ||
427 | **/ | ||
428 | static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw) | ||
429 | { | ||
430 | u32 flup; | ||
431 | s32 status = IXGBE_ERR_EEPROM; | ||
432 | |||
433 | status = ixgbe_poll_flash_update_done_X540(hw); | ||
434 | if (status == IXGBE_ERR_EEPROM) { | ||
435 | hw_dbg(hw, "Flash update time out\n"); | ||
436 | goto out; | ||
437 | } | ||
438 | |||
439 | flup = IXGBE_READ_REG(hw, IXGBE_EEC) | IXGBE_EEC_FLUP; | ||
440 | IXGBE_WRITE_REG(hw, IXGBE_EEC, flup); | ||
441 | |||
442 | status = ixgbe_poll_flash_update_done_X540(hw); | ||
443 | if (status == 0) | ||
444 | hw_dbg(hw, "Flash update complete\n"); | ||
445 | else | ||
446 | hw_dbg(hw, "Flash update time out\n"); | ||
447 | |||
448 | if (hw->revision_id == 0) { | ||
449 | flup = IXGBE_READ_REG(hw, IXGBE_EEC); | ||
450 | |||
451 | if (flup & IXGBE_EEC_SEC1VAL) { | ||
452 | flup |= IXGBE_EEC_FLUP; | ||
453 | IXGBE_WRITE_REG(hw, IXGBE_EEC, flup); | ||
454 | } | ||
455 | |||
456 | status = ixgbe_poll_flash_update_done_X540(hw); | ||
457 | if (status == 0) | ||
458 | hw_dbg(hw, "Flash update complete\n"); | ||
459 | else | ||
460 | hw_dbg(hw, "Flash update time out\n"); | ||
461 | |||
462 | } | ||
463 | out: | ||
464 | return status; | ||
465 | } | ||
466 | |||
467 | /** | ||
468 | * ixgbe_poll_flash_update_done_X540 - Poll flash update status | ||
469 | * @hw: pointer to hardware structure | ||
470 | * | ||
471 | * Polls the FLUDONE (bit 26) of the EEC Register to determine when the | ||
472 | * flash update is done. | ||
473 | **/ | ||
474 | static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw) | ||
475 | { | ||
476 | u32 i; | ||
477 | u32 reg; | ||
478 | s32 status = IXGBE_ERR_EEPROM; | ||
479 | |||
480 | for (i = 0; i < IXGBE_FLUDONE_ATTEMPTS; i++) { | ||
481 | reg = IXGBE_READ_REG(hw, IXGBE_EEC); | ||
482 | if (reg & IXGBE_EEC_FLUDONE) { | ||
483 | status = 0; | ||
484 | break; | ||
485 | } | ||
486 | udelay(5); | ||
487 | } | ||
488 | return status; | ||
489 | } | ||
490 | |||
491 | /** | ||
492 | * ixgbe_acquire_swfw_sync_X540 - Acquire SWFW semaphore | ||
493 | * @hw: pointer to hardware structure | ||
494 | * @mask: Mask to specify which semaphore to acquire | ||
495 | * | ||
496 | * Acquires the SWFW semaphore through the SW_FW_SYNC register for | ||
497 | * the specified function (CSR, PHY0, PHY1, NVM, Flash) | ||
498 | **/ | ||
499 | static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask) | ||
500 | { | ||
501 | u32 swfw_sync; | ||
502 | u32 swmask = mask; | ||
503 | u32 fwmask = mask << 5; | ||
504 | u32 hwmask = 0; | ||
505 | u32 timeout = 200; | ||
506 | u32 i; | ||
507 | |||
508 | if (swmask == IXGBE_GSSR_EEP_SM) | ||
509 | hwmask = IXGBE_GSSR_FLASH_SM; | ||
510 | |||
511 | for (i = 0; i < timeout; i++) { | ||
512 | /* | ||
513 | * SW NVM semaphore bit is used for access to all | ||
514 | * SW_FW_SYNC bits (not just NVM) | ||
515 | */ | ||
516 | if (ixgbe_get_swfw_sync_semaphore(hw)) | ||
517 | return IXGBE_ERR_SWFW_SYNC; | ||
518 | |||
519 | swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC); | ||
520 | if (!(swfw_sync & (fwmask | swmask | hwmask))) { | ||
521 | swfw_sync |= swmask; | ||
522 | IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync); | ||
523 | ixgbe_release_swfw_sync_semaphore(hw); | ||
524 | break; | ||
525 | } else { | ||
526 | /* | ||
527 | * Firmware currently using resource (fwmask), | ||
528 | * hardware currently using resource (hwmask), | ||
529 | * or other software thread currently using | ||
530 | * resource (swmask) | ||
531 | */ | ||
532 | ixgbe_release_swfw_sync_semaphore(hw); | ||
533 | msleep(5); | ||
534 | } | ||
535 | } | ||
536 | |||
537 | /* | ||
538 | * If the resource is not released by the FW/HW, the SW can assume that | ||
539 | * the FW/HW malfunctions. In that case the SW should set the | ||
540 | * SW bit(s) of the requested resource(s) while ignoring the | ||
541 | * corresponding FW/HW bits in the SW_FW_SYNC register. | ||
542 | */ | ||
543 | if (i >= timeout) { | ||
544 | swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC); | ||
545 | if (swfw_sync & (fwmask | hwmask)) { | ||
546 | if (ixgbe_get_swfw_sync_semaphore(hw)) | ||
547 | return IXGBE_ERR_SWFW_SYNC; | ||
548 | |||
549 | swfw_sync |= swmask; | ||
550 | IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync); | ||
551 | ixgbe_release_swfw_sync_semaphore(hw); | ||
552 | } | ||
553 | } | ||
554 | |||
555 | msleep(5); | ||
556 | return 0; | ||
557 | } | ||
558 | |||
559 | /** | ||
560 | * ixgbe_release_swfw_sync_X540 - Release SWFW semaphore | ||
561 | * @hw: pointer to hardware structure | ||
562 | * @mask: Mask to specify which semaphore to release | ||
563 | * | ||
564 | * Releases the SWFW semaphore through the SW_FW_SYNC register | ||
565 | * for the specified function (CSR, PHY0, PHY1, NVM, Flash) | ||
566 | **/ | ||
567 | static void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask) | ||
568 | { | ||
569 | u32 swfw_sync; | ||
570 | u32 swmask = mask; | ||
571 | |||
572 | ixgbe_get_swfw_sync_semaphore(hw); | ||
573 | |||
574 | swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC); | ||
575 | swfw_sync &= ~swmask; | ||
576 | IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync); | ||
577 | |||
578 | ixgbe_release_swfw_sync_semaphore(hw); | ||
579 | msleep(5); | ||
580 | } | ||
581 | |||
582 | /** | ||
583 | * ixgbe_get_swfw_sync_semaphore - Get hardware semaphore | ||
584 | * @hw: pointer to hardware structure | ||
585 | * | ||
586 | * Sets the hardware semaphores so SW/FW can gain control of shared resources | ||
587 | **/ | ||
588 | static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw) | ||
589 | { | ||
590 | s32 status = IXGBE_ERR_EEPROM; | ||
591 | u32 timeout = 2000; | ||
592 | u32 i; | ||
593 | u32 swsm; | ||
594 | |||
595 | /* Get SMBI software semaphore between device drivers first */ | ||
596 | for (i = 0; i < timeout; i++) { | ||
597 | /* | ||
598 | * If the SMBI bit is 0 when we read it, then the bit will be | ||
599 | * set and we have the semaphore | ||
600 | */ | ||
601 | swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); | ||
602 | if (!(swsm & IXGBE_SWSM_SMBI)) { | ||
603 | status = 0; | ||
604 | break; | ||
605 | } | ||
606 | udelay(50); | ||
607 | } | ||
608 | |||
609 | /* Now get the semaphore between SW/FW through the REGSMP bit */ | ||
610 | if (!status) { | ||
611 | for (i = 0; i < timeout; i++) { | ||
612 | swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC); | ||
613 | if (!(swsm & IXGBE_SWFW_REGSMP)) | ||
614 | break; | ||
615 | |||
616 | udelay(50); | ||
617 | } | ||
618 | } else { | ||
619 | hw_dbg(hw, "Software semaphore SMBI between device drivers " | ||
620 | "not granted.\n"); | ||
621 | } | ||
622 | |||
623 | return status; | ||
624 | } | ||
625 | |||
626 | /** | ||
627 | * ixgbe_release_swfw_sync_semaphore - Release hardware semaphore | ||
628 | * @hw: pointer to hardware structure | ||
629 | * | ||
630 | * This function clears hardware semaphore bits. | ||
631 | **/ | ||
632 | static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw) | ||
633 | { | ||
634 | u32 swsm; | ||
635 | |||
636 | /* Release both semaphores by writing 0 to the bits REGSMP and SMBI */ | ||
637 | |||
638 | swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); | ||
639 | swsm &= ~IXGBE_SWSM_SMBI; | ||
640 | IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm); | ||
641 | |||
642 | swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC); | ||
643 | swsm &= ~IXGBE_SWFW_REGSMP; | ||
644 | IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swsm); | ||
645 | |||
646 | IXGBE_WRITE_FLUSH(hw); | ||
647 | } | ||
648 | |||
649 | static struct ixgbe_mac_operations mac_ops_X540 = { | ||
650 | .init_hw = &ixgbe_init_hw_generic, | ||
651 | .reset_hw = &ixgbe_reset_hw_X540, | ||
652 | .start_hw = &ixgbe_start_hw_generic, | ||
653 | .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic, | ||
654 | .get_media_type = &ixgbe_get_media_type_X540, | ||
655 | .get_supported_physical_layer = | ||
656 | &ixgbe_get_supported_physical_layer_X540, | ||
657 | .enable_rx_dma = &ixgbe_enable_rx_dma_generic, | ||
658 | .get_mac_addr = &ixgbe_get_mac_addr_generic, | ||
659 | .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic, | ||
660 | .get_device_caps = NULL, | ||
661 | .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic, | ||
662 | .stop_adapter = &ixgbe_stop_adapter_generic, | ||
663 | .get_bus_info = &ixgbe_get_bus_info_generic, | ||
664 | .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie, | ||
665 | .read_analog_reg8 = NULL, | ||
666 | .write_analog_reg8 = NULL, | ||
667 | .setup_link = &ixgbe_setup_mac_link_X540, | ||
668 | .check_link = &ixgbe_check_mac_link_generic, | ||
669 | .get_link_capabilities = &ixgbe_get_copper_link_capabilities_generic, | ||
670 | .led_on = &ixgbe_led_on_generic, | ||
671 | .led_off = &ixgbe_led_off_generic, | ||
672 | .blink_led_start = &ixgbe_blink_led_start_generic, | ||
673 | .blink_led_stop = &ixgbe_blink_led_stop_generic, | ||
674 | .set_rar = &ixgbe_set_rar_generic, | ||
675 | .clear_rar = &ixgbe_clear_rar_generic, | ||
676 | .set_vmdq = &ixgbe_set_vmdq_generic, | ||
677 | .clear_vmdq = &ixgbe_clear_vmdq_generic, | ||
678 | .init_rx_addrs = &ixgbe_init_rx_addrs_generic, | ||
679 | .update_uc_addr_list = &ixgbe_update_uc_addr_list_generic, | ||
680 | .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic, | ||
681 | .enable_mc = &ixgbe_enable_mc_generic, | ||
682 | .disable_mc = &ixgbe_disable_mc_generic, | ||
683 | .clear_vfta = &ixgbe_clear_vfta_generic, | ||
684 | .set_vfta = &ixgbe_set_vfta_generic, | ||
685 | .fc_enable = &ixgbe_fc_enable_generic, | ||
686 | .init_uta_tables = &ixgbe_init_uta_tables_generic, | ||
687 | .setup_sfp = NULL, | ||
688 | }; | ||
689 | |||
690 | static struct ixgbe_eeprom_operations eeprom_ops_X540 = { | ||
691 | .init_params = &ixgbe_init_eeprom_params_X540, | ||
692 | .read = &ixgbe_read_eerd_X540, | ||
693 | .write = &ixgbe_write_eewr_X540, | ||
694 | .calc_checksum = &ixgbe_calc_eeprom_checksum_X540, | ||
695 | .validate_checksum = &ixgbe_validate_eeprom_checksum_generic, | ||
696 | .update_checksum = &ixgbe_update_eeprom_checksum_X540, | ||
697 | }; | ||
698 | |||
699 | static struct ixgbe_phy_operations phy_ops_X540 = { | ||
700 | .identify = &ixgbe_identify_phy_generic, | ||
701 | .identify_sfp = &ixgbe_identify_sfp_module_generic, | ||
702 | .init = NULL, | ||
703 | .reset = &ixgbe_reset_phy_generic, | ||
704 | .read_reg = &ixgbe_read_phy_reg_generic, | ||
705 | .write_reg = &ixgbe_write_phy_reg_generic, | ||
706 | .setup_link = &ixgbe_setup_phy_link_generic, | ||
707 | .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, | ||
708 | .read_i2c_byte = &ixgbe_read_i2c_byte_generic, | ||
709 | .write_i2c_byte = &ixgbe_write_i2c_byte_generic, | ||
710 | .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, | ||
711 | .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, | ||
712 | .check_overtemp = &ixgbe_tn_check_overtemp, | ||
713 | }; | ||
714 | |||
715 | struct ixgbe_info ixgbe_X540_info = { | ||
716 | .mac = ixgbe_mac_X540, | ||
717 | .get_invariants = &ixgbe_get_invariants_X540, | ||
718 | .mac_ops = &mac_ops_X540, | ||
719 | .eeprom_ops = &eeprom_ops_X540, | ||
720 | .phy_ops = &phy_ops_X540, | ||
721 | .mbx_ops = &mbx_ops_generic, | ||
722 | }; | ||
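ixgbe_X540_info is the payoff of the new file: a single board descriptor that the probe path can select by PCI device ID to stitch the generic helpers together with the few X540-specific routines above. A simplified sketch of how such an entry is consumed; the real ixgbe_probe() follows this pattern via ixgbe_info_tbl[ent->driver_data], though the exact copy sites differ:

	const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
	struct ixgbe_hw *hw = &adapter->hw;

	hw->mac.type = ii->mac;
	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
	memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
	memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
	memcpy(&hw->mbx.ops, ii->mbx_ops, sizeof(hw->mbx.ops));
	ii->get_invariants(hw);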
diff --git a/drivers/net/ixgbevf/Makefile b/drivers/net/ixgbevf/Makefile index dd4e0d27e8cc..1f35d229e71a 100644 --- a/drivers/net/ixgbevf/Makefile +++ b/drivers/net/ixgbevf/Makefile | |||
@@ -1,7 +1,7 @@ | |||
1 | ################################################################################ | 1 | ################################################################################ |
2 | # | 2 | # |
3 | # Intel 82599 Virtual Function driver | 3 | # Intel 82599 Virtual Function driver |
4 | # Copyright(c) 1999 - 2009 Intel Corporation. | 4 | # Copyright(c) 1999 - 2010 Intel Corporation. |
5 | # | 5 | # |
6 | # This program is free software; you can redistribute it and/or modify it | 6 | # This program is free software; you can redistribute it and/or modify it |
7 | # under the terms and conditions of the GNU General Public License, | 7 | # under the terms and conditions of the GNU General Public License, |
diff --git a/drivers/net/ixgbevf/defines.h b/drivers/net/ixgbevf/defines.h index ca2c81f49a05..f8a807d606c7 100644 --- a/drivers/net/ixgbevf/defines.h +++ b/drivers/net/ixgbevf/defines.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel 82599 Virtual Function driver | 3 | Intel 82599 Virtual Function driver |
4 | Copyright(c) 1999 - 2009 Intel Corporation. | 4 | Copyright(c) 1999 - 2010 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
diff --git a/drivers/net/ixgbevf/ixgbevf.h b/drivers/net/ixgbevf/ixgbevf.h index da4033c6efa2..0cd6abcf9306 100644 --- a/drivers/net/ixgbevf/ixgbevf.h +++ b/drivers/net/ixgbevf/ixgbevf.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel 82599 Virtual Function driver | 3 | Intel 82599 Virtual Function driver |
4 | Copyright(c) 1999 - 2009 Intel Corporation. | 4 | Copyright(c) 1999 - 2010 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c index dc03c9652389..5b8063cb4e6c 100644 --- a/drivers/net/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ixgbevf/ixgbevf_main.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel 82599 Virtual Function driver | 3 | Intel 82599 Virtual Function driver |
4 | Copyright(c) 1999 - 2009 Intel Corporation. | 4 | Copyright(c) 1999 - 2010 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
@@ -51,9 +51,10 @@ char ixgbevf_driver_name[] = "ixgbevf"; | |||
51 | static const char ixgbevf_driver_string[] = | 51 | static const char ixgbevf_driver_string[] = |
52 | "Intel(R) 82599 Virtual Function"; | 52 | "Intel(R) 82599 Virtual Function"; |
53 | 53 | ||
54 | #define DRV_VERSION "1.0.0-k0" | 54 | #define DRV_VERSION "1.0.12-k0" |
55 | const char ixgbevf_driver_version[] = DRV_VERSION; | 55 | const char ixgbevf_driver_version[] = DRV_VERSION; |
56 | static char ixgbevf_copyright[] = "Copyright (c) 2009 Intel Corporation."; | 56 | static char ixgbevf_copyright[] = |
57 | "Copyright (c) 2009 - 2010 Intel Corporation."; | ||
57 | 58 | ||
58 | static const struct ixgbevf_info *ixgbevf_info_tbl[] = { | 59 | static const struct ixgbevf_info *ixgbevf_info_tbl[] = { |
59 | [board_82599_vf] = &ixgbevf_vf_info, | 60 | [board_82599_vf] = &ixgbevf_vf_info, |
@@ -3424,10 +3425,6 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev, | |||
3424 | if (hw->mac.ops.get_bus_info) | 3425 | if (hw->mac.ops.get_bus_info) |
3425 | hw->mac.ops.get_bus_info(hw); | 3426 | hw->mac.ops.get_bus_info(hw); |
3426 | 3427 | ||
3427 | |||
3428 | netif_carrier_off(netdev); | ||
3429 | netif_tx_stop_all_queues(netdev); | ||
3430 | |||
3431 | strcpy(netdev->name, "eth%d"); | 3428 | strcpy(netdev->name, "eth%d"); |
3432 | 3429 | ||
3433 | err = register_netdev(netdev); | 3430 | err = register_netdev(netdev); |
@@ -3436,6 +3433,8 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev, | |||
3436 | 3433 | ||
3437 | adapter->netdev_registered = true; | 3434 | adapter->netdev_registered = true; |
3438 | 3435 | ||
3436 | netif_carrier_off(netdev); | ||
3437 | |||
3439 | ixgbevf_init_last_counter_stats(adapter); | 3438 | ixgbevf_init_last_counter_stats(adapter); |
3440 | 3439 | ||
3441 | /* print the MAC address */ | 3440 | /* print the MAC address */ |
diff --git a/drivers/net/ixgbevf/mbx.c b/drivers/net/ixgbevf/mbx.c index 84ac486f4a65..7a8833125770 100644 --- a/drivers/net/ixgbevf/mbx.c +++ b/drivers/net/ixgbevf/mbx.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel 82599 Virtual Function driver | 3 | Intel 82599 Virtual Function driver |
4 | Copyright(c) 1999 - 2009 Intel Corporation. | 4 | Copyright(c) 1999 - 2010 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
diff --git a/drivers/net/ixgbevf/mbx.h b/drivers/net/ixgbevf/mbx.h index 8c063bebee7f..b2b5bf5daa3d 100644 --- a/drivers/net/ixgbevf/mbx.h +++ b/drivers/net/ixgbevf/mbx.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel 82599 Virtual Function driver | 3 | Intel 82599 Virtual Function driver |
4 | Copyright(c) 1999 - 2009 Intel Corporation. | 4 | Copyright(c) 1999 - 2010 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
diff --git a/drivers/net/ixgbevf/regs.h b/drivers/net/ixgbevf/regs.h index 12f75960aec1..fb80ca1bcc93 100644 --- a/drivers/net/ixgbevf/regs.h +++ b/drivers/net/ixgbevf/regs.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel 82599 Virtual Function driver | 3 | Intel 82599 Virtual Function driver |
4 | Copyright(c) 1999 - 2009 Intel Corporation. | 4 | Copyright(c) 1999 - 2010 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
diff --git a/drivers/net/ixgbevf/vf.c b/drivers/net/ixgbevf/vf.c index bfe42c1fcfaf..971019d819b4 100644 --- a/drivers/net/ixgbevf/vf.c +++ b/drivers/net/ixgbevf/vf.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel 82599 Virtual Function driver | 3 | Intel 82599 Virtual Function driver |
4 | Copyright(c) 1999 - 2009 Intel Corporation. | 4 | Copyright(c) 1999 - 2010 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
diff --git a/drivers/net/ixgbevf/vf.h b/drivers/net/ixgbevf/vf.h index 61f9dc831424..144c99d5363a 100644 --- a/drivers/net/ixgbevf/vf.h +++ b/drivers/net/ixgbevf/vf.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel 82599 Virtual Function driver | 3 | Intel 82599 Virtual Function driver |
4 | Copyright(c) 1999 - 2009 Intel Corporation. | 4 | Copyright(c) 1999 - 2010 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |