author	David S. Miller <davem@davemloft.net>	2012-07-18 12:20:03 -0400
committer	David S. Miller <davem@davemloft.net>	2012-07-18 12:20:03 -0400
commit	c3fe065cea56f1fcd4f8e4ae1fb15fa785d2bb79 (patch)
tree	ed1157551ab340effe440c73f4ee63b5e6dbee42
parent	1c652966d9215d7c9fac7394bb7014213bffabb7 (diff)
parent	908421f6cc6b6e5db6e8e8c35ab8fc0fb64f25c2 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next
Jeff Kirsher says:
====================
This series contains updates to ixgbe & ixgbevf.
...
Alexander Duyck (6):
  ixgbe: Ping the VFs on link status change to trigger link change
  ixgbe: Handle failures in the ixgbe_setup_rx/tx_resources calls
  ixgbe: Move configuration of set_real_num_rx/tx_queues into open
  ixgbe: Update the logic for ixgbe_cache_ring_dcb and DCB RSS configuration
  ixgbe: Cleanup logic for MRQC and MTQC configuration
  ixgbevf: Update descriptor macros to accept pointers and drop _ADV suffix
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c	| 138
-rw-r--r--	drivers/net/ethernet/intel/ixgbe/ixgbe_main.c	| 190
-rw-r--r--	drivers/net/ethernet/intel/ixgbevf/ixgbevf.h	| 12
-rw-r--r--	drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c	| 18
4 files changed, 183 insertions(+), 175 deletions(-)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index d308e7140171..4c3822f04bb9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -42,42 +42,37 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
 
 	switch (hw->mac.type) {
 	case ixgbe_mac_82598EB:
-		*tx = tc << 2;
-		*rx = tc << 3;
+		/* TxQs/TC: 4   RxQs/TC: 8 */
+		*tx = tc << 2; /* 0, 4, 8, 12, 16, 20, 24, 28 */
+		*rx = tc << 3; /* 0, 8, 16, 24, 32, 40, 48, 56 */
 		break;
 	case ixgbe_mac_82599EB:
 	case ixgbe_mac_X540:
 		if (num_tcs > 4) {
-			if (tc < 3) {
-				*tx = tc << 5;
-				*rx = tc << 4;
-			} else if (tc < 5) {
-				*tx = ((tc + 2) << 4);
-				*rx = tc << 4;
-			} else if (tc < num_tcs) {
-				*tx = ((tc + 8) << 3);
-				*rx = tc << 4;
-			}
+			/*
+			 * TCs    : TC0/1 TC2/3 TC4-7
+			 * TxQs/TC:    32    16     8
+			 * RxQs/TC:    16    16    16
+			 */
+			*rx = tc << 4;
+			if (tc < 3)
+				*tx = tc << 5;		/*   0,  32,  64 */
+			else if (tc < 5)
+				*tx = (tc + 2) << 4;	/*  80,  96 */
+			else
+				*tx = (tc + 8) << 3;	/* 104, 112, 120 */
 		} else {
-			*rx = tc << 5;
-			switch (tc) {
-			case 0:
-				*tx = 0;
-				break;
-			case 1:
-				*tx = 64;
-				break;
-			case 2:
-				*tx = 96;
-				break;
-			case 3:
-				*tx = 112;
-				break;
-			default:
-				break;
-			}
+			/*
+			 * TCs    : TC0 TC1 TC2/3
+			 * TxQs/TC:  64  32  16
+			 * RxQs/TC:  32  32  32
+			 */
+			*rx = tc << 5;
+			if (tc < 2)
+				*tx = tc << 6;		/*  0,  64 */
+			else
+				*tx = (tc + 4) << 4;	/* 96, 112 */
 		}
-		break;
 	default:
 		break;
 	}
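The shift arithmetic above is dense; a minimal standalone sketch of the num_tcs > 4 branch (first_reg_idx() is a hypothetical stand-in for ixgbe_get_first_reg_idx(), an illustration rather than driver code) confirms the offsets listed in the new comments:

#include <stdio.h>

/* Sketch of the 82599/X540 mapping above for the 8-TC case (num_tcs > 4). */
static void first_reg_idx(unsigned int tc, unsigned int *tx, unsigned int *rx)
{
	*rx = tc << 4;			/* 16 Rx queues per TC */
	if (tc < 3)
		*tx = tc << 5;		/* TC0-2: 32 Tx queues per TC */
	else if (tc < 5)
		*tx = (tc + 2) << 4;	/* TC3-4: 16 Tx queues per TC */
	else
		*tx = (tc + 8) << 3;	/* TC5-7: 8 Tx queues per TC */
}

int main(void)
{
	unsigned int tc, tx, rx;

	for (tc = 0; tc < 8; tc++) {
		first_reg_idx(tc, &tx, &rx);
		printf("TC%u: tx=%3u rx=%3u\n", tc, tx, rx);
	}
	return 0;	/* tx offsets: 0, 32, 64, 80, 96, 104, 112, 120 */
}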
@@ -90,25 +85,26 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
  * Cache the descriptor ring offsets for DCB to the assigned rings.
  *
  **/
-static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
+static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
 {
 	struct net_device *dev = adapter->netdev;
-	int i, j, k;
+	unsigned int tx_idx, rx_idx;
+	int tc, offset, rss_i, i;
 	u8 num_tcs = netdev_get_num_tc(dev);
 
-	if (!num_tcs)
+	/* verify we have DCB queueing enabled before proceeding */
+	if (num_tcs <= 1)
 		return false;
 
-	for (i = 0, k = 0; i < num_tcs; i++) {
-		unsigned int tx_s, rx_s;
-		u16 count = dev->tc_to_txq[i].count;
+	rss_i = adapter->ring_feature[RING_F_RSS].indices;
 
-		ixgbe_get_first_reg_idx(adapter, i, &tx_s, &rx_s);
-		for (j = 0; j < count; j++, k++) {
-			adapter->tx_ring[k]->reg_idx = tx_s + j;
-			adapter->rx_ring[k]->reg_idx = rx_s + j;
-			adapter->tx_ring[k]->dcb_tc = i;
-			adapter->rx_ring[k]->dcb_tc = i;
+	for (tc = 0, offset = 0; tc < num_tcs; tc++, offset += rss_i) {
+		ixgbe_get_first_reg_idx(adapter, tc, &tx_idx, &rx_idx);
+		for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++) {
+			adapter->tx_ring[offset + i]->reg_idx = tx_idx;
+			adapter->rx_ring[offset + i]->reg_idx = rx_idx;
+			adapter->tx_ring[offset + i]->dcb_tc = tc;
+			adapter->rx_ring[offset + i]->dcb_tc = tc;
 		}
 	}
 
@@ -349,7 +345,7 @@ static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
  * fallthrough conditions.
  *
  **/
-static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
+static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
 {
 	/* Start with base case */
 	adapter->num_rx_queues = 1;
@@ -358,29 +354,14 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
 	adapter->num_rx_queues_per_pool = 1;
 
 	if (ixgbe_set_sriov_queues(adapter))
-		goto done;
+		return;
 
 #ifdef CONFIG_IXGBE_DCB
 	if (ixgbe_set_dcb_queues(adapter))
-		goto done;
+		return;
 
 #endif
-	if (ixgbe_set_rss_queues(adapter))
-		goto done;
-
-	/* fallback to base case */
-	adapter->num_rx_queues = 1;
-	adapter->num_tx_queues = 1;
-
-done:
-	if ((adapter->netdev->reg_state == NETREG_UNREGISTERED) ||
-	    (adapter->netdev->reg_state == NETREG_UNREGISTERING))
-		return 0;
-
-	/* Notify the stack of the (possibly) reduced queue counts. */
-	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
-	return netif_set_real_num_rx_queues(adapter->netdev,
-					    adapter->num_rx_queues);
+	ixgbe_set_rss_queues(adapter);
 }
 
 static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
@@ -710,11 +691,10 @@ static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
  * Attempt to configure the interrupts using the best available
  * capabilities of the hardware and the kernel.
  **/
-static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
+static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
-	int err = 0;
-	int vector, v_budget;
+	int vector, v_budget, err;
 
 	/*
 	 * It's easy to be greedy for MSI-X vectors, but it really
@@ -747,7 +727,7 @@ static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
 		ixgbe_acquire_msix_vectors(adapter, v_budget);
 
 		if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
-			goto out;
+			return;
 	}
 
 	adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
@@ -762,25 +742,17 @@ static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
 	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
 		ixgbe_disable_sriov(adapter);
 
-	err = ixgbe_set_num_queues(adapter);
-	if (err)
-		return err;
-
+	ixgbe_set_num_queues(adapter);
 	adapter->num_q_vectors = 1;
 
 	err = pci_enable_msi(adapter->pdev);
-	if (!err) {
-		adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
-	} else {
+	if (err) {
 		netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
 			     "Unable to allocate MSI interrupt, "
 			     "falling back to legacy. Error: %d\n", err);
-		/* reset err */
-		err = 0;
+		return;
 	}
-
-out:
-	return err;
+	adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
 }
 
 /**
@@ -798,15 +770,10 @@ int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
 	int err;
 
 	/* Number of supported queues */
-	err = ixgbe_set_num_queues(adapter);
-	if (err)
-		return err;
+	ixgbe_set_num_queues(adapter);
 
-	err = ixgbe_set_interrupt_capability(adapter);
-	if (err) {
-		e_dev_err("Unable to setup interrupt capabilities\n");
-		goto err_set_interrupt;
-	}
+	/* Set interrupt mode */
+	ixgbe_set_interrupt_capability(adapter);
 
 	err = ixgbe_alloc_q_vectors(adapter);
 	if (err) {
@@ -826,7 +793,6 @@ int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
 
 err_alloc_q_vectors:
 	ixgbe_reset_interrupt_capability(adapter);
-err_set_interrupt:
 	return err;
 }
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index ee230f533ee3..2b4b79178858 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -2719,8 +2719,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
 static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
-	u32 rttdcs;
-	u32 reg;
+	u32 rttdcs, mtqc;
 	u8 tcs = netdev_get_num_tc(adapter->netdev);
 
 	if (hw->mac.type == ixgbe_mac_82598EB)
@@ -2732,28 +2731,32 @@ static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
 	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
 
 	/* set transmit pool layout */
-	switch (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
-	case (IXGBE_FLAG_SRIOV_ENABLED):
-		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
-				(IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF));
-		break;
-	default:
-		if (!tcs)
-			reg = IXGBE_MTQC_64Q_1PB;
-		else if (tcs <= 4)
-			reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
+	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+		mtqc = IXGBE_MTQC_VT_ENA;
+		if (tcs > 4)
+			mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
+		else if (tcs > 1)
+			mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
+		else if (adapter->ring_feature[RING_F_RSS].indices == 4)
+			mtqc |= IXGBE_MTQC_32VF;
+		else
+			mtqc |= IXGBE_MTQC_64VF;
+	} else {
+		if (tcs > 4)
+			mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
+		else if (tcs > 1)
+			mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
 		else
-			reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
+			mtqc = IXGBE_MTQC_64Q_1PB;
+	}
 
-		IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
+	IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
 
 	/* Enable Security TX Buffer IFG for multiple pb */
 	if (tcs) {
-		reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
-		reg |= IXGBE_SECTX_DCB;
-		IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
-	}
-	break;
+		u32 sectx = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
+		sectx |= IXGBE_SECTX_DCB;
+		IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, sectx);
 	}
 
 	/* re-enable the arbiter */
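The rewritten MTQC selection reads as a decision ladder: with SR-IOV the VT mode is sized first by TC count, then by RSS width; without it, TC count alone decides. A condensed sketch, assuming a hypothetical pick_mtqc() helper with strings standing in for the IXGBE_MTQC_* constants:

#include <stdio.h>

/* Illustrative restatement of the MTQC ladder above; not driver code. */
static const char *pick_mtqc(int sriov, int tcs, int rss_indices)
{
	if (sriov) {
		if (tcs > 4)
			return "VT_ENA | RT_ENA | 8TC_8TQ";
		if (tcs > 1)
			return "VT_ENA | RT_ENA | 4TC_4TQ";
		if (rss_indices == 4)
			return "VT_ENA | 32VF";
		return "VT_ENA | 64VF";
	}
	if (tcs > 4)
		return "RT_ENA | 8TC_8TQ";
	if (tcs > 1)
		return "RT_ENA | 4TC_4TQ";
	return "64Q_1PB";
}

int main(void)
{
	printf("%s\n", pick_mtqc(1, 0, 2));	/* VT_ENA | 64VF */
	printf("%s\n", pick_mtqc(0, 8, 16));	/* RT_ENA | 8TC_8TQ */
	return 0;
}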
@@ -2886,11 +2889,18 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
 	u32 mrqc = 0, reta = 0;
 	u32 rxcsum;
 	int i, j;
-	u8 tcs = netdev_get_num_tc(adapter->netdev);
-	int maxq = adapter->ring_feature[RING_F_RSS].indices;
+	u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
 
-	if (tcs)
-		maxq = min(maxq, adapter->num_tx_queues / tcs);
+	if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
+		rss_i = 1;
+
+	/*
+	 * Program table for at least 2 queues w/ SR-IOV so that VFs can
+	 * make full use of any rings they may have.  We will use the
+	 * PSRTYPE register to control how many rings we use within the PF.
+	 */
+	if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 2))
+		rss_i = 2;
 
 	/* Fill out hash function seeds */
 	for (i = 0; i < 10; i++)
@@ -2898,7 +2908,7 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
 
 	/* Fill out redirection table */
 	for (i = 0, j = 0; i < 128; i++, j++) {
-		if (j == maxq)
+		if (j == rss_i)
 			j = 0;
 		/* reta = 4-byte sliding window of
 		 * 0x00..(indices-1)(indices-1)00..etc. */
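The "4-byte sliding window" comment kept above is terse: the 128-entry redirection table is packed one byte per entry and flushed to a RETA register every fourth entry. A standalone sketch of that packing idea, assuming rss_i = 4; the exact byte layout the hardware expects is in the loop body this hunk elides, so treat the output as illustrative only:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Pack 128 redirection entries, 4 per 32-bit register, cycling
	 * 0..rss_i-1.  The IXGBE_WRITE_REG() call becomes a printf here. */
	uint32_t reta = 0;
	int i, j, rss_i = 4;	/* assume 4 RSS queues */

	for (i = 0, j = 0; i < 128; i++, j++) {
		if (j == rss_i)
			j = 0;
		reta = (reta << 8) | j;	/* slide the window one byte */
		if ((i & 3) == 3)	/* every 4th entry: flush a register */
			printf("RETA(%d) = 0x%08x\n", i >> 2, reta);
	}
	return 0;	/* each register prints as 0x00010203 */
}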
@@ -2912,35 +2922,36 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
 	rxcsum |= IXGBE_RXCSUM_PCSD;
 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
 
-	if (adapter->hw.mac.type == ixgbe_mac_82598EB &&
-	    (adapter->flags & IXGBE_FLAG_RSS_ENABLED)) {
+	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+		if (adapter->flags & IXGBE_FLAG_RSS_ENABLED)
 			mrqc = IXGBE_MRQC_RSSEN;
 	} else {
-		int mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED
-					   | IXGBE_FLAG_SRIOV_ENABLED);
-
-		switch (mask) {
-		case (IXGBE_FLAG_RSS_ENABLED):
-			if (!tcs)
-				mrqc = IXGBE_MRQC_RSSEN;
-			else if (tcs <= 4)
-				mrqc = IXGBE_MRQC_RTRSS4TCEN;
+		u8 tcs = netdev_get_num_tc(adapter->netdev);
+
+		if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+			if (tcs > 4)
+				mrqc = IXGBE_MRQC_VMDQRT8TCEN;	/* 8 TCs */
+			else if (tcs > 1)
+				mrqc = IXGBE_MRQC_VMDQRT4TCEN;	/* 4 TCs */
+			else if (adapter->ring_feature[RING_F_RSS].indices == 4)
+				mrqc = IXGBE_MRQC_VMDQRSS32EN;
 			else
+				mrqc = IXGBE_MRQC_VMDQRSS64EN;
+		} else {
+			if (tcs > 4)
 				mrqc = IXGBE_MRQC_RTRSS8TCEN;
-			break;
-		case (IXGBE_FLAG_SRIOV_ENABLED):
-			mrqc = IXGBE_MRQC_VMDQEN;
-			break;
-		default:
-			break;
+			else if (tcs > 1)
+				mrqc = IXGBE_MRQC_RTRSS4TCEN;
+			else
+				mrqc = IXGBE_MRQC_RSSEN;
 		}
 	}
 
 	/* Perform hash on these packet types */
-	mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
-	      | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
-	      | IXGBE_MRQC_RSS_FIELD_IPV6
-	      | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
+	mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4 |
+		IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
+		IXGBE_MRQC_RSS_FIELD_IPV6 |
+		IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
 
 	if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
@@ -3103,8 +3114,13 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
 	if (hw->mac.type == ixgbe_mac_82598EB)
 		return;
 
-	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED)
-		psrtype |= (adapter->num_rx_queues_per_pool << 29);
+	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
+		int rss_i = adapter->ring_feature[RING_F_RSS].indices;
+		if (rss_i > 3)
+			psrtype |= 2 << 29;
+		else if (rss_i > 1)
+			psrtype |= 1 << 29;
+	}
 
 	for (p = 0; p < adapter->num_rx_pools; p++)
 		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(adapter->num_vfs + p),
@@ -3608,20 +3624,16 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
 
 	/* Enable RSS Hash per TC */
 	if (hw->mac.type != ixgbe_mac_82598EB) {
-		int i;
-		u32 reg = 0;
-		u8 msb = 0;
-		u8 rss_i = adapter->netdev->tc_to_txq[0].count - 1;
+		u32 msb = 0;
+		u16 rss_i = adapter->ring_feature[RING_F_RSS].indices - 1;
 
 		while (rss_i) {
 			msb++;
 			rss_i >>= 1;
 		}
 
-		for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
-			reg |= msb << IXGBE_RQTC_SHIFT_TC(i);
-
-		IXGBE_WRITE_REG(hw, IXGBE_RQTC, reg);
+		/* write msb to all 8 TCs in one write */
+		IXGBE_WRITE_REG(hw, IXGBE_RQTC, msb * 0x11111111);
 	}
 }
 #endif
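The multiply in the new RQTC write deserves a second look: the loop computes msb = ceil(log2(rss_i)), and multiplying that nibble by 0x11111111 replicates it into all eight 4-bit TC fields in a single register write. A quick check of the arithmetic, assuming 16 RSS indices:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* rss_i = 16 indices -> the loop yields msb = ceil(log2(16)) = 4 */
	uint16_t rss_i = 16 - 1;
	uint32_t msb = 0;

	while (rss_i) {
		msb++;
		rss_i >>= 1;
	}
	/* one multiply copies the nibble into all eight 4-bit TC fields */
	assert(msb * 0x11111111 == 0x44444444);
	return 0;
}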
@@ -4549,10 +4561,16 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
 		err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
 		if (!err)
 			continue;
+
 		e_err(probe, "Allocation for Tx Queue %u failed\n", i);
-		break;
+		goto err_setup_tx;
 	}
 
+	return 0;
+err_setup_tx:
+	/* rewind the index freeing the rings as we go */
+	while (i--)
+		ixgbe_free_tx_resources(adapter->tx_ring[i]);
 	return err;
 }
@@ -4627,10 +4645,16 @@ static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
 		err = ixgbe_setup_rx_resources(adapter->rx_ring[i]);
 		if (!err)
 			continue;
+
 		e_err(probe, "Allocation for Rx Queue %u failed\n", i);
-		break;
+		goto err_setup_rx;
 	}
 
+	return 0;
+err_setup_rx:
+	/* rewind the index freeing the rings as we go */
+	while (i--)
+		ixgbe_free_rx_resources(adapter->rx_ring[i]);
 	return err;
 }
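Both resource hunks adopt the same unwind idiom: on the first failure, i indexes the ring that failed, so while (i--) frees exactly the rings already set up, in reverse order. A generic sketch of the pattern (setup_one() and free_one() are hypothetical stand-ins, not driver calls):

#include <stdlib.h>

#define NRING 8

static int setup_one(void **slot) { *slot = malloc(64); return *slot ? 0 : -1; }
static void free_one(void **slot) { free(*slot); *slot = NULL; }

static int setup_all(void *rings[NRING])
{
	int i, err = 0;

	for (i = 0; i < NRING; i++) {
		err = setup_one(&rings[i]);
		if (err)
			goto err_setup;
	}
	return 0;

err_setup:
	/* rewind: i indexes the ring that failed, so i-- touches only
	 * the rings that were successfully set up */
	while (i--)
		free_one(&rings[i]);
	return err;
}

int main(void)
{
	void *rings[NRING] = { 0 };
	return setup_all(rings);
}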
@@ -4786,15 +4810,31 @@ static int ixgbe_open(struct net_device *netdev)
 	if (err)
 		goto err_req_irq;
 
+	/* Notify the stack of the actual queue counts. */
+	err = netif_set_real_num_tx_queues(netdev,
+					   adapter->num_rx_pools > 1 ? 1 :
+					   adapter->num_tx_queues);
+	if (err)
+		goto err_set_queues;
+
+
+	err = netif_set_real_num_rx_queues(netdev,
+					   adapter->num_rx_pools > 1 ? 1 :
+					   adapter->num_rx_queues);
+	if (err)
+		goto err_set_queues;
+
 	ixgbe_up_complete(adapter);
 
 	return 0;
 
+err_set_queues:
+	ixgbe_free_irq(adapter);
 err_req_irq:
-err_setup_rx:
 	ixgbe_free_all_rx_resources(adapter);
-err_setup_tx:
+err_setup_rx:
 	ixgbe_free_all_tx_resources(adapter);
+err_setup_tx:
 	ixgbe_reset(adapter);
 
 	return err;
@@ -4852,23 +4892,19 @@ static int ixgbe_resume(struct pci_dev *pdev)
 
 	pci_wake_from_d3(pdev, false);
 
-	rtnl_lock();
-	err = ixgbe_init_interrupt_scheme(adapter);
-	rtnl_unlock();
-	if (err) {
-		e_dev_err("Cannot initialize interrupts for device\n");
-		return err;
-	}
-
 	ixgbe_reset(adapter);
 
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
 
-	if (netif_running(netdev)) {
+	rtnl_lock();
+	err = ixgbe_init_interrupt_scheme(adapter);
+	if (!err && netif_running(netdev))
 		err = ixgbe_open(netdev);
-		if (err)
-			return err;
-	}
+
+	rtnl_unlock();
+
+	if (err)
+		return err;
 
 	netif_device_attach(netdev);
 
@@ -5390,6 +5426,9 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
 
 	netif_carrier_on(netdev);
 	ixgbe_check_vf_rate_limit(adapter);
+
+	/* ping all the active vfs to let them know link has changed */
+	ixgbe_ping_all_vfs(adapter);
 }
 
 /**
@@ -5419,6 +5458,9 @@ static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
 
 	e_info(drv, "NIC Link is Down\n");
 	netif_carrier_off(netdev);
+
+	/* ping all the active vfs to let them know link has changed */
+	ixgbe_ping_all_vfs(adapter);
 }
 
 /**
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index f92daca249f8..1f1376515f70 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -164,12 +164,12 @@ struct ixgbevf_q_vector {
 	((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
 	(R)->next_to_clean - (R)->next_to_use - 1)
 
-#define IXGBE_RX_DESC_ADV(R, i)	    \
-	(&(((union ixgbe_adv_rx_desc *)((R).desc))[i]))
-#define IXGBE_TX_DESC_ADV(R, i)	    \
-	(&(((union ixgbe_adv_tx_desc *)((R).desc))[i]))
-#define IXGBE_TX_CTXTDESC_ADV(R, i)	    \
-	(&(((struct ixgbe_adv_tx_context_desc *)((R).desc))[i]))
+#define IXGBEVF_RX_DESC(R, i)	    \
+	(&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
+#define IXGBEVF_TX_DESC(R, i)	    \
+	(&(((union ixgbe_adv_tx_desc *)((R)->desc))[i]))
+#define IXGBEVF_TX_CTXTDESC(R, i)	    \
+	(&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i]))
 
 #define IXGBE_MAX_JUMBO_FRAME_SIZE 16128
 
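The rename is mechanical, but the calling convention changes too: the old macros took the ring by value and expanded (R).desc, while the new ones take the pointer callers already hold. A toy model (simplified types, not the driver's) showing that both styles land on the same descriptor:

#include <stdio.h>

/* Toy model of the macro change: same expansion, pointer vs. value. */
union adv_tx_desc { unsigned long long qw[2]; };
struct ring { void *desc; };

#define TX_DESC_ADV(R, i)  (&(((union adv_tx_desc *)((R).desc))[i]))   /* old */
#define VF_TX_DESC(R, i)   (&(((union adv_tx_desc *)((R)->desc))[i]))  /* new */

int main(void)
{
	union adv_tx_desc descs[4] = { 0 };
	struct ring r = { descs }, *tx_ring = &r;

	/* old style: caller dereferences the ring pointer at every call site */
	union adv_tx_desc *a = TX_DESC_ADV(*tx_ring, 2);
	/* new style: caller passes the pointer it already holds */
	union adv_tx_desc *b = VF_TX_DESC(tx_ring, 2);

	printf("%d\n", a == b);	/* 1: identical expansion */
	return 0;
}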
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 8e022c6f4b90..c98cdf7de49d 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -195,7 +195,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
 
 	i = tx_ring->next_to_clean;
 	eop = tx_ring->tx_buffer_info[i].next_to_watch;
-	eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
+	eop_desc = IXGBEVF_TX_DESC(tx_ring, eop);
 
 	while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
 	       (count < tx_ring->count)) {
@@ -206,7 +206,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
 			goto cont_loop;
 		for ( ; !cleaned; count++) {
 			struct sk_buff *skb;
-			tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
+			tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
 			tx_buffer_info = &tx_ring->tx_buffer_info[i];
 			cleaned = (i == eop);
 			skb = tx_buffer_info->skb;
@@ -235,7 +235,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
 
 cont_loop:
 		eop = tx_ring->tx_buffer_info[i].next_to_watch;
-		eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
+		eop_desc = IXGBEVF_TX_DESC(tx_ring, eop);
 	}
 
 	tx_ring->next_to_clean = i;
@@ -339,7 +339,7 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
 	bi = &rx_ring->rx_buffer_info[i];
 
 	while (cleaned_count--) {
-		rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
+		rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
 		skb = bi->skb;
 		if (!skb) {
 			skb = netdev_alloc_skb(adapter->netdev,
@@ -405,7 +405,7 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
 
 	i = rx_ring->next_to_clean;
-	rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
+	rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
 	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
 	rx_buffer_info = &rx_ring->rx_buffer_info[i];
 
@@ -432,7 +432,7 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
 		if (i == rx_ring->count)
 			i = 0;
 
-		next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
+		next_rxd = IXGBEVF_RX_DESC(rx_ring, i);
 		prefetch(next_rxd);
 		cleaned_count++;
 
@@ -2437,7 +2437,7 @@ static int ixgbevf_tso(struct ixgbevf_adapter *adapter,
 	i = tx_ring->next_to_use;
 
 	tx_buffer_info = &tx_ring->tx_buffer_info[i];
-	context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
+	context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);
 
 	/* VLAN MACLEN IPLEN */
 	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
@@ -2497,7 +2497,7 @@ static bool ixgbevf_tx_csum(struct ixgbevf_adapter *adapter,
 	    (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
 		i = tx_ring->next_to_use;
 		tx_buffer_info = &tx_ring->tx_buffer_info[i];
-		context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
+		context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);
 
 		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
 			vlan_macip_lens |= (tx_flags &
@@ -2700,7 +2700,7 @@ static void ixgbevf_tx_queue(struct ixgbevf_adapter *adapter,
 	i = tx_ring->next_to_use;
 	while (count--) {
 		tx_buffer_info = &tx_ring->tx_buffer_info[i];
-		tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
+		tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
 		tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
 		tx_desc->read.cmd_type_len =
 			cpu_to_le32(cmd_type_len | tx_buffer_info->length);