 drivers/net/ixgbe/ixgbe_dcb_nl.c |  16
 drivers/net/ixgbe/ixgbe_main.c   | 235
 drivers/net/ixgbe/ixgbe_type.h   |   1
 3 files changed, 138 insertions, 114 deletions
diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c
index 293ff06ba16f..b229febdbc1b 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c
@@ -125,9 +125,7 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
 			goto out;
 		}
 
-		if (netif_running(netdev))
-			netdev->netdev_ops->ndo_stop(netdev);
-		ixgbe_clear_interrupt_scheme(adapter);
+		adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
 
 		switch (adapter->hw.mac.type) {
 		case ixgbe_mac_82598EB:
@@ -143,18 +141,12 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
 			break;
 		}
 
-		adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
-		if (!netdev_get_num_tc(netdev))
-			ixgbe_setup_tc(netdev, MAX_TRAFFIC_CLASS);
+		ixgbe_setup_tc(netdev, MAX_TRAFFIC_CLASS);
 	} else {
 		/* Turn off DCB */
 		if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
 			goto out;
 
-		if (netif_running(netdev))
-			netdev->netdev_ops->ndo_stop(netdev);
-		ixgbe_clear_interrupt_scheme(adapter);
-
 		adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
 		adapter->temp_dcb_cfg.pfc_mode_enable = false;
 		adapter->dcb_cfg.pfc_mode_enable = false;
@@ -167,13 +159,9 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
 		default:
 			break;
 		}
-
 		ixgbe_setup_tc(netdev, 0);
 	}
 
-	ixgbe_init_interrupt_scheme(adapter);
-	if (netif_running(netdev))
-		netdev->netdev_ops->ndo_open(netdev);
 out:
 	return err;
 }
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 20467da4f90e..7e3850ab4223 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -2815,8 +2815,8 @@ static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
 	u32 rttdcs;
-	u32 mask;
 	u32 reg;
+	u8 tcs = netdev_get_num_tc(adapter->netdev);
 
 	if (hw->mac.type == ixgbe_mac_82598EB)
 		return;
@@ -2827,28 +2827,27 @@ static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
 	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
 
 	/* set transmit pool layout */
-	mask = (IXGBE_FLAG_SRIOV_ENABLED | IXGBE_FLAG_DCB_ENABLED);
-	switch (adapter->flags & mask) {
-
+	switch (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
 	case (IXGBE_FLAG_SRIOV_ENABLED):
 		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
 				(IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF));
 		break;
+	default:
+		if (!tcs)
+			reg = IXGBE_MTQC_64Q_1PB;
+		else if (tcs <= 4)
+			reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
+		else
+			reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
 
-	case (IXGBE_FLAG_DCB_ENABLED):
-		/* We enable 8 traffic classes, DCB only */
-		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
-			      (IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ));
-
-		/* Enable Security TX Buffer IFG for DCB */
-		reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
-		reg |= IXGBE_SECTX_DCB;
-		IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
-
-		break;
+		IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
 
-	default:
-		IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
+		/* Enable Security TX Buffer IFG for multiple pb */
+		if (tcs) {
+			reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
+			reg |= IXGBE_SECTX_DCB;
+			IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
+		}
 		break;
 	}
 
@@ -2939,7 +2938,7 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
 	u32 mrqc = 0, reta = 0;
 	u32 rxcsum;
 	int i, j;
-	int mask;
+	u8 tcs = netdev_get_num_tc(adapter->netdev);
 
 	/* Fill out hash function seeds */
 	for (i = 0; i < 10; i++)
@@ -2961,33 +2960,28 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
 	rxcsum |= IXGBE_RXCSUM_PCSD;
 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
 
-	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
-		mask = adapter->flags & IXGBE_FLAG_RSS_ENABLED;
-	else
-		mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED
-#ifdef CONFIG_IXGBE_DCB
-					 | IXGBE_FLAG_DCB_ENABLED
-#endif
-					 | IXGBE_FLAG_SRIOV_ENABLED
-					);
-
-	switch (mask) {
-#ifdef CONFIG_IXGBE_DCB
-	case (IXGBE_FLAG_DCB_ENABLED | IXGBE_FLAG_RSS_ENABLED):
-		mrqc = IXGBE_MRQC_RTRSS8TCEN;
-		break;
-	case (IXGBE_FLAG_DCB_ENABLED):
-		mrqc = IXGBE_MRQC_RT8TCEN;
-		break;
-#endif /* CONFIG_IXGBE_DCB */
-	case (IXGBE_FLAG_RSS_ENABLED):
+	if (adapter->hw.mac.type == ixgbe_mac_82598EB &&
+	    (adapter->flags & IXGBE_FLAG_RSS_ENABLED)) {
 		mrqc = IXGBE_MRQC_RSSEN;
-		break;
-	case (IXGBE_FLAG_SRIOV_ENABLED):
-		mrqc = IXGBE_MRQC_VMDQEN;
-		break;
-	default:
-		break;
+	} else {
+		int mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED
+					     | IXGBE_FLAG_SRIOV_ENABLED);
+
+		switch (mask) {
+		case (IXGBE_FLAG_RSS_ENABLED):
+			if (!tcs)
+				mrqc = IXGBE_MRQC_RSSEN;
+			else if (tcs <= 4)
+				mrqc = IXGBE_MRQC_RTRSS4TCEN;
+			else
+				mrqc = IXGBE_MRQC_RTRSS8TCEN;
+			break;
+		case (IXGBE_FLAG_SRIOV_ENABLED):
+			mrqc = IXGBE_MRQC_VMDQEN;
+			break;
+		default:
+			break;
+		}
 	}
 
 	/* Perform hash on these packet types */
@@ -4461,14 +4455,17 @@ static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
 {
 	bool ret = false;
 	struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_DCB];
-	int i, q;
+	int tcs = netdev_get_num_tc(adapter->netdev);
+	int max_q, i, q;
 
-	if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
+	if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED) || !tcs)
 		return ret;
 
+	max_q = adapter->netdev->num_tx_queues / tcs;
+
 	f->indices = 0;
-	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
-		q = min((int)num_online_cpus(), MAX_TRAFFIC_CLASS);
+	for (i = 0; i < tcs; i++) {
+		q = min((int)num_online_cpus(), max_q);
 		f->indices += q;
 	}
 
@@ -4680,55 +4677,6 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
 	}
 }
 
-#define IXGBE_MAX_Q_PER_TC (IXGBE_MAX_DCB_INDICES / MAX_TRAFFIC_CLASS)
-
-/* ixgbe_setup_tc - routine to configure net_device for multiple traffic
- * classes.
- *
- * @netdev: net device to configure
- * @tc: number of traffic classes to enable
- */
-int ixgbe_setup_tc(struct net_device *dev, u8 tc)
-{
-	int i;
-	unsigned int q, offset = 0;
-
-	if (!tc) {
-		netdev_reset_tc(dev);
-	} else {
-		struct ixgbe_adapter *adapter = netdev_priv(dev);
-
-		/* Hardware supports up to 8 traffic classes */
-		if (tc > MAX_TRAFFIC_CLASS || netdev_set_num_tc(dev, tc))
-			return -EINVAL;
-
-		/* Partition Tx queues evenly amongst traffic classes */
-		for (i = 0; i < tc; i++) {
-			q = min((int)num_online_cpus(), IXGBE_MAX_Q_PER_TC);
-			netdev_set_prio_tc_map(dev, i, i);
-			netdev_set_tc_queue(dev, i, q, offset);
-			offset += q;
-		}
-
-		/* This enables multiple traffic class support in the hardware
-		 * which defaults to strict priority transmission by default.
-		 * If traffic classes are already enabled perhaps through DCB
-		 * code path then existing configuration will be used.
-		 */
-		if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
-		    dev->dcbnl_ops && dev->dcbnl_ops->setdcbx) {
-			struct ieee_ets ets = {
-				.prio_tc = {0, 1, 2, 3, 4, 5, 6, 7},
-			};
-			u8 mode = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
-
-			dev->dcbnl_ops->setdcbx(dev, mode);
-			dev->dcbnl_ops->ieee_setets(dev, &ets);
-		}
-	}
-	return 0;
-}
-
 /**
  * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
  * @adapter: board private structure to initialize
@@ -4742,7 +4690,7 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
 	int i, j, k;
 	u8 num_tcs = netdev_get_num_tc(dev);
 
-	if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
+	if (!num_tcs)
 		return false;
 
 	for (i = 0, k = 0; i < num_tcs; i++) {
@@ -7220,6 +7168,95 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
 	return stats;
 }
 
+/* ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid.
+ * @adapter: pointer to ixgbe_adapter
+ * @tc: number of traffic classes currently enabled
+ *
+ * Configure a valid 802.1Qp to Rx packet buffer mapping, i.e. confirm
+ * each 802.1Q priority maps to a packet buffer that exists.
+ */
+static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 reg, rsave;
+	int i;
+
+	/* 82598 have a static priority to TC mapping that can not
+	 * be changed so no validation is needed.
+	 */
+	if (hw->mac.type == ixgbe_mac_82598EB)
+		return;
+
+	reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
+	rsave = reg;
+
+	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+		u8 up2tc = reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT);
+
+		/* If up2tc is out of bounds default to zero */
+		if (up2tc > tc)
+			reg &= ~(0x7 << IXGBE_RTRUP2TC_UP_SHIFT);
+	}
+
+	if (reg != rsave)
+		IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);
+
+	return;
+}
+
+
+/* ixgbe_setup_tc - routine to configure net_device for multiple traffic
+ * classes.
+ *
+ * @netdev: net device to configure
+ * @tc: number of traffic classes to enable
+ */
+int ixgbe_setup_tc(struct net_device *dev, u8 tc)
+{
+	unsigned int q, i, offset = 0;
+	struct ixgbe_adapter *adapter = netdev_priv(dev);
+	struct ixgbe_hw *hw = &adapter->hw;
+	int max_q = adapter->netdev->num_tx_queues / tc;
+
+	/* If DCB is enabled do not remove traffic classes, multiple
+	 * traffic classes are required to implement DCB
+	 */
+	if (!tc && (adapter->flags & IXGBE_FLAG_DCB_ENABLED))
+		return 0;
+
+	/* Hardware supports up to 8 traffic classes */
+	if (tc > MAX_TRAFFIC_CLASS ||
+	    (hw->mac.type == ixgbe_mac_82598EB && tc < MAX_TRAFFIC_CLASS))
+		return -EINVAL;
+
+	/* Hardware has to reinitialize queues and interrupts to
+	 * match packet buffer alignment. Unfortunately, the
+	 * hardware is not flexible enough to do this dynamically.
+	 */
+	if (netif_running(dev))
+		ixgbe_close(dev);
+	ixgbe_clear_interrupt_scheme(adapter);
+
+	if (tc)
+		netdev_set_num_tc(dev, tc);
+	else
+		netdev_reset_tc(dev);
+
+	/* Partition Tx queues evenly amongst traffic classes */
+	for (i = 0; i < tc; i++) {
+		q = min((int)num_online_cpus(), max_q);
+		netdev_set_prio_tc_map(dev, i, i);
+		netdev_set_tc_queue(dev, i, q, offset);
+		offset += q;
+	}
+
+	ixgbe_init_interrupt_scheme(adapter);
+	ixgbe_validate_rtr(adapter, tc);
+	if (netif_running(dev))
+		ixgbe_open(dev);
+
+	return 0;
+}
 
 static const struct net_device_ops ixgbe_netdev_ops = {
 	.ndo_open		= ixgbe_open,
@@ -7240,9 +7277,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
 	.ndo_set_vf_tx_rate	= ixgbe_ndo_set_vf_bw,
 	.ndo_get_vf_config	= ixgbe_ndo_get_vf_config,
 	.ndo_get_stats64	= ixgbe_get_stats64,
-#ifdef CONFIG_IXGBE_DCB
 	.ndo_setup_tc		= ixgbe_setup_tc,
-#endif
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= ixgbe_netpoll,
 #endif
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index c0849a649d4e..54550646c5c2 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -1881,6 +1881,7 @@ enum {
 #define IXGBE_MTQC_32VF      0x8 /* 4 TX Queues per pool w/32VF's */
 #define IXGBE_MTQC_64VF      0x4 /* 2 TX Queues per pool w/64VF's */
 #define IXGBE_MTQC_8TC_8TQ   0xC /* 8 TC if RT_ENA or 8 TQ if VT_ENA */
+#define IXGBE_MTQC_4TC_4TQ   0x8 /* 4 TC if RT_ENA or 4 TQ if VT_ENA */
 
 /* Receive Descriptor bit definitions */
 #define IXGBE_RXD_STAT_DD    0x01 /* Descriptor Done */