Diffstat (limited to 'drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c')
-rw-r--r--   drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c   61
1 file changed, 35 insertions(+), 26 deletions(-)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index cceafbc3f1db..b3c282d09b18 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -46,8 +46,8 @@ static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
 #endif /* IXGBE_FCOE */
         struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
         int i;
-        u16 reg_idx;
-        u8 tcs = netdev_get_num_tc(adapter->netdev);
+        u16 reg_idx, pool;
+        u8 tcs = adapter->hw_tcs;
 
         /* verify we have DCB queueing enabled before proceeding */
         if (tcs <= 1)
@@ -58,12 +58,16 @@ static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
                 return false;
 
         /* start at VMDq register offset for SR-IOV enabled setups */
+        pool = 0;
         reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
-        for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
+        for (i = 0, pool = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
                 /* If we are greater than indices move to next pool */
-                if ((reg_idx & ~vmdq->mask) >= tcs)
+                if ((reg_idx & ~vmdq->mask) >= tcs) {
+                        pool++;
                         reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
+                }
                 adapter->rx_ring[i]->reg_idx = reg_idx;
+                adapter->rx_ring[i]->netdev = pool ? NULL : adapter->netdev;
         }
 
         reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
@@ -92,6 +96,7 @@ static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
         for (i = fcoe->offset; i < adapter->num_rx_queues; i++) {
                 reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
                 adapter->rx_ring[i]->reg_idx = reg_idx;
+                adapter->rx_ring[i]->netdev = adapter->netdev;
                 reg_idx++;
         }
 
@@ -111,9 +116,8 @@ static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
 static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
                                     unsigned int *tx, unsigned int *rx)
 {
-        struct net_device *dev = adapter->netdev;
         struct ixgbe_hw *hw = &adapter->hw;
-        u8 num_tcs = netdev_get_num_tc(dev);
+        u8 num_tcs = adapter->hw_tcs;
 
         *tx = 0;
         *rx = 0;
@@ -168,10 +172,9 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
  **/
 static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
 {
-        struct net_device *dev = adapter->netdev;
+        u8 num_tcs = adapter->hw_tcs;
         unsigned int tx_idx, rx_idx;
         int tc, offset, rss_i, i;
-        u8 num_tcs = netdev_get_num_tc(dev);
 
         /* verify we have DCB queueing enabled before proceeding */
         if (num_tcs <= 1)
@@ -184,6 +187,7 @@ static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
         for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++) {
                 adapter->tx_ring[offset + i]->reg_idx = tx_idx;
                 adapter->rx_ring[offset + i]->reg_idx = rx_idx;
+                adapter->rx_ring[offset + i]->netdev = adapter->netdev;
                 adapter->tx_ring[offset + i]->dcb_tc = tc;
                 adapter->rx_ring[offset + i]->dcb_tc = tc;
         }
@@ -208,14 +212,15 @@ static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
 #endif /* IXGBE_FCOE */
         struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
         struct ixgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS];
+        u16 reg_idx, pool;
         int i;
-        u16 reg_idx;
 
         /* only proceed if VMDq is enabled */
         if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED))
                 return false;
 
         /* start at VMDq register offset for SR-IOV enabled setups */
+        pool = 0;
         reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
         for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
 #ifdef IXGBE_FCOE
@@ -224,15 +229,20 @@ static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
                         break;
 #endif
                 /* If we are greater than indices move to next pool */
-                if ((reg_idx & ~vmdq->mask) >= rss->indices)
+                if ((reg_idx & ~vmdq->mask) >= rss->indices) {
+                        pool++;
                         reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
+                }
                 adapter->rx_ring[i]->reg_idx = reg_idx;
+                adapter->rx_ring[i]->netdev = pool ? NULL : adapter->netdev;
         }
 
 #ifdef IXGBE_FCOE
         /* FCoE uses a linear block of queues so just assigning 1:1 */
-        for (; i < adapter->num_rx_queues; i++, reg_idx++)
+        for (; i < adapter->num_rx_queues; i++, reg_idx++) {
                 adapter->rx_ring[i]->reg_idx = reg_idx;
+                adapter->rx_ring[i]->netdev = adapter->netdev;
+        }
 
 #endif
         reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
@@ -269,8 +279,10 @@ static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
 {
         int i, reg_idx;
 
-        for (i = 0; i < adapter->num_rx_queues; i++)
+        for (i = 0; i < adapter->num_rx_queues; i++) {
                 adapter->rx_ring[i]->reg_idx = i;
+                adapter->rx_ring[i]->netdev = adapter->netdev;
+        }
         for (i = 0, reg_idx = 0; i < adapter->num_tx_queues; i++, reg_idx++)
                 adapter->tx_ring[i]->reg_idx = reg_idx;
         for (i = 0; i < adapter->num_xdp_queues; i++, reg_idx++)
@@ -340,7 +352,7 @@ static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
 #ifdef IXGBE_FCOE
         u16 fcoe_i = 0;
 #endif
-        u8 tcs = netdev_get_num_tc(adapter->netdev);
+        u8 tcs = adapter->hw_tcs;
 
         /* verify we have DCB queueing enabled before proceeding */
         if (tcs <= 1)
@@ -440,7 +452,7 @@ static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
         int tcs;
 
         /* Map queue offset and counts onto allocated tx queues */
-        tcs = netdev_get_num_tc(dev);
+        tcs = adapter->hw_tcs;
 
         /* verify we have DCB queueing enabled before proceeding */
         if (tcs <= 1)
@@ -607,6 +619,10 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
         }
 
 #endif
+        /* populate TC0 for use by pool 0 */
+        netdev_set_tc_queue(adapter->netdev, 0,
+                            adapter->num_rx_queues_per_pool, 0);
+
         return true;
 }
 
@@ -839,7 +855,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
         int node = NUMA_NO_NODE;
         int cpu = -1;
         int ring_count, size;
-        u8 tcs = netdev_get_num_tc(adapter->netdev);
+        u8 tcs = adapter->hw_tcs;
 
         ring_count = txr_count + rxr_count + xdp_count;
         size = sizeof(struct ixgbe_q_vector) +
@@ -922,11 +938,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
 
                 /* apply Tx specific ring traits */
                 ring->count = adapter->tx_ring_count;
-                if (adapter->num_rx_pools > 1)
-                        ring->queue_index =
-                                txr_idx % adapter->num_rx_queues_per_pool;
-                else
-                        ring->queue_index = txr_idx;
+                ring->queue_index = txr_idx;
 
                 /* assign ring to adapter */
                 adapter->tx_ring[txr_idx] = ring;
@@ -996,11 +1008,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
 #endif /* IXGBE_FCOE */
                 /* apply Rx specific ring traits */
                 ring->count = adapter->rx_ring_count;
-                if (adapter->num_rx_pools > 1)
-                        ring->queue_index =
-                                rxr_idx % adapter->num_rx_queues_per_pool;
-                else
-                        ring->queue_index = rxr_idx;
+                ring->queue_index = rxr_idx;
 
                 /* assign ring to adapter */
                 adapter->rx_ring[rxr_idx] = ring;
@@ -1176,7 +1184,7 @@ static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
          */
 
         /* Disable DCB unless we only have a single traffic class */
-        if (netdev_get_num_tc(adapter->netdev) > 1) {
+        if (adapter->hw_tcs > 1) {
                 e_dev_warn("Number of DCB TCs exceeds number of available queues. Disabling DCB support.\n");
                 netdev_reset_tc(adapter->netdev);
 
@@ -1188,6 +1196,7 @@ static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
                 adapter->dcb_cfg.pfc_mode_enable = false;
         }
 
+        adapter->hw_tcs = 0;
         adapter->dcb_cfg.num_tcs.pg_tcs = 1;
         adapter->dcb_cfg.num_tcs.pfc_tcs = 1;
 
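
Note on the __ALIGN_MASK() arithmetic used in the hunks above: __ALIGN_MASK(1, ~vmdq->mask) gives the number of hardware queues per VMDq pool, and __ALIGN_MASK(reg_idx, ~vmdq->mask) rounds a register index up to the start of the next pool. The standalone sketch below is not part of the patch; the mask value, RSS count, and ring count are made-up example assumptions. It reproduces the ring-to-pool walk from ixgbe_cache_ring_sriov(): the pool counter is bumped and reg_idx is realigned whenever the per-pool index runs past the RSS indices, which is how the patch decides that only pool 0 rings keep adapter->netdev while the others get NULL.

/* Illustration only -- mask/RSS/ring values are example assumptions,
 * not taken from the patch or from the ixgbe defaults.
 */
#include <stdio.h>

/* Same rounding helper as the kernel's __ALIGN_MASK() */
#define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))

int main(void)
{
        unsigned int vmdq_mask = 0x7C;  /* example mask: 4 queues per pool */
        unsigned int rss_indices = 2;   /* example: 2 RSS queues per pool */
        unsigned int reg_idx, pool = 0;
        unsigned int i;

        /* vmdq->offset is taken as 0 in this example */
        reg_idx = 0 * __ALIGN_MASK(1, ~vmdq_mask);
        for (i = 0; i < 8; i++, reg_idx++) {
                /* If we are greater than indices move to next pool */
                if ((reg_idx & ~vmdq_mask) >= rss_indices) {
                        pool++;
                        reg_idx = __ALIGN_MASK(reg_idx, ~vmdq_mask);
                }
                printf("ring %u -> reg_idx %u, pool %u, netdev = %s\n",
                       i, reg_idx, pool,
                       pool ? "NULL" : "adapter->netdev");
        }
        return 0;
}

With these example values the first two rings stay in pool 0 (reg_idx 0 and 1), then each later pair of rings jumps to the next pool boundary (4/5, 8/9, 12/13), mirroring how the patched loops assign reg_idx and decide the per-ring netdev pointer.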