-rw-r--r--	drivers/net/ethernet/intel/ixgbe/ixgbe.h	|  20
-rw-r--r--	drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c	|   8
-rw-r--r--	drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c	|   8
-rw-r--r--	drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c	| 150
-rw-r--r--	drivers/net/ethernet/intel/ixgbe/ixgbe_main.c	| 150
5 files changed, 127 insertions, 209 deletions
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index d1acf2451d52..2ffdc8f4c276 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -278,8 +278,10 @@ enum ixgbe_ring_f_enum {
 #define MAX_TX_QUEUES IXGBE_MAX_FDIR_INDICES
 #endif /* IXGBE_FCOE */
 struct ixgbe_ring_feature {
-	int indices;
-	int mask;
+	u16 limit;	/* upper limit on feature indices */
+	u16 indices;	/* current value of indices */
+	u16 mask;	/* Mask used for feature to ring mapping */
+	u16 offset;	/* offset to start of feature */
 } ____cacheline_internodealigned_in_smp;
 
 /*
@@ -315,7 +317,7 @@ struct ixgbe_ring_container {
 	     ? 8 : 1)
 #define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS
 
-/* MAX_MSIX_Q_VECTORS of these are allocated,
+/* MAX_Q_VECTORS of these are allocated,
  * but we only use one per queue-specific vector.
  */
 struct ixgbe_q_vector {
@@ -401,11 +403,11 @@ static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
 #define NON_Q_VECTORS (OTHER_VECTOR)
 
 #define MAX_MSIX_VECTORS_82599 64
-#define MAX_MSIX_Q_VECTORS_82599 64
+#define MAX_Q_VECTORS_82599 64
 #define MAX_MSIX_VECTORS_82598 18
-#define MAX_MSIX_Q_VECTORS_82598 16
+#define MAX_Q_VECTORS_82598 16
 
-#define MAX_MSIX_Q_VECTORS MAX_MSIX_Q_VECTORS_82599
+#define MAX_Q_VECTORS MAX_Q_VECTORS_82599
 #define MAX_MSIX_COUNT MAX_MSIX_VECTORS_82599
 
 #define MIN_MSIX_Q_VECTORS 1
@@ -496,7 +498,7 @@ struct ixgbe_adapter {
 	u32 alloc_rx_page_failed;
 	u32 alloc_rx_buff_failed;
 
-	struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
+	struct ixgbe_q_vector *q_vector[MAX_Q_VECTORS];
 
 	/* DCB parameters */
 	struct ieee_pfc *ixgbe_ieee_pfc;
@@ -507,8 +509,8 @@ struct ixgbe_adapter {
 	u8 dcbx_cap;
 	enum ixgbe_fc_mode last_lfc_mode;
 
-	int num_msix_vectors;
-	int max_msix_q_vectors;	/* true count of q_vectors for device */
+	int num_q_vectors;	/* current number of q_vectors for device */
+	int max_q_vectors;	/* true count of q_vectors for device */
 	struct ixgbe_ring_feature ring_feature[RING_F_ARRAY_SIZE];
 	struct msix_entry *msix_entries;
 
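The ixgbe.h hunk above is the heart of the patch: the old int indices/mask pair becomes four u16 fields with distinct jobs, and most of the later hunks are callers being moved onto the right field. A standalone sketch of the intended roles (plain userspace C, not driver code; the struct name and every value below are made up for illustration):

#include <stdio.h>

struct ring_feature {
	unsigned limit;		/* upper limit on feature indices */
	unsigned indices;	/* current value of indices */
	unsigned mask;		/* feature-to-ring mapping mask */
	unsigned offset;	/* offset to start of feature */
};

int main(void)
{
	struct ring_feature fcoe = { .limit = 8, .indices = 4, .offset = 16 };
	struct ring_feature rss = { .limit = 16, .indices = 16, .mask = 0xF };

	/* absolute ring index of the i-th FCoE ring is offset + i */
	for (unsigned i = 0; i < fcoe.indices; i++)
		printf("FCoE ring %u -> absolute ring %u\n", i, fcoe.offset + i);

	/* 82598 folds a register index back into the RSS range via mask */
	printf("reg_idx 21 & 0x%X -> %u\n", rss.mask, 21 & rss.mask);
	return 0;
}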
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index bbc7da5cdb4d..8e1be50af70a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -2090,7 +2090,6 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	struct ixgbe_q_vector *q_vector;
 	int i;
-	int num_vectors;
 	u16 tx_itr_param, rx_itr_param;
 	bool need_reset = false;
 
@@ -2126,12 +2125,7 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
 	/* check the old value and enable RSC if necessary */
 	need_reset = ixgbe_update_rsc(adapter);
 
-	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
-		num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-	else
-		num_vectors = 1;
-
-	for (i = 0; i < num_vectors; i++) {
+	for (i = 0; i < adapter->num_q_vectors; i++) {
 		q_vector = adapter->q_vector[i];
 		if (q_vector->tx.count && !q_vector->rx.count)
 			/* tx only */
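The ethtool hunk shows the pattern repeated through the rest of the patch: instead of re-deriving the queue-vector count from num_msix_vectors minus NON_Q_VECTORS at every loop site (with an MSI/legacy special case), callers read a count cached once in adapter->num_q_vectors. A minimal before/after model, lifted out of the driver into plain C (the struct here is a stand-in, not the real ixgbe_adapter):

#include <stdbool.h>
#include <stdio.h>

#define NON_Q_VECTORS 1

struct adapter {
	bool msix_enabled;
	int num_msix_vectors;	/* old: total MSI-X vectors incl. "other" */
	int num_q_vectors;	/* new: queue vectors, cached at setup */
};

static int old_count(const struct adapter *a)
{
	/* the old code recomputed this at every loop site */
	return a->msix_enabled ? a->num_msix_vectors - NON_Q_VECTORS : 1;
}

int main(void)
{
	struct adapter a = { .msix_enabled = true, .num_msix_vectors = 9 };

	a.num_q_vectors = old_count(&a);	/* set once at setup time */
	printf("old=%d new=%d\n", old_count(&a), a.num_q_vectors);
	return 0;
}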
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index 0ee4dbf4a752..0922ece4d853 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -674,7 +674,7 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
 	if (adapter->ring_feature[RING_F_FCOE].indices) {
 		/* Use multiple rx queues for FCoE by redirection table */
 		for (i = 0; i < IXGBE_FCRETA_SIZE; i++) {
-			fcoe_i = f->mask + i % f->indices;
+			fcoe_i = f->offset + i % f->indices;
 			fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
 			fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
 			IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q);
@@ -683,7 +683,7 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
 		IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0);
 	} else {
 		/* Use single rx queue for FCoE */
-		fcoe_i = f->mask;
+		fcoe_i = f->offset;
 		fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
 		IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, 0);
 		IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE),
@@ -691,7 +691,7 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
 			(fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
 	}
 	/* send FIP frames to the first FCoE queue */
-	fcoe_i = f->mask;
+	fcoe_i = f->offset;
 	fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
 	IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP),
 			IXGBE_ETQS_QUEUE_EN |
@@ -770,7 +770,7 @@ int ixgbe_fcoe_enable(struct net_device *netdev)
 	ixgbe_clear_interrupt_scheme(adapter);
 
 	adapter->flags |= IXGBE_FLAG_FCOE_ENABLED;
-	adapter->ring_feature[RING_F_FCOE].indices = IXGBE_FCRETA_SIZE;
+	adapter->ring_feature[RING_F_FCOE].limit = IXGBE_FCRETA_SIZE;
 	netdev->features |= NETIF_F_FCOE_CRC;
 	netdev->features |= NETIF_F_FSO;
 	netdev->features |= NETIF_F_FCOE_MTU;
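With f->offset in place, the FCRETA fill in ixgbe_configure_fcoe() reads naturally: the redirection entries spread round-robin across the FCoE rings, starting at the feature's first ring. A standalone model of that indexing (the ring layout and the entry-mask value here are hypothetical stand-ins, not the driver macros):

#include <stdio.h>

#define FCRETA_SIZE 8
#define FCRETA_ENTRY_MASK 0x7f	/* low bits only; stand-in value */

int main(void)
{
	unsigned offset = 16, indices = 4;	/* hypothetical layout */

	for (int i = 0; i < FCRETA_SIZE; i++) {
		unsigned fcoe_i = offset + i % indices;

		fcoe_i &= FCRETA_ENTRY_MASK;
		printf("FCRETA[%d] -> ring %u\n", i, fcoe_i);
	}
	return 0;
}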
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index f36c3c38dbcb..83eadd019e6b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -138,30 +138,6 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
 }
 #endif
 
-/**
- * ixgbe_cache_ring_fdir - Descriptor ring to register mapping for Flow Director
- * @adapter: board private structure to initialize
- *
- * Cache the descriptor ring offsets for Flow Director to the assigned rings.
- *
- **/
-static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
-{
-	int i;
-	bool ret = false;
-
-	if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
-	    (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) {
-		for (i = 0; i < adapter->num_rx_queues; i++)
-			adapter->rx_ring[i]->reg_idx = i;
-		for (i = 0; i < adapter->num_tx_queues; i++)
-			adapter->tx_ring[i]->reg_idx = i;
-		ret = true;
-	}
-
-	return ret;
-}
-
 #ifdef IXGBE_FCOE
 /**
  * ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for the FCoE
@@ -180,17 +156,14 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
 		return false;
 
 	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
-		if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
-			ixgbe_cache_ring_fdir(adapter);
-		else
-			ixgbe_cache_ring_rss(adapter);
+		ixgbe_cache_ring_rss(adapter);
 
-		fcoe_rx_i = f->mask;
-		fcoe_tx_i = f->mask;
+		fcoe_rx_i = f->offset;
+		fcoe_tx_i = f->offset;
 	}
 	for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
-		adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
-		adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
+		adapter->rx_ring[f->offset + i]->reg_idx = fcoe_rx_i;
+		adapter->tx_ring[f->offset + i]->reg_idx = fcoe_tx_i;
 	}
 	return true;
 }
@@ -244,9 +217,6 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
 		return;
 #endif /* IXGBE_FCOE */
 
-	if (ixgbe_cache_ring_fdir(adapter))
-		return;
-
 	if (ixgbe_cache_ring_rss(adapter))
 		return;
 }
@@ -272,53 +242,39 @@ static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
  * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
  *
  **/
-static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
+static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
 {
-	bool ret = false;
-	struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_RSS];
+	struct ixgbe_ring_feature *f;
+	u16 rss_i;
 
-	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
-		f->mask = 0xF;
-		adapter->num_rx_queues = f->indices;
-		adapter->num_tx_queues = f->indices;
-		ret = true;
+	if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) {
+		adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+		return false;
 	}
 
-	return ret;
-}
+	/* set mask for 16 queue limit of RSS */
+	f = &adapter->ring_feature[RING_F_RSS];
+	rss_i = f->limit;
 
-/**
- * ixgbe_set_fdir_queues - Allocate queues for Flow Director
- * @adapter: board private structure to initialize
- *
- * Flow Director is an advanced Rx filter, attempting to get Rx flows back
- * to the original CPU that initiated the Tx session.  This runs in addition
- * to RSS, so if a packet doesn't match an FDIR filter, we can still spread the
- * Rx load across CPUs using RSS.
- *
- **/
-static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
-{
-	bool ret = false;
-	struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR];
-
-	f_fdir->indices = min_t(int, num_online_cpus(), f_fdir->indices);
-	f_fdir->mask = 0;
+	f->indices = rss_i;
+	f->mask = 0xF;
 
 	/*
-	 * Use RSS in addition to Flow Director to ensure the best
+	 * Use Flow Director in addition to RSS to ensure the best
 	 * distribution of flows across cores, even when an FDIR flow
 	 * isn't matched.
 	 */
-	if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
-	    (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) {
-		adapter->num_tx_queues = f_fdir->indices;
-		adapter->num_rx_queues = f_fdir->indices;
-		ret = true;
-	} else {
-		adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
+		f = &adapter->ring_feature[RING_F_FDIR];
+
+		f->indices = min_t(u16, num_online_cpus(), f->limit);
+		rss_i = max_t(u16, rss_i, f->indices);
 	}
-	return ret;
+
+	adapter->num_rx_queues = rss_i;
+	adapter->num_tx_queues = rss_i;
+
+	return true;
 }
 
 #ifdef IXGBE_FCOE
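The rewritten ixgbe_set_rss_queues() above absorbs the old ixgbe_set_fdir_queues(): Flow Director no longer selects a separate queue count, it can only raise the RSS count. A plain C model of the consolidated math (min_u16/max_u16 stand in for the kernel's min_t/max_t; the CPU and limit values are invented):

#include <stdio.h>

static unsigned short min_u16(unsigned short a, unsigned short b)
{
	return a < b ? a : b;
}

static unsigned short max_u16(unsigned short a, unsigned short b)
{
	return a > b ? a : b;
}

int main(void)
{
	unsigned short rss_limit = 16, fdir_limit = 64, online_cpus = 32;
	unsigned short rss_i = rss_limit;

	/* mirrors: f->indices = min_t(u16, num_online_cpus(), f->limit); */
	unsigned short fdir_i = min_u16(online_cpus, fdir_limit);

	/* mirrors: rss_i = max_t(u16, rss_i, f->indices); */
	rss_i = max_u16(rss_i, fdir_i);

	printf("rx = tx = %u queues\n", rss_i);	/* 32 */
	return 0;
}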
@@ -327,10 +283,7 @@ static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
  * @adapter: board private structure to initialize
  *
  * FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges.
- * The ring feature mask is not used as a mask for FCoE, as it can take any 8
- * rx queues out of the max number of rx queues, instead, it is used as the
- * index of the first rx queue used by FCoE.
- *
+ * Offset is used as the index of the first rx queue used by FCoE.
  **/
 static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
 {
@@ -339,21 +292,18 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
 	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
 		return false;
 
-	f->indices = min_t(int, num_online_cpus(), f->indices);
+	f->indices = min_t(int, num_online_cpus(), f->limit);
 
 	adapter->num_rx_queues = 1;
 	adapter->num_tx_queues = 1;
 
 	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
 		e_info(probe, "FCoE enabled with RSS\n");
-		if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
-			ixgbe_set_fdir_queues(adapter);
-		else
-			ixgbe_set_rss_queues(adapter);
+		ixgbe_set_rss_queues(adapter);
 	}
 
 	/* adding FCoE rx rings to the end */
-	f->mask = adapter->num_rx_queues;
+	f->offset = adapter->num_rx_queues;
 	adapter->num_rx_queues += f->indices;
 	adapter->num_tx_queues += f->indices;
 
@@ -388,7 +338,7 @@ static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
 
 #ifdef IXGBE_FCOE
 	/* FCoE enabled queues require special configuration indexed
-	 * by feature specific indices and mask. Here we map FCoE
+	 * by feature specific indices and offset. Here we map FCoE
 	 * indices onto the DCB queue pairs allowing FCoE to own
 	 * configuration later.
 	 */
@@ -401,7 +351,7 @@ static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
 		ixgbe_dcb_unpack_map(&adapter->dcb_cfg, DCB_TX_CONFIG, prio_tc);
 		tc = prio_tc[adapter->fcoe.up];
 		f->indices = dev->tc_to_txq[tc].count;
-		f->mask = dev->tc_to_txq[tc].offset;
+		f->offset = dev->tc_to_txq[tc].offset;
 	}
 #endif
 
@@ -441,9 +391,6 @@ static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
 		goto done;
 
 #endif /* IXGBE_FCOE */
-	if (ixgbe_set_fdir_queues(adapter))
-		goto done;
-
 	if (ixgbe_set_rss_queues(adapter))
 		goto done;
 
@@ -507,8 +454,8 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
 		 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
 		 * vectors we were allocated.
 		 */
-		adapter->num_msix_vectors = min(vectors,
-				   adapter->max_msix_q_vectors + NON_Q_VECTORS);
+		vectors -= NON_Q_VECTORS;
+		adapter->num_q_vectors = min(vectors, adapter->max_q_vectors);
 	}
 }
 
@@ -632,8 +579,8 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
 		if (adapter->netdev->features & NETIF_F_FCOE_MTU) {
 			struct ixgbe_ring_feature *f;
 			f = &adapter->ring_feature[RING_F_FCOE];
-			if ((rxr_idx >= f->mask) &&
-			    (rxr_idx < f->mask + f->indices))
+			if ((rxr_idx >= f->offset) &&
+			    (rxr_idx < f->offset + f->indices))
 				set_bit(__IXGBE_RX_FCOE, &ring->state);
 		}
 
@@ -695,7 +642,7 @@ static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
 **/
static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
{
-	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+	int q_vectors = adapter->num_q_vectors;
 	int rxr_remaining = adapter->num_rx_queues;
 	int txr_remaining = adapter->num_tx_queues;
 	int rxr_idx = 0, txr_idx = 0, v_idx = 0;
@@ -739,10 +686,12 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
 	return 0;
 
 err_out:
-	while (v_idx) {
-		v_idx--;
+	adapter->num_tx_queues = 0;
+	adapter->num_rx_queues = 0;
+	adapter->num_q_vectors = 0;
+
+	while (v_idx--)
 		ixgbe_free_q_vector(adapter, v_idx);
-	}
 
 	return -ENOMEM;
 }
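The error path above also switches to the compact reverse-unwind idiom. In isolation (free_q_vector() here is a stand-in printout, not the driver function):

#include <stdio.h>

static void free_q_vector(int idx)
{
	printf("freeing q_vector %d\n", idx);
}

int main(void)
{
	int v_idx = 3;	/* vectors successfully allocated before failure */

	/* frees entries v_idx-1 .. 0 in reverse; a no-op when v_idx is 0 */
	while (v_idx--)
		free_q_vector(v_idx);	/* prints 2, 1, 0 */
	return 0;
}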
@@ -757,14 +706,13 @@ err_out:
 **/
static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
{
-	int v_idx, q_vectors;
+	int v_idx = adapter->num_q_vectors;
 
-	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
-		q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-	else
-		q_vectors = 1;
+	adapter->num_tx_queues = 0;
+	adapter->num_rx_queues = 0;
+	adapter->num_q_vectors = 0;
 
-	for (v_idx = 0; v_idx < q_vectors; v_idx++)
+	while (v_idx--)
 		ixgbe_free_q_vector(adapter, v_idx);
 }
 
@@ -844,6 +792,8 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
 	if (err)
 		return err;
 
+	adapter->num_q_vectors = 1;
+
 	err = pci_enable_msi(adapter->pdev);
 	if (!err) {
 		adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 59a3f141feb1..d3cf8873d483 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -993,7 +993,6 @@ out_no_update:
 
 static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
 {
-	int num_q_vectors;
 	int i;
 
 	if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
@@ -1002,12 +1001,7 @@ static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
 	/* always use CB2 mode, difference is masked in the CB driver */
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
 
-	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
-		num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-	else
-		num_q_vectors = 1;
-
-	for (i = 0; i < num_q_vectors; i++) {
+	for (i = 0; i < adapter->num_q_vectors; i++) {
 		adapter->q_vector[i]->cpu = -1;
 		ixgbe_update_dca(adapter->q_vector[i]);
 	}
@@ -1831,11 +1825,9 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
 {
 	struct ixgbe_q_vector *q_vector;
-	int q_vectors, v_idx;
+	int v_idx;
 	u32 mask;
 
-	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-
 	/* Populate MSIX to EITR Select */
 	if (adapter->num_vfs > 32) {
 		u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
@@ -1846,7 +1838,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
 	 * Populate the IVAR table and set the ITR values to the
 	 * corresponding register.
 	 */
-	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
+	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
 		struct ixgbe_ring *ring;
 		q_vector = adapter->q_vector[v_idx];
 
@@ -2410,11 +2402,10 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
 static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
-	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
 	int vector, err;
 	int ri = 0, ti = 0;
 
-	for (vector = 0; vector < q_vectors; vector++) {
+	for (vector = 0; vector < adapter->num_q_vectors; vector++) {
 		struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
 		struct msix_entry *entry = &adapter->msix_entries[vector];
 
@@ -2569,30 +2560,28 @@ static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
 
 static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
 {
-	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
-		int i, q_vectors;
+	int vector;
 
-		q_vectors = adapter->num_msix_vectors;
-		i = q_vectors - 1;
-		free_irq(adapter->msix_entries[i].vector, adapter);
-		i--;
+	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
+		free_irq(adapter->pdev->irq, adapter);
+		return;
+	}
 
-		for (; i >= 0; i--) {
-			/* free only the irqs that were actually requested */
-			if (!adapter->q_vector[i]->rx.ring &&
-			    !adapter->q_vector[i]->tx.ring)
-				continue;
+	for (vector = 0; vector < adapter->num_q_vectors; vector++) {
+		struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
+		struct msix_entry *entry = &adapter->msix_entries[vector];
 
-			/* clear the affinity_mask in the IRQ descriptor */
-			irq_set_affinity_hint(adapter->msix_entries[i].vector,
-					      NULL);
+		/* free only the irqs that were actually requested */
+		if (!q_vector->rx.ring && !q_vector->tx.ring)
+			continue;
 
-			free_irq(adapter->msix_entries[i].vector,
-				 adapter->q_vector[i]);
-		}
-	} else {
-		free_irq(adapter->pdev->irq, adapter);
+		/* clear the affinity_mask in the IRQ descriptor */
+		irq_set_affinity_hint(entry->vector, NULL);
+
+		free_irq(entry->vector, q_vector);
 	}
+
+	free_irq(adapter->msix_entries[vector++].vector, adapter);
 }
 
 /**
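One subtlety in the rewritten ixgbe_free_irq(): when the queue loop exits, vector already indexes the one remaining msix_entries[] slot, the non-queue "other" interrupt requested after the queue vectors, so a single extra free_irq() releases it (the post-increment has no further effect). Modeled standalone:

#include <stdio.h>

int main(void)
{
	int num_q_vectors = 8;
	int vector;

	for (vector = 0; vector < num_q_vectors; vector++)
		printf("free queue irq, entry %d\n", vector);

	/* vector == 8 here: the lone non-queue entry */
	printf("free other irq, entry %d\n", vector++);
	return 0;
}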
@@ -2616,9 +2605,12 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
 	}
 	IXGBE_WRITE_FLUSH(&adapter->hw);
 	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
-		int i;
-		for (i = 0; i < adapter->num_msix_vectors; i++)
-			synchronize_irq(adapter->msix_entries[i].vector);
+		int vector;
+
+		for (vector = 0; vector < adapter->num_q_vectors; vector++)
+			synchronize_irq(adapter->msix_entries[vector].vector);
+
+		synchronize_irq(adapter->msix_entries[vector++].vector);
 	} else {
 		synchronize_irq(adapter->pdev->irq);
 	}
@@ -2855,40 +2847,34 @@ static void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
 static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
 				   struct ixgbe_ring *rx_ring)
 {
+	struct ixgbe_hw *hw = &adapter->hw;
 	u32 srrctl;
 	u8 reg_idx = rx_ring->reg_idx;
 
-	switch (adapter->hw.mac.type) {
-	case ixgbe_mac_82598EB: {
-		struct ixgbe_ring_feature *feature = adapter->ring_feature;
-		const int mask = feature[RING_F_RSS].mask;
-		reg_idx = reg_idx & mask;
-	}
-		break;
-	case ixgbe_mac_82599EB:
-	case ixgbe_mac_X540:
-	default:
-		break;
-	}
-
-	srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx));
+	if (hw->mac.type == ixgbe_mac_82598EB) {
+		u16 mask = adapter->ring_feature[RING_F_RSS].mask;
 
-	srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
-	srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
-	if (adapter->num_vfs)
-		srrctl |= IXGBE_SRRCTL_DROP_EN;
+		/*
+		 * if VMDq is not active we must program one srrctl register
+		 * per RSS queue since we have enabled RDRXCTL.MVMEN
+		 */
+		reg_idx &= mask;
+	}
 
-	srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
-		  IXGBE_SRRCTL_BSIZEHDR_MASK;
+	/* configure header buffer length, needed for RSC */
+	srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
 
+	/* configure the packet buffer length */
 #if PAGE_SIZE > IXGBE_MAX_RXBUFFER
 	srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
 #else
 	srrctl |= ixgbe_rx_bufsz(rx_ring) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
 #endif
+
+	/* configure descriptor type */
 	srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
 
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx), srrctl);
+	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
 }
 
 static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
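The srrctl rewrite above drops the read-modify-write and composes the register from zero. The shape of that composition as a standalone sketch, where all three constants are hypothetical stand-ins for the ixgbe_type.h values:

#include <stdio.h>

#define SRRCTL_BSIZEHDRSIZE_SHIFT 2		/* hypothetical */
#define SRRCTL_BSIZEPKT_SHIFT 10		/* hypothetical: 1KB units */
#define SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000u	/* hypothetical */

int main(void)
{
	unsigned rx_hdr_size = 256, rx_bufsz = 2048;
	unsigned srrctl;

	srrctl = rx_hdr_size << SRRCTL_BSIZEHDRSIZE_SHIFT;	/* header len */
	srrctl |= rx_bufsz >> SRRCTL_BSIZEPKT_SHIFT;		/* pkt buf len */
	srrctl |= SRRCTL_DESCTYPE_ADV_ONEBUF;			/* desc type */

	printf("SRRCTL = 0x%08x\n", srrctl);
	return 0;
}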
@@ -3561,33 +3547,17 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
 static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
 {
 	int q_idx;
-	struct ixgbe_q_vector *q_vector;
-	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
 
-	/* legacy and MSI only use one vector */
-	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
-		q_vectors = 1;
-
-	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
-		q_vector = adapter->q_vector[q_idx];
-		napi_enable(&q_vector->napi);
-	}
+	for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
+		napi_enable(&adapter->q_vector[q_idx]->napi);
 }
 
 static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
 {
 	int q_idx;
-	struct ixgbe_q_vector *q_vector;
-	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
 
-	/* legacy and MSI only use one vector */
-	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
-		q_vectors = 1;
-
-	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
-		q_vector = adapter->q_vector[q_idx];
-		napi_disable(&q_vector->napi);
-	}
+	for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
+		napi_disable(&adapter->q_vector[q_idx]->napi);
 }
 
 #ifdef CONFIG_IXGBE_DCB
@@ -4410,18 +4380,18 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
 
 	/* Set capability flags */
 	rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus());
-	adapter->ring_feature[RING_F_RSS].indices = rss;
+	adapter->ring_feature[RING_F_RSS].limit = rss;
 	adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
 	switch (hw->mac.type) {
 	case ixgbe_mac_82598EB:
 		if (hw->device_id == IXGBE_DEV_ID_82598AT)
 			adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
-		adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598;
+		adapter->max_q_vectors = MAX_Q_VECTORS_82598;
 		break;
 	case ixgbe_mac_X540:
 		adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
 	case ixgbe_mac_82599EB:
-		adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
+		adapter->max_q_vectors = MAX_Q_VECTORS_82599;
 		adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
 		adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
 		if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
@@ -4429,13 +4399,12 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
 		/* Flow Director hash filters enabled */
 		adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
 		adapter->atr_sample_rate = 20;
-		adapter->ring_feature[RING_F_FDIR].indices =
+		adapter->ring_feature[RING_F_FDIR].limit =
 							 IXGBE_MAX_FDIR_INDICES;
 		adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
 #ifdef IXGBE_FCOE
 		adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
 		adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
-		adapter->ring_feature[RING_F_FCOE].indices = 0;
 #ifdef CONFIG_IXGBE_DCB
 		/* Default traffic class to use for FCoE */
 		adapter->fcoe.up = IXGBE_FCOE_DEFTC;
@@ -5313,7 +5282,7 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
 			(IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
 	} else {
 		/* get one bit for every active tx/rx interrupt vector */
-		for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
+		for (i = 0; i < adapter->num_q_vectors; i++) {
 			struct ixgbe_q_vector *qv = adapter->q_vector[i];
 			if (qv->rx.ring || qv->tx.ring)
 				eics |= ((u64)1 << i);
@@ -6230,8 +6199,14 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
 	if (((protocol == htons(ETH_P_FCOE)) ||
 	    (protocol == htons(ETH_P_FIP))) &&
 	    (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) {
-		txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
-		txq += adapter->ring_feature[RING_F_FCOE].mask;
+		struct ixgbe_ring_feature *f;
+
+		f = &adapter->ring_feature[RING_F_FCOE];
+
+		while (txq >= f->indices)
+			txq -= f->indices;
+		txq += adapter->ring_feature[RING_F_FCOE].offset;
+
 		return txq;
 	}
 #endif
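The old select-queue code assumed the FCoE ring count was a power of two (mask with indices - 1); the new while loop is a plain modulo by repeated subtraction, correct for any count. A standalone comparison with a deliberately non-power-of-two count (all numbers invented):

#include <stdio.h>

int main(void)
{
	unsigned short indices = 6, offset = 16;	/* hypothetical */
	unsigned short txq = 15;

	/* old approach: only valid when indices is a power of two */
	unsigned short masked = (txq & (indices - 1)) + offset;

	/* new approach: modulo by subtraction works for any indices */
	unsigned short folded = txq;
	while (folded >= indices)
		folded -= indices;
	folded += offset;

	printf("old=%u new=%u\n", masked, folded);	/* old=21 new=19 */
	return 0;
}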
@@ -6525,11 +6500,8 @@ static void ixgbe_netpoll(struct net_device *netdev)
 
 	adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
 	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
-		int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-		for (i = 0; i < num_q_vectors; i++) {
-			struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
-			ixgbe_msix_clean_rings(0, q_vector);
-		}
+		for (i = 0; i < adapter->num_q_vectors; i++)
+			ixgbe_msix_clean_rings(0, adapter->q_vector[i]);
 	} else {
 		ixgbe_intr(adapter->pdev->irq, netdev);
 	}