author    David S. Miller <davem@davemloft.net>  2011-07-22 02:18:00 -0400
committer David S. Miller <davem@davemloft.net>  2011-07-22 02:18:00 -0400
commit    97c7b1798f3de24315f0a3a7abcc7cf5de3285b9
tree      9d35320f2cd153359811980a4070eae766ce6646
parent    e933d0198d399842f075c2c8af0f38630e7e4bee
parent    082757afcf7d6e44b24c4927ce5b158196d63e84

Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/jkirsher/net-next-2.6
 drivers/net/e1000/e1000_main.c    |  11
 drivers/net/igb/e1000_defines.h   |  10
 drivers/net/igb/igb_ethtool.c     |  33
 drivers/net/ixgbe/ixgbe.h         |  50
 drivers/net/ixgbe/ixgbe_ethtool.c | 200
 drivers/net/ixgbe/ixgbe_fcoe.c    |  35
 drivers/net/ixgbe/ixgbe_main.c    | 480
 7 files changed, 338 insertions(+), 481 deletions(-)
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index acaebecf0ca7..f97afda941d7 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -2402,13 +2402,16 @@ bool e1000_has_link(struct e1000_adapter *adapter)
 	struct e1000_hw *hw = &adapter->hw;
 	bool link_active = false;
 
-	/* get_link_status is set on LSC (link status) interrupt or
-	 * rx sequence error interrupt. get_link_status will stay
-	 * false until the e1000_check_for_link establishes link
-	 * for copper adapters ONLY
+	/* get_link_status is set on LSC (link status) interrupt or rx
+	 * sequence error interrupt (except on intel ce4100).
+	 * get_link_status will stay false until the
+	 * e1000_check_for_link establishes link for copper adapters
+	 * ONLY
 	 */
 	switch (hw->media_type) {
 	case e1000_media_type_copper:
+		if (hw->mac_type == e1000_ce4100)
+			hw->get_link_status = 1;
 		if (hw->get_link_status) {
 			e1000_check_for_link(hw);
 			link_active = !hw->get_link_status;
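In the e1000 hunk above, the rx-sequence-error path that normally sets get_link_status is not available on the CE4100 (per the updated comment), so the copper branch now forces a fresh e1000_check_for_link() on every poll. A minimal sketch of the resulting logic (has_copper_link is a hypothetical name for illustration, simplified from the hunk):

	static bool has_copper_link(struct e1000_hw *hw)
	{
		/* CE4100 gets no LSC/rx-sequence-error hint: always re-check */
		if (hw->mac_type == e1000_ce4100)
			hw->get_link_status = 1;

		if (hw->get_link_status) {
			/* clears get_link_status once link is established */
			e1000_check_for_link(hw);
			return !hw->get_link_status;
		}
		return true;	/* cached state already says link is up */
	}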
diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h
index 2cd4082c86ca..7b8ddd830f19 100644
--- a/drivers/net/igb/e1000_defines.h
+++ b/drivers/net/igb/e1000_defines.h
@@ -512,6 +512,16 @@
 #define E1000_GCR_CMPL_TMOUT_RESEND	0x00010000
 #define E1000_GCR_CAP_VER2		0x00040000
 
+/* mPHY Address Control and Data Registers */
+#define E1000_MPHY_ADDR_CTL		0x0024 /* mPHY Address Control Register */
+#define E1000_MPHY_ADDR_CTL_OFFSET_MASK	0xFFFF0000
+#define E1000_MPHY_DATA			0x0E10 /* mPHY Data Register */
+
+/* mPHY PCS CLK Register */
+#define E1000_MPHY_PCS_CLK_REG_OFFSET	0x0004 /* mPHY PCS CLK AFE CSR Offset */
+/* mPHY Near End Digital Loopback Override Bit */
+#define E1000_MPHY_PCS_CLK_REG_DIGINELBEN 0x10
+
 /* PHY Control Register */
 #define MII_CR_FULL_DUPLEX	0x0100  /* FDX =1, half duplex =0 */
 #define MII_CR_RESTART_AUTO_NEG	0x0200  /* Restart auto negotiation */
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index ed63ff4cf6d6..ff244ce803ce 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -1461,6 +1461,22 @@ static int igb_setup_loopback_test(struct igb_adapter *adapter)
 
 	/* use CTRL_EXT to identify link type as SGMII can appear as copper */
 	if (reg & E1000_CTRL_EXT_LINK_MODE_MASK) {
+		if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) ||
+		    (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
+		    (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
+		    (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP)) {
+
+			/* Enable DH89xxCC MPHY for near end loopback */
+			reg = rd32(E1000_MPHY_ADDR_CTL);
+			reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK) |
+			       E1000_MPHY_PCS_CLK_REG_OFFSET;
+			wr32(E1000_MPHY_ADDR_CTL, reg);
+
+			reg = rd32(E1000_MPHY_DATA);
+			reg |= E1000_MPHY_PCS_CLK_REG_DIGINELBEN;
+			wr32(E1000_MPHY_DATA, reg);
+		}
+
 		reg = rd32(E1000_RCTL);
 		reg |= E1000_RCTL_LBM_TCVR;
 		wr32(E1000_RCTL, reg);
@@ -1502,6 +1518,23 @@ static void igb_loopback_cleanup(struct igb_adapter *adapter)
 	u32 rctl;
 	u16 phy_reg;
 
+	if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) ||
+	    (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
+	    (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
+	    (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP)) {
+		u32 reg;
+
+		/* Disable near end loopback on DH89xxCC */
+		reg = rd32(E1000_MPHY_ADDR_CTL);
+		reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK) |
+		       E1000_MPHY_PCS_CLK_REG_OFFSET;
+		wr32(E1000_MPHY_ADDR_CTL, reg);
+
+		reg = rd32(E1000_MPHY_DATA);
+		reg &= ~E1000_MPHY_PCS_CLK_REG_DIGINELBEN;
+		wr32(E1000_MPHY_DATA, reg);
+	}
+
 	rctl = rd32(E1000_RCTL);
 	rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
 	wr32(E1000_RCTL, rctl);
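Both igb hunks use the indirect access pattern the new defines describe: program the target mPHY CSR offset into E1000_MPHY_ADDR_CTL (preserving the control bits kept in the upper halfword via the OFFSET mask), then read-modify-write the selected register through the E1000_MPHY_DATA window. A hedged sketch of that pattern as a helper (hypothetical function; the driver open-codes the sequence in setup and cleanup as shown above):

	/* Hypothetical helper capturing the indirect mPHY access pattern. */
	static void igb_mphy_rmw(struct e1000_hw *hw, u32 offset,
				 u32 set_bits, u32 clear_bits)
	{
		u32 reg;

		/* select which mPHY CSR the data window points at */
		reg = rd32(E1000_MPHY_ADDR_CTL);
		reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK) | offset;
		wr32(E1000_MPHY_ADDR_CTL, reg);

		/* read-modify-write through the data window */
		reg = rd32(E1000_MPHY_DATA);
		reg = (reg | set_bits) & ~clear_bits;
		wr32(E1000_MPHY_DATA, reg);
	}

	/* enable near-end digital loopback:
	 *   igb_mphy_rmw(hw, E1000_MPHY_PCS_CLK_REG_OFFSET,
	 *                E1000_MPHY_PCS_CLK_REG_DIGINELBEN, 0);
	 * disable it again:
	 *   igb_mphy_rmw(hw, E1000_MPHY_PCS_CLK_REG_OFFSET,
	 *                0, E1000_MPHY_PCS_CLK_REG_DIGINELBEN);
	 */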
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 744b64108130..e04a8e49e6dc 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -214,12 +214,10 @@ struct ixgbe_ring {
 		struct ixgbe_rx_buffer *rx_buffer_info;
 	};
 	unsigned long state;
-	u8 atr_sample_rate;
-	u8 atr_count;
+	u8 __iomem *tail;
+
 	u16 count;			/* amount of descriptors */
 	u16 rx_buf_len;
-	u16 next_to_use;
-	u16 next_to_clean;
 
 	u8 queue_index; /* needed for multiqueue queue management */
 	u8 reg_idx;			/* holds the special value that gets
@@ -227,15 +225,13 @@ struct ixgbe_ring {
 					 * associated with this ring, which is
 					 * different for DCB and RSS modes
 					 */
-	u8 dcb_tc;
-
-	u16 work_limit;			/* max work per interrupt */
-
-	u8 __iomem *tail;
+	u8 atr_sample_rate;
+	u8 atr_count;
 
-	unsigned int total_bytes;
-	unsigned int total_packets;
+	u16 next_to_use;
+	u16 next_to_clean;
 
+	u8 dcb_tc;
 	struct ixgbe_queue_stats stats;
 	struct u64_stats_sync syncp;
 	union {
@@ -277,6 +273,18 @@ struct ixgbe_ring_feature {
 	int mask;
 } ____cacheline_internodealigned_in_smp;
 
+struct ixgbe_ring_container {
+#if MAX_RX_QUEUES > MAX_TX_QUEUES
+	DECLARE_BITMAP(idx, MAX_RX_QUEUES);
+#else
+	DECLARE_BITMAP(idx, MAX_TX_QUEUES);
+#endif
+	unsigned int total_bytes;	/* total bytes processed this int */
+	unsigned int total_packets;	/* total packets processed this int */
+	u16 work_limit;			/* total work allowed per interrupt */
+	u8 count;			/* total number of rings in vector */
+	u8 itr;				/* current ITR setting for ring */
+};
 
 #define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
 				? 8 : 1)
@@ -294,12 +302,7 @@ struct ixgbe_q_vector {
 	int cpu;	    /* CPU for DCA */
 #endif
 	struct napi_struct napi;
-	DECLARE_BITMAP(rxr_idx, MAX_RX_QUEUES); /* Rx ring indices */
-	DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES); /* Tx ring indices */
-	u8 rxr_count;     /* Rx ring count assigned to this vector */
-	u8 txr_count;     /* Tx ring count assigned to this vector */
-	u8 tx_itr;
-	u8 rx_itr;
+	struct ixgbe_ring_container rx, tx;
 	u32 eitr;
 	cpumask_var_t affinity_mask;
 	char name[IFNAMSIZ + 9];
@@ -413,6 +416,9 @@ struct ixgbe_adapter {
 	u16 eitr_low;
 	u16 eitr_high;
 
+	/* Work limits */
+	u16 tx_work_limit;
+
 	/* TX */
 	struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;
 	int num_tx_queues;
@@ -581,21 +587,19 @@ extern s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
 							 u16 soft_id);
 extern void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
 						 union ixgbe_atr_input *mask);
-extern void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
-				   struct ixgbe_ring *ring);
-extern void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter,
-			       struct ixgbe_ring *ring);
 extern void ixgbe_set_rx_mode(struct net_device *netdev);
 extern int ixgbe_setup_tc(struct net_device *dev, u8 tc);
 extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
+extern void ixgbe_do_reset(struct net_device *netdev);
 #ifdef IXGBE_FCOE
 extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
 extern int ixgbe_fso(struct ixgbe_ring *tx_ring, struct sk_buff *skb,
 		     u32 tx_flags, u8 *hdr_len);
 extern void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter);
 extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
 			  union ixgbe_adv_rx_desc *rx_desc,
-			  struct sk_buff *skb);
+			  struct sk_buff *skb,
+			  u32 staterr);
 extern int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
 			      struct scatterlist *sgl, unsigned int sgc);
 extern int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
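The ixgbe.h change is the heart of this series: the per-direction bookkeeping a vector needs (ring-index bitmap, ring count, accumulated byte/packet totals, work limit, and current ITR) moves out of struct ixgbe_ring and the loose q_vector fields into struct ixgbe_ring_container, so each q_vector simply carries an rx and a tx container. A sketch of how one direction of a vector is walked after the conversion (for_each_tx_ring is a hypothetical helper; the driver open-codes these find_first_bit/find_next_bit loops, as the ixgbe_main.c hunks below show):

	static void for_each_tx_ring(struct ixgbe_adapter *adapter,
				     struct ixgbe_q_vector *q_vector,
				     void (*visit)(struct ixgbe_ring *))
	{
		struct ixgbe_ring_container *tx = &q_vector->tx;
		unsigned int i, r_idx;

		r_idx = find_first_bit(tx->idx, adapter->num_tx_queues);
		for (i = 0; i < tx->count; i++) {
			visit(adapter->tx_ring[r_idx]);
			r_idx = find_next_bit(tx->idx, adapter->num_tx_queues,
					      r_idx + 1);
		}
	}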
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 074e9baf069a..dc649553a0a6 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -442,109 +442,6 @@ static int ixgbe_set_pauseparam(struct net_device *netdev,
 	return 0;
 }
 
-static void ixgbe_do_reset(struct net_device *netdev)
-{
-	struct ixgbe_adapter *adapter = netdev_priv(netdev);
-
-	if (netif_running(netdev))
-		ixgbe_reinit_locked(adapter);
-	else
-		ixgbe_reset(adapter);
-}
-
-static u32 ixgbe_get_rx_csum(struct net_device *netdev)
-{
-	struct ixgbe_adapter *adapter = netdev_priv(netdev);
-	return adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED;
-}
-
-static void ixgbe_set_rsc(struct ixgbe_adapter *adapter)
-{
-	int i;
-
-	for (i = 0; i < adapter->num_rx_queues; i++) {
-		struct ixgbe_ring *ring = adapter->rx_ring[i];
-		if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
-			set_ring_rsc_enabled(ring);
-			ixgbe_configure_rscctl(adapter, ring);
-		} else {
-			ixgbe_clear_rscctl(adapter, ring);
-		}
-	}
-}
-
-static int ixgbe_set_rx_csum(struct net_device *netdev, u32 data)
-{
-	struct ixgbe_adapter *adapter = netdev_priv(netdev);
-	bool need_reset = false;
-
-	if (data) {
-		adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
-	} else {
-		adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
-
-		if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) {
-			adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
-			netdev->features &= ~NETIF_F_LRO;
-		}
-
-		switch (adapter->hw.mac.type) {
-		case ixgbe_mac_X540:
-			ixgbe_set_rsc(adapter);
-			break;
-		case ixgbe_mac_82599EB:
-			need_reset = true;
-			break;
-		default:
-			break;
-		}
-	}
-
-	if (need_reset)
-		ixgbe_do_reset(netdev);
-
-	return 0;
-}
-
-static u32 ixgbe_get_tx_csum(struct net_device *netdev)
-{
-	return (netdev->features & NETIF_F_IP_CSUM) != 0;
-}
-
-static int ixgbe_set_tx_csum(struct net_device *netdev, u32 data)
-{
-	struct ixgbe_adapter *adapter = netdev_priv(netdev);
-	u32 feature_list;
-
-	feature_list = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
-	switch (adapter->hw.mac.type) {
-	case ixgbe_mac_82599EB:
-	case ixgbe_mac_X540:
-		feature_list |= NETIF_F_SCTP_CSUM;
-		break;
-	default:
-		break;
-	}
-	if (data)
-		netdev->features |= feature_list;
-	else
-		netdev->features &= ~feature_list;
-
-	return 0;
-}
-
-static int ixgbe_set_tso(struct net_device *netdev, u32 data)
-{
-	if (data) {
-		netdev->features |= NETIF_F_TSO;
-		netdev->features |= NETIF_F_TSO6;
-	} else {
-		netdev->features &= ~NETIF_F_TSO;
-		netdev->features &= ~NETIF_F_TSO6;
-	}
-	return 0;
-}
-
 static u32 ixgbe_get_msglevel(struct net_device *netdev)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -2103,7 +2000,7 @@ static int ixgbe_get_coalesce(struct net_device *netdev,
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
-	ec->tx_max_coalesced_frames_irq = adapter->tx_ring[0]->work_limit;
+	ec->tx_max_coalesced_frames_irq = adapter->tx_work_limit;
 
 	/* only valid if in constant ITR mode */
 	switch (adapter->rx_itr_setting) {
@@ -2122,7 +2019,7 @@ static int ixgbe_get_coalesce(struct net_device *netdev,
 	}
 
 	/* if in mixed tx/rx queues per vector mode, report only rx settings */
-	if (adapter->q_vector[0]->txr_count && adapter->q_vector[0]->rxr_count)
+	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
 		return 0;
 
 	/* only valid if in constant ITR mode */
@@ -2187,12 +2084,12 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
 	bool need_reset = false;
 
 	/* don't accept tx specific changes if we've got mixed RxTx vectors */
-	if (adapter->q_vector[0]->txr_count && adapter->q_vector[0]->rxr_count
+	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count
 	    && ec->tx_coalesce_usecs)
 		return -EINVAL;
 
 	if (ec->tx_max_coalesced_frames_irq)
-		adapter->tx_ring[0]->work_limit = ec->tx_max_coalesced_frames_irq;
+		adapter->tx_work_limit = ec->tx_max_coalesced_frames_irq;
 
 	if (ec->rx_coalesce_usecs > 1) {
 		/* check the limits */
@@ -2261,18 +2158,20 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
 		int num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
 		for (i = 0; i < num_vectors; i++) {
 			q_vector = adapter->q_vector[i];
-			if (q_vector->txr_count && !q_vector->rxr_count)
+			if (q_vector->tx.count && !q_vector->rx.count)
 				/* tx only */
 				q_vector->eitr = adapter->tx_eitr_param;
 			else
 				/* rx only or mixed */
 				q_vector->eitr = adapter->rx_eitr_param;
+			q_vector->tx.work_limit = adapter->tx_work_limit;
 			ixgbe_write_eitr(q_vector);
 		}
 	/* Legacy Interrupt Mode */
 	} else {
 		q_vector = adapter->q_vector[0];
 		q_vector->eitr = adapter->rx_eitr_param;
+		q_vector->tx.work_limit = adapter->tx_work_limit;
 		ixgbe_write_eitr(q_vector);
 	}
 
@@ -2287,81 +2186,6 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
 	return 0;
 }
 
-static int ixgbe_set_flags(struct net_device *netdev, u32 data)
-{
-	struct ixgbe_adapter *adapter = netdev_priv(netdev);
-	bool need_reset = false;
-	int rc;
-
-#ifdef CONFIG_IXGBE_DCB
-	if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
-	    !(data & ETH_FLAG_RXVLAN))
-		return -EINVAL;
-#endif
-
-	need_reset = (data & ETH_FLAG_RXVLAN) !=
-		     (netdev->features & NETIF_F_HW_VLAN_RX);
-
-	if ((data & ETH_FLAG_RXHASH) &&
-	    !(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
-		return -EOPNOTSUPP;
-
-	rc = ethtool_op_set_flags(netdev, data, ETH_FLAG_LRO | ETH_FLAG_NTUPLE |
-				  ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN |
-				  ETH_FLAG_RXHASH);
-	if (rc)
-		return rc;
-
-	/* if state changes we need to update adapter->flags and reset */
-	if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) &&
-	    (!!(data & ETH_FLAG_LRO) !=
-	     !!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) {
-		if ((data & ETH_FLAG_LRO) &&
-		    (!adapter->rx_itr_setting ||
-		     (adapter->rx_itr_setting > IXGBE_MAX_RSC_INT_RATE))) {
-			e_info(probe, "rx-usecs set too low, "
-				      "not enabling RSC.\n");
-		} else {
-			adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED;
-			switch (adapter->hw.mac.type) {
-			case ixgbe_mac_X540:
-				ixgbe_set_rsc(adapter);
-				break;
-			case ixgbe_mac_82599EB:
-				need_reset = true;
-				break;
-			default:
-				break;
-			}
-		}
-	}
-
-	/*
-	 * Check if Flow Director n-tuple support was enabled or disabled. If
-	 * the state changed, we need to reset.
-	 */
-	if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) {
-		/* turn off ATR, enable perfect filters and reset */
-		if (data & ETH_FLAG_NTUPLE) {
-			adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
-			adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
-			need_reset = true;
-		}
-	} else if (!(data & ETH_FLAG_NTUPLE)) {
-		/* turn off Flow Director, set ATR and reset */
-		adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
-		if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
-		    !(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
-			adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
-		need_reset = true;
-	}
-
-	if (need_reset)
-		ixgbe_do_reset(netdev);
-
-	return 0;
-}
-
 static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
 					struct ethtool_rxnfc *cmd)
 {
@@ -2744,16 +2568,8 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
 	.set_ringparam          = ixgbe_set_ringparam,
 	.get_pauseparam         = ixgbe_get_pauseparam,
 	.set_pauseparam         = ixgbe_set_pauseparam,
-	.get_rx_csum            = ixgbe_get_rx_csum,
-	.set_rx_csum            = ixgbe_set_rx_csum,
-	.get_tx_csum            = ixgbe_get_tx_csum,
-	.set_tx_csum            = ixgbe_set_tx_csum,
-	.get_sg                 = ethtool_op_get_sg,
-	.set_sg                 = ethtool_op_set_sg,
 	.get_msglevel           = ixgbe_get_msglevel,
 	.set_msglevel           = ixgbe_set_msglevel,
-	.get_tso                = ethtool_op_get_tso,
-	.set_tso                = ixgbe_set_tso,
 	.self_test              = ixgbe_diag_test,
 	.get_strings            = ixgbe_get_strings,
 	.set_phys_id            = ixgbe_set_phys_id,
@@ -2761,8 +2577,6 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
 	.get_ethtool_stats      = ixgbe_get_ethtool_stats,
 	.get_coalesce           = ixgbe_get_coalesce,
 	.set_coalesce           = ixgbe_set_coalesce,
-	.get_flags              = ethtool_op_get_flags,
-	.set_flags              = ixgbe_set_flags,
 	.get_rxnfc              = ixgbe_get_rxnfc,
 	.set_rxnfc              = ixgbe_set_rxnfc,
 };
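Two things fall out of the ethtool changes above: the legacy rx/tx checksum, SG, TSO, and flags ops disappear (ixgbe_do_reset moves into ixgbe_main.c and is exported through ixgbe.h instead), and the tx work limit becomes a single adapter-wide value rather than a per-ring field. The resulting flow, traced through the hunks (a summary of the change, not new code):

	/* ethtool -C ethX tx-frames-irq N
	 *   -> ixgbe_set_coalesce():  adapter->tx_work_limit = N;
	 *   -> per vector:            q_vector->tx.work_limit = adapter->tx_work_limit;
	 *   -> ixgbe_clean_tx_irq():  while (... && count < q_vector->tx.work_limit)
	 */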
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index f0c1018bbf31..824edae77865 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -37,25 +37,6 @@
 #include <scsi/libfcoe.h>
 
 /**
- * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type
- * @rx_desc: advanced rx descriptor
- *
- * Returns : true if it is FCoE pkt
- */
-static inline bool ixgbe_rx_is_fcoe(union ixgbe_adv_rx_desc *rx_desc)
-{
-	u16 p;
-
-	p = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info);
-	if (p & IXGBE_RXDADV_PKTTYPE_ETQF) {
-		p &= IXGBE_RXDADV_PKTTYPE_ETQF_MASK;
-		p >>= IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT;
-		return p == IXGBE_ETQF_FILTER_FCOE;
-	}
-	return false;
-}
-
-/**
  * ixgbe_fcoe_clear_ddp - clear the given ddp context
  * @ddp - ptr to the ixgbe_fcoe_ddp
  *
@@ -136,7 +117,6 @@ out_ddp_put:
 	return len;
 }
 
-
 /**
  * ixgbe_fcoe_ddp_setup - called to set up ddp context
  * @netdev: the corresponding net_device
@@ -380,23 +360,20 @@ int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
  */
 int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
 		   union ixgbe_adv_rx_desc *rx_desc,
-		   struct sk_buff *skb)
+		   struct sk_buff *skb,
+		   u32 staterr)
 {
 	u16 xid;
 	u32 fctl;
-	u32 sterr, fceofe, fcerr, fcstat;
+	u32 fceofe, fcerr, fcstat;
 	int rc = -EINVAL;
 	struct ixgbe_fcoe *fcoe;
 	struct ixgbe_fcoe_ddp *ddp;
 	struct fc_frame_header *fh;
 	struct fcoe_crc_eof *crc;
 
-	if (!ixgbe_rx_is_fcoe(rx_desc))
-		goto ddp_out;
-
-	sterr = le32_to_cpu(rx_desc->wb.upper.status_error);
-	fcerr = (sterr & IXGBE_RXDADV_ERR_FCERR);
-	fceofe = (sterr & IXGBE_RXDADV_ERR_FCEOFE);
+	fcerr = (staterr & IXGBE_RXDADV_ERR_FCERR);
+	fceofe = (staterr & IXGBE_RXDADV_ERR_FCEOFE);
 	if (fcerr == IXGBE_FCERR_BADCRC)
 		skb_checksum_none_assert(skb);
 	else
@@ -425,7 +402,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
 	if (fcerr | fceofe)
 		goto ddp_out;
 
-	fcstat = (sterr & IXGBE_RXDADV_STAT_FCSTAT);
+	fcstat = (staterr & IXGBE_RXDADV_STAT_FCSTAT);
 	if (fcstat) {
 		/* update length of DDPed data */
 		ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
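In ixgbe_fcoe.c the descriptor status is no longer re-read: callers pass in the staterr value they already extracted, and the FCoE-packet test moves to ixgbe_main.c in an endian-neutral form that compares pkt_info while still little-endian (both sides of the == are compile-time __le16 constants, so no per-packet byte swap is needed). A standalone sketch of that comparison trick; the IXGBE_* values here are assumptions for illustration, not copied from ixgbe_type.h:

	#define IXGBE_RXDADV_PKTTYPE_ETQF_MASK	0x0070	/* assumed */
	#define IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT	4	/* assumed */
	#define IXGBE_ETQF_FILTER_FCOE		2	/* assumed */

	static inline bool pkt_is_fcoe(__le16 pkt_info)
	{
		/* both operands are __le16 constants: no runtime swap */
		return (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_ETQF_MASK)) ==
		       cpu_to_le16(IXGBE_ETQF_FILTER_FCOE <<
				   IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT);
	}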
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index de307965dfee..1be617545dc9 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -805,7 +805,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
 	eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
 
 	while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
-	       (count < tx_ring->work_limit)) {
+	       (count < q_vector->tx.work_limit)) {
 		bool cleaned = false;
 		rmb(); /* read buffer_info after eop_desc */
 		for ( ; !cleaned; count++) {
@@ -834,11 +834,11 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
 	}
 
 	tx_ring->next_to_clean = i;
-	tx_ring->total_bytes += total_bytes;
-	tx_ring->total_packets += total_packets;
-	u64_stats_update_begin(&tx_ring->syncp);
-	tx_ring->stats.packets += total_packets;
 	tx_ring->stats.bytes += total_bytes;
+	tx_ring->stats.packets += total_packets;
+	u64_stats_update_begin(&tx_ring->syncp);
+	q_vector->tx.total_bytes += total_bytes;
+	q_vector->tx.total_packets += total_packets;
 	u64_stats_update_end(&tx_ring->syncp);
 
 	if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
@@ -886,7 +886,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
 		}
 	}
 
-	return count < tx_ring->work_limit;
+	return count < q_vector->tx.work_limit;
 }
 
 #ifdef CONFIG_IXGBE_DCA
@@ -959,17 +959,17 @@ static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
 	if (q_vector->cpu == cpu)
 		goto out_no_update;
 
-	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
-	for (i = 0; i < q_vector->txr_count; i++) {
+	r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues);
+	for (i = 0; i < q_vector->tx.count; i++) {
 		ixgbe_update_tx_dca(adapter, adapter->tx_ring[r_idx], cpu);
-		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
+		r_idx = find_next_bit(q_vector->tx.idx, adapter->num_tx_queues,
 				      r_idx + 1);
 	}
 
-	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
-	for (i = 0; i < q_vector->rxr_count; i++) {
+	r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues);
+	for (i = 0; i < q_vector->rx.count; i++) {
 		ixgbe_update_rx_dca(adapter, adapter->rx_ring[r_idx], cpu);
-		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
+		r_idx = find_next_bit(q_vector->rx.idx, adapter->num_rx_queues,
 				      r_idx + 1);
 	}
 
@@ -1039,6 +1039,24 @@ static inline void ixgbe_rx_hash(union ixgbe_adv_rx_desc *rx_desc,
 }
 
 /**
+ * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type
+ * @adapter: address of board private structure
+ * @rx_desc: advanced rx descriptor
+ *
+ * Returns : true if it is FCoE pkt
+ */
+static inline bool ixgbe_rx_is_fcoe(struct ixgbe_adapter *adapter,
+				    union ixgbe_adv_rx_desc *rx_desc)
+{
+	__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
+
+	return (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
+	       ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_ETQF_MASK)) ==
+	        (cpu_to_le16(IXGBE_ETQF_FILTER_FCOE <<
+	                     IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT)));
+}
+
+/**
  * ixgbe_receive_skb - Send a completed packet up the stack
  * @adapter: board private structure
  * @skb: packet to send up
@@ -1070,14 +1088,14 @@ static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
  * @adapter: address of board private structure
  * @status_err: hardware indication of status of receive
  * @skb: skb currently being received and modified
+ * @status_err: status error value of last descriptor in packet
  **/
 static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
 				     union ixgbe_adv_rx_desc *rx_desc,
-				     struct sk_buff *skb)
+				     struct sk_buff *skb,
+				     u32 status_err)
 {
-	u32 status_err = le32_to_cpu(rx_desc->wb.upper.status_error);
-
-	skb_checksum_none_assert(skb);
+	skb->ip_summed = CHECKSUM_NONE;
 
 	/* Rx csum disabled */
 	if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
@@ -1421,14 +1439,12 @@ static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		}
 
 		/* ERR_MASK will only have valid bits if EOP set */
-		if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
-			/* trim packet back to size 0 and recycle it */
-			__pskb_trim(skb, 0);
-			rx_buffer_info->skb = skb;
+		if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
+			dev_kfree_skb_any(skb);
 			goto next_desc;
 		}
 
-		ixgbe_rx_checksum(adapter, rx_desc, skb);
+		ixgbe_rx_checksum(adapter, rx_desc, skb, staterr);
 		if (adapter->netdev->features & NETIF_F_RXHASH)
 			ixgbe_rx_hash(rx_desc, skb);
 
@@ -1439,8 +1455,9 @@ static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		skb->protocol = eth_type_trans(skb, rx_ring->netdev);
 #ifdef IXGBE_FCOE
 		/* if ddp, not passing to ULD unless for FCP_RSP or error */
-		if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
-			ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
+		if (ixgbe_rx_is_fcoe(adapter, rx_desc)) {
+			ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb,
+						   staterr);
 			if (!ddp_bytes)
 				goto next_desc;
 		}
@@ -1486,12 +1503,12 @@ next_desc:
 	}
 #endif /* IXGBE_FCOE */
 
-	rx_ring->total_packets += total_rx_packets;
-	rx_ring->total_bytes += total_rx_bytes;
 	u64_stats_update_begin(&rx_ring->syncp);
 	rx_ring->stats.packets += total_rx_packets;
 	rx_ring->stats.bytes += total_rx_bytes;
 	u64_stats_update_end(&rx_ring->syncp);
+	q_vector->rx.total_packets += total_rx_packets;
+	q_vector->rx.total_bytes += total_rx_bytes;
 }
 
 static int ixgbe_clean_rxonly(struct napi_struct *, int);
@@ -1517,31 +1534,31 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
 	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
 		q_vector = adapter->q_vector[v_idx];
 		/* XXX for_each_set_bit(...) */
-		r_idx = find_first_bit(q_vector->rxr_idx,
+		r_idx = find_first_bit(q_vector->rx.idx,
 				       adapter->num_rx_queues);
 
-		for (i = 0; i < q_vector->rxr_count; i++) {
+		for (i = 0; i < q_vector->rx.count; i++) {
 			u8 reg_idx = adapter->rx_ring[r_idx]->reg_idx;
 			ixgbe_set_ivar(adapter, 0, reg_idx, v_idx);
-			r_idx = find_next_bit(q_vector->rxr_idx,
+			r_idx = find_next_bit(q_vector->rx.idx,
 					      adapter->num_rx_queues,
 					      r_idx + 1);
 		}
-		r_idx = find_first_bit(q_vector->txr_idx,
+		r_idx = find_first_bit(q_vector->tx.idx,
 				       adapter->num_tx_queues);
 
-		for (i = 0; i < q_vector->txr_count; i++) {
+		for (i = 0; i < q_vector->tx.count; i++) {
 			u8 reg_idx = adapter->tx_ring[r_idx]->reg_idx;
 			ixgbe_set_ivar(adapter, 1, reg_idx, v_idx);
-			r_idx = find_next_bit(q_vector->txr_idx,
+			r_idx = find_next_bit(q_vector->tx.idx,
 					      adapter->num_tx_queues,
 					      r_idx + 1);
 		}
 
-		if (q_vector->txr_count && !q_vector->rxr_count)
+		if (q_vector->tx.count && !q_vector->rx.count)
 			/* tx only */
 			q_vector->eitr = adapter->tx_eitr_param;
-		else if (q_vector->rxr_count)
+		else if (q_vector->rx.count)
 			/* rx or mixed */
 			q_vector->eitr = adapter->rx_eitr_param;
 
@@ -1597,11 +1614,8 @@ enum latency_range {
 
 /**
  * ixgbe_update_itr - update the dynamic ITR value based on statistics
- * @adapter: pointer to adapter
- * @eitr: eitr setting (ints per sec) to give last timeslice
- * @itr_setting: current throttle rate in ints/second
- * @packets: the number of packets during this measurement interval
- * @bytes: the number of bytes during this measurement interval
+ * @q_vector: structure containing interrupt and ring information
+ * @ring_container: structure containing ring performance data
  *
  * Stores a new ITR value based on packets and byte
  * counts during the last interrupt. The advantage of per interrupt
@@ -1613,17 +1627,18 @@ enum latency_range {
  * this functionality is controlled by the InterruptThrottleRate module
  * parameter (see ixgbe_param.c)
 **/
-static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter,
-			   u32 eitr, u8 itr_setting,
-			   int packets, int bytes)
+static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector,
+			     struct ixgbe_ring_container *ring_container)
 {
-	unsigned int retval = itr_setting;
-	u32 timepassed_us;
 	u64 bytes_perint;
+	struct ixgbe_adapter *adapter = q_vector->adapter;
+	int bytes = ring_container->total_bytes;
+	int packets = ring_container->total_packets;
+	u32 timepassed_us;
+	u8 itr_setting = ring_container->itr;
 
 	if (packets == 0)
-		goto update_itr_done;
-
+		return;
 
 	/* simple throttlerate management
 	 * 0-20MB/s lowest (100000 ints/s)
@@ -1631,28 +1646,32 @@ static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter,
 	 * 100-1249MB/s bulk (8000 ints/s)
 	 */
 	/* what was last interrupt timeslice? */
-	timepassed_us = 1000000/eitr;
+	timepassed_us = 1000000/q_vector->eitr;
 	bytes_perint = bytes / timepassed_us; /* bytes/usec */
 
 	switch (itr_setting) {
 	case lowest_latency:
 		if (bytes_perint > adapter->eitr_low)
-			retval = low_latency;
+			itr_setting = low_latency;
 		break;
 	case low_latency:
 		if (bytes_perint > adapter->eitr_high)
-			retval = bulk_latency;
+			itr_setting = bulk_latency;
 		else if (bytes_perint <= adapter->eitr_low)
-			retval = lowest_latency;
+			itr_setting = lowest_latency;
 		break;
 	case bulk_latency:
 		if (bytes_perint <= adapter->eitr_high)
-			retval = low_latency;
+			itr_setting = low_latency;
 		break;
 	}
 
-update_itr_done:
-	return retval;
+	/* clear work counters since we have the values we need */
+	ring_container->total_bytes = 0;
+	ring_container->total_packets = 0;
+
+	/* write updated itr to ring container */
+	ring_container->itr = itr_setting;
 }
 
 /**
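A quick worked example of the classification above: at q_vector->eitr = 20000 ints/s the last timeslice is 1000000/20000 = 50 us, so moving 2500 bytes in that interval gives bytes_perint = 2500/50 = 50 bytes/us (roughly 50 MB/s), which is above the default eitr_high threshold and escalates a low_latency ring to bulk_latency. A self-contained model of the state machine (the eitr_low/eitr_high defaults of 10 and 20 are assumptions, recalled from ixgbe_sw_init rather than taken from this diff):

	enum latency_range { lowest_latency, low_latency, bulk_latency };

	/* Standalone model of ixgbe_update_itr()'s classification step. */
	static enum latency_range classify(unsigned int eitr_ints_per_sec,
					   unsigned int bytes,
					   enum latency_range cur)
	{
		unsigned int timepassed_us = 1000000 / eitr_ints_per_sec;
		unsigned int bytes_perint = bytes / timepassed_us;
		const unsigned int eitr_low = 10, eitr_high = 20; /* assumed defaults */

		switch (cur) {
		case lowest_latency:
			if (bytes_perint > eitr_low)
				cur = low_latency;
			break;
		case low_latency:
			if (bytes_perint > eitr_high)
				cur = bulk_latency;
			else if (bytes_perint <= eitr_low)
				cur = lowest_latency;
			break;
		case bulk_latency:
			if (bytes_perint <= eitr_high)
				cur = low_latency;
			break;
		}
		return cur;
	}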
@@ -1698,44 +1717,15 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
 	IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
 }
 
-static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
+static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector)
 {
-	struct ixgbe_adapter *adapter = q_vector->adapter;
-	int i, r_idx;
-	u32 new_itr;
-	u8 current_itr, ret_itr;
-
-	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
-	for (i = 0; i < q_vector->txr_count; i++) {
-		struct ixgbe_ring *tx_ring = adapter->tx_ring[r_idx];
-		ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
-					   q_vector->tx_itr,
-					   tx_ring->total_packets,
-					   tx_ring->total_bytes);
-		/* if the result for this queue would decrease interrupt
-		 * rate for this vector then use that result */
-		q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
-				    q_vector->tx_itr - 1 : ret_itr);
-		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
-				      r_idx + 1);
-	}
+	u32 new_itr = q_vector->eitr;
+	u8 current_itr;
 
-	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
-	for (i = 0; i < q_vector->rxr_count; i++) {
-		struct ixgbe_ring *rx_ring = adapter->rx_ring[r_idx];
-		ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
-					   q_vector->rx_itr,
-					   rx_ring->total_packets,
-					   rx_ring->total_bytes);
-		/* if the result for this queue would decrease interrupt
-		 * rate for this vector then use that result */
-		q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
-				    q_vector->rx_itr - 1 : ret_itr);
-		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
-				      r_idx + 1);
-	}
+	ixgbe_update_itr(q_vector, &q_vector->tx);
+	ixgbe_update_itr(q_vector, &q_vector->rx);
 
-	current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
+	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
 
 	switch (current_itr) {
 	/* counts and packets in update_itr are dependent on these numbers */
@@ -1746,16 +1736,17 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
 		new_itr = 20000; /* aka hwitr = ~200 */
 		break;
 	case bulk_latency:
-	default:
 		new_itr = 8000;
 		break;
+	default:
+		break;
 	}
 
 	if (new_itr != q_vector->eitr) {
 		/* do an exponential smoothing */
 		new_itr = ((q_vector->eitr * 9) + new_itr)/10;
 
-		/* save the algorithm value here, not the smoothed one */
+		/* save the algorithm value here */
 		q_vector->eitr = new_itr;
 
 		ixgbe_write_eitr(q_vector);
@@ -1995,15 +1986,13 @@ static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
 	struct ixgbe_ring     *tx_ring;
 	int i, r_idx;
 
-	if (!q_vector->txr_count)
+	if (!q_vector->tx.count)
 		return IRQ_HANDLED;
 
-	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
-	for (i = 0; i < q_vector->txr_count; i++) {
+	r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues);
+	for (i = 0; i < q_vector->tx.count; i++) {
 		tx_ring = adapter->tx_ring[r_idx];
-		tx_ring->total_bytes = 0;
-		tx_ring->total_packets = 0;
-		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
+		r_idx = find_next_bit(q_vector->tx.idx, adapter->num_tx_queues,
 				      r_idx + 1);
 	}
 
@@ -2031,16 +2020,14 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
 		ixgbe_update_dca(q_vector);
 #endif
 
-	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
-	for (i = 0; i < q_vector->rxr_count; i++) {
+	r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues);
+	for (i = 0; i < q_vector->rx.count; i++) {
 		rx_ring = adapter->rx_ring[r_idx];
-		rx_ring->total_bytes = 0;
-		rx_ring->total_packets = 0;
-		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
+		r_idx = find_next_bit(q_vector->rx.idx, adapter->num_rx_queues,
 				      r_idx + 1);
 	}
 
-	if (!q_vector->rxr_count)
+	if (!q_vector->rx.count)
 		return IRQ_HANDLED;
 
 	/* EIAM disabled interrupts (on this vector) for us */
@@ -2057,24 +2044,20 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
 	int r_idx;
 	int i;
 
-	if (!q_vector->txr_count && !q_vector->rxr_count)
+	if (!q_vector->tx.count && !q_vector->rx.count)
 		return IRQ_HANDLED;
 
-	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
-	for (i = 0; i < q_vector->txr_count; i++) {
+	r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues);
+	for (i = 0; i < q_vector->tx.count; i++) {
 		ring = adapter->tx_ring[r_idx];
-		ring->total_bytes = 0;
-		ring->total_packets = 0;
-		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
+		r_idx = find_next_bit(q_vector->tx.idx, adapter->num_tx_queues,
 				      r_idx + 1);
 	}
 
-	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
-	for (i = 0; i < q_vector->rxr_count; i++) {
+	r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues);
+	for (i = 0; i < q_vector->rx.count; i++) {
 		ring = adapter->rx_ring[r_idx];
-		ring->total_bytes = 0;
-		ring->total_packets = 0;
-		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
+		r_idx = find_next_bit(q_vector->rx.idx, adapter->num_rx_queues,
 				      r_idx + 1);
 	}
 
@@ -2106,7 +2089,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
 		ixgbe_update_dca(q_vector);
 #endif
 
-	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+	r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues);
 	rx_ring = adapter->rx_ring[r_idx];
 
 	ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
@@ -2115,7 +2098,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
 	if (work_done < budget) {
 		napi_complete(napi);
 		if (adapter->rx_itr_setting & 1)
-			ixgbe_set_itr_msix(q_vector);
+			ixgbe_set_itr(q_vector);
 		if (!test_bit(__IXGBE_DOWN, &adapter->state))
 			ixgbe_irq_enable_queues(adapter,
 						((u64)1 << q_vector->v_idx));
@@ -2147,33 +2130,33 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
 		ixgbe_update_dca(q_vector);
 #endif
 
-	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
-	for (i = 0; i < q_vector->txr_count; i++) {
+	r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues);
+	for (i = 0; i < q_vector->tx.count; i++) {
 		ring = adapter->tx_ring[r_idx];
 		tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring);
-		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
+		r_idx = find_next_bit(q_vector->tx.idx, adapter->num_tx_queues,
 				      r_idx + 1);
 	}
 
 	/* attempt to distribute budget to each queue fairly, but don't allow
 	 * the budget to go below 1 because we'll exit polling */
-	budget /= (q_vector->rxr_count ?: 1);
+	budget /= (q_vector->rx.count ?: 1);
 	budget = max(budget, 1);
-	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
-	for (i = 0; i < q_vector->rxr_count; i++) {
+	r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues);
+	for (i = 0; i < q_vector->rx.count; i++) {
 		ring = adapter->rx_ring[r_idx];
 		ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget);
-		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
+		r_idx = find_next_bit(q_vector->rx.idx, adapter->num_rx_queues,
 				      r_idx + 1);
 	}
 
-	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+	r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues);
 	ring = adapter->rx_ring[r_idx];
 	/* If all Rx work done, exit the polling mode */
 	if (work_done < budget) {
 		napi_complete(napi);
 		if (adapter->rx_itr_setting & 1)
-			ixgbe_set_itr_msix(q_vector);
+			ixgbe_set_itr(q_vector);
 		if (!test_bit(__IXGBE_DOWN, &adapter->state))
 			ixgbe_irq_enable_queues(adapter,
 						((u64)1 << q_vector->v_idx));
@@ -2205,7 +2188,7 @@ static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
 		ixgbe_update_dca(q_vector);
 #endif
 
-	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
+	r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues);
 	tx_ring = adapter->tx_ring[r_idx];
 
 	if (!ixgbe_clean_tx_irq(q_vector, tx_ring))
@@ -2215,7 +2198,7 @@ static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
 	if (work_done < budget) {
 		napi_complete(napi);
 		if (adapter->tx_itr_setting & 1)
-			ixgbe_set_itr_msix(q_vector);
+			ixgbe_set_itr(q_vector);
 		if (!test_bit(__IXGBE_DOWN, &adapter->state))
 			ixgbe_irq_enable_queues(adapter,
 						((u64)1 << q_vector->v_idx));
@@ -2230,8 +2213,8 @@ static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
 	struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
 	struct ixgbe_ring *rx_ring = a->rx_ring[r_idx];
 
-	set_bit(r_idx, q_vector->rxr_idx);
-	q_vector->rxr_count++;
+	set_bit(r_idx, q_vector->rx.idx);
+	q_vector->rx.count++;
 	rx_ring->q_vector = q_vector;
 }
 
@@ -2241,9 +2224,10 @@ static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
 	struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
 	struct ixgbe_ring *tx_ring = a->tx_ring[t_idx];
 
-	set_bit(t_idx, q_vector->txr_idx);
-	q_vector->txr_count++;
+	set_bit(t_idx, q_vector->tx.idx);
+	q_vector->tx.count++;
 	tx_ring->q_vector = q_vector;
+	q_vector->tx.work_limit = a->tx_work_limit;
 }
 
@@ -2332,10 +2316,10 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
 	if (err)
 		return err;
 
-#define SET_HANDLER(_v) (((_v)->rxr_count && (_v)->txr_count)        \
+#define SET_HANDLER(_v) (((_v)->rx.count && (_v)->tx.count)          \
 					? &ixgbe_msix_clean_many : \
-			  (_v)->rxr_count ? &ixgbe_msix_clean_rx   : \
-			  (_v)->txr_count ? &ixgbe_msix_clean_tx   : \
+			  (_v)->rx.count ? &ixgbe_msix_clean_rx   : \
+			  (_v)->tx.count ? &ixgbe_msix_clean_tx   : \
 			  NULL)
 	for (vector = 0; vector < q_vectors; vector++) {
 		struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
@@ -2386,51 +2370,6 @@ free_queue_irqs:
 	return err;
 }
 
-static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
-{
-	struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
-	struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
-	struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
-	u32 new_itr = q_vector->eitr;
-	u8 current_itr;
-
-	q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr,
-					    q_vector->tx_itr,
-					    tx_ring->total_packets,
-					    tx_ring->total_bytes);
-	q_vector->rx_itr = ixgbe_update_itr(adapter, new_itr,
-					    q_vector->rx_itr,
-					    rx_ring->total_packets,
-					    rx_ring->total_bytes);
-
-	current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
-
-	switch (current_itr) {
-	/* counts and packets in update_itr are dependent on these numbers */
-	case lowest_latency:
-		new_itr = 100000;
-		break;
-	case low_latency:
-		new_itr = 20000; /* aka hwitr = ~200 */
-		break;
-	case bulk_latency:
-		new_itr = 8000;
-		break;
-	default:
-		break;
-	}
-
-	if (new_itr != q_vector->eitr) {
-		/* do an exponential smoothing */
-		new_itr = ((q_vector->eitr * 9) + new_itr)/10;
-
-		/* save the algorithm value here */
-		q_vector->eitr = new_itr;
-
-		ixgbe_write_eitr(q_vector);
-	}
-}
-
 /**
  * ixgbe_irq_enable - Enable default interrupt generation settings
  * @adapter: board private structure
@@ -2528,10 +2467,6 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
 	ixgbe_check_fan_failure(adapter, eicr);
 
 	if (napi_schedule_prep(&(q_vector->napi))) {
-		adapter->tx_ring[0]->total_packets = 0;
-		adapter->tx_ring[0]->total_bytes = 0;
-		adapter->rx_ring[0]->total_packets = 0;
-		adapter->rx_ring[0]->total_bytes = 0;
 		/* would disable interrupts here but EIAM disabled it */
 		__napi_schedule(&(q_vector->napi));
 	}
@@ -2553,10 +2488,10 @@ static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter)
 
 	for (i = 0; i < q_vectors; i++) {
 		struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
-		bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
-		bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
-		q_vector->rxr_count = 0;
-		q_vector->txr_count = 0;
+		bitmap_zero(q_vector->rx.idx, MAX_RX_QUEUES);
+		bitmap_zero(q_vector->tx.idx, MAX_TX_QUEUES);
+		q_vector->rx.count = 0;
+		q_vector->tx.count = 0;
 	}
 }
 
@@ -2601,8 +2536,8 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
 		i--;
 		for (; i >= 0; i--) {
 			/* free only the irqs that were actually requested */
-			if (!adapter->q_vector[i]->rxr_count &&
-			    !adapter->q_vector[i]->txr_count)
+			if (!adapter->q_vector[i]->rx.count &&
+			    !adapter->q_vector[i]->tx.count)
 				continue;
 
 			free_irq(adapter->msix_entries[i].vector,
@@ -2927,28 +2862,11 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
 }
 
 /**
- * ixgbe_clear_rscctl - disable RSC for the indicated ring
- * @adapter: address of board private structure
- * @ring: structure containing ring specific data
- **/
-void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter,
-			struct ixgbe_ring *ring)
-{
-	struct ixgbe_hw *hw = &adapter->hw;
-	u32 rscctrl;
-	u8 reg_idx = ring->reg_idx;
-
-	rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
-	rscctrl &= ~IXGBE_RSCCTL_RSCEN;
-	IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
-}
-
-/**
  * ixgbe_configure_rscctl - enable RSC for the indicated ring
  * @adapter: address of board private structure
  * @index: index of ring to set
 **/
-void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
-			    struct ixgbe_ring *ring)
+static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
+				   struct ixgbe_ring *ring)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
@@ -3616,10 +3534,10 @@ static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
3616 q_vector = adapter->q_vector[q_idx]; 3534 q_vector = adapter->q_vector[q_idx];
3617 napi = &q_vector->napi; 3535 napi = &q_vector->napi;
3618 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 3536 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3619 if (!q_vector->rxr_count || !q_vector->txr_count) { 3537 if (!q_vector->rx.count || !q_vector->tx.count) {
3620 if (q_vector->txr_count == 1) 3538 if (q_vector->tx.count == 1)
3621 napi->poll = &ixgbe_clean_txonly; 3539 napi->poll = &ixgbe_clean_txonly;
3622 else if (q_vector->rxr_count == 1) 3540 else if (q_vector->rx.count == 1)
3623 napi->poll = &ixgbe_clean_rxonly; 3541 napi->poll = &ixgbe_clean_rxonly;
3624 } 3542 }
3625 } 3543 }
@@ -4299,7 +4217,7 @@ static int ixgbe_poll(struct napi_struct *napi, int budget)
4299 if (work_done < budget) { 4217 if (work_done < budget) {
4300 napi_complete(napi); 4218 napi_complete(napi);
4301 if (adapter->rx_itr_setting & 1) 4219 if (adapter->rx_itr_setting & 1)
4302 ixgbe_set_itr(adapter); 4220 ixgbe_set_itr(q_vector);
4303 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 4221 if (!test_bit(__IXGBE_DOWN, &adapter->state))
4304 ixgbe_irq_enable_queues(adapter, IXGBE_EIMS_RTX_QUEUE); 4222 ixgbe_irq_enable_queues(adapter, IXGBE_EIMS_RTX_QUEUE);
4305 } 4223 }
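Editor's note: ixgbe_set_itr() now takes the q_vector instead of the adapter; the adapter stays reachable through q_vector->adapter, which the allocation hunk below assigns. A hedged skeleton of the new shape, reusing the eitr selection logic visible in ixgbe_alloc_q_vectors():

	static void example_set_itr(struct ixgbe_q_vector *q_vector)
	{
		struct ixgbe_adapter *adapter = q_vector->adapter;

		/* pick the parameter for the direction this vector services;
		 * the real helper also adapts EITR from rx/tx byte counts */
		q_vector->eitr = (q_vector->tx.count && !q_vector->rx.count) ?
				 adapter->tx_eitr_param : adapter->rx_eitr_param;
	}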
@@ -4965,7 +4883,7 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
4965 if (!q_vector) 4883 if (!q_vector)
4966 goto err_out; 4884 goto err_out;
4967 q_vector->adapter = adapter; 4885 q_vector->adapter = adapter;
4968 if (q_vector->txr_count && !q_vector->rxr_count) 4886 if (q_vector->tx.count && !q_vector->rx.count)
4969 q_vector->eitr = adapter->tx_eitr_param; 4887 q_vector->eitr = adapter->tx_eitr_param;
4970 else 4888 else
4971 q_vector->eitr = adapter->rx_eitr_param; 4889 q_vector->eitr = adapter->rx_eitr_param;
@@ -5224,6 +5142,9 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
5224 adapter->tx_ring_count = IXGBE_DEFAULT_TXD; 5142 adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
5225 adapter->rx_ring_count = IXGBE_DEFAULT_RXD; 5143 adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
5226 5144
5145 /* set default work limits */
5146 adapter->tx_work_limit = adapter->tx_ring_count;
5147
5227 /* initialize eeprom parameters */ 5148 /* initialize eeprom parameters */
5228 if (ixgbe_init_eeprom_params_generic(hw)) { 5149 if (ixgbe_init_eeprom_params_generic(hw)) {
5229 e_dev_err("EEPROM initialization failed\n"); 5150 e_dev_err("EEPROM initialization failed\n");
@@ -5270,7 +5191,6 @@ int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
5270 5191
5271 tx_ring->next_to_use = 0; 5192 tx_ring->next_to_use = 0;
5272 tx_ring->next_to_clean = 0; 5193 tx_ring->next_to_clean = 0;
5273 tx_ring->work_limit = tx_ring->count;
5274 return 0; 5194 return 0;
5275 5195
5276err: 5196err:
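Editor's note: the Tx clean budget moves from a per-ring field (deleted in ixgbe_setup_tx_resources() above) to a single adapter->tx_work_limit, defaulted to the descriptor count in ixgbe_sw_init(). Illustrative only, not the driver's ixgbe_clean_tx_irq(): how such a budget typically bounds the clean loop.

	static bool example_clean_tx(struct ixgbe_adapter *adapter,
				     struct ixgbe_ring *tx_ring)
	{
		u16 budget = adapter->tx_work_limit;

		while (budget && tx_ring->next_to_clean != tx_ring->next_to_use) {
			/* reclaim one completed descriptor here */
			tx_ring->next_to_clean =
				(tx_ring->next_to_clean + 1) % tx_ring->count;
			budget--;
		}
		return !!budget;	/* false: budget spent, poll again */
	}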
@@ -5979,7 +5899,7 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
5979 /* get one bit for every active tx/rx interrupt vector */ 5899 /* get one bit for every active tx/rx interrupt vector */
5980 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { 5900 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
5981 struct ixgbe_q_vector *qv = adapter->q_vector[i]; 5901 struct ixgbe_q_vector *qv = adapter->q_vector[i];
5982 if (qv->rxr_count || qv->txr_count) 5902 if (qv->rx.count || qv->tx.count)
5983 eics |= ((u64)1 << i); 5903 eics |= ((u64)1 << i);
5984 } 5904 }
5985 } 5905 }
@@ -6084,9 +6004,7 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
6084 (flow_tx ? "TX" : "None")))); 6004 (flow_tx ? "TX" : "None"))));
6085 6005
6086 netif_carrier_on(netdev); 6006 netif_carrier_on(netdev);
6087#ifdef HAVE_IPLINK_VF_CONFIG
6088 ixgbe_check_vf_rate_limit(adapter); 6007 ixgbe_check_vf_rate_limit(adapter);
6089#endif /* HAVE_IPLINK_VF_CONFIG */
6090} 6008}
6091 6009
6092/** 6010/**
@@ -6785,7 +6703,7 @@ static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
6785 return 0; 6703 return 0;
6786} 6704}
6787 6705
6788static int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size) 6706static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
6789{ 6707{
6790 if (likely(ixgbe_desc_unused(tx_ring) >= size)) 6708 if (likely(ixgbe_desc_unused(tx_ring) >= size))
6791 return 0; 6709 return 0;
@@ -6795,11 +6713,10 @@ static int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
6795static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) 6713static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
6796{ 6714{
6797 struct ixgbe_adapter *adapter = netdev_priv(dev); 6715 struct ixgbe_adapter *adapter = netdev_priv(dev);
6798 int txq = smp_processor_id(); 6716 int txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
6717 smp_processor_id();
6799#ifdef IXGBE_FCOE 6718#ifdef IXGBE_FCOE
6800 __be16 protocol; 6719 __be16 protocol = vlan_get_protocol(skb);
6801
6802 protocol = vlan_get_protocol(skb);
6803 6720
6804 if (((protocol == htons(ETH_P_FCOE)) || 6721 if (((protocol == htons(ETH_P_FCOE)) ||
6805 (protocol == htons(ETH_P_FIP))) && 6722 (protocol == htons(ETH_P_FIP))) &&
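Editor's note: the queue pick now prefers the Rx queue recorded on the skb and only falls back to the running CPU; the FCoE/FIP special case at the end of this hunk is unchanged. The new rule, restated as a self-contained helper:

	static u16 example_pick_txq(const struct sk_buff *skb)
	{
		/* keep a flow on the queue pair it arrived on for locality */
		if (skb_rx_queue_recorded(skb))
			return skb_get_rx_queue(skb);
		return smp_processor_id();	/* untracked flows stay CPU-local */
	}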
@@ -7188,6 +7105,98 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
7188 return 0; 7105 return 0;
7189} 7106}
7190 7107
7108void ixgbe_do_reset(struct net_device *netdev)
7109{
7110 struct ixgbe_adapter *adapter = netdev_priv(netdev);
7111
7112 if (netif_running(netdev))
7113 ixgbe_reinit_locked(adapter);
7114 else
7115 ixgbe_reset(adapter);
7116}
7117
7118static u32 ixgbe_fix_features(struct net_device *netdev, u32 data)
7119{
7120 struct ixgbe_adapter *adapter = netdev_priv(netdev);
7121
7122#ifdef CONFIG_DCB
7123 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
7124 data &= ~NETIF_F_HW_VLAN_RX;
7125#endif
7126
7127 /* return error if RXHASH is being enabled when RSS is not supported */
7128 if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
7129 data &= ~NETIF_F_RXHASH;
7130
7131 /* If Rx checksum is disabled, then RSC/LRO should also be disabled */
7132 if (!(data & NETIF_F_RXCSUM))
7133 data &= ~NETIF_F_LRO;
7134
7135 /* Turn off LRO if not RSC capable or invalid ITR settings */
7136 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) {
7137 data &= ~NETIF_F_LRO;
7138 } else if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
7139 (adapter->rx_itr_setting != 1 &&
7140 adapter->rx_itr_setting > IXGBE_MAX_RSC_INT_RATE)) {
7141 data &= ~NETIF_F_LRO;
7142 e_info(probe, "rx-usecs set too low, not enabling RSC\n");
7143 }
7144
7145 return data;
7146}
7147
7148static int ixgbe_set_features(struct net_device *netdev, u32 data)
7149{
7150 struct ixgbe_adapter *adapter = netdev_priv(netdev);
7151 bool need_reset = false;
7152
7153 /* If Rx checksum is disabled, then RSC/LRO should also be disabled */
7154 if (!(data & NETIF_F_RXCSUM))
7155 adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
7156 else
7157 adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
7158
7159 /* Make sure RSC matches LRO, reset if change */
7160 if (!!(data & NETIF_F_LRO) !=
7161 !!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
7162 adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED;
7163 switch (adapter->hw.mac.type) {
7164 case ixgbe_mac_X540:
7165 case ixgbe_mac_82599EB:
7166 need_reset = true;
7167 break;
7168 default:
7169 break;
7170 }
7171 }
7172
7173 /*
7174 * Check if Flow Director n-tuple support was enabled or disabled. If
7175 * the state changed, we need to reset.
7176 */
7177 if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) {
7178 /* turn off ATR, enable perfect filters and reset */
7179 if (data & NETIF_F_NTUPLE) {
7180 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
7181 adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
7182 need_reset = true;
7183 }
7184 } else if (!(data & NETIF_F_NTUPLE)) {
7185 /* turn off Flow Director, set ATR and reset */
7186 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
7187 if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
7188 !(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
7189 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
7190 need_reset = true;
7191 }
7192
7193 if (need_reset)
7194 ixgbe_do_reset(netdev);
7195
7196 return 0;
7197
7198}
7199
7191static const struct net_device_ops ixgbe_netdev_ops = { 7200static const struct net_device_ops ixgbe_netdev_ops = {
7192 .ndo_open = ixgbe_open, 7201 .ndo_open = ixgbe_open,
7193 .ndo_stop = ixgbe_close, 7202 .ndo_stop = ixgbe_close,
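Editor's note: the two new hooks, registered in the following hunk, plug into the kernel's feature negotiation: the core masks the requested bits through ndo_fix_features() and only then commits them via ndo_set_features(). A simplified sketch of that flow, modeled on netdev_update_features() of this era, not the exact core code:

	static void example_update_features(struct net_device *dev, u32 wanted)
	{
		u32 features = wanted;

		if (dev->netdev_ops->ndo_fix_features)
			features = dev->netdev_ops->ndo_fix_features(dev, features);

		if (features != dev->features) {
			if (dev->netdev_ops->ndo_set_features)
				dev->netdev_ops->ndo_set_features(dev, features);
			dev->features = features;
		}
	}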
@@ -7219,6 +7228,8 @@ static const struct net_device_ops ixgbe_netdev_ops = {
7219 .ndo_fcoe_disable = ixgbe_fcoe_disable, 7228 .ndo_fcoe_disable = ixgbe_fcoe_disable,
7220 .ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn, 7229 .ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn,
7221#endif /* IXGBE_FCOE */ 7230#endif /* IXGBE_FCOE */
7231 .ndo_set_features = ixgbe_set_features,
7232 .ndo_fix_features = ixgbe_fix_features,
7222}; 7233};
7223 7234
7224static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter, 7235static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
@@ -7486,20 +7497,24 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
7486 7497
7487 netdev->features = NETIF_F_SG | 7498 netdev->features = NETIF_F_SG |
7488 NETIF_F_IP_CSUM | 7499 NETIF_F_IP_CSUM |
7500 NETIF_F_IPV6_CSUM |
7489 NETIF_F_HW_VLAN_TX | 7501 NETIF_F_HW_VLAN_TX |
7490 NETIF_F_HW_VLAN_RX | 7502 NETIF_F_HW_VLAN_RX |
7491 NETIF_F_HW_VLAN_FILTER; 7503 NETIF_F_HW_VLAN_FILTER |
7504 NETIF_F_TSO |
7505 NETIF_F_TSO6 |
7506 NETIF_F_GRO |
7507 NETIF_F_RXHASH |
7508 NETIF_F_RXCSUM;
7492 7509
7493 netdev->features |= NETIF_F_IPV6_CSUM; 7510 netdev->hw_features = netdev->features;
7494 netdev->features |= NETIF_F_TSO;
7495 netdev->features |= NETIF_F_TSO6;
7496 netdev->features |= NETIF_F_GRO;
7497 netdev->features |= NETIF_F_RXHASH;
7498 7511
7499 switch (adapter->hw.mac.type) { 7512 switch (adapter->hw.mac.type) {
7500 case ixgbe_mac_82599EB: 7513 case ixgbe_mac_82599EB:
7501 case ixgbe_mac_X540: 7514 case ixgbe_mac_X540:
7502 netdev->features |= NETIF_F_SCTP_CSUM; 7515 netdev->features |= NETIF_F_SCTP_CSUM;
7516 netdev->hw_features |= NETIF_F_SCTP_CSUM |
7517 NETIF_F_NTUPLE;
7503 break; 7518 break;
7504 default: 7519 default:
7505 break; 7520 break;
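Editor's note: consolidating the feature assignments also seeds netdev->hw_features, the mask of user-togglable bits: anything in hw_features can be flipped at runtime through the hooks above, while features holds the currently enabled subset. The assumed idiom, in miniature:

	/* illustrative: advertise togglable bits, enable a default subset */
	netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | NETIF_F_LRO;
	netdev->features = netdev->hw_features & ~NETIF_F_LRO;	/* LRO off unless RSC enabled */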
@@ -7538,6 +7553,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
7538 netdev->vlan_features |= NETIF_F_HIGHDMA; 7553 netdev->vlan_features |= NETIF_F_HIGHDMA;
7539 } 7554 }
7540 7555
7556 if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
7557 netdev->hw_features |= NETIF_F_LRO;
7541 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) 7558 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
7542 netdev->features |= NETIF_F_LRO; 7559 netdev->features |= NETIF_F_LRO;
7543 7560
@@ -7574,25 +7591,24 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
7574 if (err) 7591 if (err)
7575 goto err_sw_init; 7592 goto err_sw_init;
7576 7593
7577 if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) 7594 if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) {
7595 netdev->hw_features &= ~NETIF_F_RXHASH;
7578 netdev->features &= ~NETIF_F_RXHASH; 7596 netdev->features &= ~NETIF_F_RXHASH;
7597 }
7579 7598
7580 switch (pdev->device) { 7599 switch (pdev->device) {
7581 case IXGBE_DEV_ID_82599_SFP: 7600 case IXGBE_DEV_ID_82599_SFP:
7582 /* Only this subdevice supports WOL */ 7601 /* Only this subdevice supports WOL */
7583 if (pdev->subsystem_device == IXGBE_SUBDEV_ID_82599_SFP) 7602 if (pdev->subsystem_device == IXGBE_SUBDEV_ID_82599_SFP)
7584 adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX | 7603 adapter->wol = IXGBE_WUFC_MAG;
7585 IXGBE_WUFC_MC | IXGBE_WUFC_BC);
7586 break; 7604 break;
7587 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: 7605 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
7588 /* All except this subdevice support WOL */ 7606 /* All except this subdevice support WOL */
7589 if (pdev->subsystem_device != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) 7607 if (pdev->subsystem_device != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
7590 adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX | 7608 adapter->wol = IXGBE_WUFC_MAG;
7591 IXGBE_WUFC_MC | IXGBE_WUFC_BC);
7592 break; 7609 break;
7593 case IXGBE_DEV_ID_82599_KX4: 7610 case IXGBE_DEV_ID_82599_KX4:
7594 adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX | 7611 adapter->wol = IXGBE_WUFC_MAG;
7595 IXGBE_WUFC_MC | IXGBE_WUFC_BC);
7596 break; 7612 break;
7597 default: 7613 default:
7598 adapter->wol = 0; 7614 adapter->wol = 0;
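Editor's note: all three 82599 WOL cases collapse from the four-filter set (IXGBE_WUFC_MAG | EX | MC | BC) to magic packet only. A hedged sketch of how adapter->wol would surface through an ethtool WOL handler, modeled on the usual ixgbe ethtool shape rather than quoted from it:

	static void example_get_wol(struct ixgbe_adapter *adapter,
				    struct ethtool_wolinfo *wol)
	{
		wol->supported = WAKE_MAGIC;	/* only magic-packet wake remains */
		wol->wolopts = (adapter->wol & IXGBE_WUFC_MAG) ? WAKE_MAGIC : 0;
	}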