author     Alexander Duyck <alexander.h.duyck@intel.com>    2008-07-08 18:10:46 -0400
committer  Jeff Garzik <jgarzik@redhat.com>                 2008-07-11 01:20:32 -0400
commit     7dfc16fab1186769d7d0086830ab3fbc8fddfcba (patch)
tree       2b5c7a9fbb1497b34d1081e2cb4052cb3893cb9c /drivers/net/igb
parent     2d064c06fecadadcb81a452acd373af00dfb1fec (diff)
igb: Add support for quad port WOL and feature flags
Change igb from using a series of individual boolean fields to using a single flags value that holds bit flags for the adapter's different features. This patch also adds WOL support for quad port adapters.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
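For readers less familiar with the idiom: the patch folds the per-feature booleans (msi_enabled and the CONFIG_DCA-only dca_enabled) into one unsigned int flags word in struct igb_adapter. Features are set with |=, cleared with &= ~, and tested with a bitwise AND against the IGB_FLAG_* masks defined in igb.h. Below is a minimal, self-contained sketch of the pattern, not driver code; the demo_adapter struct and main() are illustrative stand-ins, while the flag values are the ones introduced by this patch.

#include <stdio.h>

/* flag values as defined in igb.h by this patch */
#define IGB_FLAG_HAS_MSI      (1 << 0)
#define IGB_FLAG_DCA_ENABLED  (1 << 3)
#define IGB_FLAG_QUAD_PORT_A  (1 << 6)

struct demo_adapter {           /* illustrative stand-in for struct igb_adapter */
        unsigned int flags;     /* replaces msi_enabled, dca_enabled, ... */
};

int main(void)
{
        struct demo_adapter adapter = { .flags = 0 };

        adapter.flags |= IGB_FLAG_HAS_MSI;       /* set: MSI successfully enabled */
        adapter.flags |= IGB_FLAG_DCA_ENABLED;   /* set: DCA requester added */
        adapter.flags &= ~IGB_FLAG_DCA_ENABLED;  /* clear: DCA provider removed */

        if (adapter.flags & IGB_FLAG_HAS_MSI)    /* test: pick the MSI handler */
                printf("using MSI\n");
        if (!(adapter.flags & IGB_FLAG_QUAD_PORT_A))
                printf("not port A: WoL stays disabled on this function\n");
        return 0;
}

Packing the bits into one word also removes the need for #ifdef-wrapped struct members such as the old dca_enabled, since an unused flag simply stays clear.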
Diffstat (limited to 'drivers/net/igb')
-rw-r--r--   drivers/net/igb/igb.h            13
-rw-r--r--   drivers/net/igb/igb_ethtool.c    12
-rw-r--r--   drivers/net/igb/igb_main.c       92
3 files changed, 81 insertions(+), 36 deletions(-)
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index d4a042344728..ee08010d2c4f 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -270,10 +270,7 @@ struct igb_adapter {
 
         /* to not mess up cache alignment, always add to the bottom */
         unsigned long state;
-        unsigned int msi_enabled;
-#ifdef CONFIG_DCA
-        unsigned int dca_enabled;
-#endif
+        unsigned int flags;
         u32 eeprom_wol;
 
         /* for ioport free */
@@ -285,6 +282,14 @@ struct igb_adapter {
 #endif /* CONFIG_NETDEVICES_MULTIQUEUE */
 };
 
+#define IGB_FLAG_HAS_MSI           (1 << 0)
+#define IGB_FLAG_MSI_ENABLE        (1 << 1)
+#define IGB_FLAG_HAS_DCA           (1 << 2)
+#define IGB_FLAG_DCA_ENABLED       (1 << 3)
+#define IGB_FLAG_IN_NETPOLL        (1 << 5)
+#define IGB_FLAG_QUAD_PORT_A       (1 << 6)
+#define IGB_FLAG_NEED_CTX_IDX      (1 << 7)
+
 enum e1000_state_t {
         __IGB_TESTING,
         __IGB_RESETTING,
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index e27d5a533b4f..ef209b5cd390 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -1097,7 +1097,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
         if (adapter->msix_entries) {
                 /* NOTE: we don't test MSI-X interrupts here, yet */
                 return 0;
-        } else if (adapter->msi_enabled) {
+        } else if (adapter->flags & IGB_FLAG_HAS_MSI) {
                 shared_int = false;
                 if (request_irq(irq, &igb_test_intr, 0, netdev->name, netdev)) {
                         *data = 1;
@@ -1727,7 +1727,6 @@ static int igb_wol_exclusion(struct igb_adapter *adapter,
 
         switch (hw->device_id) {
         case E1000_DEV_ID_82575GB_QUAD_COPPER:
-        case E1000_DEV_ID_82576_QUAD_COPPER:
                 /* WoL not supported */
                 wol->supported = 0;
                 break;
@@ -1742,6 +1741,15 @@ static int igb_wol_exclusion(struct igb_adapter *adapter,
                 /* return success for non excluded adapter ports */
                 retval = 0;
                 break;
+        case E1000_DEV_ID_82576_QUAD_COPPER:
+                /* quad port adapters only support WoL on port A */
+                if (!(adapter->flags & IGB_FLAG_QUAD_PORT_A)) {
+                        wol->supported = 0;
+                        break;
+                }
+                /* return success for non excluded adapter ports */
+                retval = 0;
+                break;
         default:
                 /* dual port cards only support WoL on port A from now on
                  * unless it was enabled in the eeprom for port B
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index ba043c4e1ca2..68a4fef3df9a 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -53,7 +53,6 @@ static const char igb_driver_string[] =
         "Intel(R) Gigabit Ethernet Network Driver";
 static const char igb_copyright[] = "Copyright (c) 2008 Intel Corporation.";
 
-
 static const struct e1000_info *igb_info_tbl[] = {
         [board_82575] = &e1000_82575_info,
 };
@@ -170,6 +169,8 @@ static struct pci_driver igb_driver = {
         .err_handler = &igb_err_handler
 };
 
+static int global_quad_port_a; /* global quad port a indication */
+
 MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
 MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
 MODULE_LICENSE("GPL");
@@ -201,6 +202,8 @@ static int __init igb_init_module(void)
 
         printk(KERN_INFO "%s\n", igb_copyright);
 
+        global_quad_port_a = 0;
+
         ret = pci_register_driver(&igb_driver);
 #ifdef CONFIG_DCA
         dca_register_notify(&dca_notifier);
@@ -471,7 +474,7 @@ static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
                 pci_disable_msix(adapter->pdev);
                 kfree(adapter->msix_entries);
                 adapter->msix_entries = NULL;
-        } else if (adapter->msi_enabled)
+        } else if (adapter->flags & IGB_FLAG_HAS_MSI)
                 pci_disable_msi(adapter->pdev);
         return;
 }
@@ -510,7 +513,7 @@ msi_only:
         adapter->num_rx_queues = 1;
         adapter->num_tx_queues = 1;
         if (!pci_enable_msi(adapter->pdev))
-                adapter->msi_enabled = 1;
+                adapter->flags |= IGB_FLAG_HAS_MSI;
 
 #ifdef CONFIG_NETDEVICES_MULTIQUEUE
         /* Notify the stack of the (possibly) reduced Tx Queue count. */
@@ -538,7 +541,7 @@ static int igb_request_irq(struct igb_adapter *adapter)
                 /* fall back to MSI */
                 igb_reset_interrupt_capability(adapter);
                 if (!pci_enable_msi(adapter->pdev))
-                        adapter->msi_enabled = 1;
+                        adapter->flags |= IGB_FLAG_HAS_MSI;
                 igb_free_all_tx_resources(adapter);
                 igb_free_all_rx_resources(adapter);
                 adapter->num_rx_queues = 1;
@@ -557,14 +560,14 @@ static int igb_request_irq(struct igb_adapter *adapter)
                 }
         }
 
-        if (adapter->msi_enabled) {
+        if (adapter->flags & IGB_FLAG_HAS_MSI) {
                 err = request_irq(adapter->pdev->irq, &igb_intr_msi, 0,
                                   netdev->name, netdev);
                 if (!err)
                         goto request_done;
                 /* fall back to legacy interrupts */
                 igb_reset_interrupt_capability(adapter);
-                adapter->msi_enabled = 0;
+                adapter->flags &= ~IGB_FLAG_HAS_MSI;
         }
 
         err = request_irq(adapter->pdev->irq, &igb_intr, IRQF_SHARED,
@@ -1097,6 +1100,17 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 
         igb_get_bus_info_pcie(hw);
 
+        /* set flags */
+        switch (hw->mac.type) {
+        case e1000_82576:
+        case e1000_82575:
+                adapter->flags |= IGB_FLAG_HAS_DCA;
+                adapter->flags |= IGB_FLAG_NEED_CTX_IDX;
+                break;
+        default:
+                break;
+        }
+
         hw->phy.autoneg_wait_to_complete = false;
         hw->mac.adaptive_ifs = true;
 
@@ -1209,7 +1223,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
          * lan on a particular port */
         switch (pdev->device) {
         case E1000_DEV_ID_82575GB_QUAD_COPPER:
-        case E1000_DEV_ID_82576_QUAD_COPPER:
                 adapter->eeprom_wol = 0;
                 break;
         case E1000_DEV_ID_82575EB_FIBER_SERDES:
@@ -1220,6 +1233,16 @@ static int __devinit igb_probe(struct pci_dev *pdev,
                 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
                         adapter->eeprom_wol = 0;
                 break;
+        case E1000_DEV_ID_82576_QUAD_COPPER:
+                /* if quad port adapter, disable WoL on all but port A */
+                if (global_quad_port_a != 0)
+                        adapter->eeprom_wol = 0;
+                else
+                        adapter->flags |= IGB_FLAG_QUAD_PORT_A;
+                /* Reset for multiple quad port adapters */
+                if (++global_quad_port_a == 4)
+                        global_quad_port_a = 0;
+                break;
         }
 
         /* initialize the wol settings based on the eeprom settings */
@@ -1246,8 +1269,9 @@ static int __devinit igb_probe(struct pci_dev *pdev,
                 goto err_register;
 
 #ifdef CONFIG_DCA
-        if (dca_add_requester(&pdev->dev) == 0) {
-                adapter->dca_enabled = true;
+        if ((adapter->flags & IGB_FLAG_HAS_DCA) &&
+            (dca_add_requester(&pdev->dev) == 0)) {
+                adapter->flags |= IGB_FLAG_DCA_ENABLED;
                 dev_info(&pdev->dev, "DCA enabled\n");
                 /* Always use CB2 mode, difference is masked
                  * in the CB driver. */
@@ -1276,7 +1300,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
         dev_info(&pdev->dev,
                  "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
                  adapter->msix_entries ? "MSI-X" :
-                 adapter->msi_enabled ? "MSI" : "legacy",
+                 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
                  adapter->num_rx_queues, adapter->num_tx_queues);
 
         return 0;
@@ -1330,10 +1354,10 @@ static void __devexit igb_remove(struct pci_dev *pdev)
         flush_scheduled_work();
 
 #ifdef CONFIG_DCA
-        if (adapter->dca_enabled) {
+        if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
                 dev_info(&pdev->dev, "DCA disabled\n");
                 dca_remove_requester(&pdev->dev);
-                adapter->dca_enabled = false;
+                adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
                 wr32(E1000_DCA_CTRL, 1);
         }
 #endif
@@ -2650,9 +2674,9 @@ static inline int igb_tso_adv(struct igb_adapter *adapter,
         mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
         mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);
 
-        /* Context index must be unique per ring. Luckily, so is the interrupt
-         * mask value. */
-        mss_l4len_idx |= tx_ring->eims_value >> 4;
+        /* Context index must be unique per ring. */
+        if (adapter->flags & IGB_FLAG_NEED_CTX_IDX)
+                mss_l4len_idx |= tx_ring->queue_index << 4;
 
         context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
         context_desc->seqnum_seed = 0;
@@ -2716,8 +2740,9 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
 
         context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
         context_desc->seqnum_seed = 0;
-        context_desc->mss_l4len_idx =
-                cpu_to_le32(tx_ring->queue_index << 4);
+        if (adapter->flags & IGB_FLAG_NEED_CTX_IDX)
+                context_desc->mss_l4len_idx =
+                        cpu_to_le32(tx_ring->queue_index << 4);
 
         buffer_info->time_stamp = jiffies;
         buffer_info->dma = 0;
@@ -2818,8 +2843,9 @@ static inline void igb_tx_queue_adv(struct igb_adapter *adapter,
                 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
         }
 
-        if (tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_TSO |
-                        IGB_TX_FLAGS_VLAN))
+        if ((adapter->flags & IGB_FLAG_NEED_CTX_IDX) &&
+            (tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_TSO |
+             IGB_TX_FLAGS_VLAN)))
                 olinfo_status |= tx_ring->queue_index << 4;
 
         olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);
@@ -3255,7 +3281,7 @@ static irqreturn_t igb_msix_tx(int irq, void *data)
         if (!tx_ring->itr_val)
                 wr32(E1000_EIMC, tx_ring->eims_value);
 #ifdef CONFIG_DCA
-        if (adapter->dca_enabled)
+        if (adapter->flags & IGB_FLAG_DCA_ENABLED)
                 igb_update_tx_dca(tx_ring);
 #endif
         tx_ring->total_bytes = 0;
@@ -3292,7 +3318,7 @@ static irqreturn_t igb_msix_rx(int irq, void *data)
                 __netif_rx_schedule(adapter->netdev, &rx_ring->napi);
 
 #ifdef CONFIG_DCA
-        if (adapter->dca_enabled)
+        if (adapter->flags & IGB_FLAG_DCA_ENABLED)
                 igb_update_rx_dca(rx_ring);
 #endif
         return IRQ_HANDLED;
@@ -3355,7 +3381,7 @@ static void igb_setup_dca(struct igb_adapter *adapter)
 {
         int i;
 
-        if (!(adapter->dca_enabled))
+        if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
                 return;
 
         for (i = 0; i < adapter->num_tx_queues; i++) {
@@ -3375,12 +3401,15 @@ static int __igb_notify_dca(struct device *dev, void *data)
         struct e1000_hw *hw = &adapter->hw;
         unsigned long event = *(unsigned long *)data;
 
+        if (!(adapter->flags & IGB_FLAG_HAS_DCA))
+                goto out;
+
         switch (event) {
         case DCA_PROVIDER_ADD:
                 /* if already enabled, don't do it again */
-                if (adapter->dca_enabled)
+                if (adapter->flags & IGB_FLAG_DCA_ENABLED)
                         break;
-                adapter->dca_enabled = true;
+                adapter->flags |= IGB_FLAG_DCA_ENABLED;
                 /* Always use CB2 mode, difference is masked
                  * in the CB driver. */
                 wr32(E1000_DCA_CTRL, 2);
@@ -3391,17 +3420,17 @@ static int __igb_notify_dca(struct device *dev, void *data)
                 }
                 /* Fall Through since DCA is disabled. */
         case DCA_PROVIDER_REMOVE:
-                if (adapter->dca_enabled) {
+                if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
                         /* without this a class_device is left
                          * hanging around in the sysfs model */
                         dca_remove_requester(dev);
                         dev_info(&adapter->pdev->dev, "DCA disabled\n");
-                        adapter->dca_enabled = false;
+                        adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
                         wr32(E1000_DCA_CTRL, 1);
                 }
                 break;
         }
-
+out:
         return 0;
 }
 
@@ -3507,13 +3536,13 @@ static int igb_poll(struct napi_struct *napi, int budget)
 
         /* this poll routine only supports one tx and one rx queue */
 #ifdef CONFIG_DCA
-        if (adapter->dca_enabled)
+        if (adapter->flags & IGB_FLAG_DCA_ENABLED)
                 igb_update_tx_dca(&adapter->tx_ring[0]);
 #endif
         tx_clean_complete = igb_clean_tx_irq(&adapter->tx_ring[0]);
 
 #ifdef CONFIG_DCA
-        if (adapter->dca_enabled)
+        if (adapter->flags & IGB_FLAG_DCA_ENABLED)
                 igb_update_rx_dca(&adapter->rx_ring[0]);
 #endif
         igb_clean_rx_irq_adv(&adapter->rx_ring[0], &work_done, budget);
@@ -3545,7 +3574,7 @@ static int igb_clean_rx_ring_msix(struct napi_struct *napi, int budget)
                 goto quit_polling;
 
 #ifdef CONFIG_DCA
-        if (adapter->dca_enabled)
+        if (adapter->flags & IGB_FLAG_DCA_ENABLED)
                 igb_update_rx_dca(rx_ring);
 #endif
         igb_clean_rx_irq_adv(rx_ring, &work_done, budget);
@@ -4350,6 +4379,8 @@ static void igb_netpoll(struct net_device *netdev)
         int work_done = 0;
 
         igb_irq_disable(adapter);
+        adapter->flags |= IGB_FLAG_IN_NETPOLL;
+
         for (i = 0; i < adapter->num_tx_queues; i++)
                 igb_clean_tx_irq(&adapter->tx_ring[i]);
 
@@ -4358,6 +4389,7 @@ static void igb_netpoll(struct net_device *netdev)
                                              &work_done,
                                              adapter->rx_ring[i].napi.weight);
 
+        adapter->flags &= ~IGB_FLAG_IN_NETPOLL;
         igb_irq_enable(adapter);
 }
 #endif /* CONFIG_NET_POLL_CONTROLLER */