Diffstat (limited to 'drivers/net/e1000/e1000_main.c')
-rw-r--r--	drivers/net/e1000/e1000_main.c	131
1 file changed, 60 insertions(+), 71 deletions(-)
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 0991648c53dc..757d02f443a5 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -169,21 +169,21 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
 static int e1000_set_mac(struct net_device *netdev, void *p);
 static irqreturn_t e1000_intr(int irq, void *data);
 static irqreturn_t e1000_intr_msi(int irq, void *data);
-static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter,
+static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
                                     struct e1000_tx_ring *tx_ring);
 #ifdef CONFIG_E1000_NAPI
 static int e1000_clean(struct napi_struct *napi, int budget);
-static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter,
+static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
                                     struct e1000_rx_ring *rx_ring,
                                     int *work_done, int work_to_do);
-static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
+static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
                                        struct e1000_rx_ring *rx_ring,
                                        int *work_done, int work_to_do);
 #else
-static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter,
+static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
                                     struct e1000_rx_ring *rx_ring);
-static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
+static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
                                        struct e1000_rx_ring *rx_ring);
 #endif
 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
                                    struct e1000_rx_ring *rx_ring,
@@ -347,7 +347,6 @@ static void e1000_free_irq(struct e1000_adapter *adapter)
 static void
 e1000_irq_disable(struct e1000_adapter *adapter)
 {
-        atomic_inc(&adapter->irq_sem);
         E1000_WRITE_REG(&adapter->hw, IMC, ~0);
         E1000_WRITE_FLUSH(&adapter->hw);
         synchronize_irq(adapter->pdev->irq);
@@ -361,10 +360,8 @@ e1000_irq_disable(struct e1000_adapter *adapter)
 static void
 e1000_irq_enable(struct e1000_adapter *adapter)
 {
-        if (likely(atomic_dec_and_test(&adapter->irq_sem))) {
-                E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK);
-                E1000_WRITE_FLUSH(&adapter->hw);
-        }
+        E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK);
+        E1000_WRITE_FLUSH(&adapter->hw);
 }
 
 static void
@@ -584,7 +581,7 @@ void e1000_power_up_phy(struct e1000_adapter *adapter)
 static void e1000_power_down_phy(struct e1000_adapter *adapter)
 {
         /* Power down the PHY so no link is implied when interface is down *
-         * The PHY cannot be powered down if any of the following is TRUE *
+         * The PHY cannot be powered down if any of the following is true *
          * (a) WoL is enabled
          * (b) AMT is active
          * (c) SoL/IDER session is active */
@@ -638,7 +635,6 @@ e1000_down(struct e1000_adapter *adapter)
 
 #ifdef CONFIG_E1000_NAPI
         napi_disable(&adapter->napi);
-        atomic_set(&adapter->irq_sem, 0);
 #endif
         e1000_irq_disable(adapter);
 
@@ -673,7 +669,7 @@ e1000_reset(struct e1000_adapter *adapter)
 {
         uint32_t pba = 0, tx_space, min_tx_space, min_rx_space;
         uint16_t fc_high_water_mark = E1000_FC_HIGH_DIFF;
-        boolean_t legacy_pba_adjust = FALSE;
+        bool legacy_pba_adjust = false;
 
         /* Repartition Pba for greater than 9k mtu
          * To take effect CTRL.RST is required.
@@ -687,7 +683,7 @@ e1000_reset(struct e1000_adapter *adapter)
         case e1000_82540:
         case e1000_82541:
         case e1000_82541_rev_2:
-                legacy_pba_adjust = TRUE;
+                legacy_pba_adjust = true;
                 pba = E1000_PBA_48K;
                 break;
         case e1000_82545:
@@ -698,7 +694,7 @@ e1000_reset(struct e1000_adapter *adapter)
                 break;
         case e1000_82547:
         case e1000_82547_rev_2:
-                legacy_pba_adjust = TRUE;
+                legacy_pba_adjust = true;
                 pba = E1000_PBA_30K;
                 break;
         case e1000_82571:
@@ -716,7 +712,7 @@ e1000_reset(struct e1000_adapter *adapter)
                 break;
         }
 
-        if (legacy_pba_adjust == TRUE) {
+        if (legacy_pba_adjust) {
                 if (adapter->netdev->mtu > E1000_RXBUFFER_8192)
                         pba -= 8; /* allocate more FIFO for Tx */
 
@@ -1366,15 +1362,15 @@ e1000_sw_init(struct e1000_adapter *adapter)
 
         e1000_set_media_type(hw);
 
-        hw->wait_autoneg_complete = FALSE;
-        hw->tbi_compatibility_en = TRUE;
-        hw->adaptive_ifs = TRUE;
+        hw->wait_autoneg_complete = false;
+        hw->tbi_compatibility_en = true;
+        hw->adaptive_ifs = true;
 
         /* Copper options */
 
         if (hw->media_type == e1000_media_type_copper) {
                 hw->mdix = AUTO_ALL_MODES;
-                hw->disable_polarity_correction = FALSE;
+                hw->disable_polarity_correction = false;
                 hw->master_slave = E1000_MASTER_SLAVE;
         }
 
@@ -1396,7 +1392,6 @@ e1000_sw_init(struct e1000_adapter *adapter)
 #endif
 
         /* Explicitly disable IRQ since the NIC can be in any state. */
-        atomic_set(&adapter->irq_sem, 0);
         e1000_irq_disable(adapter);
 
         spin_lock_init(&adapter->stats_lock);
@@ -1576,7 +1571,7 @@ e1000_close(struct net_device *netdev)
  * @start: address of beginning of memory
  * @len: length of memory
  **/
-static boolean_t
+static bool
 e1000_check_64k_bound(struct e1000_adapter *adapter,
                       void *start, unsigned long len)
 {
@@ -1587,10 +1582,10 @@ e1000_check_64k_bound(struct e1000_adapter *adapter,
          * write location to cross 64k boundary due to errata 23 */
         if (adapter->hw.mac_type == e1000_82545 ||
             adapter->hw.mac_type == e1000_82546) {
-                return ((begin ^ (end - 1)) >> 16) != 0 ? FALSE : TRUE;
+                return ((begin ^ (end - 1)) >> 16) != 0 ? false : true;
         }
 
-        return TRUE;
+        return true;
 }
 
 /**
@@ -2133,7 +2128,7 @@ e1000_configure_rx(struct e1000_adapter *adapter)
         /* Enable 82543 Receive Checksum Offload for TCP and UDP */
         if (hw->mac_type >= e1000_82543) {
                 rxcsum = E1000_READ_REG(hw, RXCSUM);
-                if (adapter->rx_csum == TRUE) {
+                if (adapter->rx_csum) {
                         rxcsum |= E1000_RXCSUM_TUOFL;
 
                         /* Enable 82571 IPv4 payload checksum for UDP fragments
@@ -2669,7 +2664,7 @@ e1000_watchdog(unsigned long data)
         if (link) {
                 if (!netif_carrier_ok(netdev)) {
                         uint32_t ctrl;
-                        boolean_t txb2b = 1;
+                        bool txb2b = true;
                         e1000_get_speed_and_duplex(&adapter->hw,
                                                    &adapter->link_speed,
                                                    &adapter->link_duplex);
@@ -2691,12 +2686,12 @@ e1000_watchdog(unsigned long data)
                         adapter->tx_timeout_factor = 1;
                         switch (adapter->link_speed) {
                         case SPEED_10:
-                                txb2b = 0;
+                                txb2b = false;
                                 netdev->tx_queue_len = 10;
                                 adapter->tx_timeout_factor = 8;
                                 break;
                         case SPEED_100:
-                                txb2b = 0;
+                                txb2b = false;
                                 netdev->tx_queue_len = 100;
                                 /* maybe add some timeout factor ? */
                                 break;
@@ -2704,7 +2699,7 @@ e1000_watchdog(unsigned long data)
 
                         if ((adapter->hw.mac_type == e1000_82571 ||
                              adapter->hw.mac_type == e1000_82572) &&
-                            txb2b == 0) {
+                            !txb2b) {
                                 uint32_t tarc0;
                                 tarc0 = E1000_READ_REG(&adapter->hw, TARC0);
                                 tarc0 &= ~(1 << 21);
@@ -2802,7 +2797,7 @@ e1000_watchdog(unsigned long data)
         E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_RXDMT0);
 
         /* Force detection of hung controller every watchdog period */
-        adapter->detect_tx_hung = TRUE;
+        adapter->detect_tx_hung = true;
 
         /* With 82571 controllers, LAA may be overwritten due to controller
          * reset from the other port. Set the appropriate LAA in RAR[0] */
@@ -3025,12 +3020,12 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
                 if (++i == tx_ring->count) i = 0;
                 tx_ring->next_to_use = i;
 
-                return TRUE;
+                return true;
         }
-        return FALSE;
+        return false;
 }
 
-static boolean_t
+static bool
 e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
               struct sk_buff *skb)
 {
@@ -3060,10 +3055,10 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
                 if (unlikely(++i == tx_ring->count)) i = 0;
                 tx_ring->next_to_use = i;
 
-                return TRUE;
+                return true;
         }
 
-        return FALSE;
+        return false;
 }
 
 #define E1000_MAX_TXD_PWR 12
@@ -3836,11 +3831,8 @@ e1000_intr_msi(int irq, void *data)
 #endif
         uint32_t icr = E1000_READ_REG(hw, ICR);
 
-#ifdef CONFIG_E1000_NAPI
-        /* read ICR disables interrupts using IAM, so keep up with our
-         * enable/disable accounting */
-        atomic_inc(&adapter->irq_sem);
-#endif
+        /* in NAPI mode read ICR disables interrupts using IAM */
+
         if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
                 hw->get_link_status = 1;
                 /* 80003ES2LAN workaround-- For packet buffer work-around on
@@ -3910,12 +3902,8 @@ e1000_intr(int irq, void *data)
                        !(icr & E1000_ICR_INT_ASSERTED)))
                 return IRQ_NONE;
 
-        /* Interrupt Auto-Mask...upon reading ICR,
-         * interrupts are masked. No need for the
-         * IMC write, but it does mean we should
-         * account for it ASAP. */
-        if (likely(hw->mac_type >= e1000_82571))
-                atomic_inc(&adapter->irq_sem);
+        /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
+         * need for the IMC write */
 #endif
 
         if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
@@ -3939,7 +3927,6 @@ e1000_intr(int irq, void *data)
 #ifdef CONFIG_E1000_NAPI
         if (unlikely(hw->mac_type < e1000_82571)) {
                 /* disable interrupts, without the synchronize_irq bit */
-                atomic_inc(&adapter->irq_sem);
                 E1000_WRITE_REG(hw, IMC, ~0);
                 E1000_WRITE_FLUSH(hw);
         }
@@ -3964,10 +3951,8 @@ e1000_intr(int irq, void *data)
          * in dead lock. Writing IMC forces 82547 into
          * de-assertion state.
          */
-        if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2) {
-                atomic_inc(&adapter->irq_sem);
+        if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2)
                 E1000_WRITE_REG(hw, IMC, ~0);
-        }
 
         adapter->total_tx_bytes = 0;
         adapter->total_rx_bytes = 0;
@@ -4038,7 +4023,7 @@ e1000_clean(struct napi_struct *napi, int budget)
  * @adapter: board private structure
  **/
 
-static boolean_t
+static bool
 e1000_clean_tx_irq(struct e1000_adapter *adapter,
                    struct e1000_tx_ring *tx_ring)
 {
@@ -4049,7 +4034,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
 #ifdef CONFIG_E1000_NAPI
         unsigned int count = 0;
 #endif
-        boolean_t cleaned = FALSE;
+        bool cleaned = false;
         unsigned int total_tx_bytes=0, total_tx_packets=0;
 
         i = tx_ring->next_to_clean;
@@ -4057,7 +4042,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
         eop_desc = E1000_TX_DESC(*tx_ring, eop);
 
         while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
-                for (cleaned = FALSE; !cleaned; ) {
+                for (cleaned = false; !cleaned; ) {
                         tx_desc = E1000_TX_DESC(*tx_ring, i);
                         buffer_info = &tx_ring->buffer_info[i];
                         cleaned = (i == eop);
@@ -4105,7 +4090,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
         if (adapter->detect_tx_hung) {
                 /* Detect a transmit hang in hardware, this serializes the
                  * check with the clearing of time_stamp and movement of i */
-                adapter->detect_tx_hung = FALSE;
+                adapter->detect_tx_hung = false;
                 if (tx_ring->buffer_info[eop].dma &&
                     time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
                                (adapter->tx_timeout_factor * HZ))
@@ -4200,7 +4185,7 @@ e1000_rx_checksum(struct e1000_adapter *adapter,
  * @adapter: board private structure
  **/
 
-static boolean_t
+static bool
 #ifdef CONFIG_E1000_NAPI
 e1000_clean_rx_irq(struct e1000_adapter *adapter,
                    struct e1000_rx_ring *rx_ring,
@@ -4219,7 +4204,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
         uint8_t last_byte;
         unsigned int i;
         int cleaned_count = 0;
-        boolean_t cleaned = FALSE;
+        bool cleaned = false;
         unsigned int total_rx_bytes=0, total_rx_packets=0;
 
         i = rx_ring->next_to_clean;
@@ -4247,7 +4232,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
 
                 next_buffer = &rx_ring->buffer_info[i];
 
-                cleaned = TRUE;
+                cleaned = true;
                 cleaned_count++;
                 pci_unmap_single(pdev,
                                  buffer_info->dma,
@@ -4373,7 +4358,7 @@ next_desc:
  * @adapter: board private structure
  **/
 
-static boolean_t
+static bool
 #ifdef CONFIG_E1000_NAPI
 e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
                       struct e1000_rx_ring *rx_ring,
@@ -4393,7 +4378,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
         unsigned int i, j;
         uint32_t length, staterr;
         int cleaned_count = 0;
-        boolean_t cleaned = FALSE;
+        bool cleaned = false;
         unsigned int total_rx_bytes=0, total_rx_packets=0;
 
         i = rx_ring->next_to_clean;
@@ -4420,7 +4405,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
 
                 next_buffer = &rx_ring->buffer_info[i];
 
-                cleaned = TRUE;
+                cleaned = true;
                 cleaned_count++;
                 pci_unmap_single(pdev, buffer_info->dma,
                                  buffer_info->length,
@@ -5001,7 +4986,8 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
         struct e1000_adapter *adapter = netdev_priv(netdev);
         uint32_t ctrl, rctl;
 
-        e1000_irq_disable(adapter);
+        if (!test_bit(__E1000_DOWN, &adapter->flags))
+                e1000_irq_disable(adapter);
         adapter->vlgrp = grp;
 
         if (grp) {
@@ -5038,7 +5024,8 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
                 }
         }
 
-        e1000_irq_enable(adapter);
+        if (!test_bit(__E1000_DOWN, &adapter->flags))
+                e1000_irq_enable(adapter);
 }
 
 static void
@@ -5064,9 +5051,11 @@ e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
         struct e1000_adapter *adapter = netdev_priv(netdev);
         uint32_t vfta, index;
 
-        e1000_irq_disable(adapter);
+        if (!test_bit(__E1000_DOWN, &adapter->flags))
+                e1000_irq_disable(adapter);
         vlan_group_set_device(adapter->vlgrp, vid, NULL);
-        e1000_irq_enable(adapter);
+        if (!test_bit(__E1000_DOWN, &adapter->flags))
+                e1000_irq_enable(adapter);
 
         if ((adapter->hw.mng_cookie.status &
              E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
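
Taken together, the hunks above make two mechanical changes: the driver's boolean_t/TRUE/FALSE usage becomes the kernel's bool/true/false, and the irq_sem enable/disable reference count is removed, with the paths that can race against interface teardown (the VLAN functions) checking the __E1000_DOWN flag before touching the interrupt mask instead. The snippet below is a minimal, self-contained C sketch of that second pattern only, not driver code: apart from the __E1000_DOWN name, every identifier in it is invented for illustration and stands in for the real adapter structure and register macros.

/*
 * Standalone sketch of the post-patch locking pattern: irq enable/disable
 * are unconditional, and callers guard them with a "device down" flag
 * rather than balancing an irq_sem counter.
 */
#include <stdbool.h>
#include <stdio.h>

#define __E1000_DOWN 0  /* bit index; the flag name is taken from the patch */

struct fake_adapter {
        unsigned long flags;   /* stands in for adapter->flags */
        bool irqs_enabled;     /* stands in for the IMS/IMC mask registers */
};

static bool adapter_is_down(const struct fake_adapter *a)
{
        return a->flags & (1UL << __E1000_DOWN);
}

/* After the patch these helpers do their register writes unconditionally. */
static void irq_disable(struct fake_adapter *a) { a->irqs_enabled = false; }
static void irq_enable(struct fake_adapter *a)  { a->irqs_enabled = true; }

/* Caller-side pattern from the VLAN hunks: only touch the interrupt mask
 * when the interface is not already being torn down. */
static void vlan_kill_vid(struct fake_adapter *a)
{
        if (!adapter_is_down(a))
                irq_disable(a);
        /* ... remove the VLAN id from the filter table here ... */
        if (!adapter_is_down(a))
                irq_enable(a);
}

int main(void)
{
        struct fake_adapter a = { .flags = 0, .irqs_enabled = true };
        vlan_kill_vid(&a);
        printf("interrupts enabled after VLAN update: %s\n",
               a.irqs_enabled ? "yes" : "no");
        return 0;
}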