author    Yi Zou <yi.zou@intel.com>               2011-01-06 09:29:56 -0500
committer David S. Miller <davem@davemloft.net>   2011-01-10 02:44:10 -0500
commit    2d39d576fad0fd4bb79a0de26fca50a4be1ffdc1
tree      12d38ef31a7d6c2a2b1e73e6564091eb9c85ce98 /drivers/net
parent    5377a4160bb65ee4dd11b4b1d081d86d56d92bff
ixgbe: make sure per Rx queue is disabled before unmapping the receive buffer
When disabling the Rx logic globally, we also want to disable the per-queue
receive logic via the per-queue Rx control register RXDCTL, so that no more
DMA happens from the packet buffer into the receive buffers associated with
the Rx ring before we start unmapping those receive buffers. The hardware may
take up to 100us before the corresponding Rx queue is actually disabled. Add
ixgbe_disable_rx_queue() for this purpose.
Signed-off-by: Yi Zou <yi.zou@intel.com>
Tested-by: Stephen Ko <stephen.s.ko@intel.com>
Tested-by: Ross Brattain <ross.b.brattain@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
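The change boils down to a bounded poll: clear the queue's enable bit, then re-read the register until the hardware reports the queue off or the wait budget runs out. Below is a minimal, compilable C sketch of that pattern against a mock register; the MOCK_* names, the read/write/delay helpers, and main() are illustrative assumptions, not ixgbe definitions (the real code is ixgbe_disable_rx_queue() in the diff below).

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* Illustrative stand-ins; the driver itself uses IXGBE_RXDCTL(reg_idx),
 * IXGBE_RXDCTL_ENABLE and IXGBE_MAX_RX_DESC_POLL (10 polls x 10us = 100us). */
#define MOCK_RXDCTL_ENABLE  0x02000000u
#define MOCK_MAX_POLL       10

static uint32_t mock_rxdctl = MOCK_RXDCTL_ENABLE;   /* fake per-queue register */

static uint32_t read_reg(void)        { return mock_rxdctl; }
static void     write_reg(uint32_t v) { mock_rxdctl = v; }

/* In the mock, the "hardware" finishes during the delay; on real silicon the
 * bit may stay set for up to ~100us after the write. */
static void delay_10us(void)          { mock_rxdctl &= ~MOCK_RXDCTL_ENABLE; }

/* Clear the enable bit, then poll until the hardware confirms the queue is
 * off or the bounded wait expires -- the same shape as ixgbe_disable_rx_queue(). */
static bool disable_rx_queue(void)
{
        int wait_loop = MOCK_MAX_POLL;
        uint32_t rxdctl = read_reg();

        rxdctl &= ~MOCK_RXDCTL_ENABLE;
        write_reg(rxdctl);              /* write back with ENABLE cleared */

        do {
                delay_10us();
                rxdctl = read_reg();
        } while (--wait_loop && (rxdctl & MOCK_RXDCTL_ENABLE));

        return wait_loop != 0;          /* false: bit never cleared in time */
}

int main(void)
{
        printf("queue disabled: %s\n", disable_rx_queue() ? "yes" : "timed out");
        return 0;
}

The read-back is the point of the exercise: returning as soon as the write is issued could leave the queue still DMA-ing into buffers that are about to be unmapped, which is exactly the race this patch closes.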
Diffstat (limited to 'drivers/net')
 drivers/net/ixgbe/ixgbe.h         |  2
 drivers/net/ixgbe/ixgbe_ethtool.c |  4
 drivers/net/ixgbe/ixgbe_main.c    | 40
 3 files changed, 39 insertions(+), 7 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 3ae30b8cb7d6..bdeaa9e06c00 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -508,6 +508,8 @@ extern void ixgbe_free_rx_resources(struct ixgbe_ring *);
 extern void ixgbe_free_tx_resources(struct ixgbe_ring *);
 extern void ixgbe_configure_rx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
 extern void ixgbe_configure_tx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
+extern void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
+                                   struct ixgbe_ring *);
 extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
 extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
 extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 23ff23e8b393..a8bab1564d07 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -1477,9 +1477,7 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
         reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
         reg_ctl &= ~IXGBE_RXCTRL_RXEN;
         IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl);
-        reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->reg_idx));
-        reg_ctl &= ~IXGBE_RXDCTL_ENABLE;
-        IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->reg_idx), reg_ctl);
+        ixgbe_disable_rx_queue(adapter, rx_ring);
 
         /* now Tx */
         reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx));
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 38ab4f3f8197..e8ae311bbbe5 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -3024,6 +3024,36 @@ static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
         }
 }
 
+void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
+                            struct ixgbe_ring *ring)
+{
+        struct ixgbe_hw *hw = &adapter->hw;
+        int wait_loop = IXGBE_MAX_RX_DESC_POLL;
+        u32 rxdctl;
+        u8 reg_idx = ring->reg_idx;
+
+        rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
+        rxdctl &= ~IXGBE_RXDCTL_ENABLE;
+
+        /* write value back with RXDCTL.ENABLE bit cleared */
+        IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
+
+        if (hw->mac.type == ixgbe_mac_82598EB &&
+            !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
+                return;
+
+        /* the hardware may take up to 100us to really disable the rx queue */
+        do {
+                udelay(10);
+                rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
+        } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
+
+        if (!wait_loop) {
+                e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within "
+                      "the polling period\n", reg_idx);
+        }
+}
+
 void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
                              struct ixgbe_ring *ring)
 {
@@ -3034,9 +3064,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
 
         /* disable queue to avoid issues while updating state */
         rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
-        IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx),
-                        rxdctl & ~IXGBE_RXDCTL_ENABLE);
-        IXGBE_WRITE_FLUSH(hw);
+        ixgbe_disable_rx_queue(adapter, ring);
 
         IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
         IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
@@ -4064,7 +4092,11 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
         rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
         IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
 
-        IXGBE_WRITE_FLUSH(hw);
+        /* disable all enabled rx queues */
+        for (i = 0; i < adapter->num_rx_queues; i++)
+                /* this call also flushes the previous write */
+                ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]);
+
         msleep(10);
 
         netif_tx_stop_all_queues(netdev);