diff options
author | Alexander Duyck <alexander.h.duyck@intel.com> | 2010-11-16 22:27:07 -0500 |
---|---|---|
committer | Jeff Kirsher <jeffrey.t.kirsher@intel.com> | 2010-11-16 22:27:07 -0500 |
commit | bf29ee6c4819a86ba0209281550b230889b8ebe6 (patch) | |
tree | a453bd2001ad464f82c86d159ef83975b0089897 /drivers/net/ixgbe | |
parent | 9d6b758f428d2ad9ca4208d5c4d4cdbd4261b0d8 (diff) |
ixgbe: cleanup unclear references to reg_idx
There are a number of places where we use the variable j to contain the
register index of the ring. Instead of using such a non-descriptive
variable name, it is better to name it reg_idx so that it is clear what
the variable contains.
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Ross Brattain <ross.b.brattain@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers/net/ixgbe')
-rw-r--r-- | drivers/net/ixgbe/ixgbe_main.c | 120 |
1 file changed, 60 insertions, 60 deletions
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 5f7929f52fe4..f2e81a21186a 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c | |||
@@ -647,8 +647,8 @@ static inline bool ixgbe_tx_xon_state(struct ixgbe_adapter *adapter, | |||
647 | #ifdef CONFIG_IXGBE_DCB | 647 | #ifdef CONFIG_IXGBE_DCB |
648 | if (adapter->dcb_cfg.pfc_mode_enable) { | 648 | if (adapter->dcb_cfg.pfc_mode_enable) { |
649 | int tc; | 649 | int tc; |
650 | int reg_idx = tx_ring->reg_idx; | ||
651 | int dcb_i = adapter->ring_feature[RING_F_DCB].indices; | 650 | int dcb_i = adapter->ring_feature[RING_F_DCB].indices; |
651 | u8 reg_idx = tx_ring->reg_idx; | ||
652 | 652 | ||
653 | switch (adapter->hw.mac.type) { | 653 | switch (adapter->hw.mac.type) { |
654 | case ixgbe_mac_82598EB: | 654 | case ixgbe_mac_82598EB: |
@@ -1422,7 +1422,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *, int); | |||
1422 | static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) | 1422 | static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) |
1423 | { | 1423 | { |
1424 | struct ixgbe_q_vector *q_vector; | 1424 | struct ixgbe_q_vector *q_vector; |
1425 | int i, j, q_vectors, v_idx, r_idx; | 1425 | int i, q_vectors, v_idx, r_idx; |
1426 | u32 mask; | 1426 | u32 mask; |
1427 | 1427 | ||
1428 | q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; | 1428 | q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; |
@@ -1438,8 +1438,8 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) | |||
1438 | adapter->num_rx_queues); | 1438 | adapter->num_rx_queues); |
1439 | 1439 | ||
1440 | for (i = 0; i < q_vector->rxr_count; i++) { | 1440 | for (i = 0; i < q_vector->rxr_count; i++) { |
1441 | j = adapter->rx_ring[r_idx]->reg_idx; | 1441 | u8 reg_idx = adapter->rx_ring[r_idx]->reg_idx; |
1442 | ixgbe_set_ivar(adapter, 0, j, v_idx); | 1442 | ixgbe_set_ivar(adapter, 0, reg_idx, v_idx); |
1443 | r_idx = find_next_bit(q_vector->rxr_idx, | 1443 | r_idx = find_next_bit(q_vector->rxr_idx, |
1444 | adapter->num_rx_queues, | 1444 | adapter->num_rx_queues, |
1445 | r_idx + 1); | 1445 | r_idx + 1); |
@@ -1448,8 +1448,8 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) | |||
1448 | adapter->num_tx_queues); | 1448 | adapter->num_tx_queues); |
1449 | 1449 | ||
1450 | for (i = 0; i < q_vector->txr_count; i++) { | 1450 | for (i = 0; i < q_vector->txr_count; i++) { |
1451 | j = adapter->tx_ring[r_idx]->reg_idx; | 1451 | u8 reg_idx = adapter->tx_ring[r_idx]->reg_idx; |
1452 | ixgbe_set_ivar(adapter, 1, j, v_idx); | 1452 | ixgbe_set_ivar(adapter, 1, reg_idx, v_idx); |
1453 | r_idx = find_next_bit(q_vector->txr_idx, | 1453 | r_idx = find_next_bit(q_vector->txr_idx, |
1454 | adapter->num_tx_queues, | 1454 | adapter->num_tx_queues, |
1455 | r_idx + 1); | 1455 | r_idx + 1); |
@@ -2555,7 +2555,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, | |||
2555 | u64 tdba = ring->dma; | 2555 | u64 tdba = ring->dma; |
2556 | int wait_loop = 10; | 2556 | int wait_loop = 10; |
2557 | u32 txdctl; | 2557 | u32 txdctl; |
2558 | u16 reg_idx = ring->reg_idx; | 2558 | u8 reg_idx = ring->reg_idx; |
2559 | 2559 | ||
2560 | /* disable queue to avoid issues while updating state */ | 2560 | /* disable queue to avoid issues while updating state */ |
2561 | txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx)); | 2561 | txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx)); |
@@ -2684,13 +2684,13 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, | |||
2684 | struct ixgbe_ring *rx_ring) | 2684 | struct ixgbe_ring *rx_ring) |
2685 | { | 2685 | { |
2686 | u32 srrctl; | 2686 | u32 srrctl; |
2687 | int index = rx_ring->reg_idx; | 2687 | u8 reg_idx = rx_ring->reg_idx; |
2688 | 2688 | ||
2689 | switch (adapter->hw.mac.type) { | 2689 | switch (adapter->hw.mac.type) { |
2690 | case ixgbe_mac_82598EB: { | 2690 | case ixgbe_mac_82598EB: { |
2691 | struct ixgbe_ring_feature *feature = adapter->ring_feature; | 2691 | struct ixgbe_ring_feature *feature = adapter->ring_feature; |
2692 | const int mask = feature[RING_F_RSS].mask; | 2692 | const int mask = feature[RING_F_RSS].mask; |
2693 | index = index & mask; | 2693 | reg_idx = reg_idx & mask; |
2694 | } | 2694 | } |
2695 | break; | 2695 | break; |
2696 | case ixgbe_mac_82599EB: | 2696 | case ixgbe_mac_82599EB: |
@@ -2698,7 +2698,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, | |||
2698 | break; | 2698 | break; |
2699 | } | 2699 | } |
2700 | 2700 | ||
2701 | srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(index)); | 2701 | srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx)); |
2702 | 2702 | ||
2703 | srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; | 2703 | srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; |
2704 | srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK; | 2704 | srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK; |
@@ -2721,7 +2721,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, | |||
2721 | srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; | 2721 | srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; |
2722 | } | 2722 | } |
2723 | 2723 | ||
2724 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl); | 2724 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx), srrctl); |
2725 | } | 2725 | } |
2726 | 2726 | ||
2727 | static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) | 2727 | static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) |
@@ -2801,7 +2801,7 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, | |||
2801 | struct ixgbe_hw *hw = &adapter->hw; | 2801 | struct ixgbe_hw *hw = &adapter->hw; |
2802 | u32 rscctrl; | 2802 | u32 rscctrl; |
2803 | int rx_buf_len; | 2803 | int rx_buf_len; |
2804 | u16 reg_idx = ring->reg_idx; | 2804 | u8 reg_idx = ring->reg_idx; |
2805 | 2805 | ||
2806 | if (!ring_is_rsc_enabled(ring)) | 2806 | if (!ring_is_rsc_enabled(ring)) |
2807 | return; | 2807 | return; |
@@ -2867,9 +2867,9 @@ static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter, | |||
2867 | struct ixgbe_ring *ring) | 2867 | struct ixgbe_ring *ring) |
2868 | { | 2868 | { |
2869 | struct ixgbe_hw *hw = &adapter->hw; | 2869 | struct ixgbe_hw *hw = &adapter->hw; |
2870 | int reg_idx = ring->reg_idx; | ||
2871 | int wait_loop = IXGBE_MAX_RX_DESC_POLL; | 2870 | int wait_loop = IXGBE_MAX_RX_DESC_POLL; |
2872 | u32 rxdctl; | 2871 | u32 rxdctl; |
2872 | u8 reg_idx = ring->reg_idx; | ||
2873 | 2873 | ||
2874 | /* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */ | 2874 | /* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */ |
2875 | if (hw->mac.type == ixgbe_mac_82598EB && | 2875 | if (hw->mac.type == ixgbe_mac_82598EB && |
@@ -2893,7 +2893,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, | |||
2893 | struct ixgbe_hw *hw = &adapter->hw; | 2893 | struct ixgbe_hw *hw = &adapter->hw; |
2894 | u64 rdba = ring->dma; | 2894 | u64 rdba = ring->dma; |
2895 | u32 rxdctl; | 2895 | u32 rxdctl; |
2896 | u16 reg_idx = ring->reg_idx; | 2896 | u8 reg_idx = ring->reg_idx; |
2897 | 2897 | ||
2898 | /* disable queue to avoid issues while updating state */ | 2898 | /* disable queue to avoid issues while updating state */ |
2899 | rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); | 2899 | rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); |
@@ -3894,7 +3894,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter) | |||
3894 | struct ixgbe_hw *hw = &adapter->hw; | 3894 | struct ixgbe_hw *hw = &adapter->hw; |
3895 | u32 rxctrl; | 3895 | u32 rxctrl; |
3896 | u32 txdctl; | 3896 | u32 txdctl; |
3897 | int i, j; | 3897 | int i; |
3898 | int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; | 3898 | int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; |
3899 | 3899 | ||
3900 | /* signal that we are down to the interrupt handler */ | 3900 | /* signal that we are down to the interrupt handler */ |
@@ -3952,9 +3952,9 @@ void ixgbe_down(struct ixgbe_adapter *adapter) | |||
3952 | 3952 | ||
3953 | /* disable transmits in the hardware now that interrupts are off */ | 3953 | /* disable transmits in the hardware now that interrupts are off */ |
3954 | for (i = 0; i < adapter->num_tx_queues; i++) { | 3954 | for (i = 0; i < adapter->num_tx_queues; i++) { |
3955 | j = adapter->tx_ring[i]->reg_idx; | 3955 | u8 reg_idx = adapter->tx_ring[i]->reg_idx; |
3956 | txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j)); | 3956 | txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx)); |
3957 | IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), | 3957 | IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), |
3958 | (txdctl & ~IXGBE_TXDCTL_ENABLE)); | 3958 | (txdctl & ~IXGBE_TXDCTL_ENABLE)); |
3959 | } | 3959 | } |
3960 | /* Disable the Tx DMA engine on 82599 */ | 3960 | /* Disable the Tx DMA engine on 82599 */ |
@@ -4420,55 +4420,55 @@ static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter) | |||
4420 | */ | 4420 | */ |
4421 | static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter) | 4421 | static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter) |
4422 | { | 4422 | { |
4423 | int i, fcoe_rx_i = 0, fcoe_tx_i = 0; | ||
4424 | bool ret = false; | ||
4425 | struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; | 4423 | struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; |
4424 | int i; | ||
4425 | u8 fcoe_rx_i = 0, fcoe_tx_i = 0; | ||
4426 | |||
4427 | if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) | ||
4428 | return false; | ||
4426 | 4429 | ||
4427 | if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { | ||
4428 | #ifdef CONFIG_IXGBE_DCB | 4430 | #ifdef CONFIG_IXGBE_DCB |
4429 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { | 4431 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { |
4430 | struct ixgbe_fcoe *fcoe = &adapter->fcoe; | 4432 | struct ixgbe_fcoe *fcoe = &adapter->fcoe; |
4431 | 4433 | ||
4432 | ixgbe_cache_ring_dcb(adapter); | 4434 | ixgbe_cache_ring_dcb(adapter); |
4433 | /* find out queues in TC for FCoE */ | 4435 | /* find out queues in TC for FCoE */ |
4434 | fcoe_rx_i = adapter->rx_ring[fcoe->tc]->reg_idx + 1; | 4436 | fcoe_rx_i = adapter->rx_ring[fcoe->tc]->reg_idx + 1; |
4435 | fcoe_tx_i = adapter->tx_ring[fcoe->tc]->reg_idx + 1; | 4437 | fcoe_tx_i = adapter->tx_ring[fcoe->tc]->reg_idx + 1; |
4436 | /* | 4438 | /* |
4437 | * In 82599, the number of Tx queues for each traffic | 4439 | * In 82599, the number of Tx queues for each traffic |
4438 | * class for both 8-TC and 4-TC modes are: | 4440 | * class for both 8-TC and 4-TC modes are: |
4439 | * TCs : TC0 TC1 TC2 TC3 TC4 TC5 TC6 TC7 | 4441 | * TCs : TC0 TC1 TC2 TC3 TC4 TC5 TC6 TC7 |
4440 | * 8 TCs: 32 32 16 16 8 8 8 8 | 4442 | * 8 TCs: 32 32 16 16 8 8 8 8 |
4441 | * 4 TCs: 64 64 32 32 | 4443 | * 4 TCs: 64 64 32 32 |
4442 | * We have max 8 queues for FCoE, where 8 the is | 4444 | * We have max 8 queues for FCoE, where 8 the is |
4443 | * FCoE redirection table size. If TC for FCoE is | 4445 | * FCoE redirection table size. If TC for FCoE is |
4444 | * less than or equal to TC3, we have enough queues | 4446 | * less than or equal to TC3, we have enough queues |
4445 | * to add max of 8 queues for FCoE, so we start FCoE | 4447 | * to add max of 8 queues for FCoE, so we start FCoE |
4446 | * tx descriptor from the next one, i.e., reg_idx + 1. | 4448 | * Tx queue from the next one, i.e., reg_idx + 1. |
4447 | * If TC for FCoE is above TC3, implying 8 TC mode, | 4449 | * If TC for FCoE is above TC3, implying 8 TC mode, |
4448 | * and we need 8 for FCoE, we have to take all queues | 4450 | * and we need 8 for FCoE, we have to take all queues |
4449 | * in that traffic class for FCoE. | 4451 | * in that traffic class for FCoE. |
4450 | */ | 4452 | */ |
4451 | if ((f->indices == IXGBE_FCRETA_SIZE) && (fcoe->tc > 3)) | 4453 | if ((f->indices == IXGBE_FCRETA_SIZE) && (fcoe->tc > 3)) |
4452 | fcoe_tx_i--; | 4454 | fcoe_tx_i--; |
4453 | } | 4455 | } |
4454 | #endif /* CONFIG_IXGBE_DCB */ | 4456 | #endif /* CONFIG_IXGBE_DCB */ |
4455 | if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { | 4457 | if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { |
4456 | if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) || | 4458 | if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) || |
4457 | (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) | 4459 | (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) |
4458 | ixgbe_cache_ring_fdir(adapter); | 4460 | ixgbe_cache_ring_fdir(adapter); |
4459 | else | 4461 | else |
4460 | ixgbe_cache_ring_rss(adapter); | 4462 | ixgbe_cache_ring_rss(adapter); |
4461 | 4463 | ||
4462 | fcoe_rx_i = f->mask; | 4464 | fcoe_rx_i = f->mask; |
4463 | fcoe_tx_i = f->mask; | 4465 | fcoe_tx_i = f->mask; |
4464 | } | ||
4465 | for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) { | ||
4466 | adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i; | ||
4467 | adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i; | ||
4468 | } | ||
4469 | ret = true; | ||
4470 | } | 4466 | } |
4471 | return ret; | 4467 | for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) { |
4468 | adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i; | ||
4469 | adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i; | ||
4470 | } | ||
4471 | return true; | ||
4472 | } | 4472 | } |
4473 | 4473 | ||
4474 | #endif /* IXGBE_FCOE */ | 4474 | #endif /* IXGBE_FCOE */ |