diff options
author | Alexander Duyck <alexander.h.duyck@intel.com> | 2010-08-19 09:40:06 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2010-08-19 19:44:26 -0400 |
commit | 9e10e045f8223e09f2c70cd6849ff86803d50c88 (patch) | |
tree | f22af24923e1d62d030947a066d4d607c1fe45b5 /drivers/net/ixgbe/ixgbe_main.c | |
parent | 2f1860b8d94a4457e401895be6fc9b9ffa2c8b2c (diff) |
ixgbe: combine Rx init into ixgbe_configure_rx
The Rx init is currently split over ixgbe_configure, ixgbe_configure_rx,
and ixgbe_up_complete. Instead of leaving it split over 3 functions it is
easier to consolidate them all into ixgbe_configure_rx.
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ixgbe/ixgbe_main.c')
-rw-r--r-- | drivers/net/ixgbe/ixgbe_main.c | 152 |
1 files changed, 95 insertions, 57 deletions
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index fd2026efae88..c88ba13c1131 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c | |||
@@ -2574,6 +2574,8 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, | |||
2574 | 2574 | ||
2575 | srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; | 2575 | srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; |
2576 | srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK; | 2576 | srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK; |
2577 | if (adapter->num_vfs) | ||
2578 | srrctl |= IXGBE_SRRCTL_DROP_EN; | ||
2577 | 2579 | ||
2578 | srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) & | 2580 | srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) & |
2579 | IXGBE_SRRCTL_BSIZEHDR_MASK; | 2581 | IXGBE_SRRCTL_BSIZEHDR_MASK; |
@@ -2705,13 +2707,72 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, | |||
2705 | IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl); | 2707 | IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl); |
2706 | } | 2708 | } |
2707 | 2709 | ||
2710 | /** | ||
2711 | * ixgbe_set_uta - Set unicast filter table address | ||
2712 | * @adapter: board private structure | ||
2713 | * | ||
2714 | * The unicast table address is a register array of 32-bit registers. | ||
2715 | * The table is meant to be used in a way similar to how the MTA is used | ||
2716 | * however due to certain limitations in the hardware it is necessary to | ||
2717 | * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous | ||
2718 | * enable bit to allow vlan tag stripping when promiscuous mode is enabled | ||
2719 | **/ | ||
2720 | static void ixgbe_set_uta(struct ixgbe_adapter *adapter) | ||
2721 | { | ||
2722 | struct ixgbe_hw *hw = &adapter->hw; | ||
2723 | int i; | ||
2724 | |||
2725 | /* The UTA table only exists on 82599 hardware and newer */ | ||
2726 | if (hw->mac.type < ixgbe_mac_82599EB) | ||
2727 | return; | ||
2728 | |||
2729 | /* we only need to do this if VMDq is enabled */ | ||
2730 | if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) | ||
2731 | return; | ||
2732 | |||
2733 | for (i = 0; i < 128; i++) | ||
2734 | IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0); | ||
2735 | } | ||
2736 | |||
2737 | #define IXGBE_MAX_RX_DESC_POLL 10 | ||
2738 | static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter, | ||
2739 | struct ixgbe_ring *ring) | ||
2740 | { | ||
2741 | struct ixgbe_hw *hw = &adapter->hw; | ||
2742 | int reg_idx = ring->reg_idx; | ||
2743 | int wait_loop = IXGBE_MAX_RX_DESC_POLL; | ||
2744 | u32 rxdctl; | ||
2745 | |||
2746 | /* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */ | ||
2747 | if (hw->mac.type == ixgbe_mac_82598EB && | ||
2748 | !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) | ||
2749 | return; | ||
2750 | |||
2751 | do { | ||
2752 | msleep(1); | ||
2753 | rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); | ||
2754 | } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE)); | ||
2755 | |||
2756 | if (!wait_loop) { | ||
2757 | e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within " | ||
2758 | "the polling period\n", reg_idx); | ||
2759 | } | ||
2760 | } | ||
2761 | |||
2708 | static void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, | 2762 | static void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, |
2709 | struct ixgbe_ring *ring) | 2763 | struct ixgbe_ring *ring) |
2710 | { | 2764 | { |
2711 | struct ixgbe_hw *hw = &adapter->hw; | 2765 | struct ixgbe_hw *hw = &adapter->hw; |
2712 | u64 rdba = ring->dma; | 2766 | u64 rdba = ring->dma; |
2767 | u32 rxdctl; | ||
2713 | u16 reg_idx = ring->reg_idx; | 2768 | u16 reg_idx = ring->reg_idx; |
2714 | 2769 | ||
2770 | /* disable queue to avoid issues while updating state */ | ||
2771 | rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); | ||
2772 | IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), | ||
2773 | rxdctl & ~IXGBE_RXDCTL_ENABLE); | ||
2774 | IXGBE_WRITE_FLUSH(hw); | ||
2775 | |||
2715 | IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32))); | 2776 | IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32))); |
2716 | IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32)); | 2777 | IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32)); |
2717 | IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx), | 2778 | IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx), |
@@ -2720,6 +2781,28 @@ static void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, | |||
2720 | IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0); | 2781 | IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0); |
2721 | ring->head = IXGBE_RDH(reg_idx); | 2782 | ring->head = IXGBE_RDH(reg_idx); |
2722 | ring->tail = IXGBE_RDT(reg_idx); | 2783 | ring->tail = IXGBE_RDT(reg_idx); |
2784 | |||
2785 | ixgbe_configure_srrctl(adapter, ring); | ||
2786 | ixgbe_configure_rscctl(adapter, ring); | ||
2787 | |||
2788 | if (hw->mac.type == ixgbe_mac_82598EB) { | ||
2789 | /* | ||
2790 | * enable cache line friendly hardware writes: | ||
2791 | * PTHRESH=32 descriptors (half the internal cache), | ||
2792 | * this also removes ugly rx_no_buffer_count increment | ||
2793 | * HTHRESH=4 descriptors (to minimize latency on fetch) | ||
2794 | * WTHRESH=8 burst writeback up to two cache lines | ||
2795 | */ | ||
2796 | rxdctl &= ~0x3FFFFF; | ||
2797 | rxdctl |= 0x080420; | ||
2798 | } | ||
2799 | |||
2800 | /* enable receive descriptor ring */ | ||
2801 | rxdctl |= IXGBE_RXDCTL_ENABLE; | ||
2802 | IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); | ||
2803 | |||
2804 | ixgbe_rx_desc_queue_enable(adapter, ring); | ||
2805 | ixgbe_alloc_rx_buffers(adapter, ring, IXGBE_DESC_UNUSED(ring)); | ||
2723 | } | 2806 | } |
2724 | 2807 | ||
2725 | static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter) | 2808 | static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter) |
@@ -2908,7 +2991,6 @@ static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter) | |||
2908 | static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) | 2991 | static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) |
2909 | { | 2992 | { |
2910 | struct ixgbe_hw *hw = &adapter->hw; | 2993 | struct ixgbe_hw *hw = &adapter->hw; |
2911 | struct ixgbe_ring *rx_ring; | ||
2912 | int i; | 2994 | int i; |
2913 | u32 rxctrl; | 2995 | u32 rxctrl; |
2914 | 2996 | ||
@@ -2919,10 +3001,12 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) | |||
2919 | ixgbe_setup_psrtype(adapter); | 3001 | ixgbe_setup_psrtype(adapter); |
2920 | ixgbe_setup_rdrxctl(adapter); | 3002 | ixgbe_setup_rdrxctl(adapter); |
2921 | 3003 | ||
2922 | /* Program MRQC for the distribution of queues */ | 3004 | /* Program registers for the distribution of queues */ |
2923 | ixgbe_setup_mrqc(adapter); | 3005 | ixgbe_setup_mrqc(adapter); |
2924 | ixgbe_configure_virtualization(adapter); | 3006 | ixgbe_configure_virtualization(adapter); |
2925 | 3007 | ||
3008 | ixgbe_set_uta(adapter); | ||
3009 | |||
2926 | /* set_rx_buffer_len must be called before ring initialization */ | 3010 | /* set_rx_buffer_len must be called before ring initialization */ |
2927 | ixgbe_set_rx_buffer_len(adapter); | 3011 | ixgbe_set_rx_buffer_len(adapter); |
2928 | 3012 | ||
@@ -2930,13 +3014,16 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) | |||
2930 | * Setup the HW Rx Head and Tail Descriptor Pointers and | 3014 | * Setup the HW Rx Head and Tail Descriptor Pointers and |
2931 | * the Base and Length of the Rx Descriptor Ring | 3015 | * the Base and Length of the Rx Descriptor Ring |
2932 | */ | 3016 | */ |
2933 | for (i = 0; i < adapter->num_rx_queues; i++) { | 3017 | for (i = 0; i < adapter->num_rx_queues; i++) |
2934 | rx_ring = adapter->rx_ring[i]; | 3018 | ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]); |
2935 | ixgbe_configure_rx_ring(adapter, rx_ring); | ||
2936 | ixgbe_configure_srrctl(adapter, rx_ring); | ||
2937 | ixgbe_configure_rscctl(adapter, rx_ring); | ||
2938 | } | ||
2939 | 3019 | ||
3020 | /* disable drop enable for 82598 parts */ | ||
3021 | if (hw->mac.type == ixgbe_mac_82598EB) | ||
3022 | rxctrl |= IXGBE_RXCTRL_DMBYPS; | ||
3023 | |||
3024 | /* enable all receives */ | ||
3025 | rxctrl |= IXGBE_RXCTRL_RXEN; | ||
3026 | hw->mac.ops.enable_rx_dma(hw, rxctrl); | ||
2940 | } | 3027 | } |
2941 | 3028 | ||
2942 | static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid) | 3029 | static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid) |
@@ -3306,9 +3393,6 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter) | |||
3306 | 3393 | ||
3307 | ixgbe_configure_tx(adapter); | 3394 | ixgbe_configure_tx(adapter); |
3308 | ixgbe_configure_rx(adapter); | 3395 | ixgbe_configure_rx(adapter); |
3309 | for (i = 0; i < adapter->num_rx_queues; i++) | ||
3310 | ixgbe_alloc_rx_buffers(adapter, adapter->rx_ring[i], | ||
3311 | (adapter->rx_ring[i]->count - 1)); | ||
3312 | } | 3396 | } |
3313 | 3397 | ||
3314 | static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw) | 3398 | static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw) |
@@ -3389,28 +3473,6 @@ link_cfg_out: | |||
3389 | return ret; | 3473 | return ret; |
3390 | } | 3474 | } |
3391 | 3475 | ||
3392 | #define IXGBE_MAX_RX_DESC_POLL 10 | ||
3393 | static inline void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter, | ||
3394 | int rxr) | ||
3395 | { | ||
3396 | int j = adapter->rx_ring[rxr]->reg_idx; | ||
3397 | int k; | ||
3398 | |||
3399 | for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) { | ||
3400 | if (IXGBE_READ_REG(&adapter->hw, | ||
3401 | IXGBE_RXDCTL(j)) & IXGBE_RXDCTL_ENABLE) | ||
3402 | break; | ||
3403 | else | ||
3404 | msleep(1); | ||
3405 | } | ||
3406 | if (k >= IXGBE_MAX_RX_DESC_POLL) { | ||
3407 | e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within " | ||
3408 | "the polling period\n", rxr); | ||
3409 | } | ||
3410 | ixgbe_release_rx_desc(&adapter->hw, adapter->rx_ring[rxr], | ||
3411 | (adapter->rx_ring[rxr]->count - 1)); | ||
3412 | } | ||
3413 | |||
3414 | static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter) | 3476 | static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter) |
3415 | { | 3477 | { |
3416 | struct ixgbe_hw *hw = &adapter->hw; | 3478 | struct ixgbe_hw *hw = &adapter->hw; |
@@ -3462,35 +3524,12 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter) | |||
3462 | static int ixgbe_up_complete(struct ixgbe_adapter *adapter) | 3524 | static int ixgbe_up_complete(struct ixgbe_adapter *adapter) |
3463 | { | 3525 | { |
3464 | struct ixgbe_hw *hw = &adapter->hw; | 3526 | struct ixgbe_hw *hw = &adapter->hw; |
3465 | int i, j = 0; | ||
3466 | int num_rx_rings = adapter->num_rx_queues; | ||
3467 | int err; | 3527 | int err; |
3468 | u32 rxdctl; | ||
3469 | u32 ctrl_ext; | 3528 | u32 ctrl_ext; |
3470 | 3529 | ||
3471 | ixgbe_get_hw_control(adapter); | 3530 | ixgbe_get_hw_control(adapter); |
3472 | ixgbe_setup_gpie(adapter); | 3531 | ixgbe_setup_gpie(adapter); |
3473 | 3532 | ||
3474 | for (i = 0; i < num_rx_rings; i++) { | ||
3475 | j = adapter->rx_ring[i]->reg_idx; | ||
3476 | rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); | ||
3477 | /* enable PTHRESH=32 descriptors (half the internal cache) | ||
3478 | * and HTHRESH=0 descriptors (to minimize latency on fetch), | ||
3479 | * this also removes a pesky rx_no_buffer_count increment */ | ||
3480 | rxdctl |= 0x0020; | ||
3481 | rxdctl |= IXGBE_RXDCTL_ENABLE; | ||
3482 | IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), rxdctl); | ||
3483 | if (hw->mac.type == ixgbe_mac_82599EB) | ||
3484 | ixgbe_rx_desc_queue_enable(adapter, i); | ||
3485 | } | ||
3486 | /* enable all receives */ | ||
3487 | rxdctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); | ||
3488 | if (hw->mac.type == ixgbe_mac_82598EB) | ||
3489 | rxdctl |= (IXGBE_RXCTRL_DMBYPS | IXGBE_RXCTRL_RXEN); | ||
3490 | else | ||
3491 | rxdctl |= IXGBE_RXCTRL_RXEN; | ||
3492 | hw->mac.ops.enable_rx_dma(hw, rxdctl); | ||
3493 | |||
3494 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) | 3533 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) |
3495 | ixgbe_configure_msix(adapter); | 3534 | ixgbe_configure_msix(adapter); |
3496 | else | 3535 | else |
@@ -3505,7 +3544,6 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter) | |||
3505 | 3544 | ||
3506 | /* clear any pending interrupts, may auto mask */ | 3545 | /* clear any pending interrupts, may auto mask */ |
3507 | IXGBE_READ_REG(hw, IXGBE_EICR); | 3546 | IXGBE_READ_REG(hw, IXGBE_EICR); |
3508 | |||
3509 | ixgbe_irq_enable(adapter); | 3547 | ixgbe_irq_enable(adapter); |
3510 | 3548 | ||
3511 | /* | 3549 | /* |