author		Alexander Duyck <alexander.h.duyck@intel.com>	2010-08-19 09:39:43 -0400
committer	David S. Miller <davem@davemloft.net>	2010-08-19 19:44:25 -0400
commit		2f1860b8d94a4457e401895be6fc9b9ffa2c8b2c (patch)
tree		42500d2c37b4328df2ed8aad69a3591b4f3e5068
parent		a34bcfffae8ebbba9dcbacbc3de718cca66689dd (diff)
ixgbe: pull all Tx init into ixgbe_configure_tx
The Tx init was spread out over ixgbe_configure, ixgbe_configure_tx, and
ixgbe_up_complete. This change combines all of that into the
ixgbe_configure_tx function in order to simplify the Tx init path.
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	drivers/net/ixgbe/ixgbe_main.c | 91
1 file changed, 52 insertions(+), 39 deletions(-)
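
For orientation before the hunks: after this patch, the whole Tx bring-up order lives in ixgbe_configure_tx(), backed by the per-ring helper ixgbe_configure_tx_ring(). A minimal sketch of the resulting path, reconstructed from the diff below (not verbatim driver source; the full ring programming is elided):

	static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
	{
		struct ixgbe_hw *hw = &adapter->hw;
		u32 dmatxctl, i;

		/* Tx multiqueue (MTQC) setup now happens first */
		ixgbe_setup_mtqc(adapter);

		if (hw->mac.type != ixgbe_mac_82598EB) {
			/* global Tx DMA enable before any queue is enabled */
			dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
			IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
					dmatxctl | IXGBE_DMATXCTL_TE);
		}

		/* each ring: disable queue, program base/thresholds, set
		 * __IXGBE_FDIR_INIT_DONE, enable, then poll TXDCTL.ENABLE */
		for (i = 0; i < adapter->num_tx_queues; i++)
			ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]);
	}
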
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 0b235221f14..fd2026efae8 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -2436,8 +2436,16 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
 {
 	struct ixgbe_hw *hw = &adapter->hw;
 	u64 tdba = ring->dma;
+	int wait_loop = 10;
+	u32 txdctl;
 	u16 reg_idx = ring->reg_idx;
 
+	/* disable queue to avoid issues while updating state */
+	txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
+	IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx),
+			txdctl & ~IXGBE_TXDCTL_ENABLE);
+	IXGBE_WRITE_FLUSH(hw);
+
 	IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx),
 			(tdba & DMA_BIT_MASK(32)));
 	IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32));
@@ -2448,6 +2456,38 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
 	ring->head = IXGBE_TDH(reg_idx);
 	ring->tail = IXGBE_TDT(reg_idx);
 
+	/* configure fetching thresholds */
+	if (adapter->rx_itr_setting == 0) {
+		/* cannot set wthresh when itr==0 */
+		txdctl &= ~0x007F0000;
+	} else {
+		/* enable WTHRESH=8 descriptors, to encourage burst writeback */
+		txdctl |= (8 << 16);
+	}
+	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+		/* PThresh workaround for Tx hang with DFP enabled. */
+		txdctl |= 32;
+	}
+
+	/* reinitialize flowdirector state */
+	set_bit(__IXGBE_FDIR_INIT_DONE, &ring->reinit_state);
+
+	/* enable queue */
+	txdctl |= IXGBE_TXDCTL_ENABLE;
+	IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);
+
+	/* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */
+	if (hw->mac.type == ixgbe_mac_82598EB &&
+	    !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
+		return;
+
+	/* poll to verify queue is enabled */
+	do {
+		msleep(1);
+		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
+	} while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
+	if (!wait_loop)
+		e_err(drv, "Could not enable Tx Queue %d\n", reg_idx);
 }
 
 static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
@@ -2497,13 +2537,22 @@ static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
  **/
 static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
 {
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 dmatxctl;
 	u32 i;
 
+	ixgbe_setup_mtqc(adapter);
+
+	if (hw->mac.type != ixgbe_mac_82598EB) {
+		/* DMATXCTL.EN must be before Tx queues are enabled */
+		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
+		dmatxctl |= IXGBE_DMATXCTL_TE;
+		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
+	}
+
 	/* Setup the HW Tx Head and Tail descriptor pointers */
 	for (i = 0; i < adapter->num_tx_queues; i++)
 		ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]);
-
-	ixgbe_setup_mtqc(adapter);
 }
 
 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
@@ -3416,44 +3465,12 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
 	int i, j = 0;
 	int num_rx_rings = adapter->num_rx_queues;
 	int err;
-	u32 txdctl, rxdctl;
-	u32 dmatxctl;
+	u32 rxdctl;
 	u32 ctrl_ext;
 
 	ixgbe_get_hw_control(adapter);
 	ixgbe_setup_gpie(adapter);
 
-	if (hw->mac.type == ixgbe_mac_82599EB) {
-		/* DMATXCTL.EN must be set after all Tx queue config is done */
-		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
-		dmatxctl |= IXGBE_DMATXCTL_TE;
-		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
-	}
-	for (i = 0; i < adapter->num_tx_queues; i++) {
-		j = adapter->tx_ring[i]->reg_idx;
-		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
-		if (adapter->rx_itr_setting == 0) {
-			/* cannot set wthresh when itr==0 */
-			txdctl &= ~0x007F0000;
-		} else {
-			/* enable WTHRESH=8 descriptors, to encourage burst writeback */
-			txdctl |= (8 << 16);
-		}
-		txdctl |= IXGBE_TXDCTL_ENABLE;
-		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
-		if (hw->mac.type == ixgbe_mac_82599EB) {
-			int wait_loop = 10;
-			/* poll for Tx Enable ready */
-			do {
-				msleep(1);
-				txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
-			} while (--wait_loop &&
-				 !(txdctl & IXGBE_TXDCTL_ENABLE));
-			if (!wait_loop)
-				e_err(drv, "Could not enable Tx Queue %d\n", j);
-		}
-	}
-
 	for (i = 0; i < num_rx_rings; i++) {
 		j = adapter->rx_ring[i]->reg_idx;
 		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
@@ -3530,10 +3547,6 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
 			e_err(probe, "link_config FAILED %d\n", err);
 	}
 
-	for (i = 0; i < adapter->num_tx_queues; i++)
-		set_bit(__IXGBE_FDIR_INIT_DONE,
-			&(adapter->tx_ring[i]->reinit_state));
-
 	/* enable transmits */
 	netif_tx_start_all_queues(adapter->netdev);
 
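
A note on the TXDCTL arithmetic that moved into ixgbe_configure_tx_ring() above: the mask 0x007F0000 covers the 7-bit WTHRESH (write-back threshold) field at bits 22:16, so (8 << 16) sets WTHRESH = 8, while txdctl |= 32 lands in the low 7-bit PTHRESH (prefetch threshold) field. A hedged illustration follows; the *_MASK/*_SHIFT names are invented here for readability, the driver itself uses the raw constants shown in the diff:

	/* Hypothetical names; field positions inferred from the patch's masks. */
	#define TXDCTL_PTHRESH_MASK	0x0000007F	/* bits  6:0  */
	#define TXDCTL_WTHRESH_SHIFT	16
	#define TXDCTL_WTHRESH_MASK	0x007F0000	/* bits 22:16 */

	static u32 txdctl_apply_thresholds(u32 txdctl, bool itr_on, bool dcb_on)
	{
		if (!itr_on)
			txdctl &= ~TXDCTL_WTHRESH_MASK;	/* WTHRESH unusable with ITR == 0 */
		else
			txdctl |= 8 << TXDCTL_WTHRESH_SHIFT;	/* encourage burst write-back */
		if (dcb_on)
			txdctl |= 32;	/* PTHRESH = 32: DFP Tx-hang workaround */
		return txdctl;
	}
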