Diffstat (limited to 'drivers/net/ixgbe/ixgbe_main.c')
 -rw-r--r--  drivers/net/ixgbe/ixgbe_main.c | 871
 1 file changed, 432 insertions(+), 439 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 08e8e25c159d..fa671ae0ab69 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -32,8 +32,10 @@
 #include <linux/vmalloc.h>
 #include <linux/string.h>
 #include <linux/in.h>
+#include <linux/interrupt.h>
 #include <linux/ip.h>
 #include <linux/tcp.h>
+#include <linux/sctp.h>
 #include <linux/pkt_sched.h>
 #include <linux/ipv6.h>
 #include <linux/slab.h>
@@ -53,11 +55,10 @@ char ixgbe_driver_name[] = "ixgbe";
 static const char ixgbe_driver_string[] =
 	"Intel(R) 10 Gigabit PCI Express Network Driver";
 #define MAJ 3
-#define MIN 3
+#define MIN 4
 #define BUILD 8
-#define KFIX 2
 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
-	__stringify(BUILD) "-k" __stringify(KFIX)
+	__stringify(BUILD) "-k"
 const char ixgbe_driver_version[] = DRV_VERSION;
 static const char ixgbe_copyright[] =
 	"Copyright (c) 1999-2011 Intel Corporation.";
@@ -664,62 +665,6 @@ void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring,
 	/* tx_buffer_info must be completely set up in the transmit path */
 }
 
-/**
- * ixgbe_dcb_txq_to_tc - convert a reg index to a traffic class
- * @adapter: driver private struct
- * @index: reg idx of queue to query (0-127)
- *
- * Helper function to determine the traffic index for a particular
- * register index.
- *
- * Returns : a tc index for use in range 0-7, or 0-3
- */
-static u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 reg_idx)
-{
-	int tc = -1;
-	int dcb_i = netdev_get_num_tc(adapter->netdev);
-
-	/* if DCB is not enabled the queues have no TC */
-	if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
-		return tc;
-
-	/* check valid range */
-	if (reg_idx >= adapter->hw.mac.max_tx_queues)
-		return tc;
-
-	switch (adapter->hw.mac.type) {
-	case ixgbe_mac_82598EB:
-		tc = reg_idx >> 2;
-		break;
-	default:
-		if (dcb_i != 4 && dcb_i != 8)
-			break;
-
-		/* if VMDq is enabled the lowest order bits determine TC */
-		if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
-				      IXGBE_FLAG_VMDQ_ENABLED)) {
-			tc = reg_idx & (dcb_i - 1);
-			break;
-		}
-
-		/*
-		 * Convert the reg_idx into the correct TC. This bitmask
-		 * targets the last full 32 ring traffic class and assigns
-		 * it a value of 1. From there the rest of the rings are
-		 * based on shifting the mask further up to include the
-		 * reg_idx / 16 and then reg_idx / 8. It assumes dcb_i
-		 * will only ever be 8 or 4 and that reg_idx will never
-		 * be greater than 128. The code without the power of 2
-		 * optimizations would be:
-		 *   (((reg_idx % 32) + 32) * dcb_i) >> (9 - reg_idx / 32)
-		 */
-		tc = ((reg_idx & 0X1F) + 0x20) * dcb_i;
-		tc >>= 9 - (reg_idx >> 5);
-	}
-
-	return tc;
-}
-
 static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
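For readers checking the deleted helper: the power-of-two trick can be verified in isolation. A minimal standalone sketch (hypothetical test function, not part of this patch), assuming the 82599-style 32/32/16/16/8/8/8/8 queue split across eight traffic classes:

static u8 txq_to_tc_sketch(u8 reg_idx, int dcb_i)
{
	/* same math as the deleted code: (reg_idx % 32 + 32) * dcb_i */
	int tc = ((reg_idx & 0x1f) + 0x20) * dcb_i;

	/* lower queue groups shift further right */
	tc >>= 9 - (reg_idx >> 5);
	return tc;
}

/* e.g. reg_idx = 112, dcb_i = 8: ((16 + 32) * 8) >> (9 - 3) = 384 >> 6 = 6,
 * matching the long form (((112 % 32) + 32) * 8) >> (9 - 112 / 32) = 6. */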
@@ -765,7 +710,7 @@ static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
 	/* disarm tx queues that have received xoff frames */
 	for (i = 0; i < adapter->num_tx_queues; i++) {
 		struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
-		u32 tc = ixgbe_dcb_txq_to_tc(adapter, tx_ring->reg_idx);
+		u8 tc = tx_ring->dcb_tc;
 
 		if (xoff[tc])
 			clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
@@ -827,15 +772,6 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
 	return ret;
 }
 
-#define IXGBE_MAX_TXD_PWR	14
-#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR)
-
-/* Tx Descriptors needed, worst case */
-#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
-			 (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
-#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
-	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
-
 /**
  * ixgbe_tx_timeout_reset - initiate reset due to Tx timeout
  * @adapter: driver private struct
@@ -938,7 +874,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
 
 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
 	if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
-		     (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
+		     (ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
 		/* Make sure that anybody stopping the queue after this
 		 * sees the new next_to_clean.
 		 */
@@ -1530,7 +1466,7 @@ next_desc:
 	}
 
 	rx_ring->next_to_clean = i;
-	cleaned_count = IXGBE_DESC_UNUSED(rx_ring);
+	cleaned_count = ixgbe_desc_unused(rx_ring);
 
 	if (cleaned_count)
 		ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
@@ -1610,9 +1546,8 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
 		q_vector->eitr = adapter->rx_eitr_param;
 
 		ixgbe_write_eitr(q_vector);
-		/* If Flow Director is enabled, set interrupt affinity */
-		if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
-		    (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) {
+		/* If ATR is enabled, set interrupt affinity */
+		if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
 			/*
 			 * Allocate the affinity_hint cpumask, assign the mask
 			 * for this vector, and set our affinity_hint for
@@ -1937,8 +1872,7 @@ static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
 
 static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
 {
-	struct net_device *netdev = data;
-	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_adapter *adapter = data;
 	struct ixgbe_hw *hw = &adapter->hw;
 	u32 eicr;
 
@@ -2433,7 +2367,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
 
 	sprintf(adapter->lsc_int_name, "%s:lsc", netdev->name);
 	err = request_irq(adapter->msix_entries[vector].vector,
-			  ixgbe_msix_lsc, 0, adapter->lsc_int_name, netdev);
+			  ixgbe_msix_lsc, 0, adapter->lsc_int_name, adapter);
 	if (err) {
 		e_err(probe, "request_irq for msix_lsc failed: %d\n", err);
 		goto free_queue_irqs;
@@ -2523,8 +2457,7 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
 	default:
 		break;
 	}
-	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
-	    adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
+	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
 		mask |= IXGBE_EIMS_FLOW_DIR;
 
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
@@ -2546,8 +2479,7 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
 **/
 static irqreturn_t ixgbe_intr(int irq, void *data)
 {
-	struct net_device *netdev = data;
-	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_adapter *adapter = data;
 	struct ixgbe_hw *hw = &adapter->hw;
 	struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
 	u32 eicr;
@@ -2644,10 +2576,10 @@ static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
 		err = ixgbe_request_msix_irqs(adapter);
 	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
 		err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
-				  netdev->name, netdev);
+				  netdev->name, adapter);
 	} else {
 		err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
-				  netdev->name, netdev);
+				  netdev->name, adapter);
 	}
 
 	if (err)
@@ -2658,15 +2590,13 @@
 
 static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
 {
-	struct net_device *netdev = adapter->netdev;
-
 	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
 		int i, q_vectors;
 
 		q_vectors = adapter->num_msix_vectors;
 
 		i = q_vectors - 1;
-		free_irq(adapter->msix_entries[i].vector, netdev);
+		free_irq(adapter->msix_entries[i].vector, adapter);
 
 		i--;
 		for (; i >= 0; i--) {
@@ -2681,7 +2611,7 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
 
 		ixgbe_reset_q_vectors(adapter);
 	} else {
-		free_irq(adapter->pdev->irq, netdev);
+		free_irq(adapter->pdev->irq, adapter);
 	}
 }
 
@@ -2814,7 +2744,8 @@ static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
 	u32 rttdcs;
-	u32 mask;
+	u32 reg;
+	u8 tcs = netdev_get_num_tc(adapter->netdev);
 
 	if (hw->mac.type == ixgbe_mac_82598EB)
 		return;
@@ -2825,22 +2756,27 @@
 	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
 
 	/* set transmit pool layout */
-	mask = (IXGBE_FLAG_SRIOV_ENABLED | IXGBE_FLAG_DCB_ENABLED);
-	switch (adapter->flags & mask) {
-
+	switch (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
 	case (IXGBE_FLAG_SRIOV_ENABLED):
 		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
 				(IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF));
 		break;
+	default:
+		if (!tcs)
+			reg = IXGBE_MTQC_64Q_1PB;
+		else if (tcs <= 4)
+			reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
+		else
+			reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
 
-	case (IXGBE_FLAG_DCB_ENABLED):
-		/* We enable 8 traffic classes, DCB only */
-		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
-				(IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ));
-		break;
+		IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
 
-	default:
-		IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
+		/* Enable Security TX Buffer IFG for multiple pb */
+		if (tcs) {
+			reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
+			reg |= IXGBE_SECTX_DCB;
+			IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
+		}
 		break;
 	}
 
@@ -2931,7 +2867,11 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
 	u32 mrqc = 0, reta = 0;
 	u32 rxcsum;
 	int i, j;
-	int mask;
+	u8 tcs = netdev_get_num_tc(adapter->netdev);
+	int maxq = adapter->ring_feature[RING_F_RSS].indices;
+
+	if (tcs)
+		maxq = min(maxq, adapter->num_tx_queues / tcs);
 
 	/* Fill out hash function seeds */
 	for (i = 0; i < 10; i++)
@@ -2939,7 +2879,7 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
 
 	/* Fill out redirection table */
 	for (i = 0, j = 0; i < 128; i++, j++) {
-		if (j == adapter->ring_feature[RING_F_RSS].indices)
+		if (j == maxq)
 			j = 0;
 		/* reta = 4-byte sliding window of
 		 * 0x00..(indices-1)(indices-1)00..etc. */
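The "sliding window" in that comment refers to packing four one-byte RETA entries into each 32-bit register. A simplified sketch of the pattern (hypothetical helper; the in-tree loop body is not shown in this hunk and may scale the entry value before packing):

static void write_reta_sketch(struct ixgbe_hw *hw, int maxq)
{
	u32 reta = 0;
	int i, j;

	for (i = 0, j = 0; i < 128; i++, j++) {
		if (j == maxq)
			j = 0;
		reta = (reta << 8) | j;	/* slide window left, append entry */
		if ((i & 3) == 3)	/* four entries fill one register */
			IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
	}
}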
@@ -2953,33 +2893,28 @@
 	rxcsum |= IXGBE_RXCSUM_PCSD;
 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
 
-	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
-		mask = adapter->flags & IXGBE_FLAG_RSS_ENABLED;
-	else
-		mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED
-#ifdef CONFIG_IXGBE_DCB
-					 | IXGBE_FLAG_DCB_ENABLED
-#endif
-					 | IXGBE_FLAG_SRIOV_ENABLED
-					);
-
-	switch (mask) {
-#ifdef CONFIG_IXGBE_DCB
-	case (IXGBE_FLAG_DCB_ENABLED | IXGBE_FLAG_RSS_ENABLED):
-		mrqc = IXGBE_MRQC_RTRSS8TCEN;
-		break;
-	case (IXGBE_FLAG_DCB_ENABLED):
-		mrqc = IXGBE_MRQC_RT8TCEN;
-		break;
-#endif /* CONFIG_IXGBE_DCB */
-	case (IXGBE_FLAG_RSS_ENABLED):
+	if (adapter->hw.mac.type == ixgbe_mac_82598EB &&
+	    (adapter->flags & IXGBE_FLAG_RSS_ENABLED)) {
 		mrqc = IXGBE_MRQC_RSSEN;
-		break;
-	case (IXGBE_FLAG_SRIOV_ENABLED):
-		mrqc = IXGBE_MRQC_VMDQEN;
-		break;
-	default:
-		break;
+	} else {
+		int mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED
+					     | IXGBE_FLAG_SRIOV_ENABLED);
+
+		switch (mask) {
+		case (IXGBE_FLAG_RSS_ENABLED):
+			if (!tcs)
+				mrqc = IXGBE_MRQC_RSSEN;
+			else if (tcs <= 4)
+				mrqc = IXGBE_MRQC_RTRSS4TCEN;
+			else
+				mrqc = IXGBE_MRQC_RTRSS8TCEN;
+			break;
+		case (IXGBE_FLAG_SRIOV_ENABLED):
+			mrqc = IXGBE_MRQC_VMDQEN;
+			break;
+		default:
+			break;
+		}
 	}
 
 	/* Perform hash on these packet types */
@@ -3183,7 +3118,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
 	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
 
 	ixgbe_rx_desc_queue_enable(adapter, ring);
-	ixgbe_alloc_rx_buffers(ring, IXGBE_DESC_UNUSED(ring));
+	ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
 }
 
 static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
@@ -3739,7 +3674,7 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
 	hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
 
 	/* reconfigure the hardware */
-	if (adapter->dcbx_cap & (DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE)) {
+	if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) {
 #ifdef CONFIG_FCOE
 		if (adapter->netdev->features & NETIF_F_FCOE_MTU)
 			max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
@@ -3779,12 +3714,51 @@
 }
 
 #endif
+
+static void ixgbe_configure_pb(struct ixgbe_adapter *adapter)
+{
+	int hdrm = 0;
+	int num_tc = netdev_get_num_tc(adapter->netdev);
+	struct ixgbe_hw *hw = &adapter->hw;
+
+	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
+	    adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
+		hdrm = 64 << adapter->fdir_pballoc;
+
+	hw->mac.ops.set_rxpba(&adapter->hw, num_tc, hdrm, PBA_STRATEGY_EQUAL);
+}
+
+static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	struct hlist_node *node, *node2;
+	struct ixgbe_fdir_filter *filter;
+
+	spin_lock(&adapter->fdir_perfect_lock);
+
+	if (!hlist_empty(&adapter->fdir_filter_list))
+		ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask);
+
+	hlist_for_each_entry_safe(filter, node, node2,
+				  &adapter->fdir_filter_list, fdir_node) {
+		ixgbe_fdir_write_perfect_filter_82599(hw,
+				&filter->filter,
+				filter->sw_idx,
+				(filter->action == IXGBE_FDIR_DROP_QUEUE) ?
+				IXGBE_FDIR_DROP_QUEUE :
+				adapter->rx_ring[filter->action]->reg_idx);
+	}
+
+	spin_unlock(&adapter->fdir_perfect_lock);
+}
+
 static void ixgbe_configure(struct ixgbe_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
 	struct ixgbe_hw *hw = &adapter->hw;
 	int i;
 
+	ixgbe_configure_pb(adapter);
 #ifdef CONFIG_IXGBE_DCB
 	ixgbe_configure_dcb(adapter);
 #endif
@@ -3803,7 +3777,9 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
 						      adapter->atr_sample_rate;
 		ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc);
 	} else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
-		ixgbe_init_fdir_perfect_82599(hw, adapter->fdir_pballoc);
+		ixgbe_init_fdir_perfect_82599(&adapter->hw,
+					      adapter->fdir_pballoc);
+		ixgbe_fdir_filter_restore(adapter);
 	}
 	ixgbe_configure_virtualization(adapter);
 
@@ -4180,6 +4156,23 @@ static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
 		ixgbe_clean_tx_ring(adapter->tx_ring[i]);
 }
 
+static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter)
+{
+	struct hlist_node *node, *node2;
+	struct ixgbe_fdir_filter *filter;
+
+	spin_lock(&adapter->fdir_perfect_lock);
+
+	hlist_for_each_entry_safe(filter, node, node2,
+				  &adapter->fdir_filter_list, fdir_node) {
+		hlist_del(&filter->fdir_node);
+		kfree(filter);
+	}
+	adapter->fdir_filter_count = 0;
+
+	spin_unlock(&adapter->fdir_perfect_lock);
+}
+
 void ixgbe_down(struct ixgbe_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
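The teardown added above deletes nodes while walking the list, which is why it uses hlist_for_each_entry_safe rather than the plain iterator: the _safe variant caches the next pointer before the loop body frees the current entry. A minimal sketch of the same pattern (hypothetical struct item, not driver code), using the pre-3.9 iterator signature this file uses:

struct item {
	int key;
	struct hlist_node node;
};

static void drain_items(struct hlist_head *head)
{
	struct hlist_node *pos, *next;
	struct item *it;

	hlist_for_each_entry_safe(it, pos, next, head, node) {
		hlist_del(&it->node);	/* unlink the current entry... */
		kfree(it);		/* ...and free it; 'next' is already saved */
	}
}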
@@ -4369,15 +4362,13 @@ static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
 	f_fdir->mask = 0;
 
 	/* Flow Director must have RSS enabled */
-	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED &&
-	    ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
-	     (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)))) {
+	if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
+	    (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) {
 		adapter->num_tx_queues = f_fdir->indices;
 		adapter->num_rx_queues = f_fdir->indices;
 		ret = true;
 	} else {
 		adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
-		adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
 	}
 	return ret;
 }
@@ -4400,69 +4391,72 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
 	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
 		return false;
 
-	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
-#ifdef CONFIG_IXGBE_DCB
-		int tc;
-		struct net_device *dev = adapter->netdev;
+	f->indices = min((int)num_online_cpus(), f->indices);
 
-		tc = netdev_get_prio_tc_map(dev, adapter->fcoe.up);
-		f->indices = dev->tc_to_txq[tc].count;
-		f->mask = dev->tc_to_txq[tc].offset;
-#endif
-	} else {
-		f->indices = min((int)num_online_cpus(), f->indices);
+	adapter->num_rx_queues = 1;
+	adapter->num_tx_queues = 1;
 
-		adapter->num_rx_queues = 1;
-		adapter->num_tx_queues = 1;
-
-		if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
-			e_info(probe, "FCoE enabled with RSS\n");
-			if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
-			    (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
-				ixgbe_set_fdir_queues(adapter);
-			else
-				ixgbe_set_rss_queues(adapter);
-		}
-		/* adding FCoE rx rings to the end */
-		f->mask = adapter->num_rx_queues;
-		adapter->num_rx_queues += f->indices;
-		adapter->num_tx_queues += f->indices;
+	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
+		e_info(probe, "FCoE enabled with RSS\n");
+		if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
+			ixgbe_set_fdir_queues(adapter);
+		else
+			ixgbe_set_rss_queues(adapter);
 	}
 
+	/* adding FCoE rx rings to the end */
+	f->mask = adapter->num_rx_queues;
+	adapter->num_rx_queues += f->indices;
+	adapter->num_tx_queues += f->indices;
+
 	return true;
 }
 #endif /* IXGBE_FCOE */
 
+/* Artificial max queue cap per traffic class in DCB mode */
+#define DCB_QUEUE_CAP 8
+
 #ifdef CONFIG_IXGBE_DCB
 static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
 {
-	bool ret = false;
-	struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_DCB];
-	int i, q;
+	int per_tc_q, q, i, offset = 0;
+	struct net_device *dev = adapter->netdev;
+	int tcs = netdev_get_num_tc(dev);
 
-	if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
-		return ret;
+	if (!tcs)
+		return false;
 
-	f->indices = 0;
-	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
-		q = min((int)num_online_cpus(), MAX_TRAFFIC_CLASS);
-		f->indices += q;
+	/* Map queue offset and counts onto allocated tx queues */
+	per_tc_q = min(dev->num_tx_queues / tcs, (unsigned int)DCB_QUEUE_CAP);
+	q = min((int)num_online_cpus(), per_tc_q);
+
+	for (i = 0; i < tcs; i++) {
+		netdev_set_prio_tc_map(dev, i, i);
+		netdev_set_tc_queue(dev, i, q, offset);
+		offset += q;
 	}
 
-	f->mask = 0x7 << 3;
-	adapter->num_rx_queues = f->indices;
-	adapter->num_tx_queues = f->indices;
-	ret = true;
+	adapter->num_tx_queues = q * tcs;
+	adapter->num_rx_queues = q * tcs;
 
 #ifdef IXGBE_FCOE
-	/* FCoE enabled queues require special configuration done through
-	 * configure_fcoe() and others. Here we map FCoE indices onto the
-	 * DCB queue pairs allowing FCoE to own configuration later.
+	/* FCoE enabled queues require special configuration indexed
+	 * by feature specific indices and mask. Here we map FCoE
+	 * indices onto the DCB queue pairs allowing FCoE to own
+	 * configuration later.
 	 */
-	ixgbe_set_fcoe_queues(adapter);
+	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
+		int tc;
+		struct ixgbe_ring_feature *f =
+			&adapter->ring_feature[RING_F_FCOE];
+
+		tc = netdev_get_prio_tc_map(dev, adapter->fcoe.up);
+		f->indices = dev->tc_to_txq[tc].count;
+		f->mask = dev->tc_to_txq[tc].offset;
+	}
 #endif
 
-	return ret;
+	return true;
 }
 #endif
 
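To make the new mapping concrete, a worked example under assumed values (64 hardware Tx queues, tcs = 4, eight online CPUs; none of these numbers come from the patch itself):

/*
 *   per_tc_q = min(64 / 4, DCB_QUEUE_CAP)       = min(16, 8) = 8
 *   q        = min(num_online_cpus(), per_tc_q) = min(8, 8)  = 8
 *
 * netdev_set_tc_queue(dev, i, q, offset) then gives each TC a
 * contiguous block of q queues:
 *
 *   TC 0 -> queues  0..7     TC 2 -> queues 16..23
 *   TC 1 -> queues  8..15    TC 3 -> queues 24..31
 *
 * and adapter->num_tx_queues = adapter->num_rx_queues = q * tcs = 32.
 */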
@@ -4616,8 +4610,8 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
 
 	switch (hw->mac.type) {
 	case ixgbe_mac_82598EB:
-		*tx = tc << 3;
-		*rx = tc << 2;
+		*tx = tc << 2;
+		*rx = tc << 3;
 		break;
 	case ixgbe_mac_82599EB:
 	case ixgbe_mac_X540:
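The 82598 exposes 32 Tx and 64 Rx queues, so eight traffic classes leave 4 Tx and 8 Rx queues per TC; the corrected shifts give, for example (illustrative arithmetic):

/*
 *   tc = 3:  *tx = 3 << 2 = 12   (Tx queues 12..15 belong to TC 3)
 *            *rx = 3 << 3 = 24   (Rx queues 24..31 belong to TC 3)
 */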
@@ -4657,55 +4651,6 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
 	}
 }
 
-#define IXGBE_MAX_Q_PER_TC (IXGBE_MAX_DCB_INDICES / MAX_TRAFFIC_CLASS)
-
-/* ixgbe_setup_tc - routine to configure net_device for multiple traffic
- * classes.
- *
- * @netdev: net device to configure
- * @tc: number of traffic classes to enable
- */
-int ixgbe_setup_tc(struct net_device *dev, u8 tc)
-{
-	int i;
-	unsigned int q, offset = 0;
-
-	if (!tc) {
-		netdev_reset_tc(dev);
-	} else {
-		struct ixgbe_adapter *adapter = netdev_priv(dev);
-
-		/* Hardware supports up to 8 traffic classes */
-		if (tc > MAX_TRAFFIC_CLASS || netdev_set_num_tc(dev, tc))
-			return -EINVAL;
-
-		/* Partition Tx queues evenly amongst traffic classes */
-		for (i = 0; i < tc; i++) {
-			q = min((int)num_online_cpus(), IXGBE_MAX_Q_PER_TC);
-			netdev_set_prio_tc_map(dev, i, i);
-			netdev_set_tc_queue(dev, i, q, offset);
-			offset += q;
-		}
-
-		/* This enables multiple traffic class support in the hardware
-		 * which defaults to strict priority transmission by default.
-		 * If traffic classes are already enabled perhaps through DCB
-		 * code path then existing configuration will be used.
-		 */
-		if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
-		    dev->dcbnl_ops && dev->dcbnl_ops->setdcbx) {
-			struct ieee_ets ets = {
-				.prio_tc = {0, 1, 2, 3, 4, 5, 6, 7},
-			};
-			u8 mode = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
-
-			dev->dcbnl_ops->setdcbx(dev, mode);
-			dev->dcbnl_ops->ieee_setets(dev, &ets);
-		}
-	}
-	return 0;
-}
-
 /**
  * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
  * @adapter: board private structure to initialize
@@ -4719,7 +4664,7 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
 	int i, j, k;
 	u8 num_tcs = netdev_get_num_tc(dev);
 
-	if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
+	if (!num_tcs)
 		return false;
 
 	for (i = 0, k = 0; i < num_tcs; i++) {
@@ -4751,9 +4696,8 @@ static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
 	int i;
 	bool ret = false;
 
-	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED &&
-	    ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
-	     (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))) {
+	if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
+	    (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) {
 		for (i = 0; i < adapter->num_rx_queues; i++)
 			adapter->rx_ring[i]->reg_idx = i;
 		for (i = 0; i < adapter->num_tx_queues; i++)
@@ -4782,8 +4726,7 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
 		return false;
 
 	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
-		if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
-		    (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
+		if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
 			ixgbe_cache_ring_fdir(adapter);
 		else
 			ixgbe_cache_ring_rss(adapter);
@@ -4963,14 +4906,12 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
 
 	adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
 	adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
-	if (adapter->flags & (IXGBE_FLAG_FDIR_HASH_CAPABLE |
-			      IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) {
+	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
 		e_err(probe,
-		      "Flow Director is not supported while multiple "
+		      "ATR is not supported while multiple "
 		      "queues are disabled. Disabling Flow Director\n");
 	}
 	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
-	adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
 	adapter->atr_sample_rate = 0;
 	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
 		ixgbe_disable_sriov(adapter);
@@ -5201,7 +5142,6 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
 	rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus());
 	adapter->ring_feature[RING_F_RSS].indices = rss;
 	adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
-	adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES;
 	switch (hw->mac.type) {
 	case ixgbe_mac_82598EB:
 		if (hw->device_id == IXGBE_DEV_ID_82598AT)
@@ -5222,14 +5162,13 @@
 		adapter->atr_sample_rate = 20;
 		adapter->ring_feature[RING_F_FDIR].indices =
 							 IXGBE_MAX_FDIR_INDICES;
-		adapter->fdir_pballoc = 0;
+		adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
 #ifdef IXGBE_FCOE
 		adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
 		adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
 		adapter->ring_feature[RING_F_FCOE].indices = 0;
 #ifdef CONFIG_IXGBE_DCB
 		/* Default traffic class to use for FCoE */
-		adapter->fcoe.tc = IXGBE_FCOE_DEFTC;
 		adapter->fcoe.up = IXGBE_FCOE_DEFTC;
 #endif
 #endif /* IXGBE_FCOE */
@@ -5250,7 +5189,6 @@
 	}
 	adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
 	adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
-	adapter->dcb_cfg.rx_pba_cfg = pba_equal;
 	adapter->dcb_cfg.pfc_mode_enable = false;
 	adapter->dcb_set_bitmap = 0x00;
 	adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
@@ -5620,6 +5558,8 @@ static int ixgbe_close(struct net_device *netdev)
 	ixgbe_down(adapter);
 	ixgbe_free_irq(adapter);
 
+	ixgbe_fdir_filter_exit(adapter);
+
 	ixgbe_free_all_tx_resources(adapter);
 	ixgbe_free_all_rx_resources(adapter);
 
@@ -6404,179 +6344,145 @@ static void ixgbe_service_task(struct work_struct *work)
 	ixgbe_service_event_complete(adapter);
 }
 
-static int ixgbe_tso(struct ixgbe_adapter *adapter,
-		     struct ixgbe_ring *tx_ring, struct sk_buff *skb,
-		     u32 tx_flags, u8 *hdr_len, __be16 protocol)
+void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
+		       u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx)
 {
 	struct ixgbe_adv_tx_context_desc *context_desc;
-	unsigned int i;
-	int err;
-	struct ixgbe_tx_buffer *tx_buffer_info;
-	u32 vlan_macip_lens = 0, type_tucmd_mlhl;
-	u32 mss_l4len_idx, l4len;
+	u16 i = tx_ring->next_to_use;
 
-	if (skb_is_gso(skb)) {
-		if (skb_header_cloned(skb)) {
-			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
-			if (err)
-				return err;
-		}
-		l4len = tcp_hdrlen(skb);
-		*hdr_len += l4len;
-
-		if (protocol == htons(ETH_P_IP)) {
-			struct iphdr *iph = ip_hdr(skb);
-			iph->tot_len = 0;
-			iph->check = 0;
-			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
-								 iph->daddr, 0,
-								 IPPROTO_TCP,
-								 0);
-		} else if (skb_is_gso_v6(skb)) {
-			ipv6_hdr(skb)->payload_len = 0;
-			tcp_hdr(skb)->check =
-			    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
-					     &ipv6_hdr(skb)->daddr,
-					     0, IPPROTO_TCP, 0);
-		}
+	context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);
 
-		i = tx_ring->next_to_use;
+	i++;
+	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
 
-		tx_buffer_info = &tx_ring->tx_buffer_info[i];
-		context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);
-
-		/* VLAN MACLEN IPLEN */
-		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
-			vlan_macip_lens |=
-			    (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
-		vlan_macip_lens |= ((skb_network_offset(skb)) <<
-				    IXGBE_ADVTXD_MACLEN_SHIFT);
-		*hdr_len += skb_network_offset(skb);
-		vlan_macip_lens |=
-		    (skb_transport_header(skb) - skb_network_header(skb));
-		*hdr_len +=
-		    (skb_transport_header(skb) - skb_network_header(skb));
-		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
-		context_desc->seqnum_seed = 0;
-
-		/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
-		type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
-				   IXGBE_ADVTXD_DTYP_CTXT);
+	/* set bits to identify this as an advanced context descriptor */
+	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
 
-		if (protocol == htons(ETH_P_IP))
-			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
-		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
-		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
-
-		/* MSS L4LEN IDX */
-		mss_l4len_idx =
-		    (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
-		mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
-		/* use index 1 for TSO */
-		mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
-		context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
+	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
+	context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof);
+	context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
+	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
+}
 
-		tx_buffer_info->time_stamp = jiffies;
-		tx_buffer_info->next_to_watch = i;
+static int ixgbe_tso(struct ixgbe_ring *tx_ring, struct sk_buff *skb,
+		     u32 tx_flags, __be16 protocol, u8 *hdr_len)
+{
+	int err;
+	u32 vlan_macip_lens, type_tucmd;
+	u32 mss_l4len_idx, l4len;
 
-		i++;
-		if (i == tx_ring->count)
-			i = 0;
-		tx_ring->next_to_use = i;
+	if (!skb_is_gso(skb))
+		return 0;
 
-		return true;
+	if (skb_header_cloned(skb)) {
+		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+		if (err)
+			return err;
 	}
-	return false;
-}
 
-static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb,
-		      __be16 protocol)
+	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
+	type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
+
+	if (protocol == __constant_htons(ETH_P_IP)) {
+		struct iphdr *iph = ip_hdr(skb);
+		iph->tot_len = 0;
+		iph->check = 0;
+		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+							 iph->daddr, 0,
+							 IPPROTO_TCP,
+							 0);
+		type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
+	} else if (skb_is_gso_v6(skb)) {
+		ipv6_hdr(skb)->payload_len = 0;
+		tcp_hdr(skb)->check =
+		    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+				     &ipv6_hdr(skb)->daddr,
+				     0, IPPROTO_TCP, 0);
+	}
+
+	l4len = tcp_hdrlen(skb);
+	*hdr_len = skb_transport_offset(skb) + l4len;
+
+	/* mss_l4len_id: use 1 as index for TSO */
+	mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
+	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
+	mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
+
+	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
+	vlan_macip_lens = skb_network_header_len(skb);
+	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
+	vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
+
+	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd,
+			  mss_l4len_idx);
+
+	return 1;
+}
+
+static bool ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
+			  struct sk_buff *skb, u32 tx_flags,
+			  __be16 protocol)
 {
-	u32 rtn = 0;
+	u32 vlan_macip_lens = 0;
+	u32 mss_l4len_idx = 0;
+	u32 type_tucmd = 0;
 
-	switch (protocol) {
-	case cpu_to_be16(ETH_P_IP):
-		rtn |= IXGBE_ADVTXD_TUCMD_IPV4;
-		switch (ip_hdr(skb)->protocol) {
-		case IPPROTO_TCP:
-			rtn |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
+	if (skb->ip_summed != CHECKSUM_PARTIAL) {
+		if (!(tx_flags & IXGBE_TX_FLAGS_VLAN))
+			return false;
+	} else {
+		u8 l4_hdr = 0;
+		switch (protocol) {
+		case __constant_htons(ETH_P_IP):
+			vlan_macip_lens |= skb_network_header_len(skb);
+			type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
+			l4_hdr = ip_hdr(skb)->protocol;
 			break;
-		case IPPROTO_SCTP:
-			rtn |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
+		case __constant_htons(ETH_P_IPV6):
+			vlan_macip_lens |= skb_network_header_len(skb);
+			l4_hdr = ipv6_hdr(skb)->nexthdr;
+			break;
+		default:
+			if (unlikely(net_ratelimit())) {
+				dev_warn(tx_ring->dev,
+					 "partial checksum but proto=%x!\n",
+					 skb->protocol);
+			}
 			break;
 		}
-		break;
-	case cpu_to_be16(ETH_P_IPV6):
-		/* XXX what about other V6 headers?? */
-		switch (ipv6_hdr(skb)->nexthdr) {
+
+		switch (l4_hdr) {
 		case IPPROTO_TCP:
-			rtn |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
+			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
+			mss_l4len_idx = tcp_hdrlen(skb) <<
+					IXGBE_ADVTXD_L4LEN_SHIFT;
 			break;
 		case IPPROTO_SCTP:
-			rtn |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
+			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
+			mss_l4len_idx = sizeof(struct sctphdr) <<
+					IXGBE_ADVTXD_L4LEN_SHIFT;
+			break;
+		case IPPROTO_UDP:
+			mss_l4len_idx = sizeof(struct udphdr) <<
+					IXGBE_ADVTXD_L4LEN_SHIFT;
+			break;
+		default:
+			if (unlikely(net_ratelimit())) {
+				dev_warn(tx_ring->dev,
+					 "partial checksum but l4 proto=%x!\n",
+					 skb->protocol);
+			}
 			break;
 		}
-		break;
-	default:
-		if (unlikely(net_ratelimit()))
-			e_warn(probe, "partial checksum but proto=%x!\n",
-			       protocol);
-		break;
 	}
 
-	return rtn;
-}
-
-static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
-			  struct ixgbe_ring *tx_ring,
-			  struct sk_buff *skb, u32 tx_flags,
-			  __be16 protocol)
-{
-	struct ixgbe_adv_tx_context_desc *context_desc;
-	unsigned int i;
-	struct ixgbe_tx_buffer *tx_buffer_info;
-	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
-
-	if (skb->ip_summed == CHECKSUM_PARTIAL ||
-	    (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
-		i = tx_ring->next_to_use;
-		tx_buffer_info = &tx_ring->tx_buffer_info[i];
-		context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);
+	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
+	vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
 
-		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
-			vlan_macip_lens |=
-			    (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
-		vlan_macip_lens |= (skb_network_offset(skb) <<
-				    IXGBE_ADVTXD_MACLEN_SHIFT);
-		if (skb->ip_summed == CHECKSUM_PARTIAL)
-			vlan_macip_lens |= (skb_transport_header(skb) -
-					    skb_network_header(skb));
+	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0,
+			  type_tucmd, mss_l4len_idx);
 
-		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
-		context_desc->seqnum_seed = 0;
-
-		type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
-				    IXGBE_ADVTXD_DTYP_CTXT);
-
-		if (skb->ip_summed == CHECKSUM_PARTIAL)
-			type_tucmd_mlhl |= ixgbe_psum(adapter, skb, protocol);
-
-		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
-		/* use index zero for tx checksum offload */
-		context_desc->mss_l4len_idx = 0;
-
-		tx_buffer_info->time_stamp = jiffies;
-		tx_buffer_info->next_to_watch = i;
-
-		i++;
-		if (i == tx_ring->count)
-			i = 0;
-		tx_ring->next_to_use = i;
-
-		return true;
-	}
-
-	return false;
+	return (skb->ip_summed == CHECKSUM_PARTIAL);
 }
 
 static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
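Both ixgbe_tso() and ixgbe_tx_csum() above now funnel into ixgbe_tx_ctxtdesc(), which just stamps the four little-endian words of an advanced context descriptor. A usage sketch for a plain IPv4/TCP checksum offload (all lengths illustrative, not from the patch: 14-byte Ethernet header, 20-byte IP header, 20-byte TCP header, no VLAN tag):

	u32 vlan_macip_lens = 20;			/* HEADLEN: IP header bytes */
	u32 type_tucmd = IXGBE_ADVTXD_TUCMD_IPV4 |
			 IXGBE_ADVTXD_TUCMD_L4T_TCP;
	u32 mss_l4len_idx = 20 << IXGBE_ADVTXD_L4LEN_SHIFT;

	vlan_macip_lens |= 14 << IXGBE_ADVTXD_MACLEN_SHIFT;	/* MACLEN: L2 header */

	/* fcoe_sof_eof is 0 for non-FCoE frames */
	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd, mss_l4len_idx);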
@@ -6588,11 +6494,12 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
 	struct ixgbe_tx_buffer *tx_buffer_info;
 	unsigned int len;
 	unsigned int total = skb->len;
-	unsigned int offset = 0, size, count = 0, i;
+	unsigned int offset = 0, size, count = 0;
 	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
 	unsigned int f;
 	unsigned int bytecount = skb->len;
 	u16 gso_segs = 1;
+	u16 i;
 
 	i = tx_ring->next_to_use;
 
@@ -6858,7 +6765,7 @@ static void ixgbe_atr(struct ixgbe_ring *ring, struct sk_buff *skb,
 					      input, common, ring->queue_index);
 }
 
-static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
+static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
 {
 	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
 	/* Herbert's original patch had:
@@ -6868,7 +6775,7 @@ static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
 
 	/* We need to check again in a case another CPU has just
 	 * made room available. */
-	if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
+	if (likely(ixgbe_desc_unused(tx_ring) < size))
 		return -EBUSY;
 
 	/* A reprieve! - use start_queue because it doesn't call schedule */
@@ -6877,9 +6784,9 @@ static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
 	return 0;
 }
 
-static int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
+static int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
 {
-	if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
+	if (likely(ixgbe_desc_unused(tx_ring) >= size))
 		return 0;
 	return __ixgbe_maybe_stop_tx(tx_ring, size);
 }
@@ -6915,13 +6822,33 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
 			  struct ixgbe_adapter *adapter,
 			  struct ixgbe_ring *tx_ring)
 {
-	unsigned int first;
-	unsigned int tx_flags = 0;
-	u8 hdr_len = 0;
 	int tso;
-	int count = 0;
-	unsigned int f;
+	u32 tx_flags = 0;
+#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
+	unsigned short f;
+#endif
+	u16 first;
+	u16 count = TXD_USE_COUNT(skb_headlen(skb));
 	__be16 protocol;
+	u8 hdr_len = 0;
+
+	/*
+	 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
+	 *       + 1 desc for skb_head_len/IXGBE_MAX_DATA_PER_TXD,
+	 *       + 2 desc gap to keep tail from touching head,
+	 *       + 1 desc for context descriptor,
+	 * otherwise try next time
+	 */
+#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
+	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+#else
+	count += skb_shinfo(skb)->nr_frags;
+#endif
+	if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) {
+		tx_ring->tx_stats.tx_busy++;
+		return NETDEV_TX_BUSY;
+	}
 
 	protocol = vlan_get_protocol(skb);
 
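Worked through with the TXD_USE_COUNT() macro (removed from this file earlier in the patch and relocated), assuming a 1514-byte linear skb with three page fragments on a 4 KiB-page system:

/*
 * IXGBE_MAX_DATA_PER_TXD = 1 << 14 = 16384, so PAGE_SIZE (4096) fits in
 * a single descriptor and the #else branch applies:
 *
 *   count  = TXD_USE_COUNT(1514)        = 1   (skb head)
 *   count += skb_shinfo(skb)->nr_frags  = 3   (one per fragment)
 *
 * ixgbe_maybe_stop_tx(tx_ring, count + 3) therefore requires 7 free
 * descriptors: 4 for the data, plus 1 context descriptor and the
 * 2-descriptor gap that keeps tail from touching head.
 */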
@@ -6946,51 +6873,29 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
 	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED &&
 	    (protocol == htons(ETH_P_FCOE)))
 		tx_flags |= IXGBE_TX_FLAGS_FCOE;
-#endif
-
-	/* four things can cause us to need a context descriptor */
-	if (skb_is_gso(skb) ||
-	    (skb->ip_summed == CHECKSUM_PARTIAL) ||
-	    (tx_flags & IXGBE_TX_FLAGS_VLAN) ||
-	    (tx_flags & IXGBE_TX_FLAGS_FCOE))
-		count++;
-
-	count += TXD_USE_COUNT(skb_headlen(skb));
-	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
-		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
-
-	if (ixgbe_maybe_stop_tx(tx_ring, count)) {
-		tx_ring->tx_stats.tx_busy++;
-		return NETDEV_TX_BUSY;
-	}
 
+#endif
+	/* record the location of the first descriptor for this packet */
 	first = tx_ring->next_to_use;
+
 	if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
 #ifdef IXGBE_FCOE
 		/* setup tx offload for FCoE */
-		tso = ixgbe_fso(adapter, tx_ring, skb, tx_flags, &hdr_len);
-		if (tso < 0) {
-			dev_kfree_skb_any(skb);
-			return NETDEV_TX_OK;
-		}
-		if (tso)
+		tso = ixgbe_fso(tx_ring, skb, tx_flags, &hdr_len);
+		if (tso < 0)
+			goto out_drop;
+		else if (tso)
 			tx_flags |= IXGBE_TX_FLAGS_FSO;
 #endif /* IXGBE_FCOE */
 	} else {
 		if (protocol == htons(ETH_P_IP))
 			tx_flags |= IXGBE_TX_FLAGS_IPV4;
-		tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len,
-				protocol);
-		if (tso < 0) {
-			dev_kfree_skb_any(skb);
-			return NETDEV_TX_OK;
-		}
-
-		if (tso)
+		tso = ixgbe_tso(tx_ring, skb, tx_flags, protocol, &hdr_len);
+		if (tso < 0)
+			goto out_drop;
+		else if (tso)
 			tx_flags |= IXGBE_TX_FLAGS_TSO;
-		else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags,
-				       protocol) &&
-			 (skb->ip_summed == CHECKSUM_PARTIAL))
+		else if (ixgbe_tx_csum(tx_ring, skb, tx_flags, protocol))
 			tx_flags |= IXGBE_TX_FLAGS_CSUM;
 	}
 
@@ -7003,12 +6908,16 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, | |||
7003 | ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED); | 6908 | ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED); |
7004 | 6909 | ||
7005 | } else { | 6910 | } else { |
7006 | dev_kfree_skb_any(skb); | ||
7007 | tx_ring->tx_buffer_info[first].time_stamp = 0; | 6911 | tx_ring->tx_buffer_info[first].time_stamp = 0; |
7008 | tx_ring->next_to_use = first; | 6912 | tx_ring->next_to_use = first; |
6913 | goto out_drop; | ||
7009 | } | 6914 | } |
7010 | 6915 | ||
7011 | return NETDEV_TX_OK; | 6916 | return NETDEV_TX_OK; |
6917 | |||
6918 | out_drop: | ||
6919 | dev_kfree_skb_any(skb); | ||
6920 | return NETDEV_TX_OK; | ||
7012 | } | 6921 | } |
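The new out_drop exit consolidates the two copies of dev_kfree_skb_any() and makes the return-code contract explicit: a frame the driver has consumed, even by freeing it, must report NETDEV_TX_OK, while NETDEV_TX_BUSY is reserved for the ring-full case where the core requeues the untouched skb. A userspace caricature of that contract (fake_ring, fake_skb, and the helpers are invented stand-ins, not kernel API):

#include <stdio.h>
#include <stdlib.h>

/* userspace stand-ins for NETDEV_TX_OK / NETDEV_TX_BUSY */
enum tx_ret { TX_OK, TX_BUSY };

struct fake_ring { int free_descs; };
struct fake_skb  { int offload_needed; int offload_ok; };

static enum tx_ret xmit(struct fake_ring *ring, struct fake_skb *skb,
                        int needed)
{
        /* ring full: frame NOT consumed, the caller must requeue it */
        if (ring->free_descs < needed)
                return TX_BUSY;

        /* offload setup failed: consume (free) the frame, report OK */
        if (skb->offload_needed && !skb->offload_ok)
                goto out_drop;

        ring->free_descs -= needed;     /* ...map and post descriptors... */
        return TX_OK;

out_drop:
        free(skb);                      /* models dev_kfree_skb_any() */
        return TX_OK;
}

int main(void)
{
        struct fake_ring ring = { .free_descs = 8 };
        struct fake_skb *skb = calloc(1, sizeof(*skb));

        skb->offload_needed = 1;        /* offload_ok left 0: forces the drop */
        printf("%s\n", xmit(&ring, skb, 3) == TX_OK ? "TX_OK" : "TX_BUSY");
        return 0;
}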
7013 | 6922 | ||
7014 | static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | 6923 | static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev) |
@@ -7198,6 +7107,85 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev, | |||
7198 | return stats; | 7107 | return stats; |
7199 | } | 7108 | } |
7200 | 7109 | ||
7110 | /* ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid. | ||
7111 | * @adapter: pointer to ixgbe_adapter | ||
7112 | * @tc: number of traffic classes currently enabled | ||
7113 | * | ||
7114 | * Configure a valid 802.1Qp to Rx packet buffer mapping, i.e. confirm | ||
7115 | * 802.1Q priority maps to a packet buffer that exists. | ||
7116 | */ | ||
7117 | static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc) | ||
7118 | { | ||
7119 | struct ixgbe_hw *hw = &adapter->hw; | ||
7120 | u32 reg, rsave; | ||
7121 | int i; | ||
7122 | |||
7123 | /* The 82598 has a static priority to TC mapping that cannot | ||
7124 | * be changed, so no validation is needed. | ||
7125 | */ | ||
7126 | if (hw->mac.type == ixgbe_mac_82598EB) | ||
7127 | return; | ||
7128 | |||
7129 | reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC); | ||
7130 | rsave = reg; | ||
7131 | |||
7132 | for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { | ||
7133 | u8 up2tc = (reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT)) & 0x7; | ||
7134 | |||
7135 | /* If up2tc is out of bounds default to zero */ | ||
7136 | if (up2tc > tc) | ||
7137 | reg &= ~(0x7 << (i * IXGBE_RTRUP2TC_UP_SHIFT)); | ||
7138 | } | ||
7139 | |||
7140 | if (reg != rsave) | ||
7141 | IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg); | ||
7142 | |||
7143 | return; | ||
7144 | } | ||
7145 | |||
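For readers without the datasheet: RTRUP2TC packs eight 3-bit traffic-class fields, one per 802.1Q user priority. A userspace model of the walk above (the 3-bit field width matches the customary IXGBE_RTRUP2TC_UP_SHIFT value; all constants here are restated assumptions). Note that each field is masked to 3 bits before the range check and only field i is cleared.

#include <stdint.h>
#include <stdio.h>

#define MAX_TRAFFIC_CLASS 8
#define UP_SHIFT          3   /* assumed IXGBE_RTRUP2TC_UP_SHIFT */

static uint32_t validate_rtr(uint32_t reg, uint8_t tc)
{
        int i;

        for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
                uint8_t up2tc = (reg >> (i * UP_SHIFT)) & 0x7;

                /* clear any mapping that points past the last enabled TC */
                if (up2tc > tc)
                        reg &= ~(0x7u << (i * UP_SHIFT));
        }
        return reg;
}

int main(void)
{
        /* UP0 -> TC1, UP1 -> TC7, others TC0 */
        uint32_t reg = (1u << 0) | (7u << 3);

        /* with only 4 TCs enabled, the UP1 mapping must be cleared */
        printf("0x%06x -> 0x%06x\n", reg, validate_rtr(reg, 4));
        return 0;
}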
7146 | |||
7147 | /* ixgbe_setup_tc - routine to configure net_device for multiple traffic | ||
7148 | * classes. | ||
7149 | * | ||
7150 | * @dev: net device to configure | ||
7151 | * @tc: number of traffic classes to enable | ||
7152 | */ | ||
7153 | int ixgbe_setup_tc(struct net_device *dev, u8 tc) | ||
7154 | { | ||
7155 | struct ixgbe_adapter *adapter = netdev_priv(dev); | ||
7156 | struct ixgbe_hw *hw = &adapter->hw; | ||
7157 | |||
7158 | /* If DCB is enabled, do not remove traffic classes; multiple | ||
7159 | * traffic classes are required to implement DCB. | ||
7160 | */ | ||
7161 | if (!tc && (adapter->flags & IXGBE_FLAG_DCB_ENABLED)) | ||
7162 | return 0; | ||
7163 | |||
7164 | /* Hardware supports up to 8 traffic classes */ | ||
7165 | if (tc > MAX_TRAFFIC_CLASS || | ||
7166 | (hw->mac.type == ixgbe_mac_82598EB && tc < MAX_TRAFFIC_CLASS)) | ||
7167 | return -EINVAL; | ||
7168 | |||
7169 | /* Hardware has to reinitialize queues and interrupts to | ||
7170 | * match packet buffer alignment. Unfortunately, the | ||
7171 | * hardware is not flexible enough to do this dynamically. | ||
7172 | */ | ||
7173 | if (netif_running(dev)) | ||
7174 | ixgbe_close(dev); | ||
7175 | ixgbe_clear_interrupt_scheme(adapter); | ||
7176 | |||
7177 | if (tc) | ||
7178 | netdev_set_num_tc(dev, tc); | ||
7179 | else | ||
7180 | netdev_reset_tc(dev); | ||
7181 | |||
7182 | ixgbe_init_interrupt_scheme(adapter); | ||
7183 | ixgbe_validate_rtr(adapter, tc); | ||
7184 | if (netif_running(dev)) | ||
7185 | ixgbe_open(dev); | ||
7186 | |||
7187 | return 0; | ||
7188 | } | ||
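What netdev_set_num_tc() ultimately buys the stack is a priority-to-class-to-queue mapping consulted on transmit. An illustrative userspace model (struct and helper names are invented; the offset/count layout loosely mirrors the kernel's struct netdev_tc_txq):

#include <stdio.h>

/* invented names; layout loosely mirrors struct netdev_tc_txq */
struct tc_queue_map {
        unsigned char  num_tc;
        unsigned char  prio_tc_map[16]; /* priority -> traffic class */
        unsigned short tc_offset[8];    /* first tx queue of each class */
        unsigned short tc_count[8];     /* tx queues per class */
};

static unsigned short pick_queue(const struct tc_queue_map *m,
                                 unsigned char prio, unsigned int hash)
{
        unsigned char tc = m->prio_tc_map[prio & 15];

        /* hash selects a queue inside the class's contiguous range */
        return m->tc_offset[tc] + (hash % m->tc_count[tc]);
}

int main(void)
{
        struct tc_queue_map m = {
                .num_tc      = 4,
                .prio_tc_map = { 0, 0, 1, 1, 2, 2, 3, 3 },
                .tc_offset   = { 0, 8, 16, 24 },
                .tc_count    = { 8, 8, 8, 8 },
        };

        printf("prio 5 -> queue %u\n", pick_queue(&m, 5, 0x1234));
        return 0;
}

With the CONFIG_IXGBE_DCB guard dropped from ixgbe_netdev_ops below, this path becomes reachable through the mqprio qdisc whether or not DCB support is compiled in.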
7201 | 7189 | ||
7202 | static const struct net_device_ops ixgbe_netdev_ops = { | 7190 | static const struct net_device_ops ixgbe_netdev_ops = { |
7203 | .ndo_open = ixgbe_open, | 7191 | .ndo_open = ixgbe_open, |
@@ -7218,9 +7206,7 @@ static const struct net_device_ops ixgbe_netdev_ops = { | |||
7218 | .ndo_set_vf_tx_rate = ixgbe_ndo_set_vf_bw, | 7206 | .ndo_set_vf_tx_rate = ixgbe_ndo_set_vf_bw, |
7219 | .ndo_get_vf_config = ixgbe_ndo_get_vf_config, | 7207 | .ndo_get_vf_config = ixgbe_ndo_get_vf_config, |
7220 | .ndo_get_stats64 = ixgbe_get_stats64, | 7208 | .ndo_get_stats64 = ixgbe_get_stats64, |
7221 | #ifdef CONFIG_IXGBE_DCB | ||
7222 | .ndo_setup_tc = ixgbe_setup_tc, | 7209 | .ndo_setup_tc = ixgbe_setup_tc, |
7223 | #endif | ||
7224 | #ifdef CONFIG_NET_POLL_CONTROLLER | 7210 | #ifdef CONFIG_NET_POLL_CONTROLLER |
7225 | .ndo_poll_controller = ixgbe_netpoll, | 7211 | .ndo_poll_controller = ixgbe_netpoll, |
7226 | #endif | 7212 | #endif |
@@ -7379,14 +7365,16 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
7379 | pci_set_master(pdev); | 7365 | pci_set_master(pdev); |
7380 | pci_save_state(pdev); | 7366 | pci_save_state(pdev); |
7381 | 7367 | ||
7368 | #ifdef CONFIG_IXGBE_DCB | ||
7369 | indices *= MAX_TRAFFIC_CLASS; | ||
7370 | #endif | ||
7371 | |||
7382 | if (ii->mac == ixgbe_mac_82598EB) | 7372 | if (ii->mac == ixgbe_mac_82598EB) |
7383 | indices = min_t(unsigned int, indices, IXGBE_MAX_RSS_INDICES); | 7373 | indices = min_t(unsigned int, indices, IXGBE_MAX_RSS_INDICES); |
7384 | else | 7374 | else |
7385 | indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES); | 7375 | indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES); |
7386 | 7376 | ||
7387 | #if defined(CONFIG_DCB) | 7377 | #ifdef IXGBE_FCOE |
7388 | indices = max_t(unsigned int, indices, IXGBE_MAX_DCB_INDICES); | ||
7389 | #elif defined(IXGBE_FCOE) | ||
7390 | indices += min_t(unsigned int, num_possible_cpus(), | 7378 | indices += min_t(unsigned int, num_possible_cpus(), |
7391 | IXGBE_MAX_FCOE_INDICES); | 7379 | IXGBE_MAX_FCOE_INDICES); |
7392 | #endif | 7380 | #endif |
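The resulting queue count is easier to audit as a pure function of CPU count and feature flags. A standalone sketch (the limit constants mirror customary ixgbe values, 16 RSS queues on 82598, 64 flow-director queues otherwise, at most 8 FCoE queues, and are assumptions restated here):

#include <stdio.h>

/* assumed limits, restated from customary ixgbe values */
#define MAX_TRAFFIC_CLASS      8
#define IXGBE_MAX_RSS_INDICES  16   /* 82598 */
#define IXGBE_MAX_FDIR_INDICES 64   /* 82599 and later */
#define IXGBE_MAX_FCOE_INDICES 8

static unsigned int min_u(unsigned int a, unsigned int b)
{
        return a < b ? a : b;
}

static unsigned int probe_indices(unsigned int num_cpus, int is_82598,
                                  int dcb, int fcoe)
{
        unsigned int indices = num_cpus;

        if (dcb)                        /* one queue set per traffic class */
                indices *= MAX_TRAFFIC_CLASS;

        indices = min_u(indices, is_82598 ? IXGBE_MAX_RSS_INDICES
                                          : IXGBE_MAX_FDIR_INDICES);
        if (fcoe)                       /* FCoE queues come on top */
                indices += min_u(num_cpus, IXGBE_MAX_FCOE_INDICES);

        return indices;
}

int main(void)
{
        printf("82599, 16 CPUs, DCB+FCoE: %u queues\n",
               probe_indices(16, 0, 1, 1));
        return 0;
}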
@@ -7678,6 +7666,11 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
7678 | ixgbe_vf_configuration(pdev, (i | 0x10000000)); | 7666 | ixgbe_vf_configuration(pdev, (i | 0x10000000)); |
7679 | } | 7667 | } |
7680 | 7668 | ||
7669 | /* Inform firmware of driver version */ | ||
7670 | if (hw->mac.ops.set_fw_drv_ver) | ||
7671 | hw->mac.ops.set_fw_drv_ver(hw, MAJ, MIN, BUILD, | ||
7672 | FW_CEM_UNUSED_VER); | ||
7673 | |||
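The guard before set_fw_drv_ver is the usual optional-hook pattern for per-MAC ops tables: MACs whose firmware lacks the interface simply leave the pointer NULL. A toy model (fake_mac_ops and both functions are invented for illustration):

#include <stdio.h>

/* invented ops table; mirrors the shape of hw->mac.ops */
struct fake_mac_ops {
        void (*set_fw_drv_ver)(int maj, int min, int build);
};

static void fw_ver_82599(int maj, int min, int build)
{
        printf("firmware told: driver %d.%d.%d\n", maj, min, build);
}

int main(void)
{
        struct fake_mac_ops new_mac = { .set_fw_drv_ver = fw_ver_82599 };
        struct fake_mac_ops old_mac = { 0 };    /* hook not implemented */

        /* the NULL check is the whole pattern: older MACs just skip it */
        if (new_mac.set_fw_drv_ver)
                new_mac.set_fw_drv_ver(3, 4, 8);
        if (old_mac.set_fw_drv_ver)
                old_mac.set_fw_drv_ver(3, 4, 8);
        return 0;
}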
7681 | /* add san mac addr to netdev */ | 7674 | /* add san mac addr to netdev */ |
7682 | ixgbe_add_sanmac_netdev(netdev); | 7675 | ixgbe_add_sanmac_netdev(netdev); |
7683 | 7676 | ||