author		David S. Miller <davem@davemloft.net>	2012-03-19 17:24:27 -0400
committer	David S. Miller <davem@davemloft.net>	2012-03-19 17:24:27 -0400
commit		f24fd89ab5148d00f58aaf1d8999e9966fc7215c (patch)
tree		63497b88c2fcaf49fa5dc21a33e5ce35d30dede6 /drivers/net
parent		fb04121417b32329f92a260b490da8434d704e3d (diff)
parent		8af3c33f4dab8c20c0a0eb1a7e00d2303d7f47eb (diff)

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next
Diffstat (limited to 'drivers/net')

 -rw-r--r--  drivers/net/ethernet/intel/ixgbe/Makefile        |    2
 -rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h         |   31
 -rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c |  191
 -rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c    |   13
 -rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c     |  929
 -rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c    | 1226

 6 files changed, 1286 insertions(+), 1106 deletions(-)
diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile
index 7a16177a12a5..8be1d1b2132e 100644
--- a/drivers/net/ethernet/intel/ixgbe/Makefile
+++ b/drivers/net/ethernet/intel/ixgbe/Makefile
@@ -34,7 +34,7 @@ obj-$(CONFIG_IXGBE) += ixgbe.o
 
 ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
               ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
-              ixgbe_mbx.o ixgbe_x540.o
+              ixgbe_mbx.o ixgbe_x540.o ixgbe_lib.o
 
 ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
                              ixgbe_dcb_82599.o ixgbe_dcb_nl.o
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index e0d809d0ed75..80e26ff30ebf 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -101,8 +101,6 @@
 #define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT	29
 #define IXGBE_TX_FLAGS_VLAN_SHIFT	16
 
-#define IXGBE_MAX_RSC_INT_RATE		162760
-
 #define IXGBE_MAX_VF_MC_ENTRIES		30
 #define IXGBE_MAX_VF_FUNCTIONS		64
 #define IXGBE_MAX_VFTA_ENTRIES		128
@@ -152,6 +150,7 @@ struct ixgbe_tx_buffer {
 	struct sk_buff *skb;
 	unsigned int bytecount;
 	unsigned short gso_segs;
+	__be16 protocol;
 	DEFINE_DMA_UNMAP_ADDR(dma);
 	DEFINE_DMA_UNMAP_LEN(len);
 	u32 tx_flags;
@@ -207,15 +206,18 @@ enum ixgbe_ring_state_t {
 	clear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
 struct ixgbe_ring {
 	struct ixgbe_ring *next;	/* pointer to next ring in q_vector */
+	struct ixgbe_q_vector *q_vector;/* backpointer to host q_vector */
+	struct net_device *netdev;	/* netdev ring belongs to */
+	struct device *dev;		/* device for DMA mapping */
 	void *desc;			/* descriptor ring memory */
-	struct device *dev;		/* device for DMA mapping */
-	struct net_device *netdev;	/* netdev ring belongs to */
 	union {
 		struct ixgbe_tx_buffer *tx_buffer_info;
 		struct ixgbe_rx_buffer *rx_buffer_info;
 	};
 	unsigned long state;
 	u8 __iomem *tail;
+	dma_addr_t dma;			/* phys. address of descriptor ring */
+	unsigned int size;		/* length in bytes */
 
 	u16 count;			/* amount of descriptors */
 
@@ -225,17 +227,17 @@ struct ixgbe_ring {
 					 * associated with this ring, which is
 					 * different for DCB and RSS modes
 					 */
+	u16 next_to_use;
+	u16 next_to_clean;
+
 	union {
+		u16 next_to_alloc;
 		struct {
 			u8 atr_sample_rate;
 			u8 atr_count;
 		};
-		u16 next_to_alloc;
 	};
 
-	u16 next_to_use;
-	u16 next_to_clean;
-
 	u8 dcb_tc;
 	struct ixgbe_queue_stats stats;
 	struct u64_stats_sync syncp;
@@ -243,9 +245,6 @@ struct ixgbe_ring {
 		struct ixgbe_tx_queue_stats tx_stats;
 		struct ixgbe_rx_queue_stats rx_stats;
 	};
-	unsigned int size;		/* length in bytes */
-	dma_addr_t dma;			/* phys. address of descriptor ring */
-	struct ixgbe_q_vector *q_vector; /* back-pointer to host q_vector */
 } ____cacheline_internodealigned_in_smp;
 
 enum ixgbe_ring_f_enum {
@@ -437,7 +436,8 @@ struct ixgbe_adapter {
 #define IXGBE_FLAG2_SFP_NEEDS_RESET		(u32)(1 << 5)
 #define IXGBE_FLAG2_RESET_REQUESTED		(u32)(1 << 6)
 #define IXGBE_FLAG2_FDIR_REQUIRES_REINIT	(u32)(1 << 7)
-
+#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP		(u32)(1 << 8)
+#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP		(u32)(1 << 9)
 
 	/* Tx fast path data */
 	int num_tx_queues;
@@ -581,7 +581,9 @@ extern int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
 
 extern char ixgbe_driver_name[];
 extern const char ixgbe_driver_version[];
+#ifdef IXGBE_FCOE
 extern char ixgbe_default_device_descr[];
+#endif /* IXGBE_FCOE */
 
 extern void ixgbe_up(struct ixgbe_adapter *adapter);
 extern void ixgbe_down(struct ixgbe_adapter *adapter);
@@ -606,6 +608,7 @@ extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
 					     struct ixgbe_tx_buffer *);
 extern void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
 extern void ixgbe_write_eitr(struct ixgbe_q_vector *);
+extern int ixgbe_poll(struct napi_struct *napi, int budget);
 extern int ethtool_ioctl(struct ifreq *ifr);
 extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
 extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
@@ -625,14 +628,16 @@ extern s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
 extern void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
 						 union ixgbe_atr_input *mask);
 extern void ixgbe_set_rx_mode(struct net_device *netdev);
+#ifdef CONFIG_IXGBE_DCB
 extern int ixgbe_setup_tc(struct net_device *dev, u8 tc);
+#endif
 extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
 extern void ixgbe_do_reset(struct net_device *netdev);
 #ifdef IXGBE_FCOE
 extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
 extern int ixgbe_fso(struct ixgbe_ring *tx_ring,
 		     struct ixgbe_tx_buffer *first,
-		     u32 tx_flags, u8 *hdr_len);
+		     u8 *hdr_len);
 extern void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter);
 extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
 			  union ixgbe_adv_rx_desc *rx_desc,
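A side note on the reworked struct ixgbe_ring above: the Rx-only next_to_alloc index now shares a union with the Tx-only ATR sampling fields, which is safe because a given ring is only ever used for Tx or for Rx, never both. A minimal standalone sketch of that storage-sharing idea (the miniature struct below is illustrative only, not the driver's real layout):

```c
#include <stdio.h>
#include <stddef.h>

/* Illustrative stand-in for the union in struct ixgbe_ring: Rx-only and
 * Tx-only bookkeeping can overlap because a ring is only ever one kind.
 * Uses a C11/GNU anonymous struct, as the kernel does. */
struct mini_ring {
	unsigned short next_to_use;
	unsigned short next_to_clean;
	union {
		unsigned short next_to_alloc;	/* used on Rx rings only */
		struct {			/* used on Tx rings only */
			unsigned char atr_sample_rate;
			unsigned char atr_count;
		};
	};
};

int main(void)
{
	/* both members land at the same offset, so no space is wasted */
	printf("next_to_alloc offset   = %zu\n",
	       offsetof(struct mini_ring, next_to_alloc));
	printf("atr_sample_rate offset = %zu\n",
	       offsetof(struct mini_ring, atr_sample_rate));
	printf("sizeof(struct mini_ring) = %zu\n", sizeof(struct mini_ring));
	return 0;
}
```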
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index b09e67cc9d6e..31a2bf76a346 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -2137,31 +2137,29 @@ static int ixgbe_get_coalesce(struct net_device *netdev,
  * this function must be called before setting the new value of
  * rx_itr_setting
  */
-static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter,
-			     struct ethtool_coalesce *ec)
+static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
 
-	if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
+	/* nothing to do if LRO or RSC are not enabled */
+	if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) ||
+	    !(netdev->features & NETIF_F_LRO))
 		return false;
 
-	/* if interrupt rate is too high then disable RSC */
-	if (ec->rx_coalesce_usecs != 1 &&
-	    ec->rx_coalesce_usecs <= (IXGBE_MIN_RSC_ITR >> 2)) {
-		if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
-			e_info(probe, "rx-usecs set too low, disabling RSC\n");
-			adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
-			return true;
-		}
-	} else {
-		/* check the feature flag value and enable RSC if necessary */
-		if ((netdev->features & NETIF_F_LRO) &&
-		    !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
-			e_info(probe, "rx-usecs set to %d, re-enabling RSC\n",
-			       ec->rx_coalesce_usecs);
+	/* check the feature flag value and enable RSC if necessary */
+	if (adapter->rx_itr_setting == 1 ||
+	    adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
+		if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
 			adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
+			e_info(probe, "rx-usecs value high enough "
+				      "to re-enable RSC\n");
 			return true;
 		}
+	/* if interrupt rate is too high then disable RSC */
+	} else if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
+		adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
+		e_info(probe, "rx-usecs set too low, disabling RSC\n");
+		return true;
 	}
 	return false;
 }
@@ -2185,9 +2183,6 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
 	    (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
 		return -EINVAL;
 
-	/* check the old value and enable RSC if necessary */
-	need_reset = ixgbe_update_rsc(adapter, ec);
-
 	if (ec->rx_coalesce_usecs > 1)
 		adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
 	else
@@ -2208,6 +2203,9 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
 	else
 		tx_itr_param = adapter->tx_itr_setting;
 
+	/* check the old value and enable RSC if necessary */
+	need_reset = ixgbe_update_rsc(adapter);
+
 	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
 		num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
 	else
@@ -2328,6 +2326,48 @@ static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter,
 	return 0;
 }
 
+static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
+				   struct ethtool_rxnfc *cmd)
+{
+	cmd->data = 0;
+
+	/* if RSS is disabled then report no hashing */
+	if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
+		return 0;
+
+	/* Report default options for RSS on ixgbe */
+	switch (cmd->flow_type) {
+	case TCP_V4_FLOW:
+		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+	case UDP_V4_FLOW:
+		if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
+			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+	case SCTP_V4_FLOW:
+	case AH_ESP_V4_FLOW:
+	case AH_V4_FLOW:
+	case ESP_V4_FLOW:
+	case IPV4_FLOW:
+		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
+		break;
+	case TCP_V6_FLOW:
+		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+	case UDP_V6_FLOW:
+		if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
+			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+	case SCTP_V6_FLOW:
+	case AH_ESP_V6_FLOW:
+	case AH_V6_FLOW:
+	case ESP_V6_FLOW:
+	case IPV6_FLOW:
+		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
 			   u32 *rule_locs)
 {
@@ -2349,6 +2389,9 @@ static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
 	case ETHTOOL_GRXCLSRLALL:
 		ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, rule_locs);
 		break;
+	case ETHTOOL_GRXFH:
+		ret = ixgbe_get_rss_hash_opts(adapter, cmd);
+		break;
 	default:
 		break;
 	}
@@ -2583,6 +2626,111 @@ static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
 	return err;
 }
 
+#define UDP_RSS_FLAGS (IXGBE_FLAG2_RSS_FIELD_IPV4_UDP | \
+		       IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
+static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter,
+				  struct ethtool_rxnfc *nfc)
+{
+	u32 flags2 = adapter->flags2;
+
+	/*
+	 * RSS does not support anything other than hashing
+	 * to queues on src and dst IPs and ports
+	 */
+	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
+			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
+		return -EINVAL;
+
+	switch (nfc->flow_type) {
+	case TCP_V4_FLOW:
+	case TCP_V6_FLOW:
+		if (!(nfc->data & RXH_IP_SRC) ||
+		    !(nfc->data & RXH_IP_DST) ||
+		    !(nfc->data & RXH_L4_B_0_1) ||
+		    !(nfc->data & RXH_L4_B_2_3))
+			return -EINVAL;
+		break;
+	case UDP_V4_FLOW:
+		if (!(nfc->data & RXH_IP_SRC) ||
+		    !(nfc->data & RXH_IP_DST))
+			return -EINVAL;
+		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+		case 0:
+			flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
+			break;
+		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+			flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
+			break;
+		default:
+			return -EINVAL;
+		}
+		break;
+	case UDP_V6_FLOW:
+		if (!(nfc->data & RXH_IP_SRC) ||
+		    !(nfc->data & RXH_IP_DST))
+			return -EINVAL;
+		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+		case 0:
+			flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
+			break;
+		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+			flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
+			break;
+		default:
+			return -EINVAL;
+		}
+		break;
+	case AH_ESP_V4_FLOW:
+	case AH_V4_FLOW:
+	case ESP_V4_FLOW:
+	case SCTP_V4_FLOW:
+	case AH_ESP_V6_FLOW:
+	case AH_V6_FLOW:
+	case ESP_V6_FLOW:
+	case SCTP_V6_FLOW:
+		if (!(nfc->data & RXH_IP_SRC) ||
+		    !(nfc->data & RXH_IP_DST) ||
+		    (nfc->data & RXH_L4_B_0_1) ||
+		    (nfc->data & RXH_L4_B_2_3))
+			return -EINVAL;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* if we changed something we need to update flags */
+	if (flags2 != adapter->flags2) {
+		struct ixgbe_hw *hw = &adapter->hw;
+		u32 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
+
+		if ((flags2 & UDP_RSS_FLAGS) &&
+		    !(adapter->flags2 & UDP_RSS_FLAGS))
+			e_warn(drv, "enabling UDP RSS: fragmented packets"
+			       " may arrive out of order to the stack above\n");
+
+		adapter->flags2 = flags2;
+
+		/* Perform hash on these packet types */
+		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
+		      | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
+		      | IXGBE_MRQC_RSS_FIELD_IPV6
+		      | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
+
+		mrqc &= ~(IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
+			  IXGBE_MRQC_RSS_FIELD_IPV6_UDP);
+
+		if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
+			mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
+
+		if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
+			mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
+
+		IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+	}
+
+	return 0;
+}
+
 static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(dev);
@@ -2595,6 +2743,9 @@ static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
 	case ETHTOOL_SRXCLSRLDEL:
 		ret = ixgbe_del_ethtool_fdir_entry(adapter, cmd);
 		break;
+	case ETHTOOL_SRXFH:
+		ret = ixgbe_set_rss_hash_opt(adapter, cmd);
+		break;
 	default:
 		break;
 	}
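The new ETHTOOL_GRXFH/ETHTOOL_SRXFH handlers above are driven from userspace through the SIOCETHTOOL ioctl; `ethtool -N ethX rx-flow-hash udp4 sdfn` exercises the same path. A minimal sketch of the raw ioctl follows (the interface name "eth0" is an assumption, and a set operation needs CAP_NET_ADMIN):

```c
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_rxnfc nfc;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	/* hash UDP/IPv4 flows on src/dst IP plus both halves of the port
	 * pair -- the only combination ixgbe_set_rss_hash_opt() accepts
	 * for UDP besides IP-only */
	memset(&nfc, 0, sizeof(nfc));
	nfc.cmd = ETHTOOL_SRXFH;
	nfc.flow_type = UDP_V4_FLOW;
	nfc.data = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed name */
	ifr.ifr_data = (char *)&nfc;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("ETHTOOL_SRXFH");

	close(fd);
	return 0;
}
```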
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index 5f943d3f85c4..77ea4b716535 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -448,16 +448,15 @@ ddp_out:
  * ixgbe_fso - ixgbe FCoE Sequence Offload (FSO)
  * @tx_ring: tx desc ring
  * @first: first tx_buffer structure containing skb, tx_flags, and protocol
- * @tx_flags: tx flags
  * @hdr_len: hdr_len to be returned
  *
  * This sets up large send offload for FCoE
  *
- * Returns : 0 indicates no FSO, > 0 for FSO, < 0 for error
+ * Returns : 0 indicates success, < 0 for error
  */
 int ixgbe_fso(struct ixgbe_ring *tx_ring,
 	      struct ixgbe_tx_buffer *first,
-	      u32 tx_flags, u8 *hdr_len)
+	      u8 *hdr_len)
 {
 	struct sk_buff *skb = first->skb;
 	struct fc_frame_header *fh;
@@ -539,8 +538,12 @@ int ixgbe_fso(struct ixgbe_ring *tx_ring,
 		first->gso_segs = DIV_ROUND_UP(skb->len - *hdr_len,
 					       skb_shinfo(skb)->gso_size);
 		first->bytecount += (first->gso_segs - 1) * *hdr_len;
+		first->tx_flags |= IXGBE_TX_FLAGS_FSO;
 	}
 
+	/* set flag indicating FCOE to ixgbe_tx_map call */
+	first->tx_flags |= IXGBE_TX_FLAGS_FCOE;
+
 	/* mss_l4len_id: use 1 for FSO as TSO, no need for L4LEN */
 	mss_l4len_idx = skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
 	mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
@@ -550,13 +553,13 @@ int ixgbe_fso(struct ixgbe_ring *tx_ring,
 			  sizeof(struct fc_frame_header);
 	vlan_macip_lens |= (skb_transport_offset(skb) - 4)
 			   << IXGBE_ADVTXD_MACLEN_SHIFT;
-	vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
+	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
 
 	/* write context desc */
 	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fcoe_sof_eof,
 			  IXGBE_ADVTXT_TUCMD_FCOE, mss_l4len_idx);
 
-	return skb_is_gso(skb);
+	return 0;
 }
 
 static void ixgbe_fcoe_ddp_pools_free(struct ixgbe_fcoe *fcoe)
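Worth noting on the hunks above: ixgbe_fso() used to return skb_is_gso(skb) (> 0 when FSO was set up) and took tx_flags as an argument; it now records IXGBE_TX_FLAGS_FSO/IXGBE_TX_FLAGS_FCOE in first->tx_flags itself and returns only 0 or a negative error. A runnable toy sketch of that revised contract (every name below is a stand-in, not the driver's code):

```c
#include <stdio.h>

#define TX_FLAG_FCOE	(1u << 0)	/* stand-in for IXGBE_TX_FLAGS_FCOE */
#define TX_FLAG_FSO	(1u << 1)	/* stand-in for IXGBE_TX_FLAGS_FSO */

struct tx_buffer {
	unsigned int tx_flags;
};

/* stand-in for ixgbe_fso(): flags travel via the buffer, status via rc */
static int fake_fso(struct tx_buffer *first, int is_gso)
{
	if (is_gso)
		first->tx_flags |= TX_FLAG_FSO;
	first->tx_flags |= TX_FLAG_FCOE;
	return 0;			/* 0 on success, -errno on failure */
}

int main(void)
{
	struct tx_buffer first = { 0 };

	if (fake_fso(&first, 1) < 0) {
		fprintf(stderr, "drop frame\n");
		return 1;
	}
	printf("tx_flags = %#x\n", first.tx_flags);
	return 0;
}
```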
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c new file mode 100644 index 000000000000..027d7a75be39 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | |||
@@ -0,0 +1,929 @@ | |||
1 | /******************************************************************************* | ||
2 | |||
3 | Intel 10 Gigabit PCI Express Linux driver | ||
4 | Copyright(c) 1999 - 2012 Intel Corporation. | ||
5 | |||
6 | This program is free software; you can redistribute it and/or modify it | ||
7 | under the terms and conditions of the GNU General Public License, | ||
8 | version 2, as published by the Free Software Foundation. | ||
9 | |||
10 | This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | more details. | ||
14 | |||
15 | You should have received a copy of the GNU General Public License along with | ||
16 | this program; if not, write to the Free Software Foundation, Inc., | ||
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | |||
19 | The full GNU General Public License is included in this distribution in | ||
20 | the file called "COPYING". | ||
21 | |||
22 | Contact Information: | ||
23 | e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> | ||
24 | Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
25 | |||
26 | *******************************************************************************/ | ||
27 | |||
28 | #include "ixgbe.h" | ||
29 | #include "ixgbe_sriov.h" | ||
30 | |||
31 | /** | ||
32 | * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS | ||
33 | * @adapter: board private structure to initialize | ||
34 | * | ||
35 | * Cache the descriptor ring offsets for RSS to the assigned rings. | ||
36 | * | ||
37 | **/ | ||
38 | static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter) | ||
39 | { | ||
40 | int i; | ||
41 | |||
42 | if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) | ||
43 | return false; | ||
44 | |||
45 | for (i = 0; i < adapter->num_rx_queues; i++) | ||
46 | adapter->rx_ring[i]->reg_idx = i; | ||
47 | for (i = 0; i < adapter->num_tx_queues; i++) | ||
48 | adapter->tx_ring[i]->reg_idx = i; | ||
49 | |||
50 | return true; | ||
51 | } | ||
52 | #ifdef CONFIG_IXGBE_DCB | ||
53 | |||
54 | /* ixgbe_get_first_reg_idx - Return first register index associated with ring */ | ||
55 | static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc, | ||
56 | unsigned int *tx, unsigned int *rx) | ||
57 | { | ||
58 | struct net_device *dev = adapter->netdev; | ||
59 | struct ixgbe_hw *hw = &adapter->hw; | ||
60 | u8 num_tcs = netdev_get_num_tc(dev); | ||
61 | |||
62 | *tx = 0; | ||
63 | *rx = 0; | ||
64 | |||
65 | switch (hw->mac.type) { | ||
66 | case ixgbe_mac_82598EB: | ||
67 | *tx = tc << 2; | ||
68 | *rx = tc << 3; | ||
69 | break; | ||
70 | case ixgbe_mac_82599EB: | ||
71 | case ixgbe_mac_X540: | ||
72 | if (num_tcs > 4) { | ||
73 | if (tc < 3) { | ||
74 | *tx = tc << 5; | ||
75 | *rx = tc << 4; | ||
76 | } else if (tc < 5) { | ||
77 | *tx = ((tc + 2) << 4); | ||
78 | *rx = tc << 4; | ||
79 | } else if (tc < num_tcs) { | ||
80 | *tx = ((tc + 8) << 3); | ||
81 | *rx = tc << 4; | ||
82 | } | ||
83 | } else { | ||
84 | *rx = tc << 5; | ||
85 | switch (tc) { | ||
86 | case 0: | ||
87 | *tx = 0; | ||
88 | break; | ||
89 | case 1: | ||
90 | *tx = 64; | ||
91 | break; | ||
92 | case 2: | ||
93 | *tx = 96; | ||
94 | break; | ||
95 | case 3: | ||
96 | *tx = 112; | ||
97 | break; | ||
98 | default: | ||
99 | break; | ||
100 | } | ||
101 | } | ||
102 | break; | ||
103 | default: | ||
104 | break; | ||
105 | } | ||
106 | } | ||
107 | |||
108 | /** | ||
109 | * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB | ||
110 | * @adapter: board private structure to initialize | ||
111 | * | ||
112 | * Cache the descriptor ring offsets for DCB to the assigned rings. | ||
113 | * | ||
114 | **/ | ||
115 | static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter) | ||
116 | { | ||
117 | struct net_device *dev = adapter->netdev; | ||
118 | int i, j, k; | ||
119 | u8 num_tcs = netdev_get_num_tc(dev); | ||
120 | |||
121 | if (!num_tcs) | ||
122 | return false; | ||
123 | |||
124 | for (i = 0, k = 0; i < num_tcs; i++) { | ||
125 | unsigned int tx_s, rx_s; | ||
126 | u16 count = dev->tc_to_txq[i].count; | ||
127 | |||
128 | ixgbe_get_first_reg_idx(adapter, i, &tx_s, &rx_s); | ||
129 | for (j = 0; j < count; j++, k++) { | ||
130 | adapter->tx_ring[k]->reg_idx = tx_s + j; | ||
131 | adapter->rx_ring[k]->reg_idx = rx_s + j; | ||
132 | adapter->tx_ring[k]->dcb_tc = i; | ||
133 | adapter->rx_ring[k]->dcb_tc = i; | ||
134 | } | ||
135 | } | ||
136 | |||
137 | return true; | ||
138 | } | ||
139 | #endif | ||
140 | |||
141 | /** | ||
142 | * ixgbe_cache_ring_fdir - Descriptor ring to register mapping for Flow Director | ||
143 | * @adapter: board private structure to initialize | ||
144 | * | ||
145 | * Cache the descriptor ring offsets for Flow Director to the assigned rings. | ||
146 | * | ||
147 | **/ | ||
148 | static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter) | ||
149 | { | ||
150 | int i; | ||
151 | bool ret = false; | ||
152 | |||
153 | if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) && | ||
154 | (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) { | ||
155 | for (i = 0; i < adapter->num_rx_queues; i++) | ||
156 | adapter->rx_ring[i]->reg_idx = i; | ||
157 | for (i = 0; i < adapter->num_tx_queues; i++) | ||
158 | adapter->tx_ring[i]->reg_idx = i; | ||
159 | ret = true; | ||
160 | } | ||
161 | |||
162 | return ret; | ||
163 | } | ||
164 | |||
165 | #ifdef IXGBE_FCOE | ||
166 | /** | ||
167 | * ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for the FCoE | ||
168 | * @adapter: board private structure to initialize | ||
169 | * | ||
170 | * Cache the descriptor ring offsets for FCoE mode to the assigned rings. | ||
171 | * | ||
172 | */ | ||
173 | static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter) | ||
174 | { | ||
175 | struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; | ||
176 | int i; | ||
177 | u8 fcoe_rx_i = 0, fcoe_tx_i = 0; | ||
178 | |||
179 | if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) | ||
180 | return false; | ||
181 | |||
182 | if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { | ||
183 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) | ||
184 | ixgbe_cache_ring_fdir(adapter); | ||
185 | else | ||
186 | ixgbe_cache_ring_rss(adapter); | ||
187 | |||
188 | fcoe_rx_i = f->mask; | ||
189 | fcoe_tx_i = f->mask; | ||
190 | } | ||
191 | for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) { | ||
192 | adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i; | ||
193 | adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i; | ||
194 | } | ||
195 | return true; | ||
196 | } | ||
197 | |||
198 | #endif /* IXGBE_FCOE */ | ||
199 | /** | ||
200 | * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov | ||
201 | * @adapter: board private structure to initialize | ||
202 | * | ||
203 | * SR-IOV doesn't use any descriptor rings but changes the default if | ||
204 | * no other mapping is used. | ||
205 | * | ||
206 | */ | ||
207 | static inline bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter) | ||
208 | { | ||
209 | adapter->rx_ring[0]->reg_idx = adapter->num_vfs * 2; | ||
210 | adapter->tx_ring[0]->reg_idx = adapter->num_vfs * 2; | ||
211 | if (adapter->num_vfs) | ||
212 | return true; | ||
213 | else | ||
214 | return false; | ||
215 | } | ||
216 | |||
217 | /** | ||
218 | * ixgbe_cache_ring_register - Descriptor ring to register mapping | ||
219 | * @adapter: board private structure to initialize | ||
220 | * | ||
221 | * Once we know the feature-set enabled for the device, we'll cache | ||
222 | * the register offset the descriptor ring is assigned to. | ||
223 | * | ||
224 | * Note, the order the various feature calls is important. It must start with | ||
225 | * the "most" features enabled at the same time, then trickle down to the | ||
226 | * least amount of features turned on at once. | ||
227 | **/ | ||
228 | static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) | ||
229 | { | ||
230 | /* start with default case */ | ||
231 | adapter->rx_ring[0]->reg_idx = 0; | ||
232 | adapter->tx_ring[0]->reg_idx = 0; | ||
233 | |||
234 | if (ixgbe_cache_ring_sriov(adapter)) | ||
235 | return; | ||
236 | |||
237 | #ifdef CONFIG_IXGBE_DCB | ||
238 | if (ixgbe_cache_ring_dcb(adapter)) | ||
239 | return; | ||
240 | #endif | ||
241 | |||
242 | #ifdef IXGBE_FCOE | ||
243 | if (ixgbe_cache_ring_fcoe(adapter)) | ||
244 | return; | ||
245 | #endif /* IXGBE_FCOE */ | ||
246 | |||
247 | if (ixgbe_cache_ring_fdir(adapter)) | ||
248 | return; | ||
249 | |||
250 | if (ixgbe_cache_ring_rss(adapter)) | ||
251 | return; | ||
252 | } | ||
253 | |||
254 | /** | ||
255 | * ixgbe_set_sriov_queues: Allocate queues for IOV use | ||
256 | * @adapter: board private structure to initialize | ||
257 | * | ||
258 | * IOV doesn't actually use anything, so just NAK the | ||
259 | * request for now and let the other queue routines | ||
260 | * figure out what to do. | ||
261 | */ | ||
262 | static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter) | ||
263 | { | ||
264 | return false; | ||
265 | } | ||
266 | |||
267 | /** | ||
268 | * ixgbe_set_rss_queues: Allocate queues for RSS | ||
269 | * @adapter: board private structure to initialize | ||
270 | * | ||
271 | * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try | ||
272 | * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU. | ||
273 | * | ||
274 | **/ | ||
275 | static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) | ||
276 | { | ||
277 | bool ret = false; | ||
278 | struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_RSS]; | ||
279 | |||
280 | if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { | ||
281 | f->mask = 0xF; | ||
282 | adapter->num_rx_queues = f->indices; | ||
283 | adapter->num_tx_queues = f->indices; | ||
284 | ret = true; | ||
285 | } | ||
286 | |||
287 | return ret; | ||
288 | } | ||
289 | |||
290 | /** | ||
291 | * ixgbe_set_fdir_queues: Allocate queues for Flow Director | ||
292 | * @adapter: board private structure to initialize | ||
293 | * | ||
294 | * Flow Director is an advanced Rx filter, attempting to get Rx flows back | ||
295 | * to the original CPU that initiated the Tx session. This runs in addition | ||
296 | * to RSS, so if a packet doesn't match an FDIR filter, we can still spread the | ||
297 | * Rx load across CPUs using RSS. | ||
298 | * | ||
299 | **/ | ||
300 | static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter) | ||
301 | { | ||
302 | bool ret = false; | ||
303 | struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR]; | ||
304 | |||
305 | f_fdir->indices = min_t(int, num_online_cpus(), f_fdir->indices); | ||
306 | f_fdir->mask = 0; | ||
307 | |||
308 | /* | ||
309 | * Use RSS in addition to Flow Director to ensure the best | ||
310 | * distribution of flows across cores, even when an FDIR flow | ||
311 | * isn't matched. | ||
312 | */ | ||
313 | if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) && | ||
314 | (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) { | ||
315 | adapter->num_tx_queues = f_fdir->indices; | ||
316 | adapter->num_rx_queues = f_fdir->indices; | ||
317 | ret = true; | ||
318 | } else { | ||
319 | adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; | ||
320 | } | ||
321 | return ret; | ||
322 | } | ||
323 | |||
324 | #ifdef IXGBE_FCOE | ||
325 | /** | ||
326 | * ixgbe_set_fcoe_queues: Allocate queues for Fiber Channel over Ethernet (FCoE) | ||
327 | * @adapter: board private structure to initialize | ||
328 | * | ||
329 | * FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges. | ||
330 | * The ring feature mask is not used as a mask for FCoE, as it can take any 8 | ||
331 | * rx queues out of the max number of rx queues, instead, it is used as the | ||
332 | * index of the first rx queue used by FCoE. | ||
333 | * | ||
334 | **/ | ||
335 | static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter) | ||
336 | { | ||
337 | struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; | ||
338 | |||
339 | if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) | ||
340 | return false; | ||
341 | |||
342 | f->indices = min_t(int, num_online_cpus(), f->indices); | ||
343 | |||
344 | adapter->num_rx_queues = 1; | ||
345 | adapter->num_tx_queues = 1; | ||
346 | |||
347 | if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { | ||
348 | e_info(probe, "FCoE enabled with RSS\n"); | ||
349 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) | ||
350 | ixgbe_set_fdir_queues(adapter); | ||
351 | else | ||
352 | ixgbe_set_rss_queues(adapter); | ||
353 | } | ||
354 | |||
355 | /* adding FCoE rx rings to the end */ | ||
356 | f->mask = adapter->num_rx_queues; | ||
357 | adapter->num_rx_queues += f->indices; | ||
358 | adapter->num_tx_queues += f->indices; | ||
359 | |||
360 | return true; | ||
361 | } | ||
362 | #endif /* IXGBE_FCOE */ | ||
363 | |||
364 | /* Artificial max queue cap per traffic class in DCB mode */ | ||
365 | #define DCB_QUEUE_CAP 8 | ||
366 | |||
367 | #ifdef CONFIG_IXGBE_DCB | ||
368 | static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter) | ||
369 | { | ||
370 | int per_tc_q, q, i, offset = 0; | ||
371 | struct net_device *dev = adapter->netdev; | ||
372 | int tcs = netdev_get_num_tc(dev); | ||
373 | |||
374 | if (!tcs) | ||
375 | return false; | ||
376 | |||
377 | /* Map queue offset and counts onto allocated tx queues */ | ||
378 | per_tc_q = min_t(unsigned int, dev->num_tx_queues / tcs, DCB_QUEUE_CAP); | ||
379 | q = min_t(int, num_online_cpus(), per_tc_q); | ||
380 | |||
381 | for (i = 0; i < tcs; i++) { | ||
382 | netdev_set_tc_queue(dev, i, q, offset); | ||
383 | offset += q; | ||
384 | } | ||
385 | |||
386 | adapter->num_tx_queues = q * tcs; | ||
387 | adapter->num_rx_queues = q * tcs; | ||
388 | |||
389 | #ifdef IXGBE_FCOE | ||
390 | /* FCoE enabled queues require special configuration indexed | ||
391 | * by feature specific indices and mask. Here we map FCoE | ||
392 | * indices onto the DCB queue pairs allowing FCoE to own | ||
393 | * configuration later. | ||
394 | */ | ||
395 | if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { | ||
396 | u8 prio_tc[MAX_USER_PRIORITY] = {0}; | ||
397 | int tc; | ||
398 | struct ixgbe_ring_feature *f = | ||
399 | &adapter->ring_feature[RING_F_FCOE]; | ||
400 | |||
401 | ixgbe_dcb_unpack_map(&adapter->dcb_cfg, DCB_TX_CONFIG, prio_tc); | ||
402 | tc = prio_tc[adapter->fcoe.up]; | ||
403 | f->indices = dev->tc_to_txq[tc].count; | ||
404 | f->mask = dev->tc_to_txq[tc].offset; | ||
405 | } | ||
406 | #endif | ||
407 | |||
408 | return true; | ||
409 | } | ||
410 | #endif | ||
411 | |||
412 | /** | ||
413 | * ixgbe_set_num_queues: Allocate queues for device, feature dependent | ||
414 | * @adapter: board private structure to initialize | ||
415 | * | ||
416 | * This is the top level queue allocation routine. The order here is very | ||
417 | * important, starting with the "most" number of features turned on at once, | ||
418 | * and ending with the smallest set of features. This way large combinations | ||
419 | * can be allocated if they're turned on, and smaller combinations are the | ||
420 | * fallthrough conditions. | ||
421 | * | ||
422 | **/ | ||
423 | static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter) | ||
424 | { | ||
425 | /* Start with base case */ | ||
426 | adapter->num_rx_queues = 1; | ||
427 | adapter->num_tx_queues = 1; | ||
428 | adapter->num_rx_pools = adapter->num_rx_queues; | ||
429 | adapter->num_rx_queues_per_pool = 1; | ||
430 | |||
431 | if (ixgbe_set_sriov_queues(adapter)) | ||
432 | goto done; | ||
433 | |||
434 | #ifdef CONFIG_IXGBE_DCB | ||
435 | if (ixgbe_set_dcb_queues(adapter)) | ||
436 | goto done; | ||
437 | |||
438 | #endif | ||
439 | #ifdef IXGBE_FCOE | ||
440 | if (ixgbe_set_fcoe_queues(adapter)) | ||
441 | goto done; | ||
442 | |||
443 | #endif /* IXGBE_FCOE */ | ||
444 | if (ixgbe_set_fdir_queues(adapter)) | ||
445 | goto done; | ||
446 | |||
447 | if (ixgbe_set_rss_queues(adapter)) | ||
448 | goto done; | ||
449 | |||
450 | /* fallback to base case */ | ||
451 | adapter->num_rx_queues = 1; | ||
452 | adapter->num_tx_queues = 1; | ||
453 | |||
454 | done: | ||
455 | if ((adapter->netdev->reg_state == NETREG_UNREGISTERED) || | ||
456 | (adapter->netdev->reg_state == NETREG_UNREGISTERING)) | ||
457 | return 0; | ||
458 | |||
459 | /* Notify the stack of the (possibly) reduced queue counts. */ | ||
460 | netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues); | ||
461 | return netif_set_real_num_rx_queues(adapter->netdev, | ||
462 | adapter->num_rx_queues); | ||
463 | } | ||
464 | |||
465 | static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, | ||
466 | int vectors) | ||
467 | { | ||
468 | int err, vector_threshold; | ||
469 | |||
470 | /* We'll want at least 2 (vector_threshold): | ||
471 | * 1) TxQ[0] + RxQ[0] handler | ||
472 | * 2) Other (Link Status Change, etc.) | ||
473 | */ | ||
474 | vector_threshold = MIN_MSIX_COUNT; | ||
475 | |||
476 | /* | ||
477 | * The more we get, the more we will assign to Tx/Rx Cleanup | ||
478 | * for the separate queues...where Rx Cleanup >= Tx Cleanup. | ||
479 | * Right now, we simply care about how many we'll get; we'll | ||
480 | * set them up later while requesting irq's. | ||
481 | */ | ||
482 | while (vectors >= vector_threshold) { | ||
483 | err = pci_enable_msix(adapter->pdev, adapter->msix_entries, | ||
484 | vectors); | ||
485 | if (!err) /* Success in acquiring all requested vectors. */ | ||
486 | break; | ||
487 | else if (err < 0) | ||
488 | vectors = 0; /* Nasty failure, quit now */ | ||
489 | else /* err == number of vectors we should try again with */ | ||
490 | vectors = err; | ||
491 | } | ||
492 | |||
493 | if (vectors < vector_threshold) { | ||
494 | /* Can't allocate enough MSI-X interrupts? Oh well. | ||
495 | * This just means we'll go with either a single MSI | ||
496 | * vector or fall back to legacy interrupts. | ||
497 | */ | ||
498 | netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev, | ||
499 | "Unable to allocate MSI-X interrupts\n"); | ||
500 | adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; | ||
501 | kfree(adapter->msix_entries); | ||
502 | adapter->msix_entries = NULL; | ||
503 | } else { | ||
504 | adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */ | ||
505 | /* | ||
506 | * Adjust for only the vectors we'll use, which is minimum | ||
507 | * of max_msix_q_vectors + NON_Q_VECTORS, or the number of | ||
508 | * vectors we were allocated. | ||
509 | */ | ||
510 | adapter->num_msix_vectors = min(vectors, | ||
511 | adapter->max_msix_q_vectors + NON_Q_VECTORS); | ||
512 | } | ||
513 | } | ||
514 | |||
515 | static void ixgbe_add_ring(struct ixgbe_ring *ring, | ||
516 | struct ixgbe_ring_container *head) | ||
517 | { | ||
518 | ring->next = head->ring; | ||
519 | head->ring = ring; | ||
520 | head->count++; | ||
521 | } | ||
522 | |||
523 | /** | ||
524 | * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector | ||
525 | * @adapter: board private structure to initialize | ||
526 | * @v_idx: index of vector in adapter struct | ||
527 | * | ||
528 | * We allocate one q_vector. If allocation fails we return -ENOMEM. | ||
529 | **/ | ||
530 | static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, int v_idx, | ||
531 | int txr_count, int txr_idx, | ||
532 | int rxr_count, int rxr_idx) | ||
533 | { | ||
534 | struct ixgbe_q_vector *q_vector; | ||
535 | struct ixgbe_ring *ring; | ||
536 | int node = -1; | ||
537 | int cpu = -1; | ||
538 | int ring_count, size; | ||
539 | |||
540 | ring_count = txr_count + rxr_count; | ||
541 | size = sizeof(struct ixgbe_q_vector) + | ||
542 | (sizeof(struct ixgbe_ring) * ring_count); | ||
543 | |||
544 | /* customize cpu for Flow Director mapping */ | ||
545 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { | ||
546 | if (cpu_online(v_idx)) { | ||
547 | cpu = v_idx; | ||
548 | node = cpu_to_node(cpu); | ||
549 | } | ||
550 | } | ||
551 | |||
552 | /* allocate q_vector and rings */ | ||
553 | q_vector = kzalloc_node(size, GFP_KERNEL, node); | ||
554 | if (!q_vector) | ||
555 | q_vector = kzalloc(size, GFP_KERNEL); | ||
556 | if (!q_vector) | ||
557 | return -ENOMEM; | ||
558 | |||
559 | /* setup affinity mask and node */ | ||
560 | if (cpu != -1) | ||
561 | cpumask_set_cpu(cpu, &q_vector->affinity_mask); | ||
562 | else | ||
563 | cpumask_copy(&q_vector->affinity_mask, cpu_online_mask); | ||
564 | q_vector->numa_node = node; | ||
565 | |||
566 | /* initialize NAPI */ | ||
567 | netif_napi_add(adapter->netdev, &q_vector->napi, | ||
568 | ixgbe_poll, 64); | ||
569 | |||
570 | /* tie q_vector and adapter together */ | ||
571 | adapter->q_vector[v_idx] = q_vector; | ||
572 | q_vector->adapter = adapter; | ||
573 | q_vector->v_idx = v_idx; | ||
574 | |||
575 | /* initialize work limits */ | ||
576 | q_vector->tx.work_limit = adapter->tx_work_limit; | ||
577 | |||
578 | /* initialize pointer to rings */ | ||
579 | ring = q_vector->ring; | ||
580 | |||
581 | while (txr_count) { | ||
582 | /* assign generic ring traits */ | ||
583 | ring->dev = &adapter->pdev->dev; | ||
584 | ring->netdev = adapter->netdev; | ||
585 | |||
586 | /* configure backlink on ring */ | ||
587 | ring->q_vector = q_vector; | ||
588 | |||
589 | /* update q_vector Tx values */ | ||
590 | ixgbe_add_ring(ring, &q_vector->tx); | ||
591 | |||
592 | /* apply Tx specific ring traits */ | ||
593 | ring->count = adapter->tx_ring_count; | ||
594 | ring->queue_index = txr_idx; | ||
595 | |||
596 | /* assign ring to adapter */ | ||
597 | adapter->tx_ring[txr_idx] = ring; | ||
598 | |||
599 | /* update count and index */ | ||
600 | txr_count--; | ||
601 | txr_idx++; | ||
602 | |||
603 | /* push pointer to next ring */ | ||
604 | ring++; | ||
605 | } | ||
606 | |||
607 | while (rxr_count) { | ||
608 | /* assign generic ring traits */ | ||
609 | ring->dev = &adapter->pdev->dev; | ||
610 | ring->netdev = adapter->netdev; | ||
611 | |||
612 | /* configure backlink on ring */ | ||
613 | ring->q_vector = q_vector; | ||
614 | |||
615 | /* update q_vector Rx values */ | ||
616 | ixgbe_add_ring(ring, &q_vector->rx); | ||
617 | |||
618 | /* | ||
619 | * 82599 errata, UDP frames with a 0 checksum | ||
620 | * can be marked as checksum errors. | ||
621 | */ | ||
622 | if (adapter->hw.mac.type == ixgbe_mac_82599EB) | ||
623 | set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state); | ||
624 | |||
625 | /* apply Rx specific ring traits */ | ||
626 | ring->count = adapter->rx_ring_count; | ||
627 | ring->queue_index = rxr_idx; | ||
628 | |||
629 | /* assign ring to adapter */ | ||
630 | adapter->rx_ring[rxr_idx] = ring; | ||
631 | |||
632 | /* update count and index */ | ||
633 | rxr_count--; | ||
634 | rxr_idx++; | ||
635 | |||
636 | /* push pointer to next ring */ | ||
637 | ring++; | ||
638 | } | ||
639 | |||
640 | return 0; | ||
641 | } | ||
642 | |||
643 | /** | ||
644 | * ixgbe_free_q_vector - Free memory allocated for specific interrupt vector | ||
645 | * @adapter: board private structure to initialize | ||
646 | * @v_idx: Index of vector to be freed | ||
647 | * | ||
648 | * This function frees the memory allocated to the q_vector. In addition if | ||
649 | * NAPI is enabled it will delete any references to the NAPI struct prior | ||
650 | * to freeing the q_vector. | ||
651 | **/ | ||
652 | static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx) | ||
653 | { | ||
654 | struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx]; | ||
655 | struct ixgbe_ring *ring; | ||
656 | |||
657 | ixgbe_for_each_ring(ring, q_vector->tx) | ||
658 | adapter->tx_ring[ring->queue_index] = NULL; | ||
659 | |||
660 | ixgbe_for_each_ring(ring, q_vector->rx) | ||
661 | adapter->rx_ring[ring->queue_index] = NULL; | ||
662 | |||
663 | adapter->q_vector[v_idx] = NULL; | ||
664 | netif_napi_del(&q_vector->napi); | ||
665 | |||
666 | /* | ||
667 | * ixgbe_get_stats64() might access the rings on this vector, | ||
668 | * we must wait a grace period before freeing it. | ||
669 | */ | ||
670 | kfree_rcu(q_vector, rcu); | ||
671 | } | ||
672 | |||
673 | /** | ||
674 | * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors | ||
675 | * @adapter: board private structure to initialize | ||
676 | * | ||
677 | * We allocate one q_vector per queue interrupt. If allocation fails we | ||
678 | * return -ENOMEM. | ||
679 | **/ | ||
680 | static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter) | ||
681 | { | ||
682 | int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; | ||
683 | int rxr_remaining = adapter->num_rx_queues; | ||
684 | int txr_remaining = adapter->num_tx_queues; | ||
685 | int rxr_idx = 0, txr_idx = 0, v_idx = 0; | ||
686 | int err; | ||
687 | |||
688 | /* only one q_vector if MSI-X is disabled. */ | ||
689 | if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) | ||
690 | q_vectors = 1; | ||
691 | |||
692 | if (q_vectors >= (rxr_remaining + txr_remaining)) { | ||
693 | for (; rxr_remaining; v_idx++, q_vectors--) { | ||
694 | int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors); | ||
695 | err = ixgbe_alloc_q_vector(adapter, v_idx, | ||
696 | 0, 0, rqpv, rxr_idx); | ||
697 | |||
698 | if (err) | ||
699 | goto err_out; | ||
700 | |||
701 | /* update counts and index */ | ||
702 | rxr_remaining -= rqpv; | ||
703 | rxr_idx += rqpv; | ||
704 | } | ||
705 | } | ||
706 | |||
707 | for (; q_vectors; v_idx++, q_vectors--) { | ||
708 | int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors); | ||
709 | int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors); | ||
710 | err = ixgbe_alloc_q_vector(adapter, v_idx, | ||
711 | tqpv, txr_idx, | ||
712 | rqpv, rxr_idx); | ||
713 | |||
714 | if (err) | ||
715 | goto err_out; | ||
716 | |||
717 | /* update counts and index */ | ||
718 | rxr_remaining -= rqpv; | ||
719 | rxr_idx += rqpv; | ||
720 | txr_remaining -= tqpv; | ||
721 | txr_idx += tqpv; | ||
722 | } | ||
723 | |||
724 | return 0; | ||
725 | |||
726 | err_out: | ||
727 | while (v_idx) { | ||
728 | v_idx--; | ||
729 | ixgbe_free_q_vector(adapter, v_idx); | ||
730 | } | ||
731 | |||
732 | return -ENOMEM; | ||
733 | } | ||
734 | |||
735 | /** | ||
736 | * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors | ||
737 | * @adapter: board private structure to initialize | ||
738 | * | ||
739 | * This function frees the memory allocated to the q_vectors. In addition if | ||
740 | * NAPI is enabled it will delete any references to the NAPI struct prior | ||
741 | * to freeing the q_vector. | ||
742 | **/ | ||
743 | static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter) | ||
744 | { | ||
745 | int v_idx, q_vectors; | ||
746 | |||
747 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) | ||
748 | q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; | ||
749 | else | ||
750 | q_vectors = 1; | ||
751 | |||
752 | for (v_idx = 0; v_idx < q_vectors; v_idx++) | ||
753 | ixgbe_free_q_vector(adapter, v_idx); | ||
754 | } | ||
755 | |||
756 | static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter) | ||
757 | { | ||
758 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { | ||
759 | adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; | ||
760 | pci_disable_msix(adapter->pdev); | ||
761 | kfree(adapter->msix_entries); | ||
762 | adapter->msix_entries = NULL; | ||
763 | } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { | ||
764 | adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED; | ||
765 | pci_disable_msi(adapter->pdev); | ||
766 | } | ||
767 | } | ||
768 | |||
769 | /** | ||
770 | * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported | ||
771 | * @adapter: board private structure to initialize | ||
772 | * | ||
773 | * Attempt to configure the interrupts using the best available | ||
774 | * capabilities of the hardware and the kernel. | ||
775 | **/ | ||
776 | static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) | ||
777 | { | ||
778 | struct ixgbe_hw *hw = &adapter->hw; | ||
779 | int err = 0; | ||
780 | int vector, v_budget; | ||
781 | |||
782 | /* | ||
783 | * It's easy to be greedy for MSI-X vectors, but it really | ||
784 | * doesn't do us much good if we have a lot more vectors | ||
785 | * than CPU's. So let's be conservative and only ask for | ||
786 | * (roughly) the same number of vectors as there are CPU's. | ||
787 | * The default is to use pairs of vectors. | ||
788 | */ | ||
789 | v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues); | ||
790 | v_budget = min_t(int, v_budget, num_online_cpus()); | ||
791 | v_budget += NON_Q_VECTORS; | ||
792 | |||
793 | /* | ||
794 | * At the same time, hardware can only support a maximum of | ||
795 | * hw.mac->max_msix_vectors vectors. With features | ||
796 | * such as RSS and VMDq, we can easily surpass the number of Rx and Tx | ||
797 | * descriptor queues supported by our device. Thus, we cap it off in | ||
798 | * those rare cases where the cpu count also exceeds our vector limit. | ||
799 | */ | ||
800 | v_budget = min_t(int, v_budget, hw->mac.max_msix_vectors); | ||
801 | |||
802 | /* A failure in MSI-X entry allocation isn't fatal, but it does | ||
803 | * mean we disable MSI-X capabilities of the adapter. */ | ||
804 | adapter->msix_entries = kcalloc(v_budget, | ||
805 | sizeof(struct msix_entry), GFP_KERNEL); | ||
806 | if (adapter->msix_entries) { | ||
807 | for (vector = 0; vector < v_budget; vector++) | ||
808 | adapter->msix_entries[vector].entry = vector; | ||
809 | |||
810 | ixgbe_acquire_msix_vectors(adapter, v_budget); | ||
811 | |||
812 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) | ||
813 | goto out; | ||
814 | } | ||
815 | |||
816 | adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; | ||
817 | adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; | ||
818 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { | ||
819 | e_err(probe, | ||
820 | "ATR is not supported while multiple " | ||
821 | "queues are disabled. Disabling Flow Director\n"); | ||
822 | } | ||
823 | adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; | ||
824 | adapter->atr_sample_rate = 0; | ||
825 | if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) | ||
826 | ixgbe_disable_sriov(adapter); | ||
827 | |||
828 | err = ixgbe_set_num_queues(adapter); | ||
829 | if (err) | ||
830 | return err; | ||
831 | |||
832 | err = pci_enable_msi(adapter->pdev); | ||
833 | if (!err) { | ||
834 | adapter->flags |= IXGBE_FLAG_MSI_ENABLED; | ||
835 | } else { | ||
836 | netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev, | ||
837 | "Unable to allocate MSI interrupt, " | ||
838 | "falling back to legacy. Error: %d\n", err); | ||
839 | /* reset err */ | ||
840 | err = 0; | ||
841 | } | ||
842 | |||
843 | out: | ||
844 | return err; | ||
845 | } | ||
846 | |||
847 | /** | ||
848 | * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme | ||
849 | * @adapter: board private structure to initialize | ||
850 | * | ||
851 | * We determine which interrupt scheme to use based on... | ||
852 | * - Kernel support (MSI, MSI-X) | ||
853 | * - which can be user-defined (via MODULE_PARAM) | ||
854 | * - Hardware queue count (num_*_queues) | ||
855 | * - defined by miscellaneous hardware support/features (RSS, etc.) | ||
856 | **/ | ||
857 | int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter) | ||
858 | { | ||
859 | int err; | ||
860 | |||
861 | /* Number of supported queues */ | ||
862 | err = ixgbe_set_num_queues(adapter); | ||
863 | if (err) | ||
864 | return err; | ||
865 | |||
866 | err = ixgbe_set_interrupt_capability(adapter); | ||
867 | if (err) { | ||
868 | e_dev_err("Unable to setup interrupt capabilities\n"); | ||
869 | goto err_set_interrupt; | ||
870 | } | ||
871 | |||
872 | err = ixgbe_alloc_q_vectors(adapter); | ||
873 | if (err) { | ||
874 | e_dev_err("Unable to allocate memory for queue vectors\n"); | ||
875 | goto err_alloc_q_vectors; | ||
876 | } | ||
877 | |||
878 | ixgbe_cache_ring_register(adapter); | ||
879 | |||
880 | e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n", | ||
881 | (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled", | ||
882 | adapter->num_rx_queues, adapter->num_tx_queues); | ||
883 | |||
884 | set_bit(__IXGBE_DOWN, &adapter->state); | ||
885 | |||
886 | return 0; | ||
887 | |||
888 | err_alloc_q_vectors: | ||
889 | ixgbe_reset_interrupt_capability(adapter); | ||
890 | err_set_interrupt: | ||
891 | return err; | ||
892 | } | ||
893 | |||
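The error paths in ixgbe_init_interrupt_scheme() follow the usual kernel goto-unwind idiom: each failing step jumps to a label that tears down only the steps that already succeeded, in reverse order. A minimal sketch, with stub functions standing in for the real helpers:

#include <stdio.h>

static int set_num_queues_stub(void)            { return 0; }
static int set_interrupt_capability_stub(void)  { return 0; }
static int alloc_q_vectors_stub(void)           { return -12; } /* -ENOMEM */
static void reset_interrupt_capability_stub(void)
{
	printf("rolling back interrupt capability\n");
}

static int init_interrupt_scheme(void)
{
	int err = set_num_queues_stub();
	if (err)
		return err;

	err = set_interrupt_capability_stub();
	if (err)
		goto err_set_interrupt;

	err = alloc_q_vectors_stub();
	if (err)
		goto err_alloc_q_vectors;

	return 0;

err_alloc_q_vectors:
	reset_interrupt_capability_stub(); /* undo only what succeeded */
err_set_interrupt:
	return err;
}

int main(void)
{
	printf("init_interrupt_scheme() = %d\n", init_interrupt_scheme());
	return 0;
}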
894 | /** | ||
895 | * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings | ||
896 | * @adapter: board private structure to clear interrupt scheme on | ||
897 | * | ||
898 | * We go through and clear interrupt specific resources and reset the structure | ||
899 | * to pre-load conditions | ||
900 | **/ | ||
901 | void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter) | ||
902 | { | ||
903 | adapter->num_tx_queues = 0; | ||
904 | adapter->num_rx_queues = 0; | ||
905 | |||
906 | ixgbe_free_q_vectors(adapter); | ||
907 | ixgbe_reset_interrupt_capability(adapter); | ||
908 | } | ||
909 | |||
910 | void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens, | ||
911 | u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx) | ||
912 | { | ||
913 | struct ixgbe_adv_tx_context_desc *context_desc; | ||
914 | u16 i = tx_ring->next_to_use; | ||
915 | |||
916 | context_desc = IXGBE_TX_CTXTDESC(tx_ring, i); | ||
917 | |||
918 | i++; | ||
919 | tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; | ||
920 | |||
921 | /* set bits to identify this as an advanced context descriptor */ | ||
922 | type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT; | ||
923 | |||
924 | context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); | ||
925 | context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof); | ||
926 | context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); | ||
927 | context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); | ||
928 | } | ||
929 | |||
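The next_to_use update in ixgbe_tx_ctxtdesc() is the standard branch-based ring wraparound, which avoids a modulo in the hot path. A standalone rendering of the same arithmetic:

#include <assert.h>

/* Bump the ring index and reset it to zero once it reaches count. */
static unsigned short ring_advance(unsigned short i, unsigned short count)
{
	i++;
	return (i < count) ? i : 0;
}

int main(void)
{
	assert(ring_advance(510, 512) == 511);
	assert(ring_advance(511, 512) == 0); /* wraps */
	return 0;
}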
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 1d8f9f83f8ed..398fc223cab9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | |||
@@ -55,8 +55,13 @@ | |||
55 | char ixgbe_driver_name[] = "ixgbe"; | 55 | char ixgbe_driver_name[] = "ixgbe"; |
56 | static const char ixgbe_driver_string[] = | 56 | static const char ixgbe_driver_string[] = |
57 | "Intel(R) 10 Gigabit PCI Express Network Driver"; | 57 | "Intel(R) 10 Gigabit PCI Express Network Driver"; |
58 | #ifdef IXGBE_FCOE | ||
58 | char ixgbe_default_device_descr[] = | 59 | char ixgbe_default_device_descr[] = |
59 | "Intel(R) 10 Gigabit Network Connection"; | 60 | "Intel(R) 10 Gigabit Network Connection"; |
61 | #else | ||
62 | static char ixgbe_default_device_descr[] = | ||
63 | "Intel(R) 10 Gigabit Network Connection"; | ||
64 | #endif | ||
60 | #define MAJ 3 | 65 | #define MAJ 3 |
61 | #define MIN 6 | 66 | #define MIN 6 |
62 | #define BUILD 7 | 67 | #define BUILD 7 |
@@ -2308,6 +2313,55 @@ static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data) | |||
2308 | } | 2313 | } |
2309 | 2314 | ||
2310 | /** | 2315 | /** |
2316 | * ixgbe_poll - NAPI Rx polling callback | ||
2317 | * @napi: structure for representing this polling device | ||
2318 | * @budget: how many packets driver is allowed to clean | ||
2319 | * | ||
2320 | * This function is used for legacy and MSI interrupts, in NAPI mode | ||
2321 | **/ | ||
2322 | int ixgbe_poll(struct napi_struct *napi, int budget) | ||
2323 | { | ||
2324 | struct ixgbe_q_vector *q_vector = | ||
2325 | container_of(napi, struct ixgbe_q_vector, napi); | ||
2326 | struct ixgbe_adapter *adapter = q_vector->adapter; | ||
2327 | struct ixgbe_ring *ring; | ||
2328 | int per_ring_budget; | ||
2329 | bool clean_complete = true; | ||
2330 | |||
2331 | #ifdef CONFIG_IXGBE_DCA | ||
2332 | if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) | ||
2333 | ixgbe_update_dca(q_vector); | ||
2334 | #endif | ||
2335 | |||
2336 | ixgbe_for_each_ring(ring, q_vector->tx) | ||
2337 | clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring); | ||
2338 | |||
2339 | /* attempt to distribute budget to each queue fairly, but don't allow | ||
2340 | * the budget to go below 1 because we'll exit polling */ | ||
2341 | if (q_vector->rx.count > 1) | ||
2342 | per_ring_budget = max(budget/q_vector->rx.count, 1); | ||
2343 | else | ||
2344 | per_ring_budget = budget; | ||
2345 | |||
2346 | ixgbe_for_each_ring(ring, q_vector->rx) | ||
2347 | clean_complete &= ixgbe_clean_rx_irq(q_vector, ring, | ||
2348 | per_ring_budget); | ||
2349 | |||
2350 | /* If all work not completed, return budget and keep polling */ | ||
2351 | if (!clean_complete) | ||
2352 | return budget; | ||
2353 | |||
2354 | /* all work done, exit the polling mode */ | ||
2355 | napi_complete(napi); | ||
2356 | if (adapter->rx_itr_setting & 1) | ||
2357 | ixgbe_set_itr(q_vector); | ||
2358 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) | ||
2359 | ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx)); | ||
2360 | |||
2361 | return 0; | ||
2362 | } | ||
2363 | |||
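The budget split in ixgbe_poll() guarantees every Rx ring sees a budget of at least 1; a ring polled with 0 would report itself clean and NAPI could exit with work still pending. The same math as a standalone sketch:

#include <stdio.h>

static int per_ring_budget(int budget, int rx_ring_count)
{
	if (rx_ring_count > 1) {
		int share = budget / rx_ring_count;
		return share > 0 ? share : 1; /* never hand a ring 0 */
	}
	return budget;
}

int main(void)
{
	printf("%d\n", per_ring_budget(64, 3)); /* 21 */
	printf("%d\n", per_ring_budget(2, 4));  /* 1, clamped up */
	return 0;
}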
2364 | /** | ||
2311 | * ixgbe_request_msix_irqs - Initialize MSI-X interrupts | 2365 | * ixgbe_request_msix_irqs - Initialize MSI-X interrupts |
2312 | * @adapter: board private structure | 2366 | * @adapter: board private structure |
2313 | * | 2367 | * |
@@ -2807,6 +2861,11 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) | |||
2807 | | IXGBE_MRQC_RSS_FIELD_IPV6 | 2861 | | IXGBE_MRQC_RSS_FIELD_IPV6 |
2808 | | IXGBE_MRQC_RSS_FIELD_IPV6_TCP; | 2862 | | IXGBE_MRQC_RSS_FIELD_IPV6_TCP; |
2809 | 2863 | ||
2864 | if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP) | ||
2865 | mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP; | ||
2866 | if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP) | ||
2867 | mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP; | ||
2868 | |||
2810 | IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); | 2869 | IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); |
2811 | } | 2870 | } |
2812 | 2871 | ||
@@ -4254,55 +4313,6 @@ void ixgbe_down(struct ixgbe_adapter *adapter) | |||
4254 | } | 4313 | } |
4255 | 4314 | ||
4256 | /** | 4315 | /** |
4257 | * ixgbe_poll - NAPI Rx polling callback | ||
4258 | * @napi: structure for representing this polling device | ||
4259 | * @budget: how many packets driver is allowed to clean | ||
4260 | * | ||
4261 | * This function is used for legacy and MSI interrupts, in NAPI mode | ||
4262 | **/ | ||
4263 | static int ixgbe_poll(struct napi_struct *napi, int budget) | ||
4264 | { | ||
4265 | struct ixgbe_q_vector *q_vector = | ||
4266 | container_of(napi, struct ixgbe_q_vector, napi); | ||
4267 | struct ixgbe_adapter *adapter = q_vector->adapter; | ||
4268 | struct ixgbe_ring *ring; | ||
4269 | int per_ring_budget; | ||
4270 | bool clean_complete = true; | ||
4271 | |||
4272 | #ifdef CONFIG_IXGBE_DCA | ||
4273 | if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) | ||
4274 | ixgbe_update_dca(q_vector); | ||
4275 | #endif | ||
4276 | |||
4277 | ixgbe_for_each_ring(ring, q_vector->tx) | ||
4278 | clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring); | ||
4279 | |||
4280 | /* attempt to distribute budget to each queue fairly, but don't allow | ||
4281 | * the budget to go below 1 because we'll exit polling */ | ||
4282 | if (q_vector->rx.count > 1) | ||
4283 | per_ring_budget = max(budget/q_vector->rx.count, 1); | ||
4284 | else | ||
4285 | per_ring_budget = budget; | ||
4286 | |||
4287 | ixgbe_for_each_ring(ring, q_vector->rx) | ||
4288 | clean_complete &= ixgbe_clean_rx_irq(q_vector, ring, | ||
4289 | per_ring_budget); | ||
4290 | |||
4291 | /* If all work not completed, return budget and keep polling */ | ||
4292 | if (!clean_complete) | ||
4293 | return budget; | ||
4294 | |||
4295 | /* all work done, exit the polling mode */ | ||
4296 | napi_complete(napi); | ||
4297 | if (adapter->rx_itr_setting & 1) | ||
4298 | ixgbe_set_itr(q_vector); | ||
4299 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) | ||
4300 | ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx)); | ||
4301 | |||
4302 | return 0; | ||
4303 | } | ||
4304 | |||
4305 | /** | ||
4306 | * ixgbe_tx_timeout - Respond to a Tx Hang | 4316 | * ixgbe_tx_timeout - Respond to a Tx Hang |
4307 | * @netdev: network interface device structure | 4317 | * @netdev: network interface device structure |
4308 | **/ | 4318 | **/ |
@@ -4315,888 +4325,6 @@ static void ixgbe_tx_timeout(struct net_device *netdev) | |||
4315 | } | 4325 | } |
4316 | 4326 | ||
4317 | /** | 4327 | /** |
4318 | * ixgbe_set_rss_queues: Allocate queues for RSS | ||
4319 | * @adapter: board private structure to initialize | ||
4320 | * | ||
4321 | * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try | ||
4322 | * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU. | ||
4323 | * | ||
4324 | **/ | ||
4325 | static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) | ||
4326 | { | ||
4327 | bool ret = false; | ||
4328 | struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_RSS]; | ||
4329 | |||
4330 | if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { | ||
4331 | f->mask = 0xF; | ||
4332 | adapter->num_rx_queues = f->indices; | ||
4333 | adapter->num_tx_queues = f->indices; | ||
4334 | ret = true; | ||
4335 | } else { | ||
4336 | ret = false; | ||
4337 | } | ||
4338 | |||
4339 | return ret; | ||
4340 | } | ||
4341 | |||
4342 | /** | ||
4343 | * ixgbe_set_fdir_queues: Allocate queues for Flow Director | ||
4344 | * @adapter: board private structure to initialize | ||
4345 | * | ||
4346 | * Flow Director is an advanced Rx filter, attempting to get Rx flows back | ||
4347 | * to the original CPU that initiated the Tx session. This runs in addition | ||
4348 | * to RSS, so if a packet doesn't match an FDIR filter, we can still spread the | ||
4349 | * Rx load across CPUs using RSS. | ||
4350 | * | ||
4351 | **/ | ||
4352 | static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter) | ||
4353 | { | ||
4354 | bool ret = false; | ||
4355 | struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR]; | ||
4356 | |||
4357 | f_fdir->indices = min((int)num_online_cpus(), f_fdir->indices); | ||
4358 | f_fdir->mask = 0; | ||
4359 | |||
4360 | /* | ||
4361 | * Use RSS in addition to Flow Director to ensure the best | ||
4362 | * distribution of flows across cores, even when an FDIR flow | ||
4363 | * isn't matched. | ||
4364 | */ | ||
4365 | if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) && | ||
4366 | (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) { | ||
4367 | adapter->num_tx_queues = f_fdir->indices; | ||
4368 | adapter->num_rx_queues = f_fdir->indices; | ||
4369 | ret = true; | ||
4370 | } else { | ||
4371 | adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; | ||
4372 | } | ||
4373 | return ret; | ||
4374 | } | ||
4375 | |||
4376 | #ifdef IXGBE_FCOE | ||
4377 | /** | ||
4378 | * ixgbe_set_fcoe_queues: Allocate queues for Fibre Channel over Ethernet (FCoE) | ||
4379 | * @adapter: board private structure to initialize | ||
4380 | * | ||
4381 | * FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges. | ||
4382 | * The ring feature mask is not used as a mask for FCoE, as it can take any 8 | ||
4383 | * rx queues out of the max number of rx queues; instead, it is used as the | ||
4384 | * index of the first rx queue used by FCoE. | ||
4385 | * | ||
4386 | **/ | ||
4387 | static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter) | ||
4388 | { | ||
4389 | struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; | ||
4390 | |||
4391 | if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) | ||
4392 | return false; | ||
4393 | |||
4394 | f->indices = min_t(int, num_online_cpus(), f->indices); | ||
4395 | |||
4396 | adapter->num_rx_queues = 1; | ||
4397 | adapter->num_tx_queues = 1; | ||
4398 | |||
4399 | if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { | ||
4400 | e_info(probe, "FCoE enabled with RSS\n"); | ||
4401 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) | ||
4402 | ixgbe_set_fdir_queues(adapter); | ||
4403 | else | ||
4404 | ixgbe_set_rss_queues(adapter); | ||
4405 | } | ||
4406 | |||
4407 | /* adding FCoE rx rings to the end */ | ||
4408 | f->mask = adapter->num_rx_queues; | ||
4409 | adapter->num_rx_queues += f->indices; | ||
4410 | adapter->num_tx_queues += f->indices; | ||
4411 | |||
4412 | return true; | ||
4413 | } | ||
4414 | #endif /* IXGBE_FCOE */ | ||
4415 | |||
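Note how ixgbe_set_fcoe_queues() reuses f->mask as an offset rather than a bitmask: the base RSS or Flow Director rings keep the low indices and the FCoE rings are appended after them. A small sketch of the resulting layout, with illustrative counts:

#include <stdio.h>

int main(void)
{
	int base_rx_queues = 8; /* illustrative RSS/FDIR ring count */
	int fcoe_indices = 4;   /* illustrative f->indices (capped at 8) */

	int fcoe_first = base_rx_queues;                   /* f->mask */
	int num_rx_queues = base_rx_queues + fcoe_indices; /* new total */

	printf("FCoE owns rings %d..%d of %d\n",
	       fcoe_first, num_rx_queues - 1, num_rx_queues);
	return 0;
}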
4416 | /* Artificial max queue cap per traffic class in DCB mode */ | ||
4417 | #define DCB_QUEUE_CAP 8 | ||
4418 | |||
4419 | #ifdef CONFIG_IXGBE_DCB | ||
4420 | static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter) | ||
4421 | { | ||
4422 | int per_tc_q, q, i, offset = 0; | ||
4423 | struct net_device *dev = adapter->netdev; | ||
4424 | int tcs = netdev_get_num_tc(dev); | ||
4425 | |||
4426 | if (!tcs) | ||
4427 | return false; | ||
4428 | |||
4429 | /* Map queue offset and counts onto allocated tx queues */ | ||
4430 | per_tc_q = min_t(unsigned int, dev->num_tx_queues / tcs, DCB_QUEUE_CAP); | ||
4431 | q = min_t(int, num_online_cpus(), per_tc_q); | ||
4432 | |||
4433 | for (i = 0; i < tcs; i++) { | ||
4434 | netdev_set_tc_queue(dev, i, q, offset); | ||
4435 | offset += q; | ||
4436 | } | ||
4437 | |||
4438 | adapter->num_tx_queues = q * tcs; | ||
4439 | adapter->num_rx_queues = q * tcs; | ||
4440 | |||
4441 | #ifdef IXGBE_FCOE | ||
4442 | /* FCoE enabled queues require special configuration indexed | ||
4443 | * by feature specific indices and mask. Here we map FCoE | ||
4444 | * indices onto the DCB queue pairs allowing FCoE to own | ||
4445 | * configuration later. | ||
4446 | */ | ||
4447 | if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { | ||
4448 | u8 prio_tc[MAX_USER_PRIORITY] = {0}; | ||
4449 | int tc; | ||
4450 | struct ixgbe_ring_feature *f = | ||
4451 | &adapter->ring_feature[RING_F_FCOE]; | ||
4452 | |||
4453 | ixgbe_dcb_unpack_map(&adapter->dcb_cfg, DCB_TX_CONFIG, prio_tc); | ||
4454 | tc = prio_tc[adapter->fcoe.up]; | ||
4455 | f->indices = dev->tc_to_txq[tc].count; | ||
4456 | f->mask = dev->tc_to_txq[tc].offset; | ||
4457 | } | ||
4458 | #endif | ||
4459 | |||
4460 | return true; | ||
4461 | } | ||
4462 | #endif | ||
4463 | |||
4464 | /** | ||
4465 | * ixgbe_set_sriov_queues: Allocate queues for IOV use | ||
4466 | * @adapter: board private structure to initialize | ||
4467 | * | ||
4468 | * IOV doesn't actually use anything, so just NAK the | ||
4469 | * request for now and let the other queue routines | ||
4470 | * figure out what to do. | ||
4471 | */ | ||
4472 | static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter) | ||
4473 | { | ||
4474 | return false; | ||
4475 | } | ||
4476 | |||
4477 | /** | ||
4478 | * ixgbe_set_num_queues: Allocate queues for device, feature dependent | ||
4479 | * @adapter: board private structure to initialize | ||
4480 | * | ||
4481 | * This is the top level queue allocation routine. The order here is very | ||
4482 | * important, starting with the largest number of features turned on at once, | ||
4483 | * and ending with the smallest set of features. This way large combinations | ||
4484 | * can be allocated if they're turned on, and smaller combinations are the | ||
4485 | * fallthrough conditions. | ||
4486 | * | ||
4487 | **/ | ||
4488 | static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter) | ||
4489 | { | ||
4490 | /* Start with base case */ | ||
4491 | adapter->num_rx_queues = 1; | ||
4492 | adapter->num_tx_queues = 1; | ||
4493 | adapter->num_rx_pools = adapter->num_rx_queues; | ||
4494 | adapter->num_rx_queues_per_pool = 1; | ||
4495 | |||
4496 | if (ixgbe_set_sriov_queues(adapter)) | ||
4497 | goto done; | ||
4498 | |||
4499 | #ifdef CONFIG_IXGBE_DCB | ||
4500 | if (ixgbe_set_dcb_queues(adapter)) | ||
4501 | goto done; | ||
4502 | |||
4503 | #endif | ||
4504 | #ifdef IXGBE_FCOE | ||
4505 | if (ixgbe_set_fcoe_queues(adapter)) | ||
4506 | goto done; | ||
4507 | |||
4508 | #endif /* IXGBE_FCOE */ | ||
4509 | if (ixgbe_set_fdir_queues(adapter)) | ||
4510 | goto done; | ||
4511 | |||
4512 | if (ixgbe_set_rss_queues(adapter)) | ||
4513 | goto done; | ||
4514 | |||
4515 | /* fallback to base case */ | ||
4516 | adapter->num_rx_queues = 1; | ||
4517 | adapter->num_tx_queues = 1; | ||
4518 | |||
4519 | done: | ||
4520 | if ((adapter->netdev->reg_state == NETREG_UNREGISTERED) || | ||
4521 | (adapter->netdev->reg_state == NETREG_UNREGISTERING)) | ||
4522 | return 0; | ||
4523 | |||
4524 | /* Notify the stack of the (possibly) reduced queue counts. */ | ||
4525 | netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues); | ||
4526 | return netif_set_real_num_rx_queues(adapter->netdev, | ||
4527 | adapter->num_rx_queues); | ||
4528 | } | ||
4529 | |||
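The ladder in ixgbe_set_num_queues() can be read as a priority list where the first policy to claim the device wins. A compact sketch of that ordering (the booleans stand in for the adapter flags; the SR-IOV helper above currently always declines, but it still sits first in line):

#include <stdbool.h>
#include <stdio.h>

static const char *pick_queue_policy(bool sriov, bool dcb, bool fcoe,
				     bool fdir, bool rss)
{
	if (sriov)
		return "sriov";
	if (dcb)
		return "dcb";
	if (fcoe)
		return "fcoe";
	if (fdir && rss) /* Flow Director piggybacks on RSS */
		return "fdir";
	if (rss)
		return "rss";
	return "single queue"; /* base case: one Rx, one Tx */
}

int main(void)
{
	printf("%s\n", pick_queue_policy(false, false, false, true, true));
	return 0;
}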
4530 | static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, | ||
4531 | int vectors) | ||
4532 | { | ||
4533 | int err, vector_threshold; | ||
4534 | |||
4535 | /* We'll want at least 2 (vector_threshold): | ||
4536 | * 1) TxQ[0] + RxQ[0] handler | ||
4537 | * 2) Other (Link Status Change, etc.) | ||
4538 | */ | ||
4539 | vector_threshold = MIN_MSIX_COUNT; | ||
4540 | |||
4541 | /* | ||
4542 | * The more we get, the more we will assign to Tx/Rx Cleanup | ||
4543 | * for the separate queues...where Rx Cleanup >= Tx Cleanup. | ||
4544 | * Right now, we simply care about how many we'll get; we'll | ||
4545 | * set them up later while requesting IRQs. | ||
4546 | */ | ||
4547 | while (vectors >= vector_threshold) { | ||
4548 | err = pci_enable_msix(adapter->pdev, adapter->msix_entries, | ||
4549 | vectors); | ||
4550 | if (!err) /* Success in acquiring all requested vectors. */ | ||
4551 | break; | ||
4552 | else if (err < 0) | ||
4553 | vectors = 0; /* Nasty failure, quit now */ | ||
4554 | else /* err == number of vectors we should try again with */ | ||
4555 | vectors = err; | ||
4556 | } | ||
4557 | |||
4558 | if (vectors < vector_threshold) { | ||
4559 | /* Can't allocate enough MSI-X interrupts? Oh well. | ||
4560 | * This just means we'll go with either a single MSI | ||
4561 | * vector or fall back to legacy interrupts. | ||
4562 | */ | ||
4563 | netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev, | ||
4564 | "Unable to allocate MSI-X interrupts\n"); | ||
4565 | adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; | ||
4566 | kfree(adapter->msix_entries); | ||
4567 | adapter->msix_entries = NULL; | ||
4568 | } else { | ||
4569 | adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */ | ||
4570 | /* | ||
4571 | * Adjust for only the vectors we'll use, which is minimum | ||
4572 | * of max_msix_q_vectors + NON_Q_VECTORS, or the number of | ||
4573 | * vectors we were allocated. | ||
4574 | */ | ||
4575 | adapter->num_msix_vectors = min(vectors, | ||
4576 | adapter->max_msix_q_vectors + NON_Q_VECTORS); | ||
4577 | } | ||
4578 | } | ||
4579 | |||
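ixgbe_acquire_msix_vectors() leans on the semantics of the old pci_enable_msix() API: 0 means success, a positive return is the number of vectors the platform can actually provide (so the request is retried with that count), and a negative return is a hard failure. A user-space sketch of the loop, with a fake stand-in for the PCI call:

#include <stdio.h>

#define MIN_MSIX_COUNT 2 /* queue vector + misc (link status, etc.) */

/* Stand-in for pci_enable_msix(); pretend the platform offers 6. */
static int fake_enable_msix(int requested)
{
	const int available = 6;
	return requested <= available ? 0 : available;
}

static int acquire_msix_vectors(int vectors)
{
	while (vectors >= MIN_MSIX_COUNT) {
		int err = fake_enable_msix(vectors);
		if (!err)
			return vectors; /* got everything we asked for */
		else if (err < 0)
			vectors = 0;    /* hard failure, give up */
		else
			vectors = err;  /* retry with what is available */
	}
	return 0; /* caller falls back to MSI or legacy interrupts */
}

int main(void)
{
	printf("acquired %d vectors\n", acquire_msix_vectors(9)); /* 6 */
	return 0;
}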
4580 | /** | ||
4581 | * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS | ||
4582 | * @adapter: board private structure to initialize | ||
4583 | * | ||
4584 | * Cache the descriptor ring offsets for RSS to the assigned rings. | ||
4585 | * | ||
4586 | **/ | ||
4587 | static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter) | ||
4588 | { | ||
4589 | int i; | ||
4590 | |||
4591 | if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) | ||
4592 | return false; | ||
4593 | |||
4594 | for (i = 0; i < adapter->num_rx_queues; i++) | ||
4595 | adapter->rx_ring[i]->reg_idx = i; | ||
4596 | for (i = 0; i < adapter->num_tx_queues; i++) | ||
4597 | adapter->tx_ring[i]->reg_idx = i; | ||
4598 | |||
4599 | return true; | ||
4600 | } | ||
4601 | |||
4602 | #ifdef CONFIG_IXGBE_DCB | ||
4603 | |||
4604 | /* ixgbe_get_first_reg_idx - Return first register index associated with ring */ | ||
4605 | static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc, | ||
4606 | unsigned int *tx, unsigned int *rx) | ||
4607 | { | ||
4608 | struct net_device *dev = adapter->netdev; | ||
4609 | struct ixgbe_hw *hw = &adapter->hw; | ||
4610 | u8 num_tcs = netdev_get_num_tc(dev); | ||
4611 | |||
4612 | *tx = 0; | ||
4613 | *rx = 0; | ||
4614 | |||
4615 | switch (hw->mac.type) { | ||
4616 | case ixgbe_mac_82598EB: | ||
4617 | *tx = tc << 2; | ||
4618 | *rx = tc << 3; | ||
4619 | break; | ||
4620 | case ixgbe_mac_82599EB: | ||
4621 | case ixgbe_mac_X540: | ||
4622 | if (num_tcs > 4) { | ||
4623 | if (tc < 3) { | ||
4624 | *tx = tc << 5; | ||
4625 | *rx = tc << 4; | ||
4626 | } else if (tc < 5) { | ||
4627 | *tx = ((tc + 2) << 4); | ||
4628 | *rx = tc << 4; | ||
4629 | } else if (tc < num_tcs) { | ||
4630 | *tx = ((tc + 8) << 3); | ||
4631 | *rx = tc << 4; | ||
4632 | } | ||
4633 | } else { | ||
4634 | *rx = tc << 5; | ||
4635 | switch (tc) { | ||
4636 | case 0: | ||
4637 | *tx = 0; | ||
4638 | break; | ||
4639 | case 1: | ||
4640 | *tx = 64; | ||
4641 | break; | ||
4642 | case 2: | ||
4643 | *tx = 96; | ||
4644 | break; | ||
4645 | case 3: | ||
4646 | *tx = 112; | ||
4647 | break; | ||
4648 | default: | ||
4649 | break; | ||
4650 | } | ||
4651 | } | ||
4652 | break; | ||
4653 | default: | ||
4654 | break; | ||
4655 | } | ||
4656 | } | ||
4657 | |||
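The shifts in ixgbe_get_first_reg_idx() encode the 82599/X540 queue layout (the 82598 case is the simpler tc << 2 / tc << 3 pair). A standalone rendering of just the 82599/X540 branch, assuming tc is valid for the chosen mode, makes the per-TC offsets visible:

#include <stdio.h>

static void first_reg_idx_82599(int num_tcs, int tc, int *tx, int *rx)
{
	if (num_tcs > 4) {                     /* 8-TC mode */
		*rx = tc << 4;                 /* 16 Rx queues per TC */
		if (tc < 3)
			*tx = tc << 5;         /* TCs 0-2: 32 Tx queues */
		else if (tc < 5)
			*tx = (tc + 2) << 4;   /* TCs 3-4: 16 Tx queues */
		else
			*tx = (tc + 8) << 3;   /* TCs 5-7: 8 Tx queues */
	} else {                               /* 4-TC mode */
		static const int tx_off[4] = { 0, 64, 96, 112 };
		*rx = tc << 5;                 /* 32 Rx queues per TC */
		*tx = tx_off[tc];
	}
}

int main(void)
{
	int tc, tx, rx;
	for (tc = 0; tc < 8; tc++) {
		first_reg_idx_82599(8, tc, &tx, &rx);
		printf("tc %d: tx %3d rx %3d\n", tc, tx, rx);
	}
	return 0;
}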
4658 | /** | ||
4659 | * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB | ||
4660 | * @adapter: board private structure to initialize | ||
4661 | * | ||
4662 | * Cache the descriptor ring offsets for DCB to the assigned rings. | ||
4663 | * | ||
4664 | **/ | ||
4665 | static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter) | ||
4666 | { | ||
4667 | struct net_device *dev = adapter->netdev; | ||
4668 | int i, j, k; | ||
4669 | u8 num_tcs = netdev_get_num_tc(dev); | ||
4670 | |||
4671 | if (!num_tcs) | ||
4672 | return false; | ||
4673 | |||
4674 | for (i = 0, k = 0; i < num_tcs; i++) { | ||
4675 | unsigned int tx_s, rx_s; | ||
4676 | u16 count = dev->tc_to_txq[i].count; | ||
4677 | |||
4678 | ixgbe_get_first_reg_idx(adapter, i, &tx_s, &rx_s); | ||
4679 | for (j = 0; j < count; j++, k++) { | ||
4680 | adapter->tx_ring[k]->reg_idx = tx_s + j; | ||
4681 | adapter->rx_ring[k]->reg_idx = rx_s + j; | ||
4682 | adapter->tx_ring[k]->dcb_tc = i; | ||
4683 | adapter->rx_ring[k]->dcb_tc = i; | ||
4684 | } | ||
4685 | } | ||
4686 | |||
4687 | return true; | ||
4688 | } | ||
4689 | #endif | ||
4690 | |||
4691 | /** | ||
4692 | * ixgbe_cache_ring_fdir - Descriptor ring to register mapping for Flow Director | ||
4693 | * @adapter: board private structure to initialize | ||
4694 | * | ||
4695 | * Cache the descriptor ring offsets for Flow Director to the assigned rings. | ||
4696 | * | ||
4697 | **/ | ||
4698 | static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter) | ||
4699 | { | ||
4700 | int i; | ||
4701 | bool ret = false; | ||
4702 | |||
4703 | if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) && | ||
4704 | (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) { | ||
4705 | for (i = 0; i < adapter->num_rx_queues; i++) | ||
4706 | adapter->rx_ring[i]->reg_idx = i; | ||
4707 | for (i = 0; i < adapter->num_tx_queues; i++) | ||
4708 | adapter->tx_ring[i]->reg_idx = i; | ||
4709 | ret = true; | ||
4710 | } | ||
4711 | |||
4712 | return ret; | ||
4713 | } | ||
4714 | |||
4715 | #ifdef IXGBE_FCOE | ||
4716 | /** | ||
4717 | * ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for FCoE | ||
4718 | * @adapter: board private structure to initialize | ||
4719 | * | ||
4720 | * Cache the descriptor ring offsets for FCoE mode to the assigned rings. | ||
4721 | * | ||
4722 | */ | ||
4723 | static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter) | ||
4724 | { | ||
4725 | struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; | ||
4726 | int i; | ||
4727 | u8 fcoe_rx_i = 0, fcoe_tx_i = 0; | ||
4728 | |||
4729 | if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) | ||
4730 | return false; | ||
4731 | |||
4732 | if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { | ||
4733 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) | ||
4734 | ixgbe_cache_ring_fdir(adapter); | ||
4735 | else | ||
4736 | ixgbe_cache_ring_rss(adapter); | ||
4737 | |||
4738 | fcoe_rx_i = f->mask; | ||
4739 | fcoe_tx_i = f->mask; | ||
4740 | } | ||
4741 | for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) { | ||
4742 | adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i; | ||
4743 | adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i; | ||
4744 | } | ||
4745 | return true; | ||
4746 | } | ||
4747 | |||
4748 | #endif /* IXGBE_FCOE */ | ||
4749 | /** | ||
4750 | * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for SR-IOV | ||
4751 | * @adapter: board private structure to initialize | ||
4752 | * | ||
4753 | * SR-IOV doesn't use any descriptor rings but changes the default if | ||
4754 | * no other mapping is used. | ||
4755 | * | ||
4756 | */ | ||
4757 | static inline bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter) | ||
4758 | { | ||
4759 | adapter->rx_ring[0]->reg_idx = adapter->num_vfs * 2; | ||
4760 | adapter->tx_ring[0]->reg_idx = adapter->num_vfs * 2; | ||
4761 | if (adapter->num_vfs) | ||
4762 | return true; | ||
4763 | else | ||
4764 | return false; | ||
4765 | } | ||
4766 | |||
4767 | /** | ||
4768 | * ixgbe_cache_ring_register - Descriptor ring to register mapping | ||
4769 | * @adapter: board private structure to initialize | ||
4770 | * | ||
4771 | * Once we know the feature-set enabled for the device, we'll cache | ||
4772 | * the register offset the descriptor ring is assigned to. | ||
4773 | * | ||
4774 | * Note, the order of the various feature calls is important. It must start | ||
4775 | * with the most features enabled at the same time, then trickle down to the | ||
4776 | * fewest features turned on at once. | ||
4777 | **/ | ||
4778 | static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) | ||
4779 | { | ||
4780 | /* start with default case */ | ||
4781 | adapter->rx_ring[0]->reg_idx = 0; | ||
4782 | adapter->tx_ring[0]->reg_idx = 0; | ||
4783 | |||
4784 | if (ixgbe_cache_ring_sriov(adapter)) | ||
4785 | return; | ||
4786 | |||
4787 | #ifdef CONFIG_IXGBE_DCB | ||
4788 | if (ixgbe_cache_ring_dcb(adapter)) | ||
4789 | return; | ||
4790 | #endif | ||
4791 | |||
4792 | #ifdef IXGBE_FCOE | ||
4793 | if (ixgbe_cache_ring_fcoe(adapter)) | ||
4794 | return; | ||
4795 | #endif /* IXGBE_FCOE */ | ||
4796 | |||
4797 | if (ixgbe_cache_ring_fdir(adapter)) | ||
4798 | return; | ||
4799 | |||
4800 | if (ixgbe_cache_ring_rss(adapter)) | ||
4801 | return; | ||
4802 | } | ||
4803 | |||
4804 | /** | ||
4805 | * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported | ||
4806 | * @adapter: board private structure to initialize | ||
4807 | * | ||
4808 | * Attempt to configure the interrupts using the best available | ||
4809 | * capabilities of the hardware and the kernel. | ||
4810 | **/ | ||
4811 | static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) | ||
4812 | { | ||
4813 | struct ixgbe_hw *hw = &adapter->hw; | ||
4814 | int err = 0; | ||
4815 | int vector, v_budget; | ||
4816 | |||
4817 | /* | ||
4818 | * It's easy to be greedy for MSI-X vectors, but it really | ||
4819 | * doesn't do us much good if we have a lot more vectors | ||
4820 | * than CPUs. So let's be conservative and only ask for | ||
4821 | * (roughly) the same number of vectors as there are CPUs. | ||
4822 | * The default is to use pairs of vectors. | ||
4823 | */ | ||
4824 | v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues); | ||
4825 | v_budget = min_t(int, v_budget, num_online_cpus()); | ||
4826 | v_budget += NON_Q_VECTORS; | ||
4827 | |||
4828 | /* | ||
4829 | * At the same time, hardware can only support a maximum of | ||
4830 | * hw->mac.max_msix_vectors vectors. With features | ||
4831 | * such as RSS and VMDq, we can easily surpass the number of Rx and Tx | ||
4832 | * descriptor queues supported by our device. Thus, we cap it off in | ||
4833 | * those rare cases where the CPU count also exceeds our vector limit. | ||
4834 | */ | ||
4835 | v_budget = min_t(int, v_budget, hw->mac.max_msix_vectors); | ||
4836 | |||
4837 | /* A failure in MSI-X entry allocation isn't fatal, but it does | ||
4838 | * mean we disable MSI-X capabilities of the adapter. */ | ||
4839 | adapter->msix_entries = kcalloc(v_budget, | ||
4840 | sizeof(struct msix_entry), GFP_KERNEL); | ||
4841 | if (adapter->msix_entries) { | ||
4842 | for (vector = 0; vector < v_budget; vector++) | ||
4843 | adapter->msix_entries[vector].entry = vector; | ||
4844 | |||
4845 | ixgbe_acquire_msix_vectors(adapter, v_budget); | ||
4846 | |||
4847 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) | ||
4848 | goto out; | ||
4849 | } | ||
4850 | |||
4851 | adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; | ||
4852 | adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; | ||
4853 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { | ||
4854 | e_err(probe, | ||
4855 | "ATR is not supported while multiple " | ||
4856 | "queues are disabled. Disabling Flow Director\n"); | ||
4857 | } | ||
4858 | adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; | ||
4859 | adapter->atr_sample_rate = 0; | ||
4860 | if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) | ||
4861 | ixgbe_disable_sriov(adapter); | ||
4862 | |||
4863 | err = ixgbe_set_num_queues(adapter); | ||
4864 | if (err) | ||
4865 | return err; | ||
4866 | |||
4867 | err = pci_enable_msi(adapter->pdev); | ||
4868 | if (!err) { | ||
4869 | adapter->flags |= IXGBE_FLAG_MSI_ENABLED; | ||
4870 | } else { | ||
4871 | netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev, | ||
4872 | "Unable to allocate MSI interrupt, " | ||
4873 | "falling back to legacy. Error: %d\n", err); | ||
4874 | /* reset err */ | ||
4875 | err = 0; | ||
4876 | } | ||
4877 | |||
4878 | out: | ||
4879 | return err; | ||
4880 | } | ||
4881 | |||
4882 | static void ixgbe_add_ring(struct ixgbe_ring *ring, | ||
4883 | struct ixgbe_ring_container *head) | ||
4884 | { | ||
4885 | ring->next = head->ring; | ||
4886 | head->ring = ring; | ||
4887 | head->count++; | ||
4888 | } | ||
4889 | |||
4890 | /** | ||
4891 | * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector | ||
4892 | * @adapter: board private structure to initialize | ||
4893 | * @v_idx: index of vector in adapter struct | ||
4894 | * | ||
4895 | * We allocate one q_vector. If allocation fails we return -ENOMEM. | ||
4896 | **/ | ||
4897 | static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, int v_idx, | ||
4898 | int txr_count, int txr_idx, | ||
4899 | int rxr_count, int rxr_idx) | ||
4900 | { | ||
4901 | struct ixgbe_q_vector *q_vector; | ||
4902 | struct ixgbe_ring *ring; | ||
4903 | int node = -1; | ||
4904 | int cpu = -1; | ||
4905 | int ring_count, size; | ||
4906 | |||
4907 | ring_count = txr_count + rxr_count; | ||
4908 | size = sizeof(struct ixgbe_q_vector) + | ||
4909 | (sizeof(struct ixgbe_ring) * ring_count); | ||
4910 | |||
4911 | /* customize cpu for Flow Director mapping */ | ||
4912 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { | ||
4913 | if (cpu_online(v_idx)) { | ||
4914 | cpu = v_idx; | ||
4915 | node = cpu_to_node(cpu); | ||
4916 | } | ||
4917 | } | ||
4918 | |||
4919 | /* allocate q_vector and rings */ | ||
4920 | q_vector = kzalloc_node(size, GFP_KERNEL, node); | ||
4921 | if (!q_vector) | ||
4922 | q_vector = kzalloc(size, GFP_KERNEL); | ||
4923 | if (!q_vector) | ||
4924 | return -ENOMEM; | ||
4925 | |||
4926 | /* setup affinity mask and node */ | ||
4927 | if (cpu != -1) | ||
4928 | cpumask_set_cpu(cpu, &q_vector->affinity_mask); | ||
4929 | else | ||
4930 | cpumask_copy(&q_vector->affinity_mask, cpu_online_mask); | ||
4931 | q_vector->numa_node = node; | ||
4932 | |||
4933 | /* initialize NAPI */ | ||
4934 | netif_napi_add(adapter->netdev, &q_vector->napi, | ||
4935 | ixgbe_poll, 64); | ||
4936 | |||
4937 | /* tie q_vector and adapter together */ | ||
4938 | adapter->q_vector[v_idx] = q_vector; | ||
4939 | q_vector->adapter = adapter; | ||
4940 | q_vector->v_idx = v_idx; | ||
4941 | |||
4942 | /* initialize work limits */ | ||
4943 | q_vector->tx.work_limit = adapter->tx_work_limit; | ||
4944 | |||
4945 | /* initialize pointer to rings */ | ||
4946 | ring = q_vector->ring; | ||
4947 | |||
4948 | while (txr_count) { | ||
4949 | /* assign generic ring traits */ | ||
4950 | ring->dev = &adapter->pdev->dev; | ||
4951 | ring->netdev = adapter->netdev; | ||
4952 | |||
4953 | /* configure backlink on ring */ | ||
4954 | ring->q_vector = q_vector; | ||
4955 | |||
4956 | /* update q_vector Tx values */ | ||
4957 | ixgbe_add_ring(ring, &q_vector->tx); | ||
4958 | |||
4959 | /* apply Tx specific ring traits */ | ||
4960 | ring->count = adapter->tx_ring_count; | ||
4961 | ring->queue_index = txr_idx; | ||
4962 | |||
4963 | /* assign ring to adapter */ | ||
4964 | adapter->tx_ring[txr_idx] = ring; | ||
4965 | |||
4966 | /* update count and index */ | ||
4967 | txr_count--; | ||
4968 | txr_idx++; | ||
4969 | |||
4970 | /* push pointer to next ring */ | ||
4971 | ring++; | ||
4972 | } | ||
4973 | |||
4974 | while (rxr_count) { | ||
4975 | /* assign generic ring traits */ | ||
4976 | ring->dev = &adapter->pdev->dev; | ||
4977 | ring->netdev = adapter->netdev; | ||
4978 | |||
4979 | /* configure backlink on ring */ | ||
4980 | ring->q_vector = q_vector; | ||
4981 | |||
4982 | /* update q_vector Rx values */ | ||
4983 | ixgbe_add_ring(ring, &q_vector->rx); | ||
4984 | |||
4985 | /* | ||
4986 | * 82599 errata, UDP frames with a 0 checksum | ||
4987 | * can be marked as checksum errors. | ||
4988 | */ | ||
4989 | if (adapter->hw.mac.type == ixgbe_mac_82599EB) | ||
4990 | set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state); | ||
4991 | |||
4992 | /* apply Rx specific ring traits */ | ||
4993 | ring->count = adapter->rx_ring_count; | ||
4994 | ring->queue_index = rxr_idx; | ||
4995 | |||
4996 | /* assign ring to adapter */ | ||
4997 | adapter->rx_ring[rxr_idx] = ring; | ||
4998 | |||
4999 | /* update count and index */ | ||
5000 | rxr_count--; | ||
5001 | rxr_idx++; | ||
5002 | |||
5003 | /* push pointer to next ring */ | ||
5004 | ring++; | ||
5005 | } | ||
5006 | |||
5007 | return 0; | ||
5008 | } | ||
5009 | |||
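ixgbe_alloc_q_vector() sizes a single allocation to hold the q_vector and all of its rings, so ring = q_vector->ring followed by ring++ walks the Tx rings first and then the Rx rings with no per-ring allocation. A simplified sketch of that layout using a flexible array member (the structures are stand-ins, not the driver's):

#include <stdio.h>
#include <stdlib.h>

struct ring { int queue_index; };

struct q_vector {
	int v_idx;
	struct ring ring[]; /* flexible array: Tx rings first, then Rx */
};

int main(void)
{
	int txr_count = 1, rxr_count = 2;
	int ring_count = txr_count + rxr_count;
	struct q_vector *qv;

	qv = calloc(1, sizeof(*qv) + sizeof(struct ring) * ring_count);
	if (!qv)
		return 1;

	/* ring++ walks through the block just as the two loops above do */
	struct ring *ring = qv->ring;
	for (int i = 0; i < ring_count; i++, ring++)
		ring->queue_index = i;

	printf("one allocation, %d rings\n", ring_count);
	free(qv);
	return 0;
}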
5010 | /** | ||
5011 | * ixgbe_free_q_vector - Free memory allocated for specific interrupt vector | ||
5012 | * @adapter: board private structure to initialize | ||
5013 | * @v_idx: Index of vector to be freed | ||
5014 | * | ||
5015 | * This function frees the memory allocated to the q_vector. In addition, if | ||
5016 | * NAPI is enabled, it will delete any references to the NAPI struct prior | ||
5017 | * to freeing the q_vector. | ||
5018 | **/ | ||
5019 | static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx) | ||
5020 | { | ||
5021 | struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx]; | ||
5022 | struct ixgbe_ring *ring; | ||
5023 | |||
5024 | ixgbe_for_each_ring(ring, q_vector->tx) | ||
5025 | adapter->tx_ring[ring->queue_index] = NULL; | ||
5026 | |||
5027 | ixgbe_for_each_ring(ring, q_vector->rx) | ||
5028 | adapter->rx_ring[ring->queue_index] = NULL; | ||
5029 | |||
5030 | adapter->q_vector[v_idx] = NULL; | ||
5031 | netif_napi_del(&q_vector->napi); | ||
5032 | |||
5033 | /* | ||
5034 | * ixgbe_get_stats64() might access the rings on this vector, | ||
5035 | * so we must wait a grace period before freeing it. | ||
5036 | */ | ||
5037 | kfree_rcu(q_vector, rcu); | ||
5038 | } | ||
5039 | |||
5040 | /** | ||
5041 | * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors | ||
5042 | * @adapter: board private structure to initialize | ||
5043 | * | ||
5044 | * We allocate one q_vector per queue interrupt. If allocation fails we | ||
5045 | * return -ENOMEM. | ||
5046 | **/ | ||
5047 | static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter) | ||
5048 | { | ||
5049 | int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; | ||
5050 | int rxr_remaining = adapter->num_rx_queues; | ||
5051 | int txr_remaining = adapter->num_tx_queues; | ||
5052 | int rxr_idx = 0, txr_idx = 0, v_idx = 0; | ||
5053 | int err; | ||
5054 | |||
5055 | /* only one q_vector if MSI-X is disabled. */ | ||
5056 | if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) | ||
5057 | q_vectors = 1; | ||
5058 | |||
5059 | if (q_vectors >= (rxr_remaining + txr_remaining)) { | ||
5060 | for (; rxr_remaining; v_idx++, q_vectors--) { | ||
5061 | int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors); | ||
5062 | err = ixgbe_alloc_q_vector(adapter, v_idx, | ||
5063 | 0, 0, rqpv, rxr_idx); | ||
5064 | |||
5065 | if (err) | ||
5066 | goto err_out; | ||
5067 | |||
5068 | /* update counts and index */ | ||
5069 | rxr_remaining -= rqpv; | ||
5070 | rxr_idx += rqpv; | ||
5071 | } | ||
5072 | } | ||
5073 | |||
5074 | for (; q_vectors; v_idx++, q_vectors--) { | ||
5075 | int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors); | ||
5076 | int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors); | ||
5077 | err = ixgbe_alloc_q_vector(adapter, v_idx, | ||
5078 | tqpv, txr_idx, | ||
5079 | rqpv, rxr_idx); | ||
5080 | |||
5081 | if (err) | ||
5082 | goto err_out; | ||
5083 | |||
5084 | /* update counts and index */ | ||
5085 | rxr_remaining -= rqpv; | ||
5086 | rxr_idx += rqpv; | ||
5087 | txr_remaining -= tqpv; | ||
5088 | txr_idx += tqpv; | ||
5089 | } | ||
5090 | |||
5091 | return 0; | ||
5092 | |||
5093 | err_out: | ||
5094 | while (v_idx) { | ||
5095 | v_idx--; | ||
5096 | ixgbe_free_q_vector(adapter, v_idx); | ||
5097 | } | ||
5098 | |||
5099 | return -ENOMEM; | ||
5100 | } | ||
5101 | |||
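The distribution loops in ixgbe_alloc_q_vectors() divide the remaining rings by the remaining vectors with DIV_ROUND_UP on every iteration, so earlier vectors absorb any remainder and no ring is left unassigned. A worked example:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int rxr_remaining = 10, q_vectors = 4, v_idx;

	for (v_idx = 0; q_vectors; v_idx++, q_vectors--) {
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors);
		printf("vector %d: %d Rx rings\n", v_idx, rqpv);
		rxr_remaining -= rqpv;
	}
	/* prints 3, 3, 2, 2 -- early vectors absorb the remainder */
	return 0;
}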
5102 | /** | ||
5103 | * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors | ||
5104 | * @adapter: board private structure to initialize | ||
5105 | * | ||
5106 | * This function frees the memory allocated to the q_vectors. In addition, if | ||
5107 | * NAPI is enabled, it will delete any references to the NAPI struct prior | ||
5108 | * to freeing the q_vector. | ||
5109 | **/ | ||
5110 | static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter) | ||
5111 | { | ||
5112 | int v_idx, q_vectors; | ||
5113 | |||
5114 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) | ||
5115 | q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; | ||
5116 | else | ||
5117 | q_vectors = 1; | ||
5118 | |||
5119 | for (v_idx = 0; v_idx < q_vectors; v_idx++) | ||
5120 | ixgbe_free_q_vector(adapter, v_idx); | ||
5121 | } | ||
5122 | |||
5123 | static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter) | ||
5124 | { | ||
5125 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { | ||
5126 | adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; | ||
5127 | pci_disable_msix(adapter->pdev); | ||
5128 | kfree(adapter->msix_entries); | ||
5129 | adapter->msix_entries = NULL; | ||
5130 | } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { | ||
5131 | adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED; | ||
5132 | pci_disable_msi(adapter->pdev); | ||
5133 | } | ||
5134 | } | ||
5135 | |||
5136 | /** | ||
5137 | * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme | ||
5138 | * @adapter: board private structure to initialize | ||
5139 | * | ||
5140 | * We determine which interrupt scheme to use based on... | ||
5141 | * - Kernel support (MSI, MSI-X) | ||
5142 | * - which can be user-defined (via MODULE_PARAM) | ||
5143 | * - Hardware queue count (num_*_queues) | ||
5144 | * - defined by miscellaneous hardware support/features (RSS, etc.) | ||
5145 | **/ | ||
5146 | int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter) | ||
5147 | { | ||
5148 | int err; | ||
5149 | |||
5150 | /* Number of supported queues */ | ||
5151 | err = ixgbe_set_num_queues(adapter); | ||
5152 | if (err) | ||
5153 | return err; | ||
5154 | |||
5155 | err = ixgbe_set_interrupt_capability(adapter); | ||
5156 | if (err) { | ||
5157 | e_dev_err("Unable to setup interrupt capabilities\n"); | ||
5158 | goto err_set_interrupt; | ||
5159 | } | ||
5160 | |||
5161 | err = ixgbe_alloc_q_vectors(adapter); | ||
5162 | if (err) { | ||
5163 | e_dev_err("Unable to allocate memory for queue vectors\n"); | ||
5164 | goto err_alloc_q_vectors; | ||
5165 | } | ||
5166 | |||
5167 | ixgbe_cache_ring_register(adapter); | ||
5168 | |||
5169 | e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n", | ||
5170 | (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled", | ||
5171 | adapter->num_rx_queues, adapter->num_tx_queues); | ||
5172 | |||
5173 | set_bit(__IXGBE_DOWN, &adapter->state); | ||
5174 | |||
5175 | return 0; | ||
5176 | |||
5177 | err_alloc_q_vectors: | ||
5178 | ixgbe_reset_interrupt_capability(adapter); | ||
5179 | err_set_interrupt: | ||
5180 | return err; | ||
5181 | } | ||
5182 | |||
5183 | /** | ||
5184 | * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings | ||
5185 | * @adapter: board private structure to clear interrupt scheme on | ||
5186 | * | ||
5187 | * We go through and clear interrupt specific resources and reset the structure | ||
5188 | * to pre-load conditions | ||
5189 | **/ | ||
5190 | void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter) | ||
5191 | { | ||
5192 | adapter->num_tx_queues = 0; | ||
5193 | adapter->num_rx_queues = 0; | ||
5194 | |||
5195 | ixgbe_free_q_vectors(adapter); | ||
5196 | ixgbe_reset_interrupt_capability(adapter); | ||
5197 | } | ||
5198 | |||
5199 | /** | ||
5200 | * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter) | 4328 | * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter) |
5201 | * @adapter: board private structure to initialize | 4329 | * @adapter: board private structure to initialize |
5202 | * | 4330 | * |
@@ -6235,7 +5363,7 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter) | |||
6235 | * print link down message | 5363 | * print link down message |
6236 | * @adapter - pointer to the adapter structure | 5364 | * @adapter - pointer to the adapter structure |
6237 | **/ | 5365 | **/ |
6238 | static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter* adapter) | 5366 | static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter) |
6239 | { | 5367 | { |
6240 | struct net_device *netdev = adapter->netdev; | 5368 | struct net_device *netdev = adapter->netdev; |
6241 | struct ixgbe_hw *hw = &adapter->hw; | 5369 | struct ixgbe_hw *hw = &adapter->hw; |
@@ -6480,41 +5608,32 @@ static void ixgbe_service_timer(unsigned long data) | |||
6480 | unsigned long next_event_offset; | 5608 | unsigned long next_event_offset; |
6481 | bool ready = true; | 5609 | bool ready = true; |
6482 | 5610 | ||
6483 | #ifdef CONFIG_PCI_IOV | 5611 | /* poll faster when waiting for link */ |
6484 | ready = false; | 5612 | if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) |
5613 | next_event_offset = HZ / 10; | ||
5614 | else | ||
5615 | next_event_offset = HZ * 2; | ||
6485 | 5616 | ||
5617 | #ifdef CONFIG_PCI_IOV | ||
6486 | /* | 5618 | /* |
6487 | * don't bother with SR-IOV VF DMA hang check if there are | 5619 | * don't bother with SR-IOV VF DMA hang check if there are |
6488 | * no VFs or the link is down | 5620 | * no VFs or the link is down |
6489 | */ | 5621 | */ |
6490 | if (!adapter->num_vfs || | 5622 | if (!adapter->num_vfs || |
6491 | (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)) { | 5623 | (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)) |
6492 | ready = true; | ||
6493 | goto normal_timer_service; | 5624 | goto normal_timer_service; |
6494 | } | ||
6495 | 5625 | ||
6496 | /* If we have VFs allocated then we must check for DMA hangs */ | 5626 | /* If we have VFs allocated then we must check for DMA hangs */ |
6497 | ixgbe_check_for_bad_vf(adapter); | 5627 | ixgbe_check_for_bad_vf(adapter); |
6498 | next_event_offset = HZ / 50; | 5628 | next_event_offset = HZ / 50; |
6499 | adapter->timer_event_accumulator++; | 5629 | adapter->timer_event_accumulator++; |
6500 | 5630 | ||
6501 | if (adapter->timer_event_accumulator >= 100) { | 5631 | if (adapter->timer_event_accumulator >= 100) |
6502 | ready = true; | ||
6503 | adapter->timer_event_accumulator = 0; | 5632 | adapter->timer_event_accumulator = 0; |
6504 | } | ||
6505 | |||
6506 | goto schedule_event; | ||
6507 | |||
6508 | normal_timer_service: | ||
6509 | #endif | ||
6510 | /* poll faster when waiting for link */ | ||
6511 | if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) | ||
6512 | next_event_offset = HZ / 10; | ||
6513 | else | 5633 | else |
6514 | next_event_offset = HZ * 2; | 5634 | ready = false; |
6515 | 5635 | ||
6516 | #ifdef CONFIG_PCI_IOV | 5636 | normal_timer_service: |
6517 | schedule_event: | ||
6518 | #endif | 5637 | #endif |
6519 | /* Reset the timer */ | 5638 | /* Reset the timer */ |
6520 | mod_timer(&adapter->service_timer, next_event_offset + jiffies); | 5639 | mod_timer(&adapter->service_timer, next_event_offset + jiffies); |
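After this rework the service timer picks one cadence up front (fast polling while waiting for link, relaxed otherwise) and the CONFIG_PCI_IOV block only overrides it for the VF DMA-hang check. A sketch of the resulting selection, with an illustrative HZ value:

#include <stdio.h>

#define HZ 250 /* illustrative tick rate; real kernels vary */

static long next_event_offset(int checking_vfs, int need_link_update)
{
	if (checking_vfs)
		return HZ / 50; /* VF DMA-hang check cadence */
	if (need_link_update)
		return HZ / 10; /* poll faster when waiting for link */
	return HZ * 2;          /* steady state */
}

int main(void)
{
	printf("%ld %ld %ld\n",
	       next_event_offset(1, 0),
	       next_event_offset(0, 1),
	       next_event_offset(0, 0));
	return 0;
}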
@@ -6563,32 +5682,11 @@ static void ixgbe_service_task(struct work_struct *work) | |||
6563 | ixgbe_service_event_complete(adapter); | 5682 | ixgbe_service_event_complete(adapter); |
6564 | } | 5683 | } |
6565 | 5684 | ||
6566 | void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens, | ||
6567 | u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx) | ||
6568 | { | ||
6569 | struct ixgbe_adv_tx_context_desc *context_desc; | ||
6570 | u16 i = tx_ring->next_to_use; | ||
6571 | |||
6572 | context_desc = IXGBE_TX_CTXTDESC(tx_ring, i); | ||
6573 | |||
6574 | i++; | ||
6575 | tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; | ||
6576 | |||
6577 | /* set bits to identify this as an advanced context descriptor */ | ||
6578 | type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT; | ||
6579 | |||
6580 | context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); | ||
6581 | context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof); | ||
6582 | context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); | ||
6583 | context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); | ||
6584 | } | ||
6585 | |||
6586 | static int ixgbe_tso(struct ixgbe_ring *tx_ring, | 5685 | static int ixgbe_tso(struct ixgbe_ring *tx_ring, |
6587 | struct ixgbe_tx_buffer *first, | 5686 | struct ixgbe_tx_buffer *first, |
6588 | u32 tx_flags, __be16 protocol, u8 *hdr_len) | 5687 | u8 *hdr_len) |
6589 | { | 5688 | { |
6590 | struct sk_buff *skb = first->skb; | 5689 | struct sk_buff *skb = first->skb; |
6591 | int err; | ||
6592 | u32 vlan_macip_lens, type_tucmd; | 5690 | u32 vlan_macip_lens, type_tucmd; |
6593 | u32 mss_l4len_idx, l4len; | 5691 | u32 mss_l4len_idx, l4len; |
6594 | 5692 | ||
@@ -6596,7 +5694,7 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring, | |||
6596 | return 0; | 5694 | return 0; |
6597 | 5695 | ||
6598 | if (skb_header_cloned(skb)) { | 5696 | if (skb_header_cloned(skb)) { |
6599 | err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); | 5697 | int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); |
6600 | if (err) | 5698 | if (err) |
6601 | return err; | 5699 | return err; |
6602 | } | 5700 | } |
@@ -6604,7 +5702,7 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring, | |||
6604 | /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ | 5702 | /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ |
6605 | type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; | 5703 | type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; |
6606 | 5704 | ||
6607 | if (protocol == __constant_htons(ETH_P_IP)) { | 5705 | if (first->protocol == __constant_htons(ETH_P_IP)) { |
6608 | struct iphdr *iph = ip_hdr(skb); | 5706 | struct iphdr *iph = ip_hdr(skb); |
6609 | iph->tot_len = 0; | 5707 | iph->tot_len = 0; |
6610 | iph->check = 0; | 5708 | iph->check = 0; |
@@ -6613,12 +5711,17 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring, | |||
6613 | IPPROTO_TCP, | 5711 | IPPROTO_TCP, |
6614 | 0); | 5712 | 0); |
6615 | type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; | 5713 | type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; |
5714 | first->tx_flags |= IXGBE_TX_FLAGS_TSO | | ||
5715 | IXGBE_TX_FLAGS_CSUM | | ||
5716 | IXGBE_TX_FLAGS_IPV4; | ||
6616 | } else if (skb_is_gso_v6(skb)) { | 5717 | } else if (skb_is_gso_v6(skb)) { |
6617 | ipv6_hdr(skb)->payload_len = 0; | 5718 | ipv6_hdr(skb)->payload_len = 0; |
6618 | tcp_hdr(skb)->check = | 5719 | tcp_hdr(skb)->check = |
6619 | ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, | 5720 | ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, |
6620 | &ipv6_hdr(skb)->daddr, | 5721 | &ipv6_hdr(skb)->daddr, |
6621 | 0, IPPROTO_TCP, 0); | 5722 | 0, IPPROTO_TCP, 0); |
5723 | first->tx_flags |= IXGBE_TX_FLAGS_TSO | | ||
5724 | IXGBE_TX_FLAGS_CSUM; | ||
6622 | } | 5725 | } |
6623 | 5726 | ||
6624 | /* compute header lengths */ | 5727 | /* compute header lengths */ |
@@ -6637,17 +5740,16 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring, | |||
6637 | /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ | 5740 | /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ |
6638 | vlan_macip_lens = skb_network_header_len(skb); | 5741 | vlan_macip_lens = skb_network_header_len(skb); |
6639 | vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; | 5742 | vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; |
6640 | vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; | 5743 | vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; |
6641 | 5744 | ||
6642 | ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd, | 5745 | ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd, |
6643 | mss_l4len_idx); | 5746 | mss_l4len_idx); |
6644 | 5747 | ||
6645 | return 1; | 5748 | return 1; |
6646 | } | 5749 | } |
6647 | 5750 | ||
6648 | static bool ixgbe_tx_csum(struct ixgbe_ring *tx_ring, | 5751 | static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring, |
6649 | struct ixgbe_tx_buffer *first, | 5752 | struct ixgbe_tx_buffer *first) |
6650 | u32 tx_flags, __be16 protocol) | ||
6651 | { | 5753 | { |
6652 | struct sk_buff *skb = first->skb; | 5754 | struct sk_buff *skb = first->skb; |
6653 | u32 vlan_macip_lens = 0; | 5755 | u32 vlan_macip_lens = 0; |
@@ -6655,12 +5757,12 @@ static bool ixgbe_tx_csum(struct ixgbe_ring *tx_ring, | |||
6655 | u32 type_tucmd = 0; | 5757 | u32 type_tucmd = 0; |
6656 | 5758 | ||
6657 | if (skb->ip_summed != CHECKSUM_PARTIAL) { | 5759 | if (skb->ip_summed != CHECKSUM_PARTIAL) { |
6658 | if (!(tx_flags & IXGBE_TX_FLAGS_HW_VLAN) && | 5760 | if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN) && |
6659 | !(tx_flags & IXGBE_TX_FLAGS_TXSW)) | 5761 | !(first->tx_flags & IXGBE_TX_FLAGS_TXSW)) |
6660 | return false; | 5762 | return; |
6661 | } else { | 5763 | } else { |
6662 | u8 l4_hdr = 0; | 5764 | u8 l4_hdr = 0; |
6663 | switch (protocol) { | 5765 | switch (first->protocol) { |
6664 | case __constant_htons(ETH_P_IP): | 5766 | case __constant_htons(ETH_P_IP): |
6665 | vlan_macip_lens |= skb_network_header_len(skb); | 5767 | vlan_macip_lens |= skb_network_header_len(skb); |
6666 | type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; | 5768 | type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; |
@@ -6674,7 +5776,7 @@ static bool ixgbe_tx_csum(struct ixgbe_ring *tx_ring, | |||
6674 | if (unlikely(net_ratelimit())) { | 5776 | if (unlikely(net_ratelimit())) { |
6675 | dev_warn(tx_ring->dev, | 5777 | dev_warn(tx_ring->dev, |
6676 | "partial checksum but proto=%x!\n", | 5778 | "partial checksum but proto=%x!\n", |
6677 | skb->protocol); | 5779 | first->protocol); |
6678 | } | 5780 | } |
6679 | break; | 5781 | break; |
6680 | } | 5782 | } |
@@ -6698,19 +5800,21 @@ static bool ixgbe_tx_csum(struct ixgbe_ring *tx_ring, | |||
6698 | if (unlikely(net_ratelimit())) { | 5800 | if (unlikely(net_ratelimit())) { |
6699 | dev_warn(tx_ring->dev, | 5801 | dev_warn(tx_ring->dev, |
6700 | "partial checksum but l4 proto=%x!\n", | 5802 | "partial checksum but l4 proto=%x!\n", |
6701 | skb->protocol); | 5803 | l4_hdr); |
6702 | } | 5804 | } |
6703 | break; | 5805 | break; |
6704 | } | 5806 | } |
5807 | |||
5808 | /* update TX checksum flag */ | ||
5809 | first->tx_flags |= IXGBE_TX_FLAGS_CSUM; | ||
6705 | } | 5810 | } |
6706 | 5811 | ||
5812 | /* vlan_macip_lens: MACLEN, VLAN tag */ | ||
6707 | vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; | 5813 | vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; |
6708 | vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; | 5814 | vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; |
6709 | 5815 | ||
6710 | ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, | 5816 | ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, |
6711 | type_tucmd, mss_l4len_idx); | 5817 | type_tucmd, mss_l4len_idx); |
6712 | |||
6713 | return (skb->ip_summed == CHECKSUM_PARTIAL); | ||
6714 | } | 5818 | } |
6715 | 5819 | ||
6716 | static __le32 ixgbe_tx_cmd_type(u32 tx_flags) | 5820 | static __le32 ixgbe_tx_cmd_type(u32 tx_flags) |
@@ -6775,7 +5879,6 @@ static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc, | |||
6775 | 5879 | ||
6776 | static void ixgbe_tx_map(struct ixgbe_ring *tx_ring, | 5880 | static void ixgbe_tx_map(struct ixgbe_ring *tx_ring, |
6777 | struct ixgbe_tx_buffer *first, | 5881 | struct ixgbe_tx_buffer *first, |
6778 | u32 tx_flags, | ||
6779 | const u8 hdr_len) | 5882 | const u8 hdr_len) |
6780 | { | 5883 | { |
6781 | dma_addr_t dma; | 5884 | dma_addr_t dma; |
@@ -6786,6 +5889,7 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring, | |||
6786 | unsigned int data_len = skb->data_len; | 5889 | unsigned int data_len = skb->data_len; |
6787 | unsigned int size = skb_headlen(skb); | 5890 | unsigned int size = skb_headlen(skb); |
6788 | unsigned int paylen = skb->len - hdr_len; | 5891 | unsigned int paylen = skb->len - hdr_len; |
5892 | u32 tx_flags = first->tx_flags; | ||
6789 | __le32 cmd_type; | 5893 | __le32 cmd_type; |
6790 | u16 i = tx_ring->next_to_use; | 5894 | u16 i = tx_ring->next_to_use; |
6791 | 5895 | ||
@@ -6812,7 +5916,6 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring, | |||
6812 | /* record length, and DMA address */ | 5916 | /* record length, and DMA address */ |
6813 | dma_unmap_len_set(first, len, size); | 5917 | dma_unmap_len_set(first, len, size); |
6814 | dma_unmap_addr_set(first, dma, dma); | 5918 | dma_unmap_addr_set(first, dma, dma); |
6815 | first->tx_flags = tx_flags; | ||
6816 | 5919 | ||
6817 | tx_desc->read.buffer_addr = cpu_to_le64(dma); | 5920 | tx_desc->read.buffer_addr = cpu_to_le64(dma); |
6818 | 5921 | ||
@@ -6921,8 +6024,7 @@ dma_error: | |||
6921 | } | 6024 | } |
6922 | 6025 | ||
6923 | static void ixgbe_atr(struct ixgbe_ring *ring, | 6026 | static void ixgbe_atr(struct ixgbe_ring *ring, |
6924 | struct ixgbe_tx_buffer *first, | 6027 | struct ixgbe_tx_buffer *first) |
6925 | u32 tx_flags, __be16 protocol) | ||
6926 | { | 6028 | { |
6927 | struct ixgbe_q_vector *q_vector = ring->q_vector; | 6029 | struct ixgbe_q_vector *q_vector = ring->q_vector; |
6928 | union ixgbe_atr_hash_dword input = { .dword = 0 }; | 6030 | union ixgbe_atr_hash_dword input = { .dword = 0 }; |
@@ -6949,9 +6051,9 @@ static void ixgbe_atr(struct ixgbe_ring *ring, | |||
6949 | hdr.network = skb_network_header(first->skb); | 6051 | hdr.network = skb_network_header(first->skb); |
6950 | 6052 | ||
6951 | /* Currently only IPv4/IPv6 with TCP is supported */ | 6053 | /* Currently only IPv4/IPv6 with TCP is supported */ |
6952 | if ((protocol != __constant_htons(ETH_P_IPV6) || | 6054 | if ((first->protocol != __constant_htons(ETH_P_IPV6) || |
6953 | hdr.ipv6->nexthdr != IPPROTO_TCP) && | 6055 | hdr.ipv6->nexthdr != IPPROTO_TCP) && |
6954 | (protocol != __constant_htons(ETH_P_IP) || | 6056 | (first->protocol != __constant_htons(ETH_P_IP) || |
6955 | hdr.ipv4->protocol != IPPROTO_TCP)) | 6057 | hdr.ipv4->protocol != IPPROTO_TCP)) |
6956 | return; | 6058 | return; |
6957 | 6059 | ||
@@ -6968,7 +6070,7 @@ static void ixgbe_atr(struct ixgbe_ring *ring, | |||
6968 | /* reset sample count */ | 6070 | /* reset sample count */ |
6969 | ring->atr_count = 0; | 6071 | ring->atr_count = 0; |
6970 | 6072 | ||
6971 | vlan_id = htons(tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT); | 6073 | vlan_id = htons(first->tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT); |
6972 | 6074 | ||
6973 | /* | 6075 | /* |
6974 | * src and dst are inverted, think how the receiver sees them | 6076 | * src and dst are inverted, think how the receiver sees them |
@@ -6983,13 +6085,13 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
 	 * since src port and flex bytes occupy the same word XOR them together
 	 * and write the value to source port portion of compressed dword
 	 */
-	if (tx_flags & (IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN))
+	if (first->tx_flags & (IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN))
 		common.port.src ^= th->dest ^ __constant_htons(ETH_P_8021Q);
 	else
-		common.port.src ^= th->dest ^ protocol;
+		common.port.src ^= th->dest ^ first->protocol;
 	common.port.dst ^= th->source;
 
-	if (protocol == __constant_htons(ETH_P_IP)) {
+	if (first->protocol == __constant_htons(ETH_P_IP)) {
 		input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
 		common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
 	} else {
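
The sampled flow signature above is built by XOR-folding the TCP ports, the VLAN tag or EtherType, and the IP addresses into a compressed dword pair before it is handed to the flow director hardware. A rough, self-contained illustration of that folding (simplified layout; the real driver packs the values into ixgbe_atr_hash_dword unions):

    #include <stdint.h>
    #include <stdio.h>
    #include <arpa/inet.h>  /* htons()/htonl() */

    int main(void)
    {
        uint16_t th_source = htons(34567), th_dest = htons(80);
        uint32_t saddr = htonl(0xc0a80001), daddr = htonl(0xc0a80002);
        uint16_t protocol = htons(0x0800);          /* ETH_P_IP */

        /* src and dst are inverted: hash as the receiver would see them */
        uint16_t port_src = th_dest ^ protocol;     /* no VLAN in this example */
        uint16_t port_dst = th_source;
        uint32_t ip = saddr ^ daddr;                /* fold both addresses */

        printf("signature: ports %#06x/%#06x ip %#010x\n",
               (unsigned)port_src, (unsigned)port_dst, (unsigned)ip);
        return 0;
    }
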
@@ -7145,43 +6247,36 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
 		}
 	}
 
+	/* record initial flags and protocol */
+	first->tx_flags = tx_flags;
+	first->protocol = protocol;
+
 #ifdef IXGBE_FCOE
 	/* setup tx offload for FCoE */
 	if ((protocol == __constant_htons(ETH_P_FCOE)) &&
 	    (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) {
-		tso = ixgbe_fso(tx_ring, first, tx_flags, &hdr_len);
+		tso = ixgbe_fso(tx_ring, first, &hdr_len);
 		if (tso < 0)
 			goto out_drop;
-		else if (tso)
-			tx_flags |= IXGBE_TX_FLAGS_FSO |
-				    IXGBE_TX_FLAGS_FCOE;
-		else
-			tx_flags |= IXGBE_TX_FLAGS_FCOE;
 
 		goto xmit_fcoe;
 	}
 
 #endif /* IXGBE_FCOE */
-	/* setup IPv4/IPv6 offloads */
-	if (protocol == __constant_htons(ETH_P_IP))
-		tx_flags |= IXGBE_TX_FLAGS_IPV4;
-
-	tso = ixgbe_tso(tx_ring, first, tx_flags, protocol, &hdr_len);
+	tso = ixgbe_tso(tx_ring, first, &hdr_len);
 	if (tso < 0)
 		goto out_drop;
-	else if (tso)
-		tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM;
-	else if (ixgbe_tx_csum(tx_ring, first, tx_flags, protocol))
-		tx_flags |= IXGBE_TX_FLAGS_CSUM;
+	else if (!tso)
+		ixgbe_tx_csum(tx_ring, first);
 
 	/* add the ATR filter if ATR is on */
 	if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
-		ixgbe_atr(tx_ring, first, tx_flags, protocol);
+		ixgbe_atr(tx_ring, first);
 
 #ifdef IXGBE_FCOE
 xmit_fcoe:
 #endif /* IXGBE_FCOE */
-	ixgbe_tx_map(tx_ring, first, tx_flags, hdr_len);
+	ixgbe_tx_map(tx_ring, first, hdr_len);
 
 	ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
 
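
With the metadata stored on the first buffer, the hot transmit path above reduces to four steps: record flags/protocol, attempt TSO (falling back to plain checksum offload when the frame is not TSO), optionally sample the flow for ATR, then map descriptors. A condensed control-flow sketch with stubbed helpers (do_tso() and friends are placeholders, not driver functions):

    #include <stdio.h>

    static int do_tso(void)   { return 0; }  /* 0 means: not a TSO frame */
    static void do_csum(void) { puts("checksum offload context set"); }
    static void do_atr(void)  { puts("flow sampled for ATR"); }
    static void do_map(void)  { puts("descriptors mapped"); }

    int main(void)
    {
        int tso = do_tso();
        if (tso < 0)
            return 1;     /* error: drop the frame */
        else if (!tso)
            do_csum();    /* not TSO: fall back to checksum offload */
        do_atr();
        do_map();
        return 0;
    }
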
@@ -7347,8 +6442,8 @@ static void ixgbe_netpoll(struct net_device *netdev)
 	}
 	adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
 }
-#endif
 
+#endif
 static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
 						   struct rtnl_link_stats64 *stats)
 {
@@ -7397,6 +6492,7 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
 	return stats;
 }
 
+#ifdef CONFIG_IXGBE_DCB
 /* ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid.
  * #adapter: pointer to ixgbe_adapter
  * @tc: number of traffic classes currently enabled
@@ -7433,7 +6529,6 @@ static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc)
 		return;
 	}
 
-
 /* ixgbe_setup_tc - routine to configure net_device for multiple traffic
  * classes.
  *
@@ -7453,7 +6548,8 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
 
 	/* Hardware supports up to 8 traffic classes */
 	if (tc > adapter->dcb_cfg.num_tcs.pg_tcs ||
-	    (hw->mac.type == ixgbe_mac_82598EB && tc < MAX_TRAFFIC_CLASS))
+	    (hw->mac.type == ixgbe_mac_82598EB &&
+	     tc < MAX_TRAFFIC_CLASS))
 		return -EINVAL;
 
 	/* Hardware has to reinitialize queues and interrupts to
@@ -7467,7 +6563,6 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
 	if (tc) {
 		netdev_set_num_tc(dev, tc);
 		adapter->last_lfc_mode = adapter->hw.fc.current_mode;
-
 		adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
 		adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
 
@@ -7475,7 +6570,6 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
 			adapter->hw.fc.requested_mode = ixgbe_fc_none;
 	} else {
 		netdev_reset_tc(dev);
-
 		adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
 
 		adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
@@ -7493,6 +6587,7 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
 	return 0;
 }
 
+#endif /* CONFIG_IXGBE_DCB */
 void ixgbe_do_reset(struct net_device *netdev)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -7504,54 +6599,52 @@ void ixgbe_do_reset(struct net_device *netdev)
 }
 
 static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
-					    netdev_features_t data)
+					    netdev_features_t features)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
 #ifdef CONFIG_DCB
 	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
-		data &= ~NETIF_F_HW_VLAN_RX;
+		features &= ~NETIF_F_HW_VLAN_RX;
 #endif
 
 	/* return error if RXHASH is being enabled when RSS is not supported */
 	if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
-		data &= ~NETIF_F_RXHASH;
+		features &= ~NETIF_F_RXHASH;
 
 	/* If Rx checksum is disabled, then RSC/LRO should also be disabled */
-	if (!(data & NETIF_F_RXCSUM))
-		data &= ~NETIF_F_LRO;
+	if (!(features & NETIF_F_RXCSUM))
+		features &= ~NETIF_F_LRO;
 
-	/* Turn off LRO if not RSC capable or invalid ITR settings */
-	if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) {
-		data &= ~NETIF_F_LRO;
-	} else if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
-		   (adapter->rx_itr_setting != 1 &&
-		    adapter->rx_itr_setting > IXGBE_MAX_RSC_INT_RATE)) {
-		data &= ~NETIF_F_LRO;
-		e_info(probe, "rx-usecs set too low, not enabling RSC\n");
-	}
+	/* Turn off LRO if not RSC capable */
+	if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
+		features &= ~NETIF_F_LRO;
 
-	return data;
+	return features;
 }
 
 static int ixgbe_set_features(struct net_device *netdev,
-			      netdev_features_t data)
+			      netdev_features_t features)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
-	netdev_features_t changed = netdev->features ^ data;
+	netdev_features_t changed = netdev->features ^ features;
 	bool need_reset = false;
 
 	/* Make sure RSC matches LRO, reset if change */
-	if (!!(data & NETIF_F_LRO) !=
-	    !!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
-		adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED;
-		switch (adapter->hw.mac.type) {
-		case ixgbe_mac_X540:
-		case ixgbe_mac_82599EB:
+	if (!(features & NETIF_F_LRO)) {
+		if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
 			need_reset = true;
-			break;
-		default:
-			break;
+		adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
+	} else if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) &&
+		   !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
+		if (adapter->rx_itr_setting == 1 ||
+		    adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
+			adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
+			need_reset = true;
+		} else if ((changed ^ features) & NETIF_F_LRO) {
+			e_info(probe, "rx-usecs set too low, "
+			       "disabling RSC\n");
		}
 	}
 
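
The rewritten block couples the RSC flag directly to ethtool's LRO bit: the rx-usecs sanity check moves out of ixgbe_fix_features() and into ixgbe_set_features(), which enables RSC only when the interrupt rate leaves room for coalescing (rx_itr_setting of 1, i.e. dynamic, or above IXGBE_MIN_RSC_ITR). A standalone model of that decision — the flag values and ITR threshold below are made up for the example:

    #include <stdbool.h>
    #include <stdio.h>

    #define FLAG_RSC_CAPABLE 0x1
    #define FLAG_RSC_ENABLED 0x2
    #define MIN_RSC_ITR      24   /* placeholder threshold */

    static bool update_rsc(unsigned *flags2, bool lro, unsigned rx_itr)
    {
        bool need_reset = false;

        if (!lro) {
            if (*flags2 & FLAG_RSC_ENABLED)
                need_reset = true;           /* RSC is being turned off */
            *flags2 &= ~FLAG_RSC_ENABLED;
        } else if ((*flags2 & FLAG_RSC_CAPABLE) &&
                   !(*flags2 & FLAG_RSC_ENABLED)) {
            if (rx_itr == 1 || rx_itr > MIN_RSC_ITR) {
                *flags2 |= FLAG_RSC_ENABLED; /* RSC is being turned on */
                need_reset = true;
            } else {
                puts("rx-usecs set too low, disabling RSC");
            }
        }
        return need_reset;
    }

    int main(void)
    {
        unsigned flags2 = FLAG_RSC_CAPABLE;
        printf("reset needed: %d\n", update_rsc(&flags2, true, 1));
        return 0;
    }
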
@@ -7559,31 +6652,30 @@ static int ixgbe_set_features(struct net_device *netdev,
 	 * Check if Flow Director n-tuple support was enabled or disabled. If
 	 * the state changed, we need to reset.
 	 */
-	if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) {
-		/* turn off ATR, enable perfect filters and reset */
-		if (data & NETIF_F_NTUPLE) {
-			adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
-			adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+	if (!(features & NETIF_F_NTUPLE)) {
+		if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
+			/* turn off Flow Director, set ATR and reset */
+			if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
+			    !(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
+				adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
 			need_reset = true;
 		}
-	} else if (!(data & NETIF_F_NTUPLE)) {
-		/* turn off Flow Director, set ATR and reset */
 		adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
-		if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
-		    !(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
-			adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
+	} else if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) {
+		/* turn off ATR, enable perfect filters and reset */
+		adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+		adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
 		need_reset = true;
 	}
 
 	if (changed & NETIF_F_RXALL)
 		need_reset = true;
 
-	netdev->features = data;
+	netdev->features = features;
 	if (need_reset)
 		ixgbe_do_reset(netdev);
 
 	return 0;
-
 }
 
 static const struct net_device_ops ixgbe_netdev_ops = {
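
The n-tuple hunk above makes the toggle symmetric: clearing NETIF_F_NTUPLE drops perfect Flow Director filters and re-enables ATR hashing (when RSS is on and DCB is off), while setting it does the reverse; either transition forces a reset. Modeled in the same simplified style, with hypothetical flag values:

    #include <stdbool.h>
    #include <stdio.h>

    #define FLAG_FDIR_HASH    0x1  /* ATR sampling */
    #define FLAG_FDIR_PERFECT 0x2  /* perfect filters */
    #define FLAG_RSS          0x4
    #define FLAG_DCB          0x8

    static bool update_fdir(unsigned *flags, bool ntuple)
    {
        bool need_reset = false;

        if (!ntuple) {
            if (*flags & FLAG_FDIR_PERFECT) {
                /* fall back to ATR if RSS is on and DCB is off */
                if ((*flags & FLAG_RSS) && !(*flags & FLAG_DCB))
                    *flags |= FLAG_FDIR_HASH;
                need_reset = true;
            }
            *flags &= ~FLAG_FDIR_PERFECT;
        } else if (!(*flags & FLAG_FDIR_PERFECT)) {
            /* ATR off, perfect filters on */
            *flags &= ~FLAG_FDIR_HASH;
            *flags |= FLAG_FDIR_PERFECT;
            need_reset = true;
        }
        return need_reset;
    }

    int main(void)
    {
        unsigned flags = FLAG_RSS | FLAG_FDIR_HASH;
        printf("reset needed: %d\n", update_fdir(&flags, true));
        return 0;
    }
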
@@ -7591,7 +6683,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
 	.ndo_stop		= ixgbe_close,
 	.ndo_start_xmit		= ixgbe_xmit_frame,
 	.ndo_select_queue	= ixgbe_select_queue,
-	.ndo_set_rx_mode        = ixgbe_set_rx_mode,
+	.ndo_set_rx_mode	= ixgbe_set_rx_mode,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_set_mac_address	= ixgbe_set_mac,
 	.ndo_change_mtu		= ixgbe_change_mtu,
@@ -7602,10 +6694,12 @@ static const struct net_device_ops ixgbe_netdev_ops = {
 	.ndo_set_vf_mac		= ixgbe_ndo_set_vf_mac,
 	.ndo_set_vf_vlan	= ixgbe_ndo_set_vf_vlan,
 	.ndo_set_vf_tx_rate	= ixgbe_ndo_set_vf_bw,
 	.ndo_set_vf_spoofchk	= ixgbe_ndo_set_vf_spoofchk,
 	.ndo_get_vf_config	= ixgbe_ndo_get_vf_config,
 	.ndo_get_stats64	= ixgbe_get_stats64,
+#ifdef CONFIG_IXGBE_DCB
 	.ndo_setup_tc		= ixgbe_setup_tc,
+#endif
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= ixgbe_netpoll,
 #endif
@@ -7623,7 +6717,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
 };
 
 static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
-                                     const struct ixgbe_info *ii)
+				     const struct ixgbe_info *ii)
 {
 #ifdef CONFIG_PCI_IOV
 	struct ixgbe_hw *hw = &adapter->hw;
@@ -7904,7 +6998,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 	if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
 		e_dev_err("The EEPROM Checksum Is Not Valid\n");
 		err = -EIO;
-		goto err_eeprom;
+		goto err_sw_init;
 	}
 
 	memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
@@ -7913,11 +7007,11 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 	if (ixgbe_validate_mac_addr(netdev->perm_addr)) {
 		e_dev_err("invalid MAC address\n");
 		err = -EIO;
-		goto err_eeprom;
+		goto err_sw_init;
 	}
 
 	setup_timer(&adapter->service_timer, &ixgbe_service_timer,
 		    (unsigned long) adapter);
 
 	INIT_WORK(&adapter->service_task, ixgbe_service_task);
 	clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
@@ -8005,7 +7099,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 
 	/* reset the hardware with the new settings */
 	err = hw->mac.ops.start_hw(hw);
-
 	if (err == IXGBE_ERR_EEPROM_VERSION) {
 		/* We are running on a pre-production device, log a warning */
 		e_dev_warn("This device is a pre-production adapter/LOM. "
@@ -8060,7 +7153,6 @@ err_register:
 	ixgbe_release_hw_control(adapter);
 	ixgbe_clear_interrupt_scheme(adapter);
 err_sw_init:
-err_eeprom:
 	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
 		ixgbe_disable_sriov(adapter);
 	adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;