diff options
author | Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com> | 2009-06-04 12:01:43 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2009-06-07 08:20:24 -0400 |
commit | c4cf55e5d2e9353c6054eb0e22fc1d0a9a48f045 (patch) | |
tree | 2b8597403de4807f27d038250e07eefd003e7418 | |
parent | ffff47720318860933b2af84d1912af8b2e621f2 (diff) |
ixgbe: Enable Flow Director hashing in 82599
This patch enables Flow Director's ATR functionality in the main base
driver for 82599.
Signed-off-by: Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>
Acked-by: Mallikarjuna R Chilakala <mallikarjuna.chilakala@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r-- | drivers/net/ixgbe/ixgbe.h | 12 | ||||
-rw-r--r-- | drivers/net/ixgbe/ixgbe_ethtool.c | 2 | ||||
-rw-r--r-- | drivers/net/ixgbe/ixgbe_main.c | 215 |
3 files changed, 228 insertions, 1 deletions
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h index 7adf959e2038..f2206e2a2425 100644 --- a/drivers/net/ixgbe/ixgbe.h +++ b/drivers/net/ixgbe/ixgbe.h | |||
@@ -126,6 +126,8 @@ struct ixgbe_ring { | |||
126 | unsigned int count; /* amount of descriptors */ | 126 | unsigned int count; /* amount of descriptors */ |
127 | unsigned int next_to_use; | 127 | unsigned int next_to_use; |
128 | unsigned int next_to_clean; | 128 | unsigned int next_to_clean; |
129 | u8 atr_sample_rate; | ||
130 | u8 atr_count; | ||
129 | 131 | ||
130 | int queue_index; /* needed for multiqueue queue management */ | 132 | int queue_index; /* needed for multiqueue queue management */ |
131 | union { | 133 | union { |
@@ -148,6 +150,7 @@ struct ixgbe_ring { | |||
148 | int cpu; | 150 | int cpu; |
149 | #endif | 151 | #endif |
150 | struct ixgbe_queue_stats stats; | 152 | struct ixgbe_queue_stats stats; |
153 | unsigned long reinit_state; | ||
151 | 154 | ||
152 | u16 work_limit; /* max work per interrupt */ | 155 | u16 work_limit; /* max work per interrupt */ |
153 | u16 rx_buf_len; | 156 | u16 rx_buf_len; |
@@ -159,6 +162,7 @@ enum ixgbe_ring_f_enum { | |||
159 | RING_F_DCB, | 162 | RING_F_DCB, |
160 | RING_F_VMDQ, | 163 | RING_F_VMDQ, |
161 | RING_F_RSS, | 164 | RING_F_RSS, |
165 | RING_F_FDIR, | ||
162 | #ifdef IXGBE_FCOE | 166 | #ifdef IXGBE_FCOE |
163 | RING_F_FCOE, | 167 | RING_F_FCOE, |
164 | #endif /* IXGBE_FCOE */ | 168 | #endif /* IXGBE_FCOE */ |
@@ -169,6 +173,7 @@ enum ixgbe_ring_f_enum { | |||
169 | #define IXGBE_MAX_DCB_INDICES 8 | 173 | #define IXGBE_MAX_DCB_INDICES 8 |
170 | #define IXGBE_MAX_RSS_INDICES 16 | 174 | #define IXGBE_MAX_RSS_INDICES 16 |
171 | #define IXGBE_MAX_VMDQ_INDICES 16 | 175 | #define IXGBE_MAX_VMDQ_INDICES 16 |
176 | #define IXGBE_MAX_FDIR_INDICES 64 | ||
172 | #ifdef IXGBE_FCOE | 177 | #ifdef IXGBE_FCOE |
173 | #define IXGBE_MAX_FCOE_INDICES 8 | 178 | #define IXGBE_MAX_FCOE_INDICES 8 |
174 | #endif /* IXGBE_FCOE */ | 179 | #endif /* IXGBE_FCOE */ |
@@ -317,6 +322,8 @@ struct ixgbe_adapter { | |||
317 | #define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1 << 23) | 322 | #define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1 << 23) |
318 | #define IXGBE_FLAG_IN_SFP_LINK_TASK (u32)(1 << 24) | 323 | #define IXGBE_FLAG_IN_SFP_LINK_TASK (u32)(1 << 24) |
319 | #define IXGBE_FLAG_IN_SFP_MOD_TASK (u32)(1 << 25) | 324 | #define IXGBE_FLAG_IN_SFP_MOD_TASK (u32)(1 << 25) |
325 | #define IXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 26) | ||
326 | #define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 27) | ||
320 | #define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 29) | 327 | #define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 29) |
321 | 328 | ||
322 | u32 flags2; | 329 | u32 flags2; |
@@ -356,6 +363,10 @@ struct ixgbe_adapter { | |||
356 | struct timer_list sfp_timer; | 363 | struct timer_list sfp_timer; |
357 | struct work_struct multispeed_fiber_task; | 364 | struct work_struct multispeed_fiber_task; |
358 | struct work_struct sfp_config_module_task; | 365 | struct work_struct sfp_config_module_task; |
366 | u32 fdir_pballoc; | ||
367 | u32 atr_sample_rate; | ||
368 | spinlock_t fdir_perfect_lock; | ||
369 | struct work_struct fdir_reinit_task; | ||
359 | #ifdef IXGBE_FCOE | 370 | #ifdef IXGBE_FCOE |
360 | struct ixgbe_fcoe fcoe; | 371 | struct ixgbe_fcoe fcoe; |
361 | #endif /* IXGBE_FCOE */ | 372 | #endif /* IXGBE_FCOE */ |
@@ -368,6 +379,7 @@ enum ixbge_state_t { | |||
368 | __IXGBE_TESTING, | 379 | __IXGBE_TESTING, |
369 | __IXGBE_RESETTING, | 380 | __IXGBE_RESETTING, |
370 | __IXGBE_DOWN, | 381 | __IXGBE_DOWN, |
382 | __IXGBE_FDIR_INIT_DONE, | ||
371 | __IXGBE_SFP_MODULE_NOT_FOUND | 383 | __IXGBE_SFP_MODULE_NOT_FOUND |
372 | }; | 384 | }; |
373 | 385 | ||
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c index ce9cf7edefb1..86f4f3e36f27 100644 --- a/drivers/net/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ixgbe/ixgbe_ethtool.c | |||
@@ -68,6 +68,8 @@ static struct ixgbe_stats ixgbe_gstrings_stats[] = { | |||
68 | {"rx_crc_errors", IXGBE_STAT(net_stats.rx_crc_errors)}, | 68 | {"rx_crc_errors", IXGBE_STAT(net_stats.rx_crc_errors)}, |
69 | {"rx_frame_errors", IXGBE_STAT(net_stats.rx_frame_errors)}, | 69 | {"rx_frame_errors", IXGBE_STAT(net_stats.rx_frame_errors)}, |
70 | {"hw_rsc_count", IXGBE_STAT(rsc_count)}, | 70 | {"hw_rsc_count", IXGBE_STAT(rsc_count)}, |
71 | {"fdir_match", IXGBE_STAT(stats.fdirmatch)}, | ||
72 | {"fdir_miss", IXGBE_STAT(stats.fdirmiss)}, | ||
71 | {"rx_fifo_errors", IXGBE_STAT(net_stats.rx_fifo_errors)}, | 73 | {"rx_fifo_errors", IXGBE_STAT(net_stats.rx_fifo_errors)}, |
72 | {"rx_missed_errors", IXGBE_STAT(net_stats.rx_missed_errors)}, | 74 | {"rx_missed_errors", IXGBE_STAT(net_stats.rx_missed_errors)}, |
73 | {"tx_aborted_errors", IXGBE_STAT(net_stats.tx_aborted_errors)}, | 75 | {"tx_aborted_errors", IXGBE_STAT(net_stats.tx_aborted_errors)}, |
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 2553173a16c4..ca7c5d508752 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c | |||
@@ -1123,8 +1123,24 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data) | |||
1123 | if (hw->mac.type == ixgbe_mac_82598EB) | 1123 | if (hw->mac.type == ixgbe_mac_82598EB) |
1124 | ixgbe_check_fan_failure(adapter, eicr); | 1124 | ixgbe_check_fan_failure(adapter, eicr); |
1125 | 1125 | ||
1126 | if (hw->mac.type == ixgbe_mac_82599EB) | 1126 | if (hw->mac.type == ixgbe_mac_82599EB) { |
1127 | ixgbe_check_sfp_event(adapter, eicr); | 1127 | ixgbe_check_sfp_event(adapter, eicr); |
1128 | |||
1129 | /* Handle Flow Director Full threshold interrupt */ | ||
1130 | if (eicr & IXGBE_EICR_FLOW_DIR) { | ||
1131 | int i; | ||
1132 | IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_FLOW_DIR); | ||
1133 | /* Disable transmits before FDIR Re-initialization */ | ||
1134 | netif_tx_stop_all_queues(netdev); | ||
1135 | for (i = 0; i < adapter->num_tx_queues; i++) { | ||
1136 | struct ixgbe_ring *tx_ring = | ||
1137 | &adapter->tx_ring[i]; | ||
1138 | if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE, | ||
1139 | &tx_ring->reinit_state)) | ||
1140 | schedule_work(&adapter->fdir_reinit_task); | ||
1141 | } | ||
1142 | } | ||
1143 | } | ||
1128 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) | 1144 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) |
1129 | IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER); | 1145 | IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER); |
1130 | 1146 | ||
@@ -1623,6 +1639,9 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter) | |||
1623 | mask |= IXGBE_EIMS_GPI_SDP1; | 1639 | mask |= IXGBE_EIMS_GPI_SDP1; |
1624 | mask |= IXGBE_EIMS_GPI_SDP2; | 1640 | mask |= IXGBE_EIMS_GPI_SDP2; |
1625 | } | 1641 | } |
1642 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || | ||
1643 | adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) | ||
1644 | mask |= IXGBE_EIMS_FLOW_DIR; | ||
1626 | 1645 | ||
1627 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); | 1646 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); |
1628 | ixgbe_irq_enable_queues(adapter, ~0); | 1647 | ixgbe_irq_enable_queues(adapter, ~0); |
@@ -2376,6 +2395,7 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter) | |||
2376 | static void ixgbe_configure(struct ixgbe_adapter *adapter) | 2395 | static void ixgbe_configure(struct ixgbe_adapter *adapter) |
2377 | { | 2396 | { |
2378 | struct net_device *netdev = adapter->netdev; | 2397 | struct net_device *netdev = adapter->netdev; |
2398 | struct ixgbe_hw *hw = &adapter->hw; | ||
2379 | int i; | 2399 | int i; |
2380 | 2400 | ||
2381 | ixgbe_set_rx_mode(netdev); | 2401 | ixgbe_set_rx_mode(netdev); |
@@ -2397,6 +2417,15 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter) | |||
2397 | ixgbe_configure_fcoe(adapter); | 2417 | ixgbe_configure_fcoe(adapter); |
2398 | 2418 | ||
2399 | #endif /* IXGBE_FCOE */ | 2419 | #endif /* IXGBE_FCOE */ |
2420 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { | ||
2421 | for (i = 0; i < adapter->num_tx_queues; i++) | ||
2422 | adapter->tx_ring[i].atr_sample_rate = | ||
2423 | adapter->atr_sample_rate; | ||
2424 | ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc); | ||
2425 | } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) { | ||
2426 | ixgbe_init_fdir_perfect_82599(hw, adapter->fdir_pballoc); | ||
2427 | } | ||
2428 | |||
2400 | ixgbe_configure_tx(adapter); | 2429 | ixgbe_configure_tx(adapter); |
2401 | ixgbe_configure_rx(adapter); | 2430 | ixgbe_configure_rx(adapter); |
2402 | for (i = 0; i < adapter->num_rx_queues; i++) | 2431 | for (i = 0; i < adapter->num_rx_queues; i++) |
@@ -2653,6 +2682,10 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter) | |||
2653 | DPRINTK(PROBE, ERR, "link_config FAILED %d\n", err); | 2682 | DPRINTK(PROBE, ERR, "link_config FAILED %d\n", err); |
2654 | } | 2683 | } |
2655 | 2684 | ||
2685 | for (i = 0; i < adapter->num_tx_queues; i++) | ||
2686 | set_bit(__IXGBE_FDIR_INIT_DONE, | ||
2687 | &(adapter->tx_ring[i].reinit_state)); | ||
2688 | |||
2656 | /* enable transmits */ | 2689 | /* enable transmits */ |
2657 | netif_tx_start_all_queues(netdev); | 2690 | netif_tx_start_all_queues(netdev); |
2658 | 2691 | ||
@@ -2848,6 +2881,10 @@ void ixgbe_down(struct ixgbe_adapter *adapter) | |||
2848 | del_timer_sync(&adapter->watchdog_timer); | 2881 | del_timer_sync(&adapter->watchdog_timer); |
2849 | cancel_work_sync(&adapter->watchdog_task); | 2882 | cancel_work_sync(&adapter->watchdog_task); |
2850 | 2883 | ||
2884 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || | ||
2885 | adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) | ||
2886 | cancel_work_sync(&adapter->fdir_reinit_task); | ||
2887 | |||
2851 | /* disable transmits in the hardware now that interrupts are off */ | 2888 | /* disable transmits in the hardware now that interrupts are off */ |
2852 | for (i = 0; i < adapter->num_tx_queues; i++) { | 2889 | for (i = 0; i < adapter->num_tx_queues; i++) { |
2853 | j = adapter->tx_ring[i].reg_idx; | 2890 | j = adapter->tx_ring[i].reg_idx; |
@@ -2982,6 +3019,38 @@ static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) | |||
2982 | return ret; | 3019 | return ret; |
2983 | } | 3020 | } |
2984 | 3021 | ||
3022 | /** | ||
3023 | * ixgbe_set_fdir_queues: Allocate queues for Flow Director | ||
3024 | * @adapter: board private structure to initialize | ||
3025 | * | ||
3026 | * Flow Director is an advanced Rx filter, attempting to get Rx flows back | ||
3027 | * to the original CPU that initiated the Tx session. This runs in addition | ||
3028 | * to RSS, so if a packet doesn't match an FDIR filter, we can still spread the | ||
3029 | * Rx load across CPUs using RSS. | ||
3030 | * | ||
3031 | **/ | ||
3032 | static bool inline ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter) | ||
3033 | { | ||
3034 | bool ret = false; | ||
3035 | struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR]; | ||
3036 | |||
3037 | f_fdir->indices = min((int)num_online_cpus(), f_fdir->indices); | ||
3038 | f_fdir->mask = 0; | ||
3039 | |||
3040 | /* Flow Director must have RSS enabled */ | ||
3041 | if (adapter->flags & IXGBE_FLAG_RSS_ENABLED && | ||
3042 | ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || | ||
3043 | (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)))) { | ||
3044 | adapter->num_tx_queues = f_fdir->indices; | ||
3045 | adapter->num_rx_queues = f_fdir->indices; | ||
3046 | ret = true; | ||
3047 | } else { | ||
3048 | adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; | ||
3049 | adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; | ||
3050 | } | ||
3051 | return ret; | ||
3052 | } | ||
3053 | |||
2985 | #ifdef IXGBE_FCOE | 3054 | #ifdef IXGBE_FCOE |
2986 | /** | 3055 | /** |
2987 | * ixgbe_set_fcoe_queues: Allocate queues for Fiber Channel over Ethernet (FCoE) | 3056 | * ixgbe_set_fcoe_queues: Allocate queues for Fiber Channel over Ethernet (FCoE) |
@@ -3046,6 +3115,9 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter) | |||
3046 | goto done; | 3115 | goto done; |
3047 | 3116 | ||
3048 | #endif | 3117 | #endif |
3118 | if (ixgbe_set_fdir_queues(adapter)) | ||
3119 | goto done; | ||
3120 | |||
3049 | if (ixgbe_set_rss_queues(adapter)) | 3121 | if (ixgbe_set_rss_queues(adapter)) |
3050 | goto done; | 3122 | goto done; |
3051 | 3123 | ||
@@ -3216,6 +3288,31 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter) | |||
3216 | } | 3288 | } |
3217 | #endif | 3289 | #endif |
3218 | 3290 | ||
3291 | /** | ||
3292 | * ixgbe_cache_ring_fdir - Descriptor ring to register mapping for Flow Director | ||
3293 | * @adapter: board private structure to initialize | ||
3294 | * | ||
3295 | * Cache the descriptor ring offsets for Flow Director to the assigned rings. | ||
3296 | * | ||
3297 | **/ | ||
3298 | static bool inline ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter) | ||
3299 | { | ||
3300 | int i; | ||
3301 | bool ret = false; | ||
3302 | |||
3303 | if (adapter->flags & IXGBE_FLAG_RSS_ENABLED && | ||
3304 | ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) || | ||
3305 | (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))) { | ||
3306 | for (i = 0; i < adapter->num_rx_queues; i++) | ||
3307 | adapter->rx_ring[i].reg_idx = i; | ||
3308 | for (i = 0; i < adapter->num_tx_queues; i++) | ||
3309 | adapter->tx_ring[i].reg_idx = i; | ||
3310 | ret = true; | ||
3311 | } | ||
3312 | |||
3313 | return ret; | ||
3314 | } | ||
3315 | |||
3219 | #ifdef IXGBE_FCOE | 3316 | #ifdef IXGBE_FCOE |
3220 | /** | 3317 | /** |
3221 | * ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for the FCoE | 3318 | * ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for the FCoE |
@@ -3276,6 +3373,9 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) | |||
3276 | return; | 3373 | return; |
3277 | 3374 | ||
3278 | #endif | 3375 | #endif |
3376 | if (ixgbe_cache_ring_fdir(adapter)) | ||
3377 | return; | ||
3378 | |||
3279 | if (ixgbe_cache_ring_rss(adapter)) | 3379 | if (ixgbe_cache_ring_rss(adapter)) |
3280 | return; | 3380 | return; |
3281 | } | 3381 | } |
@@ -3369,6 +3469,9 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) | |||
3369 | 3469 | ||
3370 | adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; | 3470 | adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; |
3371 | adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; | 3471 | adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; |
3472 | adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; | ||
3473 | adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; | ||
3474 | adapter->atr_sample_rate = 0; | ||
3372 | ixgbe_set_num_queues(adapter); | 3475 | ixgbe_set_num_queues(adapter); |
3373 | 3476 | ||
3374 | err = pci_enable_msi(adapter->pdev); | 3477 | err = pci_enable_msi(adapter->pdev); |
@@ -3634,6 +3737,11 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) | |||
3634 | adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599; | 3737 | adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599; |
3635 | adapter->flags |= IXGBE_FLAG2_RSC_CAPABLE; | 3738 | adapter->flags |= IXGBE_FLAG2_RSC_CAPABLE; |
3636 | adapter->flags |= IXGBE_FLAG2_RSC_ENABLED; | 3739 | adapter->flags |= IXGBE_FLAG2_RSC_ENABLED; |
3740 | adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; | ||
3741 | adapter->ring_feature[RING_F_FDIR].indices = | ||
3742 | IXGBE_MAX_FDIR_INDICES; | ||
3743 | adapter->atr_sample_rate = 20; | ||
3744 | adapter->fdir_pballoc = 0; | ||
3637 | #ifdef IXGBE_FCOE | 3745 | #ifdef IXGBE_FCOE |
3638 | adapter->flags |= IXGBE_FLAG_FCOE_ENABLED; | 3746 | adapter->flags |= IXGBE_FLAG_FCOE_ENABLED; |
3639 | adapter->ring_feature[RING_F_FCOE].indices = IXGBE_FCRETA_SIZE; | 3747 | adapter->ring_feature[RING_F_FCOE].indices = IXGBE_FCRETA_SIZE; |
@@ -4223,6 +4331,8 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) | |||
4223 | IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */ | 4331 | IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */ |
4224 | adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); | 4332 | adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); |
4225 | adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); | 4333 | adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); |
4334 | adapter->stats.fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); | ||
4335 | adapter->stats.fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS); | ||
4226 | #ifdef IXGBE_FCOE | 4336 | #ifdef IXGBE_FCOE |
4227 | adapter->stats.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC); | 4337 | adapter->stats.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC); |
4228 | adapter->stats.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC); | 4338 | adapter->stats.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC); |
@@ -4388,6 +4498,30 @@ static void ixgbe_sfp_config_module_task(struct work_struct *work) | |||
4388 | } | 4498 | } |
4389 | 4499 | ||
4390 | /** | 4500 | /** |
4501 | * ixgbe_fdir_reinit_task - worker thread to reinit FDIR filter table | ||
4502 | * @work: pointer to work_struct containing our data | ||
4503 | **/ | ||
4504 | static void ixgbe_fdir_reinit_task(struct work_struct *work) | ||
4505 | { | ||
4506 | struct ixgbe_adapter *adapter = container_of(work, | ||
4507 | struct ixgbe_adapter, | ||
4508 | fdir_reinit_task); | ||
4509 | struct ixgbe_hw *hw = &adapter->hw; | ||
4510 | int i; | ||
4511 | |||
4512 | if (ixgbe_reinit_fdir_tables_82599(hw) == 0) { | ||
4513 | for (i = 0; i < adapter->num_tx_queues; i++) | ||
4514 | set_bit(__IXGBE_FDIR_INIT_DONE, | ||
4515 | &(adapter->tx_ring[i].reinit_state)); | ||
4516 | } else { | ||
4517 | DPRINTK(PROBE, ERR, "failed to finish FDIR re-initialization, " | ||
4518 | "ignored adding FDIR ATR filters \n"); | ||
4519 | } | ||
4520 | /* Done FDIR Re-initialization, enable transmits */ | ||
4521 | netif_tx_start_all_queues(adapter->netdev); | ||
4522 | } | ||
4523 | |||
4524 | /** | ||
4391 | * ixgbe_watchdog_task - worker thread to bring link up | 4525 | * ixgbe_watchdog_task - worker thread to bring link up |
4392 | * @work: pointer to work_struct containing our data | 4526 | * @work: pointer to work_struct containing our data |
4393 | **/ | 4527 | **/ |
@@ -4814,6 +4948,58 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter, | |||
4814 | writel(i, adapter->hw.hw_addr + tx_ring->tail); | 4948 | writel(i, adapter->hw.hw_addr + tx_ring->tail); |
4815 | } | 4949 | } |
4816 | 4950 | ||
4951 | static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb, | ||
4952 | int queue, u32 tx_flags) | ||
4953 | { | ||
4954 | /* Right now, we support IPv4 only */ | ||
4955 | struct ixgbe_atr_input atr_input; | ||
4956 | struct tcphdr *th; | ||
4957 | struct udphdr *uh; | ||
4958 | struct iphdr *iph = ip_hdr(skb); | ||
4959 | struct ethhdr *eth = (struct ethhdr *)skb->data; | ||
4960 | u16 vlan_id, src_port, dst_port, flex_bytes; | ||
4961 | u32 src_ipv4_addr, dst_ipv4_addr; | ||
4962 | u8 l4type = 0; | ||
4963 | |||
4964 | /* check if we're UDP or TCP */ | ||
4965 | if (iph->protocol == IPPROTO_TCP) { | ||
4966 | th = tcp_hdr(skb); | ||
4967 | src_port = th->source; | ||
4968 | dst_port = th->dest; | ||
4969 | l4type |= IXGBE_ATR_L4TYPE_TCP; | ||
4970 | /* l4type IPv4 type is 0, no need to assign */ | ||
4971 | } else if(iph->protocol == IPPROTO_UDP) { | ||
4972 | uh = udp_hdr(skb); | ||
4973 | src_port = uh->source; | ||
4974 | dst_port = uh->dest; | ||
4975 | l4type |= IXGBE_ATR_L4TYPE_UDP; | ||
4976 | /* l4type IPv4 type is 0, no need to assign */ | ||
4977 | } else { | ||
4978 | /* Unsupported L4 header, just bail here */ | ||
4979 | return; | ||
4980 | } | ||
4981 | |||
4982 | memset(&atr_input, 0, sizeof(struct ixgbe_atr_input)); | ||
4983 | |||
4984 | vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >> | ||
4985 | IXGBE_TX_FLAGS_VLAN_SHIFT; | ||
4986 | src_ipv4_addr = iph->saddr; | ||
4987 | dst_ipv4_addr = iph->daddr; | ||
4988 | flex_bytes = eth->h_proto; | ||
4989 | |||
4990 | ixgbe_atr_set_vlan_id_82599(&atr_input, vlan_id); | ||
4991 | ixgbe_atr_set_src_port_82599(&atr_input, dst_port); | ||
4992 | ixgbe_atr_set_dst_port_82599(&atr_input, src_port); | ||
4993 | ixgbe_atr_set_flex_byte_82599(&atr_input, flex_bytes); | ||
4994 | ixgbe_atr_set_l4type_82599(&atr_input, l4type); | ||
4995 | /* src and dst are inverted, think how the receiver sees them */ | ||
4996 | ixgbe_atr_set_src_ipv4_82599(&atr_input, dst_ipv4_addr); | ||
4997 | ixgbe_atr_set_dst_ipv4_82599(&atr_input, src_ipv4_addr); | ||
4998 | |||
4999 | /* This assumes the Rx queue and Tx queue are bound to the same CPU */ | ||
5000 | ixgbe_fdir_add_signature_filter_82599(&adapter->hw, &atr_input, queue); | ||
5001 | } | ||
5002 | |||
4817 | static int __ixgbe_maybe_stop_tx(struct net_device *netdev, | 5003 | static int __ixgbe_maybe_stop_tx(struct net_device *netdev, |
4818 | struct ixgbe_ring *tx_ring, int size) | 5004 | struct ixgbe_ring *tx_ring, int size) |
4819 | { | 5005 | { |
@@ -4848,6 +5034,9 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) | |||
4848 | { | 5034 | { |
4849 | struct ixgbe_adapter *adapter = netdev_priv(dev); | 5035 | struct ixgbe_adapter *adapter = netdev_priv(dev); |
4850 | 5036 | ||
5037 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) | ||
5038 | return smp_processor_id(); | ||
5039 | |||
4851 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) | 5040 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) |
4852 | return 0; /* All traffic should default to class 0 */ | 5041 | return 0; /* All traffic should default to class 0 */ |
4853 | 5042 | ||
@@ -4932,6 +5121,17 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
4932 | 5121 | ||
4933 | count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first); | 5122 | count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first); |
4934 | if (count) { | 5123 | if (count) { |
5124 | /* add the ATR filter if ATR is on */ | ||
5125 | if (tx_ring->atr_sample_rate) { | ||
5126 | ++tx_ring->atr_count; | ||
5127 | if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) && | ||
5128 | test_bit(__IXGBE_FDIR_INIT_DONE, | ||
5129 | &tx_ring->reinit_state)) { | ||
5130 | ixgbe_atr(adapter, skb, tx_ring->queue_index, | ||
5131 | tx_flags); | ||
5132 | tx_ring->atr_count = 0; | ||
5133 | } | ||
5134 | } | ||
4935 | ixgbe_tx_queue(adapter, tx_ring, tx_flags, count, skb->len, | 5135 | ixgbe_tx_queue(adapter, tx_ring, tx_flags, count, skb->len, |
4936 | hdr_len); | 5136 | hdr_len); |
4937 | ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED); | 5137 | ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED); |
@@ -5314,6 +5514,12 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
5314 | netdev->features |= NETIF_F_FCOE_CRC; | 5514 | netdev->features |= NETIF_F_FCOE_CRC; |
5315 | netdev->features |= NETIF_F_FSO; | 5515 | netdev->features |= NETIF_F_FSO; |
5316 | netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1; | 5516 | netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1; |
5517 | DPRINTK(DRV, INFO, "FCoE enabled, " | ||
5518 | "disabling Flow Director\n"); | ||
5519 | adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; | ||
5520 | adapter->flags &= | ||
5521 | ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; | ||
5522 | adapter->atr_sample_rate = 0; | ||
5317 | } else { | 5523 | } else { |
5318 | adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; | 5524 | adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; |
5319 | } | 5525 | } |
@@ -5412,6 +5618,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
5412 | /* carrier off reporting is important to ethtool even BEFORE open */ | 5618 | /* carrier off reporting is important to ethtool even BEFORE open */ |
5413 | netif_carrier_off(netdev); | 5619 | netif_carrier_off(netdev); |
5414 | 5620 | ||
5621 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || | ||
5622 | adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) | ||
5623 | INIT_WORK(&adapter->fdir_reinit_task, ixgbe_fdir_reinit_task); | ||
5624 | |||
5415 | #ifdef CONFIG_IXGBE_DCA | 5625 | #ifdef CONFIG_IXGBE_DCA |
5416 | if (dca_add_requester(&pdev->dev) == 0) { | 5626 | if (dca_add_requester(&pdev->dev) == 0) { |
5417 | adapter->flags |= IXGBE_FLAG_DCA_ENABLED; | 5627 | adapter->flags |= IXGBE_FLAG_DCA_ENABLED; |
@@ -5474,6 +5684,9 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev) | |||
5474 | cancel_work_sync(&adapter->sfp_task); | 5684 | cancel_work_sync(&adapter->sfp_task); |
5475 | cancel_work_sync(&adapter->multispeed_fiber_task); | 5685 | cancel_work_sync(&adapter->multispeed_fiber_task); |
5476 | cancel_work_sync(&adapter->sfp_config_module_task); | 5686 | cancel_work_sync(&adapter->sfp_config_module_task); |
5687 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || | ||
5688 | adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) | ||
5689 | cancel_work_sync(&adapter->fdir_reinit_task); | ||
5477 | flush_scheduled_work(); | 5690 | flush_scheduled_work(); |
5478 | 5691 | ||
5479 | #ifdef CONFIG_IXGBE_DCA | 5692 | #ifdef CONFIG_IXGBE_DCA |