-rw-r--r--	drivers/net/ethernet/intel/ixgbe/Makefile	|   2
-rw-r--r--	drivers/net/ethernet/intel/ixgbe/ixgbe.h	|   5
-rw-r--r--	drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c	| 929
-rw-r--r--	drivers/net/ethernet/intel/ixgbe/ixgbe_main.c	| 911
4 files changed, 945 insertions(+), 902 deletions(-)
diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile
index 7a16177a12a5..8be1d1b2132e 100644
--- a/drivers/net/ethernet/intel/ixgbe/Makefile
+++ b/drivers/net/ethernet/intel/ixgbe/Makefile
@@ -34,7 +34,7 @@ obj-$(CONFIG_IXGBE) += ixgbe.o
 
 ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
               ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
-              ixgbe_mbx.o ixgbe_x540.o
+              ixgbe_mbx.o ixgbe_x540.o ixgbe_lib.o
 
 ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
                              ixgbe_dcb_82599.o ixgbe_dcb_nl.o
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index e0cc3118234e..80e26ff30ebf 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -581,7 +581,9 @@ extern int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
 
 extern char ixgbe_driver_name[];
 extern const char ixgbe_driver_version[];
+#ifdef IXGBE_FCOE
 extern char ixgbe_default_device_descr[];
+#endif /* IXGBE_FCOE */
 
 extern void ixgbe_up(struct ixgbe_adapter *adapter);
 extern void ixgbe_down(struct ixgbe_adapter *adapter);
@@ -606,6 +608,7 @@ extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
 					    struct ixgbe_tx_buffer *);
 extern void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
 extern void ixgbe_write_eitr(struct ixgbe_q_vector *);
+extern int ixgbe_poll(struct napi_struct *napi, int budget);
 extern int ethtool_ioctl(struct ifreq *ifr);
 extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
 extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
@@ -625,7 +628,9 @@ extern s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
 extern void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
 						 union ixgbe_atr_input *mask);
 extern void ixgbe_set_rx_mode(struct net_device *netdev);
+#ifdef CONFIG_IXGBE_DCB
 extern int ixgbe_setup_tc(struct net_device *dev, u8 tc);
+#endif
 extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
 extern void ixgbe_do_reset(struct net_device *netdev);
 #ifdef IXGBE_FCOE
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
new file mode 100644
index 000000000000..027d7a75be39
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -0,0 +1,929 @@
1 | /******************************************************************************* | ||
2 | |||
3 | Intel 10 Gigabit PCI Express Linux driver | ||
4 | Copyright(c) 1999 - 2012 Intel Corporation. | ||
5 | |||
6 | This program is free software; you can redistribute it and/or modify it | ||
7 | under the terms and conditions of the GNU General Public License, | ||
8 | version 2, as published by the Free Software Foundation. | ||
9 | |||
10 | This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | more details. | ||
14 | |||
15 | You should have received a copy of the GNU General Public License along with | ||
16 | this program; if not, write to the Free Software Foundation, Inc., | ||
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | |||
19 | The full GNU General Public License is included in this distribution in | ||
20 | the file called "COPYING". | ||
21 | |||
22 | Contact Information: | ||
23 | e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> | ||
24 | Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
25 | |||
26 | *******************************************************************************/ | ||
27 | |||
28 | #include "ixgbe.h" | ||
29 | #include "ixgbe_sriov.h" | ||
30 | |||
31 | /** | ||
32 | * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS | ||
33 | * @adapter: board private structure to initialize | ||
34 | * | ||
35 | * Cache the descriptor ring offsets for RSS to the assigned rings. | ||
36 | * | ||
37 | **/ | ||
38 | static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter) | ||
39 | { | ||
40 | int i; | ||
41 | |||
42 | if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) | ||
43 | return false; | ||
44 | |||
45 | for (i = 0; i < adapter->num_rx_queues; i++) | ||
46 | adapter->rx_ring[i]->reg_idx = i; | ||
47 | for (i = 0; i < adapter->num_tx_queues; i++) | ||
48 | adapter->tx_ring[i]->reg_idx = i; | ||
49 | |||
50 | return true; | ||
51 | } | ||
52 | #ifdef CONFIG_IXGBE_DCB | ||
53 | |||
54 | /* ixgbe_get_first_reg_idx - Return first register index associated with ring */ | ||
55 | static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc, | ||
56 | unsigned int *tx, unsigned int *rx) | ||
57 | { | ||
58 | struct net_device *dev = adapter->netdev; | ||
59 | struct ixgbe_hw *hw = &adapter->hw; | ||
60 | u8 num_tcs = netdev_get_num_tc(dev); | ||
61 | |||
62 | *tx = 0; | ||
63 | *rx = 0; | ||
64 | |||
65 | switch (hw->mac.type) { | ||
66 | case ixgbe_mac_82598EB: | ||
67 | *tx = tc << 2; | ||
68 | *rx = tc << 3; | ||
69 | break; | ||
70 | case ixgbe_mac_82599EB: | ||
71 | case ixgbe_mac_X540: | ||
72 | if (num_tcs > 4) { | ||
73 | if (tc < 3) { | ||
74 | *tx = tc << 5; | ||
75 | *rx = tc << 4; | ||
76 | } else if (tc < 5) { | ||
77 | *tx = ((tc + 2) << 4); | ||
78 | *rx = tc << 4; | ||
79 | } else if (tc < num_tcs) { | ||
80 | *tx = ((tc + 8) << 3); | ||
81 | *rx = tc << 4; | ||
82 | } | ||
83 | } else { | ||
84 | *rx = tc << 5; | ||
85 | switch (tc) { | ||
86 | case 0: | ||
87 | *tx = 0; | ||
88 | break; | ||
89 | case 1: | ||
90 | *tx = 64; | ||
91 | break; | ||
92 | case 2: | ||
93 | *tx = 96; | ||
94 | break; | ||
95 | case 3: | ||
96 | *tx = 112; | ||
97 | break; | ||
98 | default: | ||
99 | break; | ||
100 | } | ||
101 | } | ||
102 | break; | ||
103 | default: | ||
104 | break; | ||
105 | } | ||
106 | } | ||
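A worked example of the mapping above, derived directly from the shifts in the 82599/X540 branch: with eight traffic classes, tc 0-2 get Tx offsets 0/32/64 and Rx offsets 0/16/32, tc 3-4 get Tx 80/96 and Rx 48/64, and tc 5-7 get Tx 104/112/120 and Rx 80/96/112; with four or fewer traffic classes the Rx offsets are 0/32/64/96 and the Tx offsets 0/64/96/112, as spelled out in the switch statement.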
107 | |||
108 | /** | ||
109 | * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB | ||
110 | * @adapter: board private structure to initialize | ||
111 | * | ||
112 | * Cache the descriptor ring offsets for DCB to the assigned rings. | ||
113 | * | ||
114 | **/ | ||
115 | static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter) | ||
116 | { | ||
117 | struct net_device *dev = adapter->netdev; | ||
118 | int i, j, k; | ||
119 | u8 num_tcs = netdev_get_num_tc(dev); | ||
120 | |||
121 | if (!num_tcs) | ||
122 | return false; | ||
123 | |||
124 | for (i = 0, k = 0; i < num_tcs; i++) { | ||
125 | unsigned int tx_s, rx_s; | ||
126 | u16 count = dev->tc_to_txq[i].count; | ||
127 | |||
128 | ixgbe_get_first_reg_idx(adapter, i, &tx_s, &rx_s); | ||
129 | for (j = 0; j < count; j++, k++) { | ||
130 | adapter->tx_ring[k]->reg_idx = tx_s + j; | ||
131 | adapter->rx_ring[k]->reg_idx = rx_s + j; | ||
132 | adapter->tx_ring[k]->dcb_tc = i; | ||
133 | adapter->rx_ring[k]->dcb_tc = i; | ||
134 | } | ||
135 | } | ||
136 | |||
137 | return true; | ||
138 | } | ||
139 | #endif | ||
140 | |||
141 | /** | ||
142 | * ixgbe_cache_ring_fdir - Descriptor ring to register mapping for Flow Director | ||
143 | * @adapter: board private structure to initialize | ||
144 | * | ||
145 | * Cache the descriptor ring offsets for Flow Director to the assigned rings. | ||
146 | * | ||
147 | **/ | ||
148 | static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter) | ||
149 | { | ||
150 | int i; | ||
151 | bool ret = false; | ||
152 | |||
153 | if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) && | ||
154 | (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) { | ||
155 | for (i = 0; i < adapter->num_rx_queues; i++) | ||
156 | adapter->rx_ring[i]->reg_idx = i; | ||
157 | for (i = 0; i < adapter->num_tx_queues; i++) | ||
158 | adapter->tx_ring[i]->reg_idx = i; | ||
159 | ret = true; | ||
160 | } | ||
161 | |||
162 | return ret; | ||
163 | } | ||
164 | |||
165 | #ifdef IXGBE_FCOE | ||
166 | /** | ||
167 | * ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for the FCoE | ||
168 | * @adapter: board private structure to initialize | ||
169 | * | ||
170 | * Cache the descriptor ring offsets for FCoE mode to the assigned rings. | ||
171 | * | ||
172 | */ | ||
173 | static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter) | ||
174 | { | ||
175 | struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; | ||
176 | int i; | ||
177 | u8 fcoe_rx_i = 0, fcoe_tx_i = 0; | ||
178 | |||
179 | if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) | ||
180 | return false; | ||
181 | |||
182 | if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { | ||
183 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) | ||
184 | ixgbe_cache_ring_fdir(adapter); | ||
185 | else | ||
186 | ixgbe_cache_ring_rss(adapter); | ||
187 | |||
188 | fcoe_rx_i = f->mask; | ||
189 | fcoe_tx_i = f->mask; | ||
190 | } | ||
191 | for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) { | ||
192 | adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i; | ||
193 | adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i; | ||
194 | } | ||
195 | return true; | ||
196 | } | ||
197 | |||
198 | #endif /* IXGBE_FCOE */ | ||
199 | /** | ||
200 | * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov | ||
201 | * @adapter: board private structure to initialize | ||
202 | * | ||
203 | * SR-IOV doesn't use any descriptor rings but changes the default if | ||
204 | * no other mapping is used. | ||
205 | * | ||
206 | */ | ||
207 | static inline bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter) | ||
208 | { | ||
209 | adapter->rx_ring[0]->reg_idx = adapter->num_vfs * 2; | ||
210 | adapter->tx_ring[0]->reg_idx = adapter->num_vfs * 2; | ||
211 | if (adapter->num_vfs) | ||
212 | return true; | ||
213 | else | ||
214 | return false; | ||
215 | } | ||
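Note: the default reg_idx of num_vfs * 2 places the PF's ring pair immediately after the per-VF queue ranges, on the assumption that each VF pool owns two queues in this mode; for example, with 16 VFs the PF's first ring maps to register index 32.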
216 | |||
217 | /** | ||
218 | * ixgbe_cache_ring_register - Descriptor ring to register mapping | ||
219 | * @adapter: board private structure to initialize | ||
220 | * | ||
221 | * Once we know the feature-set enabled for the device, we'll cache | ||
222 | * the register offset the descriptor ring is assigned to. | ||
223 | * | ||
224 | * Note, the order the various feature calls is important. It must start with | ||
225 | * the "most" features enabled at the same time, then trickle down to the | ||
226 | * least amount of features turned on at once. | ||
227 | **/ | ||
228 | static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) | ||
229 | { | ||
230 | /* start with default case */ | ||
231 | adapter->rx_ring[0]->reg_idx = 0; | ||
232 | adapter->tx_ring[0]->reg_idx = 0; | ||
233 | |||
234 | if (ixgbe_cache_ring_sriov(adapter)) | ||
235 | return; | ||
236 | |||
237 | #ifdef CONFIG_IXGBE_DCB | ||
238 | if (ixgbe_cache_ring_dcb(adapter)) | ||
239 | return; | ||
240 | #endif | ||
241 | |||
242 | #ifdef IXGBE_FCOE | ||
243 | if (ixgbe_cache_ring_fcoe(adapter)) | ||
244 | return; | ||
245 | #endif /* IXGBE_FCOE */ | ||
246 | |||
247 | if (ixgbe_cache_ring_fdir(adapter)) | ||
248 | return; | ||
249 | |||
250 | if (ixgbe_cache_ring_rss(adapter)) | ||
251 | return; | ||
252 | } | ||
253 | |||
254 | /** | ||
255 | * ixgbe_set_sriov_queues: Allocate queues for IOV use | ||
256 | * @adapter: board private structure to initialize | ||
257 | * | ||
258 | * IOV doesn't actually use anything, so just NAK the | ||
259 | * request for now and let the other queue routines | ||
260 | * figure out what to do. | ||
261 | */ | ||
262 | static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter) | ||
263 | { | ||
264 | return false; | ||
265 | } | ||
266 | |||
267 | /** | ||
268 | * ixgbe_set_rss_queues: Allocate queues for RSS | ||
269 | * @adapter: board private structure to initialize | ||
270 | * | ||
271 | * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try | ||
272 | * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU. | ||
273 | * | ||
274 | **/ | ||
275 | static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) | ||
276 | { | ||
277 | bool ret = false; | ||
278 | struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_RSS]; | ||
279 | |||
280 | if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { | ||
281 | f->mask = 0xF; | ||
282 | adapter->num_rx_queues = f->indices; | ||
283 | adapter->num_tx_queues = f->indices; | ||
284 | ret = true; | ||
285 | } | ||
286 | |||
287 | return ret; | ||
288 | } | ||
289 | |||
290 | /** | ||
291 | * ixgbe_set_fdir_queues: Allocate queues for Flow Director | ||
292 | * @adapter: board private structure to initialize | ||
293 | * | ||
294 | * Flow Director is an advanced Rx filter, attempting to get Rx flows back | ||
295 | * to the original CPU that initiated the Tx session. This runs in addition | ||
296 | * to RSS, so if a packet doesn't match an FDIR filter, we can still spread the | ||
297 | * Rx load across CPUs using RSS. | ||
298 | * | ||
299 | **/ | ||
300 | static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter) | ||
301 | { | ||
302 | bool ret = false; | ||
303 | struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR]; | ||
304 | |||
305 | f_fdir->indices = min_t(int, num_online_cpus(), f_fdir->indices); | ||
306 | f_fdir->mask = 0; | ||
307 | |||
308 | /* | ||
309 | * Use RSS in addition to Flow Director to ensure the best | ||
310 | * distribution of flows across cores, even when an FDIR flow | ||
311 | * isn't matched. | ||
312 | */ | ||
313 | if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) && | ||
314 | (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) { | ||
315 | adapter->num_tx_queues = f_fdir->indices; | ||
316 | adapter->num_rx_queues = f_fdir->indices; | ||
317 | ret = true; | ||
318 | } else { | ||
319 | adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; | ||
320 | } | ||
321 | return ret; | ||
322 | } | ||
323 | |||
324 | #ifdef IXGBE_FCOE | ||
325 | /** | ||
326 | * ixgbe_set_fcoe_queues: Allocate queues for Fiber Channel over Ethernet (FCoE) | ||
327 | * @adapter: board private structure to initialize | ||
328 | * | ||
329 | * FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges. | ||
330 | * The ring feature mask is not used as a mask for FCoE, as it can take any 8 | ||
331 | * rx queues out of the max number of rx queues, instead, it is used as the | ||
332 | * index of the first rx queue used by FCoE. | ||
333 | * | ||
334 | **/ | ||
335 | static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter) | ||
336 | { | ||
337 | struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; | ||
338 | |||
339 | if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) | ||
340 | return false; | ||
341 | |||
342 | f->indices = min_t(int, num_online_cpus(), f->indices); | ||
343 | |||
344 | adapter->num_rx_queues = 1; | ||
345 | adapter->num_tx_queues = 1; | ||
346 | |||
347 | if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { | ||
348 | e_info(probe, "FCoE enabled with RSS\n"); | ||
349 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) | ||
350 | ixgbe_set_fdir_queues(adapter); | ||
351 | else | ||
352 | ixgbe_set_rss_queues(adapter); | ||
353 | } | ||
354 | |||
355 | /* adding FCoE rx rings to the end */ | ||
356 | f->mask = adapter->num_rx_queues; | ||
357 | adapter->num_rx_queues += f->indices; | ||
358 | adapter->num_tx_queues += f->indices; | ||
359 | |||
360 | return true; | ||
361 | } | ||
362 | #endif /* IXGBE_FCOE */ | ||
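Worked example, assuming RSS has already claimed four queue pairs and f->indices has been capped at eight by the online-CPU count: f->mask becomes 4, num_rx_queues and num_tx_queues grow to 12, and ixgbe_cache_ring_fcoe() above will then map rings 4-11 to FCoE while rings 0-3 stay with RSS.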
363 | |||
364 | /* Artificial max queue cap per traffic class in DCB mode */ | ||
365 | #define DCB_QUEUE_CAP 8 | ||
366 | |||
367 | #ifdef CONFIG_IXGBE_DCB | ||
368 | static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter) | ||
369 | { | ||
370 | int per_tc_q, q, i, offset = 0; | ||
371 | struct net_device *dev = adapter->netdev; | ||
372 | int tcs = netdev_get_num_tc(dev); | ||
373 | |||
374 | if (!tcs) | ||
375 | return false; | ||
376 | |||
377 | /* Map queue offset and counts onto allocated tx queues */ | ||
378 | per_tc_q = min_t(unsigned int, dev->num_tx_queues / tcs, DCB_QUEUE_CAP); | ||
379 | q = min_t(int, num_online_cpus(), per_tc_q); | ||
380 | |||
381 | for (i = 0; i < tcs; i++) { | ||
382 | netdev_set_tc_queue(dev, i, q, offset); | ||
383 | offset += q; | ||
384 | } | ||
385 | |||
386 | adapter->num_tx_queues = q * tcs; | ||
387 | adapter->num_rx_queues = q * tcs; | ||
388 | |||
389 | #ifdef IXGBE_FCOE | ||
390 | /* FCoE enabled queues require special configuration indexed | ||
391 | * by feature specific indices and mask. Here we map FCoE | ||
392 | * indices onto the DCB queue pairs allowing FCoE to own | ||
393 | * configuration later. | ||
394 | */ | ||
395 | if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { | ||
396 | u8 prio_tc[MAX_USER_PRIORITY] = {0}; | ||
397 | int tc; | ||
398 | struct ixgbe_ring_feature *f = | ||
399 | &adapter->ring_feature[RING_F_FCOE]; | ||
400 | |||
401 | ixgbe_dcb_unpack_map(&adapter->dcb_cfg, DCB_TX_CONFIG, prio_tc); | ||
402 | tc = prio_tc[adapter->fcoe.up]; | ||
403 | f->indices = dev->tc_to_txq[tc].count; | ||
404 | f->mask = dev->tc_to_txq[tc].offset; | ||
405 | } | ||
406 | #endif | ||
407 | |||
408 | return true; | ||
409 | } | ||
410 | #endif | ||
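Worked example, assuming a netdev with 64 Tx queues, eight traffic classes and 16 online CPUs: per_tc_q = min(64 / 8, DCB_QUEUE_CAP) = 8 and q = min(16, 8) = 8, so netdev_set_tc_queue() gives TC i the queue range [8*i, 8*i + 7] and the adapter ends up with 64 Tx and 64 Rx queues.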
411 | |||
412 | /** | ||
413 | * ixgbe_set_num_queues: Allocate queues for device, feature dependent | ||
414 | * @adapter: board private structure to initialize | ||
415 | * | ||
416 | * This is the top level queue allocation routine. The order here is very | ||
417 | * important, starting with the "most" number of features turned on at once, | ||
418 | * and ending with the smallest set of features. This way large combinations | ||
419 | * can be allocated if they're turned on, and smaller combinations are the | ||
420 | * fallthrough conditions. | ||
421 | * | ||
422 | **/ | ||
423 | static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter) | ||
424 | { | ||
425 | /* Start with base case */ | ||
426 | adapter->num_rx_queues = 1; | ||
427 | adapter->num_tx_queues = 1; | ||
428 | adapter->num_rx_pools = adapter->num_rx_queues; | ||
429 | adapter->num_rx_queues_per_pool = 1; | ||
430 | |||
431 | if (ixgbe_set_sriov_queues(adapter)) | ||
432 | goto done; | ||
433 | |||
434 | #ifdef CONFIG_IXGBE_DCB | ||
435 | if (ixgbe_set_dcb_queues(adapter)) | ||
436 | goto done; | ||
437 | |||
438 | #endif | ||
439 | #ifdef IXGBE_FCOE | ||
440 | if (ixgbe_set_fcoe_queues(adapter)) | ||
441 | goto done; | ||
442 | |||
443 | #endif /* IXGBE_FCOE */ | ||
444 | if (ixgbe_set_fdir_queues(adapter)) | ||
445 | goto done; | ||
446 | |||
447 | if (ixgbe_set_rss_queues(adapter)) | ||
448 | goto done; | ||
449 | |||
450 | /* fallback to base case */ | ||
451 | adapter->num_rx_queues = 1; | ||
452 | adapter->num_tx_queues = 1; | ||
453 | |||
454 | done: | ||
455 | if ((adapter->netdev->reg_state == NETREG_UNREGISTERED) || | ||
456 | (adapter->netdev->reg_state == NETREG_UNREGISTERING)) | ||
457 | return 0; | ||
458 | |||
459 | /* Notify the stack of the (possibly) reduced queue counts. */ | ||
460 | netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues); | ||
461 | return netif_set_real_num_rx_queues(adapter->netdev, | ||
462 | adapter->num_rx_queues); | ||
463 | } | ||
464 | |||
465 | static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, | ||
466 | int vectors) | ||
467 | { | ||
468 | int err, vector_threshold; | ||
469 | |||
470 | /* We'll want at least 2 (vector_threshold): | ||
471 | * 1) TxQ[0] + RxQ[0] handler | ||
472 | * 2) Other (Link Status Change, etc.) | ||
473 | */ | ||
474 | vector_threshold = MIN_MSIX_COUNT; | ||
475 | |||
476 | /* | ||
477 | * The more we get, the more we will assign to Tx/Rx Cleanup | ||
478 | * for the separate queues...where Rx Cleanup >= Tx Cleanup. | ||
479 | * Right now, we simply care about how many we'll get; we'll | ||
480 | * set them up later while requesting irq's. | ||
481 | */ | ||
482 | while (vectors >= vector_threshold) { | ||
483 | err = pci_enable_msix(adapter->pdev, adapter->msix_entries, | ||
484 | vectors); | ||
485 | if (!err) /* Success in acquiring all requested vectors. */ | ||
486 | break; | ||
487 | else if (err < 0) | ||
488 | vectors = 0; /* Nasty failure, quit now */ | ||
489 | else /* err == number of vectors we should try again with */ | ||
490 | vectors = err; | ||
491 | } | ||
492 | |||
493 | if (vectors < vector_threshold) { | ||
494 | /* Can't allocate enough MSI-X interrupts? Oh well. | ||
495 | * This just means we'll go with either a single MSI | ||
496 | * vector or fall back to legacy interrupts. | ||
497 | */ | ||
498 | netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev, | ||
499 | "Unable to allocate MSI-X interrupts\n"); | ||
500 | adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; | ||
501 | kfree(adapter->msix_entries); | ||
502 | adapter->msix_entries = NULL; | ||
503 | } else { | ||
504 | adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */ | ||
505 | /* | ||
506 | * Adjust for only the vectors we'll use, which is minimum | ||
507 | * of max_msix_q_vectors + NON_Q_VECTORS, or the number of | ||
508 | * vectors we were allocated. | ||
509 | */ | ||
510 | adapter->num_msix_vectors = min(vectors, | ||
511 | adapter->max_msix_q_vectors + NON_Q_VECTORS); | ||
512 | } | ||
513 | } | ||
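The retry loop relies on the pci_enable_msix() contract of this era: 0 means every requested vector was allocated, a negative value is a hard failure, and a positive value is the number of vectors the platform could actually provide. Asking for 10 vectors on a system that can only grant 6, for example, returns 6 on the first pass; the retry with 6 then returns 0, and the result either sticks or falls below MIN_MSIX_COUNT, in which case MSI-X is abandoned.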
514 | |||
515 | static void ixgbe_add_ring(struct ixgbe_ring *ring, | ||
516 | struct ixgbe_ring_container *head) | ||
517 | { | ||
518 | ring->next = head->ring; | ||
519 | head->ring = ring; | ||
520 | head->count++; | ||
521 | } | ||
522 | |||
523 | /** | ||
524 | * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector | ||
525 | * @adapter: board private structure to initialize | ||
526 | * @v_idx: index of vector in adapter struct | ||
527 | * | ||
528 | * We allocate one q_vector. If allocation fails we return -ENOMEM. | ||
529 | **/ | ||
530 | static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, int v_idx, | ||
531 | int txr_count, int txr_idx, | ||
532 | int rxr_count, int rxr_idx) | ||
533 | { | ||
534 | struct ixgbe_q_vector *q_vector; | ||
535 | struct ixgbe_ring *ring; | ||
536 | int node = -1; | ||
537 | int cpu = -1; | ||
538 | int ring_count, size; | ||
539 | |||
540 | ring_count = txr_count + rxr_count; | ||
541 | size = sizeof(struct ixgbe_q_vector) + | ||
542 | (sizeof(struct ixgbe_ring) * ring_count); | ||
543 | |||
544 | /* customize cpu for Flow Director mapping */ | ||
545 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { | ||
546 | if (cpu_online(v_idx)) { | ||
547 | cpu = v_idx; | ||
548 | node = cpu_to_node(cpu); | ||
549 | } | ||
550 | } | ||
551 | |||
552 | /* allocate q_vector and rings */ | ||
553 | q_vector = kzalloc_node(size, GFP_KERNEL, node); | ||
554 | if (!q_vector) | ||
555 | q_vector = kzalloc(size, GFP_KERNEL); | ||
556 | if (!q_vector) | ||
557 | return -ENOMEM; | ||
558 | |||
559 | /* setup affinity mask and node */ | ||
560 | if (cpu != -1) | ||
561 | cpumask_set_cpu(cpu, &q_vector->affinity_mask); | ||
562 | else | ||
563 | cpumask_copy(&q_vector->affinity_mask, cpu_online_mask); | ||
564 | q_vector->numa_node = node; | ||
565 | |||
566 | /* initialize NAPI */ | ||
567 | netif_napi_add(adapter->netdev, &q_vector->napi, | ||
568 | ixgbe_poll, 64); | ||
569 | |||
570 | /* tie q_vector and adapter together */ | ||
571 | adapter->q_vector[v_idx] = q_vector; | ||
572 | q_vector->adapter = adapter; | ||
573 | q_vector->v_idx = v_idx; | ||
574 | |||
575 | /* initialize work limits */ | ||
576 | q_vector->tx.work_limit = adapter->tx_work_limit; | ||
577 | |||
578 | /* initialize pointer to rings */ | ||
579 | ring = q_vector->ring; | ||
580 | |||
581 | while (txr_count) { | ||
582 | /* assign generic ring traits */ | ||
583 | ring->dev = &adapter->pdev->dev; | ||
584 | ring->netdev = adapter->netdev; | ||
585 | |||
586 | /* configure backlink on ring */ | ||
587 | ring->q_vector = q_vector; | ||
588 | |||
589 | /* update q_vector Tx values */ | ||
590 | ixgbe_add_ring(ring, &q_vector->tx); | ||
591 | |||
592 | /* apply Tx specific ring traits */ | ||
593 | ring->count = adapter->tx_ring_count; | ||
594 | ring->queue_index = txr_idx; | ||
595 | |||
596 | /* assign ring to adapter */ | ||
597 | adapter->tx_ring[txr_idx] = ring; | ||
598 | |||
599 | /* update count and index */ | ||
600 | txr_count--; | ||
601 | txr_idx++; | ||
602 | |||
603 | /* push pointer to next ring */ | ||
604 | ring++; | ||
605 | } | ||
606 | |||
607 | while (rxr_count) { | ||
608 | /* assign generic ring traits */ | ||
609 | ring->dev = &adapter->pdev->dev; | ||
610 | ring->netdev = adapter->netdev; | ||
611 | |||
612 | /* configure backlink on ring */ | ||
613 | ring->q_vector = q_vector; | ||
614 | |||
615 | /* update q_vector Rx values */ | ||
616 | ixgbe_add_ring(ring, &q_vector->rx); | ||
617 | |||
618 | /* | ||
619 | * 82599 errata, UDP frames with a 0 checksum | ||
620 | * can be marked as checksum errors. | ||
621 | */ | ||
622 | if (adapter->hw.mac.type == ixgbe_mac_82599EB) | ||
623 | set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state); | ||
624 | |||
625 | /* apply Rx specific ring traits */ | ||
626 | ring->count = adapter->rx_ring_count; | ||
627 | ring->queue_index = rxr_idx; | ||
628 | |||
629 | /* assign ring to adapter */ | ||
630 | adapter->rx_ring[rxr_idx] = ring; | ||
631 | |||
632 | /* update count and index */ | ||
633 | rxr_count--; | ||
634 | rxr_idx++; | ||
635 | |||
636 | /* push pointer to next ring */ | ||
637 | ring++; | ||
638 | } | ||
639 | |||
640 | return 0; | ||
641 | } | ||
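Note that the q_vector and all of its rings come from a single kzalloc_node() of sizeof(struct ixgbe_q_vector) plus ring_count ring structures, with q_vector->ring apparently referring to that trailing storage; this is why the Tx and Rx loops in the function simply advance a ring pointer instead of allocating each ring separately, and it keeps a vector's rings on the same NUMA node as the vector itself.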
642 | |||
643 | /** | ||
644 | * ixgbe_free_q_vector - Free memory allocated for specific interrupt vector | ||
645 | * @adapter: board private structure to initialize | ||
646 | * @v_idx: Index of vector to be freed | ||
647 | * | ||
648 | * This function frees the memory allocated to the q_vector. In addition if | ||
649 | * NAPI is enabled it will delete any references to the NAPI struct prior | ||
650 | * to freeing the q_vector. | ||
651 | **/ | ||
652 | static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx) | ||
653 | { | ||
654 | struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx]; | ||
655 | struct ixgbe_ring *ring; | ||
656 | |||
657 | ixgbe_for_each_ring(ring, q_vector->tx) | ||
658 | adapter->tx_ring[ring->queue_index] = NULL; | ||
659 | |||
660 | ixgbe_for_each_ring(ring, q_vector->rx) | ||
661 | adapter->rx_ring[ring->queue_index] = NULL; | ||
662 | |||
663 | adapter->q_vector[v_idx] = NULL; | ||
664 | netif_napi_del(&q_vector->napi); | ||
665 | |||
666 | /* | ||
667 | * ixgbe_get_stats64() might access the rings on this vector, | ||
668 | * we must wait a grace period before freeing it. | ||
669 | */ | ||
670 | kfree_rcu(q_vector, rcu); | ||
671 | } | ||
672 | |||
673 | /** | ||
674 | * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors | ||
675 | * @adapter: board private structure to initialize | ||
676 | * | ||
677 | * We allocate one q_vector per queue interrupt. If allocation fails we | ||
678 | * return -ENOMEM. | ||
679 | **/ | ||
680 | static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter) | ||
681 | { | ||
682 | int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; | ||
683 | int rxr_remaining = adapter->num_rx_queues; | ||
684 | int txr_remaining = adapter->num_tx_queues; | ||
685 | int rxr_idx = 0, txr_idx = 0, v_idx = 0; | ||
686 | int err; | ||
687 | |||
688 | /* only one q_vector if MSI-X is disabled. */ | ||
689 | if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) | ||
690 | q_vectors = 1; | ||
691 | |||
692 | if (q_vectors >= (rxr_remaining + txr_remaining)) { | ||
693 | for (; rxr_remaining; v_idx++, q_vectors--) { | ||
694 | int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors); | ||
695 | err = ixgbe_alloc_q_vector(adapter, v_idx, | ||
696 | 0, 0, rqpv, rxr_idx); | ||
697 | |||
698 | if (err) | ||
699 | goto err_out; | ||
700 | |||
701 | /* update counts and index */ | ||
702 | rxr_remaining -= rqpv; | ||
703 | rxr_idx += rqpv; | ||
704 | } | ||
705 | } | ||
706 | |||
707 | for (; q_vectors; v_idx++, q_vectors--) { | ||
708 | int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors); | ||
709 | int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors); | ||
710 | err = ixgbe_alloc_q_vector(adapter, v_idx, | ||
711 | tqpv, txr_idx, | ||
712 | rqpv, rxr_idx); | ||
713 | |||
714 | if (err) | ||
715 | goto err_out; | ||
716 | |||
717 | /* update counts and index */ | ||
718 | rxr_remaining -= rqpv; | ||
719 | rxr_idx += rqpv; | ||
720 | txr_remaining -= tqpv; | ||
721 | txr_idx += tqpv; | ||
722 | } | ||
723 | |||
724 | return 0; | ||
725 | |||
726 | err_out: | ||
727 | while (v_idx) { | ||
728 | v_idx--; | ||
729 | ixgbe_free_q_vector(adapter, v_idx); | ||
730 | } | ||
731 | |||
732 | return -ENOMEM; | ||
733 | } | ||
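Worked example of the distribution above: with 16 Rx and 16 Tx rings spread over 8 q_vectors, the Rx-only branch is skipped (8 < 32) and each vector receives DIV_ROUND_UP of the remaining rings, i.e. 2 Rx plus 2 Tx rings; when vectors are at least as numerous as rings, the first loop instead gives every Rx ring a dedicated vector before the Tx rings are spread over whatever vectors remain.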
734 | |||
735 | /** | ||
736 | * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors | ||
737 | * @adapter: board private structure to initialize | ||
738 | * | ||
739 | * This function frees the memory allocated to the q_vectors. In addition if | ||
740 | * NAPI is enabled it will delete any references to the NAPI struct prior | ||
741 | * to freeing the q_vector. | ||
742 | **/ | ||
743 | static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter) | ||
744 | { | ||
745 | int v_idx, q_vectors; | ||
746 | |||
747 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) | ||
748 | q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; | ||
749 | else | ||
750 | q_vectors = 1; | ||
751 | |||
752 | for (v_idx = 0; v_idx < q_vectors; v_idx++) | ||
753 | ixgbe_free_q_vector(adapter, v_idx); | ||
754 | } | ||
755 | |||
756 | static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter) | ||
757 | { | ||
758 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { | ||
759 | adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; | ||
760 | pci_disable_msix(adapter->pdev); | ||
761 | kfree(adapter->msix_entries); | ||
762 | adapter->msix_entries = NULL; | ||
763 | } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { | ||
764 | adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED; | ||
765 | pci_disable_msi(adapter->pdev); | ||
766 | } | ||
767 | } | ||
768 | |||
769 | /** | ||
770 | * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported | ||
771 | * @adapter: board private structure to initialize | ||
772 | * | ||
773 | * Attempt to configure the interrupts using the best available | ||
774 | * capabilities of the hardware and the kernel. | ||
775 | **/ | ||
776 | static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) | ||
777 | { | ||
778 | struct ixgbe_hw *hw = &adapter->hw; | ||
779 | int err = 0; | ||
780 | int vector, v_budget; | ||
781 | |||
782 | /* | ||
783 | * It's easy to be greedy for MSI-X vectors, but it really | ||
784 | * doesn't do us much good if we have a lot more vectors | ||
785 | * than CPU's. So let's be conservative and only ask for | ||
786 | * (roughly) the same number of vectors as there are CPU's. | ||
787 | * The default is to use pairs of vectors. | ||
788 | */ | ||
789 | v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues); | ||
790 | v_budget = min_t(int, v_budget, num_online_cpus()); | ||
791 | v_budget += NON_Q_VECTORS; | ||
792 | |||
793 | /* | ||
794 | * At the same time, hardware can only support a maximum of | ||
795 | * hw.mac->max_msix_vectors vectors. With features | ||
796 | * such as RSS and VMDq, we can easily surpass the number of Rx and Tx | ||
797 | * descriptor queues supported by our device. Thus, we cap it off in | ||
798 | * those rare cases where the cpu count also exceeds our vector limit. | ||
799 | */ | ||
800 | v_budget = min_t(int, v_budget, hw->mac.max_msix_vectors); | ||
801 | |||
802 | /* A failure in MSI-X entry allocation isn't fatal, but it does | ||
803 | * mean we disable MSI-X capabilities of the adapter. */ | ||
804 | adapter->msix_entries = kcalloc(v_budget, | ||
805 | sizeof(struct msix_entry), GFP_KERNEL); | ||
806 | if (adapter->msix_entries) { | ||
807 | for (vector = 0; vector < v_budget; vector++) | ||
808 | adapter->msix_entries[vector].entry = vector; | ||
809 | |||
810 | ixgbe_acquire_msix_vectors(adapter, v_budget); | ||
811 | |||
812 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) | ||
813 | goto out; | ||
814 | } | ||
815 | |||
816 | adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; | ||
817 | adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; | ||
818 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { | ||
819 | e_err(probe, | ||
820 | "ATR is not supported while multiple " | ||
821 | "queues are disabled. Disabling Flow Director\n"); | ||
822 | } | ||
823 | adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; | ||
824 | adapter->atr_sample_rate = 0; | ||
825 | if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) | ||
826 | ixgbe_disable_sriov(adapter); | ||
827 | |||
828 | err = ixgbe_set_num_queues(adapter); | ||
829 | if (err) | ||
830 | return err; | ||
831 | |||
832 | err = pci_enable_msi(adapter->pdev); | ||
833 | if (!err) { | ||
834 | adapter->flags |= IXGBE_FLAG_MSI_ENABLED; | ||
835 | } else { | ||
836 | netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev, | ||
837 | "Unable to allocate MSI interrupt, " | ||
838 | "falling back to legacy. Error: %d\n", err); | ||
839 | /* reset err */ | ||
840 | err = 0; | ||
841 | } | ||
842 | |||
843 | out: | ||
844 | return err; | ||
845 | } | ||
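Worked example of the vector budget, assuming a 16-core machine with 16 Rx and 16 Tx queues: v_budget = min(max(16, 16), 16) + NON_Q_VECTORS, then clamped to hw->mac.max_msix_vectors (typically 64 on 82599-class hardware); if the MSI-X request ultimately fails, the code above drops back to a single queue pair and tries MSI before settling for a legacy interrupt.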
846 | |||
847 | /** | ||
848 | * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme | ||
849 | * @adapter: board private structure to initialize | ||
850 | * | ||
851 | * We determine which interrupt scheme to use based on... | ||
852 | * - Kernel support (MSI, MSI-X) | ||
853 | * - which can be user-defined (via MODULE_PARAM) | ||
854 | * - Hardware queue count (num_*_queues) | ||
855 | * - defined by miscellaneous hardware support/features (RSS, etc.) | ||
856 | **/ | ||
857 | int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter) | ||
858 | { | ||
859 | int err; | ||
860 | |||
861 | /* Number of supported queues */ | ||
862 | err = ixgbe_set_num_queues(adapter); | ||
863 | if (err) | ||
864 | return err; | ||
865 | |||
866 | err = ixgbe_set_interrupt_capability(adapter); | ||
867 | if (err) { | ||
868 | e_dev_err("Unable to setup interrupt capabilities\n"); | ||
869 | goto err_set_interrupt; | ||
870 | } | ||
871 | |||
872 | err = ixgbe_alloc_q_vectors(adapter); | ||
873 | if (err) { | ||
874 | e_dev_err("Unable to allocate memory for queue vectors\n"); | ||
875 | goto err_alloc_q_vectors; | ||
876 | } | ||
877 | |||
878 | ixgbe_cache_ring_register(adapter); | ||
879 | |||
880 | e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n", | ||
881 | (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled", | ||
882 | adapter->num_rx_queues, adapter->num_tx_queues); | ||
883 | |||
884 | set_bit(__IXGBE_DOWN, &adapter->state); | ||
885 | |||
886 | return 0; | ||
887 | |||
888 | err_alloc_q_vectors: | ||
889 | ixgbe_reset_interrupt_capability(adapter); | ||
890 | err_set_interrupt: | ||
891 | return err; | ||
892 | } | ||
893 | |||
894 | /** | ||
895 | * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings | ||
896 | * @adapter: board private structure to clear interrupt scheme on | ||
897 | * | ||
898 | * We go through and clear interrupt specific resources and reset the structure | ||
899 | * to pre-load conditions | ||
900 | **/ | ||
901 | void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter) | ||
902 | { | ||
903 | adapter->num_tx_queues = 0; | ||
904 | adapter->num_rx_queues = 0; | ||
905 | |||
906 | ixgbe_free_q_vectors(adapter); | ||
907 | ixgbe_reset_interrupt_capability(adapter); | ||
908 | } | ||
909 | |||
910 | void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens, | ||
911 | u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx) | ||
912 | { | ||
913 | struct ixgbe_adv_tx_context_desc *context_desc; | ||
914 | u16 i = tx_ring->next_to_use; | ||
915 | |||
916 | context_desc = IXGBE_TX_CTXTDESC(tx_ring, i); | ||
917 | |||
918 | i++; | ||
919 | tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; | ||
920 | |||
921 | /* set bits to identify this as an advanced context descriptor */ | ||
922 | type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT; | ||
923 | |||
924 | context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); | ||
925 | context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof); | ||
926 | context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); | ||
927 | context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); | ||
928 | } | ||
929 | |||
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index e9d9fca084a9..398fc223cab9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -55,8 +55,13 @@
 char ixgbe_driver_name[] = "ixgbe";
 static const char ixgbe_driver_string[] =
 			      "Intel(R) 10 Gigabit PCI Express Network Driver";
+#ifdef IXGBE_FCOE
 char ixgbe_default_device_descr[] =
 			      "Intel(R) 10 Gigabit Network Connection";
+#else
+static char ixgbe_default_device_descr[] =
+			      "Intel(R) 10 Gigabit Network Connection";
+#endif
 #define MAJ 3
 #define MIN 6
 #define BUILD 7
@@ -2314,7 +2319,7 @@ static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data)
  *
  * This function is used for legacy and MSI, NAPI mode
  **/
-static int ixgbe_poll(struct napi_struct *napi, int budget)
+int ixgbe_poll(struct napi_struct *napi, int budget)
 {
 	struct ixgbe_q_vector *q_vector =
 				container_of(napi, struct ixgbe_q_vector, napi);
@@ -4320,886 +4325,6 @@ static void ixgbe_tx_timeout(struct net_device *netdev)
4320 | } | 4325 | } |
4321 | 4326 | ||
4322 | /** | 4327 | /** |
4323 | * ixgbe_set_rss_queues: Allocate queues for RSS | ||
4324 | * @adapter: board private structure to initialize | ||
4325 | * | ||
4326 | * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try | ||
4327 | * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU. | ||
4328 | * | ||
4329 | **/ | ||
4330 | static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) | ||
4331 | { | ||
4332 | bool ret = false; | ||
4333 | struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_RSS]; | ||
4334 | |||
4335 | if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { | ||
4336 | f->mask = 0xF; | ||
4337 | adapter->num_rx_queues = f->indices; | ||
4338 | adapter->num_tx_queues = f->indices; | ||
4339 | ret = true; | ||
4340 | } | ||
4341 | |||
4342 | return ret; | ||
4343 | } | ||
4344 | |||
4345 | /** | ||
4346 | * ixgbe_set_fdir_queues: Allocate queues for Flow Director | ||
4347 | * @adapter: board private structure to initialize | ||
4348 | * | ||
4349 | * Flow Director is an advanced Rx filter, attempting to get Rx flows back | ||
4350 | * to the original CPU that initiated the Tx session. This runs in addition | ||
4351 | * to RSS, so if a packet doesn't match an FDIR filter, we can still spread the | ||
4352 | * Rx load across CPUs using RSS. | ||
4353 | * | ||
4354 | **/ | ||
4355 | static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter) | ||
4356 | { | ||
4357 | bool ret = false; | ||
4358 | struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR]; | ||
4359 | |||
4360 | f_fdir->indices = min_t(int, num_online_cpus(), f_fdir->indices); | ||
4361 | f_fdir->mask = 0; | ||
4362 | |||
4363 | /* | ||
4364 | * Use RSS in addition to Flow Director to ensure the best | ||
4365 | * distribution of flows across cores, even when an FDIR flow | ||
4366 | * isn't matched. | ||
4367 | */ | ||
4368 | if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) && | ||
4369 | (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) { | ||
4370 | adapter->num_tx_queues = f_fdir->indices; | ||
4371 | adapter->num_rx_queues = f_fdir->indices; | ||
4372 | ret = true; | ||
4373 | } else { | ||
4374 | adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; | ||
4375 | } | ||
4376 | return ret; | ||
4377 | } | ||
4378 | |||
4379 | #ifdef IXGBE_FCOE | ||
4380 | /** | ||
4381 | * ixgbe_set_fcoe_queues: Allocate queues for Fiber Channel over Ethernet (FCoE) | ||
4382 | * @adapter: board private structure to initialize | ||
4383 | * | ||
4384 | * FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges. | ||
4385 | * The ring feature mask is not used as a mask for FCoE, as it can take any 8 | ||
4386 | * rx queues out of the max number of rx queues, instead, it is used as the | ||
4387 | * index of the first rx queue used by FCoE. | ||
4388 | * | ||
4389 | **/ | ||
4390 | static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter) | ||
4391 | { | ||
4392 | struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; | ||
4393 | |||
4394 | if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) | ||
4395 | return false; | ||
4396 | |||
4397 | f->indices = min_t(int, num_online_cpus(), f->indices); | ||
4398 | |||
4399 | adapter->num_rx_queues = 1; | ||
4400 | adapter->num_tx_queues = 1; | ||
4401 | |||
4402 | if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { | ||
4403 | e_info(probe, "FCoE enabled with RSS\n"); | ||
4404 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) | ||
4405 | ixgbe_set_fdir_queues(adapter); | ||
4406 | else | ||
4407 | ixgbe_set_rss_queues(adapter); | ||
4408 | } | ||
4409 | |||
4410 | /* adding FCoE rx rings to the end */ | ||
4411 | f->mask = adapter->num_rx_queues; | ||
4412 | adapter->num_rx_queues += f->indices; | ||
4413 | adapter->num_tx_queues += f->indices; | ||
4414 | |||
4415 | return true; | ||
4416 | } | ||
4417 | #endif /* IXGBE_FCOE */ | ||
4418 | |||
4419 | /* Artificial max queue cap per traffic class in DCB mode */ | ||
4420 | #define DCB_QUEUE_CAP 8 | ||
4421 | |||
4422 | #ifdef CONFIG_IXGBE_DCB | ||
4423 | static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter) | ||
4424 | { | ||
4425 | int per_tc_q, q, i, offset = 0; | ||
4426 | struct net_device *dev = adapter->netdev; | ||
4427 | int tcs = netdev_get_num_tc(dev); | ||
4428 | |||
4429 | if (!tcs) | ||
4430 | return false; | ||
4431 | |||
4432 | /* Map queue offset and counts onto allocated tx queues */ | ||
4433 | per_tc_q = min_t(unsigned int, dev->num_tx_queues / tcs, DCB_QUEUE_CAP); | ||
4434 | q = min_t(int, num_online_cpus(), per_tc_q); | ||
4435 | |||
4436 | for (i = 0; i < tcs; i++) { | ||
4437 | netdev_set_tc_queue(dev, i, q, offset); | ||
4438 | offset += q; | ||
4439 | } | ||
4440 | |||
4441 | adapter->num_tx_queues = q * tcs; | ||
4442 | adapter->num_rx_queues = q * tcs; | ||
4443 | |||
4444 | #ifdef IXGBE_FCOE | ||
4445 | /* FCoE enabled queues require special configuration indexed | ||
4446 | * by feature specific indices and mask. Here we map FCoE | ||
4447 | * indices onto the DCB queue pairs allowing FCoE to own | ||
4448 | * configuration later. | ||
4449 | */ | ||
4450 | if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { | ||
4451 | u8 prio_tc[MAX_USER_PRIORITY] = {0}; | ||
4452 | int tc; | ||
4453 | struct ixgbe_ring_feature *f = | ||
4454 | &adapter->ring_feature[RING_F_FCOE]; | ||
4455 | |||
4456 | ixgbe_dcb_unpack_map(&adapter->dcb_cfg, DCB_TX_CONFIG, prio_tc); | ||
4457 | tc = prio_tc[adapter->fcoe.up]; | ||
4458 | f->indices = dev->tc_to_txq[tc].count; | ||
4459 | f->mask = dev->tc_to_txq[tc].offset; | ||
4460 | } | ||
4461 | #endif | ||
4462 | |||
4463 | return true; | ||
4464 | } | ||
4465 | #endif | ||
4466 | |||
4467 | /** | ||
4468 | * ixgbe_set_sriov_queues: Allocate queues for IOV use | ||
4469 | * @adapter: board private structure to initialize | ||
4470 | * | ||
4471 | * IOV doesn't actually use anything, so just NAK the | ||
4472 | * request for now and let the other queue routines | ||
4473 | * figure out what to do. | ||
4474 | */ | ||
4475 | static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter) | ||
4476 | { | ||
4477 | return false; | ||
4478 | } | ||
4479 | |||
4480 | /* | ||
4481 | * ixgbe_set_num_queues: Allocate queues for device, feature dependent | ||
4482 | * @adapter: board private structure to initialize | ||
4483 | * | ||
4484 | * This is the top level queue allocation routine. The order here is very | ||
4485 | * important, starting with the "most" number of features turned on at once, | ||
4486 | * and ending with the smallest set of features. This way large combinations | ||
4487 | * can be allocated if they're turned on, and smaller combinations are the | ||
4488 | * fallthrough conditions. | ||
4489 | * | ||
4490 | **/ | ||
4491 | static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter) | ||
4492 | { | ||
4493 | /* Start with base case */ | ||
4494 | adapter->num_rx_queues = 1; | ||
4495 | adapter->num_tx_queues = 1; | ||
4496 | adapter->num_rx_pools = adapter->num_rx_queues; | ||
4497 | adapter->num_rx_queues_per_pool = 1; | ||
4498 | |||
4499 | if (ixgbe_set_sriov_queues(adapter)) | ||
4500 | goto done; | ||
4501 | |||
4502 | #ifdef CONFIG_IXGBE_DCB | ||
4503 | if (ixgbe_set_dcb_queues(adapter)) | ||
4504 | goto done; | ||
4505 | |||
4506 | #endif | ||
4507 | #ifdef IXGBE_FCOE | ||
4508 | if (ixgbe_set_fcoe_queues(adapter)) | ||
4509 | goto done; | ||
4510 | |||
4511 | #endif /* IXGBE_FCOE */ | ||
4512 | if (ixgbe_set_fdir_queues(adapter)) | ||
4513 | goto done; | ||
4514 | |||
4515 | if (ixgbe_set_rss_queues(adapter)) | ||
4516 | goto done; | ||
4517 | |||
4518 | /* fallback to base case */ | ||
4519 | adapter->num_rx_queues = 1; | ||
4520 | adapter->num_tx_queues = 1; | ||
4521 | |||
4522 | done: | ||
4523 | if ((adapter->netdev->reg_state == NETREG_UNREGISTERED) || | ||
4524 | (adapter->netdev->reg_state == NETREG_UNREGISTERING)) | ||
4525 | return 0; | ||
4526 | |||
4527 | /* Notify the stack of the (possibly) reduced queue counts. */ | ||
4528 | netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues); | ||
4529 | return netif_set_real_num_rx_queues(adapter->netdev, | ||
4530 | adapter->num_rx_queues); | ||
4531 | } | ||
4532 | |||
4533 | static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, | ||
4534 | int vectors) | ||
4535 | { | ||
4536 | int err, vector_threshold; | ||
4537 | |||
4538 | /* We'll want at least 2 (vector_threshold): | ||
4539 | * 1) TxQ[0] + RxQ[0] handler | ||
4540 | * 2) Other (Link Status Change, etc.) | ||
4541 | */ | ||
4542 | vector_threshold = MIN_MSIX_COUNT; | ||
4543 | |||
4544 | /* | ||
4545 | * The more we get, the more we will assign to Tx/Rx Cleanup | ||
4546 | * for the separate queues...where Rx Cleanup >= Tx Cleanup. | ||
4547 | * Right now, we simply care about how many we'll get; we'll | ||
4548 | * set them up later while requesting irq's. | ||
4549 | */ | ||
4550 | while (vectors >= vector_threshold) { | ||
4551 | err = pci_enable_msix(adapter->pdev, adapter->msix_entries, | ||
4552 | vectors); | ||
4553 | if (!err) /* Success in acquiring all requested vectors. */ | ||
4554 | break; | ||
4555 | else if (err < 0) | ||
4556 | vectors = 0; /* Nasty failure, quit now */ | ||
4557 | else /* err == number of vectors we should try again with */ | ||
4558 | vectors = err; | ||
4559 | } | ||
4560 | |||
4561 | if (vectors < vector_threshold) { | ||
4562 | /* Can't allocate enough MSI-X interrupts? Oh well. | ||
4563 | * This just means we'll go with either a single MSI | ||
4564 | * vector or fall back to legacy interrupts. | ||
4565 | */ | ||
4566 | netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev, | ||
4567 | "Unable to allocate MSI-X interrupts\n"); | ||
4568 | adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; | ||
4569 | kfree(adapter->msix_entries); | ||
4570 | adapter->msix_entries = NULL; | ||
4571 | } else { | ||
4572 | adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */ | ||
4573 | /* | ||
4574 | * Adjust for only the vectors we'll use, which is minimum | ||
4575 | * of max_msix_q_vectors + NON_Q_VECTORS, or the number of | ||
4576 | * vectors we were allocated. | ||
4577 | */ | ||
4578 | adapter->num_msix_vectors = min(vectors, | ||
4579 | adapter->max_msix_q_vectors + NON_Q_VECTORS); | ||
4580 | } | ||
4581 | } | ||
4582 | |||
4583 | /** | ||
4584 | * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS | ||
4585 | * @adapter: board private structure to initialize | ||
4586 | * | ||
4587 | * Cache the descriptor ring offsets for RSS to the assigned rings. | ||
4588 | * | ||
4589 | **/ | ||
4590 | static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter) | ||
4591 | { | ||
4592 | int i; | ||
4593 | |||
4594 | if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) | ||
4595 | return false; | ||
4596 | |||
4597 | for (i = 0; i < adapter->num_rx_queues; i++) | ||
4598 | adapter->rx_ring[i]->reg_idx = i; | ||
4599 | for (i = 0; i < adapter->num_tx_queues; i++) | ||
4600 | adapter->tx_ring[i]->reg_idx = i; | ||
4601 | |||
4602 | return true; | ||
4603 | } | ||
4604 | |||
4605 | #ifdef CONFIG_IXGBE_DCB | ||
4606 | |||
4607 | /* ixgbe_get_first_reg_idx - Return first register index associated with ring */ | ||
4608 | static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc, | ||
4609 | unsigned int *tx, unsigned int *rx) | ||
4610 | { | ||
4611 | struct net_device *dev = adapter->netdev; | ||
4612 | struct ixgbe_hw *hw = &adapter->hw; | ||
4613 | u8 num_tcs = netdev_get_num_tc(dev); | ||
4614 | |||
4615 | *tx = 0; | ||
4616 | *rx = 0; | ||
4617 | |||
4618 | switch (hw->mac.type) { | ||
4619 | case ixgbe_mac_82598EB: | ||
4620 | *tx = tc << 2; | ||
4621 | *rx = tc << 3; | ||
4622 | break; | ||
4623 | case ixgbe_mac_82599EB: | ||
4624 | case ixgbe_mac_X540: | ||
4625 | if (num_tcs > 4) { | ||
4626 | if (tc < 3) { | ||
4627 | *tx = tc << 5; | ||
4628 | *rx = tc << 4; | ||
4629 | } else if (tc < 5) { | ||
4630 | *tx = ((tc + 2) << 4); | ||
4631 | *rx = tc << 4; | ||
4632 | } else if (tc < num_tcs) { | ||
4633 | *tx = ((tc + 8) << 3); | ||
4634 | *rx = tc << 4; | ||
4635 | } | ||
4636 | } else { | ||
4637 | *rx = tc << 5; | ||
4638 | switch (tc) { | ||
4639 | case 0: | ||
4640 | *tx = 0; | ||
4641 | break; | ||
4642 | case 1: | ||
4643 | *tx = 64; | ||
4644 | break; | ||
4645 | case 2: | ||
4646 | *tx = 96; | ||
4647 | break; | ||
4648 | case 3: | ||
4649 | *tx = 112; | ||
4650 | break; | ||
4651 | default: | ||
4652 | break; | ||
4653 | } | ||
4654 | } | ||
4655 | break; | ||
4656 | default: | ||
4657 | break; | ||
4658 | } | ||
4659 | } | ||
4660 | |||
4661 | /** | ||
4662 | * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB | ||
4663 | * @adapter: board private structure to initialize | ||
4664 | * | ||
4665 | * Cache the descriptor ring offsets for DCB to the assigned rings. | ||
4666 | * | ||
4667 | **/ | ||
4668 | static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter) | ||
4669 | { | ||
4670 | struct net_device *dev = adapter->netdev; | ||
4671 | int i, j, k; | ||
4672 | u8 num_tcs = netdev_get_num_tc(dev); | ||
4673 | |||
4674 | if (!num_tcs) | ||
4675 | return false; | ||
4676 | |||
4677 | for (i = 0, k = 0; i < num_tcs; i++) { | ||
4678 | unsigned int tx_s, rx_s; | ||
4679 | u16 count = dev->tc_to_txq[i].count; | ||
4680 | |||
4681 | ixgbe_get_first_reg_idx(adapter, i, &tx_s, &rx_s); | ||
4682 | for (j = 0; j < count; j++, k++) { | ||
4683 | adapter->tx_ring[k]->reg_idx = tx_s + j; | ||
4684 | adapter->rx_ring[k]->reg_idx = rx_s + j; | ||
4685 | adapter->tx_ring[k]->dcb_tc = i; | ||
4686 | adapter->rx_ring[k]->dcb_tc = i; | ||
4687 | } | ||
4688 | } | ||
4689 | |||
4690 | return true; | ||
4691 | } | ||
4692 | #endif | ||
4693 | |||
4694 | /** | ||
4695 | * ixgbe_cache_ring_fdir - Descriptor ring to register mapping for Flow Director | ||
4696 | * @adapter: board private structure to initialize | ||
4697 | * | ||
4698 | * Cache the descriptor ring offsets for Flow Director to the assigned rings. | ||
4699 | * | ||
4700 | **/ | ||
4701 | static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter) | ||
4702 | { | ||
4703 | int i; | ||
4704 | bool ret = false; | ||
4705 | |||
4706 | if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) && | ||
4707 | (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) { | ||
4708 | for (i = 0; i < adapter->num_rx_queues; i++) | ||
4709 | adapter->rx_ring[i]->reg_idx = i; | ||
4710 | for (i = 0; i < adapter->num_tx_queues; i++) | ||
4711 | adapter->tx_ring[i]->reg_idx = i; | ||
4712 | ret = true; | ||
4713 | } | ||
4714 | |||
4715 | return ret; | ||
4716 | } | ||
4717 | |||
4718 | #ifdef IXGBE_FCOE | ||
4719 | /** | ||
4720 | * ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for the FCoE | ||
4721 | * @adapter: board private structure to initialize | ||
4722 | * | ||
4723 | * Cache the descriptor ring offsets for FCoE mode to the assigned rings. | ||
4724 | * | ||
4725 | */ | ||
4726 | static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter) | ||
4727 | { | ||
4728 | struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; | ||
4729 | int i; | ||
4730 | u8 fcoe_rx_i = 0, fcoe_tx_i = 0; | ||
4731 | |||
4732 | if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) | ||
4733 | return false; | ||
4734 | |||
4735 | if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { | ||
4736 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) | ||
4737 | ixgbe_cache_ring_fdir(adapter); | ||
4738 | else | ||
4739 | ixgbe_cache_ring_rss(adapter); | ||
4740 | |||
4741 | fcoe_rx_i = f->mask; | ||
4742 | fcoe_tx_i = f->mask; | ||
4743 | } | ||
4744 | for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) { | ||
4745 | adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i; | ||
4746 | adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i; | ||
4747 | } | ||
4748 | return true; | ||
4749 | } | ||
4750 | |||
4751 | #endif /* IXGBE_FCOE */ | ||
4752 | /** | ||
4753 | * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov | ||
4754 | * @adapter: board private structure to initialize | ||
4755 | * | ||
4756 | * SR-IOV doesn't use any descriptor rings but changes the default if | ||
4757 | * no other mapping is used. | ||
4758 | * | ||
4759 | */ | ||
4760 | static inline bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter) | ||
4761 | { | ||
4762 | adapter->rx_ring[0]->reg_idx = adapter->num_vfs * 2; | ||
4763 | adapter->tx_ring[0]->reg_idx = adapter->num_vfs * 2; | ||
4764 | if (adapter->num_vfs) | ||
4765 | return true; | ||
4766 | else | ||
4767 | return false; | ||
4768 | } | ||
4769 | |||
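The register index chosen here follows from the VMDq pool layout assumed when SR-IOV is active, where each VF pool owns a pair of queues and the PF's first ring is placed after them; the exact pool geometry depends on hardware and configuration, so treat the numbers in this sketch as an illustration only.

/* Illustration only: with 8 VFs and two queues assumed per VF pool,
 * the PF's first ring is mapped past the VF queues. */
#include <stdio.h>

int main(void)
{
	unsigned int num_vfs = 8;                        /* assumed VF count */
	printf("PF ring 0 -> reg_idx %u\n", num_vfs * 2); /* prints 16 */
	return 0;
}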
4770 | /** | ||
4771 | * ixgbe_cache_ring_register - Descriptor ring to register mapping | ||
4772 | * @adapter: board private structure to initialize | ||
4773 | * | ||
4774 | * Once we know the feature-set enabled for the device, we'll cache | ||
4775 | * the register offset the descriptor ring is assigned to. | ||
4776 | * | ||
4777 | * Note, the order of the various feature calls is important. It must start | ||
4778 | * with the "most" features enabled at the same time, then trickle down to | ||
4779 | * the fewest features turned on at once. | ||
4780 | **/ | ||
4781 | static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) | ||
4782 | { | ||
4783 | /* start with default case */ | ||
4784 | adapter->rx_ring[0]->reg_idx = 0; | ||
4785 | adapter->tx_ring[0]->reg_idx = 0; | ||
4786 | |||
4787 | if (ixgbe_cache_ring_sriov(adapter)) | ||
4788 | return; | ||
4789 | |||
4790 | #ifdef CONFIG_IXGBE_DCB | ||
4791 | if (ixgbe_cache_ring_dcb(adapter)) | ||
4792 | return; | ||
4793 | #endif | ||
4794 | |||
4795 | #ifdef IXGBE_FCOE | ||
4796 | if (ixgbe_cache_ring_fcoe(adapter)) | ||
4797 | return; | ||
4798 | #endif /* IXGBE_FCOE */ | ||
4799 | |||
4800 | if (ixgbe_cache_ring_fdir(adapter)) | ||
4801 | return; | ||
4802 | |||
4803 | if (ixgbe_cache_ring_rss(adapter)) | ||
4804 | return; | ||
4805 | } | ||
4806 | |||
4807 | /** | ||
4808 | * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported | ||
4809 | * @adapter: board private structure to initialize | ||
4810 | * | ||
4811 | * Attempt to configure the interrupts using the best available | ||
4812 | * capabilities of the hardware and the kernel. | ||
4813 | **/ | ||
4814 | static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) | ||
4815 | { | ||
4816 | struct ixgbe_hw *hw = &adapter->hw; | ||
4817 | int err = 0; | ||
4818 | int vector, v_budget; | ||
4819 | |||
4820 | /* | ||
4821 | * It's easy to be greedy for MSI-X vectors, but it really | ||
4822 | * doesn't do us much good if we have a lot more vectors | ||
4823 | * than CPUs. So let's be conservative and only ask for | ||
4824 | * (roughly) the same number of vectors as there are CPUs. | ||
4825 | * The default is to use pairs of vectors. | ||
4826 | */ | ||
4827 | v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues); | ||
4828 | v_budget = min_t(int, v_budget, num_online_cpus()); | ||
4829 | v_budget += NON_Q_VECTORS; | ||
4830 | |||
4831 | /* | ||
4832 | * At the same time, hardware can only support a maximum of | ||
4833 | * hw->mac.max_msix_vectors vectors. With features | ||
4834 | * such as RSS and VMDq, we can easily surpass the number of Rx and Tx | ||
4835 | * descriptor queues supported by our device. Thus, we cap it off in | ||
4836 | * those rare cases where the CPU count also exceeds our vector limit. | ||
4837 | */ | ||
4838 | v_budget = min_t(int, v_budget, hw->mac.max_msix_vectors); | ||
4839 | |||
4840 | /* A failure in MSI-X entry allocation isn't fatal, but it does | ||
4841 | * mean we disable MSI-X capabilities of the adapter. */ | ||
4842 | adapter->msix_entries = kcalloc(v_budget, | ||
4843 | sizeof(struct msix_entry), GFP_KERNEL); | ||
4844 | if (adapter->msix_entries) { | ||
4845 | for (vector = 0; vector < v_budget; vector++) | ||
4846 | adapter->msix_entries[vector].entry = vector; | ||
4847 | |||
4848 | ixgbe_acquire_msix_vectors(adapter, v_budget); | ||
4849 | |||
4850 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) | ||
4851 | goto out; | ||
4852 | } | ||
4853 | |||
4854 | adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; | ||
4855 | adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; | ||
4856 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { | ||
4857 | e_err(probe, | ||
4858 | "ATR is not supported while multiple " | ||
4859 | "queues are disabled. Disabling Flow Director\n"); | ||
4860 | } | ||
4861 | adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; | ||
4862 | adapter->atr_sample_rate = 0; | ||
4863 | if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) | ||
4864 | ixgbe_disable_sriov(adapter); | ||
4865 | |||
4866 | err = ixgbe_set_num_queues(adapter); | ||
4867 | if (err) | ||
4868 | return err; | ||
4869 | |||
4870 | err = pci_enable_msi(adapter->pdev); | ||
4871 | if (!err) { | ||
4872 | adapter->flags |= IXGBE_FLAG_MSI_ENABLED; | ||
4873 | } else { | ||
4874 | netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev, | ||
4875 | "Unable to allocate MSI interrupt, " | ||
4876 | "falling back to legacy. Error: %d\n", err); | ||
4877 | /* reset err */ | ||
4878 | err = 0; | ||
4879 | } | ||
4880 | |||
4881 | out: | ||
4882 | return err; | ||
4883 | } | ||
4884 | |||
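Putting the budget arithmetic above together: the driver asks for roughly one vector per queue pair, bounded by the online CPU count, adds the non-queue vectors, and finally caps the total at the hardware MSI-X limit. A stand-alone sketch with assumed counts (NON_Q_VECTORS and the limits below are example values, not the driver's definitions):

/* Illustration only: MSI-X vector budget computation. */
#include <stdio.h>

#define NON_Q_VECTORS 1          /* assumed: one non-queue ("other") vector */

static int min_int(int a, int b) { return a < b ? a : b; }
static int max_int(int a, int b) { return a > b ? a : b; }

int main(void)
{
	int num_rx_queues = 16, num_tx_queues = 16; /* assumed queue counts */
	int online_cpus = 8;                        /* assumed online CPU count */
	int max_msix_vectors = 64;                  /* assumed hardware MSI-X limit */
	int v_budget;

	v_budget = max_int(num_rx_queues, num_tx_queues);
	v_budget = min_int(v_budget, online_cpus);
	v_budget += NON_Q_VECTORS;
	v_budget = min_int(v_budget, max_msix_vectors);

	printf("v_budget = %d\n", v_budget);   /* 8 + 1 = 9 */
	return 0;
}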
4885 | static void ixgbe_add_ring(struct ixgbe_ring *ring, | ||
4886 | struct ixgbe_ring_container *head) | ||
4887 | { | ||
4888 | ring->next = head->ring; | ||
4889 | head->ring = ring; | ||
4890 | head->count++; | ||
4891 | } | ||
4892 | |||
4893 | /** | ||
4894 | * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector | ||
4895 | * @adapter: board private structure to initialize | ||
4896 | * @v_idx: index of vector in adapter struct | ||
4897 | * | ||
4898 | * We allocate one q_vector. If allocation fails we return -ENOMEM. | ||
4899 | **/ | ||
4900 | static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, int v_idx, | ||
4901 | int txr_count, int txr_idx, | ||
4902 | int rxr_count, int rxr_idx) | ||
4903 | { | ||
4904 | struct ixgbe_q_vector *q_vector; | ||
4905 | struct ixgbe_ring *ring; | ||
4906 | int node = -1; | ||
4907 | int cpu = -1; | ||
4908 | int ring_count, size; | ||
4909 | |||
4910 | ring_count = txr_count + rxr_count; | ||
4911 | size = sizeof(struct ixgbe_q_vector) + | ||
4912 | (sizeof(struct ixgbe_ring) * ring_count); | ||
4913 | |||
4914 | /* customize cpu for Flow Director mapping */ | ||
4915 | if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { | ||
4916 | if (cpu_online(v_idx)) { | ||
4917 | cpu = v_idx; | ||
4918 | node = cpu_to_node(cpu); | ||
4919 | } | ||
4920 | } | ||
4921 | |||
4922 | /* allocate q_vector and rings */ | ||
4923 | q_vector = kzalloc_node(size, GFP_KERNEL, node); | ||
4924 | if (!q_vector) | ||
4925 | q_vector = kzalloc(size, GFP_KERNEL); | ||
4926 | if (!q_vector) | ||
4927 | return -ENOMEM; | ||
4928 | |||
4929 | /* setup affinity mask and node */ | ||
4930 | if (cpu != -1) | ||
4931 | cpumask_set_cpu(cpu, &q_vector->affinity_mask); | ||
4932 | else | ||
4933 | cpumask_copy(&q_vector->affinity_mask, cpu_online_mask); | ||
4934 | q_vector->numa_node = node; | ||
4935 | |||
4936 | /* initialize NAPI */ | ||
4937 | netif_napi_add(adapter->netdev, &q_vector->napi, | ||
4938 | ixgbe_poll, 64); | ||
4939 | |||
4940 | /* tie q_vector and adapter together */ | ||
4941 | adapter->q_vector[v_idx] = q_vector; | ||
4942 | q_vector->adapter = adapter; | ||
4943 | q_vector->v_idx = v_idx; | ||
4944 | |||
4945 | /* initialize work limits */ | ||
4946 | q_vector->tx.work_limit = adapter->tx_work_limit; | ||
4947 | |||
4948 | /* initialize pointer to rings */ | ||
4949 | ring = q_vector->ring; | ||
4950 | |||
4951 | while (txr_count) { | ||
4952 | /* assign generic ring traits */ | ||
4953 | ring->dev = &adapter->pdev->dev; | ||
4954 | ring->netdev = adapter->netdev; | ||
4955 | |||
4956 | /* configure backlink on ring */ | ||
4957 | ring->q_vector = q_vector; | ||
4958 | |||
4959 | /* update q_vector Tx values */ | ||
4960 | ixgbe_add_ring(ring, &q_vector->tx); | ||
4961 | |||
4962 | /* apply Tx specific ring traits */ | ||
4963 | ring->count = adapter->tx_ring_count; | ||
4964 | ring->queue_index = txr_idx; | ||
4965 | |||
4966 | /* assign ring to adapter */ | ||
4967 | adapter->tx_ring[txr_idx] = ring; | ||
4968 | |||
4969 | /* update count and index */ | ||
4970 | txr_count--; | ||
4971 | txr_idx++; | ||
4972 | |||
4973 | /* push pointer to next ring */ | ||
4974 | ring++; | ||
4975 | } | ||
4976 | |||
4977 | while (rxr_count) { | ||
4978 | /* assign generic ring traits */ | ||
4979 | ring->dev = &adapter->pdev->dev; | ||
4980 | ring->netdev = adapter->netdev; | ||
4981 | |||
4982 | /* configure backlink on ring */ | ||
4983 | ring->q_vector = q_vector; | ||
4984 | |||
4985 | /* update q_vector Rx values */ | ||
4986 | ixgbe_add_ring(ring, &q_vector->rx); | ||
4987 | |||
4988 | /* | ||
4989 | * 82599 erratum: UDP frames with a zero checksum | ||
4990 | * can be marked as checksum errors. | ||
4991 | */ | ||
4992 | if (adapter->hw.mac.type == ixgbe_mac_82599EB) | ||
4993 | set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state); | ||
4994 | |||
4995 | /* apply Rx specific ring traits */ | ||
4996 | ring->count = adapter->rx_ring_count; | ||
4997 | ring->queue_index = rxr_idx; | ||
4998 | |||
4999 | /* assign ring to adapter */ | ||
5000 | adapter->rx_ring[rxr_idx] = ring; | ||
5001 | |||
5002 | /* update count and index */ | ||
5003 | rxr_count--; | ||
5004 | rxr_idx++; | ||
5005 | |||
5006 | /* push pointer to next ring */ | ||
5007 | ring++; | ||
5008 | } | ||
5009 | |||
5010 | return 0; | ||
5011 | } | ||
5012 | |||
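The allocation above packs the q_vector and all of its rings into one block: size is the q_vector structure plus ring_count ring structures, and the ring pointer then simply walks that trailing array while each ring is linked into the vector's Tx or Rx container. A reduced user-space model of that single-allocation layout, with struct members trimmed to what the example needs:

/* Illustration only: q_vector with its rings allocated inline. */
#include <stdio.h>
#include <stdlib.h>

struct ring { int queue_index; };

struct q_vector {
	int v_idx;
	struct ring ring[];   /* rings live right after the vector itself */
};

int main(void)
{
	int txr_count = 1, rxr_count = 1;
	int ring_count = txr_count + rxr_count;
	struct q_vector *qv;
	struct ring *r;
	int i;

	qv = calloc(1, sizeof(*qv) + sizeof(struct ring) * ring_count);
	if (!qv)
		return 1;
	r = qv->ring;
	for (i = 0; i < ring_count; i++, r++)
		r->queue_index = i;
	printf("q_vector at %p, rings at %p and %p (contiguous)\n",
	       (void *)qv, (void *)&qv->ring[0], (void *)&qv->ring[1]);
	free(qv);
	return 0;
}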
5013 | /** | ||
5014 | * ixgbe_free_q_vector - Free memory allocated for specific interrupt vector | ||
5015 | * @adapter: board private structure to initialize | ||
5016 | * @v_idx: Index of vector to be freed | ||
5017 | * | ||
5018 | * This function frees the memory allocated to the q_vector. In addition if | ||
5019 | * NAPI is enabled it will delete any references to the NAPI struct prior | ||
5020 | * to freeing the q_vector. | ||
5021 | **/ | ||
5022 | static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx) | ||
5023 | { | ||
5024 | struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx]; | ||
5025 | struct ixgbe_ring *ring; | ||
5026 | |||
5027 | ixgbe_for_each_ring(ring, q_vector->tx) | ||
5028 | adapter->tx_ring[ring->queue_index] = NULL; | ||
5029 | |||
5030 | ixgbe_for_each_ring(ring, q_vector->rx) | ||
5031 | adapter->rx_ring[ring->queue_index] = NULL; | ||
5032 | |||
5033 | adapter->q_vector[v_idx] = NULL; | ||
5034 | netif_napi_del(&q_vector->napi); | ||
5035 | |||
5036 | /* | ||
5037 | * ixgbe_get_stats64() might access the rings on this vector, so | ||
5038 | * we must wait a grace period before freeing it. | ||
5039 | */ | ||
5040 | kfree_rcu(q_vector, rcu); | ||
5041 | } | ||
5042 | |||
5043 | /** | ||
5044 | * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors | ||
5045 | * @adapter: board private structure to initialize | ||
5046 | * | ||
5047 | * We allocate one q_vector per queue interrupt. If allocation fails we | ||
5048 | * return -ENOMEM. | ||
5049 | **/ | ||
5050 | static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter) | ||
5051 | { | ||
5052 | int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; | ||
5053 | int rxr_remaining = adapter->num_rx_queues; | ||
5054 | int txr_remaining = adapter->num_tx_queues; | ||
5055 | int rxr_idx = 0, txr_idx = 0, v_idx = 0; | ||
5056 | int err; | ||
5057 | |||
5058 | /* only one q_vector if MSI-X is disabled. */ | ||
5059 | if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) | ||
5060 | q_vectors = 1; | ||
5061 | |||
5062 | if (q_vectors >= (rxr_remaining + txr_remaining)) { | ||
5063 | for (; rxr_remaining; v_idx++, q_vectors--) { | ||
5064 | int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors); | ||
5065 | err = ixgbe_alloc_q_vector(adapter, v_idx, | ||
5066 | 0, 0, rqpv, rxr_idx); | ||
5067 | |||
5068 | if (err) | ||
5069 | goto err_out; | ||
5070 | |||
5071 | /* update counts and index */ | ||
5072 | rxr_remaining -= rqpv; | ||
5073 | rxr_idx += rqpv; | ||
5074 | } | ||
5075 | } | ||
5076 | |||
5077 | for (; q_vectors; v_idx++, q_vectors--) { | ||
5078 | int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors); | ||
5079 | int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors); | ||
5080 | err = ixgbe_alloc_q_vector(adapter, v_idx, | ||
5081 | tqpv, txr_idx, | ||
5082 | rqpv, rxr_idx); | ||
5083 | |||
5084 | if (err) | ||
5085 | goto err_out; | ||
5086 | |||
5087 | /* update counts and index */ | ||
5088 | rxr_remaining -= rqpv; | ||
5089 | rxr_idx += rqpv; | ||
5090 | txr_remaining -= tqpv; | ||
5091 | txr_idx += tqpv; | ||
5092 | } | ||
5093 | |||
5094 | return 0; | ||
5095 | |||
5096 | err_out: | ||
5097 | while (v_idx) { | ||
5098 | v_idx--; | ||
5099 | ixgbe_free_q_vector(adapter, v_idx); | ||
5100 | } | ||
5101 | |||
5102 | return -ENOMEM; | ||
5103 | } | ||
5104 | |||
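The DIV_ROUND_UP split above keeps the remaining rings spread as evenly as possible over the remaining vectors, so earlier vectors never take more than one extra ring. A stand-alone walk-through of the combined Rx/Tx loop with example counts (16 Rx and 16 Tx rings over 9 vectors; the counts are assumptions, not values from the driver):

/* Illustration only: even ring-to-vector distribution. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int rxr_remaining = 16, txr_remaining = 16;  /* assumed ring counts */
	int q_vectors = 9;                           /* assumed vector count */
	int v_idx;

	for (v_idx = 0; q_vectors; v_idx++, q_vectors--) {
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors);
		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors);

		printf("vector %d: %d Rx, %d Tx\n", v_idx, rqpv, tqpv);
		rxr_remaining -= rqpv;
		txr_remaining -= tqpv;
	}
	return 0;
}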
5105 | /** | ||
5106 | * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors | ||
5107 | * @adapter: board private structure to initialize | ||
5108 | * | ||
5109 | * This function frees the memory allocated to the q_vectors. In addition if | ||
5110 | * NAPI is enabled it will delete any references to the NAPI struct prior | ||
5111 | * to freeing the q_vector. | ||
5112 | **/ | ||
5113 | static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter) | ||
5114 | { | ||
5115 | int v_idx, q_vectors; | ||
5116 | |||
5117 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) | ||
5118 | q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; | ||
5119 | else | ||
5120 | q_vectors = 1; | ||
5121 | |||
5122 | for (v_idx = 0; v_idx < q_vectors; v_idx++) | ||
5123 | ixgbe_free_q_vector(adapter, v_idx); | ||
5124 | } | ||
5125 | |||
5126 | static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter) | ||
5127 | { | ||
5128 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { | ||
5129 | adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; | ||
5130 | pci_disable_msix(adapter->pdev); | ||
5131 | kfree(adapter->msix_entries); | ||
5132 | adapter->msix_entries = NULL; | ||
5133 | } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { | ||
5134 | adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED; | ||
5135 | pci_disable_msi(adapter->pdev); | ||
5136 | } | ||
5137 | } | ||
5138 | |||
5139 | /** | ||
5140 | * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme | ||
5141 | * @adapter: board private structure to initialize | ||
5142 | * | ||
5143 | * We determine which interrupt scheme to use based on... | ||
5144 | * - Kernel support (MSI, MSI-X) | ||
5145 | * - which can be user-defined (via MODULE_PARAM) | ||
5146 | * - Hardware queue count (num_*_queues) | ||
5147 | * - defined by miscellaneous hardware support/features (RSS, etc.) | ||
5148 | **/ | ||
5149 | int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter) | ||
5150 | { | ||
5151 | int err; | ||
5152 | |||
5153 | /* Number of supported queues */ | ||
5154 | err = ixgbe_set_num_queues(adapter); | ||
5155 | if (err) | ||
5156 | return err; | ||
5157 | |||
5158 | err = ixgbe_set_interrupt_capability(adapter); | ||
5159 | if (err) { | ||
5160 | e_dev_err("Unable to setup interrupt capabilities\n"); | ||
5161 | goto err_set_interrupt; | ||
5162 | } | ||
5163 | |||
5164 | err = ixgbe_alloc_q_vectors(adapter); | ||
5165 | if (err) { | ||
5166 | e_dev_err("Unable to allocate memory for queue vectors\n"); | ||
5167 | goto err_alloc_q_vectors; | ||
5168 | } | ||
5169 | |||
5170 | ixgbe_cache_ring_register(adapter); | ||
5171 | |||
5172 | e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n", | ||
5173 | (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled", | ||
5174 | adapter->num_rx_queues, adapter->num_tx_queues); | ||
5175 | |||
5176 | set_bit(__IXGBE_DOWN, &adapter->state); | ||
5177 | |||
5178 | return 0; | ||
5179 | |||
5180 | err_alloc_q_vectors: | ||
5181 | ixgbe_reset_interrupt_capability(adapter); | ||
5182 | err_set_interrupt: | ||
5183 | return err; | ||
5184 | } | ||
5185 | |||
5186 | /** | ||
5187 | * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings | ||
5188 | * @adapter: board private structure to clear interrupt scheme on | ||
5189 | * | ||
5190 | * We go through and clear interrupt-specific resources and reset the structure | ||
5191 | * to pre-load conditions | ||
5192 | **/ | ||
5193 | void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter) | ||
5194 | { | ||
5195 | adapter->num_tx_queues = 0; | ||
5196 | adapter->num_rx_queues = 0; | ||
5197 | |||
5198 | ixgbe_free_q_vectors(adapter); | ||
5199 | ixgbe_reset_interrupt_capability(adapter); | ||
5200 | } | ||
5201 | |||
5202 | /** | ||
5203 | * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter) | 4328 | * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter) |
5204 | * @adapter: board private structure to initialize | 4329 | * @adapter: board private structure to initialize |
5205 | * | 4330 | * |
@@ -6557,26 +5682,6 @@ static void ixgbe_service_task(struct work_struct *work) | |||
6557 | ixgbe_service_event_complete(adapter); | 5682 | ixgbe_service_event_complete(adapter); |
6558 | } | 5683 | } |
6559 | 5684 | ||
6560 | void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens, | ||
6561 | u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx) | ||
6562 | { | ||
6563 | struct ixgbe_adv_tx_context_desc *context_desc; | ||
6564 | u16 i = tx_ring->next_to_use; | ||
6565 | |||
6566 | context_desc = IXGBE_TX_CTXTDESC(tx_ring, i); | ||
6567 | |||
6568 | i++; | ||
6569 | tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; | ||
6570 | |||
6571 | /* set bits to identify this as an advanced context descriptor */ | ||
6572 | type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT; | ||
6573 | |||
6574 | context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); | ||
6575 | context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof); | ||
6576 | context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); | ||
6577 | context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); | ||
6578 | } | ||
6579 | |||
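The context-descriptor helper removed above advances the ring's next_to_use index and wraps it back to zero once it reaches the ring size. A tiny stand-alone model of that wrap logic (the ring size is an arbitrary example value, not a real descriptor count):

/* Illustration only: next_to_use index wrap-around. */
#include <stdio.h>

int main(void)
{
	unsigned short count = 4;        /* assumed descriptor ring size */
	unsigned short next_to_use = 0;
	int n;

	for (n = 0; n < 6; n++) {
		unsigned short i = next_to_use;

		printf("using slot %u\n", (unsigned)i);
		i++;
		next_to_use = (i < count) ? i : 0;
	}
	return 0;   /* prints slots 0, 1, 2, 3, 0, 1 */
}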
6580 | static int ixgbe_tso(struct ixgbe_ring *tx_ring, | 5685 | static int ixgbe_tso(struct ixgbe_ring *tx_ring, |
6581 | struct ixgbe_tx_buffer *first, | 5686 | struct ixgbe_tx_buffer *first, |
6582 | u8 *hdr_len) | 5687 | u8 *hdr_len) |
@@ -7387,6 +6492,7 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev, | |||
7387 | return stats; | 6492 | return stats; |
7388 | } | 6493 | } |
7389 | 6494 | ||
6495 | #ifdef CONFIG_IXGBE_DCB | ||
7390 | /* ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid. | 6496 | /* ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid. |
7391 | * @adapter: pointer to ixgbe_adapter | 6497 | * @adapter: pointer to ixgbe_adapter |
7392 | * @tc: number of traffic classes currently enabled | 6498 | * @tc: number of traffic classes currently enabled |
@@ -7481,6 +6587,7 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) | |||
7481 | return 0; | 6587 | return 0; |
7482 | } | 6588 | } |
7483 | 6589 | ||
6590 | #endif /* CONFIG_IXGBE_DCB */ | ||
7484 | void ixgbe_do_reset(struct net_device *netdev) | 6591 | void ixgbe_do_reset(struct net_device *netdev) |
7485 | { | 6592 | { |
7486 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | 6593 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
@@ -7590,7 +6697,9 @@ static const struct net_device_ops ixgbe_netdev_ops = { | |||
7590 | .ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk, | 6697 | .ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk, |
7591 | .ndo_get_vf_config = ixgbe_ndo_get_vf_config, | 6698 | .ndo_get_vf_config = ixgbe_ndo_get_vf_config, |
7592 | .ndo_get_stats64 = ixgbe_get_stats64, | 6699 | .ndo_get_stats64 = ixgbe_get_stats64, |
6700 | #ifdef CONFIG_IXGBE_DCB | ||
7593 | .ndo_setup_tc = ixgbe_setup_tc, | 6701 | .ndo_setup_tc = ixgbe_setup_tc, |
6702 | #endif | ||
7594 | #ifdef CONFIG_NET_POLL_CONTROLLER | 6703 | #ifdef CONFIG_NET_POLL_CONTROLLER |
7595 | .ndo_poll_controller = ixgbe_netpoll, | 6704 | .ndo_poll_controller = ixgbe_netpoll, |
7596 | #endif | 6705 | #endif |