path: root/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
author	Jeff Kirsher <jeffrey.t.kirsher@intel.com>	2012-02-18 02:08:14 -0500
committer	Jeff Kirsher <jeffrey.t.kirsher@intel.com>	2012-03-19 16:59:11 -0400
commit	8af3c33f4dab8c20c0a0eb1a7e00d2303d7f47eb (patch)
tree	4213b4f6c0014783cc89d340a04611ed7cfa4e15 /drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
parent	567d2de291b5ddb83654c5e87c14b4c6fa7216ed (diff)
ixgbe: fix namespace issues when FCoE/DCB is not enabled
Resolve namespace issues when FCoE or DCB is not enabled. The issue is that with certain configurations we end up with namespace problems. A simple example:

ixgbe_main.c
- defines func A()
- uses func A()
ixgbe_fcoe.c
- uses func A()
ixgbe.h
- has prototype for func A()

For the default build (FCoE included) all is good. But when it isn't, the namespace checker complains that func A() could be static. To resolve this, create an ixgbe_lib.c file to contain the functions used by DCB/FCoE and their helper functions, so that they are always in namespace whether or not DCB/FCoE is enabled.

Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Tested-by: Phil Schmitt <phillip.j.schmitt@intel.com>
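A minimal sketch of the build situation described above (hypothetical file layout; func_A() stands in for the "func A()" of the example and is not a real driver symbol):

  /* ixgbe.h */
  void func_A(struct ixgbe_adapter *adapter);          /* prototype always visible */

  /* ixgbe_main.c */
  void func_A(struct ixgbe_adapter *adapter) { /* ... */ }  /* definition */
  /* ixgbe_main.c also calls func_A() itself */

  /* ixgbe_fcoe.c -- only compiled when FCoE support is configured in */
  /* calls func_A() */

When ixgbe_fcoe.c is not built, the only remaining caller of func_A() lives in the file that defines it, so the namespace checker reports that func_A() could be made static. Keeping such shared helpers in ixgbe_lib.c, which is always compiled, avoids the warning regardless of the FCoE/DCB configuration.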
Diffstat (limited to 'drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c')
-rw-r--r--	drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c	929
1 file changed, 929 insertions, 0 deletions
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
new file mode 100644
index 000000000000..027d7a75be39
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -0,0 +1,929 @@
/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2012 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include "ixgbe.h"
#include "ixgbe_sriov.h"

/**
 * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for RSS to the assigned rings.
 *
 **/
static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
{
	int i;

	if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
		return false;

	for (i = 0; i < adapter->num_rx_queues; i++)
		adapter->rx_ring[i]->reg_idx = i;
	for (i = 0; i < adapter->num_tx_queues; i++)
		adapter->tx_ring[i]->reg_idx = i;

	return true;
}
#ifdef CONFIG_IXGBE_DCB

/* ixgbe_get_first_reg_idx - Return first register index associated with ring */
static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
				    unsigned int *tx, unsigned int *rx)
{
	struct net_device *dev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u8 num_tcs = netdev_get_num_tc(dev);

	*tx = 0;
	*rx = 0;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
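		/* Derived from the shifts below: each TC owns 4 Tx and
		 * 8 Rx queue indices on 82598.
		 */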
		*tx = tc << 2;
		*rx = tc << 3;
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
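		/* Derived from the offsets below: with more than 4 TCs the
		 * Tx queues are split 32/32/16/16/8/8/8/8 across TCs and
		 * each TC gets 16 Rx queues; with 4 or fewer TCs each TC
		 * gets 32 Rx queues and Tx is split 64/32/16/16.
		 */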
		if (num_tcs > 4) {
			if (tc < 3) {
				*tx = tc << 5;
				*rx = tc << 4;
			} else if (tc < 5) {
				*tx = ((tc + 2) << 4);
				*rx = tc << 4;
			} else if (tc < num_tcs) {
				*tx = ((tc + 8) << 3);
				*rx = tc << 4;
			}
		} else {
			*rx = tc << 5;
			switch (tc) {
			case 0:
				*tx = 0;
				break;
			case 1:
				*tx = 64;
				break;
			case 2:
				*tx = 96;
				break;
			case 3:
				*tx = 112;
				break;
			default:
				break;
			}
		}
		break;
	default:
		break;
	}
}

/**
 * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for DCB to the assigned rings.
 *
 **/
static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	int i, j, k;
	u8 num_tcs = netdev_get_num_tc(dev);

	if (!num_tcs)
		return false;

	for (i = 0, k = 0; i < num_tcs; i++) {
		unsigned int tx_s, rx_s;
		u16 count = dev->tc_to_txq[i].count;

		ixgbe_get_first_reg_idx(adapter, i, &tx_s, &rx_s);
		for (j = 0; j < count; j++, k++) {
			adapter->tx_ring[k]->reg_idx = tx_s + j;
			adapter->rx_ring[k]->reg_idx = rx_s + j;
			adapter->tx_ring[k]->dcb_tc = i;
			adapter->rx_ring[k]->dcb_tc = i;
		}
	}

	return true;
}
#endif

/**
 * ixgbe_cache_ring_fdir - Descriptor ring to register mapping for Flow Director
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for Flow Director to the assigned rings.
 *
 **/
static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
{
	int i;
	bool ret = false;

	if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
	    (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) {
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->reg_idx = i;
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i]->reg_idx = i;
		ret = true;
	}

	return ret;
}

#ifdef IXGBE_FCOE
/**
 * ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for FCoE
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for FCoE mode to the assigned rings.
 *
 */
static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
	int i;
	u8 fcoe_rx_i = 0, fcoe_tx_i = 0;

	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
		return false;

	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
		if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
			ixgbe_cache_ring_fdir(adapter);
		else
			ixgbe_cache_ring_rss(adapter);

		fcoe_rx_i = f->mask;
		fcoe_tx_i = f->mask;
	}
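	/* f->mask is not a bit mask here: it holds the offset of the first
	 * FCoE ring (set in ixgbe_set_fcoe_queues), so the FCoE rings are
	 * mapped at the tail of the Rx/Tx ring arrays.
	 */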
	for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
		adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
		adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
	}
	return true;
}

#endif /* IXGBE_FCOE */
/**
 * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov
 * @adapter: board private structure to initialize
 *
 * SR-IOV doesn't use any descriptor rings but changes the default if
 * no other mapping is used.
 *
 */
static inline bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
{
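	/* Assuming the default SR-IOV layout of two queues per VF pool, the
	 * PF's first ring register index falls just past the VF queues,
	 * hence num_vfs * 2 below.
	 */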
	adapter->rx_ring[0]->reg_idx = adapter->num_vfs * 2;
	adapter->tx_ring[0]->reg_idx = adapter->num_vfs * 2;
	if (adapter->num_vfs)
		return true;
	else
		return false;
}

/**
 * ixgbe_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 *
 * Note: the order of the various feature calls is important. It must start
 * with the "most" features enabled at the same time, then trickle down to the
 * least amount of features turned on at once.
 **/
static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
{
	/* start with default case */
	adapter->rx_ring[0]->reg_idx = 0;
	adapter->tx_ring[0]->reg_idx = 0;

	if (ixgbe_cache_ring_sriov(adapter))
		return;

#ifdef CONFIG_IXGBE_DCB
	if (ixgbe_cache_ring_dcb(adapter))
		return;
#endif

#ifdef IXGBE_FCOE
	if (ixgbe_cache_ring_fcoe(adapter))
		return;
#endif /* IXGBE_FCOE */

	if (ixgbe_cache_ring_fdir(adapter))
		return;

	if (ixgbe_cache_ring_rss(adapter))
		return;
}

/**
 * ixgbe_set_sriov_queues: Allocate queues for IOV use
 * @adapter: board private structure to initialize
 *
 * IOV doesn't actually use anything, so just NAK the
 * request for now and let the other queue routines
 * figure out what to do.
 */
static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
{
	return false;
}

/**
 * ixgbe_set_rss_queues: Allocate queues for RSS
 * @adapter: board private structure to initialize
 *
 * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try
 * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
 *
 **/
static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
{
	bool ret = false;
	struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_RSS];

	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
		f->mask = 0xF;
		adapter->num_rx_queues = f->indices;
		adapter->num_tx_queues = f->indices;
		ret = true;
	}

	return ret;
}

/**
 * ixgbe_set_fdir_queues: Allocate queues for Flow Director
 * @adapter: board private structure to initialize
 *
 * Flow Director is an advanced Rx filter, attempting to get Rx flows back
 * to the original CPU that initiated the Tx session. This runs in addition
 * to RSS, so if a packet doesn't match an FDIR filter, we can still spread the
 * Rx load across CPUs using RSS.
 *
 **/
static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
{
	bool ret = false;
	struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR];

	f_fdir->indices = min_t(int, num_online_cpus(), f_fdir->indices);
	f_fdir->mask = 0;

	/*
	 * Use RSS in addition to Flow Director to ensure the best
	 * distribution of flows across cores, even when an FDIR flow
	 * isn't matched.
	 */
	if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
	    (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) {
		adapter->num_tx_queues = f_fdir->indices;
		adapter->num_rx_queues = f_fdir->indices;
		ret = true;
	} else {
		adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
	}
	return ret;
}

#ifdef IXGBE_FCOE
/**
 * ixgbe_set_fcoe_queues: Allocate queues for Fibre Channel over Ethernet (FCoE)
 * @adapter: board private structure to initialize
 *
 * FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges.
 * The ring feature mask is not used as a mask for FCoE, as it can take any 8
 * rx queues out of the max number of rx queues, instead, it is used as the
 * index of the first rx queue used by FCoE.
 *
 **/
static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];

	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
		return false;

	f->indices = min_t(int, num_online_cpus(), f->indices);

	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;

	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
		e_info(probe, "FCoE enabled with RSS\n");
		if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
			ixgbe_set_fdir_queues(adapter);
		else
			ixgbe_set_rss_queues(adapter);
	}

	/* adding FCoE rx rings to the end */
	f->mask = adapter->num_rx_queues;
	adapter->num_rx_queues += f->indices;
	adapter->num_tx_queues += f->indices;

	return true;
}
#endif /* IXGBE_FCOE */

/* Artificial max queue cap per traffic class in DCB mode */
#define DCB_QUEUE_CAP 8

#ifdef CONFIG_IXGBE_DCB
static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
{
	int per_tc_q, q, i, offset = 0;
	struct net_device *dev = adapter->netdev;
	int tcs = netdev_get_num_tc(dev);

	if (!tcs)
		return false;

	/* Map queue offset and counts onto allocated tx queues */
	per_tc_q = min_t(unsigned int, dev->num_tx_queues / tcs, DCB_QUEUE_CAP);
	q = min_t(int, num_online_cpus(), per_tc_q);
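	/* e.g. 4 TCs, 64 Tx queues and 16 online CPUs: per_tc_q = min(16, 8)
	 * and q = min(16, 8), so each TC owns 8 queue pairs at offsets
	 * 0, 8, 16 and 24.
	 */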

	for (i = 0; i < tcs; i++) {
		netdev_set_tc_queue(dev, i, q, offset);
		offset += q;
	}

	adapter->num_tx_queues = q * tcs;
	adapter->num_rx_queues = q * tcs;

#ifdef IXGBE_FCOE
	/* FCoE enabled queues require special configuration indexed
	 * by feature specific indices and mask. Here we map FCoE
	 * indices onto the DCB queue pairs allowing FCoE to own
	 * configuration later.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		u8 prio_tc[MAX_USER_PRIORITY] = {0};
		int tc;
		struct ixgbe_ring_feature *f =
			&adapter->ring_feature[RING_F_FCOE];

		ixgbe_dcb_unpack_map(&adapter->dcb_cfg, DCB_TX_CONFIG, prio_tc);
		tc = prio_tc[adapter->fcoe.up];
		f->indices = dev->tc_to_txq[tc].count;
		f->mask = dev->tc_to_txq[tc].offset;
	}
#endif

	return true;
}
#endif

/**
 * ixgbe_set_num_queues: Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine. The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features. This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 *
 **/
static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
{
	/* Start with base case */
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_rx_pools = adapter->num_rx_queues;
	adapter->num_rx_queues_per_pool = 1;

	if (ixgbe_set_sriov_queues(adapter))
		goto done;

#ifdef CONFIG_IXGBE_DCB
	if (ixgbe_set_dcb_queues(adapter))
		goto done;

#endif
#ifdef IXGBE_FCOE
	if (ixgbe_set_fcoe_queues(adapter))
		goto done;

#endif /* IXGBE_FCOE */
	if (ixgbe_set_fdir_queues(adapter))
		goto done;

	if (ixgbe_set_rss_queues(adapter))
		goto done;

	/* fallback to base case */
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;

done:
	if ((adapter->netdev->reg_state == NETREG_UNREGISTERED) ||
	    (adapter->netdev->reg_state == NETREG_UNREGISTERING))
		return 0;

	/* Notify the stack of the (possibly) reduced queue counts. */
	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
	return netif_set_real_num_rx_queues(adapter->netdev,
					    adapter->num_rx_queues);
}

static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
				       int vectors)
{
	int err, vector_threshold;

	/* We'll want at least 2 (vector_threshold):
	 * 1) TxQ[0] + RxQ[0] handler
	 * 2) Other (Link Status Change, etc.)
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/*
	 * The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 */
	while (vectors >= vector_threshold) {
		err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
				      vectors);
		if (!err) /* Success in acquiring all requested vectors. */
			break;
		else if (err < 0)
			vectors = 0; /* Nasty failure, quit now */
		else /* err == number of vectors we should try again with */
			vectors = err;
	}

	if (vectors < vector_threshold) {
		/* Can't allocate enough MSI-X interrupts?  Oh well.
		 * This just means we'll go with either a single MSI
		 * vector or fall back to legacy interrupts.
		 */
		netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
			     "Unable to allocate MSI-X interrupts\n");
		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else {
		adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
		/*
		 * Adjust for only the vectors we'll use, which is minimum
		 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
		 * vectors we were allocated.
		 */
		adapter->num_msix_vectors = min(vectors,
				   adapter->max_msix_q_vectors + NON_Q_VECTORS);
	}
}

static void ixgbe_add_ring(struct ixgbe_ring *ring,
			   struct ixgbe_ring_container *head)
{
	ring->next = head->ring;
	head->ring = ring;
	head->count++;
}

/**
 * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: index of vector in adapter struct
 *
 * We allocate one q_vector.  If allocation fails we return -ENOMEM.
 **/
static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, int v_idx,
				int txr_count, int txr_idx,
				int rxr_count, int rxr_idx)
{
	struct ixgbe_q_vector *q_vector;
	struct ixgbe_ring *ring;
	int node = -1;
	int cpu = -1;
	int ring_count, size;

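	/* The rings share one allocation with the q_vector itself:
	 * q_vector->ring points at an array of ring_count ixgbe_ring
	 * structures placed directly behind the q_vector.
	 */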
	ring_count = txr_count + rxr_count;
	size = sizeof(struct ixgbe_q_vector) +
	       (sizeof(struct ixgbe_ring) * ring_count);

	/* customize cpu for Flow Director mapping */
	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
		if (cpu_online(v_idx)) {
			cpu = v_idx;
			node = cpu_to_node(cpu);
		}
	}

	/* allocate q_vector and rings */
	q_vector = kzalloc_node(size, GFP_KERNEL, node);
	if (!q_vector)
		q_vector = kzalloc(size, GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	/* setup affinity mask and node */
	if (cpu != -1)
		cpumask_set_cpu(cpu, &q_vector->affinity_mask);
	else
		cpumask_copy(&q_vector->affinity_mask, cpu_online_mask);
	q_vector->numa_node = node;

	/* initialize NAPI */
	netif_napi_add(adapter->netdev, &q_vector->napi,
		       ixgbe_poll, 64);

	/* tie q_vector and adapter together */
	adapter->q_vector[v_idx] = q_vector;
	q_vector->adapter = adapter;
	q_vector->v_idx = v_idx;

	/* initialize work limits */
	q_vector->tx.work_limit = adapter->tx_work_limit;

	/* initialize pointer to rings */
	ring = q_vector->ring;

	while (txr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		ixgbe_add_ring(ring, &q_vector->tx);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		ring->queue_index = txr_idx;

		/* assign ring to adapter */
		adapter->tx_ring[txr_idx] = ring;

		/* update count and index */
		txr_count--;
		txr_idx++;

		/* push pointer to next ring */
		ring++;
	}

	while (rxr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Rx values */
		ixgbe_add_ring(ring, &q_vector->rx);

		/*
		 * 82599 errata, UDP frames with a 0 checksum
		 * can be marked as checksum errors.
		 */
		if (adapter->hw.mac.type == ixgbe_mac_82599EB)
			set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state);

		/* apply Rx specific ring traits */
		ring->count = adapter->rx_ring_count;
		ring->queue_index = rxr_idx;

		/* assign ring to adapter */
		adapter->rx_ring[rxr_idx] = ring;

		/* update count and index */
		rxr_count--;
		rxr_idx++;

		/* push pointer to next ring */
		ring++;
	}

	return 0;
}

/**
 * ixgbe_free_q_vector - Free memory allocated for specific interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: Index of vector to be freed
 *
 * This function frees the memory allocated to the q_vector.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
{
	struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx];
	struct ixgbe_ring *ring;

	ixgbe_for_each_ring(ring, q_vector->tx)
		adapter->tx_ring[ring->queue_index] = NULL;

	ixgbe_for_each_ring(ring, q_vector->rx)
		adapter->rx_ring[ring->queue_index] = NULL;

	adapter->q_vector[v_idx] = NULL;
	netif_napi_del(&q_vector->napi);

	/*
	 * ixgbe_get_stats64() might access the rings on this vector,
	 * we must wait a grace period before freeing it.
	 */
	kfree_rcu(q_vector, rcu);
}

/**
 * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
{
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int rxr_idx = 0, txr_idx = 0, v_idx = 0;
	int err;

	/* only one q_vector if MSI-X is disabled. */
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
		q_vectors = 1;

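	/* If there are at least as many vectors as rings, every ring gets its
	 * own vector (Rx rings first, then Tx). Otherwise rings are shared,
	 * e.g. 16 Tx + 16 Rx rings on 8 vectors gives each vector 2 Tx and
	 * 2 Rx rings.
	 */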
	if (q_vectors >= (rxr_remaining + txr_remaining)) {
		for (; rxr_remaining; v_idx++, q_vectors--) {
			int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors);
			err = ixgbe_alloc_q_vector(adapter, v_idx,
						   0, 0, rqpv, rxr_idx);

			if (err)
				goto err_out;

			/* update counts and index */
			rxr_remaining -= rqpv;
			rxr_idx += rqpv;
		}
	}

	for (; q_vectors; v_idx++, q_vectors--) {
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors);
		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors);
		err = ixgbe_alloc_q_vector(adapter, v_idx,
					   tqpv, txr_idx,
					   rqpv, rxr_idx);

		if (err)
			goto err_out;

		/* update counts and index */
		rxr_remaining -= rqpv;
		rxr_idx += rqpv;
		txr_remaining -= tqpv;
		txr_idx += tqpv;
	}

	return 0;

err_out:
	while (v_idx) {
		v_idx--;
		ixgbe_free_q_vector(adapter, v_idx);
	}

	return -ENOMEM;
}

/**
 * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
{
	int v_idx, q_vectors;

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
		q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	else
		q_vectors = 1;

	for (v_idx = 0; v_idx < q_vectors; v_idx++)
		ixgbe_free_q_vector(adapter, v_idx);
}

static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
{
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
		pci_disable_msi(adapter->pdev);
	}
}

/**
 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int err = 0;
	int vector, v_budget;

	/*
	 * It's easy to be greedy for MSI-X vectors, but it really
	 * doesn't do us much good if we have a lot more vectors
	 * than CPU's.  So let's be conservative and only ask for
	 * (roughly) the same number of vectors as there are CPU's.
	 * The default is to use pairs of vectors.
	 */
	v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
	v_budget = min_t(int, v_budget, num_online_cpus());
	v_budget += NON_Q_VECTORS;

	/*
	 * At the same time, hardware can only support a maximum of
	 * hw.mac->max_msix_vectors vectors.  With features
	 * such as RSS and VMDq, we can easily surpass the number of Rx and Tx
	 * descriptor queues supported by our device.  Thus, we cap it off in
	 * those rare cases where the cpu count also exceeds our vector limit.
	 */
	v_budget = min_t(int, v_budget, hw->mac.max_msix_vectors);

	/* A failure in MSI-X entry allocation isn't fatal, but it does
	 * mean we disable MSI-X capabilities of the adapter. */
	adapter->msix_entries = kcalloc(v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (adapter->msix_entries) {
		for (vector = 0; vector < v_budget; vector++)
			adapter->msix_entries[vector].entry = vector;

		ixgbe_acquire_msix_vectors(adapter, v_budget);

		if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
			goto out;
	}

	adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
	adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
		e_err(probe,
		      "ATR is not supported while multiple "
		      "queues are disabled.  Disabling Flow Director\n");
	}
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
	adapter->atr_sample_rate = 0;
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
		ixgbe_disable_sriov(adapter);

	err = ixgbe_set_num_queues(adapter);
	if (err)
		return err;

	err = pci_enable_msi(adapter->pdev);
	if (!err) {
		adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
	} else {
		netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
			     "Unable to allocate MSI interrupt, "
			     "falling back to legacy.  Error: %d\n", err);
		/* reset err */
		err = 0;
	}

out:
	return err;
}

/**
 * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
 * @adapter: board private structure to initialize
 *
 * We determine which interrupt scheme to use based on...
 * - Kernel support (MSI, MSI-X)
 *   - which can be user-defined (via MODULE_PARAM)
 * - Hardware queue count (num_*_queues)
 *   - defined by miscellaneous hardware support/features (RSS, etc.)
 **/
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	int err;

	/* Number of supported queues */
	err = ixgbe_set_num_queues(adapter);
	if (err)
		return err;

	err = ixgbe_set_interrupt_capability(adapter);
	if (err) {
		e_dev_err("Unable to setup interrupt capabilities\n");
		goto err_set_interrupt;
	}

	err = ixgbe_alloc_q_vectors(adapter);
	if (err) {
		e_dev_err("Unable to allocate memory for queue vectors\n");
		goto err_alloc_q_vectors;
	}

	ixgbe_cache_ring_register(adapter);

	e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
		   (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
		   adapter->num_rx_queues, adapter->num_tx_queues);

	set_bit(__IXGBE_DOWN, &adapter->state);

	return 0;

err_alloc_q_vectors:
	ixgbe_reset_interrupt_capability(adapter);
err_set_interrupt:
	return err;
}

/**
 * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @adapter: board private structure to clear interrupt scheme on
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;

	ixgbe_free_q_vectors(adapter);
	ixgbe_reset_interrupt_capability(adapter);
}

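/**
 * ixgbe_tx_ctxtdesc - write an advanced Tx context descriptor to the ring
 * @tx_ring: ring to place the descriptor on
 * @vlan_macip_lens: packed VLAN tag and MAC/IP header length fields
 * @fcoe_sof_eof: value for the seqnum_seed field (FCoE SOF/EOF)
 * @type_tucmd: descriptor type and offload command bits
 * @mss_l4len_idx: packed MSS and L4 header length fields
 *
 * Fills the slot at next_to_use with an advanced context descriptor and
 * advances next_to_use, wrapping back to 0 at the end of the ring.
 */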
void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
		       u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

	context_desc = IXGBE_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
	context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof);
	context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
}
929