author     Alexander Duyck <alexander.h.duyck@intel.com>    2009-10-27 11:49:27 -0400
committer  David S. Miller <davem@davemloft.net>            2009-10-28 04:20:16 -0400
commit     047e0030f1e601233ae5b03910602ec93c620bce (patch)
tree       3d9cd1d339b150277fe424e7fc9f3aa2a9716665
parent     678b77e265f6d66f1e68f3d095841c44ba5ab112 (diff)
igb: add new data structure for handling interrupts and NAPI
Add a new igb_q_vector data structure to handle interrupts and NAPI. This
helps abstract the rings away from the adapter struct, and it allows for
some consolidation, since a Tx ring and an Rx ring can now share a single
q_vector (and therefore a single interrupt vector and NAPI context).
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
 drivers/net/igb/igb.h         |  46
 drivers/net/igb/igb_ethtool.c |   8
 drivers/net/igb/igb_main.c    | 872
 3 files changed, 534 insertions(+), 392 deletions(-)
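In outline: each MSI-X vector now owns one igb_q_vector, which carries the
NAPI context and interrupt state and points at up to one Tx and one Rx ring
(either pointer may be NULL). A minimal sketch of the resulting interrupt
path, condensed from the igb_msix_ring() changes in the patch below (an
illustration with a hypothetical function name, not the full driver source,
which also writes the staged ITR value first):

	/* one handler per vector; "data" is the q_vector passed to
	 * request_irq(), so Tx and Rx no longer need separate ISRs */
	static irqreturn_t msix_ring_sketch(int irq, void *data)
	{
		struct igb_q_vector *q_vector = data;

		/* the vector's NAPI poll then cleans q_vector->tx_ring
		 * and/or q_vector->rx_ring, whichever are non-NULL */
		napi_schedule(&q_vector->napi);
		return IRQ_HANDLED;
	}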
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index b805b1c63f80..86492c8957ec 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -55,6 +55,8 @@ struct igb_adapter;
 #define IGB_DEFAULT_ITR 3 /* dynamic */
 #define IGB_MAX_ITR_USECS 10000
 #define IGB_MIN_ITR_USECS 10
+#define NON_Q_VECTORS 1
+#define MAX_Q_VECTORS 8
 
 /* Transmit and receive queues */
 #define IGB_MAX_RX_QUEUES (adapter->vfs_allocated_count ? \
@@ -149,25 +151,38 @@ struct igb_rx_queue_stats {
 	u64 drops;
 };
 
-struct igb_ring {
+struct igb_q_vector {
 	struct igb_adapter *adapter; /* backlink */
-	void *desc;          /* descriptor ring memory */
-	dma_addr_t dma;      /* phys address of the ring */
-	unsigned int size;   /* length of desc. ring in bytes */
-	unsigned int count;  /* number of desc. in the ring */
+	struct igb_ring *rx_ring;
+	struct igb_ring *tx_ring;
+	struct napi_struct napi;
+
+	u32 eims_value;
+	u16 cpu;
+
+	u16 itr_val;
+	u8 set_itr;
+	u8 itr_shift;
+	void __iomem *itr_register;
+
+	char name[IFNAMSIZ + 9];
+};
+
+struct igb_ring {
+	struct igb_q_vector *q_vector; /* backlink to q_vector */
+	void *desc;          /* descriptor ring memory */
+	dma_addr_t dma;      /* phys address of the ring */
+	unsigned int size;   /* length of desc. ring in bytes */
+	unsigned int count;  /* number of desc. in the ring */
 	u16 next_to_use;
 	u16 next_to_clean;
 	u16 head;
 	u16 tail;
 	struct igb_buffer *buffer_info; /* array of buffer info structs */
 
-	u32 eims_value;
-	u32 itr_val;
-	u16 itr_register;
-	u16 cpu;
+	u8 queue_index;
+	u8 reg_idx;
 
-	u16 queue_index;
-	u16 reg_idx;
 	unsigned int total_bytes;
 	unsigned int total_packets;
 
@@ -181,13 +196,8 @@ struct igb_ring {
 	struct {
 		struct igb_rx_queue_stats rx_stats;
 		u64 rx_queue_drops;
-		struct napi_struct napi;
-		int set_itr;
-		struct igb_ring *buddy;
 	};
 };
-
-	char name[IFNAMSIZ + 5];
 };
 
 #define E1000_RX_DESC_ADV(R, i) \
@@ -254,7 +264,6 @@ struct igb_adapter {
 
 	/* OS defined structs */
 	struct net_device *netdev;
-	struct napi_struct napi;
 	struct pci_dev *pdev;
 	struct cyclecounter cycles;
 	struct timecounter clock;
@@ -272,6 +281,9 @@ struct igb_adapter {
 	struct igb_ring test_rx_ring;
 
 	int msg_enable;
+
+	unsigned int num_q_vectors;
+	struct igb_q_vector *q_vector[MAX_Q_VECTORS];
 	struct msix_entry *msix_entries;
 	u32 eims_enable_mask;
 	u32 eims_other;
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index dafb25bfd9e1..f71276fec3ff 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -1907,7 +1907,6 @@ static int igb_set_coalesce(struct net_device *netdev,
 			    struct ethtool_coalesce *ec)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
-	struct e1000_hw *hw = &adapter->hw;
 	int i;
 
 	if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
@@ -1925,8 +1924,11 @@ static int igb_set_coalesce(struct net_device *netdev,
 		adapter->itr = adapter->itr_setting;
 	}
 
-	for (i = 0; i < adapter->num_rx_queues; i++)
-		wr32(adapter->rx_ring[i].itr_register, adapter->itr);
+	for (i = 0; i < adapter->num_q_vectors; i++) {
+		struct igb_q_vector *q_vector = adapter->q_vector[i];
+		q_vector->itr_val = adapter->itr;
+		q_vector->set_itr = 1;
+	}
 
 	return 0;
 }
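Note the behavioral change above: igb_set_coalesce() no longer writes the new
ITR straight to a hardware register; it stages the value in each q_vector
(itr_val plus the set_itr flag), and igb_write_itr() pushes it to the EITR
register on the next interrupt (see the igb_main.c hunks below). Per the
comment in igb_set_itr(), writing immediately would reset the adapter's
internal timer mid-interval. A minimal sketch of that staging step, using
the fields the patch introduces (the helper name is hypothetical):

	/* stage a new ITR value; the ISR-side igb_write_itr() consumes
	 * set_itr and performs the actual writel() to the EITR register */
	static void stage_itr_sketch(struct igb_q_vector *q_vector, u16 itr)
	{
		q_vector->itr_val = itr;   /* not yet in hardware */
		q_vector->set_itr = 1;     /* request deferred write */
	}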
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 2ffe0997b838..c15eb4c39169 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -111,16 +111,14 @@ static void igb_set_uta(struct igb_adapter *adapter);
 static irqreturn_t igb_intr(int irq, void *);
 static irqreturn_t igb_intr_msi(int irq, void *);
 static irqreturn_t igb_msix_other(int irq, void *);
-static irqreturn_t igb_msix_rx(int irq, void *);
-static irqreturn_t igb_msix_tx(int irq, void *);
+static irqreturn_t igb_msix_ring(int irq, void *);
 #ifdef CONFIG_IGB_DCA
-static void igb_update_rx_dca(struct igb_ring *);
-static void igb_update_tx_dca(struct igb_ring *);
+static void igb_update_dca(struct igb_q_vector *);
 static void igb_setup_dca(struct igb_adapter *);
 #endif /* CONFIG_IGB_DCA */
-static bool igb_clean_tx_irq(struct igb_ring *);
+static bool igb_clean_tx_irq(struct igb_q_vector *);
 static int igb_poll(struct napi_struct *, int);
-static bool igb_clean_rx_irq_adv(struct igb_ring *, int *, int);
+static bool igb_clean_rx_irq_adv(struct igb_q_vector *, int *, int);
 static void igb_alloc_rx_buffers_adv(struct igb_ring *, int);
 static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
 static void igb_tx_timeout(struct net_device *);
@@ -374,7 +372,7 @@ module_exit(igb_exit_module);
 static void igb_cache_ring_register(struct igb_adapter *adapter)
 {
 	int i;
-	unsigned int rbase_offset = adapter->vfs_allocated_count;
+	u32 rbase_offset = adapter->vfs_allocated_count;
 
 	switch (adapter->hw.mac.type) {
 	case e1000_82576:
@@ -400,6 +398,18 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
 	}
 }
 
+static void igb_free_queues(struct igb_adapter *adapter)
+{
+	kfree(adapter->tx_ring);
+	kfree(adapter->rx_ring);
+
+	adapter->tx_ring = NULL;
+	adapter->rx_ring = NULL;
+
+	adapter->num_rx_queues = 0;
+	adapter->num_tx_queues = 0;
+}
+
 /**
  * igb_alloc_queues - Allocate memory for all rings
  * @adapter: board private structure to initialize
@@ -414,59 +424,48 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
 	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
 				   sizeof(struct igb_ring), GFP_KERNEL);
 	if (!adapter->tx_ring)
-		return -ENOMEM;
+		goto err;
 
 	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
 				   sizeof(struct igb_ring), GFP_KERNEL);
-	if (!adapter->rx_ring) {
-		kfree(adapter->tx_ring);
-		return -ENOMEM;
-	}
-
-	adapter->rx_ring->buddy = adapter->tx_ring;
+	if (!adapter->rx_ring)
+		goto err;
 
 	for (i = 0; i < adapter->num_tx_queues; i++) {
 		struct igb_ring *ring = &(adapter->tx_ring[i]);
 		ring->count = adapter->tx_ring_count;
-		ring->adapter = adapter;
 		ring->queue_index = i;
 	}
 	for (i = 0; i < adapter->num_rx_queues; i++) {
 		struct igb_ring *ring = &(adapter->rx_ring[i]);
 		ring->count = adapter->rx_ring_count;
-		ring->adapter = adapter;
 		ring->queue_index = i;
-		ring->itr_register = E1000_ITR;
-
-		/* set a default napi handler for each rx_ring */
-		netif_napi_add(adapter->netdev, &ring->napi, igb_poll, 64);
 	}
 
 	igb_cache_ring_register(adapter);
-	return 0;
-}
 
-static void igb_free_queues(struct igb_adapter *adapter)
-{
-	int i;
-
-	for (i = 0; i < adapter->num_rx_queues; i++)
-		netif_napi_del(&adapter->rx_ring[i].napi);
+	return 0;
 
-	adapter->num_rx_queues = 0;
-	adapter->num_tx_queues = 0;
+err:
+	igb_free_queues(adapter);
 
-	kfree(adapter->tx_ring);
-	kfree(adapter->rx_ring);
+	return -ENOMEM;
 }
 
 #define IGB_N0_QUEUE -1
-static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
-			      int tx_queue, int msix_vector)
+static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
 {
 	u32 msixbm = 0;
+	struct igb_adapter *adapter = q_vector->adapter;
 	struct e1000_hw *hw = &adapter->hw;
 	u32 ivar, index;
+	int rx_queue = IGB_N0_QUEUE;
+	int tx_queue = IGB_N0_QUEUE;
+
+	if (q_vector->rx_ring)
+		rx_queue = q_vector->rx_ring->reg_idx;
+	if (q_vector->tx_ring)
+		tx_queue = q_vector->tx_ring->reg_idx;
 
 	switch (hw->mac.type) {
 	case e1000_82575:
@@ -474,16 +473,12 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
 		   bitmask for the EICR/EIMS/EIMC registers.  To assign one
 		   or more queues to a vector, we write the appropriate bits
 		   into the MSIXBM register for that vector. */
-		if (rx_queue > IGB_N0_QUEUE) {
+		if (rx_queue > IGB_N0_QUEUE)
 			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
-			adapter->rx_ring[rx_queue].eims_value = msixbm;
-		}
-		if (tx_queue > IGB_N0_QUEUE) {
+		if (tx_queue > IGB_N0_QUEUE)
 			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
-			adapter->tx_ring[tx_queue].eims_value =
-				E1000_EICR_TX_QUEUE0 << tx_queue;
-		}
 		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
+		q_vector->eims_value = msixbm;
 		break;
 	case e1000_82576:
 		/* 82576 uses a table-based method for assigning vectors.
@@ -491,35 +486,34 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
 		   a vector number along with a "valid" bit.  Sadly, the layout
 		   of the table is somewhat counterintuitive. */
 		if (rx_queue > IGB_N0_QUEUE) {
-			index = (rx_queue >> 1) + adapter->vfs_allocated_count;
+			index = (rx_queue & 0x7);
 			ivar = array_rd32(E1000_IVAR0, index);
-			if (rx_queue & 0x1) {
-				/* vector goes into third byte of register */
-				ivar = ivar & 0xFF00FFFF;
-				ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
-			} else {
+			if (rx_queue < 8) {
 				/* vector goes into low byte of register */
 				ivar = ivar & 0xFFFFFF00;
 				ivar |= msix_vector | E1000_IVAR_VALID;
+			} else {
+				/* vector goes into third byte of register */
+				ivar = ivar & 0xFF00FFFF;
+				ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
 			}
-			adapter->rx_ring[rx_queue].eims_value= 1 << msix_vector;
 			array_wr32(E1000_IVAR0, index, ivar);
 		}
 		if (tx_queue > IGB_N0_QUEUE) {
-			index = (tx_queue >> 1) + adapter->vfs_allocated_count;
+			index = (tx_queue & 0x7);
 			ivar = array_rd32(E1000_IVAR0, index);
-			if (tx_queue & 0x1) {
-				/* vector goes into high byte of register */
-				ivar = ivar & 0x00FFFFFF;
-				ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
-			} else {
+			if (tx_queue < 8) {
 				/* vector goes into second byte of register */
 				ivar = ivar & 0xFFFF00FF;
 				ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
+			} else {
+				/* vector goes into high byte of register */
+				ivar = ivar & 0x00FFFFFF;
+				ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
 			}
-			adapter->tx_ring[tx_queue].eims_value= 1 << msix_vector;
 			array_wr32(E1000_IVAR0, index, ivar);
 		}
+		q_vector->eims_value = 1 << msix_vector;
 		break;
 	default:
 		BUG();
@@ -540,43 +534,10 @@ static void igb_configure_msix(struct igb_adapter *adapter)
 	struct e1000_hw *hw = &adapter->hw;
 
 	adapter->eims_enable_mask = 0;
-	if (hw->mac.type == e1000_82576)
-		/* Turn on MSI-X capability first, or our settings
-		 * won't stick.  And it will take days to debug. */
-		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
-		     E1000_GPIE_PBA | E1000_GPIE_EIAME |
-		     E1000_GPIE_NSICR);
-
-	for (i = 0; i < adapter->num_tx_queues; i++) {
-		struct igb_ring *tx_ring = &adapter->tx_ring[i];
-		igb_assign_vector(adapter, IGB_N0_QUEUE, i, vector++);
-		adapter->eims_enable_mask |= tx_ring->eims_value;
-		if (tx_ring->itr_val)
-			writel(tx_ring->itr_val,
-			       hw->hw_addr + tx_ring->itr_register);
-		else
-			writel(1, hw->hw_addr + tx_ring->itr_register);
-	}
-
-	for (i = 0; i < adapter->num_rx_queues; i++) {
-		struct igb_ring *rx_ring = &adapter->rx_ring[i];
-		rx_ring->buddy = NULL;
-		igb_assign_vector(adapter, i, IGB_N0_QUEUE, vector++);
-		adapter->eims_enable_mask |= rx_ring->eims_value;
-		if (rx_ring->itr_val)
-			writel(rx_ring->itr_val,
-			       hw->hw_addr + rx_ring->itr_register);
-		else
-			writel(1, hw->hw_addr + rx_ring->itr_register);
-	}
-
 
 	/* set vector for other causes, i.e. link changes */
 	switch (hw->mac.type) {
 	case e1000_82575:
-		array_wr32(E1000_MSIXBM(0), vector++,
-			   E1000_EIMS_OTHER);
-
 		tmp = rd32(E1000_CTRL_EXT);
 		/* enable MSI-X PBA support*/
 		tmp |= E1000_CTRL_EXT_PBA_CLR;
@@ -586,22 +547,40 @@ static void igb_configure_msix(struct igb_adapter *adapter)
 		tmp |= E1000_CTRL_EXT_IRCA;
 
 		wr32(E1000_CTRL_EXT, tmp);
-		adapter->eims_enable_mask |= E1000_EIMS_OTHER;
+
+		/* enable msix_other interrupt */
+		array_wr32(E1000_MSIXBM(0), vector++,
+			   E1000_EIMS_OTHER);
 		adapter->eims_other = E1000_EIMS_OTHER;
 
 		break;
 
 	case e1000_82576:
+		/* Turn on MSI-X capability first, or our settings
+		 * won't stick.  And it will take days to debug. */
+		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
+		     E1000_GPIE_PBA | E1000_GPIE_EIAME |
+		     E1000_GPIE_NSICR);
+
+		/* enable msix_other interrupt */
+		adapter->eims_other = 1 << vector;
 		tmp = (vector++ | E1000_IVAR_VALID) << 8;
-		wr32(E1000_IVAR_MISC, tmp);
 
-		adapter->eims_enable_mask = (1 << (vector)) - 1;
-		adapter->eims_other = 1 << (vector - 1);
+		wr32(E1000_IVAR_MISC, tmp);
 		break;
 	default:
 		/* do nothing, since nothing else supports MSI-X */
 		break;
 	} /* switch (hw->mac.type) */
+
+	adapter->eims_enable_mask |= adapter->eims_other;
+
+	for (i = 0; i < adapter->num_q_vectors; i++) {
+		struct igb_q_vector *q_vector = adapter->q_vector[i];
+		igb_assign_vector(q_vector, vector++);
+		adapter->eims_enable_mask |= q_vector->eims_value;
+	}
+
 	wrfl();
 }
 
@@ -614,43 +593,40 @@ static void igb_configure_msix(struct igb_adapter *adapter)
 static int igb_request_msix(struct igb_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
+	struct e1000_hw *hw = &adapter->hw;
 	int i, err = 0, vector = 0;
 
-	vector = 0;
-
-	for (i = 0; i < adapter->num_tx_queues; i++) {
-		struct igb_ring *ring = &(adapter->tx_ring[i]);
-		sprintf(ring->name, "%s-tx-%d", netdev->name, i);
-		err = request_irq(adapter->msix_entries[vector].vector,
-				  &igb_msix_tx, 0, ring->name,
-				  &(adapter->tx_ring[i]));
-		if (err)
-			goto out;
-		ring->itr_register = E1000_EITR(0) + (vector << 2);
-		ring->itr_val = 976; /* ~4000 ints/sec */
-		vector++;
-	}
-	for (i = 0; i < adapter->num_rx_queues; i++) {
-		struct igb_ring *ring = &(adapter->rx_ring[i]);
-		if (strlen(netdev->name) < (IFNAMSIZ - 5))
-			sprintf(ring->name, "%s-rx-%d", netdev->name, i);
+	err = request_irq(adapter->msix_entries[vector].vector,
+			  &igb_msix_other, 0, netdev->name, adapter);
+	if (err)
+		goto out;
+	vector++;
+
+	for (i = 0; i < adapter->num_q_vectors; i++) {
+		struct igb_q_vector *q_vector = adapter->q_vector[i];
+
+		q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);
+
+		if (q_vector->rx_ring && q_vector->tx_ring)
+			sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
+				q_vector->rx_ring->queue_index);
+		else if (q_vector->tx_ring)
+			sprintf(q_vector->name, "%s-tx-%u", netdev->name,
+				q_vector->tx_ring->queue_index);
+		else if (q_vector->rx_ring)
+			sprintf(q_vector->name, "%s-rx-%u", netdev->name,
+				q_vector->rx_ring->queue_index);
 		else
-			memcpy(ring->name, netdev->name, IFNAMSIZ);
+			sprintf(q_vector->name, "%s-unused", netdev->name);
+
 		err = request_irq(adapter->msix_entries[vector].vector,
-				  &igb_msix_rx, 0, ring->name,
-				  &(adapter->rx_ring[i]));
+				  &igb_msix_ring, 0, q_vector->name,
+				  q_vector);
 		if (err)
 			goto out;
-		ring->itr_register = E1000_EITR(0) + (vector << 2);
-		ring->itr_val = adapter->itr;
 		vector++;
 	}
 
-	err = request_irq(adapter->msix_entries[vector].vector,
-			  &igb_msix_other, 0, netdev->name, netdev);
-	if (err)
-		goto out;
-
 	igb_configure_msix(adapter);
 	return 0;
 out:
@@ -663,11 +639,44 @@ static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
 		pci_disable_msix(adapter->pdev);
 		kfree(adapter->msix_entries);
 		adapter->msix_entries = NULL;
-	} else if (adapter->flags & IGB_FLAG_HAS_MSI)
+	} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
 		pci_disable_msi(adapter->pdev);
-	return;
+	}
 }
 
+/**
+ * igb_free_q_vectors - Free memory allocated for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * This function frees the memory allocated to the q_vectors.  In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void igb_free_q_vectors(struct igb_adapter *adapter)
+{
+	int v_idx;
+
+	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
+		struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
+		adapter->q_vector[v_idx] = NULL;
+		netif_napi_del(&q_vector->napi);
+		kfree(q_vector);
+	}
+	adapter->num_q_vectors = 0;
+}
+
+/**
+ * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
+ *
+ * This function resets the device so that it has 0 rx queues, tx queues, and
+ * MSI-X interrupts allocated.
+ */
+static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
+{
+	igb_free_queues(adapter);
+	igb_free_q_vectors(adapter);
+	igb_reset_interrupt_capability(adapter);
+}
 
 /**
  * igb_set_interrupt_capability - set MSI or MSI-X if supported
@@ -681,11 +690,20 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter)
 	int numvecs, i;
 
 	/* Number of supported queues. */
-	/* Having more queues than CPUs doesn't make sense. */
 	adapter->num_rx_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
 	adapter->num_tx_queues = min_t(u32, IGB_MAX_TX_QUEUES, num_online_cpus());
 
-	numvecs = adapter->num_tx_queues + adapter->num_rx_queues + 1;
+	/* start with one vector for every rx queue */
+	numvecs = adapter->num_rx_queues;
+
+	/* if tx handler is seperate add 1 for every tx queue */
+	numvecs += adapter->num_tx_queues;
+
+	/* store the number of vectors reserved for queues */
+	adapter->num_q_vectors = numvecs;
+
+	/* add 1 vector for link status interrupts */
+	numvecs++;
 	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
 					GFP_KERNEL);
 	if (!adapter->msix_entries)
@@ -721,6 +739,7 @@ msi_only:
 #endif
 	adapter->num_rx_queues = 1;
 	adapter->num_tx_queues = 1;
+	adapter->num_q_vectors = 1;
 	if (!pci_enable_msi(adapter->pdev))
 		adapter->flags |= IGB_FLAG_HAS_MSI;
 out:
@@ -730,6 +749,139 @@ out:
 }
 
 /**
+ * igb_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one q_vector per queue interrupt.  If allocation fails we
+ * return -ENOMEM.
+ **/
+static int igb_alloc_q_vectors(struct igb_adapter *adapter)
+{
+	struct igb_q_vector *q_vector;
+	struct e1000_hw *hw = &adapter->hw;
+	int v_idx;
+
+	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
+		q_vector = kzalloc(sizeof(struct igb_q_vector), GFP_KERNEL);
+		if (!q_vector)
+			goto err_out;
+		q_vector->adapter = adapter;
+		q_vector->itr_shift = (hw->mac.type == e1000_82575) ? 16 : 0;
+		q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
+		q_vector->itr_val = IGB_START_ITR;
+		q_vector->set_itr = 1;
+		netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
+		adapter->q_vector[v_idx] = q_vector;
+	}
+	return 0;
+
+err_out:
+	while (v_idx) {
+		v_idx--;
+		q_vector = adapter->q_vector[v_idx];
+		netif_napi_del(&q_vector->napi);
+		kfree(q_vector);
+		adapter->q_vector[v_idx] = NULL;
+	}
+	return -ENOMEM;
+}
+
+static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
+				      int ring_idx, int v_idx)
+{
+	struct igb_q_vector *q_vector;
+
+	q_vector = adapter->q_vector[v_idx];
+	q_vector->rx_ring = &adapter->rx_ring[ring_idx];
+	q_vector->rx_ring->q_vector = q_vector;
+	q_vector->itr_val = adapter->itr;
+}
+
+static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
+				      int ring_idx, int v_idx)
+{
+	struct igb_q_vector *q_vector;
+
+	q_vector = adapter->q_vector[v_idx];
+	q_vector->tx_ring = &adapter->tx_ring[ring_idx];
+	q_vector->tx_ring->q_vector = q_vector;
+	q_vector->itr_val = adapter->itr;
+}
+
+/**
+ * igb_map_ring_to_vector - maps allocated queues to vectors
+ *
+ * This function maps the recently allocated queues to vectors.
+ **/
+static int igb_map_ring_to_vector(struct igb_adapter *adapter)
+{
+	int i;
+	int v_idx = 0;
+
+	if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
+	    (adapter->num_q_vectors < adapter->num_tx_queues))
+		return -ENOMEM;
+
+	if (adapter->num_q_vectors >=
+	    (adapter->num_rx_queues + adapter->num_tx_queues)) {
+		for (i = 0; i < adapter->num_rx_queues; i++)
+			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
+		for (i = 0; i < adapter->num_tx_queues; i++)
+			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
+	} else {
+		for (i = 0; i < adapter->num_rx_queues; i++) {
+			if (i < adapter->num_tx_queues)
+				igb_map_tx_ring_to_vector(adapter, i, v_idx);
+			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
+		}
+		for (; i < adapter->num_tx_queues; i++)
+			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
+	}
+	return 0;
+}
+
+/**
+ * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
+ *
+ * This function initializes the interrupts and allocates all of the queues.
+ **/
+static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	int err;
+
+	igb_set_interrupt_capability(adapter);
+
+	err = igb_alloc_q_vectors(adapter);
+	if (err) {
+		dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
+		goto err_alloc_q_vectors;
+	}
+
+	err = igb_alloc_queues(adapter);
+	if (err) {
+		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+		goto err_alloc_queues;
+	}
+
+	err = igb_map_ring_to_vector(adapter);
+	if (err) {
+		dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
+		goto err_map_queues;
+	}
+
+
+	return 0;
+err_map_queues:
+	igb_free_queues(adapter);
+err_alloc_queues:
+	igb_free_q_vectors(adapter);
+err_alloc_q_vectors:
+	igb_reset_interrupt_capability(adapter);
+	return err;
+}
+
+/**
  * igb_request_irq - initialize interrupts
  *
  * Attempts to configure interrupts using the best available
@@ -738,6 +890,7 @@ out:
 static int igb_request_irq(struct igb_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
+	struct pci_dev *pdev = adapter->pdev;
 	struct e1000_hw *hw = &adapter->hw;
 	int err = 0;
 
@@ -746,18 +899,36 @@ static int igb_request_irq(struct igb_adapter *adapter)
 		if (!err)
 			goto request_done;
 		/* fall back to MSI */
-		igb_reset_interrupt_capability(adapter);
+		igb_clear_interrupt_scheme(adapter);
 		if (!pci_enable_msi(adapter->pdev))
 			adapter->flags |= IGB_FLAG_HAS_MSI;
 		igb_free_all_tx_resources(adapter);
 		igb_free_all_rx_resources(adapter);
+		adapter->num_tx_queues = 1;
 		adapter->num_rx_queues = 1;
-		igb_alloc_queues(adapter);
+		adapter->num_q_vectors = 1;
+		err = igb_alloc_q_vectors(adapter);
+		if (err) {
+			dev_err(&pdev->dev,
+				"Unable to allocate memory for vectors\n");
+			goto request_done;
+		}
+		err = igb_alloc_queues(adapter);
+		if (err) {
+			dev_err(&pdev->dev,
+				"Unable to allocate memory for queues\n");
+			igb_free_q_vectors(adapter);
+			goto request_done;
+		}
+		igb_setup_all_tx_resources(adapter);
+		igb_setup_all_rx_resources(adapter);
 	} else {
 		switch (hw->mac.type) {
 		case e1000_82575:
 			wr32(E1000_MSIXBM(0),
-			     (E1000_EICR_RX_QUEUE0 | E1000_EIMS_OTHER));
+			     (E1000_EICR_RX_QUEUE0 |
+			      E1000_EICR_TX_QUEUE0 |
+			      E1000_EIMS_OTHER));
 			break;
 		case e1000_82576:
 			wr32(E1000_IVAR0, E1000_IVAR_VALID);
@@ -769,16 +940,17 @@ static int igb_request_irq(struct igb_adapter *adapter)
 
 	if (adapter->flags & IGB_FLAG_HAS_MSI) {
 		err = request_irq(adapter->pdev->irq, &igb_intr_msi, 0,
-				  netdev->name, netdev);
+				  netdev->name, adapter);
 		if (!err)
 			goto request_done;
+
 		/* fall back to legacy interrupts */
 		igb_reset_interrupt_capability(adapter);
 		adapter->flags &= ~IGB_FLAG_HAS_MSI;
 	}
 
 	err = request_irq(adapter->pdev->irq, &igb_intr, IRQF_SHARED,
-			  netdev->name, netdev);
+			  netdev->name, adapter);
 
 	if (err)
 		dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n",
@@ -790,23 +962,19 @@ request_done:
 
 static void igb_free_irq(struct igb_adapter *adapter)
 {
-	struct net_device *netdev = adapter->netdev;
-
 	if (adapter->msix_entries) {
 		int vector = 0, i;
 
-		for (i = 0; i < adapter->num_tx_queues; i++)
-			free_irq(adapter->msix_entries[vector++].vector,
-				 &(adapter->tx_ring[i]));
-		for (i = 0; i < adapter->num_rx_queues; i++)
-			free_irq(adapter->msix_entries[vector++].vector,
-				 &(adapter->rx_ring[i]));
+		free_irq(adapter->msix_entries[vector++].vector, adapter);
 
-		free_irq(adapter->msix_entries[vector++].vector, netdev);
-		return;
+		for (i = 0; i < adapter->num_q_vectors; i++) {
+			struct igb_q_vector *q_vector = adapter->q_vector[i];
+			free_irq(adapter->msix_entries[vector++].vector,
+				 q_vector);
+		}
+	} else {
+		free_irq(adapter->pdev->irq, adapter);
 	}
-
-	free_irq(adapter->pdev->irq, netdev);
 }
 
 /**
@@ -967,8 +1135,10 @@ int igb_up(struct igb_adapter *adapter)
 
 	clear_bit(__IGB_DOWN, &adapter->state);
 
-	for (i = 0; i < adapter->num_rx_queues; i++)
-		napi_enable(&adapter->rx_ring[i].napi);
+	for (i = 0; i < adapter->num_q_vectors; i++) {
+		struct igb_q_vector *q_vector = adapter->q_vector[i];
+		napi_enable(&q_vector->napi);
+	}
 	if (adapter->msix_entries)
 		igb_configure_msix(adapter);
 
@@ -1012,8 +1182,10 @@ void igb_down(struct igb_adapter *adapter)
 	wrfl();
 	msleep(10);
 
-	for (i = 0; i < adapter->num_rx_queues; i++)
-		napi_disable(&adapter->rx_ring[i].napi);
+	for (i = 0; i < adapter->num_q_vectors; i++) {
+		struct igb_q_vector *q_vector = adapter->q_vector[i];
+		napi_disable(&q_vector->napi);
+	}
 
 	igb_irq_disable(adapter);
 
@@ -1584,9 +1756,8 @@ err_eeprom:
 
 	if (hw->flash_address)
 		iounmap(hw->flash_address);
-
-	igb_free_queues(adapter);
 err_sw_init:
+	igb_clear_interrupt_scheme(adapter);
 	iounmap(hw->hw_addr);
 err_ioremap:
 	free_netdev(netdev);
@@ -1640,9 +1811,7 @@ static void __devexit igb_remove(struct pci_dev *pdev)
 	if (!igb_check_reset_block(&adapter->hw))
 		igb_reset_phy(&adapter->hw);
 
-	igb_reset_interrupt_capability(adapter);
-
-	igb_free_queues(adapter);
+	igb_clear_interrupt_scheme(adapter);
 
 #ifdef CONFIG_PCI_IOV
 	/* reclaim resources allocated to VFs */
@@ -1696,9 +1865,7 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
 
 	/* This call may decrease the number of queues depending on
 	 * interrupt mode. */
-	igb_set_interrupt_capability(adapter);
-
-	if (igb_alloc_queues(adapter)) {
+	if (igb_init_interrupt_scheme(adapter)) {
 		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
 		return -ENOMEM;
 	}
@@ -1768,8 +1935,10 @@ static int igb_open(struct net_device *netdev)
 	/* From here on the code is the same as igb_up() */
 	clear_bit(__IGB_DOWN, &adapter->state);
 
-	for (i = 0; i < adapter->num_rx_queues; i++)
-		napi_enable(&adapter->rx_ring[i].napi);
+	for (i = 0; i < adapter->num_q_vectors; i++) {
+		struct igb_q_vector *q_vector = adapter->q_vector[i];
+		napi_enable(&q_vector->napi);
+	}
 
 	/* Clear any pending interrupts. */
 	rd32(E1000_ICR);
@@ -1858,14 +2027,13 @@ int igb_setup_tx_resources(struct igb_adapter *adapter,
 	if (!tx_ring->desc)
 		goto err;
 
-	tx_ring->adapter = adapter;
 	tx_ring->next_to_use = 0;
 	tx_ring->next_to_clean = 0;
 	return 0;
 
 err:
 	vfree(tx_ring->buffer_info);
-	dev_err(&adapter->pdev->dev,
+	dev_err(&pdev->dev,
 		"Unable to allocate memory for the transmit descriptor ring\n");
 	return -ENOMEM;
 }
@@ -1996,8 +2164,6 @@ int igb_setup_rx_resources(struct igb_adapter *adapter,
 	rx_ring->next_to_clean = 0;
 	rx_ring->next_to_use = 0;
 
-	rx_ring->adapter = adapter;
-
 	return 0;
 
 err:
@@ -2308,7 +2474,7 @@ static void igb_configure_rx(struct igb_adapter *adapter)
  **/
 void igb_free_tx_resources(struct igb_ring *tx_ring)
 {
-	struct pci_dev *pdev = tx_ring->adapter->pdev;
+	struct pci_dev *pdev = tx_ring->q_vector->adapter->pdev;
 
 	igb_clean_tx_ring(tx_ring);
 
@@ -2354,7 +2520,7 @@ static void igb_unmap_and_free_tx_resource(struct igb_adapter *adapter,
  **/
 static void igb_clean_tx_ring(struct igb_ring *tx_ring)
 {
-	struct igb_adapter *adapter = tx_ring->adapter;
+	struct igb_adapter *adapter = tx_ring->q_vector->adapter;
 	struct igb_buffer *buffer_info;
 	unsigned long size;
 	unsigned int i;
@@ -2402,7 +2568,7 @@ static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
  **/
 void igb_free_rx_resources(struct igb_ring *rx_ring)
 {
-	struct pci_dev *pdev = rx_ring->adapter->pdev;
+	struct pci_dev *pdev = rx_ring->q_vector->adapter->pdev;
 
 	igb_clean_rx_ring(rx_ring);
 
@@ -2434,7 +2600,7 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter)
  **/
 static void igb_clean_rx_ring(struct igb_ring *rx_ring)
 {
-	struct igb_adapter *adapter = rx_ring->adapter;
+	struct igb_adapter *adapter = rx_ring->q_vector->adapter;
 	struct igb_buffer *buffer_info;
 	struct pci_dev *pdev = adapter->pdev;
 	unsigned long size;
@@ -2749,7 +2915,6 @@ static void igb_watchdog_task(struct work_struct *work)
 	struct net_device *netdev = adapter->netdev;
 	struct igb_ring *tx_ring = adapter->tx_ring;
 	u32 link;
-	u32 eics = 0;
 	int i;
 
 	link = igb_has_link(adapter);
@@ -2848,8 +3013,11 @@ link_up:
 
 	/* Cause software interrupt to ensure rx ring is cleaned */
 	if (adapter->msix_entries) {
-		for (i = 0; i < adapter->num_rx_queues; i++)
-			eics |= adapter->rx_ring[i].eims_value;
+		u32 eics = 0;
+		for (i = 0; i < adapter->num_q_vectors; i++) {
+			struct igb_q_vector *q_vector = adapter->q_vector[i];
+			eics |= q_vector->eims_value;
+		}
 		wr32(E1000_EICS, eics);
 	} else {
 		wr32(E1000_ICS, E1000_ICS_RXDMT0);
@@ -2886,25 +3054,37 @@ enum latency_range {
  *      parameter (see igb_param.c)
  *      NOTE:  This function is called only when operating in a multiqueue
  *             receive environment.
- * @rx_ring: pointer to ring
+ * @q_vector: pointer to q_vector
  **/
-static void igb_update_ring_itr(struct igb_ring *rx_ring)
+static void igb_update_ring_itr(struct igb_q_vector *q_vector)
 {
-	int new_val = rx_ring->itr_val;
+	int new_val = q_vector->itr_val;
 	int avg_wire_size = 0;
-	struct igb_adapter *adapter = rx_ring->adapter;
-
-	if (!rx_ring->total_packets)
-		goto clear_counts; /* no packets, so don't do anything */
+	struct igb_adapter *adapter = q_vector->adapter;
 
 	/* For non-gigabit speeds, just fix the interrupt rate at 4000
 	 * ints/sec - ITR timer value of 120 ticks.
 	 */
 	if (adapter->link_speed != SPEED_1000) {
-		new_val = 120;
+		new_val = 976;
 		goto set_itr_val;
 	}
-	avg_wire_size = rx_ring->total_bytes / rx_ring->total_packets;
+
+	if (q_vector->rx_ring && q_vector->rx_ring->total_packets) {
+		struct igb_ring *ring = q_vector->rx_ring;
+		avg_wire_size = ring->total_bytes / ring->total_packets;
+	}
+
+	if (q_vector->tx_ring && q_vector->tx_ring->total_packets) {
+		struct igb_ring *ring = q_vector->tx_ring;
+		avg_wire_size = max_t(u32, avg_wire_size,
+				      (ring->total_bytes /
+				       ring->total_packets));
+	}
+
+	/* if avg_wire_size isn't set no work was done */
+	if (!avg_wire_size)
+		goto clear_counts;
 
 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
 	avg_wire_size += 24;
@@ -2919,13 +3099,19 @@ static void igb_update_ring_itr(struct igb_ring *rx_ring)
 		new_val = avg_wire_size / 2;
 
 set_itr_val:
-	if (new_val != rx_ring->itr_val) {
-		rx_ring->itr_val = new_val;
-		rx_ring->set_itr = 1;
+	if (new_val != q_vector->itr_val) {
+		q_vector->itr_val = new_val;
+		q_vector->set_itr = 1;
 	}
 clear_counts:
-	rx_ring->total_bytes = 0;
-	rx_ring->total_packets = 0;
+	if (q_vector->rx_ring) {
+		q_vector->rx_ring->total_bytes = 0;
+		q_vector->rx_ring->total_packets = 0;
+	}
+	if (q_vector->tx_ring) {
+		q_vector->tx_ring->total_bytes = 0;
+		q_vector->tx_ring->total_packets = 0;
+	}
 }
 
 /**
@@ -2942,7 +3128,7 @@ clear_counts:
  *      NOTE:  These calculations are only valid when operating in a single-
  *             queue environment.
  * @adapter: pointer to adapter
- * @itr_setting: current adapter->itr
+ * @itr_setting: current q_vector->itr_val
  * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
  **/
@@ -2994,8 +3180,9 @@ update_itr_done:
 
 static void igb_set_itr(struct igb_adapter *adapter)
 {
+	struct igb_q_vector *q_vector = adapter->q_vector[0];
 	u16 current_itr;
-	u32 new_itr = adapter->itr;
+	u32 new_itr = q_vector->itr_val;
 
 	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
 	if (adapter->link_speed != SPEED_1000) {
@@ -3009,15 +3196,11 @@ static void igb_set_itr(struct igb_adapter *adapter)
 					    adapter->rx_ring->total_packets,
 					    adapter->rx_ring->total_bytes);
 
-	if (adapter->rx_ring->buddy) {
-		adapter->tx_itr = igb_update_itr(adapter,
-						 adapter->tx_itr,
-						 adapter->tx_ring->total_packets,
-						 adapter->tx_ring->total_bytes);
-		current_itr = max(adapter->rx_itr, adapter->tx_itr);
-	} else {
-		current_itr = adapter->rx_itr;
-	}
+	adapter->tx_itr = igb_update_itr(adapter,
+					 adapter->tx_itr,
+					 adapter->tx_ring->total_packets,
+					 adapter->tx_ring->total_bytes);
+	current_itr = max(adapter->rx_itr, adapter->tx_itr);
 
 	/* conservative mode (itr 3) eliminates the lowest_latency setting */
 	if (adapter->itr_setting == 3 && current_itr == lowest_latency)
@@ -3041,18 +3224,17 @@ static void igb_set_itr(struct igb_adapter *adapter)
 set_itr_now:
 	adapter->rx_ring->total_bytes = 0;
 	adapter->rx_ring->total_packets = 0;
-	if (adapter->rx_ring->buddy) {
-		adapter->rx_ring->buddy->total_bytes = 0;
-		adapter->rx_ring->buddy->total_packets = 0;
-	}
+	adapter->tx_ring->total_bytes = 0;
+	adapter->tx_ring->total_packets = 0;
 
-	if (new_itr != adapter->itr) {
+	if (new_itr != q_vector->itr_val) {
 		/* this attempts to bias the interrupt rate towards Bulk
 		 * by adding intermediate steps when interrupt rate is
 		 * increasing */
-		new_itr = new_itr > adapter->itr ?
-			  max((new_itr * adapter->itr) /
-			      (new_itr + (adapter->itr >> 2)), new_itr) :
+		new_itr = new_itr > q_vector->itr_val ?
+			  max((new_itr * q_vector->itr_val) /
+			      (new_itr + (q_vector->itr_val >> 2)),
+			      new_itr) :
 			  new_itr;
 		/* Don't write the value here; it resets the adapter's
 		 * internal timer, and causes us to delay far longer than
@@ -3060,15 +3242,13 @@ set_itr_now:
 	 * value at the beginning of the next interrupt so the timing
 	 * ends up being correct.
 	 */
-	adapter->itr = new_itr;
-	adapter->rx_ring->itr_val = new_itr;
-	adapter->rx_ring->set_itr = 1;
+	q_vector->itr_val = new_itr;
+	q_vector->set_itr = 1;
 	}
 
 	return;
 }
 
-
 #define IGB_TX_FLAGS_CSUM 0x00000001
 #define IGB_TX_FLAGS_VLAN 0x00000002
 #define IGB_TX_FLAGS_TSO 0x00000004
@@ -3781,14 +3961,12 @@ void igb_update_stats(struct igb_adapter *adapter)
 
 static irqreturn_t igb_msix_other(int irq, void *data)
 {
-	struct net_device *netdev = data;
-	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct igb_adapter *adapter = data;
 	struct e1000_hw *hw = &adapter->hw;
 	u32 icr = rd32(E1000_ICR);
-
 	/* reading ICR causes bit 31 of EICR to be cleared */
 
-	if(icr & E1000_ICR_DOUTSYNC) {
+	if (icr & E1000_ICR_DOUTSYNC) {
 		/* HW is reporting DMA is out of sync */
 		adapter->stats.doosync++;
 	}
@@ -3810,119 +3988,79 @@ static irqreturn_t igb_msix_other(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
-static irqreturn_t igb_msix_tx(int irq, void *data)
+static void igb_write_itr(struct igb_q_vector *q_vector)
 {
-	struct igb_ring *tx_ring = data;
-	struct igb_adapter *adapter = tx_ring->adapter;
-	struct e1000_hw *hw = &adapter->hw;
+	u32 itr_val = q_vector->itr_val & 0x7FFC;
 
-#ifdef CONFIG_IGB_DCA
-	if (adapter->flags & IGB_FLAG_DCA_ENABLED)
-		igb_update_tx_dca(tx_ring);
-#endif
+	if (!q_vector->set_itr)
+		return;
 
-	tx_ring->total_bytes = 0;
-	tx_ring->total_packets = 0;
+	if (!itr_val)
+		itr_val = 0x4;
 
-	/* auto mask will automatically reenable the interrupt when we write
-	 * EICS */
-	if (!igb_clean_tx_irq(tx_ring))
-		/* Ring was not completely cleaned, so fire another interrupt */
-		wr32(E1000_EICS, tx_ring->eims_value);
+	if (q_vector->itr_shift)
+		itr_val |= itr_val << q_vector->itr_shift;
 	else
-		wr32(E1000_EIMS, tx_ring->eims_value);
-
-	return IRQ_HANDLED;
-}
-
-static void igb_write_itr(struct igb_ring *ring)
-{
-	struct e1000_hw *hw = &ring->adapter->hw;
-	if ((ring->adapter->itr_setting & 3) && ring->set_itr) {
-		switch (hw->mac.type) {
-		case e1000_82576:
-			wr32(ring->itr_register, ring->itr_val |
-			     0x80000000);
-			break;
-		default:
-			wr32(ring->itr_register, ring->itr_val |
-			     (ring->itr_val << 16));
-			break;
-		}
-		ring->set_itr = 0;
-	}
+		itr_val |= 0x8000000;
+
+	writel(itr_val, q_vector->itr_register);
+	q_vector->set_itr = 0;
 }
 
-static irqreturn_t igb_msix_rx(int irq, void *data)
+static irqreturn_t igb_msix_ring(int irq, void *data)
 {
-	struct igb_ring *rx_ring = data;
-
-	/* Write the ITR value calculated at the end of the
-	 * previous interrupt.
-	 */
+	struct igb_q_vector *q_vector = data;
 
-	igb_write_itr(rx_ring);
+	/* Write the ITR value calculated from the previous interrupt. */
+	igb_write_itr(q_vector);
 
-	if (napi_schedule_prep(&rx_ring->napi))
-		__napi_schedule(&rx_ring->napi);
+	napi_schedule(&q_vector->napi);
 
-#ifdef CONFIG_IGB_DCA
-	if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED)
-		igb_update_rx_dca(rx_ring);
-#endif
-	return IRQ_HANDLED;
+	return IRQ_HANDLED;
 }
 
 #ifdef CONFIG_IGB_DCA
-static void igb_update_rx_dca(struct igb_ring *rx_ring)
+static void igb_update_dca(struct igb_q_vector *q_vector)
 {
-	u32 dca_rxctrl;
-	struct igb_adapter *adapter = rx_ring->adapter;
+	struct igb_adapter *adapter = q_vector->adapter;
 	struct e1000_hw *hw = &adapter->hw;
 	int cpu = get_cpu();
-	int q = rx_ring->reg_idx;
 
-	if (rx_ring->cpu != cpu) {
-		dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
-		if (hw->mac.type == e1000_82576) {
-			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
-			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
-				      E1000_DCA_RXCTRL_CPUID_SHIFT;
+	if (q_vector->cpu == cpu)
+		goto out_no_update;
+
+	if (q_vector->tx_ring) {
+		int q = q_vector->tx_ring->reg_idx;
+		u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
+		if (hw->mac.type == e1000_82575) {
+			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
+			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
3891 | } else { | 4038 | } else { |
4039 | dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576; | ||
4040 | dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) << | ||
4041 | E1000_DCA_TXCTRL_CPUID_SHIFT; | ||
4042 | } | ||
4043 | dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN; | ||
4044 | wr32(E1000_DCA_TXCTRL(q), dca_txctrl); | ||
4045 | } | ||
4046 | if (q_vector->rx_ring) { | ||
4047 | int q = q_vector->rx_ring->reg_idx; | ||
4048 | u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q)); | ||
4049 | if (hw->mac.type == e1000_82575) { | ||
3892 | dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK; | 4050 | dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK; |
3893 | dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); | 4051 | dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); |
4052 | } else { | ||
4053 | dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576; | ||
4054 | dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) << | ||
4055 | E1000_DCA_RXCTRL_CPUID_SHIFT; | ||
3894 | } | 4056 | } |
3895 | dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN; | 4057 | dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN; |
3896 | dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN; | 4058 | dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN; |
3897 | dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN; | 4059 | dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN; |
3898 | wr32(E1000_DCA_RXCTRL(q), dca_rxctrl); | 4060 | wr32(E1000_DCA_RXCTRL(q), dca_rxctrl); |
3899 | rx_ring->cpu = cpu; | ||
3900 | } | ||
3901 | put_cpu(); | ||
3902 | } | ||
3903 | |||
3904 | static void igb_update_tx_dca(struct igb_ring *tx_ring) | ||
3905 | { | ||
3906 | u32 dca_txctrl; | ||
3907 | struct igb_adapter *adapter = tx_ring->adapter; | ||
3908 | struct e1000_hw *hw = &adapter->hw; | ||
3909 | int cpu = get_cpu(); | ||
3910 | int q = tx_ring->reg_idx; | ||
3911 | |||
3912 | if (tx_ring->cpu != cpu) { | ||
3913 | dca_txctrl = rd32(E1000_DCA_TXCTRL(q)); | ||
3914 | if (hw->mac.type == e1000_82576) { | ||
3915 | dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576; | ||
3916 | dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) << | ||
3917 | E1000_DCA_TXCTRL_CPUID_SHIFT; | ||
3918 | } else { | ||
3919 | dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK; | ||
3920 | dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); | ||
3921 | } | ||
3922 | dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN; | ||
3923 | wr32(E1000_DCA_TXCTRL(q), dca_txctrl); | ||
3924 | tx_ring->cpu = cpu; | ||
3925 | } | 4061 | } |
4062 | q_vector->cpu = cpu; | ||
4063 | out_no_update: | ||
3926 | put_cpu(); | 4064 | put_cpu(); |
3927 | } | 4065 | } |
3928 | 4066 | ||
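Two details in the hunk above deserve a closer look. The rewritten igb_write_itr() replaces the old per-mac-type switch with data carried in the q_vector: a nonzero itr_shift mirrors the interval into the high half of EITR (the old default path), otherwise a high control bit is set (the old 82576 path). A compilable model of the encoding:

    #include <stdint.h>

    /* Model of the EITR encoding in the new igb_write_itr(). */
    static uint32_t eitr_encode(uint32_t itr_val, uint8_t itr_shift)
    {
            itr_val &= 0x7FFC;              /* keep the valid interval bits */
            if (!itr_val)
                    itr_val = 0x4;          /* never write a zero interval */
            if (itr_shift)
                    itr_val |= itr_val << itr_shift;
            else
                    itr_val |= 0x8000000;   /* constant as written in the patch */
            return itr_val;
    }

Note that the non-shift branch ORs in 0x8000000 (bit 27) where the removed 82576 code used 0x80000000 (bit 31); the dropped digit is worth flagging when reading this path. The other detail is igb_msix_ring(): one handler, one dev_id (the q_vector), one napi_schedule(); tx completion, rx cleanup and DCA retagging all move into the polling path.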
@@ -3937,13 +4075,10 @@ static void igb_setup_dca(struct igb_adapter *adapter) | |||
3937 | /* Always use CB2 mode, difference is masked in the CB driver. */ | 4075 | /* Always use CB2 mode, difference is masked in the CB driver. */ |
3938 | wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2); | 4076 | wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2); |
3939 | 4077 | ||
3940 | for (i = 0; i < adapter->num_tx_queues; i++) { | 4078 | for (i = 0; i < adapter->num_q_vectors; i++) { |
3941 | adapter->tx_ring[i].cpu = -1; | 4079 | struct igb_q_vector *q_vector = adapter->q_vector[i]; |
3942 | igb_update_tx_dca(&adapter->tx_ring[i]); | 4080 | q_vector->cpu = -1; |
3943 | } | 4081 | igb_update_dca(q_vector); |
3944 | for (i = 0; i < adapter->num_rx_queues; i++) { | ||
3945 | adapter->rx_ring[i].cpu = -1; | ||
3946 | igb_update_rx_dca(&adapter->rx_ring[i]); | ||
3947 | } | 4082 | } |
3948 | } | 4083 | } |
3949 | 4084 | ||
@@ -3972,7 +4107,7 @@ static int __igb_notify_dca(struct device *dev, void *data) | |||
3972 | case DCA_PROVIDER_REMOVE: | 4107 | case DCA_PROVIDER_REMOVE: |
3973 | if (adapter->flags & IGB_FLAG_DCA_ENABLED) { | 4108 | if (adapter->flags & IGB_FLAG_DCA_ENABLED) { |
3974 | /* without this a class_device is left | 4109 | /* without this a class_device is left |
3975 | * hanging around in the sysfs model */ | 4110 | * hanging around in the sysfs model */ |
3976 | dca_remove_requester(dev); | 4111 | dca_remove_requester(dev); |
3977 | dev_info(&adapter->pdev->dev, "DCA disabled\n"); | 4112 | dev_info(&adapter->pdev->dev, "DCA disabled\n"); |
3978 | adapter->flags &= ~IGB_FLAG_DCA_ENABLED; | 4113 | adapter->flags &= ~IGB_FLAG_DCA_ENABLED; |
@@ -4379,15 +4514,15 @@ static void igb_set_uta(struct igb_adapter *adapter) | |||
4379 | **/ | 4514 | **/ |
4380 | static irqreturn_t igb_intr_msi(int irq, void *data) | 4515 | static irqreturn_t igb_intr_msi(int irq, void *data) |
4381 | { | 4516 | { |
4382 | struct net_device *netdev = data; | 4517 | struct igb_adapter *adapter = data; |
4383 | struct igb_adapter *adapter = netdev_priv(netdev); | 4518 | struct igb_q_vector *q_vector = adapter->q_vector[0]; |
4384 | struct e1000_hw *hw = &adapter->hw; | 4519 | struct e1000_hw *hw = &adapter->hw; |
4385 | /* read ICR disables interrupts using IAM */ | 4520 | /* read ICR disables interrupts using IAM */ |
4386 | u32 icr = rd32(E1000_ICR); | 4521 | u32 icr = rd32(E1000_ICR); |
4387 | 4522 | ||
4388 | igb_write_itr(adapter->rx_ring); | 4523 | igb_write_itr(q_vector); |
4389 | 4524 | ||
4390 | if(icr & E1000_ICR_DOUTSYNC) { | 4525 | if (icr & E1000_ICR_DOUTSYNC) { |
4391 | /* HW is reporting DMA is out of sync */ | 4526 | /* HW is reporting DMA is out of sync */ |
4392 | adapter->stats.doosync++; | 4527 | adapter->stats.doosync++; |
4393 | } | 4528 | } |
@@ -4398,7 +4533,7 @@ static irqreturn_t igb_intr_msi(int irq, void *data) | |||
4398 | mod_timer(&adapter->watchdog_timer, jiffies + 1); | 4533 | mod_timer(&adapter->watchdog_timer, jiffies + 1); |
4399 | } | 4534 | } |
4400 | 4535 | ||
4401 | napi_schedule(&adapter->rx_ring[0].napi); | 4536 | napi_schedule(&q_vector->napi); |
4402 | 4537 | ||
4403 | return IRQ_HANDLED; | 4538 | return IRQ_HANDLED; |
4404 | } | 4539 | } |
@@ -4410,8 +4545,8 @@ static irqreturn_t igb_intr_msi(int irq, void *data) | |||
4410 | **/ | 4545 | **/ |
4411 | static irqreturn_t igb_intr(int irq, void *data) | 4546 | static irqreturn_t igb_intr(int irq, void *data) |
4412 | { | 4547 | { |
4413 | struct net_device *netdev = data; | 4548 | struct igb_adapter *adapter = data; |
4414 | struct igb_adapter *adapter = netdev_priv(netdev); | 4549 | struct igb_q_vector *q_vector = adapter->q_vector[0]; |
4415 | struct e1000_hw *hw = &adapter->hw; | 4550 | struct e1000_hw *hw = &adapter->hw; |
4416 | /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No | 4551 | /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No |
4417 | * need for the IMC write */ | 4552 | * need for the IMC write */ |
@@ -4419,14 +4554,14 @@ static irqreturn_t igb_intr(int irq, void *data) | |||
4419 | if (!icr) | 4554 | if (!icr) |
4420 | return IRQ_NONE; /* Not our interrupt */ | 4555 | return IRQ_NONE; /* Not our interrupt */ |
4421 | 4556 | ||
4422 | igb_write_itr(adapter->rx_ring); | 4557 | igb_write_itr(q_vector); |
4423 | 4558 | ||
4424 | /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is | 4559 | /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is |
4425 | * not set, then the adapter didn't send an interrupt */ | 4560 | * not set, then the adapter didn't send an interrupt */ |
4426 | if (!(icr & E1000_ICR_INT_ASSERTED)) | 4561 | if (!(icr & E1000_ICR_INT_ASSERTED)) |
4427 | return IRQ_NONE; | 4562 | return IRQ_NONE; |
4428 | 4563 | ||
4429 | if(icr & E1000_ICR_DOUTSYNC) { | 4564 | if (icr & E1000_ICR_DOUTSYNC) { |
4430 | /* HW is reporting DMA is out of sync */ | 4565 | /* HW is reporting DMA is out of sync */ |
4431 | adapter->stats.doosync++; | 4566 | adapter->stats.doosync++; |
4432 | } | 4567 | } |
@@ -4438,26 +4573,26 @@ static irqreturn_t igb_intr(int irq, void *data) | |||
4438 | mod_timer(&adapter->watchdog_timer, jiffies + 1); | 4573 | mod_timer(&adapter->watchdog_timer, jiffies + 1); |
4439 | } | 4574 | } |
4440 | 4575 | ||
4441 | napi_schedule(&adapter->rx_ring[0].napi); | 4576 | napi_schedule(&q_vector->napi); |
4442 | 4577 | ||
4443 | return IRQ_HANDLED; | 4578 | return IRQ_HANDLED; |
4444 | } | 4579 | } |
4445 | 4580 | ||
4446 | static inline void igb_rx_irq_enable(struct igb_ring *rx_ring) | 4581 | static inline void igb_ring_irq_enable(struct igb_q_vector *q_vector) |
4447 | { | 4582 | { |
4448 | struct igb_adapter *adapter = rx_ring->adapter; | 4583 | struct igb_adapter *adapter = q_vector->adapter; |
4449 | struct e1000_hw *hw = &adapter->hw; | 4584 | struct e1000_hw *hw = &adapter->hw; |
4450 | 4585 | ||
4451 | if (adapter->itr_setting & 3) { | 4586 | if (adapter->itr_setting & 3) { |
4452 | if (adapter->num_rx_queues == 1) | 4587 | if (!adapter->msix_entries) |
4453 | igb_set_itr(adapter); | 4588 | igb_set_itr(adapter); |
4454 | else | 4589 | else |
4455 | igb_update_ring_itr(rx_ring); | 4590 | igb_update_ring_itr(q_vector); |
4456 | } | 4591 | } |
4457 | 4592 | ||
4458 | if (!test_bit(__IGB_DOWN, &adapter->state)) { | 4593 | if (!test_bit(__IGB_DOWN, &adapter->state)) { |
4459 | if (adapter->msix_entries) | 4594 | if (adapter->msix_entries) |
4460 | wr32(E1000_EIMS, rx_ring->eims_value); | 4595 | wr32(E1000_EIMS, q_vector->eims_value); |
4461 | else | 4596 | else |
4462 | igb_irq_enable(adapter); | 4597 | igb_irq_enable(adapter); |
4463 | } | 4598 | } |
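igb_ring_irq_enable() makes two independent decisions: which ITR recalculation to run, and how to unmask the interrupt. The first test changed from "num_rx_queues == 1" to "!adapter->msix_entries", so legacy and MSI modes now always take the global igb_set_itr() path regardless of ring count. A small decision model (type and helper names hypothetical):

    #include <stdbool.h>

    enum itr_update { ITR_NONE, ITR_GLOBAL, ITR_PER_VECTOR };
    enum irq_unmask { UNMASK_NONE, UNMASK_EIMS_BIT, UNMASK_GLOBAL };

    /* The two decisions in igb_ring_irq_enable(), as pure functions. */
    static enum itr_update pick_itr(bool dynamic_itr, bool msix)
    {
            if (!dynamic_itr)
                    return ITR_NONE;
            return msix ? ITR_PER_VECTOR : ITR_GLOBAL;
    }

    static enum irq_unmask pick_unmask(bool going_down, bool msix)
    {
            if (going_down)
                    return UNMASK_NONE;     /* __IGB_DOWN: stay masked */
            return msix ? UNMASK_EIMS_BIT : UNMASK_GLOBAL;
    }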
@@ -4470,28 +4605,28 @@ static inline void igb_rx_irq_enable(struct igb_ring *rx_ring) | |||
4470 | **/ | 4605 | **/ |
4471 | static int igb_poll(struct napi_struct *napi, int budget) | 4606 | static int igb_poll(struct napi_struct *napi, int budget) |
4472 | { | 4607 | { |
4473 | struct igb_ring *rx_ring = container_of(napi, struct igb_ring, napi); | 4608 | struct igb_q_vector *q_vector = container_of(napi, |
4474 | int work_done = 0; | 4609 | struct igb_q_vector, |
4610 | napi); | ||
4611 | int tx_clean_complete = 1, work_done = 0; | ||
4475 | 4612 | ||
4476 | #ifdef CONFIG_IGB_DCA | 4613 | #ifdef CONFIG_IGB_DCA |
4477 | if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED) | 4614 | if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED) |
4478 | igb_update_rx_dca(rx_ring); | 4615 | igb_update_dca(q_vector); |
4479 | #endif | 4616 | #endif |
4480 | igb_clean_rx_irq_adv(rx_ring, &work_done, budget); | 4617 | if (q_vector->tx_ring) |
4618 | tx_clean_complete = igb_clean_tx_irq(q_vector); | ||
4481 | 4619 | ||
4482 | if (rx_ring->buddy) { | 4620 | if (q_vector->rx_ring) |
4483 | #ifdef CONFIG_IGB_DCA | 4621 | igb_clean_rx_irq_adv(q_vector, &work_done, budget); |
4484 | if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED) | 4622 | |
4485 | igb_update_tx_dca(rx_ring->buddy); | 4623 | if (!tx_clean_complete) |
4486 | #endif | 4624 | work_done = budget; |
4487 | if (!igb_clean_tx_irq(rx_ring->buddy)) | ||
4488 | work_done = budget; | ||
4489 | } | ||
4490 | 4625 | ||
4491 | /* If not enough Rx work done, exit the polling mode */ | 4626 | /* If not enough Rx work done, exit the polling mode */ |
4492 | if (work_done < budget) { | 4627 | if (work_done < budget) { |
4493 | napi_complete(napi); | 4628 | napi_complete(napi); |
4494 | igb_rx_irq_enable(rx_ring); | 4629 | igb_ring_irq_enable(q_vector); |
4495 | } | 4630 | } |
4496 | 4631 | ||
4497 | return work_done; | 4632 | return work_done; |
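The rewritten igb_poll() is the heart of the patch: a NAPI context may own a tx ring, an rx ring, or both, and a tx ring that was not fully cleaned inflates work_done to the budget so NAPI stays scheduled instead of re-arming the interrupt. A stub-typed sketch of that control flow (stubs stand in for the kernel types and clean routines):

    #include <stdbool.h>

    struct ring;                            /* stand-in for struct igb_ring */
    struct qv { struct ring *tx, *rx; };    /* stand-in for igb_q_vector */

    static bool clean_tx(struct qv *q) { (void)q; return true; }    /* stub */
    static void clean_rx(struct qv *q, int *done, int budget)       /* stub */
    { (void)q; (void)done; (void)budget; }

    /* Shape of the consolidated poll loop. */
    static int poll_model(struct qv *q, int budget)
    {
            bool tx_done = true;
            int work = 0;

            if (q->tx)
                    tx_done = clean_tx(q);
            if (q->rx)
                    clean_rx(q, &work, budget);
            if (!tx_done)
                    work = budget;  /* pending tx: claim the full budget so
                                     * NAPI polls again */
            return work;            /* caller completes NAPI iff work < budget */
    }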
@@ -4533,12 +4668,13 @@ static void igb_tx_hwtstamp(struct igb_adapter *adapter, struct sk_buff *skb) | |||
4533 | 4668 | ||
4534 | /** | 4669 | /** |
4535 | * igb_clean_tx_irq - Reclaim resources after transmit completes | 4670 | * igb_clean_tx_irq - Reclaim resources after transmit completes |
4536 | * @adapter: board private structure | 4671 | * @q_vector: pointer to q_vector containing needed info |
4537 | * returns true if ring is completely cleaned | 4672 | * returns true if ring is completely cleaned |
4538 | **/ | 4673 | **/ |
4539 | static bool igb_clean_tx_irq(struct igb_ring *tx_ring) | 4674 | static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) |
4540 | { | 4675 | { |
4541 | struct igb_adapter *adapter = tx_ring->adapter; | 4676 | struct igb_adapter *adapter = q_vector->adapter; |
4677 | struct igb_ring *tx_ring = q_vector->tx_ring; | ||
4542 | struct net_device *netdev = adapter->netdev; | 4678 | struct net_device *netdev = adapter->netdev; |
4543 | struct e1000_hw *hw = &adapter->hw; | 4679 | struct e1000_hw *hw = &adapter->hw; |
4544 | struct igb_buffer *buffer_info; | 4680 | struct igb_buffer *buffer_info; |
@@ -4646,25 +4782,21 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring) | |||
4646 | 4782 | ||
4647 | /** | 4783 | /** |
4648 | * igb_receive_skb - helper function to handle rx indications | 4784 | * igb_receive_skb - helper function to handle rx indications |
4649 | * @ring: pointer to receive ring receving this packet | 4785 | * @q_vector: structure containing interrupt and ring information |
4650 | * @status: descriptor status field as written by hardware | 4786 | * @skb: packet to send up |
4651 | * @rx_desc: receive descriptor containing vlan and type information. | 4787 | * @vlan_tag: vlan tag for packet |
4652 | * @skb: pointer to sk_buff to be indicated to stack | ||
4653 | **/ | 4788 | **/ |
4654 | static void igb_receive_skb(struct igb_ring *ring, u8 status, | 4789 | static void igb_receive_skb(struct igb_q_vector *q_vector, |
4655 | union e1000_adv_rx_desc * rx_desc, | 4790 | struct sk_buff *skb, |
4656 | struct sk_buff *skb) | 4791 | u16 vlan_tag) |
4657 | { | 4792 | { |
4658 | struct igb_adapter * adapter = ring->adapter; | 4793 | struct igb_adapter *adapter = q_vector->adapter; |
4659 | bool vlan_extracted = (adapter->vlgrp && (status & E1000_RXD_STAT_VP)); | 4794 | |
4660 | 4795 | if (vlan_tag) | |
4661 | skb_record_rx_queue(skb, ring->queue_index); | 4796 | vlan_gro_receive(&q_vector->napi, adapter->vlgrp, |
4662 | if (vlan_extracted) | 4797 | vlan_tag, skb); |
4663 | vlan_gro_receive(&ring->napi, adapter->vlgrp, | ||
4664 | le16_to_cpu(rx_desc->wb.upper.vlan), | ||
4665 | skb); | ||
4666 | else | 4798 | else |
4667 | napi_gro_receive(&ring->napi, skb); | 4799 | napi_gro_receive(&q_vector->napi, skb); |
4668 | } | 4800 | } |
4669 | 4801 | ||
4670 | static inline void igb_rx_checksum_adv(struct igb_adapter *adapter, | 4802 | static inline void igb_rx_checksum_adv(struct igb_adapter *adapter, |
@@ -4712,11 +4844,12 @@ static inline u16 igb_get_hlen(struct igb_adapter *adapter, | |||
4712 | return hlen; | 4844 | return hlen; |
4713 | } | 4845 | } |
4714 | 4846 | ||
4715 | static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring, | 4847 | static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector, |
4716 | int *work_done, int budget) | 4848 | int *work_done, int budget) |
4717 | { | 4849 | { |
4718 | struct igb_adapter *adapter = rx_ring->adapter; | 4850 | struct igb_adapter *adapter = q_vector->adapter; |
4719 | struct net_device *netdev = adapter->netdev; | 4851 | struct net_device *netdev = adapter->netdev; |
4852 | struct igb_ring *rx_ring = q_vector->rx_ring; | ||
4720 | struct e1000_hw *hw = &adapter->hw; | 4853 | struct e1000_hw *hw = &adapter->hw; |
4721 | struct pci_dev *pdev = adapter->pdev; | 4854 | struct pci_dev *pdev = adapter->pdev; |
4722 | union e1000_adv_rx_desc *rx_desc , *next_rxd; | 4855 | union e1000_adv_rx_desc *rx_desc , *next_rxd; |
@@ -4728,6 +4861,7 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring, | |||
4728 | unsigned int i; | 4861 | unsigned int i; |
4729 | u32 staterr; | 4862 | u32 staterr; |
4730 | u16 length; | 4863 | u16 length; |
4864 | u16 vlan_tag; | ||
4731 | 4865 | ||
4732 | i = rx_ring->next_to_clean; | 4866 | i = rx_ring->next_to_clean; |
4733 | buffer_info = &rx_ring->buffer_info[i]; | 4867 | buffer_info = &rx_ring->buffer_info[i]; |
@@ -4855,8 +4989,12 @@ send_up: | |||
4855 | igb_rx_checksum_adv(adapter, staterr, skb); | 4989 | igb_rx_checksum_adv(adapter, staterr, skb); |
4856 | 4990 | ||
4857 | skb->protocol = eth_type_trans(skb, netdev); | 4991 | skb->protocol = eth_type_trans(skb, netdev); |
4992 | skb_record_rx_queue(skb, rx_ring->queue_index); | ||
4993 | |||
4994 | vlan_tag = ((staterr & E1000_RXD_STAT_VP) ? | ||
4995 | le16_to_cpu(rx_desc->wb.upper.vlan) : 0); | ||
4858 | 4996 | ||
4859 | igb_receive_skb(rx_ring, staterr, rx_desc, skb); | 4997 | igb_receive_skb(q_vector, skb, vlan_tag); |
4860 | 4998 | ||
4861 | next_desc: | 4999 | next_desc: |
4862 | rx_desc->wb.upper.status_error = 0; | 5000 | rx_desc->wb.upper.status_error = 0; |
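Vlan handling moves out of igb_receive_skb(): the rx clean loop derives a plain u16 tag from the descriptor, and the helper merely dispatches on it, vlan_gro_receive() for a nonzero tag and napi_gro_receive() otherwise. The extraction models as (le16toh() stands in for the kernel's le16_to_cpu(); the VP bit value is an illustrative stand-in for E1000_RXD_STAT_VP):

    #include <stdint.h>
    #include <endian.h>

    #define RXD_STAT_VP 0x08u       /* illustrative */

    /* Tag extraction as done in the patched igb_clean_rx_irq_adv(): the
     * descriptor's vlan field is meaningful only when the VP status bit
     * is set, and is little-endian in the descriptor. */
    static uint16_t rx_vlan_tag(uint32_t staterr, uint16_t vlan_le)
    {
            return (staterr & RXD_STAT_VP) ? (uint16_t)le16toh(vlan_le) : 0;
    }

One behavioural nuance visible in the diff: the old path also required adapter->vlgrp before taking the vlan branch, while the new dispatch keys only on the tag being nonzero.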
@@ -4895,7 +5033,7 @@ next_desc: | |||
4895 | static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, | 5033 | static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, |
4896 | int cleaned_count) | 5034 | int cleaned_count) |
4897 | { | 5035 | { |
4898 | struct igb_adapter *adapter = rx_ring->adapter; | 5036 | struct igb_adapter *adapter = rx_ring->q_vector->adapter; |
4899 | struct net_device *netdev = adapter->netdev; | 5037 | struct net_device *netdev = adapter->netdev; |
4900 | struct pci_dev *pdev = adapter->pdev; | 5038 | struct pci_dev *pdev = adapter->pdev; |
4901 | union e1000_adv_rx_desc *rx_desc; | 5039 | union e1000_adv_rx_desc *rx_desc; |
@@ -5360,9 +5498,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake) | |||
5360 | if (netif_running(netdev)) | 5498 | if (netif_running(netdev)) |
5361 | igb_close(netdev); | 5499 | igb_close(netdev); |
5362 | 5500 | ||
5363 | igb_reset_interrupt_capability(adapter); | 5501 | igb_clear_interrupt_scheme(adapter); |
5364 | |||
5365 | igb_free_queues(adapter); | ||
5366 | 5502 | ||
5367 | #ifdef CONFIG_PM | 5503 | #ifdef CONFIG_PM |
5368 | retval = pci_save_state(pdev); | 5504 | retval = pci_save_state(pdev); |
@@ -5457,9 +5593,7 @@ static int igb_resume(struct pci_dev *pdev) | |||
5457 | pci_enable_wake(pdev, PCI_D3hot, 0); | 5593 | pci_enable_wake(pdev, PCI_D3hot, 0); |
5458 | pci_enable_wake(pdev, PCI_D3cold, 0); | 5594 | pci_enable_wake(pdev, PCI_D3cold, 0); |
5459 | 5595 | ||
5460 | igb_set_interrupt_capability(adapter); | 5596 | if (igb_init_interrupt_scheme(adapter)) { |
5461 | |||
5462 | if (igb_alloc_queues(adapter)) { | ||
5463 | dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); | 5597 | dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); |
5464 | return -ENOMEM; | 5598 | return -ENOMEM; |
5465 | } | 5599 | } |
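The suspend and resume hunks collapse two-step sequences into single entry points: igb_clear_interrupt_scheme() on the way down, igb_init_interrupt_scheme() on the way up. Judging only from the call sites replaced here, the init side presumably bundles at least the old pair; a hypothetical composition consistent with this diff (the real helper is defined earlier in the patch and likely also manages the new q_vectors):

    /* Hypothetical sketch only, inferred from the replaced call sites. */
    static int igb_init_interrupt_scheme_sketch(struct igb_adapter *adapter)
    {
            igb_set_interrupt_capability(adapter);  /* pick MSI-X/MSI/legacy */
            return igb_alloc_queues(adapter);       /* nonzero on failure */
    }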
@@ -5511,22 +5645,16 @@ static void igb_netpoll(struct net_device *netdev) | |||
5511 | int i; | 5645 | int i; |
5512 | 5646 | ||
5513 | if (!adapter->msix_entries) { | 5647 | if (!adapter->msix_entries) { |
5648 | struct igb_q_vector *q_vector = adapter->q_vector[0]; | ||
5514 | igb_irq_disable(adapter); | 5649 | igb_irq_disable(adapter); |
5515 | napi_schedule(&adapter->rx_ring[0].napi); | 5650 | napi_schedule(&q_vector->napi); |
5516 | return; | 5651 | return; |
5517 | } | 5652 | } |
5518 | 5653 | ||
5519 | for (i = 0; i < adapter->num_tx_queues; i++) { | 5654 | for (i = 0; i < adapter->num_q_vectors; i++) { |
5520 | struct igb_ring *tx_ring = &adapter->tx_ring[i]; | 5655 | struct igb_q_vector *q_vector = adapter->q_vector[i]; |
5521 | wr32(E1000_EIMC, tx_ring->eims_value); | 5656 | wr32(E1000_EIMC, q_vector->eims_value); |
5522 | igb_clean_tx_irq(tx_ring); | 5657 | napi_schedule(&q_vector->napi); |
5523 | wr32(E1000_EIMS, tx_ring->eims_value); | ||
5524 | } | ||
5525 | |||
5526 | for (i = 0; i < adapter->num_rx_queues; i++) { | ||
5527 | struct igb_ring *rx_ring = &adapter->rx_ring[i]; | ||
5528 | wr32(E1000_EIMC, rx_ring->eims_value); | ||
5529 | napi_schedule(&rx_ring->napi); | ||
5530 | } | 5658 | } |
5531 | } | 5659 | } |
5532 | #endif /* CONFIG_NET_POLL_CONTROLLER */ | 5660 | #endif /* CONFIG_NET_POLL_CONTROLLER */ |
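igb_netpoll() gets the same consolidation: with MSI-X it no longer calls igb_clean_tx_irq() inline or unmasks EIMS itself. Each vector is masked via EIMC and its NAPI context scheduled, so tx and rx cleanup both funnel through igb_poll(), which re-enables the vector when it completes. The per-vector step reduces to:

    /* Per-vector netpoll step, as in the patched loop above (kernel
     * context; wr32() and E1000_EIMC as used throughout the driver). */
    wr32(E1000_EIMC, q_vector->eims_value); /* mask this vector */
    napi_schedule(&q_vector->napi);         /* defer all work to igb_poll() */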