Diffstat (limited to 'drivers/net/ixgbe/ixgbe_main.c')
-rw-r--r-- drivers/net/ixgbe/ixgbe_main.c | 1520
1 file changed, 1176 insertions(+), 344 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index c2095ce531c9..7ad2993dc581 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -48,7 +48,7 @@ char ixgbe_driver_name[] = "ixgbe"; | |||
48 | static const char ixgbe_driver_string[] = | 48 | static const char ixgbe_driver_string[] = |
49 | "Intel(R) 10 Gigabit PCI Express Network Driver"; | 49 | "Intel(R) 10 Gigabit PCI Express Network Driver"; |
50 | 50 | ||
51 | #define DRV_VERSION "1.1.18" | 51 | #define DRV_VERSION "1.3.18-k2" |
52 | const char ixgbe_driver_version[] = DRV_VERSION; | 52 | const char ixgbe_driver_version[] = DRV_VERSION; |
53 | static const char ixgbe_copyright[] = | 53 | static const char ixgbe_copyright[] = |
54 | "Copyright (c) 1999-2007 Intel Corporation."; | 54 | "Copyright (c) 1999-2007 Intel Corporation."; |
@@ -80,6 +80,16 @@ static struct pci_device_id ixgbe_pci_tbl[] = { | |||
80 | }; | 80 | }; |
81 | MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl); | 81 | MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl); |
82 | 82 | ||
83 | #ifdef CONFIG_DCA | ||
84 | static int ixgbe_notify_dca(struct notifier_block *, unsigned long event, | ||
85 | void *p); | ||
86 | static struct notifier_block dca_notifier = { | ||
87 | .notifier_call = ixgbe_notify_dca, | ||
88 | .next = NULL, | ||
89 | .priority = 0 | ||
90 | }; | ||
91 | #endif | ||
92 | |||
83 | MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); | 93 | MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); |
84 | MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver"); | 94 | MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver"); |
85 | MODULE_LICENSE("GPL"); | 95 | MODULE_LICENSE("GPL"); |
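
The hunk above only declares the DCA notifier; the dca_register_notify()/dca_unregister_notify() calls that hook it up land in the module init/exit paths later in this patch, outside this section. A minimal sketch of that wiring, assuming the <linux/dca.h> API of this kernel generation:

	#ifdef CONFIG_DCA
	#include <linux/dca.h>
	#endif

	static int __init ixgbe_init_module(void)
	{
	#ifdef CONFIG_DCA
		/* start receiving DCA_PROVIDER_ADD/REMOVE events */
		dca_register_notify(&dca_notifier);
	#endif
		return pci_register_driver(&ixgbe_driver);
	}

	static void __exit ixgbe_exit_module(void)
	{
	#ifdef CONFIG_DCA
		dca_unregister_notify(&dca_notifier);
	#endif
		pci_unregister_driver(&ixgbe_driver);
	}
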
@@ -256,26 +266,125 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter, | |||
256 | * sees the new next_to_clean. | 266 | * sees the new next_to_clean. |
257 | */ | 267 | */ |
258 | smp_mb(); | 268 | smp_mb(); |
269 | #ifdef CONFIG_NETDEVICES_MULTIQUEUE | ||
270 | if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) && | ||
271 | !test_bit(__IXGBE_DOWN, &adapter->state)) { | ||
272 | netif_wake_subqueue(netdev, tx_ring->queue_index); | ||
273 | adapter->restart_queue++; | ||
274 | } | ||
275 | #else | ||
259 | if (netif_queue_stopped(netdev) && | 276 | if (netif_queue_stopped(netdev) && |
260 | !test_bit(__IXGBE_DOWN, &adapter->state)) { | 277 | !test_bit(__IXGBE_DOWN, &adapter->state)) { |
261 | netif_wake_queue(netdev); | 278 | netif_wake_queue(netdev); |
262 | adapter->restart_queue++; | 279 | adapter->restart_queue++; |
263 | } | 280 | } |
281 | #endif | ||
264 | } | 282 | } |
265 | 283 | ||
266 | if (adapter->detect_tx_hung) | 284 | if (adapter->detect_tx_hung) |
267 | if (ixgbe_check_tx_hang(adapter, tx_ring, eop, eop_desc)) | 285 | if (ixgbe_check_tx_hang(adapter, tx_ring, eop, eop_desc)) |
286 | #ifdef CONFIG_NETDEVICES_MULTIQUEUE | ||
287 | netif_stop_subqueue(netdev, tx_ring->queue_index); | ||
288 | #else | ||
268 | netif_stop_queue(netdev); | 289 | netif_stop_queue(netdev); |
290 | #endif | ||
269 | 291 | ||
270 | if (total_tx_packets >= tx_ring->work_limit) | 292 | if (total_tx_packets >= tx_ring->work_limit) |
271 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, tx_ring->eims_value); | 293 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, tx_ring->eims_value); |
272 | 294 | ||
295 | tx_ring->total_bytes += total_tx_bytes; | ||
296 | tx_ring->total_packets += total_tx_packets; | ||
273 | adapter->net_stats.tx_bytes += total_tx_bytes; | 297 | adapter->net_stats.tx_bytes += total_tx_bytes; |
274 | adapter->net_stats.tx_packets += total_tx_packets; | 298 | adapter->net_stats.tx_packets += total_tx_packets; |
275 | cleaned = total_tx_packets ? true : false; | 299 | cleaned = total_tx_packets ? true : false; |
276 | return cleaned; | 300 | return cleaned; |
277 | } | 301 | } |
278 | 302 | ||
303 | #ifdef CONFIG_DCA | ||
304 | static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter, | ||
305 | struct ixgbe_ring *rxr) | ||
306 | { | ||
307 | u32 rxctrl; | ||
308 | int cpu = get_cpu(); | ||
309 | int q = rxr - adapter->rx_ring; | ||
310 | |||
311 | if (rxr->cpu != cpu) { | ||
312 | rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q)); | ||
313 | rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK; | ||
314 | rxctrl |= dca_get_tag(cpu); | ||
315 | rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN; | ||
316 | rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN; | ||
317 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl); | ||
318 | rxr->cpu = cpu; | ||
319 | } | ||
320 | put_cpu(); | ||
321 | } | ||
322 | |||
323 | static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter, | ||
324 | struct ixgbe_ring *txr) | ||
325 | { | ||
326 | u32 txctrl; | ||
327 | int cpu = get_cpu(); | ||
328 | int q = txr - adapter->tx_ring; | ||
329 | |||
330 | if (txr->cpu != cpu) { | ||
331 | txctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q)); | ||
332 | txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK; | ||
333 | txctrl |= dca_get_tag(cpu); | ||
334 | txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN; | ||
335 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q), txctrl); | ||
336 | txr->cpu = cpu; | ||
337 | } | ||
338 | put_cpu(); | ||
339 | } | ||
340 | |||
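
Both helpers above follow the same idiom: the ring caches the CPU it was last tagged for, so the register read-modify-write only happens when the interrupt has migrated, and get_cpu()/put_cpu() pin the CPU id for the duration. Condensed to its skeleton (the combined helper and its reg parameter are illustrative, not a third driver path):

	static void retag_for_cpu(struct ixgbe_adapter *adapter,
	                          struct ixgbe_ring *ring, u32 reg)
	{
		int cpu = get_cpu();	/* disables preemption while we look */

		if (ring->cpu != cpu) {
			u32 ctrl = IXGBE_READ_REG(&adapter->hw, reg);

			ctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;	/* drop stale tag */
			ctrl |= dca_get_tag(cpu);		/* tag for this CPU */
			IXGBE_WRITE_REG(&adapter->hw, reg, ctrl);
			ring->cpu = cpu;
		}
		put_cpu();
	}
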
341 | static void ixgbe_setup_dca(struct ixgbe_adapter *adapter) | ||
342 | { | ||
343 | int i; | ||
344 | |||
345 | if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED)) | ||
346 | return; | ||
347 | |||
348 | for (i = 0; i < adapter->num_tx_queues; i++) { | ||
349 | adapter->tx_ring[i].cpu = -1; | ||
350 | ixgbe_update_tx_dca(adapter, &adapter->tx_ring[i]); | ||
351 | } | ||
352 | for (i = 0; i < adapter->num_rx_queues; i++) { | ||
353 | adapter->rx_ring[i].cpu = -1; | ||
354 | ixgbe_update_rx_dca(adapter, &adapter->rx_ring[i]); | ||
355 | } | ||
356 | } | ||
357 | |||
358 | static int __ixgbe_notify_dca(struct device *dev, void *data) | ||
359 | { | ||
360 | struct net_device *netdev = dev_get_drvdata(dev); | ||
361 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | ||
362 | unsigned long event = *(unsigned long *)data; | ||
363 | |||
364 | switch (event) { | ||
365 | case DCA_PROVIDER_ADD: | ||
366 | adapter->flags |= IXGBE_FLAG_DCA_ENABLED; | ||
367 | /* Always use CB2 mode, difference is masked | ||
368 | * in the CB driver. */ | ||
369 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2); | ||
370 | if (dca_add_requester(dev) == IXGBE_SUCCESS) { | ||
371 | ixgbe_setup_dca(adapter); | ||
372 | break; | ||
373 | } | ||
374 | /* Fall Through since DCA is disabled. */ | ||
375 | case DCA_PROVIDER_REMOVE: | ||
376 | if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { | ||
377 | dca_remove_requester(dev); | ||
378 | adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED; | ||
379 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1); | ||
380 | } | ||
381 | break; | ||
382 | } | ||
383 | |||
384 | return IXGBE_SUCCESS; | ||
385 | } | ||
386 | |||
387 | #endif /* CONFIG_DCA */ | ||
279 | /** | 388 | /** |
280 | * ixgbe_receive_skb - Send a completed packet up the stack | 389 | * ixgbe_receive_skb - Send a completed packet up the stack |
281 | * @adapter: board private structure | 390 | * @adapter: board private structure |
@@ -556,10 +665,15 @@ next_desc: | |||
556 | adapter->net_stats.rx_bytes += total_rx_bytes; | 665 | adapter->net_stats.rx_bytes += total_rx_bytes; |
557 | adapter->net_stats.rx_packets += total_rx_packets; | 666 | adapter->net_stats.rx_packets += total_rx_packets; |
558 | 667 | ||
668 | rx_ring->total_packets += total_rx_packets; | ||
669 | rx_ring->total_bytes += total_rx_bytes; | ||
670 | adapter->net_stats.rx_bytes += total_rx_bytes; | ||
671 | adapter->net_stats.rx_packets += total_rx_packets; | ||
672 | |||
559 | return cleaned; | 673 | return cleaned; |
560 | } | 674 | } |
561 | 675 | ||
562 | #define IXGBE_MAX_INTR 10 | 676 | static int ixgbe_clean_rxonly(struct napi_struct *, int); |
563 | /** | 677 | /** |
564 | * ixgbe_configure_msix - Configure MSI-X hardware | 678 | * ixgbe_configure_msix - Configure MSI-X hardware |
565 | * @adapter: board private structure | 679 | * @adapter: board private structure |
@@ -569,28 +683,195 @@ next_desc: | |||
569 | **/ | 683 | **/ |
570 | static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) | 684 | static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) |
571 | { | 685 | { |
572 | int i, vector = 0; | 686 | struct ixgbe_q_vector *q_vector; |
687 | int i, j, q_vectors, v_idx, r_idx; | ||
688 | u32 mask; | ||
573 | 689 | ||
574 | for (i = 0; i < adapter->num_tx_queues; i++) { | 690 | q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; |
575 | ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(i), | ||
576 | IXGBE_MSIX_VECTOR(vector)); | ||
577 | writel(EITR_INTS_PER_SEC_TO_REG(adapter->tx_eitr), | ||
578 | adapter->hw.hw_addr + adapter->tx_ring[i].itr_register); | ||
579 | vector++; | ||
580 | } | ||
581 | 691 | ||
582 | for (i = 0; i < adapter->num_rx_queues; i++) { | 692 | /* Populate the IVAR table and set the ITR values to the |
583 | ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(i), | 693 | * corresponding register. |
584 | IXGBE_MSIX_VECTOR(vector)); | 694 | */ |
585 | writel(EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr), | 695 | for (v_idx = 0; v_idx < q_vectors; v_idx++) { |
586 | adapter->hw.hw_addr + adapter->rx_ring[i].itr_register); | 696 | q_vector = &adapter->q_vector[v_idx]; |
587 | vector++; | 697 | /* XXX for_each_bit(...) */ |
698 | r_idx = find_first_bit(q_vector->rxr_idx, | ||
699 | adapter->num_rx_queues); | ||
700 | |||
701 | for (i = 0; i < q_vector->rxr_count; i++) { | ||
702 | j = adapter->rx_ring[r_idx].reg_idx; | ||
703 | ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(j), v_idx); | ||
704 | r_idx = find_next_bit(q_vector->rxr_idx, | ||
705 | adapter->num_rx_queues, | ||
706 | r_idx + 1); | ||
707 | } | ||
708 | r_idx = find_first_bit(q_vector->txr_idx, | ||
709 | adapter->num_tx_queues); | ||
710 | |||
711 | for (i = 0; i < q_vector->txr_count; i++) { | ||
712 | j = adapter->tx_ring[r_idx].reg_idx; | ||
713 | ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(j), v_idx); | ||
714 | r_idx = find_next_bit(q_vector->txr_idx, | ||
715 | adapter->num_tx_queues, | ||
716 | r_idx + 1); | ||
717 | } | ||
718 | |||
719 | /* if this is a tx only vector use half the irq (tx) rate */ | ||
720 | if (q_vector->txr_count && !q_vector->rxr_count) | ||
721 | q_vector->eitr = adapter->tx_eitr; | ||
722 | else | ||
723 | /* rx only or mixed */ | ||
724 | q_vector->eitr = adapter->rx_eitr; | ||
725 | |||
726 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), | ||
727 | EITR_INTS_PER_SEC_TO_REG(q_vector->eitr)); | ||
588 | } | 728 | } |
589 | 729 | ||
590 | vector = adapter->num_tx_queues + adapter->num_rx_queues; | 730 | ixgbe_set_ivar(adapter, IXGBE_IVAR_OTHER_CAUSES_INDEX, v_idx); |
591 | ixgbe_set_ivar(adapter, IXGBE_IVAR_OTHER_CAUSES_INDEX, | 731 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950); |
592 | IXGBE_MSIX_VECTOR(vector)); | 732 | |
593 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(vector), 1950); | 733 | /* set up to autoclear timer, lsc, and the vectors */ |
734 | mask = IXGBE_EIMS_ENABLE_MASK; | ||
735 | mask &= ~IXGBE_EIMS_OTHER; | ||
736 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask); | ||
737 | } | ||
738 | |||
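
The ring walks in ixgbe_configure_msix() above hand-roll the find_first_bit()/find_next_bit() pair flagged by the XXX comment. The same traversal with the bitmap iterator from <linux/bitops.h> (for_each_bit() in kernels of this vintage, later renamed for_each_set_bit()) would read, for the Rx side:

	for_each_bit(r_idx, q_vector->rxr_idx, adapter->num_rx_queues) {
		j = adapter->rx_ring[r_idx].reg_idx;
		ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(j), v_idx);
	}

rxr_count becomes implicit, since exactly that many bits are set in the bitmap.
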
739 | enum latency_range { | ||
740 | lowest_latency = 0, | ||
741 | low_latency = 1, | ||
742 | bulk_latency = 2, | ||
743 | latency_invalid = 255 | ||
744 | }; | ||
745 | |||
746 | /** | ||
747 | * ixgbe_update_itr - update the dynamic ITR value based on statistics | ||
748 | * @adapter: pointer to adapter | ||
749 | * @eitr: eitr setting (ints per sec) to give last timeslice | ||
750 | * @itr_setting: current throttle rate in ints/second | ||
751 | * @packets: the number of packets during this measurement interval | ||
752 | * @bytes: the number of bytes during this measurement interval | ||
753 | * | ||
754 | * Stores a new ITR value based on packets and byte | ||
755 | * counts during the last interrupt. The advantage of per interrupt | ||
756 | * computation is faster updates and more accurate ITR for the current | ||
757 | * traffic pattern. Constants in this function were computed | ||
758 | * based on theoretical maximum wire speed and thresholds were set based | ||
759 | * on testing data as well as attempting to minimize response time | ||
760 | * while increasing bulk throughput. | ||
761 | * this functionality is controlled by the InterruptThrottleRate module | ||
762 | * parameter (see ixgbe_param.c) | ||
763 | **/ | ||
764 | static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter, | ||
765 | u32 eitr, u8 itr_setting, | ||
766 | int packets, int bytes) | ||
767 | { | ||
768 | unsigned int retval = itr_setting; | ||
769 | u32 timepassed_us; | ||
770 | u64 bytes_perint; | ||
771 | |||
772 | if (packets == 0) | ||
773 | goto update_itr_done; | ||
774 | |||
775 | |||
776 | /* simple throttlerate management | ||
777 | * 0-20MB/s lowest (100000 ints/s) | ||
778 | * 20-100MB/s low (20000 ints/s) | ||
779 | * 100-1249MB/s bulk (8000 ints/s) | ||
780 | */ | ||
781 | /* what was last interrupt timeslice? */ | ||
782 | timepassed_us = 1000000/eitr; | ||
783 | bytes_perint = bytes / timepassed_us; /* bytes/usec */ | ||
784 | |||
785 | switch (itr_setting) { | ||
786 | case lowest_latency: | ||
787 | if (bytes_perint > adapter->eitr_low) | ||
788 | retval = low_latency; | ||
789 | break; | ||
790 | case low_latency: | ||
791 | if (bytes_perint > adapter->eitr_high) | ||
792 | retval = bulk_latency; | ||
793 | else if (bytes_perint <= adapter->eitr_low) | ||
794 | retval = lowest_latency; | ||
795 | break; | ||
796 | case bulk_latency: | ||
797 | if (bytes_perint <= adapter->eitr_high) | ||
798 | retval = low_latency; | ||
799 | break; | ||
800 | } | ||
801 | |||
802 | update_itr_done: | ||
803 | return retval; | ||
804 | } | ||
805 | |||
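
To make ixgbe_update_itr()'s bucketing concrete, assuming eitr_low and eitr_high default near the 20 and 100 bytes/usec boundaries named in the comment:

	u32 eitr = 20000;			/* current rate: 20000 ints/s */
	u32 timepassed_us = 1000000 / eitr;	/* 50 us per timeslice */
	u64 bytes_perint = 60000 / timepassed_us; /* 60 kB seen -> 1200 B/us */
	/* 1200 > eitr_high, so a low_latency queue is promoted to
	 * bulk_latency, and the vector drops toward 8000 ints/s on the
	 * next ixgbe_set_itr_msix() pass. */
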
806 | static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector) | ||
807 | { | ||
808 | struct ixgbe_adapter *adapter = q_vector->adapter; | ||
809 | struct ixgbe_hw *hw = &adapter->hw; | ||
810 | u32 new_itr; | ||
811 | u8 current_itr, ret_itr; | ||
812 | int i, r_idx, v_idx = ((void *)q_vector - (void *)(adapter->q_vector)) / | ||
813 | sizeof(struct ixgbe_q_vector); | ||
814 | struct ixgbe_ring *rx_ring, *tx_ring; | ||
815 | |||
816 | r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); | ||
817 | for (i = 0; i < q_vector->txr_count; i++) { | ||
818 | tx_ring = &(adapter->tx_ring[r_idx]); | ||
819 | ret_itr = ixgbe_update_itr(adapter, q_vector->eitr, | ||
820 | q_vector->tx_eitr, | ||
821 | tx_ring->total_packets, | ||
822 | tx_ring->total_bytes); | ||
823 | /* if the result for this queue would decrease interrupt | ||
824 | * rate for this vector then use that result */ | ||
825 | q_vector->tx_eitr = ((q_vector->tx_eitr > ret_itr) ? | ||
826 | q_vector->tx_eitr - 1 : ret_itr); | ||
827 | r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, | ||
828 | r_idx + 1); | ||
829 | } | ||
830 | |||
831 | r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); | ||
832 | for (i = 0; i < q_vector->rxr_count; i++) { | ||
833 | rx_ring = &(adapter->rx_ring[r_idx]); | ||
834 | ret_itr = ixgbe_update_itr(adapter, q_vector->eitr, | ||
835 | q_vector->rx_eitr, | ||
836 | rx_ring->total_packets, | ||
837 | rx_ring->total_bytes); | ||
838 | /* if the result for this queue would decrease interrupt | ||
839 | * rate for this vector then use that result */ | ||
840 | q_vector->rx_eitr = ((q_vector->rx_eitr > ret_itr) ? | ||
841 | q_vector->rx_eitr - 1 : ret_itr); | ||
842 | r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, | ||
843 | r_idx + 1); | ||
844 | } | ||
845 | |||
846 | current_itr = max(q_vector->rx_eitr, q_vector->tx_eitr); | ||
847 | |||
848 | switch (current_itr) { | ||
849 | /* counts and packets in update_itr are dependent on these numbers */ | ||
850 | case lowest_latency: | ||
851 | new_itr = 100000; | ||
852 | break; | ||
853 | case low_latency: | ||
854 | new_itr = 20000; /* aka hwitr = ~200 */ | ||
855 | break; | ||
856 | case bulk_latency: | ||
857 | default: | ||
858 | new_itr = 8000; | ||
859 | break; | ||
860 | } | ||
861 | |||
862 | if (new_itr != q_vector->eitr) { | ||
863 | u32 itr_reg; | ||
864 | /* do an exponential smoothing */ | ||
865 | new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100); | ||
866 | q_vector->eitr = new_itr; | ||
867 | itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr); | ||
868 | /* must write high and low 16 bits to reset counter */ | ||
869 | DPRINTK(TX_ERR, DEBUG, "writing eitr(%d): %08X\n", v_idx, | ||
870 | itr_reg); | ||
871 | IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg | (itr_reg)<<16); | ||
872 | } | ||
873 | |||
874 | return; | ||
594 | } | 875 | } |
595 | 876 | ||
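
The 90/10 blend at the bottom of ixgbe_set_itr_msix() moves the programmed rate only a tenth of the way toward the new target per adjustment, which damps oscillation between buckets. For a vector idling at 8000 ints/s that suddenly qualifies for 100000:

	u32 eitr = 8000, target = 100000;
	eitr = ((eitr * 90) / 100) + ((target * 10) / 100);	/* -> 17200 */
	/* successive adjustments: 17200, 25480, 32932, ...
	 * converging on (but never overshooting) 100000 */
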
596 | static irqreturn_t ixgbe_msix_lsc(int irq, void *data) | 877 | static irqreturn_t ixgbe_msix_lsc(int irq, void *data) |
@@ -614,153 +895,302 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data) | |||
614 | 895 | ||
615 | static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data) | 896 | static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data) |
616 | { | 897 | { |
617 | struct ixgbe_ring *txr = data; | 898 | struct ixgbe_q_vector *q_vector = data; |
618 | struct ixgbe_adapter *adapter = txr->adapter; | 899 | struct ixgbe_adapter *adapter = q_vector->adapter; |
900 | struct ixgbe_ring *txr; | ||
901 | int i, r_idx; | ||
619 | 902 | ||
620 | ixgbe_clean_tx_irq(adapter, txr); | 903 | if (!q_vector->txr_count) |
904 | return IRQ_HANDLED; | ||
905 | |||
906 | r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); | ||
907 | for (i = 0; i < q_vector->txr_count; i++) { | ||
908 | txr = &(adapter->tx_ring[r_idx]); | ||
909 | #ifdef CONFIG_DCA | ||
910 | if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) | ||
911 | ixgbe_update_tx_dca(adapter, txr); | ||
912 | #endif | ||
913 | txr->total_bytes = 0; | ||
914 | txr->total_packets = 0; | ||
915 | ixgbe_clean_tx_irq(adapter, txr); | ||
916 | r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, | ||
917 | r_idx + 1); | ||
918 | } | ||
621 | 919 | ||
622 | return IRQ_HANDLED; | 920 | return IRQ_HANDLED; |
623 | } | 921 | } |
624 | 922 | ||
923 | /** | ||
924 | * ixgbe_msix_clean_rx - single unshared vector rx clean (all queues) | ||
925 | * @irq: unused | ||
926 | * @data: pointer to our q_vector struct for this interrupt vector | ||
927 | **/ | ||
625 | static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data) | 928 | static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data) |
626 | { | 929 | { |
627 | struct ixgbe_ring *rxr = data; | 930 | struct ixgbe_q_vector *q_vector = data; |
628 | struct ixgbe_adapter *adapter = rxr->adapter; | 931 | struct ixgbe_adapter *adapter = q_vector->adapter; |
932 | struct ixgbe_ring *rxr; | ||
933 | int r_idx; | ||
934 | |||
935 | r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); | ||
936 | if (!q_vector->rxr_count) | ||
937 | return IRQ_HANDLED; | ||
938 | |||
939 | rxr = &(adapter->rx_ring[r_idx]); | ||
940 | /* disable interrupts on this vector only */ | ||
941 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rxr->v_idx); | ||
942 | rxr->total_bytes = 0; | ||
943 | rxr->total_packets = 0; | ||
944 | netif_rx_schedule(adapter->netdev, &q_vector->napi); | ||
945 | |||
946 | return IRQ_HANDLED; | ||
947 | } | ||
948 | |||
949 | static irqreturn_t ixgbe_msix_clean_many(int irq, void *data) | ||
950 | { | ||
951 | ixgbe_msix_clean_rx(irq, data); | ||
952 | ixgbe_msix_clean_tx(irq, data); | ||
629 | 953 | ||
630 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rxr->eims_value); | ||
631 | netif_rx_schedule(adapter->netdev, &adapter->napi); | ||
632 | return IRQ_HANDLED; | 954 | return IRQ_HANDLED; |
633 | } | 955 | } |
634 | 956 | ||
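
ixgbe_msix_clean_rx() above can disarm just its own vector because the mapping helpers further down store a one-bit mask, 1 << v_idx, in each ring's v_idx field. The round trip, condensed from code in this patch:

	a->rx_ring[r_idx].v_idx = 1 << v_idx;			/* at map time */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rxr->v_idx);	/* disarm */
	/* ... NAPI poll runs; once work_done < budget ... */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rxr->v_idx);	/* re-arm */
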
957 | /** | ||
958 | * ixgbe_clean_rxonly - msix (aka one shot) rx clean routine | ||
959 | * @napi: napi struct with our devices info in it | ||
960 | * @budget: amount of work driver is allowed to do this pass, in packets | ||
961 | * | ||
962 | **/ | ||
635 | static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget) | 963 | static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget) |
636 | { | 964 | { |
637 | struct ixgbe_adapter *adapter = container_of(napi, | 965 | struct ixgbe_q_vector *q_vector = |
638 | struct ixgbe_adapter, napi); | 966 | container_of(napi, struct ixgbe_q_vector, napi); |
639 | struct net_device *netdev = adapter->netdev; | 967 | struct ixgbe_adapter *adapter = q_vector->adapter; |
968 | struct ixgbe_ring *rxr; | ||
640 | int work_done = 0; | 969 | int work_done = 0; |
641 | struct ixgbe_ring *rxr = adapter->rx_ring; | 970 | long r_idx; |
642 | 971 | ||
643 | /* Keep link state information with original netdev */ | 972 | r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); |
644 | if (!netif_carrier_ok(netdev)) | 973 | rxr = &(adapter->rx_ring[r_idx]); |
645 | goto quit_polling; | 974 | #ifdef CONFIG_DCA |
975 | if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) | ||
976 | ixgbe_update_rx_dca(adapter, rxr); | ||
977 | #endif | ||
646 | 978 | ||
647 | ixgbe_clean_rx_irq(adapter, rxr, &work_done, budget); | 979 | ixgbe_clean_rx_irq(adapter, rxr, &work_done, budget); |
648 | 980 | ||
649 | /* If no Tx and not enough Rx work done, exit the polling mode */ | 981 | /* If all Rx work done, exit the polling mode */ |
650 | if ((work_done < budget) || !netif_running(netdev)) { | 982 | if (work_done < budget) { |
651 | quit_polling: | 983 | netif_rx_complete(adapter->netdev, napi); |
652 | netif_rx_complete(netdev, napi); | 984 | if (adapter->rx_eitr < IXGBE_MIN_ITR_USECS) |
985 | ixgbe_set_itr_msix(q_vector); | ||
653 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) | 986 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) |
654 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, | 987 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rxr->v_idx); |
655 | rxr->eims_value); | ||
656 | } | 988 | } |
657 | 989 | ||
658 | return work_done; | 990 | return work_done; |
659 | } | 991 | } |
660 | 992 | ||
993 | static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx, | ||
994 | int r_idx) | ||
995 | { | ||
996 | a->q_vector[v_idx].adapter = a; | ||
997 | set_bit(r_idx, a->q_vector[v_idx].rxr_idx); | ||
998 | a->q_vector[v_idx].rxr_count++; | ||
999 | a->rx_ring[r_idx].v_idx = 1 << v_idx; | ||
1000 | } | ||
1001 | |||
1002 | static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx, | ||
1003 | int r_idx) | ||
1004 | { | ||
1005 | a->q_vector[v_idx].adapter = a; | ||
1006 | set_bit(r_idx, a->q_vector[v_idx].txr_idx); | ||
1007 | a->q_vector[v_idx].txr_count++; | ||
1008 | a->tx_ring[r_idx].v_idx = 1 << v_idx; | ||
1009 | } | ||
1010 | |||
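
When the vector budget is short, ixgbe_map_rings_to_vectors() below re-divides what is left on every pass, so the split stays as even as integer math allows. A worked Rx pass, assuming 8 Rx queues over 3 vectors:

	/* i=0: rqpv = DIV_ROUND_UP(8, 3) = 3 -> rx queues 0,1,2 (5 left)
	 * i=1: rqpv = DIV_ROUND_UP(5, 2) = 3 -> rx queues 3,4,5 (2 left)
	 * i=2: rqpv = DIV_ROUND_UP(2, 1) = 2 -> rx queues 6,7 */

The Tx loop then repeats the same arithmetic over the same vectors, so each vector ends up serving both ring types and takes the ixgbe_msix_clean_many handler.
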
661 | /** | 1011 | /** |
662 | * ixgbe_setup_msix - Initialize MSI-X interrupts | 1012 | * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors |
1013 | * @adapter: board private structure to initialize | ||
1014 | * @vectors: allotted vector count for descriptor rings | ||
663 | * | 1015 | * |
664 | * ixgbe_setup_msix allocates MSI-X vectors and requests | 1016 | * This function maps descriptor rings to the queue-specific vectors |
665 | * interrupts from the kernel. | 1017 | * we were allotted through the MSI-X enabling code. Ideally, we'd have |
1018 | * one vector per ring/queue, but on a constrained vector budget, we | ||
1019 | * group the rings as "efficiently" as possible. You would add new | ||
1020 | * mapping configurations in here. | ||
666 | **/ | 1021 | **/ |
667 | static int ixgbe_setup_msix(struct ixgbe_adapter *adapter) | 1022 | static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter, |
668 | { | 1023 | int vectors) |
669 | struct net_device *netdev = adapter->netdev; | 1024 | { |
670 | int i, int_vector = 0, err = 0; | 1025 | int v_start = 0; |
671 | int max_msix_count; | 1026 | int rxr_idx = 0, txr_idx = 0; |
1027 | int rxr_remaining = adapter->num_rx_queues; | ||
1028 | int txr_remaining = adapter->num_tx_queues; | ||
1029 | int i, j; | ||
1030 | int rqpv, tqpv; | ||
1031 | int err = 0; | ||
1032 | |||
1033 | /* No mapping required if MSI-X is disabled. */ | ||
1034 | if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) | ||
1035 | goto out; | ||
672 | 1036 | ||
673 | /* +1 for the LSC interrupt */ | 1037 | /* |
674 | max_msix_count = adapter->num_rx_queues + adapter->num_tx_queues + 1; | 1038 | * The ideal configuration... |
675 | adapter->msix_entries = kcalloc(max_msix_count, | 1039 | * We have enough vectors to map one per queue. |
676 | sizeof(struct msix_entry), GFP_KERNEL); | 1040 | */ |
677 | if (!adapter->msix_entries) | 1041 | if (vectors == adapter->num_rx_queues + adapter->num_tx_queues) { |
678 | return -ENOMEM; | 1042 | for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++) |
1043 | map_vector_to_rxq(adapter, v_start, rxr_idx); | ||
679 | 1044 | ||
680 | for (i = 0; i < max_msix_count; i++) | 1045 | for (; txr_idx < txr_remaining; v_start++, txr_idx++) |
681 | adapter->msix_entries[i].entry = i; | 1046 | map_vector_to_txq(adapter, v_start, txr_idx); |
682 | 1047 | ||
683 | err = pci_enable_msix(adapter->pdev, adapter->msix_entries, | ||
684 | max_msix_count); | ||
685 | if (err) | ||
686 | goto out; | 1048 | goto out; |
1049 | } | ||
687 | 1050 | ||
688 | for (i = 0; i < adapter->num_tx_queues; i++) { | 1051 | /* |
689 | sprintf(adapter->tx_ring[i].name, "%s-tx%d", netdev->name, i); | 1052 | * If we don't have enough vectors for a 1-to-1 |
690 | err = request_irq(adapter->msix_entries[int_vector].vector, | 1053 | * mapping, we'll have to group them so there are |
691 | &ixgbe_msix_clean_tx, | 1054 | * multiple queues per vector. |
692 | 0, | 1055 | */ |
693 | adapter->tx_ring[i].name, | 1056 | /* Re-adjusting *qpv takes care of the remainder. */ |
694 | &(adapter->tx_ring[i])); | 1057 | for (i = v_start; i < vectors; i++) { |
695 | if (err) { | 1058 | rqpv = DIV_ROUND_UP(rxr_remaining, vectors - i); |
696 | DPRINTK(PROBE, ERR, | 1059 | for (j = 0; j < rqpv; j++) { |
697 | "request_irq failed for MSIX interrupt " | 1060 | map_vector_to_rxq(adapter, i, rxr_idx); |
698 | "Error: %d\n", err); | 1061 | rxr_idx++; |
699 | goto release_irqs; | 1062 | rxr_remaining--; |
1063 | } | ||
1064 | } | ||
1065 | for (i = v_start; i < vectors; i++) { | ||
1066 | tqpv = DIV_ROUND_UP(txr_remaining, vectors - i); | ||
1067 | for (j = 0; j < tqpv; j++) { | ||
1068 | map_vector_to_txq(adapter, i, txr_idx); | ||
1069 | txr_idx++; | ||
1070 | txr_remaining--; | ||
700 | } | 1071 | } |
701 | adapter->tx_ring[i].eims_value = | ||
702 | (1 << IXGBE_MSIX_VECTOR(int_vector)); | ||
703 | adapter->tx_ring[i].itr_register = IXGBE_EITR(int_vector); | ||
704 | int_vector++; | ||
705 | } | 1072 | } |
706 | 1073 | ||
707 | for (i = 0; i < adapter->num_rx_queues; i++) { | 1074 | out: |
708 | if (strlen(netdev->name) < (IFNAMSIZ - 5)) | 1075 | return err; |
709 | sprintf(adapter->rx_ring[i].name, | 1076 | } |
710 | "%s-rx%d", netdev->name, i); | 1077 | |
711 | else | 1078 | /** |
712 | memcpy(adapter->rx_ring[i].name, | 1079 | * ixgbe_request_msix_irqs - Initialize MSI-X interrupts |
713 | netdev->name, IFNAMSIZ); | 1080 | * @adapter: board private structure |
714 | err = request_irq(adapter->msix_entries[int_vector].vector, | 1081 | * |
715 | &ixgbe_msix_clean_rx, 0, | 1082 | * ixgbe_request_msix_irqs allocates MSI-X vectors and requests |
716 | adapter->rx_ring[i].name, | 1083 | * interrupts from the kernel. |
717 | &(adapter->rx_ring[i])); | 1084 | **/ |
1085 | static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter) | ||
1086 | { | ||
1087 | struct net_device *netdev = adapter->netdev; | ||
1088 | irqreturn_t (*handler)(int, void *); | ||
1089 | int i, vector, q_vectors, err; | ||
1090 | |||
1091 | /* Decrement for Other and TCP Timer vectors */ | ||
1092 | q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; | ||
1093 | |||
1094 | /* Map the Tx/Rx rings to the vectors we were allotted. */ | ||
1095 | err = ixgbe_map_rings_to_vectors(adapter, q_vectors); | ||
1096 | if (err) | ||
1097 | goto out; | ||
1098 | |||
1099 | #define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \ | ||
1100 | (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \ | ||
1101 | &ixgbe_msix_clean_many) | ||
1102 | for (vector = 0; vector < q_vectors; vector++) { | ||
1103 | handler = SET_HANDLER(&adapter->q_vector[vector]); | ||
1104 | sprintf(adapter->name[vector], "%s:v%d-%s", | ||
1105 | netdev->name, vector, | ||
1106 | (handler == &ixgbe_msix_clean_rx) ? "Rx" : | ||
1107 | ((handler == &ixgbe_msix_clean_tx) ? "Tx" : "TxRx")); | ||
1108 | err = request_irq(adapter->msix_entries[vector].vector, | ||
1109 | handler, 0, adapter->name[vector], | ||
1110 | &(adapter->q_vector[vector])); | ||
718 | if (err) { | 1111 | if (err) { |
719 | DPRINTK(PROBE, ERR, | 1112 | DPRINTK(PROBE, ERR, |
720 | "request_irq failed for MSIX interrupt " | 1113 | "request_irq failed for MSIX interrupt " |
721 | "Error: %d\n", err); | 1114 | "Error: %d\n", err); |
722 | goto release_irqs; | 1115 | goto free_queue_irqs; |
723 | } | 1116 | } |
724 | |||
725 | adapter->rx_ring[i].eims_value = | ||
726 | (1 << IXGBE_MSIX_VECTOR(int_vector)); | ||
727 | adapter->rx_ring[i].itr_register = IXGBE_EITR(int_vector); | ||
728 | int_vector++; | ||
729 | } | 1117 | } |
730 | 1118 | ||
731 | sprintf(adapter->lsc_name, "%s-lsc", netdev->name); | 1119 | sprintf(adapter->name[vector], "%s:lsc", netdev->name); |
732 | err = request_irq(adapter->msix_entries[int_vector].vector, | 1120 | err = request_irq(adapter->msix_entries[vector].vector, |
733 | &ixgbe_msix_lsc, 0, adapter->lsc_name, netdev); | 1121 | &ixgbe_msix_lsc, 0, adapter->name[vector], netdev); |
734 | if (err) { | 1122 | if (err) { |
735 | DPRINTK(PROBE, ERR, | 1123 | DPRINTK(PROBE, ERR, |
736 | "request_irq for msix_lsc failed: %d\n", err); | 1124 | "request_irq for msix_lsc failed: %d\n", err); |
737 | goto release_irqs; | 1125 | goto free_queue_irqs; |
738 | } | 1126 | } |
739 | 1127 | ||
740 | /* FIXME: implement netif_napi_remove() instead */ | ||
741 | adapter->napi.poll = ixgbe_clean_rxonly; | ||
742 | adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; | ||
743 | return 0; | 1128 | return 0; |
744 | 1129 | ||
745 | release_irqs: | 1130 | free_queue_irqs: |
746 | int_vector--; | 1131 | for (i = vector - 1; i >= 0; i--) |
747 | for (; int_vector >= adapter->num_tx_queues; int_vector--) | 1132 | free_irq(adapter->msix_entries[--vector].vector, |
748 | free_irq(adapter->msix_entries[int_vector].vector, | 1133 | &(adapter->q_vector[i])); |
749 | &(adapter->rx_ring[int_vector - | 1134 | adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; |
750 | adapter->num_tx_queues])); | 1135 | pci_disable_msix(adapter->pdev); |
751 | |||
752 | for (; int_vector >= 0; int_vector--) | ||
753 | free_irq(adapter->msix_entries[int_vector].vector, | ||
754 | &(adapter->tx_ring[int_vector])); | ||
755 | out: | ||
756 | kfree(adapter->msix_entries); | 1136 | kfree(adapter->msix_entries); |
757 | adapter->msix_entries = NULL; | 1137 | adapter->msix_entries = NULL; |
758 | adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; | 1138 | out: |
759 | return err; | 1139 | return err; |
760 | } | 1140 | } |
761 | 1141 | ||
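
The free_queue_irqs unwind above is denser than it looks: i and the pre-decremented vector stay in lockstep, so the loop is equivalent to the more conventional

	while (vector-- > 0)
		free_irq(adapter->msix_entries[vector].vector,
		         &adapter->q_vector[vector]);

freeing only the vectors that were successfully requested, in reverse order, before MSI-X is torn down entirely.
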
1142 | static void ixgbe_set_itr(struct ixgbe_adapter *adapter) | ||
1143 | { | ||
1144 | struct ixgbe_hw *hw = &adapter->hw; | ||
1145 | struct ixgbe_q_vector *q_vector = adapter->q_vector; | ||
1146 | u8 current_itr; | ||
1147 | u32 new_itr = q_vector->eitr; | ||
1148 | struct ixgbe_ring *rx_ring = &adapter->rx_ring[0]; | ||
1149 | struct ixgbe_ring *tx_ring = &adapter->tx_ring[0]; | ||
1150 | |||
1151 | q_vector->tx_eitr = ixgbe_update_itr(adapter, new_itr, | ||
1152 | q_vector->tx_eitr, | ||
1153 | tx_ring->total_packets, | ||
1154 | tx_ring->total_bytes); | ||
1155 | q_vector->rx_eitr = ixgbe_update_itr(adapter, new_itr, | ||
1156 | q_vector->rx_eitr, | ||
1157 | rx_ring->total_packets, | ||
1158 | rx_ring->total_bytes); | ||
1159 | |||
1160 | current_itr = max(q_vector->rx_eitr, q_vector->tx_eitr); | ||
1161 | |||
1162 | switch (current_itr) { | ||
1163 | /* counts and packets in update_itr are dependent on these numbers */ | ||
1164 | case lowest_latency: | ||
1165 | new_itr = 100000; | ||
1166 | break; | ||
1167 | case low_latency: | ||
1168 | new_itr = 20000; /* aka hwitr = ~200 */ | ||
1169 | break; | ||
1170 | case bulk_latency: | ||
1171 | new_itr = 8000; | ||
1172 | break; | ||
1173 | default: | ||
1174 | break; | ||
1175 | } | ||
1176 | |||
1177 | if (new_itr != q_vector->eitr) { | ||
1178 | u32 itr_reg; | ||
1179 | /* do an exponential smoothing */ | ||
1180 | new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100); | ||
1181 | q_vector->eitr = new_itr; | ||
1182 | itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr); | ||
1183 | /* must write high and low 16 bits to reset counter */ | ||
1184 | IXGBE_WRITE_REG(hw, IXGBE_EITR(0), itr_reg | (itr_reg)<<16); | ||
1185 | } | ||
1186 | |||
1187 | return; | ||
1188 | } | ||
1189 | |||
1190 | static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter); | ||
1191 | |||
762 | /** | 1192 | /** |
763 | * ixgbe_intr - Interrupt Handler | 1193 | * ixgbe_intr - legacy mode Interrupt Handler |
764 | * @irq: interrupt number | 1194 | * @irq: interrupt number |
765 | * @data: pointer to a network interface device structure | 1195 | * @data: pointer to a network interface device structure |
766 | * @pt_regs: CPU registers structure | 1196 | * @pt_regs: CPU registers structure |
@@ -772,8 +1202,10 @@ static irqreturn_t ixgbe_intr(int irq, void *data) | |||
772 | struct ixgbe_hw *hw = &adapter->hw; | 1202 | struct ixgbe_hw *hw = &adapter->hw; |
773 | u32 eicr; | 1203 | u32 eicr; |
774 | 1204 | ||
775 | eicr = IXGBE_READ_REG(hw, IXGBE_EICR); | ||
776 | 1205 | ||
1206 | /* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read | ||
1207 | * therefore no explicit interrupt disable is necessary */ | ||
1208 | eicr = IXGBE_READ_REG(hw, IXGBE_EICR); | ||
777 | if (!eicr) | 1209 | if (!eicr) |
778 | return IRQ_NONE; /* Not our interrupt */ | 1210 | return IRQ_NONE; /* Not our interrupt */ |
779 | 1211 | ||
@@ -782,16 +1214,33 @@ static irqreturn_t ixgbe_intr(int irq, void *data) | |||
782 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) | 1214 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) |
783 | mod_timer(&adapter->watchdog_timer, jiffies); | 1215 | mod_timer(&adapter->watchdog_timer, jiffies); |
784 | } | 1216 | } |
785 | if (netif_rx_schedule_prep(netdev, &adapter->napi)) { | 1217 | |
786 | /* Disable interrupts and register for poll. The flush of the | 1218 | |
787 | * posted write is intentionally left out. */ | 1219 | if (netif_rx_schedule_prep(netdev, &adapter->q_vector[0].napi)) { |
788 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0); | 1220 | adapter->tx_ring[0].total_packets = 0; |
789 | __netif_rx_schedule(netdev, &adapter->napi); | 1221 | adapter->tx_ring[0].total_bytes = 0; |
1222 | adapter->rx_ring[0].total_packets = 0; | ||
1223 | adapter->rx_ring[0].total_bytes = 0; | ||
1224 | /* would disable interrupts here but EIAM disabled it */ | ||
1225 | __netif_rx_schedule(netdev, &adapter->q_vector[0].napi); | ||
790 | } | 1226 | } |
791 | 1227 | ||
792 | return IRQ_HANDLED; | 1228 | return IRQ_HANDLED; |
793 | } | 1229 | } |
794 | 1230 | ||
1231 | static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter) | ||
1232 | { | ||
1233 | int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; | ||
1234 | |||
1235 | for (i = 0; i < q_vectors; i++) { | ||
1236 | struct ixgbe_q_vector *q_vector = &adapter->q_vector[i]; | ||
1237 | bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES); | ||
1238 | bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES); | ||
1239 | q_vector->rxr_count = 0; | ||
1240 | q_vector->txr_count = 0; | ||
1241 | } | ||
1242 | } | ||
1243 | |||
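
ixgbe_reset_q_vectors() leans on per-vector bookkeeping that this patch adds to ixgbe.h rather than to this file. Judging only from the calls made here, each q_vector is shaped roughly like the following (a reconstruction, not the literal header):

	struct ixgbe_q_vector {
		struct ixgbe_adapter *adapter;
		struct napi_struct napi;
		DECLARE_BITMAP(rxr_idx, MAX_RX_QUEUES);	/* Rx rings this vector owns */
		DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES);	/* Tx rings this vector owns */
		u8 rxr_count, txr_count;
		u8 tx_eitr, rx_eitr;			/* latency_range buckets */
		u32 eitr;				/* programmed ints/s */
	};
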
795 | /** | 1244 | /** |
796 | * ixgbe_request_irq - initialize interrupts | 1245 | * ixgbe_request_irq - initialize interrupts |
797 | * @adapter: board private structure | 1246 | * @adapter: board private structure |
@@ -799,40 +1248,24 @@ static irqreturn_t ixgbe_intr(int irq, void *data) | |||
799 | * Attempts to configure interrupts using the best available | 1248 | * Attempts to configure interrupts using the best available |
800 | * capabilities of the hardware and kernel. | 1249 | * capabilities of the hardware and kernel. |
801 | **/ | 1250 | **/ |
802 | static int ixgbe_request_irq(struct ixgbe_adapter *adapter, u32 *num_rx_queues) | 1251 | static int ixgbe_request_irq(struct ixgbe_adapter *adapter) |
803 | { | 1252 | { |
804 | struct net_device *netdev = adapter->netdev; | 1253 | struct net_device *netdev = adapter->netdev; |
805 | int flags, err; | 1254 | int err; |
806 | irq_handler_t handler = ixgbe_intr; | ||
807 | |||
808 | flags = IRQF_SHARED; | ||
809 | |||
810 | err = ixgbe_setup_msix(adapter); | ||
811 | if (!err) | ||
812 | goto request_done; | ||
813 | |||
814 | /* | ||
815 | * if we can't do MSI-X, fall through and try MSI | ||
816 | * No need to reallocate memory since we're decreasing the number of | ||
817 | * queues. We just won't use the other ones, also it is freed correctly | ||
818 | * on ixgbe_remove. | ||
819 | */ | ||
820 | *num_rx_queues = 1; | ||
821 | 1255 | ||
822 | /* do MSI */ | 1256 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { |
823 | err = pci_enable_msi(adapter->pdev); | 1257 | err = ixgbe_request_msix_irqs(adapter); |
824 | if (!err) { | 1258 | } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { |
825 | adapter->flags |= IXGBE_FLAG_MSI_ENABLED; | 1259 | err = request_irq(adapter->pdev->irq, &ixgbe_intr, 0, |
826 | flags &= ~IRQF_SHARED; | 1260 | netdev->name, netdev); |
827 | handler = &ixgbe_intr; | 1261 | } else { |
1262 | err = request_irq(adapter->pdev->irq, &ixgbe_intr, IRQF_SHARED, | ||
1263 | netdev->name, netdev); | ||
828 | } | 1264 | } |
829 | 1265 | ||
830 | err = request_irq(adapter->pdev->irq, handler, flags, | ||
831 | netdev->name, netdev); | ||
832 | if (err) | 1266 | if (err) |
833 | DPRINTK(PROBE, ERR, "request_irq failed, Error %d\n", err); | 1267 | DPRINTK(PROBE, ERR, "request_irq failed, Error %d\n", err); |
834 | 1268 | ||
835 | request_done: | ||
836 | return err; | 1269 | return err; |
837 | } | 1270 | } |
838 | 1271 | ||
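
With the MSI-X/MSI fallback decision moved out of the request path (capability is now settled once, earlier in setup), the open path simply requests whatever was negotiated. Schematically, assuming an ixgbe_open() along these lines:

	static int ixgbe_open(struct net_device *netdev)
	{
		struct ixgbe_adapter *adapter = netdev_priv(netdev);
		int err;

		/* ... allocate Tx/Rx descriptor resources ... */
		err = ixgbe_request_irq(adapter);	/* MSI-X, MSI, or legacy */
		if (err)
			return err;	/* after freeing those resources */
		return ixgbe_up_complete(adapter);
	}
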
@@ -841,28 +1274,22 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter) | |||
841 | struct net_device *netdev = adapter->netdev; | 1274 | struct net_device *netdev = adapter->netdev; |
842 | 1275 | ||
843 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { | 1276 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { |
844 | int i; | 1277 | int i, q_vectors; |
845 | 1278 | ||
846 | for (i = 0; i < adapter->num_tx_queues; i++) | 1279 | q_vectors = adapter->num_msix_vectors; |
847 | free_irq(adapter->msix_entries[i].vector, | 1280 | |
848 | &(adapter->tx_ring[i])); | 1281 | i = q_vectors - 1; |
849 | for (i = 0; i < adapter->num_rx_queues; i++) | ||
850 | free_irq(adapter->msix_entries[i + | ||
851 | adapter->num_tx_queues].vector, | ||
852 | &(adapter->rx_ring[i])); | ||
853 | i = adapter->num_rx_queues + adapter->num_tx_queues; | ||
854 | free_irq(adapter->msix_entries[i].vector, netdev); | 1282 | free_irq(adapter->msix_entries[i].vector, netdev); |
855 | pci_disable_msix(adapter->pdev); | ||
856 | kfree(adapter->msix_entries); | ||
857 | adapter->msix_entries = NULL; | ||
858 | adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; | ||
859 | return; | ||
860 | } | ||
861 | 1283 | ||
862 | free_irq(adapter->pdev->irq, netdev); | 1284 | i--; |
863 | if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { | 1285 | for (; i >= 0; i--) { |
864 | pci_disable_msi(adapter->pdev); | 1286 | free_irq(adapter->msix_entries[i].vector, |
865 | adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED; | 1287 | &(adapter->q_vector[i])); |
1288 | } | ||
1289 | |||
1290 | ixgbe_reset_q_vectors(adapter); | ||
1291 | } else { | ||
1292 | free_irq(adapter->pdev->irq, netdev); | ||
866 | } | 1293 | } |
867 | } | 1294 | } |
868 | 1295 | ||
@@ -874,7 +1301,13 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter) | |||
874 | { | 1301 | { |
875 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0); | 1302 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0); |
876 | IXGBE_WRITE_FLUSH(&adapter->hw); | 1303 | IXGBE_WRITE_FLUSH(&adapter->hw); |
877 | synchronize_irq(adapter->pdev->irq); | 1304 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { |
1305 | int i; | ||
1306 | for (i = 0; i < adapter->num_msix_vectors; i++) | ||
1307 | synchronize_irq(adapter->msix_entries[i].vector); | ||
1308 | } else { | ||
1309 | synchronize_irq(adapter->pdev->irq); | ||
1310 | } | ||
878 | } | 1311 | } |
879 | 1312 | ||
880 | /** | 1313 | /** |
@@ -883,12 +1316,9 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter) | |||
883 | **/ | 1316 | **/ |
884 | static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter) | 1317 | static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter) |
885 | { | 1318 | { |
886 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) | 1319 | u32 mask; |
887 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, | 1320 | mask = IXGBE_EIMS_ENABLE_MASK; |
888 | (IXGBE_EIMS_ENABLE_MASK & | 1321 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); |
889 | ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC))); | ||
890 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, | ||
891 | IXGBE_EIMS_ENABLE_MASK); | ||
892 | IXGBE_WRITE_FLUSH(&adapter->hw); | 1322 | IXGBE_WRITE_FLUSH(&adapter->hw); |
893 | } | 1323 | } |
894 | 1324 | ||
@@ -898,20 +1328,18 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter) | |||
898 | **/ | 1328 | **/ |
899 | static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter) | 1329 | static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter) |
900 | { | 1330 | { |
901 | int i; | ||
902 | struct ixgbe_hw *hw = &adapter->hw; | 1331 | struct ixgbe_hw *hw = &adapter->hw; |
903 | 1332 | ||
904 | if (adapter->rx_eitr) | 1333 | IXGBE_WRITE_REG(hw, IXGBE_EITR(0), |
905 | IXGBE_WRITE_REG(hw, IXGBE_EITR(0), | 1334 | EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr)); |
906 | EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr)); | ||
907 | |||
908 | /* for re-triggering the interrupt in non-NAPI mode */ | ||
909 | adapter->rx_ring[0].eims_value = (1 << IXGBE_MSIX_VECTOR(0)); | ||
910 | adapter->tx_ring[0].eims_value = (1 << IXGBE_MSIX_VECTOR(0)); | ||
911 | 1335 | ||
912 | ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(0), 0); | 1336 | ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(0), 0); |
913 | for (i = 0; i < adapter->num_tx_queues; i++) | 1337 | ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(0), 0); |
914 | ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(i), i); | 1338 | |
1339 | map_vector_to_rxq(adapter, 0, 0); | ||
1340 | map_vector_to_txq(adapter, 0, 0); | ||
1341 | |||
1342 | DPRINTK(HW, INFO, "Legacy interrupt IVAR setup done\n"); | ||
915 | } | 1343 | } |
916 | 1344 | ||
917 | /** | 1345 | /** |
@@ -924,23 +1352,29 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter) | |||
924 | { | 1352 | { |
925 | u64 tdba; | 1353 | u64 tdba; |
926 | struct ixgbe_hw *hw = &adapter->hw; | 1354 | struct ixgbe_hw *hw = &adapter->hw; |
927 | u32 i, tdlen; | 1355 | u32 i, j, tdlen, txctrl; |
928 | 1356 | ||
929 | /* Setup the HW Tx Head and Tail descriptor pointers */ | 1357 | /* Setup the HW Tx Head and Tail descriptor pointers */ |
930 | for (i = 0; i < adapter->num_tx_queues; i++) { | 1358 | for (i = 0; i < adapter->num_tx_queues; i++) { |
1359 | j = adapter->tx_ring[i].reg_idx; | ||
931 | tdba = adapter->tx_ring[i].dma; | 1360 | tdba = adapter->tx_ring[i].dma; |
932 | tdlen = adapter->tx_ring[i].count * | 1361 | tdlen = adapter->tx_ring[i].count * |
933 | sizeof(union ixgbe_adv_tx_desc); | 1362 | sizeof(union ixgbe_adv_tx_desc); |
934 | IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i), (tdba & DMA_32BIT_MASK)); | 1363 | IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j), |
935 | IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (tdba >> 32)); | 1364 | (tdba & DMA_32BIT_MASK)); |
936 | IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i), tdlen); | 1365 | IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32)); |
937 | IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0); | 1366 | IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), tdlen); |
938 | IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0); | 1367 | IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0); |
939 | adapter->tx_ring[i].head = IXGBE_TDH(i); | 1368 | IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0); |
940 | adapter->tx_ring[i].tail = IXGBE_TDT(i); | 1369 | adapter->tx_ring[i].head = IXGBE_TDH(j); |
1370 | adapter->tx_ring[i].tail = IXGBE_TDT(j); | ||
1371 | /* Disable Tx Head Writeback RO bit, since this hoses | ||
1372 | * bookkeeping if things aren't delivered in order. | ||
1373 | */ | ||
1374 | txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i)); | ||
1375 | txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN; | ||
1376 | IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl); | ||
941 | } | 1377 | } |
942 | |||
943 | IXGBE_WRITE_REG(hw, IXGBE_TIPG, IXGBE_TIPG_FIBER_DEFAULT); | ||
944 | } | 1378 | } |
945 | 1379 | ||
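
The new reg_idx hop in ixgbe_configure_tx() decouples the driver's ring-array index from the hardware queue number, so a ring can be backed by a non-contiguous hardware queue. Illustratively (these assignments are hypothetical; the real ones happen in the queue-allocation code elsewhere in the patch):

	adapter->tx_ring[0].reg_idx = 0;	/* ring 0 -> hw Tx queue 0 */
	adapter->tx_ring[1].reg_idx = 8;	/* ring 1 -> hw Tx queue 8 */

	j = adapter->tx_ring[i].reg_idx;	/* always address registers via j */
	IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j), tdba & DMA_32BIT_MASK);
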
946 | #define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \ | 1380 | #define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \ |
@@ -959,13 +1393,12 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) | |||
959 | struct ixgbe_hw *hw = &adapter->hw; | 1393 | struct ixgbe_hw *hw = &adapter->hw; |
960 | struct net_device *netdev = adapter->netdev; | 1394 | struct net_device *netdev = adapter->netdev; |
961 | int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; | 1395 | int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; |
1396 | int i, j; | ||
962 | u32 rdlen, rxctrl, rxcsum; | 1397 | u32 rdlen, rxctrl, rxcsum; |
963 | u32 random[10]; | 1398 | u32 random[10]; |
964 | u32 reta, mrqc; | ||
965 | int i; | ||
966 | u32 fctrl, hlreg0; | 1399 | u32 fctrl, hlreg0; |
967 | u32 srrctl; | ||
968 | u32 pages; | 1400 | u32 pages; |
1401 | u32 reta = 0, mrqc, srrctl; | ||
969 | 1402 | ||
970 | /* Decide whether to use packet split mode or not */ | 1403 | /* Decide whether to use packet split mode or not */ |
971 | if (netdev->mtu > ETH_DATA_LEN) | 1404 | if (netdev->mtu > ETH_DATA_LEN) |
@@ -985,6 +1418,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) | |||
985 | 1418 | ||
986 | fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL); | 1419 | fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL); |
987 | fctrl |= IXGBE_FCTRL_BAM; | 1420 | fctrl |= IXGBE_FCTRL_BAM; |
1421 | fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */ | ||
988 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl); | 1422 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl); |
989 | 1423 | ||
990 | hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); | 1424 | hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); |
@@ -1036,37 +1470,23 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) | |||
1036 | adapter->rx_ring[i].tail = IXGBE_RDT(i); | 1470 | adapter->rx_ring[i].tail = IXGBE_RDT(i); |
1037 | } | 1471 | } |
1038 | 1472 | ||
1039 | if (adapter->num_rx_queues > 1) { | 1473 | if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { |
1040 | /* Random 40bytes used as random key in RSS hash function */ | ||
1041 | get_random_bytes(&random[0], 40); | ||
1042 | |||
1043 | switch (adapter->num_rx_queues) { | ||
1044 | case 8: | ||
1045 | case 4: | ||
1046 | /* Bits [3:0] in each byte refers the Rx queue no */ | ||
1047 | reta = 0x00010203; | ||
1048 | break; | ||
1049 | case 2: | ||
1050 | reta = 0x00010001; | ||
1051 | break; | ||
1052 | default: | ||
1053 | reta = 0x00000000; | ||
1054 | break; | ||
1055 | } | ||
1056 | |||
1057 | /* Fill out redirection table */ | 1474 | /* Fill out redirection table */ |
1058 | for (i = 0; i < 32; i++) { | 1475 | for (i = 0, j = 0; i < 128; i++, j++) { |
1059 | IXGBE_WRITE_REG_ARRAY(hw, IXGBE_RETA(0), i, reta); | 1476 | if (j == adapter->ring_feature[RING_F_RSS].indices) |
1060 | if (adapter->num_rx_queues > 4) { | 1477 | j = 0; |
1061 | i++; | 1478 | /* reta = 4-byte sliding window of |
1062 | IXGBE_WRITE_REG_ARRAY(hw, IXGBE_RETA(0), i, | 1479 | * 0x00..(indices-1)(indices-1)00..etc. */ |
1063 | 0x04050607); | 1480 | reta = (reta << 8) | (j * 0x11); |
1064 | } | 1481 | if ((i & 3) == 3) |
1482 | IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta); | ||
1065 | } | 1483 | } |
1066 | 1484 | ||
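	/* note: a worked pass of the RETA fill above, assuming 4 RSS
	 * indices: j cycles 0..3, and j * 0x11 mirrors the queue number
	 * into both nibbles of the entry byte, so
	 *   i = 0..3: reta = 0x00 -> 0x0011 -> 0x001122 -> 0x00112233,
	 * and (i & 3) == 3 flushes IXGBE_RETA(0) = 0x00112233; the next
	 * four entries fill IXGBE_RETA(1), and so on through
	 * IXGBE_RETA(31) for all 128 slots. */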
1067 | /* Fill out hash function seeds */ | 1485 | /* Fill out hash function seeds */ |
1486 | /* XXX use a random constant here to glue certain flows */ | ||
1487 | get_random_bytes(&random[0], 40); | ||
1068 | for (i = 0; i < 10; i++) | 1488 | for (i = 0; i < 10; i++) |
1069 | IXGBE_WRITE_REG_ARRAY(hw, IXGBE_RSSRK(0), i, random[i]); | 1489 | IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random[i]); |
1070 | 1490 | ||
1071 | mrqc = IXGBE_MRQC_RSSEN | 1491 | mrqc = IXGBE_MRQC_RSSEN |
1072 | /* Perform hash on these packet types */ | 1492 | /* Perform hash on these packet types */ |
@@ -1080,26 +1500,23 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) | |||
1080 | | IXGBE_MRQC_RSS_FIELD_IPV6_UDP | 1500 | | IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
1081 | | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP; | 1501 | | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP; |
1082 | IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); | 1502 | IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); |
1503 | } | ||
1083 | 1504 | ||
1084 | /* Multiqueue and packet checksumming are mutually exclusive. */ | 1505 | rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); |
1085 | rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); | 1506 | |
1507 | if (adapter->flags & IXGBE_FLAG_RSS_ENABLED || | ||
1508 | adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED) { | ||
1509 | /* Disable indicating checksum in descriptor, enables | ||
1510 | * RSS hash */ | ||
1086 | rxcsum |= IXGBE_RXCSUM_PCSD; | 1511 | rxcsum |= IXGBE_RXCSUM_PCSD; |
1087 | IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); | ||
1088 | } else { | ||
1089 | /* Enable Receive Checksum Offload for TCP and UDP */ | ||
1090 | rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); | ||
1091 | if (adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED) { | ||
1092 | /* Enable IPv4 payload checksum for UDP fragments | ||
1093 | * Must be used in conjunction with packet-split. */ | ||
1094 | rxcsum |= IXGBE_RXCSUM_IPPCSE; | ||
1095 | } else { | ||
1096 | /* don't need to clear IPPCSE as it defaults to 0 */ | ||
1097 | } | ||
1098 | IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); | ||
1099 | } | 1512 | } |
1100 | /* Enable Receives */ | 1513 | if (!(rxcsum & IXGBE_RXCSUM_PCSD)) { |
1101 | IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl); | 1514 | /* Enable IPv4 payload checksum for UDP fragments |
1102 | rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); | 1515 | * if PCSD is not set */ |
1516 | rxcsum |= IXGBE_RXCSUM_IPPCSE; | ||
1517 | } | ||
1518 | |||
1519 | IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); | ||
1103 | } | 1520 | } |
1104 | 1521 | ||
1105 | static void ixgbe_vlan_rx_register(struct net_device *netdev, | 1522 | static void ixgbe_vlan_rx_register(struct net_device *netdev, |
@@ -1219,6 +1636,42 @@ static void ixgbe_set_multi(struct net_device *netdev) | |||
1219 | 1636 | ||
1220 | } | 1637 | } |
1221 | 1638 | ||
1639 | static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter) | ||
1640 | { | ||
1641 | int q_idx; | ||
1642 | struct ixgbe_q_vector *q_vector; | ||
1643 | int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; | ||
1644 | |||
1645 | /* legacy and MSI only use one vector */ | ||
1646 | if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) | ||
1647 | q_vectors = 1; | ||
1648 | |||
1649 | for (q_idx = 0; q_idx < q_vectors; q_idx++) { | ||
1650 | q_vector = &adapter->q_vector[q_idx]; | ||
1651 | if (!q_vector->rxr_count) | ||
1652 | continue; | ||
1653 | napi_enable(&q_vector->napi); | ||
1654 | } | ||
1655 | } | ||
1656 | |||
1657 | static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter) | ||
1658 | { | ||
1659 | int q_idx; | ||
1660 | struct ixgbe_q_vector *q_vector; | ||
1661 | int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; | ||
1662 | |||
1663 | /* legacy and MSI only use one vector */ | ||
1664 | if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) | ||
1665 | q_vectors = 1; | ||
1666 | |||
1667 | for (q_idx = 0; q_idx < q_vectors; q_idx++) { | ||
1668 | q_vector = &adapter->q_vector[q_idx]; | ||
1669 | if (!q_vector->rxr_count) | ||
1670 | continue; | ||
1671 | napi_disable(&q_vector->napi); | ||
1672 | } | ||
1673 | } | ||
1674 | |||
1222 | static void ixgbe_configure(struct ixgbe_adapter *adapter) | 1675 | static void ixgbe_configure(struct ixgbe_adapter *adapter) |
1223 | { | 1676 | { |
1224 | struct net_device *netdev = adapter->netdev; | 1677 | struct net_device *netdev = adapter->netdev; |
@@ -1238,30 +1691,35 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter) | |||
1238 | static int ixgbe_up_complete(struct ixgbe_adapter *adapter) | 1691 | static int ixgbe_up_complete(struct ixgbe_adapter *adapter) |
1239 | { | 1692 | { |
1240 | struct net_device *netdev = adapter->netdev; | 1693 | struct net_device *netdev = adapter->netdev; |
1241 | int i; | ||
1242 | u32 gpie = 0; | ||
1243 | struct ixgbe_hw *hw = &adapter->hw; | 1694 | struct ixgbe_hw *hw = &adapter->hw; |
1244 | u32 txdctl, rxdctl, mhadd; | 1695 | int i, j = 0; |
1245 | int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; | 1696 | int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; |
1697 | u32 txdctl, rxdctl, mhadd; | ||
1698 | u32 gpie; | ||
1246 | 1699 | ||
1247 | ixgbe_get_hw_control(adapter); | 1700 | ixgbe_get_hw_control(adapter); |
1248 | 1701 | ||
1249 | if (adapter->flags & (IXGBE_FLAG_MSIX_ENABLED | | 1702 | if ((adapter->flags & IXGBE_FLAG_MSIX_ENABLED) || |
1250 | IXGBE_FLAG_MSI_ENABLED)) { | 1703 | (adapter->flags & IXGBE_FLAG_MSI_ENABLED)) { |
1251 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { | 1704 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { |
1252 | gpie = (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME | | 1705 | gpie = (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME | |
1253 | IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD); | 1706 | IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD); |
1254 | } else { | 1707 | } else { |
1255 | /* MSI only */ | 1708 | /* MSI only */ |
1256 | gpie = (IXGBE_GPIE_EIAME | | 1709 | gpie = 0; |
1257 | IXGBE_GPIE_PBA_SUPPORT); | ||
1258 | } | 1710 | } |
1259 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_GPIE, gpie); | 1711 | /* XXX: to interrupt immediately for EICS writes, enable this */ |
1260 | gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE); | 1712 | /* gpie |= IXGBE_GPIE_EIMEN; */ |
1713 | IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); | ||
1261 | } | 1714 | } |
1262 | 1715 | ||
1263 | mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); | 1716 | if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { |
1717 | /* legacy interrupts, use EIAM to auto-mask when reading EICR, | ||
1718 | * specifically only auto mask tx and rx interrupts */ | ||
1719 | IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); | ||
1720 | } | ||
1264 | 1721 | ||
1722 | mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); | ||
1265 | if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) { | 1723 | if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) { |
1266 | mhadd &= ~IXGBE_MHADD_MFS_MASK; | 1724 | mhadd &= ~IXGBE_MHADD_MFS_MASK; |
1267 | mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT; | 1725 | mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT; |
@@ -1270,15 +1728,21 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter) | |||
1270 | } | 1728 | } |
1271 | 1729 | ||
1272 | for (i = 0; i < adapter->num_tx_queues; i++) { | 1730 | for (i = 0; i < adapter->num_tx_queues; i++) { |
1273 | txdctl = IXGBE_READ_REG(&adapter->hw, IXGBE_TXDCTL(i)); | 1731 | j = adapter->tx_ring[i].reg_idx; |
1732 | txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j)); | ||
1274 | txdctl |= IXGBE_TXDCTL_ENABLE; | 1733 | txdctl |= IXGBE_TXDCTL_ENABLE; |
1275 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_TXDCTL(i), txdctl); | 1734 | IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl); |
1276 | } | 1735 | } |
1277 | 1736 | ||
1278 | for (i = 0; i < adapter->num_rx_queues; i++) { | 1737 | for (i = 0; i < adapter->num_rx_queues; i++) { |
1279 | rxdctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(i)); | 1738 | j = adapter->rx_ring[i].reg_idx; |
1739 | rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); | ||
1740 | /* enable PTHRESH=32 descriptors (half the internal cache) | ||
1741 | * and HTHRESH=0 descriptors (to minimize latency on fetch), | ||
1742 | * this also removes a pesky rx_no_buffer_count increment */ | ||
1743 | rxdctl |= 0x0020; | ||
1280 | rxdctl |= IXGBE_RXDCTL_ENABLE; | 1744 | rxdctl |= IXGBE_RXDCTL_ENABLE; |
1281 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(i), rxdctl); | 1745 | IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), rxdctl); |
1282 | } | 1746 | } |
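	/* note: the rxdctl |= 0x0020 above encodes the PTHRESH=32 from the
	 * comment in RXDCTL's low prefetch-threshold bits, leaving HTHRESH
	 * at 0; the field placement is inferred from the 82598 RXDCTL
	 * layout rather than stated in this patch. */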
1283 | /* enable all receives */ | 1747 | /* enable all receives */ |
1284 | rxdctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); | 1748 | rxdctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); |
@@ -1291,7 +1755,11 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter) | |||
1291 | ixgbe_configure_msi_and_legacy(adapter); | 1755 | ixgbe_configure_msi_and_legacy(adapter); |
1292 | 1756 | ||
1293 | clear_bit(__IXGBE_DOWN, &adapter->state); | 1757 | clear_bit(__IXGBE_DOWN, &adapter->state); |
1294 | napi_enable(&adapter->napi); | 1758 | ixgbe_napi_enable_all(adapter); |
1759 | |||
1760 | /* clear any pending interrupts, may auto mask */ | ||
1761 | IXGBE_READ_REG(hw, IXGBE_EICR); | ||
1762 | |||
1295 | ixgbe_irq_enable(adapter); | 1763 | ixgbe_irq_enable(adapter); |
1296 | 1764 | ||
1297 | /* bring the link up in the watchdog, this could race with our first | 1765 | /* bring the link up in the watchdog, this could race with our first |
@@ -1333,7 +1801,7 @@ static int ixgbe_resume(struct pci_dev *pdev) | |||
1333 | { | 1801 | { |
1334 | struct net_device *netdev = pci_get_drvdata(pdev); | 1802 | struct net_device *netdev = pci_get_drvdata(pdev); |
1335 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | 1803 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
1336 | u32 err, num_rx_queues = adapter->num_rx_queues; | 1804 | u32 err; |
1337 | 1805 | ||
1338 | pci_set_power_state(pdev, PCI_D0); | 1806 | pci_set_power_state(pdev, PCI_D0); |
1339 | pci_restore_state(pdev); | 1807 | pci_restore_state(pdev); |
@@ -1349,7 +1817,7 @@ static int ixgbe_resume(struct pci_dev *pdev) | |||
1349 | pci_enable_wake(pdev, PCI_D3cold, 0); | 1817 | pci_enable_wake(pdev, PCI_D3cold, 0); |
1350 | 1818 | ||
1351 | if (netif_running(netdev)) { | 1819 | if (netif_running(netdev)) { |
1352 | err = ixgbe_request_irq(adapter, &num_rx_queues); | 1820 | err = ixgbe_request_irq(adapter); |
1353 | if (err) | 1821 | if (err) |
1354 | return err; | 1822 | return err; |
1355 | } | 1823 | } |
@@ -1449,27 +1917,27 @@ static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter, | |||
1449 | } | 1917 | } |
1450 | 1918 | ||
1451 | /** | 1919 | /** |
1452 | * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues | 1920 | * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues |
1453 | * @adapter: board private structure | 1921 | * @adapter: board private structure |
1454 | **/ | 1922 | **/ |
1455 | static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter) | 1923 | static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter) |
1456 | { | 1924 | { |
1457 | int i; | 1925 | int i; |
1458 | 1926 | ||
1459 | for (i = 0; i < adapter->num_tx_queues; i++) | 1927 | for (i = 0; i < adapter->num_rx_queues; i++) |
1460 | ixgbe_clean_tx_ring(adapter, &adapter->tx_ring[i]); | 1928 | ixgbe_clean_rx_ring(adapter, &adapter->rx_ring[i]); |
1461 | } | 1929 | } |
1462 | 1930 | ||
1463 | /** | 1931 | /** |
1464 | * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues | 1932 | * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues |
1465 | * @adapter: board private structure | 1933 | * @adapter: board private structure |
1466 | **/ | 1934 | **/ |
1467 | static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter) | 1935 | static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter) |
1468 | { | 1936 | { |
1469 | int i; | 1937 | int i; |
1470 | 1938 | ||
1471 | for (i = 0; i < adapter->num_rx_queues; i++) | 1939 | for (i = 0; i < adapter->num_tx_queues; i++) |
1472 | ixgbe_clean_rx_ring(adapter, &adapter->rx_ring[i]); | 1940 | ixgbe_clean_tx_ring(adapter, &adapter->tx_ring[i]); |
1473 | } | 1941 | } |
1474 | 1942 | ||
1475 | void ixgbe_down(struct ixgbe_adapter *adapter) | 1943 | void ixgbe_down(struct ixgbe_adapter *adapter) |
@@ -1493,10 +1961,9 @@ void ixgbe_down(struct ixgbe_adapter *adapter) | |||
1493 | IXGBE_WRITE_FLUSH(&adapter->hw); | 1961 | IXGBE_WRITE_FLUSH(&adapter->hw); |
1494 | msleep(10); | 1962 | msleep(10); |
1495 | 1963 | ||
1496 | napi_disable(&adapter->napi); | ||
1497 | |||
1498 | ixgbe_irq_disable(adapter); | 1964 | ixgbe_irq_disable(adapter); |
1499 | 1965 | ||
1966 | ixgbe_napi_disable_all(adapter); | ||
1500 | del_timer_sync(&adapter->watchdog_timer); | 1967 | del_timer_sync(&adapter->watchdog_timer); |
1501 | 1968 | ||
1502 | netif_carrier_off(netdev); | 1969 | netif_carrier_off(netdev); |
@@ -1547,27 +2014,37 @@ static void ixgbe_shutdown(struct pci_dev *pdev) | |||
1547 | } | 2014 | } |
1548 | 2015 | ||
1549 | /** | 2016 | /** |
1550 | * ixgbe_clean - NAPI Rx polling callback | 2017 | * ixgbe_poll - NAPI Rx polling callback |
1551 | * @adapter: board private structure | 2018 | * @napi: structure for representing this polling device |
2019 | * @budget: how many packets the driver is allowed to clean | ||
2020 | * | ||
2021 | * This function is used for legacy and MSI in NAPI mode | ||
1552 | **/ | 2022 | **/ |
1553 | static int ixgbe_clean(struct napi_struct *napi, int budget) | 2023 | static int ixgbe_poll(struct napi_struct *napi, int budget) |
1554 | { | 2024 | { |
1555 | struct ixgbe_adapter *adapter = container_of(napi, | 2025 | struct ixgbe_q_vector *q_vector = container_of(napi, |
1556 | struct ixgbe_adapter, napi); | 2026 | struct ixgbe_q_vector, napi); |
1557 | struct net_device *netdev = adapter->netdev; | 2027 | struct ixgbe_adapter *adapter = q_vector->adapter; |
1558 | int tx_cleaned = 0, work_done = 0; | 2028 | int tx_cleaned = 0, work_done = 0; |
1559 | 2029 | ||
1560 | /* In non-MSIX case, there is no multi-Tx/Rx queue */ | 2030 | #ifdef CONFIG_DCA |
2031 | if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { | ||
2032 | ixgbe_update_tx_dca(adapter, adapter->tx_ring); | ||
2033 | ixgbe_update_rx_dca(adapter, adapter->rx_ring); | ||
2034 | } | ||
2035 | #endif | ||
2036 | |||
1561 | tx_cleaned = ixgbe_clean_tx_irq(adapter, adapter->tx_ring); | 2037 | tx_cleaned = ixgbe_clean_tx_irq(adapter, adapter->tx_ring); |
1562 | ixgbe_clean_rx_irq(adapter, &adapter->rx_ring[0], &work_done, | 2038 | ixgbe_clean_rx_irq(adapter, adapter->rx_ring, &work_done, budget); |
1563 | budget); | ||
1564 | 2039 | ||
1565 | if (tx_cleaned) | 2040 | if (tx_cleaned) |
1566 | work_done = budget; | 2041 | work_done = budget; |
1567 | 2042 | ||
1568 | /* If budget not fully consumed, exit the polling mode */ | 2043 | /* If budget not fully consumed, exit the polling mode */ |
1569 | if (work_done < budget) { | 2044 | if (work_done < budget) { |
1570 | netif_rx_complete(netdev, napi); | 2045 | netif_rx_complete(adapter->netdev, napi); |
2046 | if (adapter->rx_eitr < IXGBE_MIN_ITR_USECS) | ||
2047 | ixgbe_set_itr(adapter); | ||
1571 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) | 2048 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) |
1572 | ixgbe_irq_enable(adapter); | 2049 | ixgbe_irq_enable(adapter); |
1573 | } | 2050 | } |
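ixgbe_poll() follows the standard NAPI contract: clean at most budget packets, report how many were cleaned, and only leave polled mode and re-arm interrupts when the budget was not exhausted. A stripped-down sketch of that contract, with hypothetical helpers:

	static int example_poll(struct napi_struct *napi, int budget)
	{
		int work_done = clean_rx_ring(napi, budget);	/* hypothetical */

		if (work_done < budget) {
			netif_rx_complete(netdev, napi);	/* exit polled mode */
			reenable_device_irqs();			/* hypothetical */
		}
		return work_done;
	}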
@@ -1597,6 +2074,136 @@ static void ixgbe_reset_task(struct work_struct *work) | |||
1597 | ixgbe_reinit_locked(adapter); | 2074 | ixgbe_reinit_locked(adapter); |
1598 | } | 2075 | } |
1599 | 2076 | ||
2077 | static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, | ||
2078 | int vectors) | ||
2079 | { | ||
2080 | int err, vector_threshold; | ||
2081 | |||
2082 | /* We'll want at least 3 (vector_threshold): | ||
2083 | * 1) TxQ[0] Cleanup | ||
2084 | * 2) RxQ[0] Cleanup | ||
2085 | * 3) Other (Link Status Change, etc.) | ||
2086 | * 4) TCP Timer (optional) | ||
2087 | */ | ||
2088 | vector_threshold = MIN_MSIX_COUNT; | ||
2089 | |||
2090 | /* The more we get, the more we will assign to Tx/Rx Cleanup | ||
2091 | * for the separate queues...where Rx Cleanup >= Tx Cleanup. | ||
2092 | * Right now, we simply care about how many we'll get; we'll | ||
2093 | * set them up later while requesting irq's. | ||
2094 | * set them up later while requesting irqs. | ||
2095 | while (vectors >= vector_threshold) { | ||
2096 | err = pci_enable_msix(adapter->pdev, adapter->msix_entries, | ||
2097 | vectors); | ||
2098 | if (!err) /* Success in acquiring all requested vectors. */ | ||
2099 | break; | ||
2100 | else if (err < 0) | ||
2101 | vectors = 0; /* Nasty failure, quit now */ | ||
2102 | else /* err == number of vectors we should try again with */ | ||
2103 | vectors = err; | ||
2104 | } | ||
2105 | |||
2106 | if (vectors < vector_threshold) { | ||
2107 | /* Can't allocate enough MSI-X interrupts? Oh well. | ||
2108 | * This just means we'll go with either a single MSI | ||
2109 | * vector or fall back to legacy interrupts. | ||
2110 | */ | ||
2111 | DPRINTK(HW, DEBUG, "Unable to allocate MSI-X interrupts\n"); | ||
2112 | adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; | ||
2113 | kfree(adapter->msix_entries); | ||
2114 | adapter->msix_entries = NULL; | ||
2115 | adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; | ||
2116 | adapter->num_tx_queues = 1; | ||
2117 | adapter->num_rx_queues = 1; | ||
2118 | } else { | ||
2119 | adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */ | ||
2120 | adapter->num_msix_vectors = vectors; | ||
2121 | } | ||
2122 | } | ||
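The retry loop above leans on the three-way return convention of pci_enable_msix() in this kernel generation: 0 means all requested vectors were acquired, a negative errno means MSI-X is unusable, and a positive value is the number of vectors actually available, to be retried with. One retry round as a sketch (pdev and entries assumed already set up):

	err = pci_enable_msix(pdev, entries, nvec);
	if (err > 0)				/* only err vectors available */
		err = pci_enable_msix(pdev, entries, err);
	if (err < 0)
		nvec = 0;			/* hard failure: fall back to MSI/legacy */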
2123 | |||
2124 | static void __devinit ixgbe_set_num_queues(struct ixgbe_adapter *adapter) | ||
2125 | { | ||
2126 | int nrq, ntq; | ||
2127 | int feature_mask = 0, rss_i, rss_m; | ||
2128 | |||
2129 | /* Number of supported queues */ | ||
2130 | switch (adapter->hw.mac.type) { | ||
2131 | case ixgbe_mac_82598EB: | ||
2132 | rss_i = adapter->ring_feature[RING_F_RSS].indices; | ||
2133 | rss_m = 0; | ||
2134 | feature_mask |= IXGBE_FLAG_RSS_ENABLED; | ||
2135 | |||
2136 | switch (adapter->flags & feature_mask) { | ||
2137 | case (IXGBE_FLAG_RSS_ENABLED): | ||
2138 | rss_m = 0xF; | ||
2139 | nrq = rss_i; | ||
2140 | #ifdef CONFIG_NETDEVICES_MULTIQUEUE | ||
2141 | ntq = rss_i; | ||
2142 | #else | ||
2143 | ntq = 1; | ||
2144 | #endif | ||
2145 | break; | ||
2146 | case 0: | ||
2147 | default: | ||
2148 | rss_i = 0; | ||
2149 | rss_m = 0; | ||
2150 | nrq = 1; | ||
2151 | ntq = 1; | ||
2152 | break; | ||
2153 | } | ||
2154 | |||
2155 | adapter->ring_feature[RING_F_RSS].indices = rss_i; | ||
2156 | adapter->ring_feature[RING_F_RSS].mask = rss_m; | ||
2157 | break; | ||
2158 | default: | ||
2159 | nrq = 1; | ||
2160 | ntq = 1; | ||
2161 | break; | ||
2162 | } | ||
2163 | |||
2164 | adapter->num_rx_queues = nrq; | ||
2165 | adapter->num_tx_queues = ntq; | ||
2166 | } | ||
2167 | |||
2168 | /** | ||
2169 | * ixgbe_cache_ring_register - Descriptor ring to register mapping | ||
2170 | * @adapter: board private structure to initialize | ||
2171 | * | ||
2172 | * Once we know the feature-set enabled for the device, we'll cache | ||
2173 | * the register offset the descriptor ring is assigned to. | ||
2174 | **/ | ||
2175 | static void __devinit ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) | ||
2176 | { | ||
2177 | /* TODO: Remove all uses of the indices in the cases where multiple | ||
2178 | * features are OR'd together, if the feature set makes sense. | ||
2179 | */ | ||
2180 | int feature_mask = 0, rss_i; | ||
2181 | int i, txr_idx, rxr_idx; | ||
2182 | |||
2183 | /* Number of supported queues */ | ||
2184 | switch (adapter->hw.mac.type) { | ||
2185 | case ixgbe_mac_82598EB: | ||
2186 | rss_i = adapter->ring_feature[RING_F_RSS].indices; | ||
2187 | txr_idx = 0; | ||
2188 | rxr_idx = 0; | ||
2189 | feature_mask |= IXGBE_FLAG_RSS_ENABLED; | ||
2190 | switch (adapter->flags & feature_mask) { | ||
2191 | case (IXGBE_FLAG_RSS_ENABLED): | ||
2192 | for (i = 0; i < adapter->num_rx_queues; i++) | ||
2193 | adapter->rx_ring[i].reg_idx = i; | ||
2194 | for (i = 0; i < adapter->num_tx_queues; i++) | ||
2195 | adapter->tx_ring[i].reg_idx = i; | ||
2196 | break; | ||
2197 | case 0: | ||
2198 | default: | ||
2199 | break; | ||
2200 | } | ||
2201 | break; | ||
2202 | default: | ||
2203 | break; | ||
2204 | } | ||
2205 | } | ||
2206 | |||
1600 | /** | 2207 | /** |
1601 | * ixgbe_alloc_queues - Allocate memory for all rings | 2208 | * ixgbe_alloc_queues - Allocate memory for all rings |
1602 | * @adapter: board private structure to initialize | 2209 | * @adapter: board private structure to initialize |
@@ -1612,25 +2219,167 @@ static int __devinit ixgbe_alloc_queues(struct ixgbe_adapter *adapter) | |||
1612 | adapter->tx_ring = kcalloc(adapter->num_tx_queues, | 2219 | adapter->tx_ring = kcalloc(adapter->num_tx_queues, |
1613 | sizeof(struct ixgbe_ring), GFP_KERNEL); | 2220 | sizeof(struct ixgbe_ring), GFP_KERNEL); |
1614 | if (!adapter->tx_ring) | 2221 | if (!adapter->tx_ring) |
1615 | return -ENOMEM; | 2222 | goto err_tx_ring_allocation; |
1616 | |||
1617 | for (i = 0; i < adapter->num_tx_queues; i++) | ||
1618 | adapter->tx_ring[i].count = IXGBE_DEFAULT_TXD; | ||
1619 | 2223 | ||
1620 | adapter->rx_ring = kcalloc(adapter->num_rx_queues, | 2224 | adapter->rx_ring = kcalloc(adapter->num_rx_queues, |
1621 | sizeof(struct ixgbe_ring), GFP_KERNEL); | 2225 | sizeof(struct ixgbe_ring), GFP_KERNEL); |
1622 | if (!adapter->rx_ring) { | 2226 | if (!adapter->rx_ring) |
1623 | kfree(adapter->tx_ring); | 2227 | goto err_rx_ring_allocation; |
1624 | return -ENOMEM; | ||
1625 | } | ||
1626 | 2228 | ||
2229 | for (i = 0; i < adapter->num_tx_queues; i++) { | ||
2230 | adapter->tx_ring[i].count = IXGBE_DEFAULT_TXD; | ||
2231 | adapter->tx_ring[i].queue_index = i; | ||
2232 | } | ||
1627 | for (i = 0; i < adapter->num_rx_queues; i++) { | 2233 | for (i = 0; i < adapter->num_rx_queues; i++) { |
1628 | adapter->rx_ring[i].adapter = adapter; | ||
1629 | adapter->rx_ring[i].itr_register = IXGBE_EITR(i); | ||
1630 | adapter->rx_ring[i].count = IXGBE_DEFAULT_RXD; | 2234 | adapter->rx_ring[i].count = IXGBE_DEFAULT_RXD; |
2235 | adapter->rx_ring[i].queue_index = i; | ||
1631 | } | 2236 | } |
1632 | 2237 | ||
2238 | ixgbe_cache_ring_register(adapter); | ||
2239 | |||
1633 | return 0; | 2240 | return 0; |
2241 | |||
2242 | err_rx_ring_allocation: | ||
2243 | kfree(adapter->tx_ring); | ||
2244 | err_tx_ring_allocation: | ||
2245 | return -ENOMEM; | ||
2246 | } | ||
2247 | |||
2248 | /** | ||
2249 | * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported | ||
2250 | * @adapter: board private structure to initialize | ||
2251 | * | ||
2252 | * Attempt to configure the interrupts using the best available | ||
2253 | * capabilities of the hardware and the kernel. | ||
2254 | **/ | ||
2255 | static int __devinit ixgbe_set_interrupt_capability(struct ixgbe_adapter | ||
2256 | *adapter) | ||
2257 | { | ||
2258 | int err = 0; | ||
2259 | int vector, v_budget; | ||
2260 | |||
2261 | /* | ||
2262 | * It's easy to be greedy for MSI-X vectors, but it really | ||
2263 | * doesn't do us much good if we have a lot more vectors | ||
2264 | * than CPU's. So let's be conservative and only ask for | ||
2265 | * (roughly) twice the number of vectors as there are CPU's. | ||
2266 | */ | ||
2267 | v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues, | ||
2268 | (int)(num_online_cpus() * 2)) + NON_Q_VECTORS; | ||
2269 | |||
2270 | /* | ||
2271 | * At the same time, hardware can only support a maximum of | ||
2272 | * MAX_MSIX_COUNT vectors. With features such as RSS and VMDq, | ||
2273 | * we can easily reach upwards of 64 Rx descriptor queues and | ||
2274 | * 32 Tx queues. Thus, we cap it off in those rare cases where | ||
2275 | * the CPU count also exceeds our vector limit. | ||
2276 | */ | ||
2277 | v_budget = min(v_budget, MAX_MSIX_COUNT); | ||
2278 | |||
2279 | /* A failure in MSI-X entry allocation isn't fatal, but it does | ||
2280 | * mean we disable MSI-X capabilities of the adapter. */ | ||
2281 | adapter->msix_entries = kcalloc(v_budget, | ||
2282 | sizeof(struct msix_entry), GFP_KERNEL); | ||
2283 | if (!adapter->msix_entries) { | ||
2284 | adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; | ||
2285 | ixgbe_set_num_queues(adapter); | ||
2286 | kfree(adapter->tx_ring); | ||
2287 | kfree(adapter->rx_ring); | ||
2288 | err = ixgbe_alloc_queues(adapter); | ||
2289 | if (err) { | ||
2290 | DPRINTK(PROBE, ERR, "Unable to allocate memory " | ||
2291 | "for queues\n"); | ||
2292 | goto out; | ||
2293 | } | ||
2294 | |||
2295 | goto try_msi; | ||
2296 | } | ||
2297 | |||
2298 | for (vector = 0; vector < v_budget; vector++) | ||
2299 | adapter->msix_entries[vector].entry = vector; | ||
2300 | |||
2301 | ixgbe_acquire_msix_vectors(adapter, v_budget); | ||
2302 | |||
2303 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) | ||
2304 | goto out; | ||
2305 | |||
2306 | try_msi: | ||
2307 | err = pci_enable_msi(adapter->pdev); | ||
2308 | if (!err) { | ||
2309 | adapter->flags |= IXGBE_FLAG_MSI_ENABLED; | ||
2310 | } else { | ||
2311 | DPRINTK(HW, DEBUG, "Unable to allocate MSI interrupt, " | ||
2312 | "falling back to legacy. Error: %d\n", err); | ||
2313 | /* reset err */ | ||
2314 | err = 0; | ||
2315 | } | ||
2316 | |||
2317 | out: | ||
2318 | #ifdef CONFIG_NETDEVICES_MULTIQUEUE | ||
2319 | /* Notify the stack of the (possibly) reduced Tx Queue count. */ | ||
2320 | adapter->netdev->egress_subqueue_count = adapter->num_tx_queues; | ||
2321 | #endif | ||
2322 | |||
2323 | return err; | ||
2324 | } | ||
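Boiled down, the function is a strict fallback ladder, and no rung is fatal. Restated as a sketch with a hypothetical helper rather than a quote of the code:

	if (msix_acquired(adapter))		/* hypothetical: MSI-X worked */
		return 0;
	if (!pci_enable_msi(adapter->pdev))
		adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
	/* else stay on legacy INTx */
	return 0;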
2325 | |||
2326 | static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter) | ||
2327 | { | ||
2328 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { | ||
2329 | adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; | ||
2330 | pci_disable_msix(adapter->pdev); | ||
2331 | kfree(adapter->msix_entries); | ||
2332 | adapter->msix_entries = NULL; | ||
2333 | } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { | ||
2334 | adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED; | ||
2335 | pci_disable_msi(adapter->pdev); | ||
2336 | } | ||
2337 | return; | ||
2338 | } | ||
2339 | |||
2340 | /** | ||
2341 | * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme | ||
2342 | * @adapter: board private structure to initialize | ||
2343 | * | ||
2344 | * We determine which interrupt scheme to use based on... | ||
2345 | * - Kernel support (MSI, MSI-X) | ||
2346 | * - which can be user-defined (via MODULE_PARAM) | ||
2347 | * - Hardware queue count (num_*_queues) | ||
2348 | * - defined by miscellaneous hardware support/features (RSS, etc.) | ||
2349 | **/ | ||
2350 | static int __devinit ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter) | ||
2351 | { | ||
2352 | int err; | ||
2353 | |||
2354 | /* Number of supported queues */ | ||
2355 | ixgbe_set_num_queues(adapter); | ||
2356 | |||
2357 | err = ixgbe_alloc_queues(adapter); | ||
2358 | if (err) { | ||
2359 | DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n"); | ||
2360 | goto err_alloc_queues; | ||
2361 | } | ||
2362 | |||
2363 | err = ixgbe_set_interrupt_capability(adapter); | ||
2364 | if (err) { | ||
2365 | DPRINTK(PROBE, ERR, "Unable to setup interrupt capabilities\n"); | ||
2366 | goto err_set_interrupt; | ||
2367 | } | ||
2368 | |||
2369 | DPRINTK(DRV, INFO, "Multiqueue %s: Rx Queue count = %u, " | ||
2370 | "Tx Queue count = %u\n", | ||
2371 | (adapter->num_rx_queues > 1) ? "Enabled" : | ||
2372 | "Disabled", adapter->num_rx_queues, adapter->num_tx_queues); | ||
2373 | |||
2374 | set_bit(__IXGBE_DOWN, &adapter->state); | ||
2375 | |||
2376 | return 0; | ||
2377 | |||
2378 | err_set_interrupt: | ||
2379 | kfree(adapter->tx_ring); | ||
2380 | kfree(adapter->rx_ring); | ||
2381 | err_alloc_queues: | ||
2382 | return err; | ||
1634 | } | 2383 | } |
1635 | 2384 | ||
1636 | /** | 2385 | /** |
@@ -1645,11 +2394,22 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) | |||
1645 | { | 2394 | { |
1646 | struct ixgbe_hw *hw = &adapter->hw; | 2395 | struct ixgbe_hw *hw = &adapter->hw; |
1647 | struct pci_dev *pdev = adapter->pdev; | 2396 | struct pci_dev *pdev = adapter->pdev; |
2397 | unsigned int rss; | ||
2398 | |||
2399 | /* Set capability flags */ | ||
2400 | rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus()); | ||
2401 | adapter->ring_feature[RING_F_RSS].indices = rss; | ||
2402 | adapter->flags |= IXGBE_FLAG_RSS_ENABLED; | ||
2403 | |||
2404 | /* Enable Dynamic interrupt throttling by default */ | ||
2405 | adapter->rx_eitr = 1; | ||
2406 | adapter->tx_eitr = 1; | ||
1648 | 2407 | ||
1649 | /* default flow control settings */ | 2408 | /* default flow control settings */ |
1650 | hw->fc.original_type = ixgbe_fc_full; | 2409 | hw->fc.original_type = ixgbe_fc_full; |
1651 | hw->fc.type = ixgbe_fc_full; | 2410 | hw->fc.type = ixgbe_fc_full; |
1652 | 2411 | ||
2412 | /* select 10G link by default */ | ||
1653 | hw->mac.link_mode_select = IXGBE_AUTOC_LMS_10G_LINK_NO_AN; | 2413 | hw->mac.link_mode_select = IXGBE_AUTOC_LMS_10G_LINK_NO_AN; |
1654 | if (hw->mac.ops.reset(hw)) { | 2414 | if (hw->mac.ops.reset(hw)) { |
1655 | dev_err(&pdev->dev, "HW Init failed\n"); | 2415 | dev_err(&pdev->dev, "HW Init failed\n"); |
@@ -1667,16 +2427,9 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) | |||
1667 | return -EIO; | 2427 | return -EIO; |
1668 | } | 2428 | } |
1669 | 2429 | ||
1670 | /* Set the default values */ | 2430 | /* enable rx csum by default */ |
1671 | adapter->num_rx_queues = IXGBE_DEFAULT_RXQ; | ||
1672 | adapter->num_tx_queues = 1; | ||
1673 | adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED; | 2431 | adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED; |
1674 | 2432 | ||
1675 | if (ixgbe_alloc_queues(adapter)) { | ||
1676 | dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); | ||
1677 | return -ENOMEM; | ||
1678 | } | ||
1679 | |||
1680 | set_bit(__IXGBE_DOWN, &adapter->state); | 2433 | set_bit(__IXGBE_DOWN, &adapter->state); |
1681 | 2434 | ||
1682 | return 0; | 2435 | return 0; |
@@ -1716,7 +2469,6 @@ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter, | |||
1716 | return -ENOMEM; | 2469 | return -ENOMEM; |
1717 | } | 2470 | } |
1718 | 2471 | ||
1719 | txdr->adapter = adapter; | ||
1720 | txdr->next_to_use = 0; | 2472 | txdr->next_to_use = 0; |
1721 | txdr->next_to_clean = 0; | 2473 | txdr->next_to_clean = 0; |
1722 | txdr->work_limit = txdr->count; | 2474 | txdr->work_limit = txdr->count; |
@@ -1735,7 +2487,7 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, | |||
1735 | struct ixgbe_ring *rxdr) | 2487 | struct ixgbe_ring *rxdr) |
1736 | { | 2488 | { |
1737 | struct pci_dev *pdev = adapter->pdev; | 2489 | struct pci_dev *pdev = adapter->pdev; |
1738 | int size, desc_len; | 2490 | int size; |
1739 | 2491 | ||
1740 | size = sizeof(struct ixgbe_rx_buffer) * rxdr->count; | 2492 | size = sizeof(struct ixgbe_rx_buffer) * rxdr->count; |
1741 | rxdr->rx_buffer_info = vmalloc(size); | 2493 | rxdr->rx_buffer_info = vmalloc(size); |
@@ -1746,10 +2498,8 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, | |||
1746 | } | 2498 | } |
1747 | memset(rxdr->rx_buffer_info, 0, size); | 2499 | memset(rxdr->rx_buffer_info, 0, size); |
1748 | 2500 | ||
1749 | desc_len = sizeof(union ixgbe_adv_rx_desc); | ||
1750 | |||
1751 | /* Round up to nearest 4K */ | 2501 | /* Round up to nearest 4K */ |
1752 | rxdr->size = rxdr->count * desc_len; | 2502 | rxdr->size = rxdr->count * sizeof(union ixgbe_adv_rx_desc); |
1753 | rxdr->size = ALIGN(rxdr->size, 4096); | 2503 | rxdr->size = ALIGN(rxdr->size, 4096); |
1754 | 2504 | ||
1755 | rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma); | 2505 | rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma); |
@@ -1763,7 +2513,6 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, | |||
1763 | 2513 | ||
1764 | rxdr->next_to_clean = 0; | 2514 | rxdr->next_to_clean = 0; |
1765 | rxdr->next_to_use = 0; | 2515 | rxdr->next_to_use = 0; |
1766 | rxdr->adapter = adapter; | ||
1767 | 2516 | ||
1768 | return 0; | 2517 | return 0; |
1769 | } | 2518 | } |
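A worked example of the rounding above, with assumed numbers: advanced Rx descriptors are 16 bytes, so a 1000-entry ring needs 16000 bytes of descriptor memory, and ALIGN() bumps that to the next 4K multiple:

	rxdr->size = 1000 * 16;			/* 16000 bytes; count is assumed */
	rxdr->size = ALIGN(rxdr->size, 4096);	/* -> 16384 */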
@@ -1841,8 +2590,7 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter) | |||
1841 | } | 2590 | } |
1842 | 2591 | ||
1843 | /** | 2592 | /** |
1844 | * ixgbe_setup_all_tx_resources - wrapper to allocate Tx resources | 2593 | * ixgbe_setup_all_tx_resources - allocate all queues Tx resources |
1845 | * (Descriptors) for all queues | ||
1846 | * @adapter: board private structure | 2594 | * @adapter: board private structure |
1847 | * | 2595 | * |
1848 | * If this function returns with an error, then it's possible one or | 2596 | * If this function returns with an error, then it's possible one or |
@@ -1868,8 +2616,7 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter) | |||
1868 | } | 2616 | } |
1869 | 2617 | ||
1870 | /** | 2618 | /** |
1871 | * ixgbe_setup_all_rx_resources - wrapper to allocate Rx resources | 2619 | * ixgbe_setup_all_rx_resources - allocate all queues Rx resources |
1872 | * (Descriptors) for all queues | ||
1873 | * @adapter: board private structure | 2620 | * @adapter: board private structure |
1874 | * | 2621 | * |
1875 | * If this function returns with an error, then it's possible one or | 2622 | * If this function returns with an error, then it's possible one or |
@@ -1911,6 +2658,9 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) | |||
1911 | (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) | 2658 | (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) |
1912 | return -EINVAL; | 2659 | return -EINVAL; |
1913 | 2660 | ||
2661 | DPRINTK(PROBE, INFO, "changing MTU from %d to %d\n", | ||
2662 | netdev->mtu, new_mtu); | ||
2663 | /* must set new MTU before calling down or up */ | ||
1914 | netdev->mtu = new_mtu; | 2664 | netdev->mtu = new_mtu; |
1915 | 2665 | ||
1916 | if (netif_running(netdev)) | 2666 | if (netif_running(netdev)) |
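The max_frame bound checked at the top of this hunk comes from the requested MTU plus the layer-2 overheads; a sketch of the usual computation (the constants are the conventional Ethernet header and CRC sizes, not quoted from this hunk):

	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;	/* L2 header + CRC */

	if (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)
		return -EINVAL;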
@@ -1935,23 +2685,16 @@ static int ixgbe_open(struct net_device *netdev) | |||
1935 | { | 2685 | { |
1936 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | 2686 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
1937 | int err; | 2687 | int err; |
1938 | u32 num_rx_queues = adapter->num_rx_queues; | ||
1939 | 2688 | ||
1940 | /* disallow open during test */ | 2689 | /* disallow open during test */ |
1941 | if (test_bit(__IXGBE_TESTING, &adapter->state)) | 2690 | if (test_bit(__IXGBE_TESTING, &adapter->state)) |
1942 | return -EBUSY; | 2691 | return -EBUSY; |
1943 | 2692 | ||
1944 | try_intr_reinit: | ||
1945 | /* allocate transmit descriptors */ | 2693 | /* allocate transmit descriptors */ |
1946 | err = ixgbe_setup_all_tx_resources(adapter); | 2694 | err = ixgbe_setup_all_tx_resources(adapter); |
1947 | if (err) | 2695 | if (err) |
1948 | goto err_setup_tx; | 2696 | goto err_setup_tx; |
1949 | 2697 | ||
1950 | if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { | ||
1951 | num_rx_queues = 1; | ||
1952 | adapter->num_rx_queues = num_rx_queues; | ||
1953 | } | ||
1954 | |||
1955 | /* allocate receive descriptors */ | 2698 | /* allocate receive descriptors */ |
1956 | err = ixgbe_setup_all_rx_resources(adapter); | 2699 | err = ixgbe_setup_all_rx_resources(adapter); |
1957 | if (err) | 2700 | if (err) |
@@ -1959,31 +2702,10 @@ try_intr_reinit: | |||
1959 | 2702 | ||
1960 | ixgbe_configure(adapter); | 2703 | ixgbe_configure(adapter); |
1961 | 2704 | ||
1962 | err = ixgbe_request_irq(adapter, &num_rx_queues); | 2705 | err = ixgbe_request_irq(adapter); |
1963 | if (err) | 2706 | if (err) |
1964 | goto err_req_irq; | 2707 | goto err_req_irq; |
1965 | 2708 | ||
1966 | /* ixgbe_request might have reduced num_rx_queues */ | ||
1967 | if (num_rx_queues < adapter->num_rx_queues) { | ||
1968 | /* We didn't get MSI-X, so we need to release everything, | ||
1969 | * set our Rx queue count to num_rx_queues, and redo the | ||
1970 | * whole init process. | ||
1971 | */ | ||
1972 | ixgbe_free_irq(adapter); | ||
1973 | if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { | ||
1974 | pci_disable_msi(adapter->pdev); | ||
1975 | adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED; | ||
1976 | } | ||
1977 | ixgbe_free_all_rx_resources(adapter); | ||
1978 | ixgbe_free_all_tx_resources(adapter); | ||
1979 | adapter->num_rx_queues = num_rx_queues; | ||
1980 | |||
1981 | /* Reset the hardware, and start over. */ | ||
1982 | ixgbe_reset(adapter); | ||
1983 | |||
1984 | goto try_intr_reinit; | ||
1985 | } | ||
1986 | |||
1987 | err = ixgbe_up_complete(adapter); | 2709 | err = ixgbe_up_complete(adapter); |
1988 | if (err) | 2710 | if (err) |
1989 | goto err_up; | 2711 | goto err_up; |
@@ -2119,6 +2841,9 @@ static void ixgbe_watchdog(unsigned long data) | |||
2119 | struct net_device *netdev = adapter->netdev; | 2841 | struct net_device *netdev = adapter->netdev; |
2120 | bool link_up; | 2842 | bool link_up; |
2121 | u32 link_speed = 0; | 2843 | u32 link_speed = 0; |
2844 | #ifdef CONFIG_NETDEVICES_MULTIQUEUE | ||
2845 | int i; | ||
2846 | #endif | ||
2122 | 2847 | ||
2123 | adapter->hw.mac.ops.check_link(&adapter->hw, &(link_speed), &link_up); | 2848 | adapter->hw.mac.ops.check_link(&adapter->hw, &(link_speed), &link_up); |
2124 | 2849 | ||
@@ -2140,6 +2865,10 @@ static void ixgbe_watchdog(unsigned long data) | |||
2140 | 2865 | ||
2141 | netif_carrier_on(netdev); | 2866 | netif_carrier_on(netdev); |
2142 | netif_wake_queue(netdev); | 2867 | netif_wake_queue(netdev); |
2868 | #ifdef CONFIG_NETDEVICES_MULTIQUEUE | ||
2869 | for (i = 0; i < adapter->num_tx_queues; i++) | ||
2870 | netif_wake_subqueue(netdev, i); | ||
2871 | #endif | ||
2143 | } else { | 2872 | } else { |
2144 | /* Force detection of hung controller */ | 2873 | /* Force detection of hung controller */ |
2145 | adapter->detect_tx_hung = true; | 2874 | adapter->detect_tx_hung = true; |
@@ -2154,10 +2883,23 @@ static void ixgbe_watchdog(unsigned long data) | |||
2154 | 2883 | ||
2155 | ixgbe_update_stats(adapter); | 2884 | ixgbe_update_stats(adapter); |
2156 | 2885 | ||
2157 | /* Reset the timer */ | 2886 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) { |
2158 | if (!test_bit(__IXGBE_DOWN, &adapter->state)) | 2887 | /* Cause software interrupt to ensure rx rings are cleaned */ |
2888 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { | ||
2889 | u32 eics = | ||
2890 | (1 << (adapter->num_msix_vectors - NON_Q_VECTORS)) - 1; | ||
2891 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, eics); | ||
2892 | } else { | ||
2893 | /* for legacy and MSI interrupts don't set any bits that | ||
2894 | * are enabled for EIAM, because this operation would | ||
2895 | * set *both* EIMS and EICS for any bit in EIAM */ | ||
2896 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, | ||
2897 | (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER)); | ||
2898 | } | ||
2899 | /* Reset the timer */ | ||
2159 | mod_timer(&adapter->watchdog_timer, | 2900 | mod_timer(&adapter->watchdog_timer, |
2160 | round_jiffies(jiffies + 2 * HZ)); | 2901 | round_jiffies(jiffies + 2 * HZ)); |
2902 | } | ||
2161 | } | 2903 | } |
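The MSI-X branch builds an EICS mask with one bit per queue vector. A worked example with assumed numbers: if NON_Q_VECTORS is 1 and ten MSI-X vectors were acquired, nine of them serve queues, so

	u32 eics = (1 << (10 - 1)) - 1;		/* 0x1ff: bits 0-8, one per queue vector */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, eics);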
2162 | 2904 | ||
2163 | static int ixgbe_tso(struct ixgbe_adapter *adapter, | 2905 | static int ixgbe_tso(struct ixgbe_adapter *adapter, |
@@ -2170,7 +2912,6 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter, | |||
2170 | struct ixgbe_tx_buffer *tx_buffer_info; | 2912 | struct ixgbe_tx_buffer *tx_buffer_info; |
2171 | u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0; | 2913 | u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0; |
2172 | u32 mss_l4len_idx = 0, l4len; | 2914 | u32 mss_l4len_idx = 0, l4len; |
2173 | *hdr_len = 0; | ||
2174 | 2915 | ||
2175 | if (skb_is_gso(skb)) { | 2916 | if (skb_is_gso(skb)) { |
2176 | if (skb_header_cloned(skb)) { | 2917 | if (skb_header_cloned(skb)) { |
@@ -2454,7 +3195,11 @@ static int __ixgbe_maybe_stop_tx(struct net_device *netdev, | |||
2454 | { | 3195 | { |
2455 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | 3196 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
2456 | 3197 | ||
3198 | #ifdef CONFIG_NETDEVICES_MULTIQUEUE | ||
3199 | netif_stop_subqueue(netdev, tx_ring->queue_index); | ||
3200 | #else | ||
2457 | netif_stop_queue(netdev); | 3201 | netif_stop_queue(netdev); |
3202 | #endif | ||
2458 | /* Herbert's original patch had: | 3203 | /* Herbert's original patch had: |
2459 | * smp_mb__after_netif_stop_queue(); | 3204 | * smp_mb__after_netif_stop_queue(); |
2460 | * but since that doesn't exist yet, just open code it. */ | 3205 | * but since that doesn't exist yet, just open code it. */ |
@@ -2466,7 +3211,11 @@ static int __ixgbe_maybe_stop_tx(struct net_device *netdev, | |||
2466 | return -EBUSY; | 3211 | return -EBUSY; |
2467 | 3212 | ||
2468 | /* A reprieve! - use start_queue because it doesn't call schedule */ | 3213 | /* A reprieve! - use start_queue because it doesn't call schedule */ |
3214 | #ifdef CONFIG_NETDEVICES_MULTIQUEUE | ||
3215 | netif_wake_subqueue(netdev, tx_ring->queue_index); | ||
3216 | #else | ||
2469 | netif_wake_queue(netdev); | 3217 | netif_wake_queue(netdev); |
3218 | #endif | ||
2470 | ++adapter->restart_queue; | 3219 | ++adapter->restart_queue; |
2471 | return 0; | 3220 | return 0; |
2472 | } | 3221 | } |
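__ixgbe_maybe_stop_tx() is the slow path; the fast path callers normally take checks free descriptors first and avoids the memory barrier entirely. A sketch of that wrapper (the unused-descriptor macro is assumed; the real wrapper lives elsewhere in the file):

	static int maybe_stop_tx(struct net_device *netdev,
				 struct ixgbe_ring *tx_ring, int size)
	{
		if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
			return 0;
		return __ixgbe_maybe_stop_tx(netdev, tx_ring, size);
	}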
@@ -2487,15 +3236,18 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
2487 | unsigned int len = skb->len; | 3236 | unsigned int len = skb->len; |
2488 | unsigned int first; | 3237 | unsigned int first; |
2489 | unsigned int tx_flags = 0; | 3238 | unsigned int tx_flags = 0; |
2490 | u8 hdr_len; | 3239 | u8 hdr_len = 0; |
2491 | int tso; | 3240 | int r_idx = 0, tso; |
2492 | unsigned int mss = 0; | 3241 | unsigned int mss = 0; |
2493 | int count = 0; | 3242 | int count = 0; |
2494 | unsigned int f; | 3243 | unsigned int f; |
2495 | unsigned int nr_frags = skb_shinfo(skb)->nr_frags; | 3244 | unsigned int nr_frags = skb_shinfo(skb)->nr_frags; |
2496 | len -= skb->data_len; | 3245 | len -= skb->data_len; |
3246 | #ifdef CONFIG_NETDEVICES_MULTIQUEUE | ||
3247 | r_idx = (adapter->num_tx_queues - 1) & skb->queue_mapping; | ||
3248 | #endif | ||
3249 | tx_ring = &adapter->tx_ring[r_idx]; | ||
2497 | 3250 | ||
2498 | tx_ring = adapter->tx_ring; | ||
2499 | 3251 | ||
2500 | if (skb->len <= 0) { | 3252 | if (skb->len <= 0) { |
2501 | dev_kfree_skb(skb); | 3253 | dev_kfree_skb(skb); |
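Note that the subqueue pick in this hunk is a mask, not a modulo, so it distributes traffic evenly only when num_tx_queues is a power of two; with other counts some rings can never be selected by this path. A worked example with assumed values:

	/* 8 Tx queues: mask is 7; the stack handed us queue_mapping = 11 */
	r_idx = (8 - 1) & 11;			/* == 3 */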
@@ -2604,6 +3356,31 @@ static void ixgbe_netpoll(struct net_device *netdev) | |||
2604 | #endif | 3356 | #endif |
2605 | 3357 | ||
2606 | /** | 3358 | /** |
3359 | * ixgbe_napi_add_all - prep napi structs for use | ||
3360 | * @adapter: private struct | ||
3361 | * helper function to call netif_napi_add() on each possible q_vector->napi | ||
3362 | */ | ||
3363 | static void ixgbe_napi_add_all(struct ixgbe_adapter *adapter) | ||
3364 | { | ||
3365 | int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; | ||
3366 | int (*poll)(struct napi_struct *, int); | ||
3367 | |||
3368 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { | ||
3369 | poll = &ixgbe_clean_rxonly; | ||
3370 | } else { | ||
3371 | poll = &ixgbe_poll; | ||
3372 | /* only one q_vector for legacy modes */ | ||
3373 | q_vectors = 1; | ||
3374 | } | ||
3375 | |||
3376 | for (i = 0; i < q_vectors; i++) { | ||
3377 | struct ixgbe_q_vector *q_vector = &adapter->q_vector[i]; | ||
3378 | netif_napi_add(adapter->netdev, &q_vector->napi, | ||
3379 | (*poll), 64); | ||
3380 | } | ||
3381 | } | ||
3382 | |||
3383 | /** | ||
2607 | * ixgbe_probe - Device Initialization Routine | 3384 | * ixgbe_probe - Device Initialization Routine |
2608 | * @pdev: PCI device information struct | 3385 | * @pdev: PCI device information struct |
2609 | * @ent: entry in ixgbe_pci_tbl | 3386 | * @ent: entry in ixgbe_pci_tbl |
@@ -2655,7 +3432,11 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
2655 | 3432 | ||
2656 | pci_set_master(pdev); | 3433 | pci_set_master(pdev); |
2657 | 3434 | ||
3435 | #ifdef CONFIG_NETDEVICES_MULTIQUEUE | ||
3436 | netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), MAX_TX_QUEUES); | ||
3437 | #else | ||
2658 | netdev = alloc_etherdev(sizeof(struct ixgbe_adapter)); | 3438 | netdev = alloc_etherdev(sizeof(struct ixgbe_adapter)); |
3439 | #endif | ||
2659 | if (!netdev) { | 3440 | if (!netdev) { |
2660 | err = -ENOMEM; | 3441 | err = -ENOMEM; |
2661 | goto err_alloc_etherdev; | 3442 | goto err_alloc_etherdev; |
@@ -2696,7 +3477,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
2696 | ixgbe_set_ethtool_ops(netdev); | 3477 | ixgbe_set_ethtool_ops(netdev); |
2697 | netdev->tx_timeout = &ixgbe_tx_timeout; | 3478 | netdev->tx_timeout = &ixgbe_tx_timeout; |
2698 | netdev->watchdog_timeo = 5 * HZ; | 3479 | netdev->watchdog_timeo = 5 * HZ; |
2699 | netif_napi_add(netdev, &adapter->napi, ixgbe_clean, 64); | ||
2700 | netdev->vlan_rx_register = ixgbe_vlan_rx_register; | 3480 | netdev->vlan_rx_register = ixgbe_vlan_rx_register; |
2701 | netdev->vlan_rx_add_vid = ixgbe_vlan_rx_add_vid; | 3481 | netdev->vlan_rx_add_vid = ixgbe_vlan_rx_add_vid; |
2702 | netdev->vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid; | 3482 | netdev->vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid; |
@@ -2719,6 +3499,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
2719 | 3499 | ||
2720 | /* Setup hw api */ | 3500 | /* Setup hw api */ |
2721 | memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops)); | 3501 | memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops)); |
3502 | hw->mac.type = ii->mac; | ||
2722 | 3503 | ||
2723 | err = ii->get_invariants(hw); | 3504 | err = ii->get_invariants(hw); |
2724 | if (err) | 3505 | if (err) |
@@ -2741,6 +3522,9 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
2741 | if (pci_using_dac) | 3522 | if (pci_using_dac) |
2742 | netdev->features |= NETIF_F_HIGHDMA; | 3523 | netdev->features |= NETIF_F_HIGHDMA; |
2743 | 3524 | ||
3525 | #ifdef CONFIG_NETDEVICES_MULTIQUEUE | ||
3526 | netdev->features |= NETIF_F_MULTI_QUEUE; | ||
3527 | #endif | ||
2744 | 3528 | ||
2745 | /* make sure the EEPROM is good */ | 3529 | /* make sure the EEPROM is good */ |
2746 | if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) { | 3530 | if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) { |
@@ -2770,9 +3554,9 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
2770 | hw->fc.low_water = IXGBE_DEFAULT_FCRTL; | 3554 | hw->fc.low_water = IXGBE_DEFAULT_FCRTL; |
2771 | hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE; | 3555 | hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE; |
2772 | 3556 | ||
2773 | /* Interrupt Throttle Rate */ | 3557 | err = ixgbe_init_interrupt_scheme(adapter); |
2774 | adapter->rx_eitr = (1000000 / IXGBE_DEFAULT_ITR_RX_USECS); | 3558 | if (err) |
2775 | adapter->tx_eitr = (1000000 / IXGBE_DEFAULT_ITR_TX_USECS); | 3559 | goto err_sw_init; |
2776 | 3560 | ||
2777 | /* print bus type/speed/width info */ | 3561 | /* print bus type/speed/width info */ |
2778 | pci_read_config_word(pdev, IXGBE_PCI_LINK_STATUS, &link_status); | 3562 | pci_read_config_word(pdev, IXGBE_PCI_LINK_STATUS, &link_status); |
@@ -2808,12 +3592,27 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
2808 | 3592 | ||
2809 | netif_carrier_off(netdev); | 3593 | netif_carrier_off(netdev); |
2810 | netif_stop_queue(netdev); | 3594 | netif_stop_queue(netdev); |
3595 | #ifdef CONFIG_NETDEVICES_MULTIQUEUE | ||
3596 | for (i = 0; i < adapter->num_tx_queues; i++) | ||
3597 | netif_stop_subqueue(netdev, i); | ||
3598 | #endif | ||
3599 | |||
3600 | ixgbe_napi_add_all(adapter); | ||
2811 | 3601 | ||
2812 | strcpy(netdev->name, "eth%d"); | 3602 | strcpy(netdev->name, "eth%d"); |
2813 | err = register_netdev(netdev); | 3603 | err = register_netdev(netdev); |
2814 | if (err) | 3604 | if (err) |
2815 | goto err_register; | 3605 | goto err_register; |
2816 | 3606 | ||
3607 | #ifdef CONFIG_DCA | ||
3608 | if (dca_add_requester(&pdev->dev) == IXGBE_SUCCESS) { | ||
3609 | adapter->flags |= IXGBE_FLAG_DCA_ENABLED; | ||
3610 | /* always use CB2 mode, difference is masked | ||
3611 | * in the CB driver */ | ||
3612 | IXGBE_WRITE_REG(hw, IXGBE_DCA_CTRL, 2); | ||
3613 | ixgbe_setup_dca(adapter); | ||
3614 | } | ||
3615 | #endif | ||
2817 | 3616 | ||
2818 | dev_info(&pdev->dev, "Intel(R) 10 Gigabit Network Connection\n"); | 3617 | dev_info(&pdev->dev, "Intel(R) 10 Gigabit Network Connection\n"); |
2819 | cards_found++; | 3618 | cards_found++; |
@@ -2823,6 +3622,7 @@ err_register: | |||
2823 | ixgbe_release_hw_control(adapter); | 3622 | ixgbe_release_hw_control(adapter); |
2824 | err_hw_init: | 3623 | err_hw_init: |
2825 | err_sw_init: | 3624 | err_sw_init: |
3625 | ixgbe_reset_interrupt_capability(adapter); | ||
2826 | err_eeprom: | 3626 | err_eeprom: |
2827 | iounmap(hw->hw_addr); | 3627 | iounmap(hw->hw_addr); |
2828 | err_ioremap: | 3628 | err_ioremap: |
@@ -2854,16 +3654,27 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev) | |||
2854 | 3654 | ||
2855 | flush_scheduled_work(); | 3655 | flush_scheduled_work(); |
2856 | 3656 | ||
3657 | #ifdef CONFIG_DCA | ||
3658 | if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { | ||
3659 | adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED; | ||
3660 | dca_remove_requester(&pdev->dev); | ||
3661 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1); | ||
3662 | } | ||
3663 | |||
3664 | #endif | ||
2857 | unregister_netdev(netdev); | 3665 | unregister_netdev(netdev); |
2858 | 3666 | ||
2859 | ixgbe_release_hw_control(adapter); | 3667 | ixgbe_reset_interrupt_capability(adapter); |
2860 | 3668 | ||
2861 | kfree(adapter->tx_ring); | 3669 | ixgbe_release_hw_control(adapter); |
2862 | kfree(adapter->rx_ring); | ||
2863 | 3670 | ||
2864 | iounmap(adapter->hw.hw_addr); | 3671 | iounmap(adapter->hw.hw_addr); |
2865 | pci_release_regions(pdev); | 3672 | pci_release_regions(pdev); |
2866 | 3673 | ||
3674 | DPRINTK(PROBE, INFO, "complete\n"); | ||
3675 | kfree(adapter->tx_ring); | ||
3676 | kfree(adapter->rx_ring); | ||
3677 | |||
2867 | free_netdev(netdev); | 3678 | free_netdev(netdev); |
2868 | 3679 | ||
2869 | pci_disable_device(pdev); | 3680 | pci_disable_device(pdev); |
@@ -2975,6 +3786,10 @@ static int __init ixgbe_init_module(void) | |||
2975 | 3786 | ||
2976 | printk(KERN_INFO "%s: %s\n", ixgbe_driver_name, ixgbe_copyright); | 3787 | printk(KERN_INFO "%s: %s\n", ixgbe_driver_name, ixgbe_copyright); |
2977 | 3788 | ||
3789 | #ifdef CONFIG_DCA | ||
3790 | dca_register_notify(&dca_notifier); | ||
3791 | |||
3792 | #endif | ||
2978 | ret = pci_register_driver(&ixgbe_driver); | 3793 | ret = pci_register_driver(&ixgbe_driver); |
2979 | return ret; | 3794 | return ret; |
2980 | } | 3795 | } |
@@ -2988,8 +3803,25 @@ module_init(ixgbe_init_module); | |||
2988 | **/ | 3803 | **/ |
2989 | static void __exit ixgbe_exit_module(void) | 3804 | static void __exit ixgbe_exit_module(void) |
2990 | { | 3805 | { |
3806 | #ifdef CONFIG_DCA | ||
3807 | dca_unregister_notify(&dca_notifier); | ||
3808 | #endif | ||
2991 | pci_unregister_driver(&ixgbe_driver); | 3809 | pci_unregister_driver(&ixgbe_driver); |
2992 | } | 3810 | } |
3811 | |||
3812 | #ifdef CONFIG_DCA | ||
3813 | static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event, | ||
3814 | void *p) | ||
3815 | { | ||
3816 | int ret_val; | ||
3817 | |||
3818 | ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event, | ||
3819 | __ixgbe_notify_dca); | ||
3820 | |||
3821 | return ret_val ? NOTIFY_BAD : NOTIFY_DONE; | ||
3822 | } | ||
3823 | #endif /* CONFIG_DCA */ | ||
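driver_for_each_device() fans the notification out to a per-device callback, __ixgbe_notify_dca(), which is defined earlier in the file and not shown in this hunk. Its expected shape, sketched from the driver-core callback signature:

	static int __ixgbe_notify_dca(struct device *dev, void *data)
	{
		unsigned long event = *(unsigned long *)data;

		/* look up the adapter bound to dev and add or remove it as
		 * a DCA requester according to event */
		return 0;
	}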
3824 | |||
2993 | module_exit(ixgbe_exit_module); | 3825 | module_exit(ixgbe_exit_module); |
2994 | 3826 | ||
2995 | /* ixgbe_main.c */ | 3827 | /* ixgbe_main.c */ |