Diffstat (limited to 'drivers/net/ixgbe/ixgbe_main.c')
 drivers/net/ixgbe/ixgbe_main.c | 1107 +++++++++++++++++++++++++++-----------
 1 file changed, 768 insertions(+), 339 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index c2095ce531c9..fecc8ea79e9d 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -559,7 +559,7 @@ next_desc:
 	return cleaned;
 }
 
-#define IXGBE_MAX_INTR 10
+static int ixgbe_clean_rxonly(struct napi_struct *, int);
 /**
  * ixgbe_configure_msix - Configure MSI-X hardware
  * @adapter: board private structure
@@ -569,28 +569,57 @@ next_desc:
  **/
 static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
 {
-	int i, vector = 0;
+	struct ixgbe_q_vector *q_vector;
+	int i, j, q_vectors, v_idx, r_idx;
+	u32 mask;
 
-	for (i = 0; i < adapter->num_tx_queues; i++) {
-		ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(i),
-			       IXGBE_MSIX_VECTOR(vector));
-		writel(EITR_INTS_PER_SEC_TO_REG(adapter->tx_eitr),
-		       adapter->hw.hw_addr + adapter->tx_ring[i].itr_register);
-		vector++;
-	}
+	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
 
-	for (i = 0; i < adapter->num_rx_queues; i++) {
-		ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(i),
-			       IXGBE_MSIX_VECTOR(vector));
-		writel(EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr),
-		       adapter->hw.hw_addr + adapter->rx_ring[i].itr_register);
-		vector++;
+	/* Populate the IVAR table and set the ITR values to the
+	 * corresponding register.
+	 */
+	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
+		q_vector = &adapter->q_vector[v_idx];
+		/* XXX for_each_bit(...) */
+		r_idx = find_first_bit(q_vector->rxr_idx,
+				       adapter->num_rx_queues);
+
+		for (i = 0; i < q_vector->rxr_count; i++) {
+			j = adapter->rx_ring[r_idx].reg_idx;
+			ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(j), v_idx);
+			r_idx = find_next_bit(q_vector->rxr_idx,
+					      adapter->num_rx_queues,
+					      r_idx + 1);
+		}
+		r_idx = find_first_bit(q_vector->txr_idx,
+				       adapter->num_tx_queues);
+
+		for (i = 0; i < q_vector->txr_count; i++) {
+			j = adapter->tx_ring[r_idx].reg_idx;
+			ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(j), v_idx);
+			r_idx = find_next_bit(q_vector->txr_idx,
+					      adapter->num_tx_queues,
+					      r_idx + 1);
+		}
+
+		/* if this is a tx only vector use half the irq (tx) rate */
+		if (q_vector->txr_count && !q_vector->rxr_count)
+			q_vector->eitr = adapter->tx_eitr;
+		else
+			/* rx only or mixed */
+			q_vector->eitr = adapter->rx_eitr;
+
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx),
+				EITR_INTS_PER_SEC_TO_REG(q_vector->eitr));
 	}
 
-	vector = adapter->num_tx_queues + adapter->num_rx_queues;
-	ixgbe_set_ivar(adapter, IXGBE_IVAR_OTHER_CAUSES_INDEX,
-		       IXGBE_MSIX_VECTOR(vector));
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(vector), 1950);
+	ixgbe_set_ivar(adapter, IXGBE_IVAR_OTHER_CAUSES_INDEX, v_idx);
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
+
+	/* set up to autoclear timer, lsc, and the vectors */
+	mask = IXGBE_EIMS_ENABLE_MASK;
+	mask &= ~IXGBE_EIMS_OTHER;
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
 }
 
 static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
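
The rewritten ixgbe_configure_msix() above walks each q_vector's ring bitmaps
with find_first_bit()/find_next_bit(); the XXX comment points at the kernel's
for_each_bit() iterator as the tidier spelling of the same loop. A minimal
userspace sketch of the visiting order (the helper below is a hypothetical
stand-in for the kernel's bit-search functions, limited to a single word):

#include <stdio.h>

/* stand-in for find_first_bit()/find_next_bit(), one word only */
static int find_next_set(unsigned long map, int size, int start)
{
	for (int i = start; i < size; i++)
		if (map & (1UL << i))
			return i;
	return size;	/* like the kernel helpers, "size" means no bit found */
}

int main(void)
{
	unsigned long rxr_idx = 0x2c;	/* rings 2, 3 and 5 on one vector */
	int rxr_count = 3;		/* the driver tracks this separately */

	int r_idx = find_next_set(rxr_idx, 16, 0);	/* find_first_bit() */
	for (int i = 0; i < rxr_count; i++) {
		printf("program IVAR for ring %d\n", r_idx);
		r_idx = find_next_set(rxr_idx, 16, r_idx + 1); /* find_next_bit() */
	}
	return 0;	/* visits rings 2, 3, 5 in ascending order */
}
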
@@ -614,153 +643,241 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
 
 static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
 {
-	struct ixgbe_ring *txr = data;
-	struct ixgbe_adapter *adapter = txr->adapter;
+	struct ixgbe_q_vector *q_vector = data;
+	struct ixgbe_adapter *adapter = q_vector->adapter;
+	struct ixgbe_ring *txr;
+	int i, r_idx;
+
+	if (!q_vector->txr_count)
+		return IRQ_HANDLED;
+
+	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
+	for (i = 0; i < q_vector->txr_count; i++) {
+		txr = &(adapter->tx_ring[r_idx]);
+		ixgbe_clean_tx_irq(adapter, txr);
+		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
+				      r_idx + 1);
+	}
 
-	ixgbe_clean_tx_irq(adapter, txr);
 
 	return IRQ_HANDLED;
 }
 
+/**
+ * ixgbe_msix_clean_rx - single unshared vector rx clean (all queues)
+ * @irq: unused
+ * @data: pointer to our q_vector struct for this interrupt vector
+ **/
 static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
 {
-	struct ixgbe_ring *rxr = data;
-	struct ixgbe_adapter *adapter = rxr->adapter;
+	struct ixgbe_q_vector *q_vector = data;
+	struct ixgbe_adapter *adapter = q_vector->adapter;
+	struct ixgbe_ring *rxr;
+	int r_idx;
+
+	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+	if (!q_vector->rxr_count)
+		return IRQ_HANDLED;
+
+	rxr = &(adapter->rx_ring[r_idx]);
+	/* disable interrupts on this vector only */
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rxr->v_idx);
+	netif_rx_schedule(adapter->netdev, &q_vector->napi);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
+{
+	ixgbe_msix_clean_rx(irq, data);
+	ixgbe_msix_clean_tx(irq, data);
 
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rxr->eims_value);
-	netif_rx_schedule(adapter->netdev, &adapter->napi);
 	return IRQ_HANDLED;
 }
 
+/**
+ * ixgbe_clean_rxonly - msix (aka one shot) rx clean routine
+ * @napi: napi struct with our devices info in it
+ * @budget: amount of work driver is allowed to do this pass, in packets
+ *
+ **/
 static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
 {
-	struct ixgbe_adapter *adapter = container_of(napi,
-					struct ixgbe_adapter, napi);
-	struct net_device *netdev = adapter->netdev;
+	struct ixgbe_q_vector *q_vector =
+			container_of(napi, struct ixgbe_q_vector, napi);
+	struct ixgbe_adapter *adapter = q_vector->adapter;
+	struct ixgbe_ring *rxr;
 	int work_done = 0;
-	struct ixgbe_ring *rxr = adapter->rx_ring;
+	long r_idx;
 
-	/* Keep link state information with original netdev */
-	if (!netif_carrier_ok(netdev))
-		goto quit_polling;
+	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+	rxr = &(adapter->rx_ring[r_idx]);
 
 	ixgbe_clean_rx_irq(adapter, rxr, &work_done, budget);
 
-	/* If no Tx and not enough Rx work done, exit the polling mode */
-	if ((work_done < budget) || !netif_running(netdev)) {
-quit_polling:
-		netif_rx_complete(netdev, napi);
+	/* If all Rx work done, exit the polling mode */
+	if (work_done < budget) {
+		netif_rx_complete(adapter->netdev, napi);
 		if (!test_bit(__IXGBE_DOWN, &adapter->state))
-			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS,
-					rxr->eims_value);
+			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rxr->v_idx);
 	}
 
 	return work_done;
 }
 
+static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
+				     int r_idx)
+{
+	a->q_vector[v_idx].adapter = a;
+	set_bit(r_idx, a->q_vector[v_idx].rxr_idx);
+	a->q_vector[v_idx].rxr_count++;
+	a->rx_ring[r_idx].v_idx = 1 << v_idx;
+}
+
+static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
+				     int r_idx)
+{
+	a->q_vector[v_idx].adapter = a;
+	set_bit(r_idx, a->q_vector[v_idx].txr_idx);
+	a->q_vector[v_idx].txr_count++;
+	a->tx_ring[r_idx].v_idx = 1 << v_idx;
+}
+
 /**
- * ixgbe_setup_msix - Initialize MSI-X interrupts
+ * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors
+ * @adapter: board private structure to initialize
+ * @vectors: allotted vector count for descriptor rings
  *
- * ixgbe_setup_msix allocates MSI-X vectors and requests
- * interrutps from the kernel.
+ * This function maps descriptor rings to the queue-specific vectors
+ * we were allotted through the MSI-X enabling code. Ideally, we'd have
+ * one vector per ring/queue, but on a constrained vector budget, we
+ * group the rings as "efficiently" as possible. You would add new
+ * mapping configurations in here.
  **/
-static int ixgbe_setup_msix(struct ixgbe_adapter *adapter)
-{
-	struct net_device *netdev = adapter->netdev;
-	int i, int_vector = 0, err = 0;
-	int max_msix_count;
+static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
+				      int vectors)
+{
+	int v_start = 0;
+	int rxr_idx = 0, txr_idx = 0;
+	int rxr_remaining = adapter->num_rx_queues;
+	int txr_remaining = adapter->num_tx_queues;
+	int i, j;
+	int rqpv, tqpv;
+	int err = 0;
+
+	/* No mapping required if MSI-X is disabled. */
+	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
+		goto out;
 
-	/* +1 for the LSC interrupt */
-	max_msix_count = adapter->num_rx_queues + adapter->num_tx_queues + 1;
-	adapter->msix_entries = kcalloc(max_msix_count,
-					sizeof(struct msix_entry), GFP_KERNEL);
-	if (!adapter->msix_entries)
-		return -ENOMEM;
+	/*
+	 * The ideal configuration...
+	 * We have enough vectors to map one per queue.
+	 */
+	if (vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
+		for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
+			map_vector_to_rxq(adapter, v_start, rxr_idx);
 
-	for (i = 0; i < max_msix_count; i++)
-		adapter->msix_entries[i].entry = i;
+		for (; txr_idx < txr_remaining; v_start++, txr_idx++)
+			map_vector_to_txq(adapter, v_start, txr_idx);
 
-	err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
-			      max_msix_count);
-	if (err)
 		goto out;
+	}
 
-	for (i = 0; i < adapter->num_tx_queues; i++) {
-		sprintf(adapter->tx_ring[i].name, "%s-tx%d", netdev->name, i);
-		err = request_irq(adapter->msix_entries[int_vector].vector,
-				  &ixgbe_msix_clean_tx,
-				  0,
-				  adapter->tx_ring[i].name,
-				  &(adapter->tx_ring[i]));
-		if (err) {
-			DPRINTK(PROBE, ERR,
-				"request_irq failed for MSIX interrupt "
-				"Error: %d\n", err);
-			goto release_irqs;
+	/*
+	 * If we don't have enough vectors for a 1-to-1
+	 * mapping, we'll have to group them so there are
+	 * multiple queues per vector.
+	 */
+	/* Re-adjusting *qpv takes care of the remainder. */
+	for (i = v_start; i < vectors; i++) {
+		rqpv = DIV_ROUND_UP(rxr_remaining, vectors - i);
+		for (j = 0; j < rqpv; j++) {
+			map_vector_to_rxq(adapter, i, rxr_idx);
+			rxr_idx++;
+			rxr_remaining--;
+		}
+	}
+	for (i = v_start; i < vectors; i++) {
+		tqpv = DIV_ROUND_UP(txr_remaining, vectors - i);
+		for (j = 0; j < tqpv; j++) {
+			map_vector_to_txq(adapter, i, txr_idx);
+			txr_idx++;
+			txr_remaining--;
 		}
-		adapter->tx_ring[i].eims_value =
-			(1 << IXGBE_MSIX_VECTOR(int_vector));
-		adapter->tx_ring[i].itr_register = IXGBE_EITR(int_vector);
-		int_vector++;
 	}
 
-	for (i = 0; i < adapter->num_rx_queues; i++) {
-		if (strlen(netdev->name) < (IFNAMSIZ - 5))
-			sprintf(adapter->rx_ring[i].name,
-				"%s-rx%d", netdev->name, i);
-		else
-			memcpy(adapter->rx_ring[i].name,
-			       netdev->name, IFNAMSIZ);
-		err = request_irq(adapter->msix_entries[int_vector].vector,
-				  &ixgbe_msix_clean_rx, 0,
-				  adapter->rx_ring[i].name,
-				  &(adapter->rx_ring[i]));
+out:
+	return err;
+}
+
+/**
+ * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
+ * @adapter: board private structure
+ *
+ * ixgbe_request_msix_irqs allocates MSI-X vectors and requests
+ * interrupts from the kernel.
+ **/
+static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	irqreturn_t (*handler)(int, void *);
+	int i, vector, q_vectors, err;
+
+	/* Decrement for Other and TCP Timer vectors */
+	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+	/* Map the Tx/Rx rings to the vectors we were allotted. */
+	err = ixgbe_map_rings_to_vectors(adapter, q_vectors);
+	if (err)
+		goto out;
+
+#define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \
+			 (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
+			 &ixgbe_msix_clean_many)
+	for (vector = 0; vector < q_vectors; vector++) {
+		handler = SET_HANDLER(&adapter->q_vector[vector]);
+		sprintf(adapter->name[vector], "%s:v%d-%s",
+			netdev->name, vector,
+			(handler == &ixgbe_msix_clean_rx) ? "Rx" :
+			((handler == &ixgbe_msix_clean_tx) ? "Tx" : "TxRx"));
+		err = request_irq(adapter->msix_entries[vector].vector,
+				  handler, 0, adapter->name[vector],
+				  &(adapter->q_vector[vector]));
 		if (err) {
 			DPRINTK(PROBE, ERR,
 				"request_irq failed for MSIX interrupt "
 				"Error: %d\n", err);
-			goto release_irqs;
+			goto free_queue_irqs;
 		}
-
-		adapter->rx_ring[i].eims_value =
-			(1 << IXGBE_MSIX_VECTOR(int_vector));
-		adapter->rx_ring[i].itr_register = IXGBE_EITR(int_vector);
-		int_vector++;
 	}
 
-	sprintf(adapter->lsc_name, "%s-lsc", netdev->name);
-	err = request_irq(adapter->msix_entries[int_vector].vector,
-			  &ixgbe_msix_lsc, 0, adapter->lsc_name, netdev);
+	sprintf(adapter->name[vector], "%s:lsc", netdev->name);
+	err = request_irq(adapter->msix_entries[vector].vector,
+			  &ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
 	if (err) {
 		DPRINTK(PROBE, ERR,
 			"request_irq for msix_lsc failed: %d\n", err);
-		goto release_irqs;
+		goto free_queue_irqs;
 	}
 
-	/* FIXME: implement netif_napi_remove() instead */
-	adapter->napi.poll = ixgbe_clean_rxonly;
-	adapter->flags |= IXGBE_FLAG_MSIX_ENABLED;
 	return 0;
 
-release_irqs:
-	int_vector--;
-	for (; int_vector >= adapter->num_tx_queues; int_vector--)
-		free_irq(adapter->msix_entries[int_vector].vector,
-			 &(adapter->rx_ring[int_vector -
-					    adapter->num_tx_queues]));
-
-	for (; int_vector >= 0; int_vector--)
-		free_irq(adapter->msix_entries[int_vector].vector,
-			 &(adapter->tx_ring[int_vector]));
-out:
+free_queue_irqs:
+	for (i = vector - 1; i >= 0; i--)
+		free_irq(adapter->msix_entries[--vector].vector,
+			 &(adapter->q_vector[i]));
+	adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
+	pci_disable_msix(adapter->pdev);
 	kfree(adapter->msix_entries);
 	adapter->msix_entries = NULL;
-	adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
+out:
 	return err;
 }
 
+static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter);
+
 /**
- * ixgbe_intr - Interrupt Handler
+ * ixgbe_intr - legacy mode Interrupt Handler
  * @irq: interrupt number
  * @data: pointer to a network interface device structure
  * @pt_regs: CPU registers structure
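
When the budget is short, ixgbe_map_rings_to_vectors() recomputes
rqpv = DIV_ROUND_UP(rings remaining, vectors remaining) on every pass, so the
remainder is spread across the early vectors instead of piling onto the last
one. A standalone check of that arithmetic (the 8-ring/3-vector counts are
made-up examples):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int vectors = 3, rxr_remaining = 8, rxr_idx = 0;

	for (int i = 0; i < vectors; i++) {
		/* same per-pass recomputation as the driver's loop */
		int rqpv = DIV_ROUND_UP(rxr_remaining, vectors - i);
		printf("vector %d takes %d rings:", i, rqpv);
		for (int j = 0; j < rqpv; j++, rxr_idx++, rxr_remaining--)
			printf(" %d", rxr_idx);
		printf("\n");
	}
	return 0;	/* 8 rings over 3 vectors -> groups of 3, 3, 2 */
}
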
@@ -772,8 +889,10 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
 	struct ixgbe_hw *hw = &adapter->hw;
 	u32 eicr;
 
-	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
 
+	/* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read
+	 * therefore no explict interrupt disable is necessary */
+	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
 	if (!eicr)
 		return IRQ_NONE;	/* Not our interrupt */
 
@@ -782,16 +901,29 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
 		if (!test_bit(__IXGBE_DOWN, &adapter->state))
 			mod_timer(&adapter->watchdog_timer, jiffies);
 	}
-	if (netif_rx_schedule_prep(netdev, &adapter->napi)) {
-		/* Disable interrupts and register for poll. The flush of the
-		 * posted write is intentionally left out. */
-		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
-		__netif_rx_schedule(netdev, &adapter->napi);
+
+
+	if (netif_rx_schedule_prep(netdev, &adapter->q_vector[0].napi)) {
+		/* would disable interrupts here but EIAM disabled it */
+		__netif_rx_schedule(netdev, &adapter->q_vector[0].napi);
 	}
 
 	return IRQ_HANDLED;
 }
 
+static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter)
+{
+	int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+	for (i = 0; i < q_vectors; i++) {
+		struct ixgbe_q_vector *q_vector = &adapter->q_vector[i];
+		bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
+		bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
+		q_vector->rxr_count = 0;
+		q_vector->txr_count = 0;
+	}
+}
+
 /**
  * ixgbe_request_irq - initialize interrupts
  * @adapter: board private structure
@@ -799,40 +931,24 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
  * Attempts to configure interrupts using the best available
  * capabilities of the hardware and kernel.
  **/
-static int ixgbe_request_irq(struct ixgbe_adapter *adapter, u32 *num_rx_queues)
+static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
-	int flags, err;
-	irq_handler_t handler = ixgbe_intr;
-
-	flags = IRQF_SHARED;
-
-	err = ixgbe_setup_msix(adapter);
-	if (!err)
-		goto request_done;
-
-	/*
-	 * if we can't do MSI-X, fall through and try MSI
-	 * No need to reallocate memory since we're decreasing the number of
-	 * queues. We just won't use the other ones, also it is freed correctly
-	 * on ixgbe_remove.
-	 */
-	*num_rx_queues = 1;
+	int err;
 
-	/* do MSI */
-	err = pci_enable_msi(adapter->pdev);
-	if (!err) {
-		adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
-		flags &= ~IRQF_SHARED;
-		handler = &ixgbe_intr;
+	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+		err = ixgbe_request_msix_irqs(adapter);
+	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
+		err = request_irq(adapter->pdev->irq, &ixgbe_intr, 0,
+				  netdev->name, netdev);
+	} else {
+		err = request_irq(adapter->pdev->irq, &ixgbe_intr, IRQF_SHARED,
+				  netdev->name, netdev);
 	}
 
-	err = request_irq(adapter->pdev->irq, handler, flags,
-			  netdev->name, netdev);
 	if (err)
 		DPRINTK(PROBE, ERR, "request_irq failed, Error %d\n", err);
 
-request_done:
 	return err;
 }
 
@@ -841,28 +957,22 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
 	struct net_device *netdev = adapter->netdev;
 
 	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
-		int i;
+		int i, q_vectors;
 
-		for (i = 0; i < adapter->num_tx_queues; i++)
-			free_irq(adapter->msix_entries[i].vector,
-				 &(adapter->tx_ring[i]));
-		for (i = 0; i < adapter->num_rx_queues; i++)
-			free_irq(adapter->msix_entries[i +
-					adapter->num_tx_queues].vector,
-				 &(adapter->rx_ring[i]));
-		i = adapter->num_rx_queues + adapter->num_tx_queues;
+		q_vectors = adapter->num_msix_vectors;
+
+		i = q_vectors - 1;
 		free_irq(adapter->msix_entries[i].vector, netdev);
-		pci_disable_msix(adapter->pdev);
-		kfree(adapter->msix_entries);
-		adapter->msix_entries = NULL;
-		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
-		return;
-	}
 
-	free_irq(adapter->pdev->irq, netdev);
-	if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
-		pci_disable_msi(adapter->pdev);
-		adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
+		i--;
+		for (; i >= 0; i--) {
+			free_irq(adapter->msix_entries[i].vector,
+				 &(adapter->q_vector[i]));
+		}
+
+		ixgbe_reset_q_vectors(adapter);
+	} else {
+		free_irq(adapter->pdev->irq, netdev);
 	}
 }
 
@@ -874,7 +984,13 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
 {
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
 	IXGBE_WRITE_FLUSH(&adapter->hw);
-	synchronize_irq(adapter->pdev->irq);
+	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+		int i;
+		for (i = 0; i < adapter->num_msix_vectors; i++)
+			synchronize_irq(adapter->msix_entries[i].vector);
+	} else {
+		synchronize_irq(adapter->pdev->irq);
+	}
 }
 
 /**
@@ -883,12 +999,9 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
  **/
 static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
 {
-	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
-		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC,
-				(IXGBE_EIMS_ENABLE_MASK &
-				 ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC)));
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS,
-			IXGBE_EIMS_ENABLE_MASK);
+	u32 mask;
+	mask = IXGBE_EIMS_ENABLE_MASK;
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
 	IXGBE_WRITE_FLUSH(&adapter->hw);
 }
 
@@ -898,20 +1011,18 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
  **/
 static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
 {
-	int i;
 	struct ixgbe_hw *hw = &adapter->hw;
 
-	if (adapter->rx_eitr)
-		IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
-				EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr));
-
-	/* for re-triggering the interrupt in non-NAPI mode */
-	adapter->rx_ring[0].eims_value = (1 << IXGBE_MSIX_VECTOR(0));
-	adapter->tx_ring[0].eims_value = (1 << IXGBE_MSIX_VECTOR(0));
+	IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
+			EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr));
 
 	ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(0), 0);
-	for (i = 0; i < adapter->num_tx_queues; i++)
-		ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(i), i);
+	ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(0), 0);
+
+	map_vector_to_rxq(adapter, 0, 0);
+	map_vector_to_txq(adapter, 0, 0);
+
+	DPRINTK(HW, INFO, "Legacy interrupt IVAR setup done\n");
 }
 
 /**
@@ -924,23 +1035,29 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
 {
 	u64 tdba;
 	struct ixgbe_hw *hw = &adapter->hw;
-	u32 i, tdlen;
+	u32 i, j, tdlen, txctrl;
 
 	/* Setup the HW Tx Head and Tail descriptor pointers */
 	for (i = 0; i < adapter->num_tx_queues; i++) {
+		j = adapter->tx_ring[i].reg_idx;
 		tdba = adapter->tx_ring[i].dma;
 		tdlen = adapter->tx_ring[i].count *
 			sizeof(union ixgbe_adv_tx_desc);
-		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i), (tdba & DMA_32BIT_MASK));
-		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (tdba >> 32));
-		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i), tdlen);
-		IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
-		IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0);
-		adapter->tx_ring[i].head = IXGBE_TDH(i);
-		adapter->tx_ring[i].tail = IXGBE_TDT(i);
+		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
+				(tdba & DMA_32BIT_MASK));
+		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
+		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), tdlen);
+		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
+		adapter->tx_ring[i].head = IXGBE_TDH(j);
+		adapter->tx_ring[i].tail = IXGBE_TDT(j);
+		/* Disable Tx Head Writeback RO bit, since this hoses
+		 * bookkeeping if things aren't delivered in order.
+		 */
+		txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
+		txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl);
 	}
-
-	IXGBE_WRITE_REG(hw, IXGBE_TIPG, IXGBE_TIPG_FIBER_DEFAULT);
 }
 
 #define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
@@ -959,13 +1076,12 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
 	struct ixgbe_hw *hw = &adapter->hw;
 	struct net_device *netdev = adapter->netdev;
 	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+	int i, j;
 	u32 rdlen, rxctrl, rxcsum;
 	u32 random[10];
-	u32 reta, mrqc;
-	int i;
 	u32 fctrl, hlreg0;
-	u32 srrctl;
 	u32 pages;
+	u32 reta = 0, mrqc, srrctl;
 
 	/* Decide whether to use packet split mode or not */
 	if (netdev->mtu > ETH_DATA_LEN)
@@ -985,6 +1101,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
 
 	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
 	fctrl |= IXGBE_FCTRL_BAM;
+	fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
 
 	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
@@ -1036,37 +1153,23 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
 		adapter->rx_ring[i].tail = IXGBE_RDT(i);
 	}
 
-	if (adapter->num_rx_queues > 1) {
-		/* Random 40bytes used as random key in RSS hash function */
-		get_random_bytes(&random[0], 40);
-
-		switch (adapter->num_rx_queues) {
-		case 8:
-		case 4:
-			/* Bits [3:0] in each byte refers the Rx queue no */
-			reta = 0x00010203;
-			break;
-		case 2:
-			reta = 0x00010001;
-			break;
-		default:
-			reta = 0x00000000;
-			break;
-		}
-
+	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
 		/* Fill out redirection table */
-		for (i = 0; i < 32; i++) {
-			IXGBE_WRITE_REG_ARRAY(hw, IXGBE_RETA(0), i, reta);
-			if (adapter->num_rx_queues > 4) {
-				i++;
-				IXGBE_WRITE_REG_ARRAY(hw, IXGBE_RETA(0), i,
-						      0x04050607);
-			}
+		for (i = 0, j = 0; i < 128; i++, j++) {
+			if (j == adapter->ring_feature[RING_F_RSS].indices)
+				j = 0;
+			/* reta = 4-byte sliding window of
+			 * 0x00..(indices-1)(indices-1)00..etc. */
+			reta = (reta << 8) | (j * 0x11);
+			if ((i & 3) == 3)
+				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
 		}
 
 		/* Fill out hash function seeds */
+		/* XXX use a random constant here to glue certain flows */
+		get_random_bytes(&random[0], 40);
 		for (i = 0; i < 10; i++)
-			IXGBE_WRITE_REG_ARRAY(hw, IXGBE_RSSRK(0), i, random[i]);
+			IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random[i]);
 
 		mrqc = IXGBE_MRQC_RSSEN
 		    /* Perform hash on these packet types */
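
The redirection-table loop above packs one queue number per byte, four bytes
per 32-bit RETA register; j * 0x11 simply writes the queue index into both
nibbles of that byte, and the shift keeps a 4-entry sliding window that is
flushed every fourth iteration. The comment is easier to see run than to
read; a sketch with the same arithmetic (indices = 4 is an example value):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int indices = 4;	/* ring_feature[RING_F_RSS].indices, say 4 */
	uint32_t reta = 0;

	for (int i = 0, j = 0; i < 128; i++, j++) {
		if (j == indices)
			j = 0;
		reta = (reta << 8) | (j * 0x11);	/* byte = queue index */
		if ((i & 3) == 3)			/* 4 entries buffered */
			printf("RETA[%2d] = 0x%08x\n", i >> 2, reta);
	}
	return 0;	/* with indices = 4, every register is 0x00112233 */
}
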
@@ -1080,26 +1183,23 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
 		    | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
 		    | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
 		IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+	}
 
-	/* Multiqueue and packet checksumming are mutually exclusive. */
-	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
+	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
+
+	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED ||
+	    adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED) {
+		/* Disable indicating checksum in descriptor, enables
+		 * RSS hash */
 		rxcsum |= IXGBE_RXCSUM_PCSD;
-		IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
-	} else {
-		/* Enable Receive Checksum Offload for TCP and UDP */
-		rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
-		if (adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED) {
-			/* Enable IPv4 payload checksum for UDP fragments
-			 * Must be used in conjunction with packet-split. */
-			rxcsum |= IXGBE_RXCSUM_IPPCSE;
-		} else {
-			/* don't need to clear IPPCSE as it defaults to 0 */
-		}
-		IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
 	}
-	/* Enable Receives */
-	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
-	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+	if (!(rxcsum & IXGBE_RXCSUM_PCSD)) {
+		/* Enable IPv4 payload checksum for UDP fragments
+		 * if PCSD is not set */
+		rxcsum |= IXGBE_RXCSUM_IPPCSE;
+	}
+
+	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
 }
 
 static void ixgbe_vlan_rx_register(struct net_device *netdev,
@@ -1219,6 +1319,43 @@ static void ixgbe_set_multi(struct net_device *netdev)
 
 }
 
+static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
+{
+	int q_idx;
+	struct ixgbe_q_vector *q_vector;
+	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+	/* legacy and MSI only use one vector */
+	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
+		q_vectors = 1;
+
+	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
+		q_vector = &adapter->q_vector[q_idx];
+		if (!q_vector->rxr_count)
+			continue;
+		napi_enable(&q_vector->napi);
+	}
+}
+
+static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
+{
+	int q_idx;
+	struct ixgbe_q_vector *q_vector;
+	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+	/* legacy and MSI only use one vector */
+	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
+		q_vectors = 1;
+
+	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
+		q_vector = &adapter->q_vector[q_idx];
+		if (!q_vector->rxr_count)
+			continue;
+		napi_disable(&q_vector->napi);
+	}
+}
+
+
 static void ixgbe_configure(struct ixgbe_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
@@ -1238,30 +1375,35 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
 static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
-	int i;
-	u32 gpie = 0;
 	struct ixgbe_hw *hw = &adapter->hw;
-	u32 txdctl, rxdctl, mhadd;
+	int i, j = 0;
 	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+	u32 txdctl, rxdctl, mhadd;
+	u32 gpie;
 
 	ixgbe_get_hw_control(adapter);
 
-	if (adapter->flags & (IXGBE_FLAG_MSIX_ENABLED |
-			      IXGBE_FLAG_MSI_ENABLED)) {
+	if ((adapter->flags & IXGBE_FLAG_MSIX_ENABLED) ||
+	    (adapter->flags & IXGBE_FLAG_MSI_ENABLED)) {
 		if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
 			gpie = (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME |
 				IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD);
 		} else {
 			/* MSI only */
-			gpie = (IXGBE_GPIE_EIAME |
-				IXGBE_GPIE_PBA_SUPPORT);
+			gpie = 0;
 		}
-		IXGBE_WRITE_REG(&adapter->hw, IXGBE_GPIE, gpie);
-		gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
+		/* XXX: to interrupt immediately for EICS writes, enable this */
+		/* gpie |= IXGBE_GPIE_EIMEN; */
+		IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
 	}
 
-	mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
+	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
+		/* legacy interrupts, use EIAM to auto-mask when reading EICR,
+		 * specifically only auto mask tx and rx interrupts */
+		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
+	}
 
+	mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
 	if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
 		mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
@@ -1270,15 +1412,21 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
 	}
 
 	for (i = 0; i < adapter->num_tx_queues; i++) {
-		txdctl = IXGBE_READ_REG(&adapter->hw, IXGBE_TXDCTL(i));
+		j = adapter->tx_ring[i].reg_idx;
+		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
 		txdctl |= IXGBE_TXDCTL_ENABLE;
-		IXGBE_WRITE_REG(&adapter->hw, IXGBE_TXDCTL(i), txdctl);
+		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
 	}
 
 	for (i = 0; i < adapter->num_rx_queues; i++) {
-		rxdctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(i));
+		j = adapter->rx_ring[i].reg_idx;
+		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
+		/* enable PTHRESH=32 descriptors (half the internal cache)
+		 * and HTHRESH=0 descriptors (to minimize latency on fetch),
+		 * this also removes a pesky rx_no_buffer_count increment */
+		rxdctl |= 0x0020;
 		rxdctl |= IXGBE_RXDCTL_ENABLE;
-		IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(i), rxdctl);
+		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), rxdctl);
 	}
 	/* enable all receives */
 	rxdctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
@@ -1291,7 +1439,11 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
1291 ixgbe_configure_msi_and_legacy(adapter); 1439 ixgbe_configure_msi_and_legacy(adapter);
1292 1440
1293 clear_bit(__IXGBE_DOWN, &adapter->state); 1441 clear_bit(__IXGBE_DOWN, &adapter->state);
1294 napi_enable(&adapter->napi); 1442 ixgbe_napi_enable_all(adapter);
1443
1444 /* clear any pending interrupts, may auto mask */
1445 IXGBE_READ_REG(hw, IXGBE_EICR);
1446
1295 ixgbe_irq_enable(adapter); 1447 ixgbe_irq_enable(adapter);
1296 1448
1297 /* bring the link up in the watchdog, this could race with our first 1449 /* bring the link up in the watchdog, this could race with our first
@@ -1333,7 +1485,7 @@ static int ixgbe_resume(struct pci_dev *pdev)
1333{ 1485{
1334 struct net_device *netdev = pci_get_drvdata(pdev); 1486 struct net_device *netdev = pci_get_drvdata(pdev);
1335 struct ixgbe_adapter *adapter = netdev_priv(netdev); 1487 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1336 u32 err, num_rx_queues = adapter->num_rx_queues; 1488 u32 err;
1337 1489
1338 pci_set_power_state(pdev, PCI_D0); 1490 pci_set_power_state(pdev, PCI_D0);
1339 pci_restore_state(pdev); 1491 pci_restore_state(pdev);
@@ -1349,7 +1501,7 @@ static int ixgbe_resume(struct pci_dev *pdev)
 	pci_enable_wake(pdev, PCI_D3cold, 0);
 
 	if (netif_running(netdev)) {
-		err = ixgbe_request_irq(adapter, &num_rx_queues);
+		err = ixgbe_request_irq(adapter);
 		if (err)
 			return err;
 	}
@@ -1449,27 +1601,27 @@ static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
 }
 
 /**
- * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
+ * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
  * @adapter: board private structure
  **/
-static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
+static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
 {
 	int i;
 
-	for (i = 0; i < adapter->num_tx_queues; i++)
-		ixgbe_clean_tx_ring(adapter, &adapter->tx_ring[i]);
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		ixgbe_clean_rx_ring(adapter, &adapter->rx_ring[i]);
 }
 
 /**
- * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
+ * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
  * @adapter: board private structure
  **/
-static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
+static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
 {
 	int i;
 
-	for (i = 0; i < adapter->num_rx_queues; i++)
-		ixgbe_clean_rx_ring(adapter, &adapter->rx_ring[i]);
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		ixgbe_clean_tx_ring(adapter, &adapter->tx_ring[i]);
 }
 
 void ixgbe_down(struct ixgbe_adapter *adapter)
@@ -1493,10 +1645,9 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
 	IXGBE_WRITE_FLUSH(&adapter->hw);
 	msleep(10);
 
-	napi_disable(&adapter->napi);
-
 	ixgbe_irq_disable(adapter);
 
+	ixgbe_napi_disable_all(adapter);
 	del_timer_sync(&adapter->watchdog_timer);
 
 	netif_carrier_off(netdev);
@@ -1547,27 +1698,28 @@ static void ixgbe_shutdown(struct pci_dev *pdev)
 }
 
 /**
- * ixgbe_clean - NAPI Rx polling callback
- * @adapter: board private structure
+ * ixgbe_poll - NAPI Rx polling callback
+ * @napi: structure for representing this polling device
+ * @budget: how many packets driver is allowed to clean
+ *
+ * This function is used for legacy and MSI, NAPI mode
  **/
-static int ixgbe_clean(struct napi_struct *napi, int budget)
+static int ixgbe_poll(struct napi_struct *napi, int budget)
 {
-	struct ixgbe_adapter *adapter = container_of(napi,
-					struct ixgbe_adapter, napi);
-	struct net_device *netdev = adapter->netdev;
+	struct ixgbe_q_vector *q_vector = container_of(napi,
+					struct ixgbe_q_vector, napi);
+	struct ixgbe_adapter *adapter = q_vector->adapter;
 	int tx_cleaned = 0, work_done = 0;
 
-	/* In non-MSIX case, there is no multi-Tx/Rx queue */
 	tx_cleaned = ixgbe_clean_tx_irq(adapter, adapter->tx_ring);
-	ixgbe_clean_rx_irq(adapter, &adapter->rx_ring[0], &work_done,
-			   budget);
+	ixgbe_clean_rx_irq(adapter, adapter->rx_ring, &work_done, budget);
 
 	if (tx_cleaned)
 		work_done = budget;
 
 	/* If budget not fully consumed, exit the polling mode */
 	if (work_done < budget) {
-		netif_rx_complete(netdev, napi);
+		netif_rx_complete(adapter->netdev, napi);
 		if (!test_bit(__IXGBE_DOWN, &adapter->state))
 			ixgbe_irq_enable(adapter);
 	}
@@ -1597,6 +1749,132 @@ static void ixgbe_reset_task(struct work_struct *work)
 	ixgbe_reinit_locked(adapter);
 }
 
+static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
+				       int vectors)
+{
+	int err, vector_threshold;
+
+	/* We'll want at least 3 (vector_threshold):
+	 * 1) TxQ[0] Cleanup
+	 * 2) RxQ[0] Cleanup
+	 * 3) Other (Link Status Change, etc.)
+	 * 4) TCP Timer (optional)
+	 */
+	vector_threshold = MIN_MSIX_COUNT;
+
+	/* The more we get, the more we will assign to Tx/Rx Cleanup
+	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
+	 * Right now, we simply care about how many we'll get; we'll
+	 * set them up later while requesting irq's.
+	 */
+	while (vectors >= vector_threshold) {
+		err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
+				      vectors);
+		if (!err) /* Success in acquiring all requested vectors. */
+			break;
+		else if (err < 0)
+			vectors = 0; /* Nasty failure, quit now */
+		else /* err == number of vectors we should try again with */
+			vectors = err;
+	}
+
+	if (vectors < vector_threshold) {
+		/* Can't allocate enough MSI-X interrupts?  Oh well.
+		 * This just means we'll go with either a single MSI
+		 * vector or fall back to legacy interrupts.
+		 */
+		DPRINTK(HW, DEBUG, "Unable to allocate MSI-X interrupts\n");
+		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
+		kfree(adapter->msix_entries);
+		adapter->msix_entries = NULL;
+		adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
+		adapter->num_tx_queues = 1;
+		adapter->num_rx_queues = 1;
+	} else {
+		adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
+		adapter->num_msix_vectors = vectors;
+	}
+}
+
+static void __devinit ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
+{
+	int nrq, ntq;
+	int feature_mask = 0, rss_i, rss_m;
+
+	/* Number of supported queues */
+	switch (adapter->hw.mac.type) {
+	case ixgbe_mac_82598EB:
+		rss_i = adapter->ring_feature[RING_F_RSS].indices;
+		rss_m = 0;
+		feature_mask |= IXGBE_FLAG_RSS_ENABLED;
+
+		switch (adapter->flags & feature_mask) {
+		case (IXGBE_FLAG_RSS_ENABLED):
+			rss_m = 0xF;
+			nrq = rss_i;
+			ntq = 1;
+			break;
+		case 0:
+		default:
+			rss_i = 0;
+			rss_m = 0;
+			nrq = 1;
+			ntq = 1;
+			break;
+		}
+
+		adapter->ring_feature[RING_F_RSS].indices = rss_i;
+		adapter->ring_feature[RING_F_RSS].mask = rss_m;
+		break;
+	default:
+		nrq = 1;
+		ntq = 1;
+		break;
+	}
+
+	adapter->num_rx_queues = nrq;
+	adapter->num_tx_queues = ntq;
+}
+
+/**
+ * ixgbe_cache_ring_register - Descriptor ring to register mapping
+ * @adapter: board private structure to initialize
+ *
+ * Once we know the feature-set enabled for the device, we'll cache
+ * the register offset the descriptor ring is assigned to.
+ **/
+static void __devinit ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
+{
+	/* TODO: Remove all uses of the indices in the cases where multiple
+	 * features are OR'd together, if the feature set makes sense.
+	 */
+	int feature_mask = 0, rss_i;
+	int i, txr_idx, rxr_idx;
+
+	/* Number of supported queues */
+	switch (adapter->hw.mac.type) {
+	case ixgbe_mac_82598EB:
+		rss_i = adapter->ring_feature[RING_F_RSS].indices;
+		txr_idx = 0;
+		rxr_idx = 0;
+		feature_mask |= IXGBE_FLAG_RSS_ENABLED;
+		switch (adapter->flags & feature_mask) {
+		case (IXGBE_FLAG_RSS_ENABLED):
+			for (i = 0; i < adapter->num_rx_queues; i++)
+				adapter->rx_ring[i].reg_idx = i;
+			for (i = 0; i < adapter->num_tx_queues; i++)
+				adapter->tx_ring[i].reg_idx = i;
+			break;
+		case 0:
+		default:
+			break;
+		}
+		break;
+	default:
+		break;
+	}
+}
+
 /**
  * ixgbe_alloc_queues - Allocate memory for all rings
  * @adapter: board private structure to initialize
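
pci_enable_msix() in this era has a three-way return that the acquisition
loop in ixgbe_acquire_msix_vectors() above leans on: 0 on success, a negative
errno on hard failure, and a positive count meaning "retry with at most this
many vectors". A sketch of the same negotiation against a mocked allocator
(mock_enable_msix() and its counts are hypothetical stand-ins, not the PCI
core):

#include <stdio.h>

/* mock with the old pci_enable_msix() contract: 0 = success, <0 = hard
 * failure, >0 = largest vector count worth retrying with */
static int mock_enable_msix(int requested)
{
	int available = 5;	/* pretend the platform can give us 5 */
	return (requested <= available) ? 0 : available;
}

int main(void)
{
	int vectors = 18;		/* e.g. 16 queue vectors + 2 others */
	int vector_threshold = 3;	/* rx, tx, lsc at minimum */

	while (vectors >= vector_threshold) {
		int err = mock_enable_msix(vectors);
		if (!err)
			break;		/* acquired everything we asked for */
		else if (err < 0)
			vectors = 0;	/* nasty failure, quit now */
		else
			vectors = err;	/* retry with what is on offer */
	}

	if (vectors >= vector_threshold)
		printf("MSI-X enabled with %d vectors\n", vectors);
	else
		printf("falling back to MSI/legacy\n");
	return 0;	/* prints "MSI-X enabled with 5 vectors" */
}
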
@@ -1612,25 +1890,163 @@ static int __devinit ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
 	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
 				   sizeof(struct ixgbe_ring), GFP_KERNEL);
 	if (!adapter->tx_ring)
-		return -ENOMEM;
-
-	for (i = 0; i < adapter->num_tx_queues; i++)
-		adapter->tx_ring[i].count = IXGBE_DEFAULT_TXD;
+		goto err_tx_ring_allocation;
 
 	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
 				   sizeof(struct ixgbe_ring), GFP_KERNEL);
-	if (!adapter->rx_ring) {
-		kfree(adapter->tx_ring);
-		return -ENOMEM;
-	}
+	if (!adapter->rx_ring)
+		goto err_rx_ring_allocation;
 
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		adapter->tx_ring[i].count = IXGBE_DEFAULT_TXD;
+		adapter->tx_ring[i].queue_index = i;
+	}
 	for (i = 0; i < adapter->num_rx_queues; i++) {
-		adapter->rx_ring[i].adapter = adapter;
-		adapter->rx_ring[i].itr_register = IXGBE_EITR(i);
 		adapter->rx_ring[i].count = IXGBE_DEFAULT_RXD;
+		adapter->rx_ring[i].queue_index = i;
+	}
+
+	ixgbe_cache_ring_register(adapter);
+
+	return 0;
+
+err_rx_ring_allocation:
+	kfree(adapter->tx_ring);
+err_tx_ring_allocation:
+	return -ENOMEM;
+}
+
+/**
+ * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
+ * @adapter: board private structure to initialize
+ *
+ * Attempt to configure the interrupts using the best available
+ * capabilities of the hardware and the kernel.
+ **/
+static int __devinit ixgbe_set_interrupt_capability(struct ixgbe_adapter
+						    *adapter)
+{
+	int err = 0;
+	int vector, v_budget;
+
+	/*
+	 * It's easy to be greedy for MSI-X vectors, but it really
+	 * doesn't do us much good if we have a lot more vectors
+	 * than CPU's.  So let's be conservative and only ask for
+	 * (roughly) twice the number of vectors as there are CPU's.
+	 */
+	v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
+		       (int)(num_online_cpus() * 2)) + NON_Q_VECTORS;
+
+	/*
+	 * At the same time, hardware can only support a maximum of
+	 * MAX_MSIX_COUNT vectors.  With features such as RSS and VMDq,
+	 * we can easily reach upwards of 64 Rx descriptor queues and
+	 * 32 Tx queues.  Thus, we cap it off in those rare cases where
+	 * the cpu count also exceeds our vector limit.
+	 */
+	v_budget = min(v_budget, MAX_MSIX_COUNT);
+
+	/* A failure in MSI-X entry allocation isn't fatal, but it does
+	 * mean we disable MSI-X capabilities of the adapter. */
+	adapter->msix_entries = kcalloc(v_budget,
+					sizeof(struct msix_entry), GFP_KERNEL);
+	if (!adapter->msix_entries) {
+		adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
+		ixgbe_set_num_queues(adapter);
+		kfree(adapter->tx_ring);
+		kfree(adapter->rx_ring);
+		err = ixgbe_alloc_queues(adapter);
+		if (err) {
+			DPRINTK(PROBE, ERR, "Unable to allocate memory "
+				"for queues\n");
+			goto out;
+		}
+
+		goto try_msi;
+	}
+
+	for (vector = 0; vector < v_budget; vector++)
+		adapter->msix_entries[vector].entry = vector;
+
+	ixgbe_acquire_msix_vectors(adapter, v_budget);
+
+	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
+		goto out;
+
+try_msi:
+	err = pci_enable_msi(adapter->pdev);
+	if (!err) {
+		adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
+	} else {
+		DPRINTK(HW, DEBUG, "Unable to allocate MSI interrupt, "
+			"falling back to legacy.  Error: %d\n", err);
+		/* reset err */
+		err = 0;
+	}
+
+out:
+
+	return err;
+}
+
+static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
+{
+	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
+		pci_disable_msix(adapter->pdev);
+		kfree(adapter->msix_entries);
+		adapter->msix_entries = NULL;
+	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
+		adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
+		pci_disable_msi(adapter->pdev);
+	}
+	return;
+}
+
+/**
+ * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
+ * @adapter: board private structure to initialize
+ *
+ * We determine which interrupt scheme to use based on...
+ * - Kernel support (MSI, MSI-X)
+ *   - which can be user-defined (via MODULE_PARAM)
+ * - Hardware queue count (num_*_queues)
+ *   - defined by miscellaneous hardware support/features (RSS, etc.)
+ **/
+static int __devinit ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
+{
+	int err;
+
+	/* Number of supported queues */
+	ixgbe_set_num_queues(adapter);
+
+	err = ixgbe_alloc_queues(adapter);
+	if (err) {
+		DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
+		goto err_alloc_queues;
+	}
+
+	err = ixgbe_set_interrupt_capability(adapter);
+	if (err) {
+		DPRINTK(PROBE, ERR, "Unable to setup interrupt capabilities\n");
+		goto err_set_interrupt;
 	}
 
+	DPRINTK(DRV, INFO, "Multiqueue %s: Rx Queue count = %u, "
+		"Tx Queue count = %u\n",
+		(adapter->num_rx_queues > 1) ? "Enabled" :
+		"Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
+
+	set_bit(__IXGBE_DOWN, &adapter->state);
+
 	return 0;
+
+err_set_interrupt:
+	kfree(adapter->tx_ring);
+	kfree(adapter->rx_ring);
+err_alloc_queues:
+	return err;
 }
 
 /**
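
The budget arithmetic in ixgbe_set_interrupt_capability() boils down to
min(rx queues + tx queues, 2 * CPUs) + NON_Q_VECTORS, clamped to the
hardware's MSI-X table size. A worked instance (the constant values below
are illustrative placeholders, not quoted from ixgbe.h):

#include <stdio.h>

#define min(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	/* illustrative stand-ins for the driver's constants */
	int non_q_vectors = 2;		/* "other" + TCP timer */
	int max_msix_count = 18;
	int online_cpus = 4;
	int rx_queues = 16, tx_queues = 1;

	int v_budget = min(rx_queues + tx_queues, online_cpus * 2)
		       + non_q_vectors;
	v_budget = min(v_budget, max_msix_count);

	printf("v_budget = %d\n", v_budget);	/* min(17, 8) + 2 = 10 */
	return 0;
}
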
@@ -1645,11 +2061,18 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
 	struct pci_dev *pdev = adapter->pdev;
+	unsigned int rss;
+
+	/* Set capability flags */
+	rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus());
+	adapter->ring_feature[RING_F_RSS].indices = rss;
+	adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
 
 	/* default flow control settings */
 	hw->fc.original_type = ixgbe_fc_full;
 	hw->fc.type = ixgbe_fc_full;
 
+	/* select 10G link by default */
 	hw->mac.link_mode_select = IXGBE_AUTOC_LMS_10G_LINK_NO_AN;
 	if (hw->mac.ops.reset(hw)) {
 		dev_err(&pdev->dev, "HW Init failed\n");
@@ -1667,16 +2090,9 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
 		return -EIO;
 	}
 
-	/* Set the default values */
-	adapter->num_rx_queues = IXGBE_DEFAULT_RXQ;
-	adapter->num_tx_queues = 1;
+	/* enable rx csum by default */
 	adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
 
-	if (ixgbe_alloc_queues(adapter)) {
-		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
-		return -ENOMEM;
-	}
-
 	set_bit(__IXGBE_DOWN, &adapter->state);
 
 	return 0;
@@ -1716,7 +2132,6 @@ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
 		return -ENOMEM;
 	}
 
-	txdr->adapter = adapter;
 	txdr->next_to_use = 0;
 	txdr->next_to_clean = 0;
 	txdr->work_limit = txdr->count;
@@ -1735,7 +2150,7 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
 			       struct ixgbe_ring *rxdr)
 {
 	struct pci_dev *pdev = adapter->pdev;
-	int size, desc_len;
+	int size;
 
 	size = sizeof(struct ixgbe_rx_buffer) * rxdr->count;
 	rxdr->rx_buffer_info = vmalloc(size);
@@ -1746,10 +2161,8 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
 	}
 	memset(rxdr->rx_buffer_info, 0, size);
 
-	desc_len = sizeof(union ixgbe_adv_rx_desc);
-
 	/* Round up to nearest 4K */
-	rxdr->size = rxdr->count * desc_len;
+	rxdr->size = rxdr->count * sizeof(union ixgbe_adv_rx_desc);
 	rxdr->size = ALIGN(rxdr->size, 4096);
 
 	rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
@@ -1763,7 +2176,6 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
 
 	rxdr->next_to_clean = 0;
 	rxdr->next_to_use = 0;
-	rxdr->adapter = adapter;
 
 	return 0;
 }
@@ -1841,8 +2253,7 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
 }
 
 /**
- * ixgbe_setup_all_tx_resources - wrapper to allocate Tx resources
- *				  (Descriptors) for all queues
+ * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
  * @adapter: board private structure
  *
 * If this function returns with an error, then it's possible one or
@@ -1868,8 +2279,7 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
 }
 
 /**
- * ixgbe_setup_all_rx_resources - wrapper to allocate Rx resources
- *				  (Descriptors) for all queues
+ * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
  * @adapter: board private structure
  *
 * If this function returns with an error, then it's possible one or
@@ -1911,6 +2321,9 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
 	    (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
 		return -EINVAL;
 
+	DPRINTK(PROBE, INFO, "changing MTU from %d to %d\n",
+		netdev->mtu, new_mtu);
+	/* must set new MTU before calling down or up */
 	netdev->mtu = new_mtu;
 
 	if (netif_running(netdev))
@@ -1935,23 +2348,16 @@ static int ixgbe_open(struct net_device *netdev)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	int err;
-	u32 num_rx_queues = adapter->num_rx_queues;
 
 	/* disallow open during test */
 	if (test_bit(__IXGBE_TESTING, &adapter->state))
 		return -EBUSY;
 
-try_intr_reinit:
 	/* allocate transmit descriptors */
 	err = ixgbe_setup_all_tx_resources(adapter);
 	if (err)
 		goto err_setup_tx;
 
-	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
-		num_rx_queues = 1;
-		adapter->num_rx_queues = num_rx_queues;
-	}
-
 	/* allocate receive descriptors */
 	err = ixgbe_setup_all_rx_resources(adapter);
 	if (err)
@@ -1959,31 +2365,10 @@ try_intr_reinit:
1959 2365
1960 ixgbe_configure(adapter); 2366 ixgbe_configure(adapter);
1961 2367
1962 err = ixgbe_request_irq(adapter, &num_rx_queues); 2368 err = ixgbe_request_irq(adapter);
1963 if (err) 2369 if (err)
1964 goto err_req_irq; 2370 goto err_req_irq;
1965 2371
1966 /* ixgbe_request might have reduced num_rx_queues */
1967 if (num_rx_queues < adapter->num_rx_queues) {
1968 /* We didn't get MSI-X, so we need to release everything,
1969 * set our Rx queue count to num_rx_queues, and redo the
1970 * whole init process.
1971 */
1972 ixgbe_free_irq(adapter);
1973 if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
1974 pci_disable_msi(adapter->pdev);
1975 adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
1976 }
1977 ixgbe_free_all_rx_resources(adapter);
1978 ixgbe_free_all_tx_resources(adapter);
1979 adapter->num_rx_queues = num_rx_queues;
1980
1981 /* Reset the hardware, and start over. */
1982 ixgbe_reset(adapter);
1983
1984 goto try_intr_reinit;
1985 }
1986
1987 err = ixgbe_up_complete(adapter); 2372 err = ixgbe_up_complete(adapter);
1988 if (err) 2373 if (err)
1989 goto err_up; 2374 goto err_up;
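
Reviewer note: the deleted try_intr_reinit loop tore down rings, disabled MSI, reset the hardware, and re-ran the whole open path whenever MSI-X allocation came up short; with this change the interrupt mode is settled once at probe time by ixgbe_init_interrupt_scheme(). A userspace sketch of that one-shot fallback ladder (the enum, the stub, and the forced MSI-X failure are all illustrative, not driver code):

#include <stdio.h>

enum irq_mode { MODE_MSIX, MODE_MSI, MODE_LEGACY };

/* Stub that pretends MSI-X vector allocation fails, so the ladder runs. */
static int try_enable(enum irq_mode mode)
{
	return (mode == MODE_MSIX) ? -1 : 0;
}

int main(void)
{
	enum irq_mode mode = MODE_MSIX;

	/* Fall back MSI-X -> MSI -> legacy INTx exactly once, up front,
	 * so open() never has to free resources and restart itself. */
	while (mode != MODE_LEGACY && try_enable(mode) < 0)
		mode = (enum irq_mode)(mode + 1);

	printf("interrupt mode: %d (0=MSI-X, 1=MSI, 2=legacy)\n", mode);
	return 0;
}
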
@@ -2154,10 +2539,23 @@ static void ixgbe_watchdog(unsigned long data)
2154 2539
2155 ixgbe_update_stats(adapter); 2540 ixgbe_update_stats(adapter);
2156 2541
2157 /* Reset the timer */ 2542 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2158 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 2543 /* Cause software interrupt to ensure rx rings are cleaned */
2544 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
2545 u32 eics =
2546 (1 << (adapter->num_msix_vectors - NON_Q_VECTORS)) - 1;
2547 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, eics);
2548 } else {
2549 /* for legacy and MSI interrupts, don't set any bits that
2550 * are enabled for EIAM, because this operation would
2551 * set *both* EIMS and EICS for any bit in EIAM */
2552 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
2553 (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
2554 }
2555 /* Reset the timer */
2159 mod_timer(&adapter->watchdog_timer, 2556 mod_timer(&adapter->watchdog_timer,
2160 round_jiffies(jiffies + 2 * HZ)); 2557 round_jiffies(jiffies + 2 * HZ));
2558 }
2161} 2559}
2162 2560
2163static int ixgbe_tso(struct ixgbe_adapter *adapter, 2561static int ixgbe_tso(struct ixgbe_adapter *adapter,
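
Reviewer note: in the MSI-X branch of the watchdog hunk above, the EICS write raises a software interrupt on every queue vector so stalled rings get cleaned. The mask arithmetic in isolation, assuming NON_Q_VECTORS is 1 (one vector reserved for link and other causes; that value is an assumption here):

#include <stdio.h>

#define NON_Q_VECTORS	1	/* assumed: one non-queue vector */

int main(void)
{
	int num_msix_vectors = 5;	/* e.g. 4 queue vectors + 1 other */
	unsigned int eics = (1u << (num_msix_vectors - NON_Q_VECTORS)) - 1;

	printf("EICS = 0x%x\n", eics);	/* 0xf: bits 0-3, one per queue vector */
	return 0;
}
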
@@ -2604,6 +3002,31 @@ static void ixgbe_netpoll(struct net_device *netdev)
2604#endif 3002#endif
2605 3003
2606/** 3004/**
3005 * ixgbe_napi_add_all - prep napi structs for use
3006 * @adapter: private struct
3007 * Helper function to netif_napi_add() each possible q_vector->napi.
3008 */
3009static void ixgbe_napi_add_all(struct ixgbe_adapter *adapter)
3010{
3011 int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
3012 int (*poll)(struct napi_struct *, int);
3013
3014 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3015 poll = &ixgbe_clean_rxonly;
3016 } else {
3017 poll = &ixgbe_poll;
3018 /* only one q_vector for legacy modes */
3019 q_vectors = 1;
3020 }
3021
3022 for (i = 0; i < q_vectors; i++) {
3023 struct ixgbe_q_vector *q_vector = &adapter->q_vector[i];
3024 netif_napi_add(adapter->netdev, &q_vector->napi,
3025 (*poll), 64);
3026 }
3027}
3028
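
Reviewer note: the weight of 64 passed to netif_napi_add() is the packet budget NAPI offers each poll invocation; a handler that consumes its whole budget is polled again. A toy userspace illustration of that budget contract (nothing here is a kernel API):

#include <stdio.h>

/* Toy poll: consume up to 'budget' packets, report how many were done. */
static int toy_poll(int *backlog, int budget)
{
	int done = (*backlog < budget) ? *backlog : budget;

	*backlog -= done;
	return done;
}

int main(void)
{
	int backlog = 150, weight = 64, done;

	do {	/* NAPI keeps calling poll while the full budget is used */
		done = toy_poll(&backlog, weight);
		printf("polled %d, %d left\n", done, backlog);
	} while (done == weight);
	return 0;
}
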
3029/**
2607 * ixgbe_probe - Device Initialization Routine 3030 * ixgbe_probe - Device Initialization Routine
2608 * @pdev: PCI device information struct 3031 * @pdev: PCI device information struct
2609 * @ent: entry in ixgbe_pci_tbl 3032 * @ent: entry in ixgbe_pci_tbl
@@ -2696,7 +3119,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
2696 ixgbe_set_ethtool_ops(netdev); 3119 ixgbe_set_ethtool_ops(netdev);
2697 netdev->tx_timeout = &ixgbe_tx_timeout; 3120 netdev->tx_timeout = &ixgbe_tx_timeout;
2698 netdev->watchdog_timeo = 5 * HZ; 3121 netdev->watchdog_timeo = 5 * HZ;
2699 netif_napi_add(netdev, &adapter->napi, ixgbe_clean, 64);
2700 netdev->vlan_rx_register = ixgbe_vlan_rx_register; 3122 netdev->vlan_rx_register = ixgbe_vlan_rx_register;
2701 netdev->vlan_rx_add_vid = ixgbe_vlan_rx_add_vid; 3123 netdev->vlan_rx_add_vid = ixgbe_vlan_rx_add_vid;
2702 netdev->vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid; 3124 netdev->vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid;
@@ -2719,6 +3141,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
2719 3141
2720 /* Setup hw api */ 3142 /* Setup hw api */
2721 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops)); 3143 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
3144 hw->mac.type = ii->mac;
2722 3145
2723 err = ii->get_invariants(hw); 3146 err = ii->get_invariants(hw);
2724 if (err) 3147 if (err)
@@ -2770,9 +3193,9 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
2770 hw->fc.low_water = IXGBE_DEFAULT_FCRTL; 3193 hw->fc.low_water = IXGBE_DEFAULT_FCRTL;
2771 hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE; 3194 hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
2772 3195
2773 /* Interrupt Throttle Rate */ 3196 err = ixgbe_init_interrupt_scheme(adapter);
2774 adapter->rx_eitr = (1000000 / IXGBE_DEFAULT_ITR_RX_USECS); 3197 if (err)
2775 adapter->tx_eitr = (1000000 / IXGBE_DEFAULT_ITR_TX_USECS); 3198 goto err_sw_init;
2776 3199
2777 /* print bus type/speed/width info */ 3200 /* print bus type/speed/width info */
2778 pci_read_config_word(pdev, IXGBE_PCI_LINK_STATUS, &link_status); 3201 pci_read_config_word(pdev, IXGBE_PCI_LINK_STATUS, &link_status);
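
Reviewer note: the removed initialisers stored the throttle rate as interrupts per second, derived from a default interval in microseconds; that setup now lives in ixgbe_init_interrupt_scheme(). The conversion on its own, with a hypothetical interval since the real IXGBE_DEFAULT_ITR_*_USECS values are defined in ixgbe.h and not visible in this hunk:

#include <stdio.h>

int main(void)
{
	int rx_usecs = 125;	/* hypothetical default Rx interval */

	/* 10^6 us per second / interval per interrupt = interrupts/second */
	printf("rx_eitr = %d ints/s\n", 1000000 / rx_usecs);	/* 8000 */
	return 0;
}
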
@@ -2809,6 +3232,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
2809 netif_carrier_off(netdev); 3232 netif_carrier_off(netdev);
2810 netif_stop_queue(netdev); 3233 netif_stop_queue(netdev);
2811 3234
3235 ixgbe_napi_add_all(adapter);
3236
2812 strcpy(netdev->name, "eth%d"); 3237 strcpy(netdev->name, "eth%d");
2813 err = register_netdev(netdev); 3238 err = register_netdev(netdev);
2814 if (err) 3239 if (err)
@@ -2823,6 +3248,7 @@ err_register:
2823 ixgbe_release_hw_control(adapter); 3248 ixgbe_release_hw_control(adapter);
2824err_hw_init: 3249err_hw_init:
2825err_sw_init: 3250err_sw_init:
3251 ixgbe_reset_interrupt_capability(adapter);
2826err_eeprom: 3252err_eeprom:
2827 iounmap(hw->hw_addr); 3253 iounmap(hw->hw_addr);
2828err_ioremap: 3254err_ioremap:
@@ -2856,14 +3282,17 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
2856 3282
2857 unregister_netdev(netdev); 3283 unregister_netdev(netdev);
2858 3284
2859 ixgbe_release_hw_control(adapter); 3285 ixgbe_reset_interrupt_capability(adapter);
2860 3286
2861 kfree(adapter->tx_ring); 3287 ixgbe_release_hw_control(adapter);
2862 kfree(adapter->rx_ring);
2863 3288
2864 iounmap(adapter->hw.hw_addr); 3289 iounmap(adapter->hw.hw_addr);
2865 pci_release_regions(pdev); 3290 pci_release_regions(pdev);
2866 3291
3292 DPRINTK(PROBE, INFO, "complete\n");
3293 kfree(adapter->tx_ring);
3294 kfree(adapter->rx_ring);
3295
2867 free_netdev(netdev); 3296 free_netdev(netdev);
2868 3297
2869 pci_disable_device(pdev); 3298 pci_disable_device(pdev);