aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/ethernet/intel/ixgbevf
diff options
context:
space:
mode:
authorAlexander Duyck <alexander.h.duyck@intel.com>2012-05-11 04:32:55 -0400
committerJeff Kirsher <jeffrey.t.kirsher@intel.com>2012-07-17 06:07:18 -0400
commit5f3600ebe252aa5fe782e9f9115c66c639f62ac0 (patch)
tree8d6e909f1f46095bdff0892330b15e9b714c67d4 /drivers/net/ethernet/intel/ixgbevf
parentfa71ae270a9af0ee3a1bd605d008f750371cfc1f (diff)
ixgbevf: Use igb style interrupt masks instead of ixgbe style
The interrupt registers accessed in ixgbevf are more similar to the igb style registers than they are to the ixgbe style registers. As such we would be better off setting up the code for the EICS, EIMS, EIMC, EIAM, and EIAC like we do in igb instead of ixgbe. Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com> Signed-off-by: Greg Rose <gregory.v.rose@intel.com> Tested-by: Sibai Li <sibai.li@intel.com> Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers/net/ethernet/intel/ixgbevf')
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/defines.h27
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h29
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c195
3 files changed, 105 insertions, 146 deletions
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index e09a6cc633bb..10cede503fe8 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -264,32 +264,9 @@ struct ixgbe_adv_tx_context_desc {
264 264
265/* Interrupt register bitmasks */ 265/* Interrupt register bitmasks */
266 266
267/* Extended Interrupt Cause Read */
268#define IXGBE_EICR_RTX_QUEUE 0x0000FFFF /* RTx Queue Interrupt */
269#define IXGBE_EICR_MAILBOX 0x00080000 /* VF to PF Mailbox Interrupt */
270#define IXGBE_EICR_OTHER 0x80000000 /* Interrupt Cause Active */
271
272/* Extended Interrupt Cause Set */
273#define IXGBE_EICS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
274#define IXGBE_EICS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
275#define IXGBE_EICS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
276
277/* Extended Interrupt Mask Set */
278#define IXGBE_EIMS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
279#define IXGBE_EIMS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
280#define IXGBE_EIMS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
281
282/* Extended Interrupt Mask Clear */
283#define IXGBE_EIMC_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
284#define IXGBE_EIMC_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
285#define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
286
287#define IXGBE_EIMS_ENABLE_MASK ( \
288 IXGBE_EIMS_RTX_QUEUE | \
289 IXGBE_EIMS_MAILBOX | \
290 IXGBE_EIMS_OTHER)
291
292#define IXGBE_EITR_CNT_WDIS 0x80000000 267#define IXGBE_EITR_CNT_WDIS 0x80000000
268#define IXGBE_MAX_EITR 0x00000FF8
269#define IXGBE_MIN_EITR 8
293 270
294/* Error Codes */ 271/* Error Codes */
295#define IXGBE_ERR_INVALID_MAC_ADDR -1 272#define IXGBE_ERR_INVALID_MAC_ADDR -1
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 8bedd0fef0b7..f92daca249f8 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -118,6 +118,8 @@ struct ixgbevf_ring {
118 118
119struct ixgbevf_ring_container { 119struct ixgbevf_ring_container {
120 struct ixgbevf_ring *ring; /* pointer to linked list of rings */ 120 struct ixgbevf_ring *ring; /* pointer to linked list of rings */
121 unsigned int total_bytes; /* total bytes processed this int */
122 unsigned int total_packets; /* total packets processed this int */
121 u8 count; /* total number of rings in vector */ 123 u8 count; /* total number of rings in vector */
122 u8 itr; /* current ITR setting for ring */ 124 u8 itr; /* current ITR setting for ring */
123}; 125};
@@ -131,13 +133,25 @@ struct ixgbevf_ring_container {
131 */ 133 */
132struct ixgbevf_q_vector { 134struct ixgbevf_q_vector {
133 struct ixgbevf_adapter *adapter; 135 struct ixgbevf_adapter *adapter;
136 u16 v_idx; /* index of q_vector within array, also used for
137 * finding the bit in EICR and friends that
138 * represents the vector for this ring */
139 u16 itr; /* Interrupt throttle rate written to EITR */
134 struct napi_struct napi; 140 struct napi_struct napi;
135 struct ixgbevf_ring_container rx, tx; 141 struct ixgbevf_ring_container rx, tx;
136 u32 eitr;
137 int v_idx; /* vector index in list */
138 char name[IFNAMSIZ + 9]; 142 char name[IFNAMSIZ + 9];
139}; 143};
140 144
145/*
146 * microsecond values for various ITR rates shifted by 2 to fit itr register
147 * with the first 3 bits reserved 0
148 */
149#define IXGBE_MIN_RSC_ITR 24
150#define IXGBE_100K_ITR 40
151#define IXGBE_20K_ITR 200
152#define IXGBE_10K_ITR 400
153#define IXGBE_8K_ITR 500
154
141/* Helper macros to switch between ints/sec and what the register uses. 155/* Helper macros to switch between ints/sec and what the register uses.
142 * And yes, it's the same math going both ways. The lowest value 156 * And yes, it's the same math going both ways. The lowest value
143 * supported by all of the ixgbe hardware is 8. 157 * supported by all of the ixgbe hardware is 8.
@@ -176,12 +190,16 @@ struct ixgbevf_adapter {
176 struct ixgbevf_q_vector *q_vector[MAX_MSIX_Q_VECTORS]; 190 struct ixgbevf_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
177 191
178 /* Interrupt Throttle Rate */ 192 /* Interrupt Throttle Rate */
179 u32 itr_setting; 193 u16 rx_itr_setting;
194 u16 tx_itr_setting;
195
196 /* interrupt masks */
197 u32 eims_enable_mask;
198 u32 eims_other;
180 199
181 /* TX */ 200 /* TX */
182 struct ixgbevf_ring *tx_ring; /* One per active queue */ 201 struct ixgbevf_ring *tx_ring; /* One per active queue */
183 int num_tx_queues; 202 int num_tx_queues;
184 u16 tx_itr_setting;
185 u64 restart_queue; 203 u64 restart_queue;
186 u64 hw_csum_tx_good; 204 u64 hw_csum_tx_good;
187 u64 lsc_int; 205 u64 lsc_int;
@@ -192,7 +210,6 @@ struct ixgbevf_adapter {
192 /* RX */ 210 /* RX */
193 struct ixgbevf_ring *rx_ring; /* One per active queue */ 211 struct ixgbevf_ring *rx_ring; /* One per active queue */
194 int num_rx_queues; 212 int num_rx_queues;
195 u16 rx_itr_setting;
196 u64 hw_csum_rx_error; 213 u64 hw_csum_rx_error;
197 u64 hw_rx_no_dma_resources; 214 u64 hw_rx_no_dma_resources;
198 u64 hw_csum_rx_good; 215 u64 hw_csum_rx_good;
@@ -265,7 +282,7 @@ extern void ixgbevf_free_rx_resources(struct ixgbevf_adapter *,
265extern void ixgbevf_free_tx_resources(struct ixgbevf_adapter *, 282extern void ixgbevf_free_tx_resources(struct ixgbevf_adapter *,
266 struct ixgbevf_ring *); 283 struct ixgbevf_ring *);
267extern void ixgbevf_update_stats(struct ixgbevf_adapter *adapter); 284extern void ixgbevf_update_stats(struct ixgbevf_adapter *adapter);
268 285void ixgbevf_write_eitr(struct ixgbevf_q_vector *);
269extern int ethtool_ioctl(struct ifreq *ifr); 286extern int ethtool_ioctl(struct ifreq *ifr);
270 287
271extern void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter); 288extern void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 628643b7f286..8e022c6f4b90 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -98,8 +98,6 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
98 98
99/* forward decls */ 99/* forward decls */
100static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector); 100static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
101static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx,
102 u32 itr_reg);
103 101
104static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw, 102static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
105 struct ixgbevf_ring *rx_ring, 103 struct ixgbevf_ring *rx_ring,
@@ -385,13 +383,11 @@ no_buffers:
385} 383}
386 384
387static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter, 385static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
388 u64 qmask) 386 u32 qmask)
389{ 387{
390 u32 mask;
391 struct ixgbe_hw *hw = &adapter->hw; 388 struct ixgbe_hw *hw = &adapter->hw;
392 389
393 mask = (qmask & 0xFFFFFFFF); 390 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
394 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
395} 391}
396 392
397static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, 393static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
@@ -561,11 +557,10 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
561static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter) 557static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
562{ 558{
563 struct ixgbevf_q_vector *q_vector; 559 struct ixgbevf_q_vector *q_vector;
564 struct ixgbe_hw *hw = &adapter->hw;
565 int q_vectors, v_idx; 560 int q_vectors, v_idx;
566 u32 mask;
567 561
568 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 562 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
563 adapter->eims_enable_mask = 0;
569 564
570 /* 565 /*
571 * Populate the IVAR table and set the ITR values to the 566 * Populate the IVAR table and set the ITR values to the
@@ -581,22 +576,30 @@ static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
581 ixgbevf_for_each_ring(ring, q_vector->tx) 576 ixgbevf_for_each_ring(ring, q_vector->tx)
582 ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx); 577 ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);
583 578
584 /* if this is a tx only vector halve the interrupt rate */ 579 if (q_vector->tx.ring && !q_vector->rx.ring) {
585 if (q_vector->tx.ring && !q_vector->rx.ring) 580 /* tx only vector */
586 q_vector->eitr = (adapter->eitr_param >> 1); 581 if (adapter->tx_itr_setting == 1)
587 else if (q_vector->rx.ring) 582 q_vector->itr = IXGBE_10K_ITR;
588 /* rx only */ 583 else
589 q_vector->eitr = adapter->eitr_param; 584 q_vector->itr = adapter->tx_itr_setting;
585 } else {
586 /* rx or rx/tx vector */
587 if (adapter->rx_itr_setting == 1)
588 q_vector->itr = IXGBE_20K_ITR;
589 else
590 q_vector->itr = adapter->rx_itr_setting;
591 }
592
593 /* add q_vector eims value to global eims_enable_mask */
594 adapter->eims_enable_mask |= 1 << v_idx;
590 595
591 ixgbevf_write_eitr(adapter, v_idx, q_vector->eitr); 596 ixgbevf_write_eitr(q_vector);
592 } 597 }
593 598
594 ixgbevf_set_ivar(adapter, -1, 1, v_idx); 599 ixgbevf_set_ivar(adapter, -1, 1, v_idx);
595 600 /* setup eims_other and add value to global eims_enable_mask */
596 /* set up to autoclear timer, and the vectors */ 601 adapter->eims_other = 1 << v_idx;
597 mask = IXGBE_EIMS_ENABLE_MASK; 602 adapter->eims_enable_mask |= adapter->eims_other;
598 mask &= ~IXGBE_EIMS_OTHER;
599 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
600} 603}
601 604
602enum latency_range { 605enum latency_range {
@@ -608,11 +611,8 @@ enum latency_range {
608 611
609/** 612/**
610 * ixgbevf_update_itr - update the dynamic ITR value based on statistics 613 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
611 * @adapter: pointer to adapter 614 * @q_vector: structure containing interrupt and ring information
612 * @eitr: eitr setting (ints per sec) to give last timeslice 615 * @ring_container: structure containing ring performance data
613 * @itr_setting: current throttle rate in ints/second
614 * @packets: the number of packets during this measurement interval
615 * @bytes: the number of bytes during this measurement interval
616 * 616 *
617 * Stores a new ITR value based on packets and byte 617 * Stores a new ITR value based on packets and byte
618 * counts during the last interrupt. The advantage of per interrupt 618 * counts during the last interrupt. The advantage of per interrupt
@@ -622,17 +622,17 @@ enum latency_range {
622 * on testing data as well as attempting to minimize response time 622 * on testing data as well as attempting to minimize response time
623 * while increasing bulk throughput. 623 * while increasing bulk throughput.
624 **/ 624 **/
625static u8 ixgbevf_update_itr(struct ixgbevf_adapter *adapter, 625static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
626 u32 eitr, u8 itr_setting, 626 struct ixgbevf_ring_container *ring_container)
627 int packets, int bytes)
628{ 627{
629 unsigned int retval = itr_setting; 628 int bytes = ring_container->total_bytes;
629 int packets = ring_container->total_packets;
630 u32 timepassed_us; 630 u32 timepassed_us;
631 u64 bytes_perint; 631 u64 bytes_perint;
632 u8 itr_setting = ring_container->itr;
632 633
633 if (packets == 0) 634 if (packets == 0)
634 goto update_itr_done; 635 return;
635
636 636
637 /* simple throttlerate management 637 /* simple throttlerate management
638 * 0-20MB/s lowest (100000 ints/s) 638 * 0-20MB/s lowest (100000 ints/s)
@@ -640,46 +640,48 @@ static u8 ixgbevf_update_itr(struct ixgbevf_adapter *adapter,
640 * 100-1249MB/s bulk (8000 ints/s) 640 * 100-1249MB/s bulk (8000 ints/s)
641 */ 641 */
642 /* what was last interrupt timeslice? */ 642 /* what was last interrupt timeslice? */
643 timepassed_us = 1000000/eitr; 643 timepassed_us = q_vector->itr >> 2;
644 bytes_perint = bytes / timepassed_us; /* bytes/usec */ 644 bytes_perint = bytes / timepassed_us; /* bytes/usec */
645 645
646 switch (itr_setting) { 646 switch (itr_setting) {
647 case lowest_latency: 647 case lowest_latency:
648 if (bytes_perint > 10) 648 if (bytes_perint > 10)
649 retval = low_latency; 649 itr_setting = low_latency;
650 break; 650 break;
651 case low_latency: 651 case low_latency:
652 if (bytes_perint > 20) 652 if (bytes_perint > 20)
653 retval = bulk_latency; 653 itr_setting = bulk_latency;
654 else if (bytes_perint <= 10) 654 else if (bytes_perint <= 10)
655 retval = lowest_latency; 655 itr_setting = lowest_latency;
656 break; 656 break;
657 case bulk_latency: 657 case bulk_latency:
658 if (bytes_perint <= 20) 658 if (bytes_perint <= 20)
659 retval = low_latency; 659 itr_setting = low_latency;
660 break; 660 break;
661 } 661 }
662 662
663update_itr_done: 663 /* clear work counters since we have the values we need */
664 return retval; 664 ring_container->total_bytes = 0;
665 ring_container->total_packets = 0;
666
667 /* write updated itr to ring container */
668 ring_container->itr = itr_setting;
665} 669}
666 670
667/** 671/**
668 * ixgbevf_write_eitr - write VTEITR register in hardware specific way 672 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
669 * @adapter: pointer to adapter struct 673 * @q_vector: structure containing interrupt and ring information
670 * @v_idx: vector index into q_vector array
671 * @itr_reg: new value to be written in *register* format, not ints/s
672 * 674 *
673 * This function is made to be called by ethtool and by the driver 675 * This function is made to be called by ethtool and by the driver
674 * when it needs to update VTEITR registers at runtime. Hardware 676 * when it needs to update VTEITR registers at runtime. Hardware
675 * specific quirks/differences are taken care of here. 677 * specific quirks/differences are taken care of here.
676 */ 678 */
677static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx, 679void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
678 u32 itr_reg)
679{ 680{
681 struct ixgbevf_adapter *adapter = q_vector->adapter;
680 struct ixgbe_hw *hw = &adapter->hw; 682 struct ixgbe_hw *hw = &adapter->hw;
681 683 int v_idx = q_vector->v_idx;
682 itr_reg = EITR_INTS_PER_SEC_TO_REG(itr_reg); 684 u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;
683 685
684 /* 686 /*
685 * set the WDIS bit to not clear the timer bits and cause an 687 * set the WDIS bit to not clear the timer bits and cause an
@@ -692,59 +694,37 @@ static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx,
692 694
693static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector) 695static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
694{ 696{
695 struct ixgbevf_adapter *adapter = q_vector->adapter; 697 u32 new_itr = q_vector->itr;
696 u32 new_itr; 698 u8 current_itr;
697 u8 current_itr, ret_itr; 699
698 int v_idx = q_vector->v_idx; 700 ixgbevf_update_itr(q_vector, &q_vector->tx);
699 struct ixgbevf_ring *rx_ring, *tx_ring; 701 ixgbevf_update_itr(q_vector, &q_vector->rx);
700
701 ixgbevf_for_each_ring(tx_ring, q_vector->tx) {
702 ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr,
703 q_vector->tx.itr,
704 tx_ring->total_packets,
705 tx_ring->total_bytes);
706 /* if the result for this queue would decrease interrupt
707 * rate for this vector then use that result */
708 q_vector->tx.itr = ((q_vector->tx.itr > ret_itr) ?
709 q_vector->tx.itr - 1 : ret_itr);
710 }
711
712 ixgbevf_for_each_ring(rx_ring, q_vector->rx) {
713 ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr,
714 q_vector->rx.itr,
715 rx_ring->total_packets,
716 rx_ring->total_bytes);
717 /* if the result for this queue would decrease interrupt
718 * rate for this vector then use that result */
719 q_vector->rx.itr = ((q_vector->rx.itr > ret_itr) ?
720 q_vector->rx.itr - 1 : ret_itr);
721 }
722 702
723 current_itr = max(q_vector->rx.itr, q_vector->tx.itr); 703 current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
724 704
725 switch (current_itr) { 705 switch (current_itr) {
726 /* counts and packets in update_itr are dependent on these numbers */ 706 /* counts and packets in update_itr are dependent on these numbers */
727 case lowest_latency: 707 case lowest_latency:
728 new_itr = 100000; 708 new_itr = IXGBE_100K_ITR;
729 break; 709 break;
730 case low_latency: 710 case low_latency:
731 new_itr = 20000; /* aka hwitr = ~200 */ 711 new_itr = IXGBE_20K_ITR;
732 break; 712 break;
733 case bulk_latency: 713 case bulk_latency:
734 default: 714 default:
735 new_itr = 8000; 715 new_itr = IXGBE_8K_ITR;
736 break; 716 break;
737 } 717 }
738 718
739 if (new_itr != q_vector->eitr) { 719 if (new_itr != q_vector->itr) {
740 u32 itr_reg;
741
742 /* save the algorithm value here, not the smoothed one */
743 q_vector->eitr = new_itr;
744 /* do an exponential smoothing */ 720 /* do an exponential smoothing */
745 new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100); 721 new_itr = (10 * new_itr * q_vector->itr) /
746 itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr); 722 ((9 * new_itr) + q_vector->itr);
747 ixgbevf_write_eitr(adapter, v_idx, itr_reg); 723
724 /* save the algorithm value here */
725 q_vector->itr = new_itr;
726
727 ixgbevf_write_eitr(q_vector);
748 } 728 }
749} 729}
750 730
@@ -752,13 +732,9 @@ static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
752{ 732{
753 struct ixgbevf_adapter *adapter = data; 733 struct ixgbevf_adapter *adapter = data;
754 struct ixgbe_hw *hw = &adapter->hw; 734 struct ixgbe_hw *hw = &adapter->hw;
755 u32 eicr;
756 u32 msg; 735 u32 msg;
757 bool got_ack = false; 736 bool got_ack = false;
758 737
759 eicr = IXGBE_READ_REG(hw, IXGBE_VTEICS);
760 IXGBE_WRITE_REG(hw, IXGBE_VTEICR, eicr);
761
762 if (!hw->mbx.ops.check_for_ack(hw)) 738 if (!hw->mbx.ops.check_for_ack(hw))
763 got_ack = true; 739 got_ack = true;
764 740
@@ -787,6 +763,8 @@ static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
787 if (got_ack) 763 if (got_ack)
788 hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK; 764 hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;
789 765
766 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
767
790 return IRQ_HANDLED; 768 return IRQ_HANDLED;
791} 769}
792 770
@@ -799,11 +777,8 @@ static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
799static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data) 777static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
800{ 778{
801 struct ixgbevf_q_vector *q_vector = data; 779 struct ixgbevf_q_vector *q_vector = data;
802 struct ixgbevf_adapter *adapter = q_vector->adapter;
803 struct ixgbe_hw *hw = &adapter->hw;
804 780
805 /* disable interrupts on this vector only */ 781 /* EIAM disabled interrupts (on this vector) for us */
806 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, 1 << q_vector->v_idx);
807 if (q_vector->rx.ring || q_vector->tx.ring) 782 if (q_vector->rx.ring || q_vector->tx.ring)
808 napi_schedule(&q_vector->napi); 783 napi_schedule(&q_vector->napi);
809 784
@@ -967,7 +942,6 @@ static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
967 q_vector->tx.ring = NULL; 942 q_vector->tx.ring = NULL;
968 q_vector->rx.count = 0; 943 q_vector->rx.count = 0;
969 q_vector->tx.count = 0; 944 q_vector->tx.count = 0;
970 q_vector->eitr = adapter->eitr_param;
971 } 945 }
972} 946}
973 947
@@ -1020,10 +994,12 @@ static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
1020 **/ 994 **/
1021static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter) 995static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
1022{ 996{
1023 int i;
1024 struct ixgbe_hw *hw = &adapter->hw; 997 struct ixgbe_hw *hw = &adapter->hw;
998 int i;
1025 999
1000 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
1026 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0); 1001 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
1002 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);
1027 1003
1028 IXGBE_WRITE_FLUSH(hw); 1004 IXGBE_WRITE_FLUSH(hw);
1029 1005
@@ -1035,23 +1011,13 @@ static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
1035 * ixgbevf_irq_enable - Enable default interrupt generation settings 1011 * ixgbevf_irq_enable - Enable default interrupt generation settings
1036 * @adapter: board private structure 1012 * @adapter: board private structure
1037 **/ 1013 **/
1038static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter, 1014static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
1039 bool queues, bool flush)
1040{ 1015{
1041 struct ixgbe_hw *hw = &adapter->hw; 1016 struct ixgbe_hw *hw = &adapter->hw;
1042 u32 mask;
1043 u64 qmask;
1044
1045 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
1046 qmask = ~0;
1047
1048 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
1049
1050 if (queues)
1051 ixgbevf_irq_enable_queues(adapter, qmask);
1052 1017
1053 if (flush) 1018 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
1054 IXGBE_WRITE_FLUSH(hw); 1019 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
1020 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
1055} 1021}
1056 1022
1057/** 1023/**
@@ -1414,7 +1380,7 @@ void ixgbevf_up(struct ixgbevf_adapter *adapter)
1414 /* clear any pending interrupts, may auto mask */ 1380 /* clear any pending interrupts, may auto mask */
1415 IXGBE_READ_REG(hw, IXGBE_VTEICR); 1381 IXGBE_READ_REG(hw, IXGBE_VTEICR);
1416 1382
1417 ixgbevf_irq_enable(adapter, true, true); 1383 ixgbevf_irq_enable(adapter);
1418} 1384}
1419 1385
1420/** 1386/**
@@ -1783,7 +1749,6 @@ static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
1783 goto err_out; 1749 goto err_out;
1784 q_vector->adapter = adapter; 1750 q_vector->adapter = adapter;
1785 q_vector->v_idx = q_idx; 1751 q_vector->v_idx = q_idx;
1786 q_vector->eitr = adapter->eitr_param;
1787 netif_napi_add(adapter->netdev, &q_vector->napi, 1752 netif_napi_add(adapter->netdev, &q_vector->napi,
1788 ixgbevf_poll, 64); 1753 ixgbevf_poll, 64);
1789 adapter->q_vector[q_idx] = q_vector; 1754 adapter->q_vector[q_idx] = q_vector;
@@ -1932,8 +1897,8 @@ static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
1932 } 1897 }
1933 1898
1934 /* Enable dynamic interrupt throttling rates */ 1899 /* Enable dynamic interrupt throttling rates */
1935 adapter->eitr_param = 20000; 1900 adapter->rx_itr_setting = 1;
1936 adapter->itr_setting = 1; 1901 adapter->tx_itr_setting = 1;
1937 1902
1938 /* set default ring sizes */ 1903 /* set default ring sizes */
1939 adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD; 1904 adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
@@ -1998,7 +1963,7 @@ static void ixgbevf_watchdog(unsigned long data)
1998{ 1963{
1999 struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data; 1964 struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
2000 struct ixgbe_hw *hw = &adapter->hw; 1965 struct ixgbe_hw *hw = &adapter->hw;
2001 u64 eics = 0; 1966 u32 eics = 0;
2002 int i; 1967 int i;
2003 1968
2004 /* 1969 /*
@@ -2013,10 +1978,10 @@ static void ixgbevf_watchdog(unsigned long data)
2013 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { 1978 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
2014 struct ixgbevf_q_vector *qv = adapter->q_vector[i]; 1979 struct ixgbevf_q_vector *qv = adapter->q_vector[i];
2015 if (qv->rx.ring || qv->tx.ring) 1980 if (qv->rx.ring || qv->tx.ring)
2016 eics |= (1 << i); 1981 eics |= 1 << i;
2017 } 1982 }
2018 1983
2019 IXGBE_WRITE_REG(hw, IXGBE_VTEICS, (u32)eics); 1984 IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);
2020 1985
2021watchdog_short_circuit: 1986watchdog_short_circuit:
2022 schedule_work(&adapter->watchdog_task); 1987 schedule_work(&adapter->watchdog_task);
@@ -2389,7 +2354,7 @@ static int ixgbevf_open(struct net_device *netdev)
2389 if (err) 2354 if (err)
2390 goto err_req_irq; 2355 goto err_req_irq;
2391 2356
2392 ixgbevf_irq_enable(adapter, true, true); 2357 ixgbevf_irq_enable(adapter);
2393 2358
2394 return 0; 2359 return 0;
2395 2360