author      Alexander Duyck <alexander.h.duyck@intel.com>    2011-07-14 23:05:37 -0400
committer   Jeff Kirsher <jeffrey.t.kirsher@intel.com>       2011-09-16 00:12:54 -0400
commit      207867f583f63449a5e5588690754f1b86e3cbbf (patch)
tree        db8309e823d3b71c4fc6aaf8679232646a7fb054 /drivers/net/ethernet/intel
parent      4ff7fb12cf92fd15e0fbae0b36cca0599f8a7d1b (diff)
ixgbe: cleanup allocation and freeing of IRQ affinity hint
The allocation and freeing of the IRQ affinity hint needs some updates
since there are a number of spots where we run into possible issues with
the hint not being correctly updated.
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Phil Schmitt <phillip.j.schmitt@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
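In short, the patch ties the cpumask's lifetime to the q_vector (allocated in ixgbe_alloc_q_vectors(), freed in ixgbe_free_q_vectors()) and pairs the hint itself with the IRQ (set right after request_irq(), cleared before free_irq(), including the error-unwind path). Below is a minimal sketch of that lifecycle; the my_* names and the my_q_vector struct are hypothetical stand-ins, not the driver's real structures or helpers.

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>

struct my_q_vector {
	cpumask_var_t affinity_mask;	/* lives as long as the vector object */
};

/* at vector allocation time: allocate the mask and pick a CPU for it */
static int my_alloc_vector(struct my_q_vector *q_vector, int v_idx)
{
	if (!alloc_cpumask_var(&q_vector->affinity_mask, GFP_KERNEL))
		return -ENOMEM;
	cpumask_set_cpu(v_idx, q_vector->affinity_mask);
	return 0;
}

/* after a successful request_irq(): publish the hint for this IRQ */
static void my_set_hint(struct my_q_vector *q_vector, unsigned int irq)
{
	irq_set_affinity_hint(irq, q_vector->affinity_mask);
}

/* before free_irq(): clear the hint so nothing references the mask */
static void my_clear_hint(unsigned int irq)
{
	irq_set_affinity_hint(irq, NULL);
}

/* at vector teardown: release the mask memory */
static void my_free_vector(struct my_q_vector *q_vector)
{
	free_cpumask_var(q_vector->affinity_mask);
}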
Diffstat (limited to 'drivers/net/ethernet/intel')
-rw-r--r--   drivers/net/ethernet/intel/ixgbe/ixgbe_main.c   76
1 file changed, 36 insertions(+), 40 deletions(-)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 3ce0277cdbf3..73a669d61591 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1565,20 +1565,6 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
 		q_vector->eitr = adapter->rx_eitr_param;
 
 		ixgbe_write_eitr(q_vector);
-		/* If ATR is enabled, set interrupt affinity */
-		if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
-			/*
-			 * Allocate the affinity_hint cpumask, assign the mask
-			 * for this vector, and set our affinity_hint for
-			 * this irq.
-			 */
-			if (!alloc_cpumask_var(&q_vector->affinity_mask,
-					       GFP_KERNEL))
-				return;
-			cpumask_set_cpu(v_idx, q_vector->affinity_mask);
-			irq_set_affinity_hint(adapter->msix_entries[v_idx].vector,
-					      q_vector->affinity_mask);
-		}
 	}
 
 	switch (adapter->hw.mac.type) {
@@ -2093,18 +2079,17 @@ out:
 static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
-	int i, vector, q_vectors, err;
+	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+	int vector, err;
 	int ri = 0, ti = 0;
 
-	/* Decrement for Other and TCP Timer vectors */
-	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-
 	err = ixgbe_map_rings_to_vectors(adapter);
 	if (err)
 		return err;
 
 	for (vector = 0; vector < q_vectors; vector++) {
 		struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
+		struct msix_entry *entry = &adapter->msix_entries[vector];
 
 		if (q_vector->tx.ring && q_vector->rx.ring) {
 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
@@ -2120,14 +2105,19 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
 			/* skip this unused q_vector */
 			continue;
 		}
-		err = request_irq(adapter->msix_entries[vector].vector,
-				  &ixgbe_msix_clean_rings, 0, q_vector->name,
-				  q_vector);
+		err = request_irq(entry->vector, &ixgbe_msix_clean_rings, 0,
+				  q_vector->name, q_vector);
 		if (err) {
 			e_err(probe, "request_irq failed for MSIX interrupt "
 			      "Error: %d\n", err);
 			goto free_queue_irqs;
 		}
+		/* If Flow Director is enabled, set interrupt affinity */
+		if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
+			/* assign the mask for this irq */
+			irq_set_affinity_hint(entry->vector,
+					      q_vector->affinity_mask);
+		}
 	}
 
 	sprintf(adapter->lsc_int_name, "%s:lsc", netdev->name);
@@ -2141,9 +2131,13 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
 	return 0;
 
 free_queue_irqs:
-	for (i = vector - 1; i >= 0; i--)
-		free_irq(adapter->msix_entries[--vector].vector,
-			 adapter->q_vector[i]);
+	while (vector) {
+		vector--;
+		irq_set_affinity_hint(adapter->msix_entries[vector].vector,
+				      NULL);
+		free_irq(adapter->msix_entries[vector].vector,
+			 adapter->q_vector[vector]);
+	}
 	adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
 	pci_disable_msix(adapter->pdev);
 	kfree(adapter->msix_entries);
@@ -2333,14 +2327,19 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
 			    !adapter->q_vector[i]->tx.ring)
 				continue;
 
+			/* clear the affinity_mask in the IRQ descriptor */
+			irq_set_affinity_hint(adapter->msix_entries[i].vector,
+					      NULL);
+
 			free_irq(adapter->msix_entries[i].vector,
 				 adapter->q_vector[i]);
 		}
-
-		ixgbe_reset_q_vectors(adapter);
 	} else {
 		free_irq(adapter->pdev->irq, adapter);
 	}
+
+	/* clear q_vector state information */
+	ixgbe_reset_q_vectors(adapter);
 }
 
 /**
@@ -3879,7 +3878,6 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
 	struct ixgbe_hw *hw = &adapter->hw;
 	u32 rxctrl;
 	int i;
-	int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
 
 	/* signal that we are down to the interrupt handler */
 	set_bit(__IXGBE_DOWN, &adapter->state);
@@ -3924,15 +3922,6 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
 			adapter->vfinfo[i].clear_to_send = 0;
 	}
 
-	/* Cleanup the affinity_hint CPU mask memory and callback */
-	for (i = 0; i < num_q_vectors; i++) {
-		struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
-		/* clear the affinity_mask in the IRQ descriptor */
-		irq_set_affinity_hint(adapter->msix_entries[i].vector, NULL);
-		/* release the CPU mask memory */
-		free_cpumask_var(q_vector->affinity_mask);
-	}
-
 	/* disable transmits in the hardware now that interrupts are off */
 	for (i = 0; i < adapter->num_tx_queues; i++) {
 		u8 reg_idx = adapter->tx_ring[i]->reg_idx;
@@ -4677,6 +4666,11 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
 		q_vector->adapter = adapter;
 		q_vector->v_idx = v_idx;
 
+		/* Allocate the affinity_hint cpumask, configure the mask */
+		if (!alloc_cpumask_var(&q_vector->affinity_mask, GFP_KERNEL))
+			goto err_out;
+		cpumask_set_cpu(v_idx, q_vector->affinity_mask);
+
 		if (q_vector->tx.count && !q_vector->rx.count)
 			q_vector->eitr = adapter->tx_eitr_param;
 		else
@@ -4694,6 +4688,7 @@ err_out:
 		v_idx--;
 		q_vector = adapter->q_vector[v_idx];
 		netif_napi_del(&q_vector->napi);
+		free_cpumask_var(q_vector->affinity_mask);
 		kfree(q_vector);
 		adapter->q_vector[v_idx] = NULL;
 	}
@@ -4710,17 +4705,18 @@ err_out:
  **/
 static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
 {
-	int q_idx, num_q_vectors;
+	int v_idx, num_q_vectors;
 
 	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
 		num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
 	else
 		num_q_vectors = 1;
 
-	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
-		struct ixgbe_q_vector *q_vector = adapter->q_vector[q_idx];
-		adapter->q_vector[q_idx] = NULL;
+	for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
+		struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx];
+		adapter->q_vector[v_idx] = NULL;
 		netif_napi_del(&q_vector->napi);
+		free_cpumask_var(q_vector->affinity_mask);
 		kfree(q_vector);
 	}
 }