about summary refs log tree commit diff stats
path: root/drivers/net/ixgbe/ixgbe_main.c
diff options
context:
space:
mode:
author	Nelson, Shannon <shannon.nelson@intel.com>	2009-04-27 18:42:54 -0400
committer	David S. Miller <davem@davemloft.net>	2009-04-28 04:53:16 -0400
commit	835462fc5d69adc948e8afb2073264888aaa0e2f (patch)
tree	f73ff69971d910341241f24b76d25cf94da0797c /drivers/net/ixgbe/ixgbe_main.c
parent	f8212f979f777af2a8e3a9deb0c11a9fcf35e305 (diff)
ixgbe: Interrupt management update for 82599
Update the interrupt management to correctly handle greater than 16 queue vectors.

Signed-off-by: Shannon Nelson <shannon.nelson@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ixgbe/ixgbe_main.c')
-rw-r--r--	drivers/net/ixgbe/ixgbe_main.c	| 90
1 file changed, 50 insertions(+), 40 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 419ce472cef8..6c90b6801cbd 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -326,8 +326,18 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
326 } 326 }
327 327
328 /* re-arm the interrupt */ 328 /* re-arm the interrupt */
329 if (count >= tx_ring->work_limit) 329 if (count >= tx_ring->work_limit) {
330 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, tx_ring->v_idx); 330 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
331 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
332 tx_ring->v_idx);
333 else if (tx_ring->v_idx & 0xFFFFFFFF)
334 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0),
335 tx_ring->v_idx);
336 else
337 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1),
338 (tx_ring->v_idx >> 32));
339 }
340
331 341
332 tx_ring->total_bytes += total_bytes; 342 tx_ring->total_bytes += total_bytes;
333 tx_ring->total_packets += total_packets; 343 tx_ring->total_packets += total_packets;
@@ -1166,7 +1176,13 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
1166 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); 1176 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1167 rx_ring = &(adapter->rx_ring[r_idx]); 1177 rx_ring = &(adapter->rx_ring[r_idx]);
1168 /* disable interrupts on this vector only */ 1178 /* disable interrupts on this vector only */
1169 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rx_ring->v_idx); 1179 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
1180 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rx_ring->v_idx);
1181 else if (rx_ring->v_idx & 0xFFFFFFFF)
1182 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), rx_ring->v_idx);
1183 else
1184 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1),
1185 (rx_ring->v_idx >> 32));
1170 napi_schedule(&q_vector->napi); 1186 napi_schedule(&q_vector->napi);
1171 1187
1172 return IRQ_HANDLED; 1188 return IRQ_HANDLED;
@@ -1180,6 +1196,23 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
1180 return IRQ_HANDLED; 1196 return IRQ_HANDLED;
1181} 1197}
1182 1198
1199static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
1200 u64 qmask)
1201{
1202 u32 mask;
1203
1204 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
1205 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
1206 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
1207 } else {
1208 mask = (qmask & 0xFFFFFFFF);
1209 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(0), mask);
1210 mask = (qmask >> 32);
1211 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1), mask);
1212 }
1213 /* skip the flush */
1214}
1215
1183/** 1216/**
1184 * ixgbe_clean_rxonly - msix (aka one shot) rx clean routine 1217 * ixgbe_clean_rxonly - msix (aka one shot) rx clean routine
1185 * @napi: napi struct with our devices info in it 1218 * @napi: napi struct with our devices info in it
@@ -1212,7 +1245,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
1212 if (adapter->itr_setting & 1) 1245 if (adapter->itr_setting & 1)
1213 ixgbe_set_itr_msix(q_vector); 1246 ixgbe_set_itr_msix(q_vector);
1214 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 1247 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1215 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rx_ring->v_idx); 1248 ixgbe_irq_enable_queues(adapter, rx_ring->v_idx);
1216 } 1249 }
1217 1250
1218 return work_done; 1251 return work_done;
@@ -1234,7 +1267,7 @@ static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget)
1234 struct ixgbe_ring *rx_ring = NULL; 1267 struct ixgbe_ring *rx_ring = NULL;
1235 int work_done = 0, i; 1268 int work_done = 0, i;
1236 long r_idx; 1269 long r_idx;
1237 u16 enable_mask = 0; 1270 u64 enable_mask = 0;
1238 1271
1239 /* attempt to distribute budget to each queue fairly, but don't allow 1272 /* attempt to distribute budget to each queue fairly, but don't allow
1240 * the budget to go below 1 because we'll exit polling */ 1273 * the budget to go below 1 because we'll exit polling */
@@ -1261,7 +1294,7 @@ static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget)
1261 if (adapter->itr_setting & 1) 1294 if (adapter->itr_setting & 1)
1262 ixgbe_set_itr_msix(q_vector); 1295 ixgbe_set_itr_msix(q_vector);
1263 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 1296 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1264 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, enable_mask); 1297 ixgbe_irq_enable_queues(adapter, enable_mask);
1265 return 0; 1298 return 0;
1266 } 1299 }
1267 1300
@@ -1481,7 +1514,8 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
1481static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter) 1514static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
1482{ 1515{
1483 u32 mask; 1516 u32 mask;
1484 mask = IXGBE_EIMS_ENABLE_MASK; 1517
1518 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
1485 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) 1519 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
1486 mask |= IXGBE_EIMS_GPI_SDP1; 1520 mask |= IXGBE_EIMS_GPI_SDP1;
1487 if (adapter->hw.mac.type == ixgbe_mac_82599EB) { 1521 if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
@@ -1491,14 +1525,7 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
1491 } 1525 }
1492 1526
1493 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); 1527 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
1494 if (adapter->hw.mac.type == ixgbe_mac_82599EB) { 1528 ixgbe_irq_enable_queues(adapter, ~0);
1495 /* enable the rest of the queue vectors */
1496 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1),
1497 (IXGBE_EIMS_RTX_QUEUE << 16));
1498 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(2),
1499 ((IXGBE_EIMS_RTX_QUEUE << 16) |
1500 IXGBE_EIMS_RTX_QUEUE));
1501 }
1502 IXGBE_WRITE_FLUSH(&adapter->hw); 1529 IXGBE_WRITE_FLUSH(&adapter->hw);
1503} 1530}
1504 1531
@@ -1622,10 +1649,12 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
1622 **/ 1649 **/
1623static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter) 1650static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
1624{ 1651{
1625 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0); 1652 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
1626 if (adapter->hw.mac.type == ixgbe_mac_82599EB) { 1653 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
1654 } else {
1655 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
1656 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
1627 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0); 1657 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
1628 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(2), ~0);
1629 } 1658 }
1630 IXGBE_WRITE_FLUSH(&adapter->hw); 1659 IXGBE_WRITE_FLUSH(&adapter->hw);
1631 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 1660 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
@@ -1637,18 +1666,6 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
1637 } 1666 }
1638} 1667}
1639 1668
1640static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter)
1641{
1642 u32 mask = IXGBE_EIMS_RTX_QUEUE;
1643 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
1644 if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
1645 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1), mask << 16);
1646 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(2),
1647 (mask << 16 | mask));
1648 }
1649 /* skip the flush */
1650}
1651
1652/** 1669/**
1653 * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts 1670 * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
1654 * 1671 *
@@ -2714,7 +2731,7 @@ static int ixgbe_poll(struct napi_struct *napi, int budget)
2714 if (adapter->itr_setting & 1) 2731 if (adapter->itr_setting & 1)
2715 ixgbe_set_itr(adapter); 2732 ixgbe_set_itr(adapter);
2716 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 2733 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2717 ixgbe_irq_enable_queues(adapter); 2734 ixgbe_irq_enable_queues(adapter, IXGBE_EIMS_RTX_QUEUE);
2718 } 2735 }
2719 return work_done; 2736 return work_done;
2720} 2737}
@@ -4005,16 +4022,9 @@ static void ixgbe_watchdog(unsigned long data)
4005 break; 4022 break;
4006 case ixgbe_mac_82599EB: 4023 case ixgbe_mac_82599EB:
4007 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 4024 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
4008 /* 4025 IXGBE_WRITE_REG(hw, IXGBE_EICS_EX(0),
4009 * EICS(0..15) first 0-15 q vectors 4026 (u32)(eics & 0xFFFFFFFF));
4010 * EICS[1] (16..31) q vectors 16-31
4011 * EICS[2] (0..31) q vectors 32-63
4012 */
4013 IXGBE_WRITE_REG(hw, IXGBE_EICS,
4014 (u32)(eics & 0xFFFF));
4015 IXGBE_WRITE_REG(hw, IXGBE_EICS_EX(1), 4027 IXGBE_WRITE_REG(hw, IXGBE_EICS_EX(1),
4016 (u32)(eics & 0xFFFF0000));
4017 IXGBE_WRITE_REG(hw, IXGBE_EICS_EX(2),
4018 (u32)(eics >> 32)); 4028 (u32)(eics >> 32));
4019 } else { 4029 } else {
4020 /* 4030 /*