aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/ixgbe/ixgbe_main.c
diff options
context:
space:
mode:
authorAlexander Duyck <alexander.h.duyck@intel.com>2010-11-16 22:27:03 -0500
committerJeff Kirsher <jeffrey.t.kirsher@intel.com>2010-11-16 22:27:03 -0500
commitbd50817859e7e82ba6e4adc75ebd8ac19459d8a4 (patch)
treeb4fcbe0313d482c79a423acb6eac18d90940067a /drivers/net/ixgbe/ixgbe_main.c
parentaa80175a539a47fd11e2fbf1696a29f7a2652930 (diff)
ixgbe: change mac_type if statements to switch statements
This change replaces a number of if/elseif/else statements with switch statements to support the addition of future devices to the ixgbe driver.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Ross Brattain <ross.b.brattain@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers/net/ixgbe/ixgbe_main.c')
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c351
1 file changed, 217 insertions, 134 deletions
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 9f5331bc5985..10fff68088e6 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -589,14 +589,19 @@ static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
589{ 589{
590 u32 mask; 590 u32 mask;
591 591
592 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 592 switch (adapter->hw.mac.type) {
593 case ixgbe_mac_82598EB:
593 mask = (IXGBE_EIMS_RTX_QUEUE & qmask); 594 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
594 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask); 595 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
595 } else { 596 break;
597 case ixgbe_mac_82599EB:
596 mask = (qmask & 0xFFFFFFFF); 598 mask = (qmask & 0xFFFFFFFF);
597 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask); 599 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
598 mask = (qmask >> 32); 600 mask = (qmask >> 32);
599 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask); 601 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
602 break;
603 default:
604 break;
600 } 605 }
601} 606}
602 607
@@ -672,6 +677,7 @@ static inline bool ixgbe_tx_xon_state(struct ixgbe_adapter *adapter,
672 break; 677 break;
673 default: 678 default:
674 tc = 0; 679 tc = 0;
680 break;
675 } 681 }
676 txoff <<= tc; 682 txoff <<= tc;
677 } 683 }
@@ -1474,11 +1480,18 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
1474 } 1480 }
1475 } 1481 }
1476 1482
1477 if (adapter->hw.mac.type == ixgbe_mac_82598EB) 1483 switch (adapter->hw.mac.type) {
1484 case ixgbe_mac_82598EB:
1478 ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX, 1485 ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
1479 v_idx); 1486 v_idx);
1480 else if (adapter->hw.mac.type == ixgbe_mac_82599EB) 1487 break;
1488 case ixgbe_mac_82599EB:
1481 ixgbe_set_ivar(adapter, -1, 1, v_idx); 1489 ixgbe_set_ivar(adapter, -1, 1, v_idx);
1490 break;
1491
1492 default:
1493 break;
1494 }
1482 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950); 1495 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
1483 1496
1484 /* set up to autoclear timer, and the vectors */ 1497 /* set up to autoclear timer, and the vectors */
@@ -1574,10 +1587,12 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
1574 int v_idx = q_vector->v_idx; 1587 int v_idx = q_vector->v_idx;
1575 u32 itr_reg = EITR_INTS_PER_SEC_TO_REG(q_vector->eitr); 1588 u32 itr_reg = EITR_INTS_PER_SEC_TO_REG(q_vector->eitr);
1576 1589
1577 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 1590 switch (adapter->hw.mac.type) {
1591 case ixgbe_mac_82598EB:
1578 /* must write high and low 16 bits to reset counter */ 1592 /* must write high and low 16 bits to reset counter */
1579 itr_reg |= (itr_reg << 16); 1593 itr_reg |= (itr_reg << 16);
1580 } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) { 1594 break;
1595 case ixgbe_mac_82599EB:
1581 /* 1596 /*
1582 * 82599 can support a value of zero, so allow it for 1597 * 82599 can support a value of zero, so allow it for
1583 * max interrupt rate, but there is an errata where it can 1598 * max interrupt rate, but there is an errata where it can
@@ -1592,6 +1607,9 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
1592 * immediate assertion of the interrupt 1607 * immediate assertion of the interrupt
1593 */ 1608 */
1594 itr_reg |= IXGBE_EITR_CNT_WDIS; 1609 itr_reg |= IXGBE_EITR_CNT_WDIS;
1610 break;
1611 default:
1612 break;
1595 } 1613 }
1596 IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg); 1614 IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
1597} 1615}
@@ -1771,16 +1789,8 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
1771 if (eicr & IXGBE_EICR_MAILBOX) 1789 if (eicr & IXGBE_EICR_MAILBOX)
1772 ixgbe_msg_task(adapter); 1790 ixgbe_msg_task(adapter);
1773 1791
1774 if (hw->mac.type == ixgbe_mac_82598EB) 1792 switch (hw->mac.type) {
1775 ixgbe_check_fan_failure(adapter, eicr); 1793 case ixgbe_mac_82599EB:
1776
1777 if (hw->mac.type == ixgbe_mac_82599EB) {
1778 ixgbe_check_sfp_event(adapter, eicr);
1779 adapter->interrupt_event = eicr;
1780 if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
1781 ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)))
1782 schedule_work(&adapter->check_overtemp_task);
1783
1784 /* Handle Flow Director Full threshold interrupt */ 1794 /* Handle Flow Director Full threshold interrupt */
1785 if (eicr & IXGBE_EICR_FLOW_DIR) { 1795 if (eicr & IXGBE_EICR_FLOW_DIR) {
1786 int i; 1796 int i;
@@ -1795,7 +1805,19 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
1795 schedule_work(&adapter->fdir_reinit_task); 1805 schedule_work(&adapter->fdir_reinit_task);
1796 } 1806 }
1797 } 1807 }
1808 ixgbe_check_sfp_event(adapter, eicr);
1809 if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
1810 ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
1811 adapter->interrupt_event = eicr;
1812 schedule_work(&adapter->check_overtemp_task);
1813 }
1814 break;
1815 default:
1816 break;
1798 } 1817 }
1818
1819 ixgbe_check_fan_failure(adapter, eicr);
1820
1799 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 1821 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1800 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER); 1822 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
1801 1823
@@ -1806,15 +1828,23 @@ static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
1806 u64 qmask) 1828 u64 qmask)
1807{ 1829{
1808 u32 mask; 1830 u32 mask;
1831 struct ixgbe_hw *hw = &adapter->hw;
1809 1832
1810 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 1833 switch (hw->mac.type) {
1834 case ixgbe_mac_82598EB:
1811 mask = (IXGBE_EIMS_RTX_QUEUE & qmask); 1835 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
1812 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); 1836 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
1813 } else { 1837 break;
1838 case ixgbe_mac_82599EB:
1814 mask = (qmask & 0xFFFFFFFF); 1839 mask = (qmask & 0xFFFFFFFF);
1815 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(0), mask); 1840 if (mask)
1841 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
1816 mask = (qmask >> 32); 1842 mask = (qmask >> 32);
1817 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1), mask); 1843 if (mask)
1844 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
1845 break;
1846 default:
1847 break;
1818 } 1848 }
1819 /* skip the flush */ 1849 /* skip the flush */
1820} 1850}
@@ -1823,15 +1853,23 @@ static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
1823 u64 qmask) 1853 u64 qmask)
1824{ 1854{
1825 u32 mask; 1855 u32 mask;
1856 struct ixgbe_hw *hw = &adapter->hw;
1826 1857
1827 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 1858 switch (hw->mac.type) {
1859 case ixgbe_mac_82598EB:
1828 mask = (IXGBE_EIMS_RTX_QUEUE & qmask); 1860 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
1829 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, mask); 1861 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
1830 } else { 1862 break;
1863 case ixgbe_mac_82599EB:
1831 mask = (qmask & 0xFFFFFFFF); 1864 mask = (qmask & 0xFFFFFFFF);
1832 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), mask); 1865 if (mask)
1866 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
1833 mask = (qmask >> 32); 1867 mask = (qmask >> 32);
1834 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), mask); 1868 if (mask)
1869 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
1870 break;
1871 default:
1872 break;
1835 } 1873 }
1836 /* skip the flush */ 1874 /* skip the flush */
1837} 1875}
@@ -2288,12 +2326,16 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
2288 mask |= IXGBE_EIMS_GPI_SDP0; 2326 mask |= IXGBE_EIMS_GPI_SDP0;
2289 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) 2327 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
2290 mask |= IXGBE_EIMS_GPI_SDP1; 2328 mask |= IXGBE_EIMS_GPI_SDP1;
2291 if (adapter->hw.mac.type == ixgbe_mac_82599EB) { 2329 switch (adapter->hw.mac.type) {
2330 case ixgbe_mac_82599EB:
2292 mask |= IXGBE_EIMS_ECC; 2331 mask |= IXGBE_EIMS_ECC;
2293 mask |= IXGBE_EIMS_GPI_SDP1; 2332 mask |= IXGBE_EIMS_GPI_SDP1;
2294 mask |= IXGBE_EIMS_GPI_SDP2; 2333 mask |= IXGBE_EIMS_GPI_SDP2;
2295 if (adapter->num_vfs) 2334 if (adapter->num_vfs)
2296 mask |= IXGBE_EIMS_MAILBOX; 2335 mask |= IXGBE_EIMS_MAILBOX;
2336 break;
2337 default:
2338 break;
2297 } 2339 }
2298 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || 2340 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
2299 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) 2341 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
@@ -2349,13 +2391,20 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
2349 if (eicr & IXGBE_EICR_LSC) 2391 if (eicr & IXGBE_EICR_LSC)
2350 ixgbe_check_lsc(adapter); 2392 ixgbe_check_lsc(adapter);
2351 2393
2352 if (hw->mac.type == ixgbe_mac_82599EB) 2394 switch (hw->mac.type) {
2395 case ixgbe_mac_82599EB:
2353 ixgbe_check_sfp_event(adapter, eicr); 2396 ixgbe_check_sfp_event(adapter, eicr);
2397 if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
2398 ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
2399 adapter->interrupt_event = eicr;
2400 schedule_work(&adapter->check_overtemp_task);
2401 }
2402 break;
2403 default:
2404 break;
2405 }
2354 2406
2355 ixgbe_check_fan_failure(adapter, eicr); 2407 ixgbe_check_fan_failure(adapter, eicr);
2356 if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
2357 ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)))
2358 schedule_work(&adapter->check_overtemp_task);
2359 2408
2360 if (napi_schedule_prep(&(q_vector->napi))) { 2409 if (napi_schedule_prep(&(q_vector->napi))) {
2361 adapter->tx_ring[0]->total_packets = 0; 2410 adapter->tx_ring[0]->total_packets = 0;
@@ -2448,14 +2497,19 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
2448 **/ 2497 **/
2449static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter) 2498static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
2450{ 2499{
2451 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 2500 switch (adapter->hw.mac.type) {
2501 case ixgbe_mac_82598EB:
2452 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0); 2502 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
2453 } else { 2503 break;
2504 case ixgbe_mac_82599EB:
2454 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000); 2505 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
2455 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0); 2506 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
2456 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0); 2507 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
2457 if (adapter->num_vfs > 32) 2508 if (adapter->num_vfs > 32)
2458 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0); 2509 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
2510 break;
2511 default:
2512 break;
2459 } 2513 }
2460 IXGBE_WRITE_FLUSH(&adapter->hw); 2514 IXGBE_WRITE_FLUSH(&adapter->hw);
2461 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 2515 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
@@ -2630,15 +2684,20 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
2630 struct ixgbe_ring *rx_ring) 2684 struct ixgbe_ring *rx_ring)
2631{ 2685{
2632 u32 srrctl; 2686 u32 srrctl;
2633 int index; 2687 int index = rx_ring->reg_idx;
2634 struct ixgbe_ring_feature *feature = adapter->ring_feature;
2635 2688
2636 index = rx_ring->reg_idx; 2689 switch (adapter->hw.mac.type) {
2637 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 2690 case ixgbe_mac_82598EB: {
2638 unsigned long mask; 2691 struct ixgbe_ring_feature *feature = adapter->ring_feature;
2639 mask = (unsigned long) feature[RING_F_RSS].mask; 2692 const int mask = feature[RING_F_RSS].mask;
2640 index = index & mask; 2693 index = index & mask;
2641 } 2694 }
2695 break;
2696 case ixgbe_mac_82599EB:
2697 default:
2698 break;
2699 }
2700
2642 srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(index)); 2701 srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(index));
2643 2702
2644 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; 2703 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
@@ -3899,10 +3958,15 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
3899 (txdctl & ~IXGBE_TXDCTL_ENABLE)); 3958 (txdctl & ~IXGBE_TXDCTL_ENABLE));
3900 } 3959 }
3901 /* Disable the Tx DMA engine on 82599 */ 3960 /* Disable the Tx DMA engine on 82599 */
3902 if (hw->mac.type == ixgbe_mac_82599EB) 3961 switch (hw->mac.type) {
3962 case ixgbe_mac_82599EB:
3903 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, 3963 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
3904 (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) & 3964 (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
3905 ~IXGBE_DMATXCTL_TE)); 3965 ~IXGBE_DMATXCTL_TE));
3966 break;
3967 default:
3968 break;
3969 }
3906 3970
3907 /* power down the optics */ 3971 /* power down the optics */
3908 if (hw->phy.multispeed_fiber) 3972 if (hw->phy.multispeed_fiber)
@@ -4260,71 +4324,66 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
4260 bool ret = false; 4324 bool ret = false;
4261 int dcb_i = adapter->ring_feature[RING_F_DCB].indices; 4325 int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
4262 4326
4263 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 4327 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
4264 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 4328 return false;
4265 /* the number of queues is assumed to be symmetric */
4266 for (i = 0; i < dcb_i; i++) {
4267 adapter->rx_ring[i]->reg_idx = i << 3;
4268 adapter->tx_ring[i]->reg_idx = i << 2;
4269 }
4270 ret = true;
4271 } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
4272 if (dcb_i == 8) {
4273 /*
4274 * Tx TC0 starts at: descriptor queue 0
4275 * Tx TC1 starts at: descriptor queue 32
4276 * Tx TC2 starts at: descriptor queue 64
4277 * Tx TC3 starts at: descriptor queue 80
4278 * Tx TC4 starts at: descriptor queue 96
4279 * Tx TC5 starts at: descriptor queue 104
4280 * Tx TC6 starts at: descriptor queue 112
4281 * Tx TC7 starts at: descriptor queue 120
4282 *
4283 * Rx TC0-TC7 are offset by 16 queues each
4284 */
4285 for (i = 0; i < 3; i++) {
4286 adapter->tx_ring[i]->reg_idx = i << 5;
4287 adapter->rx_ring[i]->reg_idx = i << 4;
4288 }
4289 for ( ; i < 5; i++) {
4290 adapter->tx_ring[i]->reg_idx =
4291 ((i + 2) << 4);
4292 adapter->rx_ring[i]->reg_idx = i << 4;
4293 }
4294 for ( ; i < dcb_i; i++) {
4295 adapter->tx_ring[i]->reg_idx =
4296 ((i + 8) << 3);
4297 adapter->rx_ring[i]->reg_idx = i << 4;
4298 }
4299 4329
4300 ret = true; 4330 /* the number of queues is assumed to be symmetric */
4301 } else if (dcb_i == 4) { 4331 switch (adapter->hw.mac.type) {
4302 /* 4332 case ixgbe_mac_82598EB:
4303 * Tx TC0 starts at: descriptor queue 0 4333 for (i = 0; i < dcb_i; i++) {
4304 * Tx TC1 starts at: descriptor queue 64 4334 adapter->rx_ring[i]->reg_idx = i << 3;
4305 * Tx TC2 starts at: descriptor queue 96 4335 adapter->tx_ring[i]->reg_idx = i << 2;
4306 * Tx TC3 starts at: descriptor queue 112 4336 }
4307 * 4337 ret = true;
4308 * Rx TC0-TC3 are offset by 32 queues each 4338 break;
4309 */ 4339 case ixgbe_mac_82599EB:
4310 adapter->tx_ring[0]->reg_idx = 0; 4340 if (dcb_i == 8) {
4311 adapter->tx_ring[1]->reg_idx = 64; 4341 /*
4312 adapter->tx_ring[2]->reg_idx = 96; 4342 * Tx TC0 starts at: descriptor queue 0
4313 adapter->tx_ring[3]->reg_idx = 112; 4343 * Tx TC1 starts at: descriptor queue 32
4314 for (i = 0 ; i < dcb_i; i++) 4344 * Tx TC2 starts at: descriptor queue 64
4315 adapter->rx_ring[i]->reg_idx = i << 5; 4345 * Tx TC3 starts at: descriptor queue 80
4316 4346 * Tx TC4 starts at: descriptor queue 96
4317 ret = true; 4347 * Tx TC5 starts at: descriptor queue 104
4318 } else { 4348 * Tx TC6 starts at: descriptor queue 112
4319 ret = false; 4349 * Tx TC7 starts at: descriptor queue 120
4350 *
4351 * Rx TC0-TC7 are offset by 16 queues each
4352 */
4353 for (i = 0; i < 3; i++) {
4354 adapter->tx_ring[i]->reg_idx = i << 5;
4355 adapter->rx_ring[i]->reg_idx = i << 4;
4320 } 4356 }
4321 } else { 4357 for ( ; i < 5; i++) {
4322 ret = false; 4358 adapter->tx_ring[i]->reg_idx = ((i + 2) << 4);
4359 adapter->rx_ring[i]->reg_idx = i << 4;
4360 }
4361 for ( ; i < dcb_i; i++) {
4362 adapter->tx_ring[i]->reg_idx = ((i + 8) << 3);
4363 adapter->rx_ring[i]->reg_idx = i << 4;
4364 }
4365 ret = true;
4366 } else if (dcb_i == 4) {
4367 /*
4368 * Tx TC0 starts at: descriptor queue 0
4369 * Tx TC1 starts at: descriptor queue 64
4370 * Tx TC2 starts at: descriptor queue 96
4371 * Tx TC3 starts at: descriptor queue 112
4372 *
4373 * Rx TC0-TC3 are offset by 32 queues each
4374 */
4375 adapter->tx_ring[0]->reg_idx = 0;
4376 adapter->tx_ring[1]->reg_idx = 64;
4377 adapter->tx_ring[2]->reg_idx = 96;
4378 adapter->tx_ring[3]->reg_idx = 112;
4379 for (i = 0 ; i < dcb_i; i++)
4380 adapter->rx_ring[i]->reg_idx = i << 5;
4381 ret = true;
4323 } 4382 }
4324 } else { 4383 break;
4325 ret = false; 4384 default:
4385 break;
4326 } 4386 }
4327
4328 return ret; 4387 return ret;
4329} 4388}
4330#endif 4389#endif
@@ -4885,11 +4944,13 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
4885 adapter->ring_feature[RING_F_RSS].indices = rss; 4944 adapter->ring_feature[RING_F_RSS].indices = rss;
4886 adapter->flags |= IXGBE_FLAG_RSS_ENABLED; 4945 adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
4887 adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES; 4946 adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES;
4888 if (hw->mac.type == ixgbe_mac_82598EB) { 4947 switch (hw->mac.type) {
4948 case ixgbe_mac_82598EB:
4889 if (hw->device_id == IXGBE_DEV_ID_82598AT) 4949 if (hw->device_id == IXGBE_DEV_ID_82598AT)
4890 adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE; 4950 adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
4891 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598; 4951 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598;
4892 } else if (hw->mac.type == ixgbe_mac_82599EB) { 4952 break;
4953 case ixgbe_mac_82599EB:
4893 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599; 4954 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
4894 adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; 4955 adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
4895 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; 4956 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
@@ -4918,6 +4979,9 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
4918 adapter->fcoe.up = IXGBE_FCOE_DEFTC; 4979 adapter->fcoe.up = IXGBE_FCOE_DEFTC;
4919#endif 4980#endif
4920#endif /* IXGBE_FCOE */ 4981#endif /* IXGBE_FCOE */
4982 break;
4983 default:
4984 break;
4921 } 4985 }
4922 4986
4923#ifdef CONFIG_IXGBE_DCB 4987#ifdef CONFIG_IXGBE_DCB
@@ -5400,10 +5464,16 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
5400 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0); 5464 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
5401 } 5465 }
5402 5466
5403 if (wufc && hw->mac.type == ixgbe_mac_82599EB) 5467 switch (hw->mac.type) {
5404 pci_wake_from_d3(pdev, true); 5468 case ixgbe_mac_82598EB:
5405 else
5406 pci_wake_from_d3(pdev, false); 5469 pci_wake_from_d3(pdev, false);
5470 break;
5471 case ixgbe_mac_82599EB:
5472 pci_wake_from_d3(pdev, !!wufc);
5473 break;
5474 default:
5475 break;
5476 }
5407 5477
5408 *enable_wake = !!wufc; 5478 *enable_wake = !!wufc;
5409 5479
@@ -5522,17 +5592,21 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
5522 hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i)); 5592 hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
5523 hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i)); 5593 hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
5524 hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i)); 5594 hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
5525 if (hw->mac.type == ixgbe_mac_82599EB) { 5595 switch (hw->mac.type) {
5526 hwstats->pxonrxc[i] += 5596 case ixgbe_mac_82598EB:
5527 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
5528 hwstats->pxoffrxc[i] +=
5529 IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
5530 hwstats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
5531 } else {
5532 hwstats->pxonrxc[i] += 5597 hwstats->pxonrxc[i] +=
5533 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); 5598 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
5534 hwstats->pxoffrxc[i] += 5599 hwstats->pxoffrxc[i] +=
5535 IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); 5600 IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
5601 break;
5602 case ixgbe_mac_82599EB:
5603 hwstats->pxonrxc[i] +=
5604 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
5605 hwstats->pxoffrxc[i] +=
5606 IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
5607 break;
5608 default:
5609 break;
5536 } 5610 }
5537 hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); 5611 hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
5538 hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); 5612 hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
@@ -5542,18 +5616,21 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
5542 hwstats->gprc -= missed_rx; 5616 hwstats->gprc -= missed_rx;
5543 5617
5544 /* 82598 hardware only has a 32 bit counter in the high register */ 5618 /* 82598 hardware only has a 32 bit counter in the high register */
5545 if (hw->mac.type == ixgbe_mac_82599EB) { 5619 switch (hw->mac.type) {
5546 u64 tmp; 5620 case ixgbe_mac_82598EB:
5621 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
5622 hwstats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
5623 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
5624 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
5625 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
5626 break;
5627 case ixgbe_mac_82599EB:
5547 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL); 5628 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
5548 tmp = IXGBE_READ_REG(hw, IXGBE_GORCH) & 0xF; 5629 IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */
5549 /* 4 high bits of GORC */
5550 hwstats->gorc += (tmp << 32);
5551 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL); 5630 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
5552 tmp = IXGBE_READ_REG(hw, IXGBE_GOTCH) & 0xF; 5631 IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */
5553 /* 4 high bits of GOTC */
5554 hwstats->gotc += (tmp << 32);
5555 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL); 5632 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
5556 IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */ 5633 IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
5557 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); 5634 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
5558 hwstats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); 5635 hwstats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
5559 hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); 5636 hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
@@ -5566,12 +5643,9 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
5566 hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); 5643 hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
5567 hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); 5644 hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
5568#endif /* IXGBE_FCOE */ 5645#endif /* IXGBE_FCOE */
5569 } else { 5646 break;
5570 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); 5647 default:
5571 hwstats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); 5648 break;
5572 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
5573 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
5574 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
5575 } 5649 }
5576 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); 5650 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
5577 hwstats->bprc += bprc; 5651 hwstats->bprc += bprc;
@@ -5807,17 +5881,26 @@ static void ixgbe_watchdog_task(struct work_struct *work)
5807 if (!netif_carrier_ok(netdev)) { 5881 if (!netif_carrier_ok(netdev)) {
5808 bool flow_rx, flow_tx; 5882 bool flow_rx, flow_tx;
5809 5883
5810 if (hw->mac.type == ixgbe_mac_82599EB) { 5884 switch (hw->mac.type) {
5811 u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN); 5885 case ixgbe_mac_82598EB: {
5812 u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
5813 flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
5814 flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
5815 } else {
5816 u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 5886 u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
5817 u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS); 5887 u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
5818 flow_rx = !!(frctl & IXGBE_FCTRL_RFCE); 5888 flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
5819 flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X); 5889 flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
5820 } 5890 }
5891 break;
5892 case ixgbe_mac_82599EB: {
5893 u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
5894 u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
5895 flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
5896 flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
5897 }
5898 break;
5899 default:
5900 flow_tx = false;
5901 flow_rx = false;
5902 break;
5903 }
5821 5904
5822 e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", 5905 e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
5823 (link_speed == IXGBE_LINK_SPEED_10GB_FULL ? 5906 (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?