author		Szymon Janc <szymon@janc.net.pl>	2010-11-27 03:39:43 -0500
committer	David S. Miller <davem@davemloft.net>	2010-11-28 21:06:57 -0500
commit		78aea4fc67a7534d5f5bbb0419a2bcb50b0547c9 (patch)
tree		70a2a10f87e3efeed6ca5778dc1a60eac80c460c /drivers/net/forcedeth.c
parent		47c05314328d9c40f6006783dc4c1e3080bd2914 (diff)
forcedeth: fix multiple code style issues
Signed-off-by: Szymon Janc <szymon@janc.net.pl>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/forcedeth.c')
-rw-r--r--	drivers/net/forcedeth.c	301
1 file changed, 135 insertions(+), 166 deletions(-)
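The hunks that follow are mechanical checkpatch-style cleanups with no intended functional change. As a rough sketch of the recurring patterns, shown on a hypothetical helper rather than on code from this driver:

/* Illustrative only: the style rules this patch applies throughout. */
static int sum_positive(const int *vals, int n)	/* '*' binds to the name, not the type */
{
	int i, sum = 0;				/* spaces around '=', not "sum=0" */

	for (i = 0; i < n; i++) {		/* spaces in for-clauses: "i = 0; i < n; i++" */
		if (vals[i] < 0)		/* no braces around a single-statement branch */
			continue;
		sum += vals[i];
	}
	return sum;
}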
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 0fa1776563a3..87757c89caef 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -186,9 +186,9 @@ enum {
 	NvRegSlotTime = 0x9c,
 #define NVREG_SLOTTIME_LEGBF_ENABLED	0x80000000
 #define NVREG_SLOTTIME_10_100_FULL	0x00007f00
 #define NVREG_SLOTTIME_1000_FULL	0x0003ff00
 #define NVREG_SLOTTIME_HALF		0x0000ff00
 #define NVREG_SLOTTIME_DEFAULT		0x00007f00
 #define NVREG_SLOTTIME_MASK		0x000000ff
 
 	NvRegTxDeferral = 0xA0,
@@ -297,7 +297,7 @@ enum {
 #define NVREG_WAKEUPFLAGS_ENABLE	0x1111
 
 	NvRegMgmtUnitGetVersion = 0x204,
 #define NVREG_MGMTUNITGETVERSION	0x01
 	NvRegMgmtUnitVersion = 0x208,
 #define NVREG_MGMTUNITVERSION		0x08
 	NvRegPowerCap = 0x268,
@@ -368,8 +368,8 @@ struct ring_desc_ex {
 };
 
 union ring_type {
-	struct ring_desc* orig;
-	struct ring_desc_ex* ex;
+	struct ring_desc *orig;
+	struct ring_desc_ex *ex;
 };
 
 #define FLAG_MASK_V1 0xffff0000
@@ -444,10 +444,10 @@ union ring_type {
 #define NV_RX3_VLAN_TAG_MASK	(0x0000FFFF)
 
 /* Miscelaneous hardware related defines: */
 #define NV_PCI_REGSZ_VER1	0x270
 #define NV_PCI_REGSZ_VER2	0x2d4
 #define NV_PCI_REGSZ_VER3	0x604
 #define NV_PCI_REGSZ_MAX	0x604
 
 /* various timeout delays: all in usec */
 #define NV_TXRX_RESET_DELAY	4
@@ -717,7 +717,7 @@ static const struct register_test nv_registers_test[] = {
 	{ NvRegMulticastAddrA, 0xffffffff },
 	{ NvRegTxWatermark, 0x0ff },
 	{ NvRegWakeUpFlags, 0x07777 },
-	{ 0,0 }
+	{ 0, 0 }
 };
 
 struct nv_skb_map {
@@ -911,7 +911,7 @@ static int phy_cross = NV_CROSSOVER_DETECTION_DISABLED;
  * Power down phy when interface is down (persists through reboot;
  * older Linux and other OSes may not power it up again)
  */
-static int phy_power_down = 0;
+static int phy_power_down;
 
 static inline struct fe_priv *get_nvpriv(struct net_device *dev)
 {
@@ -984,12 +984,10 @@ static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
 	u8 __iomem *base = get_hwbase(dev);
 
 	if (!nv_optimized(np)) {
-		if (rxtx_flags & NV_SETUP_RX_RING) {
+		if (rxtx_flags & NV_SETUP_RX_RING)
 			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
-		}
-		if (rxtx_flags & NV_SETUP_TX_RING) {
+		if (rxtx_flags & NV_SETUP_TX_RING)
 			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
-		}
 	} else {
 		if (rxtx_flags & NV_SETUP_RX_RING) {
 			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
@@ -1174,9 +1172,8 @@ static int phy_reset(struct net_device *dev, u32 bmcr_setup)
 	unsigned int tries = 0;
 
 	miicontrol = BMCR_RESET | bmcr_setup;
-	if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) {
+	if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol))
 		return -1;
-	}
 
 	/* wait for 500ms */
 	msleep(500);
@@ -1196,7 +1193,7 @@ static int phy_init(struct net_device *dev)
 {
 	struct fe_priv *np = get_nvpriv(dev);
 	u8 __iomem *base = get_hwbase(dev);
-	u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000,reg;
+	u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000, reg;
 
 	/* phy errata for E3016 phy */
 	if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
@@ -1313,8 +1310,7 @@ static int phy_init(struct net_device *dev)
 			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
 			return PHY_ERROR;
 		}
-	}
-	else
+	} else
 		np->gigabit = 0;
 
 	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
@@ -1340,7 +1336,7 @@ static int phy_init(struct net_device *dev)
 	}
 
 	/* phy vendor specific configuration */
-	if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII) ) {
+	if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII)) {
 		phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
 		phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2);
 		phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4);
@@ -1501,12 +1497,10 @@ static int phy_init(struct net_device *dev)
 	/* restart auto negotiation, power down phy */
 	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
 	mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
-	if (phy_power_down) {
+	if (phy_power_down)
 		mii_control |= BMCR_PDOWN;
-	}
-	if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
+	if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control))
 		return PHY_ERROR;
-	}
 
 	return 0;
 }
@@ -1526,8 +1520,8 @@ static void nv_start_rx(struct net_device *dev)
 	}
 	writel(np->linkspeed, base + NvRegLinkSpeed);
 	pci_push(base);
 	rx_ctrl |= NVREG_RCVCTL_START;
 	if (np->mac_in_use)
 		rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN;
 	writel(rx_ctrl, base + NvRegReceiverControl);
 	dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n",
@@ -1745,7 +1739,7 @@ static struct net_device_stats *nv_get_stats(struct net_device *dev)
 static int nv_alloc_rx(struct net_device *dev)
 {
 	struct fe_priv *np = netdev_priv(dev);
-	struct ring_desc* less_rx;
+	struct ring_desc *less_rx;
 
 	less_rx = np->get_rx.orig;
 	if (less_rx-- == np->first_rx.orig)
@@ -1767,9 +1761,8 @@ static int nv_alloc_rx(struct net_device *dev)
 			np->put_rx.orig = np->first_rx.orig;
 			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
 				np->put_rx_ctx = np->first_rx_ctx;
-		} else {
+		} else
 			return 1;
-		}
 	}
 	return 0;
 }
@@ -1777,7 +1770,7 @@ static int nv_alloc_rx(struct net_device *dev)
 static int nv_alloc_rx_optimized(struct net_device *dev)
 {
 	struct fe_priv *np = netdev_priv(dev);
-	struct ring_desc_ex* less_rx;
+	struct ring_desc_ex *less_rx;
 
 	less_rx = np->get_rx.ex;
 	if (less_rx-- == np->first_rx.ex)
@@ -1800,9 +1793,8 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
 			np->put_rx.ex = np->first_rx.ex;
 			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
 				np->put_rx_ctx = np->first_rx_ctx;
-		} else {
+		} else
 			return 1;
-		}
 	}
 	return 0;
 }
@@ -2018,24 +2010,24 @@ static void nv_legacybackoff_reseed(struct net_device *dev)
 
 /* Known Good seed sets */
 static const u32 main_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
 	{145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
 	{245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 385, 761, 790, 974},
 	{145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
 	{245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 386, 761, 790, 974},
 	{266, 265, 276, 585, 397, 208, 345, 355, 365, 376, 385, 396, 771, 700, 984},
 	{266, 265, 276, 586, 397, 208, 346, 355, 365, 376, 285, 396, 771, 700, 984},
 	{366, 365, 376, 686, 497, 308, 447, 455, 466, 476, 485, 496, 871, 800, 84},
-	{466, 465, 476, 786, 597, 408, 547, 555, 566, 576, 585, 597, 971, 900, 184}};
+	{466, 465, 476, 786, 597, 408, 547, 555, 566, 576, 585, 597, 971, 900, 184} };
 
 static const u32 gear_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
 	{251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
 	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
 	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 397},
 	{251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
 	{251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
 	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
 	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
-	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}};
+	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395} };
 
 static void nv_gear_backoff_reseed(struct net_device *dev)
 {
@@ -2083,13 +2075,12 @@ static void nv_gear_backoff_reseed(struct net_device *dev)
 	temp = NVREG_BKOFFCTRL_DEFAULT | (0 << NVREG_BKOFFCTRL_SELECT);
 	temp |= combinedSeed & NVREG_BKOFFCTRL_SEED_MASK;
 	temp |= combinedSeed >> NVREG_BKOFFCTRL_GEAR;
-	writel(temp,base + NvRegBackOffControl);
+	writel(temp, base + NvRegBackOffControl);
 
 	/* Setup seeds for all gear LFSRs. */
 	get_random_bytes(&seedset, sizeof(seedset));
 	seedset = seedset % BACKOFF_SEEDSET_ROWS;
-	for (i = 1; i <= BACKOFF_SEEDSET_LFSRS; i++)
-	{
+	for (i = 1; i <= BACKOFF_SEEDSET_LFSRS; i++) {
 		temp = NVREG_BKOFFCTRL_DEFAULT | (i << NVREG_BKOFFCTRL_SELECT);
 		temp |= main_seedset[seedset][i-1] & 0x3ff;
 		temp |= ((gear_seedset[seedset][i-1] & 0x3ff) << NVREG_BKOFFCTRL_GEAR);
@@ -2113,10 +2104,10 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	u32 size = skb_headlen(skb);
 	u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
 	u32 empty_slots;
-	struct ring_desc* put_tx;
-	struct ring_desc* start_tx;
-	struct ring_desc* prev_tx;
-	struct nv_skb_map* prev_tx_ctx;
+	struct ring_desc *put_tx;
+	struct ring_desc *start_tx;
+	struct ring_desc *prev_tx;
+	struct nv_skb_map *prev_tx_ctx;
 	unsigned long flags;
 
 	/* add fragments to entries count */
@@ -2208,10 +2199,10 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			dev->name, entries, tx_flags_extra);
 		{
 			int j;
-			for (j=0; j<64; j++) {
+			for (j = 0; j < 64; j++) {
 				if ((j%16) == 0)
 					dprintk("\n%03x:", j);
-				dprintk(" %02x", ((unsigned char*)skb->data)[j]);
+				dprintk(" %02x", ((unsigned char *)skb->data)[j]);
 			}
 			dprintk("\n");
 		}
@@ -2233,11 +2224,11 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
 	u32 size = skb_headlen(skb);
 	u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
 	u32 empty_slots;
-	struct ring_desc_ex* put_tx;
-	struct ring_desc_ex* start_tx;
-	struct ring_desc_ex* prev_tx;
-	struct nv_skb_map* prev_tx_ctx;
-	struct nv_skb_map* start_tx_ctx;
+	struct ring_desc_ex *put_tx;
+	struct ring_desc_ex *start_tx;
+	struct ring_desc_ex *prev_tx;
+	struct nv_skb_map *prev_tx_ctx;
+	struct nv_skb_map *start_tx_ctx;
 	unsigned long flags;
 
 	/* add fragments to entries count */
@@ -2359,10 +2350,10 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
 			dev->name, entries, tx_flags_extra);
 		{
 			int j;
-			for (j=0; j<64; j++) {
+			for (j = 0; j < 64; j++) {
 				if ((j%16) == 0)
 					dprintk("\n%03x:", j);
-				dprintk(" %02x", ((unsigned char*)skb->data)[j]);
+				dprintk(" %02x", ((unsigned char *)skb->data)[j]);
 			}
 			dprintk("\n");
 		}
@@ -2399,7 +2390,7 @@ static int nv_tx_done(struct net_device *dev, int limit)
 	struct fe_priv *np = netdev_priv(dev);
 	u32 flags;
 	int tx_work = 0;
-	struct ring_desc* orig_get_tx = np->get_tx.orig;
+	struct ring_desc *orig_get_tx = np->get_tx.orig;
 
 	while ((np->get_tx.orig != np->put_tx.orig) &&
 	       !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID) &&
@@ -2464,7 +2455,7 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
 	struct fe_priv *np = netdev_priv(dev);
 	u32 flags;
 	int tx_work = 0;
-	struct ring_desc_ex* orig_get_tx = np->get_tx.ex;
+	struct ring_desc_ex *orig_get_tx = np->get_tx.ex;
 
 	while ((np->get_tx.ex != np->put_tx.ex) &&
 	       !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX2_VALID) &&
@@ -2491,9 +2482,8 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
 			np->get_tx_ctx->skb = NULL;
 			tx_work++;
 
-			if (np->tx_limit) {
+			if (np->tx_limit)
 				nv_tx_flip_ownership(dev);
-			}
 		}
 		if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
 			np->get_tx.ex = np->first_tx.ex;
@@ -2532,7 +2522,7 @@ static void nv_tx_timeout(struct net_device *dev)
 	printk(KERN_INFO "%s: Ring at %lx\n",
 	       dev->name, (unsigned long)np->ring_addr);
 	printk(KERN_INFO "%s: Dumping tx registers\n", dev->name);
-	for (i=0;i<=np->register_size;i+= 32) {
+	for (i = 0; i <= np->register_size; i += 32) {
 		printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
 		       i,
 		       readl(base + i + 0), readl(base + i + 4),
@@ -2541,7 +2531,7 @@ static void nv_tx_timeout(struct net_device *dev)
 		       readl(base + i + 24), readl(base + i + 28));
 	}
 	printk(KERN_INFO "%s: Dumping tx ring\n", dev->name);
-	for (i=0;i<np->tx_ring_size;i+= 4) {
+	for (i = 0; i < np->tx_ring_size; i += 4) {
 		if (!nv_optimized(np)) {
 			printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
 			       i,
@@ -2616,11 +2606,11 @@ static int nv_getlen(struct net_device *dev, void *packet, int datalen)
 	int protolen;	/* length as stored in the proto field */
 
 	/* 1) calculate len according to header */
-	if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) {
-		protolen = ntohs( ((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto );
+	if (((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) {
+		protolen = ntohs(((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto);
 		hdrlen = VLAN_HLEN;
 	} else {
-		protolen = ntohs( ((struct ethhdr *)packet)->h_proto);
+		protolen = ntohs(((struct ethhdr *)packet)->h_proto);
 		hdrlen = ETH_HLEN;
 	}
 	dprintk(KERN_DEBUG "%s: nv_getlen: datalen %d, protolen %d, hdrlen %d\n",
@@ -2667,7 +2657,7 @@ static int nv_rx_process(struct net_device *dev, int limit)
 	struct sk_buff *skb;
 	int len;
 
-	while((np->get_rx.orig != np->put_rx.orig) &&
+	while ((np->get_rx.orig != np->put_rx.orig) &&
 	      !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) &&
 	      (rx_work < limit)) {
 
@@ -2687,11 +2677,11 @@ static int nv_rx_process(struct net_device *dev, int limit)
 
 		{
 			int j;
-			dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags);
-			for (j=0; j<64; j++) {
+			dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).", flags);
+			for (j = 0; j < 64; j++) {
 				if ((j%16) == 0)
 					dprintk("\n%03x:", j);
-				dprintk(" %02x", ((unsigned char*)skb->data)[j]);
+				dprintk(" %02x", ((unsigned char *)skb->data)[j]);
 			}
 			dprintk("\n");
 		}
@@ -2710,9 +2700,8 @@ static int nv_rx_process(struct net_device *dev, int limit)
 				}
 				/* framing errors are soft errors */
 				else if ((flags & NV_RX_ERROR_MASK) == NV_RX_FRAMINGERR) {
-					if (flags & NV_RX_SUBSTRACT1) {
+					if (flags & NV_RX_SUBSTRACT1)
 						len--;
-					}
 				}
 				/* the rest are hard errors */
 				else {
@@ -2745,9 +2734,8 @@ static int nv_rx_process(struct net_device *dev, int limit)
 				}
 				/* framing errors are soft errors */
 				else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
-					if (flags & NV_RX2_SUBSTRACT1) {
+					if (flags & NV_RX2_SUBSTRACT1)
 						len--;
-					}
 				}
 				/* the rest are hard errors */
 				else {
@@ -2797,7 +2785,7 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
 	struct sk_buff *skb;
 	int len;
 
-	while((np->get_rx.ex != np->put_rx.ex) &&
+	while ((np->get_rx.ex != np->put_rx.ex) &&
 	      !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) &&
 	      (rx_work < limit)) {
 
@@ -2817,11 +2805,11 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
 
 		{
 			int j;
-			dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags);
-			for (j=0; j<64; j++) {
+			dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).", flags);
+			for (j = 0; j < 64; j++) {
 				if ((j%16) == 0)
 					dprintk("\n%03x:", j);
-				dprintk(" %02x", ((unsigned char*)skb->data)[j]);
+				dprintk(" %02x", ((unsigned char *)skb->data)[j]);
 			}
 			dprintk("\n");
 		}
@@ -2838,9 +2826,8 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
 			}
 			/* framing errors are soft errors */
 			else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
-				if (flags & NV_RX2_SUBSTRACT1) {
+				if (flags & NV_RX2_SUBSTRACT1)
 					len--;
-				}
 			}
 			/* the rest are hard errors */
 			else {
@@ -2949,7 +2936,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
 		/* reinit nic view of the rx queue */
 		writel(np->rx_buf_sz, base + NvRegOffloadConfig);
 		setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
-		writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
+		writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
 			base + NvRegRingSizes);
 		pci_push(base);
 		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
@@ -2986,7 +2973,7 @@ static void nv_copy_mac_to_hw(struct net_device *dev)
 static int nv_set_mac_address(struct net_device *dev, void *addr)
 {
 	struct fe_priv *np = netdev_priv(dev);
-	struct sockaddr *macaddr = (struct sockaddr*)addr;
+	struct sockaddr *macaddr = (struct sockaddr *)addr;
 
 	if (!is_valid_ether_addr(macaddr->sa_data))
 		return -EADDRNOTAVAIL;
@@ -3302,7 +3289,7 @@ set_speed:
 	}
 	writel(txreg, base + NvRegTxWatermark);
 
-	writel(NVREG_MISC1_FORCE | ( np->duplex ? 0 : NVREG_MISC1_HD),
+	writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD),
 		base + NvRegMisc1);
 	pci_push(base);
 	writel(np->linkspeed, base + NvRegLinkSpeed);
@@ -3312,8 +3299,8 @@ set_speed:
 
 	/* setup pause frame */
 	if (np->duplex != 0) {
 		if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) {
-			adv_pause = adv & (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM);
-			lpa_pause = lpa & (LPA_PAUSE_CAP| LPA_PAUSE_ASYM);
+			adv_pause = adv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
+			lpa_pause = lpa & (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
 
 			switch (adv_pause) {
 			case ADVERTISE_PAUSE_CAP:
@@ -3324,22 +3311,17 @@ set_speed:
 				}
 				break;
 			case ADVERTISE_PAUSE_ASYM:
-				if (lpa_pause == (LPA_PAUSE_CAP| LPA_PAUSE_ASYM))
-				{
+				if (lpa_pause == (LPA_PAUSE_CAP | LPA_PAUSE_ASYM))
 					pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
-				}
 				break;
-			case ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM:
-				if (lpa_pause & LPA_PAUSE_CAP)
-				{
+			case ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM:
+				if (lpa_pause & LPA_PAUSE_CAP) {
 					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
 					if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
 						pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
 				}
 				if (lpa_pause == LPA_PAUSE_ASYM)
-				{
 					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
-				}
 				break;
 			}
 		} else {
@@ -3514,7 +3496,7 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data)
 
 	dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name);
 
-	for (i=0; ; i++) {
+	for (i = 0;; i++) {
 		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
 		writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus);
 		dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events);
@@ -3553,7 +3535,7 @@ static int nv_napi_poll(struct napi_struct *napi, int budget)
 	u8 __iomem *base = get_hwbase(dev);
 	unsigned long flags;
 	int retcode;
-	int rx_count, tx_work=0, rx_work=0;
+	int rx_count, tx_work = 0, rx_work = 0;
 
 	do {
 		if (!nv_optimized(np)) {
@@ -3628,7 +3610,7 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data)
 
 	dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name);
 
-	for (i=0; ; i++) {
+	for (i = 0;; i++) {
 		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
 		writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
 		dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events);
@@ -3675,7 +3657,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data)
 
 	dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name);
 
-	for (i=0; ; i++) {
+	for (i = 0;; i++) {
 		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
 		writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus);
 		dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
@@ -3776,17 +3758,15 @@ static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
 	 * the remaining 8 interrupts.
 	 */
 	for (i = 0; i < 8; i++) {
-		if ((irqmask >> i) & 0x1) {
+		if ((irqmask >> i) & 0x1)
 			msixmap |= vector << (i << 2);
-		}
 	}
 	writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);
 
 	msixmap = 0;
 	for (i = 0; i < 8; i++) {
-		if ((irqmask >> (i + 8)) & 0x1) {
+		if ((irqmask >> (i + 8)) & 0x1)
 			msixmap |= vector << (i << 2);
-		}
 	}
 	writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
 }
@@ -3809,9 +3789,8 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
 	}
 
 	if (np->msi_flags & NV_MSI_X_CAPABLE) {
-		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
+		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++)
 			np->msi_x_entry[i].entry = i;
-		}
 		if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
 			np->msi_flags |= NV_MSI_X_ENABLED;
 			if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
@@ -3903,9 +3882,8 @@ static void nv_free_irq(struct net_device *dev)
 	int i;
 
 	if (np->msi_flags & NV_MSI_X_ENABLED) {
-		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
+		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++)
 			free_irq(np->msi_x_entry[i].vector, dev);
-		}
 		pci_disable_msix(np->pci_dev);
 		np->msi_flags &= ~NV_MSI_X_ENABLED;
 	} else {
@@ -3975,7 +3953,7 @@ static void nv_do_nic_poll(unsigned long data)
 			/* reinit nic view of the rx queue */
 			writel(np->rx_buf_sz, base + NvRegOffloadConfig);
 			setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
-			writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
+			writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
 				base + NvRegRingSizes);
 			pci_push(base);
 			writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
@@ -4105,7 +4083,7 @@ static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
 	}
 
 	if (netif_carrier_ok(dev)) {
-		switch(np->linkspeed & (NVREG_LINKSPEED_MASK)) {
+		switch (np->linkspeed & (NVREG_LINKSPEED_MASK)) {
 		case NVREG_LINKSPEED_10:
 			ecmd->speed = SPEED_10;
 			break;
@@ -4344,7 +4322,7 @@ static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void
 
 	regs->version = FORCEDETH_REGS_VER;
 	spin_lock_irq(&np->lock);
-	for (i = 0;i <= np->register_size/sizeof(u32); i++)
+	for (i = 0; i <= np->register_size/sizeof(u32); i++)
 		rbuf[i] = readl(base + i*sizeof(u32));
 	spin_unlock_irq(&np->lock);
 }
@@ -4491,14 +4469,14 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
 	np->tx_ring_size = ring->tx_pending;
 
 	if (!nv_optimized(np)) {
-		np->rx_ring.orig = (struct ring_desc*)rxtx_ring;
+		np->rx_ring.orig = (struct ring_desc *)rxtx_ring;
 		np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
 	} else {
-		np->rx_ring.ex = (struct ring_desc_ex*)rxtx_ring;
+		np->rx_ring.ex = (struct ring_desc_ex *)rxtx_ring;
 		np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
 	}
-	np->rx_skb = (struct nv_skb_map*)rx_skbuff;
-	np->tx_skb = (struct nv_skb_map*)tx_skbuff;
+	np->rx_skb = (struct nv_skb_map *)rx_skbuff;
+	np->tx_skb = (struct nv_skb_map *)tx_skbuff;
 	np->ring_addr = ring_addr;
 
 	memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
@@ -4515,7 +4493,7 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
 		/* reinit nic view of the queues */
 		writel(np->rx_buf_sz, base + NvRegOffloadConfig);
 		setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
-		writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
+		writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
 			base + NvRegRingSizes);
 		pci_push(base);
 		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
@@ -4841,7 +4819,7 @@ static int nv_loopback_test(struct net_device *dev)
 	/* reinit nic view of the rx queue */
 	writel(np->rx_buf_sz, base + NvRegOffloadConfig);
 	setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
-	writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
+	writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
 		base + NvRegRingSizes);
 	pci_push(base);
 
@@ -4893,9 +4871,8 @@ static int nv_loopback_test(struct net_device *dev)
 			if (flags & NV_RX_ERROR)
 				ret = 0;
 		} else {
-			if (flags & NV_RX2_ERROR) {
+			if (flags & NV_RX2_ERROR)
 				ret = 0;
-			}
 		}
 
 		if (ret) {
@@ -4958,11 +4935,10 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
 		netif_addr_lock(dev);
 		spin_lock_irq(&np->lock);
 		nv_disable_hw_interrupts(dev, np->irqmask);
-		if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
+		if (!(np->msi_flags & NV_MSI_X_ENABLED))
 			writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
-		} else {
+		else
 			writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
-		}
 		/* stop engines */
 		nv_stop_rxtx(dev);
 		nv_txrx_reset(dev);
@@ -5003,7 +4979,7 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
 		/* reinit nic view of the rx queue */
 		writel(np->rx_buf_sz, base + NvRegOffloadConfig);
 		setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
-		writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
+		writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
 			base + NvRegRingSizes);
 		pci_push(base);
 		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
@@ -5106,8 +5082,7 @@ static int nv_mgmt_acquire_sema(struct net_device *dev)
 		    ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE)) {
 			np->mgmt_sema = 1;
 			return 1;
-		}
-		else
+		} else
 			udelay(50);
 	}
 
@@ -5204,7 +5179,7 @@ static int nv_open(struct net_device *dev)
 
 	/* give hw rings */
 	setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
-	writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
+	writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
 		base + NvRegRingSizes);
 
 	writel(np->linkspeed, base + NvRegLinkSpeed);
@@ -5251,8 +5226,7 @@ static int nv_open(struct net_device *dev)
 			writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval);
 		else
 			writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
-	}
-	else
+	} else
 		writel(poll_interval & 0xFFFF, base + NvRegPollingInterval);
 	writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
 	writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
@@ -5263,7 +5237,7 @@ static int nv_open(struct net_device *dev)
 		writel(NVREG_WAKEUPFLAGS_ENABLE , base + NvRegWakeUpFlags);
 
 	i = readl(base + NvRegPowerState);
-	if ( (i & NVREG_POWERSTATE_POWEREDUP) == 0)
+	if ((i & NVREG_POWERSTATE_POWEREDUP) == 0)
 		writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState);
 
 	pci_push(base);
@@ -5276,9 +5250,8 @@ static int nv_open(struct net_device *dev)
 	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
 	pci_push(base);
 
-	if (nv_request_irq(dev, 0)) {
+	if (nv_request_irq(dev, 0))
 		goto out_drain;
-	}
 
 	/* ask for interrupts */
 	nv_enable_hw_interrupts(dev, np->irqmask);
@@ -5466,7 +5439,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 	addr = 0;
 	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
 		dprintk(KERN_DEBUG "%s: resource %d start %p len %ld flags 0x%08lx.\n",
-				pci_name(pci_dev), i, (void*)pci_resource_start(pci_dev, i),
+				pci_name(pci_dev), i, (void *)pci_resource_start(pci_dev, i),
 				pci_resource_len(pci_dev, i),
 				pci_resource_flags(pci_dev, i));
 		if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
@@ -5631,7 +5604,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 		 */
 		dev_printk(KERN_ERR, &pci_dev->dev,
 			"Invalid Mac address detected: %pM\n",
 			dev->dev_addr);
 		dev_printk(KERN_ERR, &pci_dev->dev,
 			"Please complain to your hardware vendor. Switching to a random MAC.\n");
 		random_ether_addr(dev->dev_addr);
@@ -5663,16 +5636,15 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 		writel(powerstate, base + NvRegPowerState2);
 	}
 
-	if (np->desc_ver == DESC_VER_1) {
+	if (np->desc_ver == DESC_VER_1)
 		np->tx_flags = NV_TX_VALID;
-	} else {
+	else
 		np->tx_flags = NV_TX2_VALID;
-	}
 
 	np->msi_flags = 0;
-	if ((id->driver_data & DEV_HAS_MSI) && msi) {
+	if ((id->driver_data & DEV_HAS_MSI) && msi)
 		np->msi_flags |= NV_MSI_CAPABLE;
-	}
+
 	if ((id->driver_data & DEV_HAS_MSI_X) && msix) {
 		/* msix has had reported issues when modifying irqmask
 		   as in the case of napi, therefore, disable for now
@@ -5735,9 +5707,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 	    nv_mgmt_acquire_sema(dev) &&
 	    nv_mgmt_get_version(dev)) {
 		np->mac_in_use = 1;
-		if (np->mgmt_version > 0) {
+		if (np->mgmt_version > 0)
 			np->mac_in_use = readl(base + NvRegMgmtUnitControl) & NVREG_MGMTUNITCONTROL_INUSE;
-		}
 		dprintk(KERN_INFO "%s: mgmt unit is running. mac in use %x.\n",
 			pci_name(pci_dev), np->mac_in_use);
 		/* management unit setup the phy already? */
@@ -5799,9 +5770,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 	} else {
 		/* see if it is a gigabit phy */
 		u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
-		if (mii_status & PHY_GIGABIT) {
+		if (mii_status & PHY_GIGABIT)
 			np->gigabit = PHY_GIGABIT;
-		}
 	}
 
 	/* set default link speed settings */
@@ -5829,19 +5799,19 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 		  dev->dev_addr[5]);
 
 	dev_printk(KERN_INFO, &pci_dev->dev, "%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
 		   dev->features & NETIF_F_HIGHDMA ? "highdma " : "",
 		   dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ?
			"csum " : "",
 		   dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ?
			"vlan " : "",
 		   id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "",
 		   id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "",
 		   id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "",
 		   np->gigabit == PHY_GIGABIT ? "gbit " : "",
 		   np->need_linktimer ? "lnktim " : "",
 		   np->msi_flags & NV_MSI_CAPABLE ? "msi " : "",
 		   np->msi_flags & NV_MSI_X_CAPABLE ? "msi-x " : "",
 		   np->desc_ver);
 
 	return 0;
 
@@ -5931,13 +5901,13 @@ static int nv_suspend(struct pci_dev *pdev, pm_message_t state)
 	int i;
 
 	if (netif_running(dev)) {
-		// Gross.
+		/* Gross. */
 		nv_close(dev);
 	}
 	netif_device_detach(dev);
 
 	/* save non-pci configuration space */
-	for (i = 0;i <= np->register_size/sizeof(u32); i++)
+	for (i = 0; i <= np->register_size/sizeof(u32); i++)
 		np->saved_config_space[i] = readl(base + i*sizeof(u32));
 
 	pci_save_state(pdev);
@@ -5960,7 +5930,7 @@ static int nv_resume(struct pci_dev *pdev)
 	pci_enable_wake(pdev, PCI_D0, 0);
 
 	/* restore non-pci configuration space */
-	for (i = 0;i <= np->register_size/sizeof(u32); i++)
+	for (i = 0; i <= np->register_size/sizeof(u32); i++)
 		writel(np->saved_config_space[i], base+i*sizeof(u32));
 
 	if (np->driver_data & DEV_NEED_MSI_FIX)
@@ -5990,9 +5960,8 @@ static void nv_shutdown(struct pci_dev *pdev)
 	 * If we really go for poweroff, we must not restore the MAC,
 	 * otherwise the MAC for WOL will be reversed at least on some boards.
	 */
-	if (system_state != SYSTEM_POWER_OFF) {
+	if (system_state != SYSTEM_POWER_OFF)
 		nv_restore_mac_addr(pdev);
-	}
 
 	pci_disable_device(pdev);
 	/*