path: root/drivers/net
Diffstat (limited to 'drivers/net')
 drivers/net/au1000_eth.c | 1
 drivers/net/benet/be.h | 2
 drivers/net/benet/be_cmds.h | 2
 drivers/net/benet/be_ethtool.c | 8
 drivers/net/benet/be_main.c | 28
 drivers/net/bnx2.h | 9
 drivers/net/bonding/bond_main.c | 10
 drivers/net/can/dev.c | 17
 drivers/net/can/usb/ems_usb.c | 5
 drivers/net/cassini.c | 5
 drivers/net/cnic.c | 6
 drivers/net/davinci_emac.c | 2
 drivers/net/dm9000.h | 2
 drivers/net/e100.c | 26
 drivers/net/e1000e/defines.h | 2
 drivers/net/e1000e/e1000.h | 26
 drivers/net/e1000e/hw.h | 3
 drivers/net/e1000e/ich8lan.c | 628
 drivers/net/e1000e/phy.c | 476
 drivers/net/fsl_pq_mdio.c | 1
 drivers/net/ifb.c | 3
 drivers/net/igb/igb_ethtool.c | 35
 drivers/net/igbvf/ethtool.c | 30
 drivers/net/ixgbe/ixgbe_ethtool.c | 22
 drivers/net/ixgbe/ixgbe_main.c | 84
 drivers/net/macsonic.c | 117
 drivers/net/mlx4/main.c | 1
 drivers/net/myri10ge/myri10ge.c | 17
 drivers/net/netxen/netxen_nic_hdr.h | 1
 drivers/net/netxen/netxen_nic_hw.c | 14
 drivers/net/netxen/netxen_nic_init.c | 8
 drivers/net/netxen/netxen_nic_main.c | 1
 drivers/net/pcmcia/pcnet_cs.c | 2
 drivers/net/pppoe.c | 129
 drivers/net/qlge/qlge.h | 1
 drivers/net/qlge/qlge_main.c | 80
 drivers/net/qlge/qlge_mpi.c | 25
 drivers/net/r8169.c | 15
 drivers/net/sfc/rx.c | 9
 drivers/net/sfc/sfe4001.c | 4
 drivers/net/sh_eth.c | 1
 drivers/net/sky2.c | 2
 drivers/net/tokenring/ibmtr.c | 11
 drivers/net/usb/Kconfig | 2
 drivers/net/usb/cdc_ether.c | 42
 drivers/net/usb/dm9601.c | 4
 drivers/net/usb/rndis_host.c | 6
 drivers/net/virtio_net.c | 20
 drivers/net/wireless/airo.c | 5
 drivers/net/wireless/ath/ath9k/rc.c | 2
 drivers/net/wireless/b43/dma.c | 15
 drivers/net/wireless/b43/leds.h | 1
 drivers/net/wireless/b43/main.c | 1
 drivers/net/wireless/b43/rfkill.c | 3
 drivers/net/wireless/libertas/if_spi.c | 10
 drivers/net/wireless/libertas/if_usb.c | 2
 drivers/net/wireless/ray_cs.c | 2
 drivers/net/wireless/rt2x00/rt2800usb.c | 2
 drivers/net/wireless/rt2x00/rt2x00dev.c | 4
 drivers/net/wireless/rt2x00/rt2x00link.c | 11
 drivers/net/wireless/rt2x00/rt2x00usb.c | 9
 drivers/net/wireless/rt2x00/rt73usb.c | 5
 drivers/net/wireless/rtl818x/rtl8187_leds.c | 4
 63 files changed, 1444 insertions(+), 577 deletions(-)
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 04f63c77071d..ce6f1ac25df8 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -34,6 +34,7 @@
  *
  *
  */
+#include <linux/capability.h>
 #include <linux/dma-mapping.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index a80da0e14a52..3b79a225628a 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -259,6 +259,8 @@ struct be_adapter {
 	u32 port_num;
 	bool promiscuous;
 	u32 cap;
+	u32 rx_fc;		/* Rx flow control */
+	u32 tx_fc;		/* Tx flow control */
 };
 
 extern const struct ethtool_ops be_ethtool_ops;
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index 49953787e41c..e5f9676cf1bc 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -68,7 +68,7 @@ enum {
 #define CQE_STATUS_COMPL_MASK		0xFFFF
 #define CQE_STATUS_COMPL_SHIFT		0	/* bits 0 - 15 */
 #define CQE_STATUS_EXTD_MASK		0xFFFF
-#define CQE_STATUS_EXTD_SHIFT		0	/* bits 0 - 15 */
+#define CQE_STATUS_EXTD_SHIFT		16	/* bits 16 - 31 */
 
 struct be_mcc_compl {
 	u32 status;		/* dword 0 */
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index cda5bf2fc50a..f0fd95b43c07 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -323,10 +323,12 @@ be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
 
 	if (ecmd->autoneg != 0)
 		return -EINVAL;
+	adapter->tx_fc = ecmd->tx_pause;
+	adapter->rx_fc = ecmd->rx_pause;
 
-	status = be_cmd_set_flow_control(adapter, ecmd->tx_pause,
-			ecmd->rx_pause);
-	if (!status)
+	status = be_cmd_set_flow_control(adapter,
+			adapter->tx_fc, adapter->rx_fc);
+	if (status)
 		dev_warn(&adapter->pdev->dev, "Pause param set failed.\n");
 
 	return status;
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 1f941f027718..876b357101fa 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -1610,11 +1610,21 @@ static int be_open(struct net_device *netdev)
 
 	status = be_cmd_link_status_query(adapter, &link_up);
 	if (status)
-		return status;
+		goto ret_sts;
 	be_link_status_update(adapter, link_up);
 
+	status = be_vid_config(adapter);
+	if (status)
+		goto ret_sts;
+
+	status = be_cmd_set_flow_control(adapter,
+			adapter->tx_fc, adapter->rx_fc);
+	if (status)
+		goto ret_sts;
+
 	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
-	return 0;
+ret_sts:
+	return status;
 }
 
 static int be_setup(struct be_adapter *adapter)
@@ -1648,17 +1658,8 @@ static int be_setup(struct be_adapter *adapter)
 	if (status != 0)
 		goto rx_qs_destroy;
 
-	status = be_vid_config(adapter);
-	if (status != 0)
-		goto mccqs_destroy;
-
-	status = be_cmd_set_flow_control(adapter, true, true);
-	if (status != 0)
-		goto mccqs_destroy;
 	return 0;
 
-mccqs_destroy:
-	be_mcc_queues_destroy(adapter);
 rx_qs_destroy:
 	be_rx_queues_destroy(adapter);
 tx_qs_destroy:
@@ -1909,6 +1910,10 @@ static void be_netdev_init(struct net_device *netdev)
 
 	adapter->rx_csum = true;
 
+	/* Default settings for Rx and Tx flow control */
+	adapter->rx_fc = true;
+	adapter->tx_fc = true;
+
 	netif_set_gso_max_size(netdev, 65535);
 
 	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
@@ -2171,6 +2176,7 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
 		be_close(netdev);
 		rtnl_unlock();
 	}
+	be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
 	be_clear(adapter);
 
 	pci_save_state(pdev);
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 6c7f795d12de..a4d83409f205 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -361,9 +361,12 @@ struct l2_fhdr {
 #define BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE	(1<<28)
 
 #define BNX2_L2CTX_HOST_BDIDX			0x00000004
-#define BNX2_L2CTX_STATUSB_NUM_SHIFT		16
-#define BNX2_L2CTX_STATUSB_NUM(sb_id)		\
-	(((sb_id) > 0) ? (((sb_id) + 7) << BNX2_L2CTX_STATUSB_NUM_SHIFT) : 0)
+#define BNX2_L2CTX_L5_STATUSB_NUM_SHIFT		16
+#define BNX2_L2CTX_L2_STATUSB_NUM_SHIFT		24
+#define BNX2_L2CTX_L5_STATUSB_NUM(sb_id)	\
+	(((sb_id) > 0) ? (((sb_id) + 7) << BNX2_L2CTX_L5_STATUSB_NUM_SHIFT) : 0)
+#define BNX2_L2CTX_L2_STATUSB_NUM(sb_id)	\
+	(((sb_id) > 0) ? (((sb_id) + 7) << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT) : 0)
 #define BNX2_L2CTX_HOST_BSEQ			0x00000008
 #define BNX2_L2CTX_NX_BSEQ			0x0000000c
 #define BNX2_L2CTX_NX_BDHADDR_HI		0x00000010
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 69c5b15e22da..40fb5eefc72e 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -691,7 +691,7 @@ static int bond_check_dev_link(struct bonding *bond,
 				  struct net_device *slave_dev, int reporting)
 {
 	const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
-	static int (*ioctl)(struct net_device *, struct ifreq *, int);
+	int (*ioctl)(struct net_device *, struct ifreq *, int);
 	struct ifreq ifr;
 	struct mii_ioctl_data *mii;
 
@@ -3665,10 +3665,10 @@ static int bond_xmit_hash_policy_l23(struct sk_buff *skb,
 
 	if (skb->protocol == htons(ETH_P_IP)) {
 		return ((ntohl(iph->saddr ^ iph->daddr) & 0xffff) ^
-			(data->h_dest[5] ^ bond_dev->dev_addr[5])) % count;
+			(data->h_dest[5] ^ data->h_source[5])) % count;
 	}
 
-	return (data->h_dest[5] ^ bond_dev->dev_addr[5]) % count;
+	return (data->h_dest[5] ^ data->h_source[5]) % count;
 }
 
 /*
@@ -3695,7 +3695,7 @@ static int bond_xmit_hash_policy_l34(struct sk_buff *skb,
 
 	}
 
-	return (data->h_dest[5] ^ bond_dev->dev_addr[5]) % count;
+	return (data->h_dest[5] ^ data->h_source[5]) % count;
 }
 
 /*
@@ -3706,7 +3706,7 @@ static int bond_xmit_hash_policy_l2(struct sk_buff *skb,
 {
 	struct ethhdr *data = (struct ethhdr *)skb->data;
 
-	return (data->h_dest[5] ^ bond_dev->dev_addr[5]) % count;
+	return (data->h_dest[5] ^ data->h_source[5]) % count;
 }
 
 /*-------------------------- Device entry points ----------------------------*/
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index f0b9a1e1db46..564e31c9fee4 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -589,6 +589,22 @@ static int can_changelink(struct net_device *dev,
 	return 0;
 }
 
+static size_t can_get_size(const struct net_device *dev)
+{
+	struct can_priv *priv = netdev_priv(dev);
+	size_t size;
+
+	size = nla_total_size(sizeof(u32));   /* IFLA_CAN_STATE */
+	size += sizeof(struct can_ctrlmode);  /* IFLA_CAN_CTRLMODE */
+	size += nla_total_size(sizeof(u32));  /* IFLA_CAN_RESTART_MS */
+	size += sizeof(struct can_bittiming); /* IFLA_CAN_BITTIMING */
+	size += sizeof(struct can_clock);     /* IFLA_CAN_CLOCK */
+	if (priv->bittiming_const)            /* IFLA_CAN_BITTIMING_CONST */
+		size += sizeof(struct can_bittiming_const);
+
+	return size;
+}
+
 static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
 {
 	struct can_priv *priv = netdev_priv(dev);
@@ -639,6 +655,7 @@ static struct rtnl_link_ops can_link_ops __read_mostly = {
 	.setup		= can_setup,
 	.newlink	= can_newlink,
 	.changelink	= can_changelink,
+	.get_size	= can_get_size,
 	.fill_info	= can_fill_info,
 	.fill_xstats	= can_fill_xstats,
 };
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 9012e0abc626..abdbd9c2b788 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -319,7 +319,7 @@ static void ems_usb_rx_can_msg(struct ems_usb *dev, struct ems_cpc_msg *msg)
 
 	cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
 
-	cf->can_id = msg->msg.can_msg.id;
+	cf->can_id = le32_to_cpu(msg->msg.can_msg.id);
 	cf->can_dlc = min_t(u8, msg->msg.can_msg.length, 8);
 
 	if (msg->type == CPC_MSG_TYPE_EXT_CAN_FRAME
@@ -813,6 +813,9 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
 		msg->length = CPC_CAN_MSG_MIN_SIZE + cf->can_dlc;
 	}
 
+	/* Respect byte order */
+	msg->msg.can_msg.id = cpu_to_le32(msg->msg.can_msg.id);
+
 	for (i = 0; i < MAX_TX_URBS; i++) {
 		if (dev->tx_contexts[i].echo_index == MAX_TX_URBS) {
 			context = &dev->tx_contexts[i];
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index 05916aafa4f1..f857afe8e488 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -4342,11 +4342,11 @@ static int cas_open(struct net_device *dev)
 		cas_unlock_all_restore(cp, flags);
 	}
 
+	err = -ENOMEM;
 	if (cas_tx_tiny_alloc(cp) < 0)
-		return -ENOMEM;
+		goto err_unlock;
 
 	/* alloc rx descriptors */
-	err = -ENOMEM;
 	if (cas_alloc_rxds(cp) < 0)
 		goto err_tx_tiny;
 
@@ -4386,6 +4386,7 @@ err_spare:
 	cas_free_rxds(cp);
 err_tx_tiny:
 	cas_tx_tiny_free(cp);
+err_unlock:
 	mutex_unlock(&cp->pm_mutex);
 	return err;
 }
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 46c87ec7960c..3bf1b04f2cab 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -2264,9 +2264,9 @@ static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
 	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val);
 
 	if (sb_id == 0)
-		val = 2 << BNX2_L2CTX_STATUSB_NUM_SHIFT;
+		val = 2 << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT;
 	else
-		val = BNX2_L2CTX_STATUSB_NUM(sb_id);
+		val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
 	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);
 
 	rxbd = (struct rx_bd *) (cp->l2_ring + BCM_PAGE_SIZE);
@@ -2423,7 +2423,7 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
 	cp->int_num = 0;
 	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
 		u32 sb_id = cp->status_blk_num;
-		u32 sb = BNX2_L2CTX_STATUSB_NUM(sb_id);
+		u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);
 
 		cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
 		cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index f72c56dec33c..3179521aee90 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -2221,7 +2221,7 @@ void emac_poll_controller(struct net_device *ndev)
 	struct emac_priv *priv = netdev_priv(ndev);
 
 	emac_int_disable(priv);
-	emac_irq(ndev->irq, priv);
+	emac_irq(ndev->irq, ndev);
 	emac_int_enable(priv);
 }
 #endif
diff --git a/drivers/net/dm9000.h b/drivers/net/dm9000.h
index 80817c2edfb3..fb1c924d79b4 100644
--- a/drivers/net/dm9000.h
+++ b/drivers/net/dm9000.h
@@ -50,7 +50,7 @@
 #define DM9000_RCSR		0x32
 
 #define CHIPR_DM9000A		0x19
-#define CHIPR_DM9000B		0x1B
+#define CHIPR_DM9000B		0x1A
 
 #define DM9000_MRCMDX		0xF0
 #define DM9000_MRCMD		0xF2
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 5d2f48f02251..3c29a20b751e 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -1427,19 +1427,31 @@ static int e100_phy_init(struct nic *nic)
 	} else
 		DPRINTK(HW, DEBUG, "phy_addr = %d\n", nic->mii.phy_id);
 
-	/* Isolate all the PHY ids */
-	for (addr = 0; addr < 32; addr++)
-		mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE);
-	/* Select the discovered PHY */
-	bmcr &= ~BMCR_ISOLATE;
-	mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr);
-
 	/* Get phy ID */
 	id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1);
 	id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2);
 	nic->phy = (u32)id_hi << 16 | (u32)id_lo;
 	DPRINTK(HW, DEBUG, "phy ID = 0x%08X\n", nic->phy);
 
+	/* Select the phy and isolate the rest */
+	for (addr = 0; addr < 32; addr++) {
+		if (addr != nic->mii.phy_id) {
+			mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE);
+		} else if (nic->phy != phy_82552_v) {
+			bmcr = mdio_read(netdev, addr, MII_BMCR);
+			mdio_write(netdev, addr, MII_BMCR,
+				   bmcr & ~BMCR_ISOLATE);
+		}
+	}
+	/*
+	 * Workaround for 82552:
+	 * Clear the ISOLATE bit on selected phy_id last (mirrored on all
+	 * other phy_id's) using bmcr value from addr discovery loop above.
+	 */
+	if (nic->phy == phy_82552_v)
+		mdio_write(netdev, nic->mii.phy_id, MII_BMCR,
+			   bmcr & ~BMCR_ISOLATE);
+
 	/* Handle National tx phys */
 #define NCS_PHY_MODEL_MASK	0xFFF0FFFF
 	if ((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) {
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index c0f185beb8bc..1190167a8b3d 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -76,6 +76,7 @@
 /* Extended Device Control */
 #define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Definable Pin 7 */
 #define E1000_CTRL_EXT_EE_RST    0x00002000 /* Reinitialize from EEPROM */
+#define E1000_CTRL_EXT_SPD_BYPS  0x00008000 /* Speed Select Bypass */
 #define E1000_CTRL_EXT_RO_DIS    0x00020000 /* Relaxed Ordering disable */
 #define E1000_CTRL_EXT_DMA_DYN_CLK_EN 0x00080000 /* DMA Dynamic Clock Gating */
 #define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
@@ -347,6 +348,7 @@
 /* Extended Configuration Control and Size */
 #define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP      0x00000020
 #define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE       0x00000001
+#define E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE       0x00000008
 #define E1000_EXTCNF_CTRL_SWFLAG                 0x00000020
 #define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK   0x00FF0000
 #define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT  16
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index 981936c1fb46..189dfa2d6c76 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -141,6 +141,20 @@ struct e1000_info;
 #define HV_TNCRS_UPPER		PHY_REG(778, 29) /* Transmit with no CRS */
 #define HV_TNCRS_LOWER		PHY_REG(778, 30)
 
+/* BM PHY Copper Specific Status */
+#define BM_CS_STATUS                   17
+#define BM_CS_STATUS_LINK_UP           0x0400
+#define BM_CS_STATUS_RESOLVED          0x0800
+#define BM_CS_STATUS_SPEED_MASK        0xC000
+#define BM_CS_STATUS_SPEED_1000        0x8000
+
+/* 82577 Mobile Phy Status Register */
+#define HV_M_STATUS                    26
+#define HV_M_STATUS_AUTONEG_COMPLETE   0x1000
+#define HV_M_STATUS_SPEED_MASK         0x0300
+#define HV_M_STATUS_SPEED_1000         0x0200
+#define HV_M_STATUS_LINK_UP            0x0040
+
 enum e1000_boards {
 	board_82571,
 	board_82572,
@@ -519,9 +533,13 @@ extern s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw);
 extern s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw);
 extern s32 e1000e_get_phy_info_igp(struct e1000_hw *hw);
 extern s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data);
+extern s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset,
+					  u16 *data);
 extern s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw);
 extern s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active);
 extern s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data);
+extern s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset,
+					   u16 data);
 extern s32 e1000e_phy_sw_reset(struct e1000_hw *hw);
 extern s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw);
 extern s32 e1000e_get_cfg_done(struct e1000_hw *hw);
@@ -538,7 +556,11 @@ extern s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data);
 extern s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data);
 extern void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl);
 extern s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data);
+extern s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset,
+					u16 data);
 extern s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data);
+extern s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset,
+				       u16 *data);
 extern s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
 				       u32 usec_interval, bool *success);
 extern s32 e1000e_phy_reset_dsp(struct e1000_hw *hw);
@@ -546,7 +568,11 @@ extern s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
 extern s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
 extern s32 e1000e_check_downshift(struct e1000_hw *hw);
 extern s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data);
+extern s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset,
+					u16 *data);
 extern s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data);
+extern s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset,
+					 u16 data);
 extern s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw, bool slow);
 extern s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw);
 extern s32 e1000_copper_link_setup_82577(struct e1000_hw *hw);
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index fd44d9f90769..aaea41ef794d 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -764,11 +764,13 @@ struct e1000_phy_operations {
 	s32  (*get_cable_length)(struct e1000_hw *);
 	s32  (*get_phy_info)(struct e1000_hw *);
 	s32  (*read_phy_reg)(struct e1000_hw *, u32, u16 *);
+	s32  (*read_phy_reg_locked)(struct e1000_hw *, u32, u16 *);
 	void (*release_phy)(struct e1000_hw *);
 	s32  (*reset_phy)(struct e1000_hw *);
 	s32  (*set_d0_lplu_state)(struct e1000_hw *, bool);
 	s32  (*set_d3_lplu_state)(struct e1000_hw *, bool);
 	s32  (*write_phy_reg)(struct e1000_hw *, u32, u16);
+	s32  (*write_phy_reg_locked)(struct e1000_hw *, u32, u16);
 	s32  (*cfg_on_link_up)(struct e1000_hw *);
 };
 
@@ -901,6 +903,7 @@ struct e1000_shadow_ram {
 struct e1000_dev_spec_ich8lan {
 	bool kmrn_lock_loss_workaround_enabled;
 	struct e1000_shadow_ram shadow_ram[E1000_ICH8_SHADOW_RAM_WORDS];
+	bool nvm_k1_enabled;
 };
 
 struct e1000_hw {
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 99df2abf82a9..51ddb04ab195 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -122,6 +122,27 @@
 
 #define HV_LED_CONFIG		PHY_REG(768, 30) /* LED Configuration */
 
+#define SW_FLAG_TIMEOUT    1000 /* SW Semaphore flag timeout in milliseconds */
+
+/* SMBus Address Phy Register */
+#define HV_SMB_ADDR            PHY_REG(768, 26)
+#define HV_SMB_ADDR_PEC_EN     0x0200
+#define HV_SMB_ADDR_VALID      0x0080
+
+/* Strapping Option Register - RO */
+#define E1000_STRAP                     0x0000C
+#define E1000_STRAP_SMBUS_ADDRESS_MASK  0x00FE0000
+#define E1000_STRAP_SMBUS_ADDRESS_SHIFT 17
+
+/* OEM Bits Phy Register */
+#define HV_OEM_BITS            PHY_REG(768, 25)
+#define HV_OEM_BITS_LPLU       0x0004 /* Low Power Link Up */
+#define HV_OEM_BITS_GBE_DIS    0x0040 /* Gigabit Disable */
+#define HV_OEM_BITS_RESTART_AN 0x0400 /* Restart Auto-negotiation */
+
+#define E1000_NVM_K1_CONFIG 0x1B /* NVM K1 Config Word */
+#define E1000_NVM_K1_ENABLE 0x1  /* NVM Enable K1 bit */
+
 /* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
 /* Offset 04h HSFSTS */
 union ich8_hws_flash_status {
@@ -200,6 +221,10 @@ static s32 e1000_setup_led_pchlan(struct e1000_hw *hw);
 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw);
 static s32 e1000_led_on_pchlan(struct e1000_hw *hw);
 static s32 e1000_led_off_pchlan(struct e1000_hw *hw);
+static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
+static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw);
+static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
+static s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable);
 
 static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
 {
@@ -242,7 +267,11 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
 
 	phy->ops.check_polarity       = e1000_check_polarity_ife_ich8lan;
 	phy->ops.read_phy_reg         = e1000_read_phy_reg_hv;
+	phy->ops.read_phy_reg_locked  = e1000_read_phy_reg_hv_locked;
+	phy->ops.set_d0_lplu_state    = e1000_set_lplu_state_pchlan;
+	phy->ops.set_d3_lplu_state    = e1000_set_lplu_state_pchlan;
 	phy->ops.write_phy_reg        = e1000_write_phy_reg_hv;
+	phy->ops.write_phy_reg_locked = e1000_write_phy_reg_hv_locked;
 	phy->autoneg_mask             = AUTONEG_ADVERTISE_SPEED_DEFAULT;
 
 	phy->id = e1000_phy_unknown;
@@ -303,6 +332,8 @@ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
 	case IGP03E1000_E_PHY_ID:
 		phy->type = e1000_phy_igp_3;
 		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+		phy->ops.read_phy_reg_locked = e1000e_read_phy_reg_igp_locked;
+		phy->ops.write_phy_reg_locked = e1000e_write_phy_reg_igp_locked;
 		break;
 	case IFE_E_PHY_ID:
 	case IFE_PLUS_E_PHY_ID:
@@ -469,14 +500,6 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
 		goto out;
 	}
 
-	if (hw->mac.type == e1000_pchlan) {
-		ret_val = e1000e_write_kmrn_reg(hw,
-						E1000_KMRNCTRLSTA_K1_CONFIG,
-						E1000_KMRNCTRLSTA_K1_ENABLE);
-		if (ret_val)
-			goto out;
-	}
-
 	/*
 	 * First we want to see if the MII Status Register reports
 	 * link.  If so, then we want to get the current speed/duplex
@@ -486,6 +509,12 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
 	if (ret_val)
 		goto out;
 
+	if (hw->mac.type == e1000_pchlan) {
+		ret_val = e1000_k1_gig_workaround_hv(hw, link);
+		if (ret_val)
+			goto out;
+	}
+
 	if (!link)
 		goto out; /* No link detected */
 
@@ -568,12 +597,39 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
 static DEFINE_MUTEX(nvm_mutex);
 
 /**
+ * e1000_acquire_nvm_ich8lan - Acquire NVM mutex
+ * @hw: pointer to the HW structure
+ *
+ * Acquires the mutex for performing NVM operations.
+ **/
+static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
+{
+	mutex_lock(&nvm_mutex);
+
+	return 0;
+}
+
+/**
+ * e1000_release_nvm_ich8lan - Release NVM mutex
+ * @hw: pointer to the HW structure
+ *
+ * Releases the mutex used while performing NVM operations.
+ **/
+static void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
+{
+	mutex_unlock(&nvm_mutex);
+
+	return;
+}
+
+static DEFINE_MUTEX(swflag_mutex);
+
+/**
  * e1000_acquire_swflag_ich8lan - Acquire software control flag
  * @hw: pointer to the HW structure
  *
- * Acquires the software control flag for performing NVM and PHY
- * operations.  This is a function pointer entry point only called by
- * read/write routines for the PHY and NVM parts.
+ * Acquires the software control flag for performing PHY and select
+ * MAC CSR accesses.
  **/
 static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
 {
@@ -582,7 +638,7 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
 
 	might_sleep();
 
-	mutex_lock(&nvm_mutex);
+	mutex_lock(&swflag_mutex);
 
 	while (timeout) {
 		extcnf_ctrl = er32(EXTCNF_CTRL);
@@ -599,7 +655,7 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
 		goto out;
 	}
 
-	timeout = PHY_CFG_TIMEOUT * 2;
+	timeout = SW_FLAG_TIMEOUT;
 
 	extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
 	ew32(EXTCNF_CTRL, extcnf_ctrl);
@@ -623,7 +679,7 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
 
 out:
 	if (ret_val)
-		mutex_unlock(&nvm_mutex);
+		mutex_unlock(&swflag_mutex);
 
 	return ret_val;
 }
@@ -632,9 +688,8 @@ out:
  * e1000_release_swflag_ich8lan - Release software control flag
  * @hw: pointer to the HW structure
  *
- * Releases the software control flag for performing NVM and PHY operations.
- * This is a function pointer entry point only called by read/write
- * routines for the PHY and NVM parts.
+ * Releases the software control flag for performing PHY and select
+ * MAC CSR accesses.
  **/
 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
 {
@@ -644,7 +699,9 @@ static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
 	extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
 	ew32(EXTCNF_CTRL, extcnf_ctrl);
 
-	mutex_unlock(&nvm_mutex);
+	mutex_unlock(&swflag_mutex);
+
+	return;
 }
 
 /**
@@ -752,6 +809,326 @@ static s32 e1000_phy_force_speed_duplex_ich8lan(struct e1000_hw *hw)
 }
 
 /**
+ * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
+ * @hw: pointer to the HW structure
+ *
+ * SW should configure the LCD from the NVM extended configuration region
+ * as a workaround for certain parts.
+ **/
+static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
+	s32 ret_val;
+	u16 word_addr, reg_data, reg_addr, phy_page = 0;
+
+	ret_val = hw->phy.ops.acquire_phy(hw);
+	if (ret_val)
+		return ret_val;
+
+	/*
+	 * Initialize the PHY from the NVM on ICH platforms.  This
+	 * is needed due to an issue where the NVM configuration is
+	 * not properly autoloaded after power transitions.
+	 * Therefore, after each PHY reset, we will load the
+	 * configuration data out of the NVM manually.
+	 */
+	if ((hw->mac.type == e1000_ich8lan && phy->type == e1000_phy_igp_3) ||
+	    (hw->mac.type == e1000_pchlan)) {
+		struct e1000_adapter *adapter = hw->adapter;
+
+		/* Check if SW needs to configure the PHY */
+		if ((adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_M_AMT) ||
+		    (adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_M) ||
+		    (hw->mac.type == e1000_pchlan))
+			sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
+		else
+			sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
+
+		data = er32(FEXTNVM);
+		if (!(data & sw_cfg_mask))
+			goto out;
+
+		/* Wait for basic configuration completes before proceeding */
+		e1000_lan_init_done_ich8lan(hw);
+
+		/*
+		 * Make sure HW does not configure LCD from PHY
+		 * extended configuration before SW configuration
+		 */
+		data = er32(EXTCNF_CTRL);
+		if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)
+			goto out;
+
+		cnf_size = er32(EXTCNF_SIZE);
+		cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
+		cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
+		if (!cnf_size)
+			goto out;
+
+		cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
+		cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
+
+		if (!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) &&
+		    (hw->mac.type == e1000_pchlan)) {
+			/*
+			 * HW configures the SMBus address and LEDs when the
+			 * OEM and LCD Write Enable bits are set in the NVM.
+			 * When both NVM bits are cleared, SW will configure
+			 * them instead.
+			 */
+			data = er32(STRAP);
+			data &= E1000_STRAP_SMBUS_ADDRESS_MASK;
+			reg_data = data >> E1000_STRAP_SMBUS_ADDRESS_SHIFT;
+			reg_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
+			ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR,
+								reg_data);
+			if (ret_val)
+				goto out;
+
+			data = er32(LEDCTL);
+			ret_val = e1000_write_phy_reg_hv_locked(hw,
+								HV_LED_CONFIG,
+								(u16)data);
+			if (ret_val)
+				goto out;
+		}
+		/* Configure LCD from extended configuration region. */
+
+		/* cnf_base_addr is in DWORD */
+		word_addr = (u16)(cnf_base_addr << 1);
+
+		for (i = 0; i < cnf_size; i++) {
+			ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1,
+						 &reg_data);
+			if (ret_val)
+				goto out;
+
+			ret_val = e1000_read_nvm(hw, (word_addr + i * 2 + 1),
+						 1, &reg_addr);
+			if (ret_val)
+				goto out;
+
+			/* Save off the PHY page for future writes. */
+			if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
+				phy_page = reg_data;
+				continue;
+			}
+
+			reg_addr &= PHY_REG_MASK;
+			reg_addr |= phy_page;
+
+			ret_val = phy->ops.write_phy_reg_locked(hw,
+								(u32)reg_addr,
+								reg_data);
+			if (ret_val)
+				goto out;
+		}
+	}
+
+out:
+	hw->phy.ops.release_phy(hw);
+	return ret_val;
+}
+
+/**
+ * e1000_k1_gig_workaround_hv - K1 Si workaround
+ * @hw: pointer to the HW structure
+ * @link: link up bool flag
+ *
+ * If K1 is enabled for 1Gbps, the MAC might stall when transitioning
+ * from a lower speed.  This workaround disables K1 whenever link is at 1Gig
+ * If link is down, the function will restore the default K1 setting located
+ * in the NVM.
+ **/
+static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
+{
+	s32 ret_val = 0;
+	u16 status_reg = 0;
+	bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
+
+	if (hw->mac.type != e1000_pchlan)
+		goto out;
+
+	/* Wrap the whole flow with the sw flag */
+	ret_val = hw->phy.ops.acquire_phy(hw);
+	if (ret_val)
+		goto out;
+
+	/* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
+	if (link) {
+		if (hw->phy.type == e1000_phy_82578) {
+			ret_val = hw->phy.ops.read_phy_reg_locked(hw,
+								  BM_CS_STATUS,
+								  &status_reg);
+			if (ret_val)
+				goto release;
+
+			status_reg &= BM_CS_STATUS_LINK_UP |
+				      BM_CS_STATUS_RESOLVED |
+				      BM_CS_STATUS_SPEED_MASK;
+
+			if (status_reg == (BM_CS_STATUS_LINK_UP |
+					   BM_CS_STATUS_RESOLVED |
+					   BM_CS_STATUS_SPEED_1000))
+				k1_enable = false;
+		}
+
+		if (hw->phy.type == e1000_phy_82577) {
+			ret_val = hw->phy.ops.read_phy_reg_locked(hw,
+								  HV_M_STATUS,
+								  &status_reg);
+			if (ret_val)
+				goto release;
+
+			status_reg &= HV_M_STATUS_LINK_UP |
+				      HV_M_STATUS_AUTONEG_COMPLETE |
+				      HV_M_STATUS_SPEED_MASK;
+
+			if (status_reg == (HV_M_STATUS_LINK_UP |
+					   HV_M_STATUS_AUTONEG_COMPLETE |
+					   HV_M_STATUS_SPEED_1000))
+				k1_enable = false;
+		}
+
+		/* Link stall fix for link up */
+		ret_val = hw->phy.ops.write_phy_reg_locked(hw, PHY_REG(770, 19),
+							   0x0100);
+		if (ret_val)
+			goto release;
+
+	} else {
+		/* Link stall fix for link down */
+		ret_val = hw->phy.ops.write_phy_reg_locked(hw, PHY_REG(770, 19),
+							   0x4100);
+		if (ret_val)
+			goto release;
+	}
+
+	ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
+
+release:
+	hw->phy.ops.release_phy(hw);
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_configure_k1_ich8lan - Configure K1 power state
+ * @hw: pointer to the HW structure
+ * @enable: K1 state to configure
+ *
+ * Configure the K1 power state based on the provided parameter.
+ * Assumes semaphore already acquired.
+ *
+ * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
+ **/
+static s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
+{
+	s32 ret_val = 0;
+	u32 ctrl_reg = 0;
+	u32 ctrl_ext = 0;
+	u32 reg = 0;
+	u16 kmrn_reg = 0;
+
+	ret_val = e1000e_read_kmrn_reg_locked(hw,
+					      E1000_KMRNCTRLSTA_K1_CONFIG,
+					      &kmrn_reg);
+	if (ret_val)
+		goto out;
+
+	if (k1_enable)
+		kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
+	else
+		kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
+
+	ret_val = e1000e_write_kmrn_reg_locked(hw,
+					       E1000_KMRNCTRLSTA_K1_CONFIG,
+					       kmrn_reg);
+	if (ret_val)
+		goto out;
+
+	udelay(20);
+	ctrl_ext = er32(CTRL_EXT);
+	ctrl_reg = er32(CTRL);
+
+	reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
+	reg |= E1000_CTRL_FRCSPD;
+	ew32(CTRL, reg);
+
+	ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
+	udelay(20);
+	ew32(CTRL, ctrl_reg);
+	ew32(CTRL_EXT, ctrl_ext);
+	udelay(20);
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
+ * @hw: pointer to the HW structure
+ * @d0_state: boolean if entering d0 or d3 device state
+ *
+ * SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
+ * collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
+ * in NVM determines whether HW should configure LPLU and Gbe Disable.
+ **/
+static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
+{
+	s32 ret_val = 0;
+	u32 mac_reg;
+	u16 oem_reg;
+
+	if (hw->mac.type != e1000_pchlan)
+		return ret_val;
+
+	ret_val = hw->phy.ops.acquire_phy(hw);
+	if (ret_val)
+		return ret_val;
+
+	mac_reg = er32(EXTCNF_CTRL);
+	if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
+		goto out;
+
+	mac_reg = er32(FEXTNVM);
+	if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
+		goto out;
+
+	mac_reg = er32(PHY_CTRL);
+
+	ret_val = hw->phy.ops.read_phy_reg_locked(hw, HV_OEM_BITS, &oem_reg);
+	if (ret_val)
+		goto out;
+
+	oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
+
+	if (d0_state) {
+		if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
+			oem_reg |= HV_OEM_BITS_GBE_DIS;
+
+		if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
+			oem_reg |= HV_OEM_BITS_LPLU;
+	} else {
+		if (mac_reg & E1000_PHY_CTRL_NOND0A_GBE_DISABLE)
+			oem_reg |= HV_OEM_BITS_GBE_DIS;
+
+		if (mac_reg & E1000_PHY_CTRL_NOND0A_LPLU)
+			oem_reg |= HV_OEM_BITS_LPLU;
+	}
+	/* Restart auto-neg to activate the bits */
+	oem_reg |= HV_OEM_BITS_RESTART_AN;
+	ret_val = hw->phy.ops.write_phy_reg_locked(hw, HV_OEM_BITS, oem_reg);
+
+out:
+	hw->phy.ops.release_phy(hw);
+
+	return ret_val;
+}
+
+
+/**
  * e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
  * done after every PHY reset.
  **/
@@ -791,10 +1168,20 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
 	ret_val = hw->phy.ops.acquire_phy(hw);
 	if (ret_val)
 		return ret_val;
+
 	hw->phy.addr = 1;
-	e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
+	ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
+	if (ret_val)
+		goto out;
 	hw->phy.ops.release_phy(hw);
 
+	/*
+	 * Configure the K1 Si workaround during phy reset assuming there is
+	 * link so that it disables K1 if link is in 1Gbps.
+	 */
+	ret_val = e1000_k1_gig_workaround_hv(hw, true);
+
+out:
 	return ret_val;
 }
 
@@ -840,11 +1227,8 @@ static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
  **/
 static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
 {
-	struct e1000_phy_info *phy = &hw->phy;
-	u32 i;
-	u32 data, cnf_size, cnf_base_addr, sw_cfg_mask;
-	s32 ret_val;
-	u16 word_addr, reg_data, reg_addr, phy_page = 0;
+	s32 ret_val = 0;
+	u16 reg;
 
 	ret_val = e1000e_phy_hw_reset_generic(hw);
 	if (ret_val)
@@ -859,81 +1243,20 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
 		return ret_val;
 	}
 
-	/*
-	 * Initialize the PHY from the NVM on ICH platforms.  This
-	 * is needed due to an issue where the NVM configuration is
-	 * not properly autoloaded after power transitions.
-	 * Therefore, after each PHY reset, we will load the
-	 * configuration data out of the NVM manually.
-	 */
-	if (hw->mac.type == e1000_ich8lan && phy->type == e1000_phy_igp_3) {
-		struct e1000_adapter *adapter = hw->adapter;
-
-		/* Check if SW needs configure the PHY */
-		if ((adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_M_AMT) ||
-		    (adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_M))
-			sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
-		else
-			sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
-
-		data = er32(FEXTNVM);
-		if (!(data & sw_cfg_mask))
-			return 0;
-
-		/* Wait for basic configuration completes before proceeding */
-		e1000_lan_init_done_ich8lan(hw);
-
-		/*
-		 * Make sure HW does not configure LCD from PHY
-		 * extended configuration before SW configuration
-		 */
-		data = er32(EXTCNF_CTRL);
-		if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)
-			return 0;
-
-		cnf_size = er32(EXTCNF_SIZE);
-		cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
-		cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
-		if (!cnf_size)
-			return 0;
-
-		cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
-		cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
-
-		/* Configure LCD from extended configuration region. */
-
-		/* cnf_base_addr is in DWORD */
-		word_addr = (u16)(cnf_base_addr << 1);
-
-		for (i = 0; i < cnf_size; i++) {
-			ret_val = e1000_read_nvm(hw,
-						 (word_addr + i * 2),
-						 1,
-						 &reg_data);
-			if (ret_val)
-				return ret_val;
-
-			ret_val = e1000_read_nvm(hw,
-						 (word_addr + i * 2 + 1),
-						 1,
-						 &reg_addr);
-			if (ret_val)
-				return ret_val;
-
-			/* Save off the PHY page for future writes. */
-			if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
-				phy_page = reg_data;
-				continue;
-			}
+	/* Dummy read to clear the phy wakeup bit after lcd reset */
+	if (hw->mac.type == e1000_pchlan)
+		e1e_rphy(hw, BM_WUC, &reg);
 
-			reg_addr |= phy_page;
+	/* Configure the LCD with the extended configuration region in NVM */
+	ret_val = e1000_sw_lcd_config_ich8lan(hw);
+	if (ret_val)
+		goto out;
 
-			ret_val = e1e_wphy(hw, (u32)reg_addr, reg_data);
-			if (ret_val)
-				return ret_val;
-		}
-	}
+	/* Configure the LCD with the OEM bits in NVM */
+	if (hw->mac.type == e1000_pchlan)
+		ret_val = e1000_oem_bits_config_ich8lan(hw, true);
 
+out:
 	return 0;
 }
 
@@ -1054,6 +1377,38 @@ static s32 e1000_check_polarity_ife_ich8lan(struct e1000_hw *hw)
 }
 
 /**
+ * e1000_set_lplu_state_pchlan - Set Low Power Link Up state
+ * @hw: pointer to the HW structure
+ * @active: true to enable LPLU, false to disable
+ *
+ * Sets the LPLU state according to the active flag.  For PCH, if OEM write
+ * bit are disabled in the NVM, writing the LPLU bits in the MAC will not set
+ * the phy speed. This function will manually set the LPLU bit and restart
+ * auto-neg as hw would do. D3 and D0 LPLU will call the same function
+ * since it configures the same bit.
+ **/
+static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
+{
+	s32 ret_val = 0;
+	u16 oem_reg;
+
+	ret_val = e1e_rphy(hw, HV_OEM_BITS, &oem_reg);
+	if (ret_val)
+		goto out;
+
+	if (active)
+		oem_reg |= HV_OEM_BITS_LPLU;
+	else
+		oem_reg &= ~HV_OEM_BITS_LPLU;
+
+	oem_reg |= HV_OEM_BITS_RESTART_AN;
+	ret_val = e1e_wphy(hw, HV_OEM_BITS, oem_reg);
+
+out:
+	return ret_val;
+}
+
+/**
  * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
  * @hw: pointer to the HW structure
  * @active: TRUE to enable LPLU, FALSE to disable
@@ -1314,12 +1669,11 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
 	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
 	    (words == 0)) {
 		hw_dbg(hw, "nvm parameter(s) out of bounds\n");
-		return -E1000_ERR_NVM;
+		ret_val = -E1000_ERR_NVM;
+		goto out;
 	}
 
-	ret_val = e1000_acquire_swflag_ich8lan(hw);
-	if (ret_val)
-		goto out;
+	nvm->ops.acquire_nvm(hw);
 
 	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
 	if (ret_val) {
@@ -1345,7 +1699,7 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
 		}
 	}
 
-	e1000_release_swflag_ich8lan(hw);
+	nvm->ops.release_nvm(hw);
 
 out:
 	if (ret_val)
@@ -1603,11 +1957,15 @@ static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
 		return -E1000_ERR_NVM;
 	}
 
+	nvm->ops.acquire_nvm(hw);
+
 	for (i = 0; i < words; i++) {
 		dev_spec->shadow_ram[offset+i].modified = 1;
 		dev_spec->shadow_ram[offset+i].value = data[i];
 	}
 
+	nvm->ops.release_nvm(hw);
+
 	return 0;
 }
 
@@ -1637,9 +1995,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
 	if (nvm->type != e1000_nvm_flash_sw)
 		goto out;
 
-	ret_val = e1000_acquire_swflag_ich8lan(hw);
-	if (ret_val)
-		goto out;
+	nvm->ops.acquire_nvm(hw);
 
 	/*
 	 * We're writing to the opposite bank so if we're on bank 1,
@@ -1657,7 +2013,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
 		old_bank_offset = 0;
 		ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
 		if (ret_val) {
-			e1000_release_swflag_ich8lan(hw);
+			nvm->ops.release_nvm(hw);
 			goto out;
 		}
 	} else {
@@ -1665,7 +2021,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
 		new_bank_offset = 0;
 		ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
 		if (ret_val) {
-			e1000_release_swflag_ich8lan(hw);
+			nvm->ops.release_nvm(hw);
 			goto out;
 		}
 	}
@@ -1723,7 +2079,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
 	if (ret_val) {
 		/* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */
 		hw_dbg(hw, "Flash commit failed.\n");
-		e1000_release_swflag_ich8lan(hw);
+		nvm->ops.release_nvm(hw);
 		goto out;
 	}
 
@@ -1736,7 +2092,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
 	act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
 	ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
 	if (ret_val) {
-		e1000_release_swflag_ich8lan(hw);
+		nvm->ops.release_nvm(hw);
 		goto out;
 	}
 	data &= 0xBFFF;
@@ -1744,7 +2100,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
 						       act_offset * 2 + 1,
 						       (u8)(data >> 8));
 	if (ret_val) {
-		e1000_release_swflag_ich8lan(hw);
+		nvm->ops.release_nvm(hw);
 		goto out;
 	}
 
@@ -1757,7 +2113,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
 	act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
 	ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
 	if (ret_val) {
-		e1000_release_swflag_ich8lan(hw);
+		nvm->ops.release_nvm(hw);
 		goto out;
 	}
 
@@ -1767,7 +2123,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
 		dev_spec->shadow_ram[i].value = 0xFFFF;
 	}
 
-	e1000_release_swflag_ich8lan(hw);
+	nvm->ops.release_nvm(hw);
 
 	/*
 	 * Reload the EEPROM, or else modifications will not appear
@@ -1831,14 +2187,12 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
  **/
 void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw)
 {
+	struct e1000_nvm_info *nvm = &hw->nvm;
 	union ich8_flash_protected_range pr0;
 	union ich8_hws_flash_status hsfsts;
 	u32 gfpreg;
-	s32 ret_val;
 
-	ret_val = e1000_acquire_swflag_ich8lan(hw);
-	if (ret_val)
-		return;
+	nvm->ops.acquire_nvm(hw);
 
 	gfpreg = er32flash(ICH_FLASH_GFPREG);
 
@@ -1859,7 +2213,7 @@ void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw)
1859 hsfsts.hsf_status.flockdn = true; 2213 hsfsts.hsf_status.flockdn = true;
1860 ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval); 2214 ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval);
1861 2215
1862 e1000_release_swflag_ich8lan(hw); 2216 nvm->ops.release_nvm(hw);
1863} 2217}
1864 2218
1865/** 2219/**
@@ -2229,6 +2583,8 @@ static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
2229 **/ 2583 **/
2230static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) 2584static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
2231{ 2585{
2586 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2587 u16 reg;
2232 u32 ctrl, icr, kab; 2588 u32 ctrl, icr, kab;
2233 s32 ret_val; 2589 s32 ret_val;
2234 2590
@@ -2263,6 +2619,18 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
2263 ew32(PBS, E1000_PBS_16K); 2619 ew32(PBS, E1000_PBS_16K);
2264 } 2620 }
2265 2621
2622 if (hw->mac.type == e1000_pchlan) {
 2623 /* Save the NVM K1 bit setting */
2624 ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &reg);
2625 if (ret_val)
2626 return ret_val;
2627
2628 if (reg & E1000_NVM_K1_ENABLE)
2629 dev_spec->nvm_k1_enabled = true;
2630 else
2631 dev_spec->nvm_k1_enabled = false;
2632 }
2633
2266 ctrl = er32(CTRL); 2634 ctrl = er32(CTRL);
2267 2635
2268 if (!e1000_check_reset_block(hw)) { 2636 if (!e1000_check_reset_block(hw)) {
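
The pchlan branch added above reads one NVM word (E1000_NVM_K1_CONFIG) during reset and latches the K1 bit into dev_spec->nvm_k1_enabled, so later link code can consult it without touching the flash again. A standalone sketch of that "read a config word, cache a boolean flag" step (a userspace model, not driver code; read_config_word() and the bit mask are invented for illustration):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NVM_K1_ENABLE 0x0001          /* assumed bit position for this model */

    /* Stand-in for the real NVM read; always succeeds and returns a fixed word. */
    static int read_config_word(uint16_t *word)
    {
        *word = 0x0001;
        return 0;
    }

    int main(void)
    {
        uint16_t reg;
        bool k1_enabled;

        if (read_config_word(&reg))
            return 1;                     /* propagate the error, as the driver does */

        k1_enabled = !!(reg & NVM_K1_ENABLE);
        printf("K1 enabled: %s\n", k1_enabled ? "yes" : "no");
        return 0;
    }
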
@@ -2304,7 +2672,19 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
2304 hw_dbg(hw, "Auto Read Done did not complete\n"); 2672 hw_dbg(hw, "Auto Read Done did not complete\n");
2305 } 2673 }
2306 } 2674 }
2675 /* Dummy read to clear the phy wakeup bit after lcd reset */
2676 if (hw->mac.type == e1000_pchlan)
2677 e1e_rphy(hw, BM_WUC, &reg);
2307 2678
2679 ret_val = e1000_sw_lcd_config_ich8lan(hw);
2680 if (ret_val)
2681 goto out;
2682
2683 if (hw->mac.type == e1000_pchlan) {
2684 ret_val = e1000_oem_bits_config_ich8lan(hw, true);
2685 if (ret_val)
2686 goto out;
2687 }
2308 /* 2688 /*
2309 * For PCH, this write will make sure that any noise 2689 * For PCH, this write will make sure that any noise
2310 * will be detected as a CRC error and be dropped rather than show up 2690 * will be detected as a CRC error and be dropped rather than show up
@@ -2323,6 +2703,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
2323 if (hw->mac.type == e1000_pchlan) 2703 if (hw->mac.type == e1000_pchlan)
2324 ret_val = e1000_hv_phy_workarounds_ich8lan(hw); 2704 ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
2325 2705
2706out:
2326 return ret_val; 2707 return ret_val;
2327} 2708}
2328 2709
@@ -2627,14 +3008,6 @@ static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
2627 if (ret_val) 3008 if (ret_val)
2628 return ret_val; 3009 return ret_val;
2629 3010
2630 if ((hw->mac.type == e1000_pchlan) && (*speed == SPEED_1000)) {
2631 ret_val = e1000e_write_kmrn_reg(hw,
2632 E1000_KMRNCTRLSTA_K1_CONFIG,
2633 E1000_KMRNCTRLSTA_K1_DISABLE);
2634 if (ret_val)
2635 return ret_val;
2636 }
2637
2638 if ((hw->mac.type == e1000_ich8lan) && 3011 if ((hw->mac.type == e1000_ich8lan) &&
2639 (hw->phy.type == e1000_phy_igp_3) && 3012 (hw->phy.type == e1000_phy_igp_3) &&
2640 (*speed == SPEED_1000)) { 3013 (*speed == SPEED_1000)) {
@@ -2843,9 +3216,8 @@ void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw)
2843 E1000_PHY_CTRL_GBE_DISABLE; 3216 E1000_PHY_CTRL_GBE_DISABLE;
2844 ew32(PHY_CTRL, phy_ctrl); 3217 ew32(PHY_CTRL, phy_ctrl);
2845 3218
2846 /* Workaround SWFLAG unexpectedly set during S0->Sx */
2847 if (hw->mac.type == e1000_pchlan) 3219 if (hw->mac.type == e1000_pchlan)
2848 udelay(500); 3220 e1000_phy_hw_reset_ich8lan(hw);
2849 default: 3221 default:
2850 break; 3222 break;
2851 } 3223 }
@@ -3113,9 +3485,9 @@ static struct e1000_phy_operations ich8_phy_ops = {
3113}; 3485};
3114 3486
3115static struct e1000_nvm_operations ich8_nvm_ops = { 3487static struct e1000_nvm_operations ich8_nvm_ops = {
3116 .acquire_nvm = e1000_acquire_swflag_ich8lan, 3488 .acquire_nvm = e1000_acquire_nvm_ich8lan,
3117 .read_nvm = e1000_read_nvm_ich8lan, 3489 .read_nvm = e1000_read_nvm_ich8lan,
3118 .release_nvm = e1000_release_swflag_ich8lan, 3490 .release_nvm = e1000_release_nvm_ich8lan,
3119 .update_nvm = e1000_update_nvm_checksum_ich8lan, 3491 .update_nvm = e1000_update_nvm_checksum_ich8lan,
3120 .valid_led_default = e1000_valid_led_default_ich8lan, 3492 .valid_led_default = e1000_valid_led_default_ich8lan,
3121 .validate_nvm = e1000_validate_nvm_checksum_ich8lan, 3493 .validate_nvm = e1000_validate_nvm_checksum_ich8lan,
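
Taken together, the ich8lan.c hunks route every flash release in the checksum-update and write-protect paths through nvm->ops.release_nvm, and the ops table now points acquire_nvm/release_nvm at the new e1000_acquire/release_nvm_ich8lan wrappers. A minimal model of that ops-table indirection, with the release funnelled through the function pointer (invented names, not the driver's types):

    #include <stdio.h>

    struct nvm_ops {
        int  (*acquire)(void);
        void (*release)(void);
    };

    static int  model_acquire(void) { puts("acquire"); return 0; }
    static void model_release(void) { puts("release"); }

    static const struct nvm_ops ops = { model_acquire, model_release };

    /* Every error path funnels to one ops.release() call, mirroring the patch. */
    static int update_checksum(int fail_step)
    {
        int ret = ops.acquire();
        if (ret)
            return ret;

        if (fail_step == 1) { ret = -1; goto out; }   /* erase failed  */
        if (fail_step == 2) { ret = -2; goto out; }   /* commit failed */
    out:
        ops.release();
        return ret;
    }

    int main(void)
    {
        printf("result: %d\n", update_checksum(2));
        return 0;
    }
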
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index 994401fd0664..03175b3a2c9e 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -95,13 +95,6 @@ static const u16 e1000_igp_2_cable_length_table[] =
95/* BM PHY Copper Specific Control 1 */ 95/* BM PHY Copper Specific Control 1 */
96#define BM_CS_CTRL1 16 96#define BM_CS_CTRL1 16
97 97
98/* BM PHY Copper Specific Status */
99#define BM_CS_STATUS 17
100#define BM_CS_STATUS_LINK_UP 0x0400
101#define BM_CS_STATUS_RESOLVED 0x0800
102#define BM_CS_STATUS_SPEED_MASK 0xC000
103#define BM_CS_STATUS_SPEED_1000 0x8000
104
105#define HV_MUX_DATA_CTRL PHY_REG(776, 16) 98#define HV_MUX_DATA_CTRL PHY_REG(776, 16)
106#define HV_MUX_DATA_CTRL_GEN_TO_MAC 0x0400 99#define HV_MUX_DATA_CTRL_GEN_TO_MAC 0x0400
107#define HV_MUX_DATA_CTRL_FORCE_SPEED 0x0004 100#define HV_MUX_DATA_CTRL_FORCE_SPEED 0x0004
@@ -164,16 +157,25 @@ s32 e1000e_get_phy_id(struct e1000_hw *hw)
164 * MDIC mode. No harm in trying again in this case since 157 * MDIC mode. No harm in trying again in this case since
165 * the PHY ID is unknown at this point anyway 158 * the PHY ID is unknown at this point anyway
166 */ 159 */
160 ret_val = phy->ops.acquire_phy(hw);
161 if (ret_val)
162 goto out;
167 ret_val = e1000_set_mdio_slow_mode_hv(hw, true); 163 ret_val = e1000_set_mdio_slow_mode_hv(hw, true);
168 if (ret_val) 164 if (ret_val)
169 goto out; 165 goto out;
166 phy->ops.release_phy(hw);
170 167
171 retry_count++; 168 retry_count++;
172 } 169 }
173out: 170out:
174 /* Revert to MDIO fast mode, if applicable */ 171 /* Revert to MDIO fast mode, if applicable */
175 if (retry_count) 172 if (retry_count) {
173 ret_val = phy->ops.acquire_phy(hw);
174 if (ret_val)
175 return ret_val;
176 ret_val = e1000_set_mdio_slow_mode_hv(hw, false); 176 ret_val = e1000_set_mdio_slow_mode_hv(hw, false);
177 phy->ops.release_phy(hw);
178 }
177 179
178 return ret_val; 180 return ret_val;
179} 181}
@@ -354,94 +356,173 @@ s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data)
354} 356}
355 357
356/** 358/**
357 * e1000e_read_phy_reg_igp - Read igp PHY register 359 * __e1000e_read_phy_reg_igp - Read igp PHY register
358 * @hw: pointer to the HW structure 360 * @hw: pointer to the HW structure
359 * @offset: register offset to be read 361 * @offset: register offset to be read
360 * @data: pointer to the read data 362 * @data: pointer to the read data
363 * @locked: semaphore has already been acquired or not
361 * 364 *
362 * Acquires semaphore, if necessary, then reads the PHY register at offset 365 * Acquires semaphore, if necessary, then reads the PHY register at offset
363 * and storing the retrieved information in data. Release any acquired 366 * and stores the retrieved information in data. Release any acquired
364 * semaphores before exiting. 367 * semaphores before exiting.
365 **/ 368 **/
366s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data) 369static s32 __e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data,
370 bool locked)
367{ 371{
368 s32 ret_val; 372 s32 ret_val = 0;
369 373
370 ret_val = hw->phy.ops.acquire_phy(hw); 374 if (!locked) {
371 if (ret_val) 375 if (!(hw->phy.ops.acquire_phy))
372 return ret_val; 376 goto out;
377
378 ret_val = hw->phy.ops.acquire_phy(hw);
379 if (ret_val)
380 goto out;
381 }
373 382
374 if (offset > MAX_PHY_MULTI_PAGE_REG) { 383 if (offset > MAX_PHY_MULTI_PAGE_REG) {
375 ret_val = e1000e_write_phy_reg_mdic(hw, 384 ret_val = e1000e_write_phy_reg_mdic(hw,
376 IGP01E1000_PHY_PAGE_SELECT, 385 IGP01E1000_PHY_PAGE_SELECT,
377 (u16)offset); 386 (u16)offset);
378 if (ret_val) { 387 if (ret_val)
379 hw->phy.ops.release_phy(hw); 388 goto release;
380 return ret_val;
381 }
382 } 389 }
383 390
384 ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, 391 ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
385 data); 392 data);
386
387 hw->phy.ops.release_phy(hw);
388 393
394release:
395 if (!locked)
396 hw->phy.ops.release_phy(hw);
397out:
389 return ret_val; 398 return ret_val;
390} 399}
391 400
392/** 401/**
402 * e1000e_read_phy_reg_igp - Read igp PHY register
403 * @hw: pointer to the HW structure
404 * @offset: register offset to be read
405 * @data: pointer to the read data
406 *
407 * Acquires semaphore then reads the PHY register at offset and stores the
408 * retrieved information in data.
409 * Release the acquired semaphore before exiting.
410 **/
411s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data)
412{
413 return __e1000e_read_phy_reg_igp(hw, offset, data, false);
414}
415
416/**
417 * e1000e_read_phy_reg_igp_locked - Read igp PHY register
418 * @hw: pointer to the HW structure
419 * @offset: register offset to be read
420 * @data: pointer to the read data
421 *
422 * Reads the PHY register at offset and stores the retrieved information
423 * in data. Assumes semaphore already acquired.
424 **/
425s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data)
426{
427 return __e1000e_read_phy_reg_igp(hw, offset, data, true);
428}
429
430/**
393 * e1000e_write_phy_reg_igp - Write igp PHY register 431 * e1000e_write_phy_reg_igp - Write igp PHY register
394 * @hw: pointer to the HW structure 432 * @hw: pointer to the HW structure
395 * @offset: register offset to write to 433 * @offset: register offset to write to
396 * @data: data to write at register offset 434 * @data: data to write at register offset
435 * @locked: semaphore has already been acquired or not
397 * 436 *
398 * Acquires semaphore, if necessary, then writes the data to PHY register 437 * Acquires semaphore, if necessary, then writes the data to PHY register
399 * at the offset. Release any acquired semaphores before exiting. 438 * at the offset. Release any acquired semaphores before exiting.
400 **/ 439 **/
401s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data) 440static s32 __e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data,
441 bool locked)
402{ 442{
403 s32 ret_val; 443 s32 ret_val = 0;
404 444
405 ret_val = hw->phy.ops.acquire_phy(hw); 445 if (!locked) {
406 if (ret_val) 446 if (!(hw->phy.ops.acquire_phy))
407 return ret_val; 447 goto out;
448
449 ret_val = hw->phy.ops.acquire_phy(hw);
450 if (ret_val)
451 goto out;
452 }
408 453
409 if (offset > MAX_PHY_MULTI_PAGE_REG) { 454 if (offset > MAX_PHY_MULTI_PAGE_REG) {
410 ret_val = e1000e_write_phy_reg_mdic(hw, 455 ret_val = e1000e_write_phy_reg_mdic(hw,
411 IGP01E1000_PHY_PAGE_SELECT, 456 IGP01E1000_PHY_PAGE_SELECT,
412 (u16)offset); 457 (u16)offset);
413 if (ret_val) { 458 if (ret_val)
414 hw->phy.ops.release_phy(hw); 459 goto release;
415 return ret_val;
416 }
417 } 460 }
418 461
419 ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, 462 ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
420 data); 463 data);
421 464
422 hw->phy.ops.release_phy(hw); 465release:
466 if (!locked)
467 hw->phy.ops.release_phy(hw);
423 468
469out:
424 return ret_val; 470 return ret_val;
425} 471}
426 472
427/** 473/**
428 * e1000e_read_kmrn_reg - Read kumeran register 474 * e1000e_write_phy_reg_igp - Write igp PHY register
475 * @hw: pointer to the HW structure
476 * @offset: register offset to write to
477 * @data: data to write at register offset
478 *
479 * Acquires semaphore then writes the data to PHY register
480 * at the offset. Release any acquired semaphores before exiting.
481 **/
482s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data)
483{
484 return __e1000e_write_phy_reg_igp(hw, offset, data, false);
485}
486
487/**
488 * e1000e_write_phy_reg_igp_locked - Write igp PHY register
489 * @hw: pointer to the HW structure
490 * @offset: register offset to write to
491 * @data: data to write at register offset
492 *
493 * Writes the data to PHY register at the offset.
494 * Assumes semaphore already acquired.
495 **/
496s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data)
497{
498 return __e1000e_write_phy_reg_igp(hw, offset, data, true);
499}
500
501/**
502 * __e1000_read_kmrn_reg - Read kumeran register
429 * @hw: pointer to the HW structure 503 * @hw: pointer to the HW structure
430 * @offset: register offset to be read 504 * @offset: register offset to be read
431 * @data: pointer to the read data 505 * @data: pointer to the read data
506 * @locked: semaphore has already been acquired or not
432 * 507 *
433 * Acquires semaphore, if necessary. Then reads the PHY register at offset 508 * Acquires semaphore, if necessary. Then reads the PHY register at offset
434 * using the kumeran interface. The information retrieved is stored in data. 509 * using the kumeran interface. The information retrieved is stored in data.
435 * Release any acquired semaphores before exiting. 510 * Release any acquired semaphores before exiting.
436 **/ 511 **/
437s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data) 512static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data,
513 bool locked)
438{ 514{
439 u32 kmrnctrlsta; 515 u32 kmrnctrlsta;
440 s32 ret_val; 516 s32 ret_val = 0;
441 517
442 ret_val = hw->phy.ops.acquire_phy(hw); 518 if (!locked) {
443 if (ret_val) 519 if (!(hw->phy.ops.acquire_phy))
444 return ret_val; 520 goto out;
521
522 ret_val = hw->phy.ops.acquire_phy(hw);
523 if (ret_val)
524 goto out;
525 }
445 526
446 kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & 527 kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
447 E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN; 528 E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
@@ -452,41 +533,111 @@ s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data)
452 kmrnctrlsta = er32(KMRNCTRLSTA); 533 kmrnctrlsta = er32(KMRNCTRLSTA);
453 *data = (u16)kmrnctrlsta; 534 *data = (u16)kmrnctrlsta;
454 535
455 hw->phy.ops.release_phy(hw); 536 if (!locked)
537 hw->phy.ops.release_phy(hw);
456 538
539out:
457 return ret_val; 540 return ret_val;
458} 541}
459 542
460/** 543/**
461 * e1000e_write_kmrn_reg - Write kumeran register 544 * e1000e_read_kmrn_reg - Read kumeran register
545 * @hw: pointer to the HW structure
546 * @offset: register offset to be read
547 * @data: pointer to the read data
548 *
549 * Acquires semaphore then reads the PHY register at offset using the
550 * kumeran interface. The information retrieved is stored in data.
551 * Release the acquired semaphore before exiting.
552 **/
553s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data)
554{
555 return __e1000_read_kmrn_reg(hw, offset, data, false);
556}
557
558/**
559 * e1000e_read_kmrn_reg_locked - Read kumeran register
560 * @hw: pointer to the HW structure
561 * @offset: register offset to be read
562 * @data: pointer to the read data
563 *
564 * Reads the PHY register at offset using the kumeran interface. The
565 * information retrieved is stored in data.
566 * Assumes semaphore already acquired.
567 **/
568s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data)
569{
570 return __e1000_read_kmrn_reg(hw, offset, data, true);
571}
572
573/**
574 * __e1000_write_kmrn_reg - Write kumeran register
462 * @hw: pointer to the HW structure 575 * @hw: pointer to the HW structure
463 * @offset: register offset to write to 576 * @offset: register offset to write to
464 * @data: data to write at register offset 577 * @data: data to write at register offset
578 * @locked: semaphore has already been acquired or not
465 * 579 *
466 * Acquires semaphore, if necessary. Then write the data to PHY register 580 * Acquires semaphore, if necessary. Then write the data to PHY register
467 * at the offset using the kumeran interface. Release any acquired semaphores 581 * at the offset using the kumeran interface. Release any acquired semaphores
468 * before exiting. 582 * before exiting.
469 **/ 583 **/
470s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data) 584static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data,
585 bool locked)
471{ 586{
472 u32 kmrnctrlsta; 587 u32 kmrnctrlsta;
473 s32 ret_val; 588 s32 ret_val = 0;
474 589
475 ret_val = hw->phy.ops.acquire_phy(hw); 590 if (!locked) {
476 if (ret_val) 591 if (!(hw->phy.ops.acquire_phy))
477 return ret_val; 592 goto out;
593
594 ret_val = hw->phy.ops.acquire_phy(hw);
595 if (ret_val)
596 goto out;
597 }
478 598
479 kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & 599 kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
480 E1000_KMRNCTRLSTA_OFFSET) | data; 600 E1000_KMRNCTRLSTA_OFFSET) | data;
481 ew32(KMRNCTRLSTA, kmrnctrlsta); 601 ew32(KMRNCTRLSTA, kmrnctrlsta);
482 602
483 udelay(2); 603 udelay(2);
484 hw->phy.ops.release_phy(hw);
485 604
605 if (!locked)
606 hw->phy.ops.release_phy(hw);
607
608out:
486 return ret_val; 609 return ret_val;
487} 610}
488 611
489/** 612/**
613 * e1000e_write_kmrn_reg - Write kumeran register
614 * @hw: pointer to the HW structure
615 * @offset: register offset to write to
616 * @data: data to write at register offset
617 *
618 * Acquires semaphore then writes the data to the PHY register at the offset
619 * using the kumeran interface. Release the acquired semaphore before exiting.
620 **/
621s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data)
622{
623 return __e1000_write_kmrn_reg(hw, offset, data, false);
624}
625
626/**
627 * e1000e_write_kmrn_reg_locked - Write kumeran register
628 * @hw: pointer to the HW structure
629 * @offset: register offset to write to
630 * @data: data to write at register offset
631 *
632 * Write the data to PHY register at the offset using the kumeran interface.
633 * Assumes semaphore already acquired.
634 **/
635s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data)
636{
637 return __e1000_write_kmrn_reg(hw, offset, data, true);
638}
639
640/**
490 * e1000_copper_link_setup_82577 - Setup 82577 PHY for copper link 641 * e1000_copper_link_setup_82577 - Setup 82577 PHY for copper link
491 * @hw: pointer to the HW structure 642 * @hw: pointer to the HW structure
492 * 643 *
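
The igp and kumeran accessors above are refactored into a static __helper(hw, offset, data, locked) that takes the PHY semaphore only when locked is false, plus thin public and _locked wrappers. A standalone model of that pattern, with a pthread mutex standing in for the hardware semaphore (invented names; not the driver code):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t phy_sem = PTHREAD_MUTEX_INITIALIZER;
    static unsigned short phy_reg = 42;            /* fake register backing store */

    static int __read_reg(unsigned short *data, bool locked)
    {
        if (!locked)
            pthread_mutex_lock(&phy_sem);          /* acquire only if the caller did not */

        *data = phy_reg;

        if (!locked)
            pthread_mutex_unlock(&phy_sem);
        return 0;
    }

    static int read_reg(unsigned short *data)        { return __read_reg(data, false); }
    static int read_reg_locked(unsigned short *data) { return __read_reg(data, true);  }

    int main(void)
    {
        unsigned short v;

        read_reg(&v);                              /* caller does not hold the semaphore */

        pthread_mutex_lock(&phy_sem);              /* caller already holds it */
        read_reg_locked(&v);
        pthread_mutex_unlock(&phy_sem);

        printf("value: %u\n", v);
        return 0;
    }

The _locked variants exist so code that already owns the semaphore can reuse the same register-access body without re-taking it.
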
@@ -2105,6 +2256,10 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
2105 u32 page = offset >> IGP_PAGE_SHIFT; 2256 u32 page = offset >> IGP_PAGE_SHIFT;
2106 u32 page_shift = 0; 2257 u32 page_shift = 0;
2107 2258
2259 ret_val = hw->phy.ops.acquire_phy(hw);
2260 if (ret_val)
2261 return ret_val;
2262
2108 /* Page 800 works differently than the rest so it has its own func */ 2263 /* Page 800 works differently than the rest so it has its own func */
2109 if (page == BM_WUC_PAGE) { 2264 if (page == BM_WUC_PAGE) {
2110 ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data, 2265 ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data,
@@ -2112,10 +2267,6 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
2112 goto out; 2267 goto out;
2113 } 2268 }
2114 2269
2115 ret_val = hw->phy.ops.acquire_phy(hw);
2116 if (ret_val)
2117 goto out;
2118
2119 hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset); 2270 hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
2120 2271
2121 if (offset > MAX_PHY_MULTI_PAGE_REG) { 2272 if (offset > MAX_PHY_MULTI_PAGE_REG) {
@@ -2135,18 +2286,15 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
2135 /* Page is shifted left, PHY expects (page x 32) */ 2286 /* Page is shifted left, PHY expects (page x 32) */
2136 ret_val = e1000e_write_phy_reg_mdic(hw, page_select, 2287 ret_val = e1000e_write_phy_reg_mdic(hw, page_select,
2137 (page << page_shift)); 2288 (page << page_shift));
2138 if (ret_val) { 2289 if (ret_val)
2139 hw->phy.ops.release_phy(hw);
2140 goto out; 2290 goto out;
2141 }
2142 } 2291 }
2143 2292
2144 ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, 2293 ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
2145 data); 2294 data);
2146 2295
2147 hw->phy.ops.release_phy(hw);
2148
2149out: 2296out:
2297 hw->phy.ops.release_phy(hw);
2150 return ret_val; 2298 return ret_val;
2151} 2299}
2152 2300
@@ -2167,6 +2315,10 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
2167 u32 page = offset >> IGP_PAGE_SHIFT; 2315 u32 page = offset >> IGP_PAGE_SHIFT;
2168 u32 page_shift = 0; 2316 u32 page_shift = 0;
2169 2317
2318 ret_val = hw->phy.ops.acquire_phy(hw);
2319 if (ret_val)
2320 return ret_val;
2321
2170 /* Page 800 works differently than the rest so it has its own func */ 2322 /* Page 800 works differently than the rest so it has its own func */
2171 if (page == BM_WUC_PAGE) { 2323 if (page == BM_WUC_PAGE) {
2172 ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data, 2324 ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data,
@@ -2174,10 +2326,6 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
2174 goto out; 2326 goto out;
2175 } 2327 }
2176 2328
2177 ret_val = hw->phy.ops.acquire_phy(hw);
2178 if (ret_val)
2179 goto out;
2180
2181 hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset); 2329 hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
2182 2330
2183 if (offset > MAX_PHY_MULTI_PAGE_REG) { 2331 if (offset > MAX_PHY_MULTI_PAGE_REG) {
@@ -2197,17 +2345,14 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
2197 /* Page is shifted left, PHY expects (page x 32) */ 2345 /* Page is shifted left, PHY expects (page x 32) */
2198 ret_val = e1000e_write_phy_reg_mdic(hw, page_select, 2346 ret_val = e1000e_write_phy_reg_mdic(hw, page_select,
2199 (page << page_shift)); 2347 (page << page_shift));
2200 if (ret_val) { 2348 if (ret_val)
2201 hw->phy.ops.release_phy(hw);
2202 goto out; 2349 goto out;
2203 }
2204 } 2350 }
2205 2351
2206 ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, 2352 ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
2207 data); 2353 data);
2208 hw->phy.ops.release_phy(hw);
2209
2210out: 2354out:
2355 hw->phy.ops.release_phy(hw);
2211 return ret_val; 2356 return ret_val;
2212} 2357}
2213 2358
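
In the BM paths above the semaphore is now taken once at the top of the function, before the page-800 special case, and released at a single point on the way out; e1000_access_phy_wakeup_reg_bm is therefore documented as assuming the lock is already held and no longer acquires it itself. A small model of that "caller locks, helper relies on the precondition" convention (userspace sketch, invented names):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>

    static bool sem_held;

    static void acquire(void) { sem_held = true; }
    static void release(void) { sem_held = false; }

    /* Helper documented to require the semaphore; it never takes it itself. */
    static int access_wakeup_reg(unsigned *data)
    {
        assert(sem_held);
        *data = 0x1234;
        return 0;
    }

    static int read_reg_bm(unsigned *data)
    {
        int ret;

        acquire();                    /* taken once, up front */
        ret = access_wakeup_reg(data);
        release();                    /* single release on the way out */
        return ret;
    }

    int main(void)
    {
        unsigned v;

        read_reg_bm(&v);
        printf("0x%x\n", v);
        return 0;
    }
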
@@ -2226,17 +2371,17 @@ s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data)
2226 s32 ret_val; 2371 s32 ret_val;
2227 u16 page = (u16)(offset >> IGP_PAGE_SHIFT); 2372 u16 page = (u16)(offset >> IGP_PAGE_SHIFT);
2228 2373
2374 ret_val = hw->phy.ops.acquire_phy(hw);
2375 if (ret_val)
2376 return ret_val;
2377
2229 /* Page 800 works differently than the rest so it has its own func */ 2378 /* Page 800 works differently than the rest so it has its own func */
2230 if (page == BM_WUC_PAGE) { 2379 if (page == BM_WUC_PAGE) {
2231 ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data, 2380 ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data,
2232 true); 2381 true);
2233 return ret_val; 2382 goto out;
2234 } 2383 }
2235 2384
2236 ret_val = hw->phy.ops.acquire_phy(hw);
2237 if (ret_val)
2238 return ret_val;
2239
2240 hw->phy.addr = 1; 2385 hw->phy.addr = 1;
2241 2386
2242 if (offset > MAX_PHY_MULTI_PAGE_REG) { 2387 if (offset > MAX_PHY_MULTI_PAGE_REG) {
@@ -2245,16 +2390,14 @@ s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data)
2245 ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT, 2390 ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT,
2246 page); 2391 page);
2247 2392
2248 if (ret_val) { 2393 if (ret_val)
2249 hw->phy.ops.release_phy(hw); 2394 goto out;
2250 return ret_val;
2251 }
2252 } 2395 }
2253 2396
2254 ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, 2397 ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
2255 data); 2398 data);
2399out:
2256 hw->phy.ops.release_phy(hw); 2400 hw->phy.ops.release_phy(hw);
2257
2258 return ret_val; 2401 return ret_val;
2259} 2402}
2260 2403
@@ -2272,17 +2415,17 @@ s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data)
2272 s32 ret_val; 2415 s32 ret_val;
2273 u16 page = (u16)(offset >> IGP_PAGE_SHIFT); 2416 u16 page = (u16)(offset >> IGP_PAGE_SHIFT);
2274 2417
2418 ret_val = hw->phy.ops.acquire_phy(hw);
2419 if (ret_val)
2420 return ret_val;
2421
2275 /* Page 800 works differently than the rest so it has its own func */ 2422 /* Page 800 works differently than the rest so it has its own func */
2276 if (page == BM_WUC_PAGE) { 2423 if (page == BM_WUC_PAGE) {
2277 ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data, 2424 ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data,
2278 false); 2425 false);
2279 return ret_val; 2426 goto out;
2280 } 2427 }
2281 2428
2282 ret_val = hw->phy.ops.acquire_phy(hw);
2283 if (ret_val)
2284 return ret_val;
2285
2286 hw->phy.addr = 1; 2429 hw->phy.addr = 1;
2287 2430
2288 if (offset > MAX_PHY_MULTI_PAGE_REG) { 2431 if (offset > MAX_PHY_MULTI_PAGE_REG) {
@@ -2290,17 +2433,15 @@ s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data)
2290 ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT, 2433 ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT,
2291 page); 2434 page);
2292 2435
2293 if (ret_val) { 2436 if (ret_val)
2294 hw->phy.ops.release_phy(hw); 2437 goto out;
2295 return ret_val;
2296 }
2297 } 2438 }
2298 2439
2299 ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, 2440 ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
2300 data); 2441 data);
2301 2442
2443out:
2302 hw->phy.ops.release_phy(hw); 2444 hw->phy.ops.release_phy(hw);
2303
2304 return ret_val; 2445 return ret_val;
2305} 2446}
2306 2447
@@ -2320,6 +2461,8 @@ s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data)
2320 * 3) Write the address using the address opcode (0x11) 2461 * 3) Write the address using the address opcode (0x11)
2321 * 4) Read or write the data using the data opcode (0x12) 2462 * 4) Read or write the data using the data opcode (0x12)
2322 * 5) Restore 769_17.2 to its original value 2463 * 5) Restore 769_17.2 to its original value
2464 *
2465 * Assumes semaphore already acquired.
2323 **/ 2466 **/
2324static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset, 2467static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
2325 u16 *data, bool read) 2468 u16 *data, bool read)
@@ -2327,20 +2470,12 @@ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
2327 s32 ret_val; 2470 s32 ret_val;
2328 u16 reg = BM_PHY_REG_NUM(offset); 2471 u16 reg = BM_PHY_REG_NUM(offset);
2329 u16 phy_reg = 0; 2472 u16 phy_reg = 0;
2330 u8 phy_acquired = 1;
2331
2332 2473
2333 /* Gig must be disabled for MDIO accesses to page 800 */ 2474 /* Gig must be disabled for MDIO accesses to page 800 */
2334 if ((hw->mac.type == e1000_pchlan) && 2475 if ((hw->mac.type == e1000_pchlan) &&
2335 (!(er32(PHY_CTRL) & E1000_PHY_CTRL_GBE_DISABLE))) 2476 (!(er32(PHY_CTRL) & E1000_PHY_CTRL_GBE_DISABLE)))
2336 hw_dbg(hw, "Attempting to access page 800 while gig enabled\n"); 2477 hw_dbg(hw, "Attempting to access page 800 while gig enabled\n");
2337 2478
2338 ret_val = hw->phy.ops.acquire_phy(hw);
2339 if (ret_val) {
2340 phy_acquired = 0;
2341 goto out;
2342 }
2343
2344 /* All operations in this function are phy address 1 */ 2479 /* All operations in this function are phy address 1 */
2345 hw->phy.addr = 1; 2480 hw->phy.addr = 1;
2346 2481
@@ -2397,8 +2532,6 @@ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
2397 ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg); 2532 ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg);
2398 2533
2399out: 2534out:
2400 if (phy_acquired == 1)
2401 hw->phy.ops.release_phy(hw);
2402 return ret_val; 2535 return ret_val;
2403} 2536}
2404 2537
@@ -2439,52 +2572,63 @@ static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active)
2439 return 0; 2572 return 0;
2440} 2573}
2441 2574
2575/**
2576 * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
2577 * @hw: pointer to the HW structure
2578 * @slow: true for slow mode, false for normal mode
2579 *
2580 * Assumes semaphore already acquired.
2581 **/
2442s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw, bool slow) 2582s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw, bool slow)
2443{ 2583{
2444 s32 ret_val = 0; 2584 s32 ret_val = 0;
2445 u16 data = 0; 2585 u16 data = 0;
2446 2586
2447 ret_val = hw->phy.ops.acquire_phy(hw);
2448 if (ret_val)
2449 return ret_val;
2450
2451 /* Set MDIO mode - page 769, register 16: 0x2580==slow, 0x2180==fast */ 2587 /* Set MDIO mode - page 769, register 16: 0x2580==slow, 0x2180==fast */
2452 hw->phy.addr = 1; 2588 hw->phy.addr = 1;
2453 ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 2589 ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
2454 (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT)); 2590 (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT));
2455 if (ret_val) { 2591 if (ret_val)
2456 hw->phy.ops.release_phy(hw); 2592 goto out;
2457 return ret_val; 2593
2458 }
2459 ret_val = e1000e_write_phy_reg_mdic(hw, BM_CS_CTRL1, 2594 ret_val = e1000e_write_phy_reg_mdic(hw, BM_CS_CTRL1,
2460 (0x2180 | (slow << 10))); 2595 (0x2180 | (slow << 10)));
2596 if (ret_val)
2597 goto out;
2461 2598
2462 /* dummy read when reverting to fast mode - throw away result */ 2599 /* dummy read when reverting to fast mode - throw away result */
2463 if (!slow) 2600 if (!slow)
2464 e1000e_read_phy_reg_mdic(hw, BM_CS_CTRL1, &data); 2601 ret_val = e1000e_read_phy_reg_mdic(hw, BM_CS_CTRL1, &data);
2465
2466 hw->phy.ops.release_phy(hw);
2467 2602
2603out:
2468 return ret_val; 2604 return ret_val;
2469} 2605}
2470 2606
2471/** 2607/**
2472 * e1000_read_phy_reg_hv - Read HV PHY register 2608 * __e1000_read_phy_reg_hv - Read HV PHY register
2473 * @hw: pointer to the HW structure 2609 * @hw: pointer to the HW structure
2474 * @offset: register offset to be read 2610 * @offset: register offset to be read
2475 * @data: pointer to the read data 2611 * @data: pointer to the read data
2612 * @locked: semaphore has already been acquired or not
2476 * 2613 *
2477 * Acquires semaphore, if necessary, then reads the PHY register at offset 2614 * Acquires semaphore, if necessary, then reads the PHY register at offset
2478 * and storing the retrieved information in data. Release any acquired 2615 * and stores the retrieved information in data. Release any acquired
2479 * semaphore before exiting. 2616 * semaphore before exiting.
2480 **/ 2617 **/
2481s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data) 2618static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
2619 bool locked)
2482{ 2620{
2483 s32 ret_val; 2621 s32 ret_val;
2484 u16 page = BM_PHY_REG_PAGE(offset); 2622 u16 page = BM_PHY_REG_PAGE(offset);
2485 u16 reg = BM_PHY_REG_NUM(offset); 2623 u16 reg = BM_PHY_REG_NUM(offset);
2486 bool in_slow_mode = false; 2624 bool in_slow_mode = false;
2487 2625
2626 if (!locked) {
2627 ret_val = hw->phy.ops.acquire_phy(hw);
2628 if (ret_val)
2629 return ret_val;
2630 }
2631
2488 /* Workaround failure in MDIO access while cable is disconnected */ 2632 /* Workaround failure in MDIO access while cable is disconnected */
2489 if ((hw->phy.type == e1000_phy_82577) && 2633 if ((hw->phy.type == e1000_phy_82577) &&
2490 !(er32(STATUS) & E1000_STATUS_LU)) { 2634 !(er32(STATUS) & E1000_STATUS_LU)) {
@@ -2508,10 +2652,6 @@ s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data)
2508 goto out; 2652 goto out;
2509 } 2653 }
2510 2654
2511 ret_val = hw->phy.ops.acquire_phy(hw);
2512 if (ret_val)
2513 goto out;
2514
2515 hw->phy.addr = e1000_get_phy_addr_for_hv_page(page); 2655 hw->phy.addr = e1000_get_phy_addr_for_hv_page(page);
2516 2656
2517 if (page == HV_INTC_FC_PAGE_START) 2657 if (page == HV_INTC_FC_PAGE_START)
@@ -2529,42 +2669,76 @@ s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data)
2529 ret_val = e1000e_write_phy_reg_mdic(hw, 2669 ret_val = e1000e_write_phy_reg_mdic(hw,
2530 IGP01E1000_PHY_PAGE_SELECT, 2670 IGP01E1000_PHY_PAGE_SELECT,
2531 (page << IGP_PAGE_SHIFT)); 2671 (page << IGP_PAGE_SHIFT));
2532 if (ret_val) {
2533 hw->phy.ops.release_phy(hw);
2534 goto out;
2535 }
2536 hw->phy.addr = phy_addr; 2672 hw->phy.addr = phy_addr;
2537 } 2673 }
2538 } 2674 }
2539 2675
2540 ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, 2676 ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
2541 data); 2677 data);
2542 hw->phy.ops.release_phy(hw);
2543
2544out: 2678out:
2545 /* Revert to MDIO fast mode, if applicable */ 2679 /* Revert to MDIO fast mode, if applicable */
2546 if ((hw->phy.type == e1000_phy_82577) && in_slow_mode) 2680 if ((hw->phy.type == e1000_phy_82577) && in_slow_mode)
2547 ret_val = e1000_set_mdio_slow_mode_hv(hw, false); 2681 ret_val = e1000_set_mdio_slow_mode_hv(hw, false);
2548 2682
2683 if (!locked)
2684 hw->phy.ops.release_phy(hw);
2685
2549 return ret_val; 2686 return ret_val;
2550} 2687}
2551 2688
2552/** 2689/**
2553 * e1000_write_phy_reg_hv - Write HV PHY register 2690 * e1000_read_phy_reg_hv - Read HV PHY register
2691 * @hw: pointer to the HW structure
2692 * @offset: register offset to be read
2693 * @data: pointer to the read data
2694 *
2695 * Acquires semaphore then reads the PHY register at offset and stores
2696 * the retrieved information in data. Release the acquired semaphore
2697 * before exiting.
2698 **/
2699s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data)
2700{
2701 return __e1000_read_phy_reg_hv(hw, offset, data, false);
2702}
2703
2704/**
2705 * e1000_read_phy_reg_hv_locked - Read HV PHY register
2706 * @hw: pointer to the HW structure
2707 * @offset: register offset to be read
2708 * @data: pointer to the read data
2709 *
2710 * Reads the PHY register at offset and stores the retrieved information
2711 * in data. Assumes semaphore already acquired.
2712 **/
2713s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 *data)
2714{
2715 return __e1000_read_phy_reg_hv(hw, offset, data, true);
2716}
2717
2718/**
2719 * __e1000_write_phy_reg_hv - Write HV PHY register
2554 * @hw: pointer to the HW structure 2720 * @hw: pointer to the HW structure
2555 * @offset: register offset to write to 2721 * @offset: register offset to write to
2556 * @data: data to write at register offset 2722 * @data: data to write at register offset
2723 * @locked: semaphore has already been acquired or not
2557 * 2724 *
2558 * Acquires semaphore, if necessary, then writes the data to PHY register 2725 * Acquires semaphore, if necessary, then writes the data to PHY register
2559 * at the offset. Release any acquired semaphores before exiting. 2726 * at the offset. Release any acquired semaphores before exiting.
2560 **/ 2727 **/
2561s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data) 2728static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
2729 bool locked)
2562{ 2730{
2563 s32 ret_val; 2731 s32 ret_val;
2564 u16 page = BM_PHY_REG_PAGE(offset); 2732 u16 page = BM_PHY_REG_PAGE(offset);
2565 u16 reg = BM_PHY_REG_NUM(offset); 2733 u16 reg = BM_PHY_REG_NUM(offset);
2566 bool in_slow_mode = false; 2734 bool in_slow_mode = false;
2567 2735
2736 if (!locked) {
2737 ret_val = hw->phy.ops.acquire_phy(hw);
2738 if (ret_val)
2739 return ret_val;
2740 }
2741
2568 /* Workaround failure in MDIO access while cable is disconnected */ 2742 /* Workaround failure in MDIO access while cable is disconnected */
2569 if ((hw->phy.type == e1000_phy_82577) && 2743 if ((hw->phy.type == e1000_phy_82577) &&
2570 !(er32(STATUS) & E1000_STATUS_LU)) { 2744 !(er32(STATUS) & E1000_STATUS_LU)) {
@@ -2588,10 +2762,6 @@ s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data)
2588 goto out; 2762 goto out;
2589 } 2763 }
2590 2764
2591 ret_val = hw->phy.ops.acquire_phy(hw);
2592 if (ret_val)
2593 goto out;
2594
2595 hw->phy.addr = e1000_get_phy_addr_for_hv_page(page); 2765 hw->phy.addr = e1000_get_phy_addr_for_hv_page(page);
2596 2766
2597 if (page == HV_INTC_FC_PAGE_START) 2767 if (page == HV_INTC_FC_PAGE_START)
@@ -2607,15 +2777,10 @@ s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data)
2607 ((MAX_PHY_REG_ADDRESS & reg) == 0) && 2777 ((MAX_PHY_REG_ADDRESS & reg) == 0) &&
2608 (data & (1 << 11))) { 2778 (data & (1 << 11))) {
2609 u16 data2 = 0x7EFF; 2779 u16 data2 = 0x7EFF;
2610 hw->phy.ops.release_phy(hw);
2611 ret_val = e1000_access_phy_debug_regs_hv(hw, (1 << 6) | 0x3, 2780 ret_val = e1000_access_phy_debug_regs_hv(hw, (1 << 6) | 0x3,
2612 &data2, false); 2781 &data2, false);
2613 if (ret_val) 2782 if (ret_val)
2614 goto out; 2783 goto out;
2615
2616 ret_val = hw->phy.ops.acquire_phy(hw);
2617 if (ret_val)
2618 goto out;
2619 } 2784 }
2620 2785
2621 if (reg > MAX_PHY_MULTI_PAGE_REG) { 2786 if (reg > MAX_PHY_MULTI_PAGE_REG) {
@@ -2630,27 +2795,53 @@ s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data)
2630 ret_val = e1000e_write_phy_reg_mdic(hw, 2795 ret_val = e1000e_write_phy_reg_mdic(hw,
2631 IGP01E1000_PHY_PAGE_SELECT, 2796 IGP01E1000_PHY_PAGE_SELECT,
2632 (page << IGP_PAGE_SHIFT)); 2797 (page << IGP_PAGE_SHIFT));
2633 if (ret_val) {
2634 hw->phy.ops.release_phy(hw);
2635 goto out;
2636 }
2637 hw->phy.addr = phy_addr; 2798 hw->phy.addr = phy_addr;
2638 } 2799 }
2639 } 2800 }
2640 2801
2641 ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, 2802 ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
2642 data); 2803 data);
2643 hw->phy.ops.release_phy(hw);
2644 2804
2645out: 2805out:
2646 /* Revert to MDIO fast mode, if applicable */ 2806 /* Revert to MDIO fast mode, if applicable */
2647 if ((hw->phy.type == e1000_phy_82577) && in_slow_mode) 2807 if ((hw->phy.type == e1000_phy_82577) && in_slow_mode)
2648 ret_val = e1000_set_mdio_slow_mode_hv(hw, false); 2808 ret_val = e1000_set_mdio_slow_mode_hv(hw, false);
2649 2809
2810 if (!locked)
2811 hw->phy.ops.release_phy(hw);
2812
2650 return ret_val; 2813 return ret_val;
2651} 2814}
2652 2815
2653/** 2816/**
2817 * e1000_write_phy_reg_hv - Write HV PHY register
2818 * @hw: pointer to the HW structure
2819 * @offset: register offset to write to
2820 * @data: data to write at register offset
2821 *
2822 * Acquires semaphore then writes the data to PHY register at the offset.
2823 * Release the acquired semaphores before exiting.
2824 **/
2825s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data)
2826{
2827 return __e1000_write_phy_reg_hv(hw, offset, data, false);
2828}
2829
2830/**
2831 * e1000_write_phy_reg_hv_locked - Write HV PHY register
2832 * @hw: pointer to the HW structure
2833 * @offset: register offset to write to
2834 * @data: data to write at register offset
2835 *
2836 * Writes the data to PHY register at the offset. Assumes semaphore
2837 * already acquired.
2838 **/
2839s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 data)
2840{
2841 return __e1000_write_phy_reg_hv(hw, offset, data, true);
2842}
2843
2844/**
2654 * e1000_get_phy_addr_for_hv_page - Get PHY address based on page 2845 * e1000_get_phy_addr_for_hv_page - Get PHY address based on page
2655 * @page: page to be accessed 2846 * @page: page to be accessed
2656 **/ 2847 **/
@@ -2671,10 +2862,9 @@ static u32 e1000_get_phy_addr_for_hv_page(u32 page)
2671 * @data: pointer to the data to be read or written 2862 * @data: pointer to the data to be read or written
2672 * @read: determines if operation is a read or a write 2863 * @read: determines if operation is a read or a write
2673 * 2864 *
2674 * Acquires semaphore, if necessary, then reads the PHY register at offset 2865 * Reads the PHY register at offset and stores the retrieved information
2675 * and storing the retrieved information in data. Release any acquired 2866 * in data. Assumes semaphore already acquired. Note that the procedure
2676 * semaphores before exiting. Note that the procedure to read these regs 2867 * to read these regs uses the address port and data port to read/write.
2677 * uses the address port and data port to read/write.
2678 **/ 2868 **/
2679static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset, 2869static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
2680 u16 *data, bool read) 2870 u16 *data, bool read)
@@ -2682,20 +2872,12 @@ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
2682 s32 ret_val; 2872 s32 ret_val;
2683 u32 addr_reg = 0; 2873 u32 addr_reg = 0;
2684 u32 data_reg = 0; 2874 u32 data_reg = 0;
2685 u8 phy_acquired = 1;
2686 2875
2687 /* This takes care of the difference with desktop vs mobile phy */ 2876 /* This takes care of the difference with desktop vs mobile phy */
2688 addr_reg = (hw->phy.type == e1000_phy_82578) ? 2877 addr_reg = (hw->phy.type == e1000_phy_82578) ?
2689 I82578_ADDR_REG : I82577_ADDR_REG; 2878 I82578_ADDR_REG : I82577_ADDR_REG;
2690 data_reg = addr_reg + 1; 2879 data_reg = addr_reg + 1;
2691 2880
2692 ret_val = hw->phy.ops.acquire_phy(hw);
2693 if (ret_val) {
2694 hw_dbg(hw, "Could not acquire PHY\n");
2695 phy_acquired = 0;
2696 goto out;
2697 }
2698
2699 /* All operations in this function are phy address 2 */ 2881 /* All operations in this function are phy address 2 */
2700 hw->phy.addr = 2; 2882 hw->phy.addr = 2;
2701 2883
@@ -2718,8 +2900,6 @@ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
2718 } 2900 }
2719 2901
2720out: 2902out:
2721 if (phy_acquired == 1)
2722 hw->phy.ops.release_phy(hw);
2723 return ret_val; 2903 return ret_val;
2724} 2904}
2725 2905
diff --git a/drivers/net/fsl_pq_mdio.c b/drivers/net/fsl_pq_mdio.c
index 6ac464866972..efbf67689eca 100644
--- a/drivers/net/fsl_pq_mdio.c
+++ b/drivers/net/fsl_pq_mdio.c
@@ -427,3 +427,4 @@ void fsl_pq_mdio_exit(void)
427 of_unregister_platform_driver(&fsl_pq_mdio_driver); 427 of_unregister_platform_driver(&fsl_pq_mdio_driver);
428} 428}
429module_exit(fsl_pq_mdio_exit); 429module_exit(fsl_pq_mdio_exit);
430MODULE_LICENSE("GPL");
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index 801f088c134f..030913f8bd26 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -98,12 +98,13 @@ static void ri_tasklet(unsigned long dev)
98 stats->tx_packets++; 98 stats->tx_packets++;
99 stats->tx_bytes +=skb->len; 99 stats->tx_bytes +=skb->len;
100 100
101 skb->dev = __dev_get_by_index(&init_net, skb->iif); 101 skb->dev = dev_get_by_index(&init_net, skb->iif);
102 if (!skb->dev) { 102 if (!skb->dev) {
103 dev_kfree_skb(skb); 103 dev_kfree_skb(skb);
104 stats->tx_dropped++; 104 stats->tx_dropped++;
105 break; 105 break;
106 } 106 }
107 dev_put(skb->dev);
107 skb->iif = _dev->ifindex; 108 skb->iif = _dev->ifindex;
108 109
109 if (from & AT_EGRESS) { 110 if (from & AT_EGRESS) {
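
The ifb change swaps __dev_get_by_index(), which returns a device pointer without taking a reference, for dev_get_by_index(), which does take one, and then balances it with dev_put() once the skb has been re-targeted. A toy refcount model of that hold/put discipline (this is not the kernel API, just the shape of it):

    #include <stdio.h>

    struct device { int refcnt; };

    static struct device table[2] = { { 1 }, { 1 } };

    /* Lookup that hands back a referenced object, like dev_get_by_index(). */
    static struct device *get_by_index(int idx)
    {
        if (idx < 0 || idx > 1)
            return NULL;
        table[idx].refcnt++;
        return &table[idx];
    }

    static void put(struct device *d) { d->refcnt--; }

    int main(void)
    {
        struct device *d = get_by_index(0);

        if (!d)
            return 1;                 /* mirrors the dev_kfree_skb + tx_dropped branch */

        /* ... use d while the reference pins it ... */
        put(d);                       /* balance the lookup, as the patch does */

        printf("refcnt back to %d\n", d->refcnt);
        return 0;
    }
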
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index deaea8fa1032..b243ed3b0c36 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -732,7 +732,7 @@ static int igb_set_ringparam(struct net_device *netdev,
732{ 732{
733 struct igb_adapter *adapter = netdev_priv(netdev); 733 struct igb_adapter *adapter = netdev_priv(netdev);
734 struct igb_ring *temp_ring; 734 struct igb_ring *temp_ring;
735 int i, err; 735 int i, err = 0;
736 u32 new_rx_count, new_tx_count; 736 u32 new_rx_count, new_tx_count;
737 737
738 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) 738 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
@@ -752,18 +752,30 @@ static int igb_set_ringparam(struct net_device *netdev,
752 return 0; 752 return 0;
753 } 753 }
754 754
755 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
756 msleep(1);
757
758 if (!netif_running(adapter->netdev)) {
759 for (i = 0; i < adapter->num_tx_queues; i++)
760 adapter->tx_ring[i].count = new_tx_count;
761 for (i = 0; i < adapter->num_rx_queues; i++)
762 adapter->rx_ring[i].count = new_rx_count;
763 adapter->tx_ring_count = new_tx_count;
764 adapter->rx_ring_count = new_rx_count;
765 goto clear_reset;
766 }
767
755 if (adapter->num_tx_queues > adapter->num_rx_queues) 768 if (adapter->num_tx_queues > adapter->num_rx_queues)
756 temp_ring = vmalloc(adapter->num_tx_queues * sizeof(struct igb_ring)); 769 temp_ring = vmalloc(adapter->num_tx_queues * sizeof(struct igb_ring));
757 else 770 else
758 temp_ring = vmalloc(adapter->num_rx_queues * sizeof(struct igb_ring)); 771 temp_ring = vmalloc(adapter->num_rx_queues * sizeof(struct igb_ring));
759 if (!temp_ring)
760 return -ENOMEM;
761 772
762 while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) 773 if (!temp_ring) {
763 msleep(1); 774 err = -ENOMEM;
775 goto clear_reset;
776 }
764 777
765 if (netif_running(adapter->netdev)) 778 igb_down(adapter);
766 igb_down(adapter);
767 779
768 /* 780 /*
769 * We can't just free everything and then setup again, 781 * We can't just free everything and then setup again,
@@ -820,14 +832,11 @@ static int igb_set_ringparam(struct net_device *netdev,
820 832
821 adapter->rx_ring_count = new_rx_count; 833 adapter->rx_ring_count = new_rx_count;
822 } 834 }
823
824 err = 0;
825err_setup: 835err_setup:
826 if (netif_running(adapter->netdev)) 836 igb_up(adapter);
827 igb_up(adapter);
828
829 clear_bit(__IGB_RESETTING, &adapter->state);
830 vfree(temp_ring); 837 vfree(temp_ring);
838clear_reset:
839 clear_bit(__IGB_RESETTING, &adapter->state);
831 return err; 840 return err;
832} 841}
833 842
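
The reshuffled igb_set_ringparam now claims the __IGB_RESETTING bit first, handles the interface-down case by simply storing the new descriptor counts, and only then allocates the scratch ring and brings the interface down and back up; every exit path clears the bit last. A compact model of that ordering, with a C11 atomic flag standing in for the adapter state bit (userspace sketch, not driver code):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    static atomic_flag resetting = ATOMIC_FLAG_INIT;

    static int set_ringparam(bool running, unsigned new_count, unsigned *count)
    {
        int err = 0;
        void *temp = NULL;

        while (atomic_flag_test_and_set(&resetting))
            ;                          /* the driver sleeps here rather than spinning */

        if (!running) {
            *count = new_count;        /* nothing to rebuild while the NIC is down */
            goto clear_reset;
        }

        temp = malloc(64);             /* scratch ring; vmalloc() in the driver */
        if (!temp) {
            err = -1;
            goto clear_reset;
        }

        /* down, swap descriptor rings, back up ... */
        *count = new_count;
        free(temp);
    clear_reset:
        atomic_flag_clear(&resetting); /* always cleared, and cleared last */
        return err;
    }

    int main(void)
    {
        unsigned count = 256;
        printf("err=%d count=%u\n", set_ringparam(false, 512, &count), count);
        return 0;
    }

The igbvf and ixgbe hunks below apply the same reordering to their set_ringparam paths.
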
diff --git a/drivers/net/igbvf/ethtool.c b/drivers/net/igbvf/ethtool.c
index ee17a097d1ca..c68265bd0d1a 100644
--- a/drivers/net/igbvf/ethtool.c
+++ b/drivers/net/igbvf/ethtool.c
@@ -279,7 +279,7 @@ static int igbvf_set_ringparam(struct net_device *netdev,
279{ 279{
280 struct igbvf_adapter *adapter = netdev_priv(netdev); 280 struct igbvf_adapter *adapter = netdev_priv(netdev);
281 struct igbvf_ring *temp_ring; 281 struct igbvf_ring *temp_ring;
282 int err; 282 int err = 0;
283 u32 new_rx_count, new_tx_count; 283 u32 new_rx_count, new_tx_count;
284 284
285 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) 285 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
@@ -299,15 +299,22 @@ static int igbvf_set_ringparam(struct net_device *netdev,
299 return 0; 299 return 0;
300 } 300 }
301 301
302 temp_ring = vmalloc(sizeof(struct igbvf_ring));
303 if (!temp_ring)
304 return -ENOMEM;
305
306 while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state)) 302 while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
307 msleep(1); 303 msleep(1);
308 304
309 if (netif_running(adapter->netdev)) 305 if (!netif_running(adapter->netdev)) {
310 igbvf_down(adapter); 306 adapter->tx_ring->count = new_tx_count;
307 adapter->rx_ring->count = new_rx_count;
308 goto clear_reset;
309 }
310
311 temp_ring = vmalloc(sizeof(struct igbvf_ring));
312 if (!temp_ring) {
313 err = -ENOMEM;
314 goto clear_reset;
315 }
316
317 igbvf_down(adapter);
311 318
312 /* 319 /*
313 * We can't just free everything and then setup again, 320 * We can't just free everything and then setup again,
@@ -339,14 +346,11 @@ static int igbvf_set_ringparam(struct net_device *netdev,
339 346
340 memcpy(adapter->rx_ring, temp_ring,sizeof(struct igbvf_ring)); 347 memcpy(adapter->rx_ring, temp_ring,sizeof(struct igbvf_ring));
341 } 348 }
342
343 err = 0;
344err_setup: 349err_setup:
345 if (netif_running(adapter->netdev)) 350 igbvf_up(adapter);
346 igbvf_up(adapter);
347
348 clear_bit(__IGBVF_RESETTING, &adapter->state);
349 vfree(temp_ring); 351 vfree(temp_ring);
352clear_reset:
353 clear_bit(__IGBVF_RESETTING, &adapter->state);
350 return err; 354 return err;
351} 355}
352 356
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index fa314cb005a4..856c18c207f3 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -798,7 +798,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
798{ 798{
799 struct ixgbe_adapter *adapter = netdev_priv(netdev); 799 struct ixgbe_adapter *adapter = netdev_priv(netdev);
800 struct ixgbe_ring *temp_tx_ring, *temp_rx_ring; 800 struct ixgbe_ring *temp_tx_ring, *temp_rx_ring;
801 int i, err; 801 int i, err = 0;
802 u32 new_rx_count, new_tx_count; 802 u32 new_rx_count, new_tx_count;
803 bool need_update = false; 803 bool need_update = false;
804 804
@@ -822,6 +822,16 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
822 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) 822 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
823 msleep(1); 823 msleep(1);
824 824
825 if (!netif_running(adapter->netdev)) {
826 for (i = 0; i < adapter->num_tx_queues; i++)
827 adapter->tx_ring[i].count = new_tx_count;
828 for (i = 0; i < adapter->num_rx_queues; i++)
829 adapter->rx_ring[i].count = new_rx_count;
830 adapter->tx_ring_count = new_tx_count;
831 adapter->rx_ring_count = new_rx_count;
832 goto err_setup;
833 }
834
825 temp_tx_ring = kcalloc(adapter->num_tx_queues, 835 temp_tx_ring = kcalloc(adapter->num_tx_queues,
826 sizeof(struct ixgbe_ring), GFP_KERNEL); 836 sizeof(struct ixgbe_ring), GFP_KERNEL);
827 if (!temp_tx_ring) { 837 if (!temp_tx_ring) {
@@ -879,8 +889,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
879 889
880 /* if rings need to be updated, here's the place to do it in one shot */ 890 /* if rings need to be updated, here's the place to do it in one shot */
881 if (need_update) { 891 if (need_update) {
882 if (netif_running(netdev)) 892 ixgbe_down(adapter);
883 ixgbe_down(adapter);
884 893
885 /* tx */ 894 /* tx */
886 if (new_tx_count != adapter->tx_ring_count) { 895 if (new_tx_count != adapter->tx_ring_count) {
@@ -897,13 +906,8 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
897 temp_rx_ring = NULL; 906 temp_rx_ring = NULL;
898 adapter->rx_ring_count = new_rx_count; 907 adapter->rx_ring_count = new_rx_count;
899 } 908 }
900 }
901
902 /* success! */
903 err = 0;
904 if (netif_running(netdev))
905 ixgbe_up(adapter); 909 ixgbe_up(adapter);
906 910 }
907err_setup: 911err_setup:
908 clear_bit(__IXGBE_RESETTING, &adapter->state); 912 clear_bit(__IXGBE_RESETTING, &adapter->state);
909 return err; 913 return err;
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index cbb143ca1eb8..5bd9e6bf6f2f 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -44,6 +44,7 @@
44 44
45#include "ixgbe.h" 45#include "ixgbe.h"
46#include "ixgbe_common.h" 46#include "ixgbe_common.h"
47#include "ixgbe_dcb_82599.h"
47 48
48char ixgbe_driver_name[] = "ixgbe"; 49char ixgbe_driver_name[] = "ixgbe";
49static const char ixgbe_driver_string[] = 50static const char ixgbe_driver_string[] =
@@ -226,6 +227,56 @@ static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
226 /* tx_buffer_info must be completely set up in the transmit path */ 227 /* tx_buffer_info must be completely set up in the transmit path */
227} 228}
228 229
230/**
231 * ixgbe_tx_is_paused - check if the tx ring is paused
232 * @adapter: the ixgbe adapter
233 * @tx_ring: the corresponding tx_ring
234 *
 235 * If not in DCB mode, checks TFCS.TXOFF; otherwise, finds the TC that
 236 * corresponds to this tx_ring and checks that TC's TXOFF bit in TFCS.
 237 *
 238 * Returns: true if paused
239 */
240static inline bool ixgbe_tx_is_paused(struct ixgbe_adapter *adapter,
241 struct ixgbe_ring *tx_ring)
242{
243 int tc;
244 u32 txoff = IXGBE_TFCS_TXOFF;
245
246#ifdef CONFIG_IXGBE_DCB
247 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
248 int reg_idx = tx_ring->reg_idx;
249 int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
250
251 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
252 tc = reg_idx >> 2;
253 txoff = IXGBE_TFCS_TXOFF0;
254 } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
255 tc = 0;
256 txoff = IXGBE_TFCS_TXOFF;
257 if (dcb_i == 8) {
258 /* TC0, TC1 */
259 tc = reg_idx >> 5;
260 if (tc == 2) /* TC2, TC3 */
261 tc += (reg_idx - 64) >> 4;
262 else if (tc == 3) /* TC4, TC5, TC6, TC7 */
263 tc += 1 + ((reg_idx - 96) >> 3);
264 } else if (dcb_i == 4) {
265 /* TC0, TC1 */
266 tc = reg_idx >> 6;
267 if (tc == 1) {
268 tc += (reg_idx - 64) >> 5;
269 if (tc == 2) /* TC2, TC3 */
270 tc += (reg_idx - 96) >> 4;
271 }
272 }
273 }
274 txoff <<= tc;
275 }
276#endif
277 return IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & txoff;
278}
279
229static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter, 280static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
230 struct ixgbe_ring *tx_ring, 281 struct ixgbe_ring *tx_ring,
231 unsigned int eop) 282 unsigned int eop)
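
In DCB mode the TXOFF check above depends on which traffic class the ring's reg_idx maps to; for the 82599 with eight classes the arithmetic gives TC0/TC1 32 queues each, TC2/TC3 16 each, and TC4-TC7 8 each. The mapping can be sanity-checked by lifting the same arithmetic into a standalone function:

    #include <stdio.h>

    /* reg_idx -> TC for 82599, 8 traffic classes (same arithmetic as the hunk above). */
    static int tc_for_reg_idx_82599_8tc(int reg_idx)
    {
        int tc = reg_idx >> 5;                /* TC0, TC1: 32 queues each */

        if (tc == 2)                          /* TC2, TC3: 16 queues each */
            tc += (reg_idx - 64) >> 4;
        else if (tc == 3)                     /* TC4..TC7: 8 queues each  */
            tc += 1 + ((reg_idx - 96) >> 3);
        return tc;
    }

    int main(void)
    {
        int idx;

        for (idx = 0; idx < 128; idx += 8)
            printf("reg_idx %3d -> TC%d\n", idx, tc_for_reg_idx_82599_8tc(idx));
        return 0;
    }
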
@@ -237,7 +288,7 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
237 adapter->detect_tx_hung = false; 288 adapter->detect_tx_hung = false;
238 if (tx_ring->tx_buffer_info[eop].time_stamp && 289 if (tx_ring->tx_buffer_info[eop].time_stamp &&
239 time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) && 290 time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
240 !(IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)) { 291 !ixgbe_tx_is_paused(adapter, tx_ring)) {
241 /* detected Tx unit hang */ 292 /* detected Tx unit hang */
242 union ixgbe_adv_tx_desc *tx_desc; 293 union ixgbe_adv_tx_desc *tx_desc;
243 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop); 294 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
@@ -412,19 +463,23 @@ static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
412 u32 txctrl; 463 u32 txctrl;
413 int cpu = get_cpu(); 464 int cpu = get_cpu();
414 int q = tx_ring - adapter->tx_ring; 465 int q = tx_ring - adapter->tx_ring;
466 struct ixgbe_hw *hw = &adapter->hw;
415 467
416 if (tx_ring->cpu != cpu) { 468 if (tx_ring->cpu != cpu) {
417 txctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q));
418 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 469 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
470 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(q));
419 txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK; 471 txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
420 txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); 472 txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
473 txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
474 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(q), txctrl);
421 } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) { 475 } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
476 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(q));
422 txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599; 477 txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
423 txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) << 478 txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
424 IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599); 479 IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
480 txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
481 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(q), txctrl);
425 } 482 }
426 txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
427 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q), txctrl);
428 tx_ring->cpu = cpu; 483 tx_ring->cpu = cpu;
429 } 484 }
430 put_cpu(); 485 put_cpu();
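
The DCA update above now performs the whole read-modify-write inside each MAC-type branch, because the 82599 uses IXGBE_DCA_TXCTRL_82599(q) rather than the 82598's IXGBE_DCA_TXCTRL(q); the old code always wrote the 82598 register after the branch. A minimal model of dispatching a read-modify-write to the register that matches the hardware generation (the mask and bit values are invented for the model):

    #include <stdint.h>
    #include <stdio.h>

    enum mac_type { MAC_82598, MAC_82599 };

    static uint32_t regs[2];                  /* regs[0]: 82598 TXCTRL, regs[1]: 82599 TXCTRL */

    static void update_dca(enum mac_type mac, uint32_t tag)
    {
        int idx = (mac == MAC_82599) ? 1 : 0; /* pick the register for this generation */
        uint32_t v = regs[idx];

        v &= ~0xffu;                          /* clear the CPUID field (illustrative mask) */
        v |= tag | (1u << 5);                 /* new tag + descriptor-DCA enable (illustrative bit) */
        regs[idx] = v;                        /* write back to the same register that was read */
    }

    int main(void)
    {
        update_dca(MAC_82599, 0x7);
        printf("82598=0x%x 82599=0x%x\n", regs[0], regs[1]);
        return 0;
    }
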
@@ -1913,11 +1968,25 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
1913 break; 1968 break;
1914 } 1969 }
1915 } 1970 }
1971
1916 if (hw->mac.type == ixgbe_mac_82599EB) { 1972 if (hw->mac.type == ixgbe_mac_82599EB) {
1973 u32 rttdcs;
1974
1975 /* disable the arbiter while setting MTQC */
1976 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
1977 rttdcs |= IXGBE_RTTDCS_ARBDIS;
1978 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
1979
1917 /* We enable 8 traffic classes, DCB only */ 1980 /* We enable 8 traffic classes, DCB only */
1918 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) 1981 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
1919 IXGBE_WRITE_REG(hw, IXGBE_MTQC, (IXGBE_MTQC_RT_ENA | 1982 IXGBE_WRITE_REG(hw, IXGBE_MTQC, (IXGBE_MTQC_RT_ENA |
1920 IXGBE_MTQC_8TC_8TQ)); 1983 IXGBE_MTQC_8TC_8TQ));
1984 else
1985 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
1986
 1987 /* re-enable the arbiter */
1988 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
1989 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
1921 } 1990 }
1922} 1991}
1923 1992
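
The MTQC write above is now bracketed by setting and clearing RTTDCS.ARBDIS, so the DCB TX descriptor-plane arbiter is quiesced while the queueing mode changes, and a 64Q_1PB value is programmed when DCB is disabled. A sketch of that "quiesce, program, resume" bracket around a register update (register encodings below are placeholders, not the real bit layouts):

    #include <stdint.h>
    #include <stdio.h>

    #define ARBDIS (1u << 6)                  /* placeholder bit position */

    static uint32_t rttdcs, mtqc;

    static void configure_tx(int dcb_enabled)
    {
        rttdcs |= ARBDIS;                     /* quiesce the arbiter before touching MTQC */

        mtqc = dcb_enabled ? 0x0b : 0x00;     /* placeholder encodings for 8TC_8TQ vs 64Q_1PB */

        rttdcs &= ~ARBDIS;                    /* resume the arbiter once MTQC is consistent */
    }

    int main(void)
    {
        configure_tx(1);
        printf("rttdcs=0x%x mtqc=0x%x\n", rttdcs, mtqc);
        return 0;
    }
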
@@ -2471,7 +2540,10 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
2471 ixgbe_restore_vlan(adapter); 2540 ixgbe_restore_vlan(adapter);
2472#ifdef CONFIG_IXGBE_DCB 2541#ifdef CONFIG_IXGBE_DCB
2473 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 2542 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
2474 netif_set_gso_max_size(netdev, 32768); 2543 if (hw->mac.type == ixgbe_mac_82598EB)
2544 netif_set_gso_max_size(netdev, 32768);
2545 else
2546 netif_set_gso_max_size(netdev, 65536);
2475 ixgbe_configure_dcb(adapter); 2547 ixgbe_configure_dcb(adapter);
2476 } else { 2548 } else {
2477 netif_set_gso_max_size(netdev, 65536); 2549 netif_set_gso_max_size(netdev, 65536);
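
The ixgbe_configure_tx() change above works because, on the 82599, MTQC may only be reprogrammed while the Tx descriptor-plane arbiter is stopped via RTTDCS.ARBDIS. A minimal sketch of that sequence pulled out into a helper, assuming the register macros used in the hunk (the helper name ixgbe_write_mtqc() is hypothetical, and this is not buildable outside the driver):

	static void ixgbe_write_mtqc(struct ixgbe_hw *hw, bool dcb_enabled)
	{
		u32 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);

		/* stop the Tx descriptor-plane arbiter before touching MTQC */
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs | IXGBE_RTTDCS_ARBDIS);

		/* 8 traffic classes for DCB, otherwise 64 queues / 1 packet buffer */
		IXGBE_WRITE_REG(hw, IXGBE_MTQC, dcb_enabled ?
				(IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ) :
				IXGBE_MTQC_64Q_1PB);

		/* re-enable the arbiter with its original settings */
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs & ~IXGBE_RTTDCS_ARBDIS);
	}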
diff --git a/drivers/net/macsonic.c b/drivers/net/macsonic.c
index 61eabcac734c..b3d7d8d77f46 100644
--- a/drivers/net/macsonic.c
+++ b/drivers/net/macsonic.c
@@ -223,69 +223,73 @@ static int __devinit macsonic_init(struct net_device *dev)
223 return 0; 223 return 0;
224} 224}
225 225
226static int __devinit mac_onboard_sonic_ethernet_addr(struct net_device *dev) 226#define INVALID_MAC(mac) (memcmp(mac, "\x08\x00\x07", 3) && \
227 memcmp(mac, "\x00\xA0\x40", 3) && \
228 memcmp(mac, "\x00\x80\x19", 3) && \
229 memcmp(mac, "\x00\x05\x02", 3))
230
231static void __devinit mac_onboard_sonic_ethernet_addr(struct net_device *dev)
227{ 232{
228 struct sonic_local *lp = netdev_priv(dev); 233 struct sonic_local *lp = netdev_priv(dev);
229 const int prom_addr = ONBOARD_SONIC_PROM_BASE; 234 const int prom_addr = ONBOARD_SONIC_PROM_BASE;
230 int i; 235 unsigned short val;
231 236
232 /* On NuBus boards we can sometimes look in the ROM resources. 237 /*
233 No such luck for comm-slot/onboard. */ 238 * On NuBus boards we can sometimes look in the ROM resources.
234 for(i = 0; i < 6; i++) 239 * No such luck for comm-slot/onboard.
235 dev->dev_addr[i] = SONIC_READ_PROM(i); 240 * On the PowerBook 520, the PROM base address is a mystery.
241 */
242 if (hwreg_present((void *)prom_addr)) {
243 int i;
244
245 for (i = 0; i < 6; i++)
246 dev->dev_addr[i] = SONIC_READ_PROM(i);
247 if (!INVALID_MAC(dev->dev_addr))
248 return;
236 249
237 /* Most of the time, the address is bit-reversed. The NetBSD 250 /*
238 source has a rather long and detailed historical account of 251 * Most of the time, the address is bit-reversed. The NetBSD
239 why this is so. */ 252 * source has a rather long and detailed historical account of
240 if (memcmp(dev->dev_addr, "\x08\x00\x07", 3) && 253 * why this is so.
241 memcmp(dev->dev_addr, "\x00\xA0\x40", 3) && 254 */
242 memcmp(dev->dev_addr, "\x00\x80\x19", 3) &&
243 memcmp(dev->dev_addr, "\x00\x05\x02", 3))
244 bit_reverse_addr(dev->dev_addr); 255 bit_reverse_addr(dev->dev_addr);
245 else 256 if (!INVALID_MAC(dev->dev_addr))
246 return 0; 257 return;
247 258
248 /* If we still have what seems to be a bogus address, we'll
249 look in the CAM. The top entry should be ours. */
250 /* Danger! This only works if MacOS has already initialized
251 the card... */
252 if (memcmp(dev->dev_addr, "\x08\x00\x07", 3) &&
253 memcmp(dev->dev_addr, "\x00\xA0\x40", 3) &&
254 memcmp(dev->dev_addr, "\x00\x80\x19", 3) &&
255 memcmp(dev->dev_addr, "\x00\x05\x02", 3))
256 {
257 unsigned short val;
258
259 printk(KERN_INFO "macsonic: PROM seems to be wrong, trying CAM entry 15\n");
260
261 SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
262 SONIC_WRITE(SONIC_CEP, 15);
263
264 val = SONIC_READ(SONIC_CAP2);
265 dev->dev_addr[5] = val >> 8;
266 dev->dev_addr[4] = val & 0xff;
267 val = SONIC_READ(SONIC_CAP1);
268 dev->dev_addr[3] = val >> 8;
269 dev->dev_addr[2] = val & 0xff;
270 val = SONIC_READ(SONIC_CAP0);
271 dev->dev_addr[1] = val >> 8;
272 dev->dev_addr[0] = val & 0xff;
273
274 printk(KERN_INFO "HW Address from CAM 15: %pM\n",
275 dev->dev_addr);
276 } else return 0;
277
278 if (memcmp(dev->dev_addr, "\x08\x00\x07", 3) &&
279 memcmp(dev->dev_addr, "\x00\xA0\x40", 3) &&
280 memcmp(dev->dev_addr, "\x00\x80\x19", 3) &&
281 memcmp(dev->dev_addr, "\x00\x05\x02", 3))
282 {
283 /* 259 /*
284 * Still nonsense ... messed up someplace! 260 * If we still have what seems to be a bogus address, we'll
261 * look in the CAM. The top entry should be ours.
285 */ 262 */
286 printk(KERN_ERR "macsonic: ERROR (INVALID MAC)\n"); 263 printk(KERN_WARNING "macsonic: MAC address in PROM seems "
287 return -EIO; 264 "to be invalid, trying CAM\n");
288 } else return 0; 265 } else {
266 printk(KERN_WARNING "macsonic: cannot read MAC address from "
267 "PROM, trying CAM\n");
268 }
269
270 /* This only works if MacOS has already initialized the card. */
271
272 SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
273 SONIC_WRITE(SONIC_CEP, 15);
274
275 val = SONIC_READ(SONIC_CAP2);
276 dev->dev_addr[5] = val >> 8;
277 dev->dev_addr[4] = val & 0xff;
278 val = SONIC_READ(SONIC_CAP1);
279 dev->dev_addr[3] = val >> 8;
280 dev->dev_addr[2] = val & 0xff;
281 val = SONIC_READ(SONIC_CAP0);
282 dev->dev_addr[1] = val >> 8;
283 dev->dev_addr[0] = val & 0xff;
284
285 if (!INVALID_MAC(dev->dev_addr))
286 return;
287
288 /* Still nonsense ... messed up someplace! */
289
290 printk(KERN_WARNING "macsonic: MAC address in CAM entry 15 "
291 "seems invalid, will use a random MAC\n");
292 random_ether_addr(dev->dev_addr);
289} 293}
290 294
291static int __devinit mac_onboard_sonic_probe(struct net_device *dev) 295static int __devinit mac_onboard_sonic_probe(struct net_device *dev)
@@ -402,8 +406,7 @@ static int __devinit mac_onboard_sonic_probe(struct net_device *dev)
402 SONIC_WRITE(SONIC_ISR, 0x7fff); 406 SONIC_WRITE(SONIC_ISR, 0x7fff);
403 407
404 /* Now look for the MAC address. */ 408 /* Now look for the MAC address. */
405 if (mac_onboard_sonic_ethernet_addr(dev) != 0) 409 mac_onboard_sonic_ethernet_addr(dev);
406 return -ENODEV;
407 410
408 /* Shared init code */ 411 /* Shared init code */
409 return macsonic_init(dev); 412 return macsonic_init(dev);
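
The rewritten mac_onboard_sonic_ethernet_addr() above is a fallback ladder: PROM address, then the bit-reversed PROM address, then CAM entry 15, then a random address. A condensed sketch of that shape, assuming hypothetical read_prom_addr()/read_cam15_addr() helpers in place of the inline register reads (INVALID_MAC, bit_reverse_addr and random_ether_addr are the names used in the hunk):

	static void pick_sonic_mac(struct net_device *dev)
	{
		read_prom_addr(dev->dev_addr);          /* hypothetical PROM read */
		if (!INVALID_MAC(dev->dev_addr))
			return;                         /* PROM OUI looks sane */

		bit_reverse_addr(dev->dev_addr);        /* PROM is often bit-reversed */
		if (!INVALID_MAC(dev->dev_addr))
			return;

		read_cam15_addr(dev->dev_addr);         /* hypothetical CAM read; only
							 * useful if MacOS set it up */
		if (!INVALID_MAC(dev->dev_addr))
			return;

		random_ether_addr(dev->dev_addr);       /* last resort */
	}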
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 5dd7225b178e..291a505fd4fc 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -1282,6 +1282,7 @@ static struct pci_device_id mlx4_pci_table[] = {
1282 { PCI_VDEVICE(MELLANOX, 0x6372) }, /* MT25458 ConnectX EN 10GBASE-T 10GigE */ 1282 { PCI_VDEVICE(MELLANOX, 0x6372) }, /* MT25458 ConnectX EN 10GBASE-T 10GigE */
1283 { PCI_VDEVICE(MELLANOX, 0x675a) }, /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */ 1283 { PCI_VDEVICE(MELLANOX, 0x675a) }, /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
1284 { PCI_VDEVICE(MELLANOX, 0x6764) }, /* MT26468 ConnectX EN 10GigE PCIe gen2*/ 1284 { PCI_VDEVICE(MELLANOX, 0x6764) }, /* MT26468 ConnectX EN 10GigE PCIe gen2*/
1285 { PCI_VDEVICE(MELLANOX, 0x6746) }, /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
1285 { PCI_VDEVICE(MELLANOX, 0x676e) }, /* MT26478 ConnectX2 40GigE PCIe gen2 */ 1286 { PCI_VDEVICE(MELLANOX, 0x676e) }, /* MT26478 ConnectX2 40GigE PCIe gen2 */
1286 { 0, } 1287 { 0, }
1287}; 1288};
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 6930c87f362e..f3624517cb0e 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -75,7 +75,7 @@
75#include "myri10ge_mcp.h" 75#include "myri10ge_mcp.h"
76#include "myri10ge_mcp_gen_header.h" 76#include "myri10ge_mcp_gen_header.h"
77 77
78#define MYRI10GE_VERSION_STR "1.5.0-1.432" 78#define MYRI10GE_VERSION_STR "1.5.1-1.451"
79 79
80MODULE_DESCRIPTION("Myricom 10G driver (10GbE)"); 80MODULE_DESCRIPTION("Myricom 10G driver (10GbE)");
81MODULE_AUTHOR("Maintainer: help@myri.com"); 81MODULE_AUTHOR("Maintainer: help@myri.com");
@@ -1624,10 +1624,21 @@ myri10ge_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
1624 return 0; 1624 return 0;
1625 } 1625 }
1626 } 1626 }
1627 if (*ptr == 'R' || *ptr == 'Q') { 1627 if (*ptr == '2')
1628 /* We've found either an XFP or quad ribbon fiber */ 1628 ptr++;
1629 if (*ptr == 'R' || *ptr == 'Q' || *ptr == 'S') {
1630 /* We've found either an XFP, quad ribbon fiber, or SFP+ */
1629 cmd->port = PORT_FIBRE; 1631 cmd->port = PORT_FIBRE;
1632 cmd->supported |= SUPPORTED_FIBRE;
1633 cmd->advertising |= ADVERTISED_FIBRE;
1634 } else {
1635 cmd->port = PORT_OTHER;
1630 } 1636 }
1637 if (*ptr == 'R' || *ptr == 'S')
1638 cmd->transceiver = XCVR_EXTERNAL;
1639 else
1640 cmd->transceiver = XCVR_INTERNAL;
1641
1631 return 0; 1642 return 0;
1632} 1643}
1633 1644
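
The myri10ge ethtool hunk above decodes the media suffix of the board's product code to report port and transceiver type. A small sketch of just that decode, assuming ptr already points at the suffix; the function name is hypothetical, the ethtool constants are the standard ones:

	static void decode_media_suffix(const char *ptr, struct ethtool_cmd *cmd)
	{
		if (*ptr == '2')                /* skip a leading '2', as the patch does */
			ptr++;
		if (*ptr == 'R' || *ptr == 'Q' || *ptr == 'S') {
			/* XFP, quad ribbon fiber or SFP+ */
			cmd->port = PORT_FIBRE;
			cmd->supported |= SUPPORTED_FIBRE;
			cmd->advertising |= ADVERTISED_FIBRE;
		} else {
			cmd->port = PORT_OTHER;
		}
		/* XFP and SFP+ modules are external transceivers */
		cmd->transceiver = (*ptr == 'R' || *ptr == 'S') ?
				   XCVR_EXTERNAL : XCVR_INTERNAL;
	}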
diff --git a/drivers/net/netxen/netxen_nic_hdr.h b/drivers/net/netxen/netxen_nic_hdr.h
index 7a7177421d7c..1c46da632125 100644
--- a/drivers/net/netxen/netxen_nic_hdr.h
+++ b/drivers/net/netxen/netxen_nic_hdr.h
@@ -419,6 +419,7 @@ enum {
419#define NETXEN_CRB_ROMUSB \ 419#define NETXEN_CRB_ROMUSB \
420 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_ROMUSB) 420 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_ROMUSB)
421#define NETXEN_CRB_I2Q NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_I2Q) 421#define NETXEN_CRB_I2Q NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_I2Q)
422#define NETXEN_CRB_I2C0 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_I2C0)
422#define NETXEN_CRB_SMB NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_SMB) 423#define NETXEN_CRB_SMB NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_SMB)
423#define NETXEN_CRB_MAX NETXEN_PCI_CRB_WINDOW(64) 424#define NETXEN_CRB_MAX NETXEN_PCI_CRB_WINDOW(64)
424 425
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c
index 32314000dfcd..3185a98b0917 100644
--- a/drivers/net/netxen/netxen_nic_hw.c
+++ b/drivers/net/netxen/netxen_nic_hw.c
@@ -1901,22 +1901,16 @@ netxen_setup_hwops(struct netxen_adapter *adapter)
1901 1901
1902int netxen_nic_get_board_info(struct netxen_adapter *adapter) 1902int netxen_nic_get_board_info(struct netxen_adapter *adapter)
1903{ 1903{
1904 int offset, board_type, magic, header_version; 1904 int offset, board_type, magic;
1905 struct pci_dev *pdev = adapter->pdev; 1905 struct pci_dev *pdev = adapter->pdev;
1906 1906
1907 offset = NX_FW_MAGIC_OFFSET; 1907 offset = NX_FW_MAGIC_OFFSET;
1908 if (netxen_rom_fast_read(adapter, offset, &magic)) 1908 if (netxen_rom_fast_read(adapter, offset, &magic))
1909 return -EIO; 1909 return -EIO;
1910 1910
1911 offset = NX_HDR_VERSION_OFFSET; 1911 if (magic != NETXEN_BDINFO_MAGIC) {
1912 if (netxen_rom_fast_read(adapter, offset, &header_version)) 1912 dev_err(&pdev->dev, "invalid board config, magic=%08x\n",
1913 return -EIO; 1913 magic);
1914
1915 if (magic != NETXEN_BDINFO_MAGIC ||
1916 header_version != NETXEN_BDINFO_VERSION) {
1917 dev_err(&pdev->dev,
1918 "invalid board config, magic=%08x, version=%08x\n",
1919 magic, header_version);
1920 return -EIO; 1914 return -EIO;
1921 } 1915 }
1922 1916
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 91c2bc61c8eb..e40b914d6faf 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -531,6 +531,8 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
531 continue; 531 continue;
532 532
533 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { 533 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
534 if (off == (NETXEN_CRB_I2C0 + 0x1c))
535 continue;
534 /* do not reset PCI */ 536 /* do not reset PCI */
535 if (off == (ROMUSB_GLB + 0xbc)) 537 if (off == (ROMUSB_GLB + 0xbc))
536 continue; 538 continue;
@@ -553,12 +555,6 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
553 continue; 555 continue;
554 } 556 }
555 557
556 if (off == NETXEN_ADDR_ERROR) {
557 printk(KERN_ERR "%s: Err: Unknown addr: 0x%08x\n",
558 netxen_nic_driver_name, buf[i].addr);
559 continue;
560 }
561
562 init_delay = 1; 558 init_delay = 1;
563 /* After writing this register, HW needs time for CRB */ 559 /* After writing this register, HW needs time for CRB */
564 /* to quiet down (else crb_window returns 0xffffffff) */ 560 /* to quiet down (else crb_window returns 0xffffffff) */
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 7fc15e9e8adb..0b4a56a8c8d5 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -1919,6 +1919,7 @@ static void netxen_tx_timeout_task(struct work_struct *work)
1919 1919
1920request_reset: 1920request_reset:
1921 adapter->need_fw_reset = 1; 1921 adapter->need_fw_reset = 1;
1922 clear_bit(__NX_RESETTING, &adapter->state);
1922} 1923}
1923 1924
1924struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev) 1925struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev)
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index bd3447f04902..94c9ad2746bc 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -1760,7 +1760,7 @@ static struct pcmcia_device_id pcnet_ids[] = {
1760 PCMCIA_DEVICE_CIS_MANF_CARD(0xc00f, 0x0002, "cis/LA-PCM.cis"), 1760 PCMCIA_DEVICE_CIS_MANF_CARD(0xc00f, 0x0002, "cis/LA-PCM.cis"),
1761 PCMCIA_DEVICE_CIS_PROD_ID12("KTI", "PE520 PLUS", 0xad180345, 0x9d58d392, "PE520.cis"), 1761 PCMCIA_DEVICE_CIS_PROD_ID12("KTI", "PE520 PLUS", 0xad180345, 0x9d58d392, "PE520.cis"),
1762 PCMCIA_DEVICE_CIS_PROD_ID12("NDC", "Ethernet", 0x01c43ae1, 0x00b2e941, "cis/NE2K.cis"), 1762 PCMCIA_DEVICE_CIS_PROD_ID12("NDC", "Ethernet", 0x01c43ae1, 0x00b2e941, "cis/NE2K.cis"),
1763 PCMCIA_DEVICE_CIS_PROD_ID12("PMX ", "PE-200", 0x34f3f1c8, 0x10b59f8c, "PE-200.cis"), 1763 PCMCIA_DEVICE_CIS_PROD_ID12("PMX ", "PE-200", 0x34f3f1c8, 0x10b59f8c, "cis/PE-200.cis"),
1764 PCMCIA_DEVICE_CIS_PROD_ID12("TAMARACK", "Ethernet", 0xcf434fba, 0x00b2e941, "cis/tamarack.cis"), 1764 PCMCIA_DEVICE_CIS_PROD_ID12("TAMARACK", "Ethernet", 0xcf434fba, 0x00b2e941, "cis/tamarack.cis"),
1765 PCMCIA_DEVICE_PROD_ID12("Ethernet", "CF Size PC Card", 0x00b2e941, 0x43ac239b), 1765 PCMCIA_DEVICE_PROD_ID12("Ethernet", "CF Size PC Card", 0x00b2e941, 0x43ac239b),
1766 PCMCIA_DEVICE_PROD_ID123("Fast Ethernet", "CF Size PC Card", "1.0", 1766 PCMCIA_DEVICE_PROD_ID123("Fast Ethernet", "CF Size PC Card", "1.0",
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index 7cbf6f9b51de..2559991eea6a 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -111,9 +111,6 @@ struct pppoe_net {
111 rwlock_t hash_lock; 111 rwlock_t hash_lock;
112}; 112};
113 113
114/* to eliminate a race btw pppoe_flush_dev and pppoe_release */
115static DEFINE_SPINLOCK(flush_lock);
116
117/* 114/*
118 * PPPoE could be in the following stages: 115 * PPPoE could be in the following stages:
119 * 1) Discovery stage (to obtain remote MAC and Session ID) 116 * 1) Discovery stage (to obtain remote MAC and Session ID)
@@ -303,45 +300,48 @@ static void pppoe_flush_dev(struct net_device *dev)
303 write_lock_bh(&pn->hash_lock); 300 write_lock_bh(&pn->hash_lock);
304 for (i = 0; i < PPPOE_HASH_SIZE; i++) { 301 for (i = 0; i < PPPOE_HASH_SIZE; i++) {
305 struct pppox_sock *po = pn->hash_table[i]; 302 struct pppox_sock *po = pn->hash_table[i];
303 struct sock *sk;
306 304
307 while (po != NULL) { 305 while (po) {
308 struct sock *sk; 306 while (po && po->pppoe_dev != dev) {
309 if (po->pppoe_dev != dev) {
310 po = po->next; 307 po = po->next;
311 continue;
312 } 308 }
309
310 if (!po)
311 break;
312
313 sk = sk_pppox(po); 313 sk = sk_pppox(po);
314 spin_lock(&flush_lock);
315 po->pppoe_dev = NULL;
316 spin_unlock(&flush_lock);
317 dev_put(dev);
318 314
319 /* We always grab the socket lock, followed by the 315 /* We always grab the socket lock, followed by the
320 * hash_lock, in that order. Since we should 316 * hash_lock, in that order. Since we should hold the
321 * hold the sock lock while doing any unbinding, 317 * sock lock while doing any unbinding, we need to
322 * we need to release the lock we're holding. 318 * release the lock we're holding. Hold a reference to
323 * Hold a reference to the sock so it doesn't disappear 319 * the sock so it doesn't disappear as we're jumping
324 * as we're jumping between locks. 320 * between locks.
325 */ 321 */
326 322
327 sock_hold(sk); 323 sock_hold(sk);
328
329 write_unlock_bh(&pn->hash_lock); 324 write_unlock_bh(&pn->hash_lock);
330 lock_sock(sk); 325 lock_sock(sk);
331 326
332 if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) { 327 if (po->pppoe_dev == dev
328 && sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
333 pppox_unbind_sock(sk); 329 pppox_unbind_sock(sk);
334 sk->sk_state = PPPOX_ZOMBIE; 330 sk->sk_state = PPPOX_ZOMBIE;
335 sk->sk_state_change(sk); 331 sk->sk_state_change(sk);
332 po->pppoe_dev = NULL;
333 dev_put(dev);
336 } 334 }
337 335
338 release_sock(sk); 336 release_sock(sk);
339 sock_put(sk); 337 sock_put(sk);
340 338
341 /* Restart scan at the beginning of this hash chain. 339 /* Restart the process from the start of the current
342 * While the lock was dropped the chain contents may 340 * hash chain. We dropped locks so the world may have
 343 * have changed. 341 * changed from underneath us.
344 */ 342 */
343
344 BUG_ON(pppoe_pernet(dev_net(dev)) == NULL);
345 write_lock_bh(&pn->hash_lock); 345 write_lock_bh(&pn->hash_lock);
346 po = pn->hash_table[i]; 346 po = pn->hash_table[i];
347 } 347 }
@@ -388,11 +388,16 @@ static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb)
388 struct pppox_sock *po = pppox_sk(sk); 388 struct pppox_sock *po = pppox_sk(sk);
389 struct pppox_sock *relay_po; 389 struct pppox_sock *relay_po;
390 390
391 /* Backlog receive. Semantics of backlog rcv preclude any code from
392 * executing in lock_sock()/release_sock() bounds; meaning sk->sk_state
393 * can't change.
394 */
395
391 if (sk->sk_state & PPPOX_BOUND) { 396 if (sk->sk_state & PPPOX_BOUND) {
392 ppp_input(&po->chan, skb); 397 ppp_input(&po->chan, skb);
393 } else if (sk->sk_state & PPPOX_RELAY) { 398 } else if (sk->sk_state & PPPOX_RELAY) {
394 relay_po = get_item_by_addr(dev_net(po->pppoe_dev), 399 relay_po = get_item_by_addr(sock_net(sk),
395 &po->pppoe_relay); 400 &po->pppoe_relay);
396 if (relay_po == NULL) 401 if (relay_po == NULL)
397 goto abort_kfree; 402 goto abort_kfree;
398 403
@@ -447,6 +452,10 @@ static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev,
447 goto drop; 452 goto drop;
448 453
449 pn = pppoe_pernet(dev_net(dev)); 454 pn = pppoe_pernet(dev_net(dev));
455
456 /* Note that get_item does a sock_hold(), so sk_pppox(po)
457 * is known to be safe.
458 */
450 po = get_item(pn, ph->sid, eth_hdr(skb)->h_source, dev->ifindex); 459 po = get_item(pn, ph->sid, eth_hdr(skb)->h_source, dev->ifindex);
451 if (!po) 460 if (!po)
452 goto drop; 461 goto drop;
@@ -561,6 +570,7 @@ static int pppoe_release(struct socket *sock)
561 struct sock *sk = sock->sk; 570 struct sock *sk = sock->sk;
562 struct pppox_sock *po; 571 struct pppox_sock *po;
563 struct pppoe_net *pn; 572 struct pppoe_net *pn;
573 struct net *net = NULL;
564 574
565 if (!sk) 575 if (!sk)
566 return 0; 576 return 0;
@@ -571,44 +581,28 @@ static int pppoe_release(struct socket *sock)
571 return -EBADF; 581 return -EBADF;
572 } 582 }
573 583
584 po = pppox_sk(sk);
585
586 if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
587 dev_put(po->pppoe_dev);
588 po->pppoe_dev = NULL;
589 }
590
574 pppox_unbind_sock(sk); 591 pppox_unbind_sock(sk);
575 592
576 /* Signal the death of the socket. */ 593 /* Signal the death of the socket. */
577 sk->sk_state = PPPOX_DEAD; 594 sk->sk_state = PPPOX_DEAD;
578 595
579 /* 596 net = sock_net(sk);
580 * pppoe_flush_dev could lead to a race with 597 pn = pppoe_pernet(net);
581 * this routine so we use flush_lock to eliminate
582 * such a case (we only need per-net specific data)
583 */
584 spin_lock(&flush_lock);
585 po = pppox_sk(sk);
586 if (!po->pppoe_dev) {
587 spin_unlock(&flush_lock);
588 goto out;
589 }
590 pn = pppoe_pernet(dev_net(po->pppoe_dev));
591 spin_unlock(&flush_lock);
592 598
593 /* 599 /*
594 * protect "po" from concurrent updates 600 * protect "po" from concurrent updates
595 * on pppoe_flush_dev 601 * on pppoe_flush_dev
596 */ 602 */
597 write_lock_bh(&pn->hash_lock); 603 delete_item(pn, po->pppoe_pa.sid, po->pppoe_pa.remote,
604 po->pppoe_ifindex);
598 605
599 po = pppox_sk(sk);
600 if (stage_session(po->pppoe_pa.sid))
601 __delete_item(pn, po->pppoe_pa.sid, po->pppoe_pa.remote,
602 po->pppoe_ifindex);
603
604 if (po->pppoe_dev) {
605 dev_put(po->pppoe_dev);
606 po->pppoe_dev = NULL;
607 }
608
609 write_unlock_bh(&pn->hash_lock);
610
611out:
612 sock_orphan(sk); 606 sock_orphan(sk);
613 sock->sk = NULL; 607 sock->sk = NULL;
614 608
@@ -625,8 +619,9 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
625 struct sock *sk = sock->sk; 619 struct sock *sk = sock->sk;
626 struct sockaddr_pppox *sp = (struct sockaddr_pppox *)uservaddr; 620 struct sockaddr_pppox *sp = (struct sockaddr_pppox *)uservaddr;
627 struct pppox_sock *po = pppox_sk(sk); 621 struct pppox_sock *po = pppox_sk(sk);
628 struct net_device *dev; 622 struct net_device *dev = NULL;
629 struct pppoe_net *pn; 623 struct pppoe_net *pn;
624 struct net *net = NULL;
630 int error; 625 int error;
631 626
632 lock_sock(sk); 627 lock_sock(sk);
@@ -652,12 +647,14 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
652 /* Delete the old binding */ 647 /* Delete the old binding */
653 if (stage_session(po->pppoe_pa.sid)) { 648 if (stage_session(po->pppoe_pa.sid)) {
654 pppox_unbind_sock(sk); 649 pppox_unbind_sock(sk);
650 pn = pppoe_pernet(sock_net(sk));
651 delete_item(pn, po->pppoe_pa.sid,
652 po->pppoe_pa.remote, po->pppoe_ifindex);
655 if (po->pppoe_dev) { 653 if (po->pppoe_dev) {
656 pn = pppoe_pernet(dev_net(po->pppoe_dev));
657 delete_item(pn, po->pppoe_pa.sid,
658 po->pppoe_pa.remote, po->pppoe_ifindex);
659 dev_put(po->pppoe_dev); 654 dev_put(po->pppoe_dev);
655 po->pppoe_dev = NULL;
660 } 656 }
657
661 memset(sk_pppox(po) + 1, 0, 658 memset(sk_pppox(po) + 1, 0,
662 sizeof(struct pppox_sock) - sizeof(struct sock)); 659 sizeof(struct pppox_sock) - sizeof(struct sock));
663 sk->sk_state = PPPOX_NONE; 660 sk->sk_state = PPPOX_NONE;
@@ -666,16 +663,15 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
666 /* Re-bind in session stage only */ 663 /* Re-bind in session stage only */
667 if (stage_session(sp->sa_addr.pppoe.sid)) { 664 if (stage_session(sp->sa_addr.pppoe.sid)) {
668 error = -ENODEV; 665 error = -ENODEV;
669 dev = dev_get_by_name(sock_net(sk), sp->sa_addr.pppoe.dev); 666 net = sock_net(sk);
667 dev = dev_get_by_name(net, sp->sa_addr.pppoe.dev);
670 if (!dev) 668 if (!dev)
671 goto end; 669 goto err_put;
672 670
673 po->pppoe_dev = dev; 671 po->pppoe_dev = dev;
674 po->pppoe_ifindex = dev->ifindex; 672 po->pppoe_ifindex = dev->ifindex;
675 pn = pppoe_pernet(dev_net(dev)); 673 pn = pppoe_pernet(net);
676 write_lock_bh(&pn->hash_lock);
677 if (!(dev->flags & IFF_UP)) { 674 if (!(dev->flags & IFF_UP)) {
678 write_unlock_bh(&pn->hash_lock);
679 goto err_put; 675 goto err_put;
680 } 676 }
681 677
@@ -683,6 +679,7 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
683 &sp->sa_addr.pppoe, 679 &sp->sa_addr.pppoe,
684 sizeof(struct pppoe_addr)); 680 sizeof(struct pppoe_addr));
685 681
682 write_lock_bh(&pn->hash_lock);
686 error = __set_item(pn, po); 683 error = __set_item(pn, po);
687 write_unlock_bh(&pn->hash_lock); 684 write_unlock_bh(&pn->hash_lock);
688 if (error < 0) 685 if (error < 0)
@@ -696,8 +693,11 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
696 po->chan.ops = &pppoe_chan_ops; 693 po->chan.ops = &pppoe_chan_ops;
697 694
698 error = ppp_register_net_channel(dev_net(dev), &po->chan); 695 error = ppp_register_net_channel(dev_net(dev), &po->chan);
699 if (error) 696 if (error) {
697 delete_item(pn, po->pppoe_pa.sid,
698 po->pppoe_pa.remote, po->pppoe_ifindex);
700 goto err_put; 699 goto err_put;
700 }
701 701
702 sk->sk_state = PPPOX_CONNECTED; 702 sk->sk_state = PPPOX_CONNECTED;
703 } 703 }
@@ -915,6 +915,14 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb)
915 struct pppoe_hdr *ph; 915 struct pppoe_hdr *ph;
916 int data_len = skb->len; 916 int data_len = skb->len;
917 917
918 /* The higher-level PPP code (ppp_unregister_channel()) ensures the PPP
919 * xmit operations conclude prior to an unregistration call. Thus
920 * sk->sk_state cannot change, so we don't need to do lock_sock().
921 * But, we also can't do a lock_sock since that introduces a potential
922 * deadlock as we'd reverse the lock ordering used when calling
923 * ppp_unregister_channel().
924 */
925
918 if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) 926 if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
919 goto abort; 927 goto abort;
920 928
@@ -944,7 +952,6 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb)
944 po->pppoe_pa.remote, NULL, data_len); 952 po->pppoe_pa.remote, NULL, data_len);
945 953
946 dev_queue_xmit(skb); 954 dev_queue_xmit(skb);
947
948 return 1; 955 return 1;
949 956
950abort: 957abort:
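
The pppoe_flush_dev() comment above documents a fixed lock order (socket lock first, then hash_lock), which forces the scan to drop the hash lock, pin the socket, unbind under the socket lock, and restart the chain. A sketch of that idiom in isolation, assuming hypothetical find_bound_to_dev()/unbind_from_dev() helpers for the inline code in the hunk; it is not a drop-in replacement:

	static void flush_one_chain(struct pppoe_net *pn, int i, struct net_device *dev)
	{
		struct pppox_sock *po;
		struct sock *sk;

		write_lock_bh(&pn->hash_lock);
		while ((po = find_bound_to_dev(pn->hash_table[i], dev)) != NULL) {
			sk = sk_pppox(po);
			sock_hold(sk);                  /* keep sk alive across the unlock */
			write_unlock_bh(&pn->hash_lock);

			lock_sock(sk);                  /* sock lock first, then hash_lock */
			if (po->pppoe_dev == dev)       /* re-check: state may have changed */
				unbind_from_dev(po, dev);
			release_sock(sk);
			sock_put(sk);

			/* the chain may have changed while unlocked: rescan from the head */
			write_lock_bh(&pn->hash_lock);
		}
		write_unlock_bh(&pn->hash_lock);
	}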
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index e7285f01bd04..c2383adcd527 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -95,6 +95,7 @@ enum {
95 95
96 /* Misc. stuff */ 96 /* Misc. stuff */
97 MAILBOX_COUNT = 16, 97 MAILBOX_COUNT = 16,
98 MAILBOX_TIMEOUT = 5,
98 99
99 PROC_ADDR_RDY = (1 << 31), 100 PROC_ADDR_RDY = (1 << 31),
100 PROC_ADDR_R = (1 << 30), 101 PROC_ADDR_R = (1 << 30),
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 48b45df85ec9..a2fc70a0d0cc 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -3916,6 +3916,9 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
3916 goto err_out; 3916 goto err_out;
3917 } 3917 }
3918 3918
3919 /* Set PCIe reset type for EEH to fundamental. */
3920 pdev->needs_freset = 1;
3921 pci_save_state(pdev);
3919 qdev->reg_base = 3922 qdev->reg_base =
3920 ioremap_nocache(pci_resource_start(pdev, 1), 3923 ioremap_nocache(pci_resource_start(pdev, 1),
3921 pci_resource_len(pdev, 1)); 3924 pci_resource_len(pdev, 1));
@@ -4070,6 +4073,33 @@ static void __devexit qlge_remove(struct pci_dev *pdev)
4070 free_netdev(ndev); 4073 free_netdev(ndev);
4071} 4074}
4072 4075
4076/* Clean up resources without touching hardware. */
4077static void ql_eeh_close(struct net_device *ndev)
4078{
4079 int i;
4080 struct ql_adapter *qdev = netdev_priv(ndev);
4081
4082 if (netif_carrier_ok(ndev)) {
4083 netif_carrier_off(ndev);
4084 netif_stop_queue(ndev);
4085 }
4086
4087 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
4088 cancel_delayed_work_sync(&qdev->asic_reset_work);
4089 cancel_delayed_work_sync(&qdev->mpi_reset_work);
4090 cancel_delayed_work_sync(&qdev->mpi_work);
4091 cancel_delayed_work_sync(&qdev->mpi_idc_work);
4092 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
4093
4094 for (i = 0; i < qdev->rss_ring_count; i++)
4095 netif_napi_del(&qdev->rx_ring[i].napi);
4096
4097 clear_bit(QL_ADAPTER_UP, &qdev->flags);
4098 ql_tx_ring_clean(qdev);
4099 ql_free_rx_buffers(qdev);
4100 ql_release_adapter_resources(qdev);
4101}
4102
4073/* 4103/*
4074 * This callback is called by the PCI subsystem whenever 4104 * This callback is called by the PCI subsystem whenever
4075 * a PCI bus error is detected. 4105 * a PCI bus error is detected.
@@ -4078,17 +4108,21 @@ static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4078 enum pci_channel_state state) 4108 enum pci_channel_state state)
4079{ 4109{
4080 struct net_device *ndev = pci_get_drvdata(pdev); 4110 struct net_device *ndev = pci_get_drvdata(pdev);
4081 struct ql_adapter *qdev = netdev_priv(ndev);
4082
4083 netif_device_detach(ndev);
4084 4111
4085 if (state == pci_channel_io_perm_failure) 4112 switch (state) {
4113 case pci_channel_io_normal:
4114 return PCI_ERS_RESULT_CAN_RECOVER;
4115 case pci_channel_io_frozen:
4116 netif_device_detach(ndev);
4117 if (netif_running(ndev))
4118 ql_eeh_close(ndev);
4119 pci_disable_device(pdev);
4120 return PCI_ERS_RESULT_NEED_RESET;
4121 case pci_channel_io_perm_failure:
4122 dev_err(&pdev->dev,
4123 "%s: pci_channel_io_perm_failure.\n", __func__);
4086 return PCI_ERS_RESULT_DISCONNECT; 4124 return PCI_ERS_RESULT_DISCONNECT;
4087 4125 }
4088 if (netif_running(ndev))
4089 ql_adapter_down(qdev);
4090
4091 pci_disable_device(pdev);
4092 4126
4093 /* Request a slot reset. */ 4127 /* Request a slot reset. */
4094 return PCI_ERS_RESULT_NEED_RESET; 4128 return PCI_ERS_RESULT_NEED_RESET;
@@ -4105,25 +4139,15 @@ static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4105 struct net_device *ndev = pci_get_drvdata(pdev); 4139 struct net_device *ndev = pci_get_drvdata(pdev);
4106 struct ql_adapter *qdev = netdev_priv(ndev); 4140 struct ql_adapter *qdev = netdev_priv(ndev);
4107 4141
4142 pdev->error_state = pci_channel_io_normal;
4143
4144 pci_restore_state(pdev);
4108 if (pci_enable_device(pdev)) { 4145 if (pci_enable_device(pdev)) {
4109 QPRINTK(qdev, IFUP, ERR, 4146 QPRINTK(qdev, IFUP, ERR,
4110 "Cannot re-enable PCI device after reset.\n"); 4147 "Cannot re-enable PCI device after reset.\n");
4111 return PCI_ERS_RESULT_DISCONNECT; 4148 return PCI_ERS_RESULT_DISCONNECT;
4112 } 4149 }
4113
4114 pci_set_master(pdev); 4150 pci_set_master(pdev);
4115
4116 netif_carrier_off(ndev);
4117 ql_adapter_reset(qdev);
4118
4119 /* Make sure the EEPROM is good */
4120 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
4121
4122 if (!is_valid_ether_addr(ndev->perm_addr)) {
4123 QPRINTK(qdev, IFUP, ERR, "After reset, invalid MAC address.\n");
4124 return PCI_ERS_RESULT_DISCONNECT;
4125 }
4126
4127 return PCI_ERS_RESULT_RECOVERED; 4151 return PCI_ERS_RESULT_RECOVERED;
4128} 4152}
4129 4153
@@ -4131,17 +4155,21 @@ static void qlge_io_resume(struct pci_dev *pdev)
4131{ 4155{
4132 struct net_device *ndev = pci_get_drvdata(pdev); 4156 struct net_device *ndev = pci_get_drvdata(pdev);
4133 struct ql_adapter *qdev = netdev_priv(ndev); 4157 struct ql_adapter *qdev = netdev_priv(ndev);
4158 int err = 0;
4134 4159
4135 pci_set_master(pdev); 4160 if (ql_adapter_reset(qdev))
4136 4161 QPRINTK(qdev, DRV, ERR, "reset FAILED!\n");
4137 if (netif_running(ndev)) { 4162 if (netif_running(ndev)) {
4138 if (ql_adapter_up(qdev)) { 4163 err = qlge_open(ndev);
4164 if (err) {
4139 QPRINTK(qdev, IFUP, ERR, 4165 QPRINTK(qdev, IFUP, ERR,
4140 "Device initialization failed after reset.\n"); 4166 "Device initialization failed after reset.\n");
4141 return; 4167 return;
4142 } 4168 }
4169 } else {
4170 QPRINTK(qdev, IFUP, ERR,
4171 "Device was not running prior to EEH.\n");
4143 } 4172 }
4144
4145 netif_device_attach(ndev); 4173 netif_device_attach(ndev);
4146} 4174}
4147 4175
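
Because the qlge EEH hunk above interleaves removed and added lines, the new error_detected() decision is easier to read collapsed into one view; ql_eeh_close() is the helper added by the patch, the surrounding variables are assumed context:

	switch (state) {
	case pci_channel_io_normal:             /* I/O still possible */
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:             /* I/O blocked: quiesce and reset */
		netif_device_detach(ndev);
		if (netif_running(ndev))
			ql_eeh_close(ndev);     /* free resources, do not touch hw */
		pci_disable_device(pdev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:       /* card is gone */
	default:
		return PCI_ERS_RESULT_DISCONNECT;
	}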
diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c
index 99e58e3f8e22..aec05f266107 100644
--- a/drivers/net/qlge/qlge_mpi.c
+++ b/drivers/net/qlge/qlge_mpi.c
@@ -470,7 +470,8 @@ end:
470 */ 470 */
471static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp) 471static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp)
472{ 472{
473 int status, count; 473 int status;
474 unsigned long count;
474 475
475 476
476 /* Begin polled mode for MPI */ 477 /* Begin polled mode for MPI */
@@ -491,14 +492,14 @@ static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp)
491 /* Wait for the command to complete. We loop 492 /* Wait for the command to complete. We loop
492 * here because some AEN might arrive while 493 * here because some AEN might arrive while
493 * we're waiting for the mailbox command to 494 * we're waiting for the mailbox command to
494 * complete. If more than 5 arrive then we can 495 * complete. If more than 5 seconds expire we can
495 * assume something is wrong. */ 496 * assume something is wrong. */
496 count = 5; 497 count = jiffies + HZ * MAILBOX_TIMEOUT;
497 do { 498 do {
498 /* Wait for the interrupt to come in. */ 499 /* Wait for the interrupt to come in. */
499 status = ql_wait_mbx_cmd_cmplt(qdev); 500 status = ql_wait_mbx_cmd_cmplt(qdev);
500 if (status) 501 if (status)
501 goto end; 502 continue;
502 503
503 /* Process the event. If it's an AEN, it 504 /* Process the event. If it's an AEN, it
504 * will be handled in-line or a worker 505 * will be handled in-line or a worker
@@ -517,15 +518,15 @@ static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp)
517 MB_CMD_STS_GOOD) || 518 MB_CMD_STS_GOOD) ||
518 ((mbcp->mbox_out[0] & 0x0000f000) == 519 ((mbcp->mbox_out[0] & 0x0000f000) ==
519 MB_CMD_STS_INTRMDT)) 520 MB_CMD_STS_INTRMDT))
520 break; 521 goto done;
521 } while (--count); 522 } while (time_before(jiffies, count));
522 523
523 if (!count) { 524 QPRINTK(qdev, DRV, ERR,
524 QPRINTK(qdev, DRV, ERR, 525 "Timed out waiting for mailbox complete.\n");
525 "Timed out waiting for mailbox complete.\n"); 526 status = -ETIMEDOUT;
526 status = -ETIMEDOUT; 527 goto end;
527 goto end; 528
528 } 529done:
529 530
530 /* Now we can clear the interrupt condition 531 /* Now we can clear the interrupt condition
531 * and look at our status. 532 * and look at our status.
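
The qlge mailbox change above replaces a fixed retry count with a real 5-second deadline. The underlying pattern, stripped of the AEN handling (poll_once() is a hypothetical stand-in for one ql_wait_mbx_cmd_cmplt()/event-processing pass; MAILBOX_TIMEOUT is the new constant from qlge.h):

	unsigned long deadline = jiffies + HZ * MAILBOX_TIMEOUT;
	int status = -ETIMEDOUT;

	do {
		if (poll_once(qdev) == 0) {     /* 0: mailbox command completed */
			status = 0;
			break;
		}
	} while (time_before(jiffies, deadline));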
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 83c47d95c3aa..fa4935678488 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -1029,7 +1029,10 @@ static void rtl8169_vlan_rx_register(struct net_device *dev,
1029 1029
1030 spin_lock_irqsave(&tp->lock, flags); 1030 spin_lock_irqsave(&tp->lock, flags);
1031 tp->vlgrp = grp; 1031 tp->vlgrp = grp;
1032 if (tp->vlgrp) 1032 /*
1033 * Do not disable RxVlan on 8110SCd.
1034 */
1035 if (tp->vlgrp || (tp->mac_version == RTL_GIGA_MAC_VER_05))
1033 tp->cp_cmd |= RxVlan; 1036 tp->cp_cmd |= RxVlan;
1034 else 1037 else
1035 tp->cp_cmd &= ~RxVlan; 1038 tp->cp_cmd &= ~RxVlan;
@@ -3197,6 +3200,14 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3197 } 3200 }
3198 3201
3199 rtl8169_init_phy(dev, tp); 3202 rtl8169_init_phy(dev, tp);
3203
3204 /*
3205 * Pretend we are using VLANs; This bypasses a nasty bug where
3206 * Interrupts stop flowing on high load on 8110SCd controllers.
3207 */
3208 if (tp->mac_version == RTL_GIGA_MAC_VER_05)
3209 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | RxVlan);
3210
3200 device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL); 3211 device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);
3201 3212
3202out: 3213out:
@@ -3368,7 +3379,7 @@ static u16 rtl_rw_cpluscmd(void __iomem *ioaddr)
3368static void rtl_set_rx_max_size(void __iomem *ioaddr, unsigned int rx_buf_sz) 3379static void rtl_set_rx_max_size(void __iomem *ioaddr, unsigned int rx_buf_sz)
3369{ 3380{
3370 /* Low hurts. Let's disable the filtering. */ 3381 /* Low hurts. Let's disable the filtering. */
3371 RTL_W16(RxMaxSize, rx_buf_sz); 3382 RTL_W16(RxMaxSize, rx_buf_sz + 1);
3372} 3383}
3373 3384
3374static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version) 3385static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version)
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index 01f9432c31ef..98bff5ada09a 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -444,7 +444,8 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
444 * the appropriate LRO method 444 * the appropriate LRO method
445 */ 445 */
446static void efx_rx_packet_lro(struct efx_channel *channel, 446static void efx_rx_packet_lro(struct efx_channel *channel,
447 struct efx_rx_buffer *rx_buf) 447 struct efx_rx_buffer *rx_buf,
448 bool checksummed)
448{ 449{
449 struct napi_struct *napi = &channel->napi_str; 450 struct napi_struct *napi = &channel->napi_str;
450 451
@@ -466,7 +467,8 @@ static void efx_rx_packet_lro(struct efx_channel *channel,
466 skb->len = rx_buf->len; 467 skb->len = rx_buf->len;
467 skb->data_len = rx_buf->len; 468 skb->data_len = rx_buf->len;
468 skb->truesize += rx_buf->len; 469 skb->truesize += rx_buf->len;
469 skb->ip_summed = CHECKSUM_UNNECESSARY; 470 skb->ip_summed =
471 checksummed ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
470 472
471 napi_gro_frags(napi); 473 napi_gro_frags(napi);
472 474
@@ -475,6 +477,7 @@ out:
475 rx_buf->page = NULL; 477 rx_buf->page = NULL;
476 } else { 478 } else {
477 EFX_BUG_ON_PARANOID(!rx_buf->skb); 479 EFX_BUG_ON_PARANOID(!rx_buf->skb);
480 EFX_BUG_ON_PARANOID(!checksummed);
478 481
479 napi_gro_receive(napi, rx_buf->skb); 482 napi_gro_receive(napi, rx_buf->skb);
480 rx_buf->skb = NULL; 483 rx_buf->skb = NULL;
@@ -570,7 +573,7 @@ void __efx_rx_packet(struct efx_channel *channel,
570 } 573 }
571 574
572 if (likely(checksummed || rx_buf->page)) { 575 if (likely(checksummed || rx_buf->page)) {
573 efx_rx_packet_lro(channel, rx_buf); 576 efx_rx_packet_lro(channel, rx_buf, checksummed);
574 goto done; 577 goto done;
575 } 578 }
576 579
diff --git a/drivers/net/sfc/sfe4001.c b/drivers/net/sfc/sfe4001.c
index cee00ad49b57..49eb91b5f50c 100644
--- a/drivers/net/sfc/sfe4001.c
+++ b/drivers/net/sfc/sfe4001.c
@@ -188,7 +188,7 @@ static int sfn4111t_reset(struct efx_nic *efx)
188 efx_oword_t reg; 188 efx_oword_t reg;
189 189
190 /* GPIO 3 and the GPIO register are shared with I2C, so block that */ 190 /* GPIO 3 and the GPIO register are shared with I2C, so block that */
191 mutex_lock(&efx->i2c_adap.bus_lock); 191 i2c_lock_adapter(&efx->i2c_adap);
192 192
193 /* Pull RST_N (GPIO 2) low then let it up again, setting the 193 /* Pull RST_N (GPIO 2) low then let it up again, setting the
194 * FLASH_CFG_1 strap (GPIO 3) appropriately. Only change the 194 * FLASH_CFG_1 strap (GPIO 3) appropriately. Only change the
@@ -204,7 +204,7 @@ static int sfn4111t_reset(struct efx_nic *efx)
204 falcon_write(efx, &reg, GPIO_CTL_REG_KER); 204 falcon_write(efx, &reg, GPIO_CTL_REG_KER);
205 msleep(1); 205 msleep(1);
206 206
207 mutex_unlock(&efx->i2c_adap.bus_lock); 207 i2c_unlock_adapter(&efx->i2c_adap);
208 208
209 ssleep(1); 209 ssleep(1);
210 return 0; 210 return 0;
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index f49d0800c1d1..528b912a4b0d 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -30,6 +30,7 @@
30#include <linux/phy.h> 30#include <linux/phy.h>
31#include <linux/cache.h> 31#include <linux/cache.h>
32#include <linux/io.h> 32#include <linux/io.h>
33#include <asm/cacheflush.h>
33 34
34#include "sh_eth.h" 35#include "sh_eth.h"
35 36
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 2ab5c39f33ca..6a10d7ba5877 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -4538,6 +4538,8 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
4538 goto err_out_free_netdev; 4538 goto err_out_free_netdev;
4539 } 4539 }
4540 4540
4541 netif_carrier_off(dev);
4542
4541 netif_napi_add(dev, &hw->napi, sky2_poll, NAPI_WEIGHT); 4543 netif_napi_add(dev, &hw->napi, sky2_poll, NAPI_WEIGHT);
4542 4544
4543 err = request_irq(pdev->irq, sky2_intr, 4545 err = request_irq(pdev->irq, sky2_intr,
diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c
index 36cb2423bcf1..75fa32e34fd0 100644
--- a/drivers/net/tokenring/ibmtr.c
+++ b/drivers/net/tokenring/ibmtr.c
@@ -1144,9 +1144,16 @@ static void dir_open_adapter (struct net_device *dev)
1144 } else { 1144 } else {
1145 char **prphase = printphase; 1145 char **prphase = printphase;
1146 char **prerror = printerror; 1146 char **prerror = printerror;
1147 int pnr = err / 16 - 1;
1148 int enr = err % 16 - 1;
1147 DPRINTK("TR Adapter misc open failure, error code = "); 1149 DPRINTK("TR Adapter misc open failure, error code = ");
1148 printk("0x%x, Phase: %s, Error: %s\n", 1150 if (pnr < 0 || pnr >= ARRAY_SIZE(printphase) ||
1149 err, prphase[err/16 -1], prerror[err%16 -1]); 1151 enr < 0 ||
1152 enr >= ARRAY_SIZE(printerror))
1153 printk("0x%x, invalid Phase/Error.", err);
1154 else
1155 printk("0x%x, Phase: %s, Error: %s\n", err,
1156 prphase[pnr], prerror[enr]);
1150 printk(" retrying after %ds delay...\n", 1157 printk(" retrying after %ds delay...\n",
1151 TR_RETRY_INTERVAL/HZ); 1158 TR_RETRY_INTERVAL/HZ);
1152 } 1159 }
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index c47237c2d638..32d93564a74d 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -174,7 +174,7 @@ config USB_NET_CDCETHER
174 * Ericsson Mobile Broadband Module (all variants) 174 * Ericsson Mobile Broadband Module (all variants)
175 * Motorola (DM100 and SB4100) 175 * Motorola (DM100 and SB4100)
176 * Broadcom Cable Modem (reference design) 176 * Broadcom Cable Modem (reference design)
177 * Toshiba (PCX1100U and F3507g) 177 * Toshiba (PCX1100U and F3507g/F3607gw)
178 * ... 178 * ...
179 179
180 This driver creates an interface named "ethX", where X depends on 180 This driver creates an interface named "ethX", where X depends on
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 4a6aff579403..21e1ba160008 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -544,20 +544,60 @@ static const struct usb_device_id products [] = {
544 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), 544 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
545 .driver_info = (unsigned long) &cdc_info, 545 .driver_info = (unsigned long) &cdc_info,
546}, { 546}, {
547 /* Ericsson F3307 */ 547 /* Ericsson F3607gw ver 2 */
548 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1905, USB_CLASS_COMM,
549 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
550 .driver_info = (unsigned long) &cdc_info,
551}, {
552 /* Ericsson F3607gw ver 3 */
548 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1906, USB_CLASS_COMM, 553 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1906, USB_CLASS_COMM,
549 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), 554 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
550 .driver_info = (unsigned long) &cdc_info, 555 .driver_info = (unsigned long) &cdc_info,
551}, { 556}, {
557 /* Ericsson F3307 */
558 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x190a, USB_CLASS_COMM,
559 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
560 .driver_info = (unsigned long) &cdc_info,
561}, {
562 /* Ericsson F3307 ver 2 */
563 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1909, USB_CLASS_COMM,
564 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
565 .driver_info = (unsigned long) &cdc_info,
566}, {
567 /* Ericsson C3607w */
568 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1049, USB_CLASS_COMM,
569 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
570 .driver_info = (unsigned long) &cdc_info,
571}, {
552 /* Toshiba F3507g */ 572 /* Toshiba F3507g */
553 USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x130b, USB_CLASS_COMM, 573 USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x130b, USB_CLASS_COMM,
554 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), 574 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
555 .driver_info = (unsigned long) &cdc_info, 575 .driver_info = (unsigned long) &cdc_info,
556}, { 576}, {
577 /* Toshiba F3607gw */
578 USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x130c, USB_CLASS_COMM,
579 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
580 .driver_info = (unsigned long) &cdc_info,
581}, {
582 /* Toshiba F3607gw ver 2 */
583 USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x1311, USB_CLASS_COMM,
584 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
585 .driver_info = (unsigned long) &cdc_info,
586}, {
557 /* Dell F3507g */ 587 /* Dell F3507g */
558 USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x8147, USB_CLASS_COMM, 588 USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x8147, USB_CLASS_COMM,
559 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), 589 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
560 .driver_info = (unsigned long) &cdc_info, 590 .driver_info = (unsigned long) &cdc_info,
591}, {
592 /* Dell F3607gw */
593 USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x8183, USB_CLASS_COMM,
594 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
595 .driver_info = (unsigned long) &cdc_info,
596}, {
597 /* Dell F3607gw ver 2 */
598 USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x8184, USB_CLASS_COMM,
599 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
600 .driver_info = (unsigned long) &cdc_info,
561}, 601},
562 { }, // END 602 { }, // END
563}; 603};
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 72470f77f556..a2b30a10064f 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -649,6 +649,10 @@ static const struct usb_device_id products[] = {
649 USB_DEVICE(0x0fe6, 0x8101), /* DM9601 USB to Fast Ethernet Adapter */ 649 USB_DEVICE(0x0fe6, 0x8101), /* DM9601 USB to Fast Ethernet Adapter */
650 .driver_info = (unsigned long)&dm9601_info, 650 .driver_info = (unsigned long)&dm9601_info,
651 }, 651 },
652 {
653 USB_DEVICE(0x0a46, 0x9000), /* DM9000E */
654 .driver_info = (unsigned long)&dm9601_info,
655 },
652 {}, // END 656 {}, // END
653}; 657};
654 658
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index 0caa8008c51c..f56dec6119c3 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -362,12 +362,12 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags)
362 retval = -EINVAL; 362 retval = -EINVAL;
363 goto halt_fail_and_release; 363 goto halt_fail_and_release;
364 } 364 }
365 dev->hard_mtu = tmp;
366 net->mtu = dev->hard_mtu - net->hard_header_len;
367 dev_warn(&intf->dev, 365 dev_warn(&intf->dev,
368 "dev can't take %u byte packets (max %u), " 366 "dev can't take %u byte packets (max %u), "
369 "adjusting MTU to %u\n", 367 "adjusting MTU to %u\n",
370 dev->hard_mtu, tmp, net->mtu); 368 dev->hard_mtu, tmp, tmp - net->hard_header_len);
369 dev->hard_mtu = tmp;
370 net->mtu = dev->hard_mtu - net->hard_header_len;
371 } 371 }
372 372
373 /* REVISIT: peripheral "alignment" request is ignored ... */ 373 /* REVISIT: peripheral "alignment" request is ignored ... */
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 3709d6af9abf..b9e002fccbca 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -516,8 +516,7 @@ again:
516 /* Free up any pending old buffers before queueing new ones. */ 516 /* Free up any pending old buffers before queueing new ones. */
517 free_old_xmit_skbs(vi); 517 free_old_xmit_skbs(vi);
518 518
519 /* Put new one in send queue and do transmit */ 519 /* Try to transmit */
520 __skb_queue_head(&vi->send, skb);
521 capacity = xmit_skb(vi, skb); 520 capacity = xmit_skb(vi, skb);
522 521
523 /* This can happen with OOM and indirect buffers. */ 522 /* This can happen with OOM and indirect buffers. */
@@ -531,8 +530,17 @@ again:
531 } 530 }
532 return NETDEV_TX_BUSY; 531 return NETDEV_TX_BUSY;
533 } 532 }
534
535 vi->svq->vq_ops->kick(vi->svq); 533 vi->svq->vq_ops->kick(vi->svq);
534
535 /*
536 * Put new one in send queue. You'd expect we'd need this before
537 * xmit_skb calls add_buf(), since the callback can be triggered
538 * immediately after that. But since the callback just triggers
539 * another call back here, normal network xmit locking prevents the
540 * race.
541 */
542 __skb_queue_head(&vi->send, skb);
543
536 /* Don't wait up for transmitted skbs to be freed. */ 544 /* Don't wait up for transmitted skbs to be freed. */
537 skb_orphan(skb); 545 skb_orphan(skb);
538 nf_reset(skb); 546 nf_reset(skb);
@@ -990,7 +998,7 @@ static unsigned int features[] = {
990 VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, 998 VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
991}; 999};
992 1000
993static struct virtio_driver virtio_net = { 1001static struct virtio_driver virtio_net_driver = {
994 .feature_table = features, 1002 .feature_table = features,
995 .feature_table_size = ARRAY_SIZE(features), 1003 .feature_table_size = ARRAY_SIZE(features),
996 .driver.name = KBUILD_MODNAME, 1004 .driver.name = KBUILD_MODNAME,
@@ -1003,12 +1011,12 @@ static struct virtio_driver virtio_net = {
1003 1011
1004static int __init init(void) 1012static int __init init(void)
1005{ 1013{
1006 return register_virtio_driver(&virtio_net); 1014 return register_virtio_driver(&virtio_net_driver);
1007} 1015}
1008 1016
1009static void __exit fini(void) 1017static void __exit fini(void)
1010{ 1018{
1011 unregister_virtio_driver(&virtio_net); 1019 unregister_virtio_driver(&virtio_net_driver);
1012} 1020}
1013module_init(init); 1021module_init(init);
1014module_exit(fini); 1022module_exit(fini);
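
The virtio_net hunk above moves __skb_queue_head() to after the kick, and the new comment explains why this does not race with the completion callback. The resulting fast-path order, condensed with error handling omitted (names as in the hunk):

	free_old_xmit_skbs(vi);                 /* reclaim completed buffers */
	capacity = xmit_skb(vi, skb);           /* hand the new packet to the ring */
	if (unlikely(capacity < 0))
		return NETDEV_TX_BUSY;          /* OOM / indirect buffer failure */
	vi->svq->vq_ops->kick(vi->svq);         /* notify the host */
	__skb_queue_head(&vi->send, skb);       /* safe: xmit locking serialises us
						 * with the completion callback */
	skb_orphan(skb);
	nf_reset(skb);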
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 7116a1aa20ce..abf896a7390e 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -4790,9 +4790,8 @@ static int proc_stats_rid_open( struct inode *inode,
4790static int get_dec_u16( char *buffer, int *start, int limit ) { 4790static int get_dec_u16( char *buffer, int *start, int limit ) {
4791 u16 value; 4791 u16 value;
4792 int valid = 0; 4792 int valid = 0;
4793 for( value = 0; buffer[*start] >= '0' && 4793 for (value = 0; *start < limit && buffer[*start] >= '0' &&
4794 buffer[*start] <= '9' && 4794 buffer[*start] <= '9'; (*start)++) {
4795 *start < limit; (*start)++ ) {
4796 valid = 1; 4795 valid = 1;
4797 value *= 10; 4796 value *= 10;
4798 value += buffer[*start] - '0'; 4797 value += buffer[*start] - '0';
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index 16a271787b85..1895d63aad0a 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -679,7 +679,7 @@ static u8 ath_rc_get_highest_rix(struct ath_softc *sc,
679 return rate; 679 return rate;
680 680
681 if (rate_table->info[rate].valid_single_stream && 681 if (rate_table->info[rate].valid_single_stream &&
682 !(ath_rc_priv->ht_cap & WLAN_RC_DS_FLAG)); 682 !(ath_rc_priv->ht_cap & WLAN_RC_DS_FLAG))
683 return rate; 683 return rate;
684 684
685 /* This should not happen */ 685 /* This should not happen */
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index 8701034569fa..de4e804bedf0 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -1157,8 +1157,9 @@ struct b43_dmaring *parse_cookie(struct b43_wldev *dev, u16 cookie, int *slot)
1157} 1157}
1158 1158
1159static int dma_tx_fragment(struct b43_dmaring *ring, 1159static int dma_tx_fragment(struct b43_dmaring *ring,
1160 struct sk_buff *skb) 1160 struct sk_buff **in_skb)
1161{ 1161{
1162 struct sk_buff *skb = *in_skb;
1162 const struct b43_dma_ops *ops = ring->ops; 1163 const struct b43_dma_ops *ops = ring->ops;
1163 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1164 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1164 u8 *header; 1165 u8 *header;
@@ -1224,8 +1225,14 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
1224 } 1225 }
1225 1226
1226 memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len); 1227 memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len);
1228 memcpy(bounce_skb->cb, skb->cb, sizeof(skb->cb));
1229 bounce_skb->dev = skb->dev;
1230 skb_set_queue_mapping(bounce_skb, skb_get_queue_mapping(skb));
1231 info = IEEE80211_SKB_CB(bounce_skb);
1232
1227 dev_kfree_skb_any(skb); 1233 dev_kfree_skb_any(skb);
1228 skb = bounce_skb; 1234 skb = bounce_skb;
1235 *in_skb = bounce_skb;
1229 meta->skb = skb; 1236 meta->skb = skb;
1230 meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1); 1237 meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
1231 if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) { 1238 if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
@@ -1355,7 +1362,11 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
1355 * static, so we don't need to store it per frame. */ 1362 * static, so we don't need to store it per frame. */
1356 ring->queue_prio = skb_get_queue_mapping(skb); 1363 ring->queue_prio = skb_get_queue_mapping(skb);
1357 1364
1358 err = dma_tx_fragment(ring, skb); 1365 /* dma_tx_fragment might reallocate the skb, so invalidate pointers pointing
1366 * into the skb data or cb now. */
1367 hdr = NULL;
1368 info = NULL;
1369 err = dma_tx_fragment(ring, &skb);
1359 if (unlikely(err == -ENOKEY)) { 1370 if (unlikely(err == -ENOKEY)) {
1360 /* Drop this packet, as we don't have the encryption key 1371 /* Drop this packet, as we don't have the encryption key
1361 * anymore and must not transmit it unencrypted. */ 1372 * anymore and must not transmit it unencrypted. */
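
dma_tx_fragment() above may replace the caller's skb with a bounce buffer, which is why it now takes struct sk_buff ** and why b43_dma_tx() invalidates its cached pointers before the call. A minimal sketch of that contract, assuming hypothetical helpers for the bounce-copy details shown in the hunk:

	static int tx_one(struct b43_dmaring *ring, struct sk_buff **in_skb)
	{
		struct sk_buff *skb = *in_skb;
		struct sk_buff *bounce_skb;

		if (needs_bounce_buffer(skb)) {             /* hypothetical predicate */
			bounce_skb = alloc_bounce_copy(skb);  /* hypothetical: copies data */
			if (!bounce_skb)
				return -ENOMEM;
			copy_skb_metadata(bounce_skb, skb);   /* cb, dev, queue mapping */
			dev_kfree_skb_any(skb);
			skb = bounce_skb;
			*in_skb = bounce_skb;     /* caller's pointer must follow the swap */
		}
		return map_and_queue(ring, skb);            /* hypothetical DMA submit */
	}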
diff --git a/drivers/net/wireless/b43/leds.h b/drivers/net/wireless/b43/leds.h
index 4c56187810fc..32b66d53cdac 100644
--- a/drivers/net/wireless/b43/leds.h
+++ b/drivers/net/wireless/b43/leds.h
@@ -1,6 +1,7 @@
1#ifndef B43_LEDS_H_ 1#ifndef B43_LEDS_H_
2#define B43_LEDS_H_ 2#define B43_LEDS_H_
3 3
4struct b43_wl;
4struct b43_wldev; 5struct b43_wldev;
5 6
6#ifdef CONFIG_B43_LEDS 7#ifdef CONFIG_B43_LEDS
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index df6b26a0c05e..86f35827f008 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -4501,7 +4501,6 @@ static void b43_op_stop(struct ieee80211_hw *hw)
4501 4501
4502 cancel_work_sync(&(wl->beacon_update_trigger)); 4502 cancel_work_sync(&(wl->beacon_update_trigger));
4503 4503
4504 wiphy_rfkill_stop_polling(hw->wiphy);
4505 mutex_lock(&wl->mutex); 4504 mutex_lock(&wl->mutex);
4506 if (b43_status(dev) >= B43_STAT_STARTED) { 4505 if (b43_status(dev) >= B43_STAT_STARTED) {
4507 dev = b43_wireless_core_stop(dev); 4506 dev = b43_wireless_core_stop(dev);
diff --git a/drivers/net/wireless/b43/rfkill.c b/drivers/net/wireless/b43/rfkill.c
index 7a3218c5ba7d..ffdce6f3c909 100644
--- a/drivers/net/wireless/b43/rfkill.c
+++ b/drivers/net/wireless/b43/rfkill.c
@@ -33,7 +33,8 @@ bool b43_is_hw_radio_enabled(struct b43_wldev *dev)
33 & B43_MMIO_RADIO_HWENABLED_HI_MASK)) 33 & B43_MMIO_RADIO_HWENABLED_HI_MASK))
34 return 1; 34 return 1;
35 } else { 35 } else {
36 if (b43_read16(dev, B43_MMIO_RADIO_HWENABLED_LO) 36 if (b43_status(dev) >= B43_STAT_STARTED &&
37 b43_read16(dev, B43_MMIO_RADIO_HWENABLED_LO)
37 & B43_MMIO_RADIO_HWENABLED_LO_MASK) 38 & B43_MMIO_RADIO_HWENABLED_LO_MASK)
38 return 1; 39 return 1;
39 } 40 }
diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c
index cb8be8d7abc1..5b3672c4d0cc 100644
--- a/drivers/net/wireless/libertas/if_spi.c
+++ b/drivers/net/wireless/libertas/if_spi.c
@@ -134,7 +134,7 @@ static void spu_transaction_finish(struct if_spi_card *card)
134static int spu_write(struct if_spi_card *card, u16 reg, const u8 *buf, int len) 134static int spu_write(struct if_spi_card *card, u16 reg, const u8 *buf, int len)
135{ 135{
136 int err = 0; 136 int err = 0;
137 u16 reg_out = cpu_to_le16(reg | IF_SPI_WRITE_OPERATION_MASK); 137 __le16 reg_out = cpu_to_le16(reg | IF_SPI_WRITE_OPERATION_MASK);
138 struct spi_message m; 138 struct spi_message m;
139 struct spi_transfer reg_trans; 139 struct spi_transfer reg_trans;
140 struct spi_transfer data_trans; 140 struct spi_transfer data_trans;
@@ -166,7 +166,7 @@ static int spu_write(struct if_spi_card *card, u16 reg, const u8 *buf, int len)
166 166
167static inline int spu_write_u16(struct if_spi_card *card, u16 reg, u16 val) 167static inline int spu_write_u16(struct if_spi_card *card, u16 reg, u16 val)
168{ 168{
169 u16 buff; 169 __le16 buff;
170 170
171 buff = cpu_to_le16(val); 171 buff = cpu_to_le16(val);
172 return spu_write(card, reg, (u8 *)&buff, sizeof(u16)); 172 return spu_write(card, reg, (u8 *)&buff, sizeof(u16));
@@ -188,7 +188,7 @@ static int spu_read(struct if_spi_card *card, u16 reg, u8 *buf, int len)
188{ 188{
189 unsigned int delay; 189 unsigned int delay;
190 int err = 0; 190 int err = 0;
191 u16 reg_out = cpu_to_le16(reg | IF_SPI_READ_OPERATION_MASK); 191 __le16 reg_out = cpu_to_le16(reg | IF_SPI_READ_OPERATION_MASK);
192 struct spi_message m; 192 struct spi_message m;
193 struct spi_transfer reg_trans; 193 struct spi_transfer reg_trans;
194 struct spi_transfer dummy_trans; 194 struct spi_transfer dummy_trans;
@@ -235,7 +235,7 @@ static int spu_read(struct if_spi_card *card, u16 reg, u8 *buf, int len)
235/* Read 16 bits from an SPI register */ 235/* Read 16 bits from an SPI register */
236static inline int spu_read_u16(struct if_spi_card *card, u16 reg, u16 *val) 236static inline int spu_read_u16(struct if_spi_card *card, u16 reg, u16 *val)
237{ 237{
238 u16 buf; 238 __le16 buf;
239 int ret; 239 int ret;
240 240
241 ret = spu_read(card, reg, (u8 *)&buf, sizeof(buf)); 241 ret = spu_read(card, reg, (u8 *)&buf, sizeof(buf));
@@ -248,7 +248,7 @@ static inline int spu_read_u16(struct if_spi_card *card, u16 reg, u16 *val)
248 * The low 16 bits are read first. */ 248 * The low 16 bits are read first. */
249static int spu_read_u32(struct if_spi_card *card, u16 reg, u32 *val) 249static int spu_read_u32(struct if_spi_card *card, u16 reg, u32 *val)
250{ 250{
251 u32 buf; 251 __le32 buf;
252 int err; 252 int err;
253 253
254 err = spu_read(card, reg, (u8 *)&buf, sizeof(buf)); 254 err = spu_read(card, reg, (u8 *)&buf, sizeof(buf));
diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c
index 92bc8c5f1ca2..3fac4efa5ac8 100644
--- a/drivers/net/wireless/libertas/if_usb.c
+++ b/drivers/net/wireless/libertas/if_usb.c
@@ -508,7 +508,7 @@ static int __if_usb_submit_rx_urb(struct if_usb_card *cardp,
 	/* Fill the receive configuration URB and initialise the Rx call back */
 	usb_fill_bulk_urb(cardp->rx_urb, cardp->udev,
 			  usb_rcvbulkpipe(cardp->udev, cardp->ep_in),
-			  (void *) (skb->tail + (size_t) IPFIELD_ALIGN_OFFSET),
+			  skb->data + IPFIELD_ALIGN_OFFSET,
 			  MRVDRV_ETH_RX_PACKET_BUFFER_SIZE, callbackfn,
 			  cardp);
 
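The if_usb.c change swaps a cast of skb->tail for skb->data: with NET_SKBUFF_DATA_USES_OFFSET (64-bit builds) skb->tail is an offset rather than a pointer, so the old cast handed the URB a bogus address, while for a freshly allocated skb with nothing pushed yet skb->data already points at the same place. A hedged sketch of the intended Rx setup; RX_BUF_SIZE, ALIGN_OFFSET and the other names are placeholders, not the driver's identifiers:

	struct sk_buff *skb = dev_alloc_skb(RX_BUF_SIZE + ALIGN_OFFSET);

	if (!skb)
		return -ENOMEM;
	usb_fill_bulk_urb(urb, udev, usb_rcvbulkpipe(udev, ep_in),
			  skb->data + ALIGN_OFFSET,	/* valid on 32- and 64-bit */
			  RX_BUF_SIZE, rx_complete, priv);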
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 88cd58eb3b9f..1c88c2ea59aa 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -2879,7 +2879,7 @@ static int write_essid(struct file *file, const char __user *buffer,
 			  unsigned long count, void *data)
 {
 	static char proc_essid[33];
-	int len = count;
+	unsigned int len = count;
 
 	if (len > 32)
 		len = 32;
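The ray_cs.c fix is about sign, not size: count arrives as unsigned long, and storing it in a signed int lets a value such as 0x80000000 go negative, sail past the len > 32 clamp, and then be converted back into an enormous size_t by the copy. With an unsigned len the clamp actually bounds the copy. Roughly (a sketch of the handler's shape, not its exact body):

	unsigned int len = count;

	if (len > 32)
		len = 32;
	if (copy_from_user(proc_essid, buffer, len))
		return -EFAULT;
	proc_essid[len] = '\0';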
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index a084077a1c61..9fe770f7d7bb 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -1994,7 +1994,7 @@ static void rt2800usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
 	rt2x00_set_field32(&word, TXWI_W1_BW_WIN_SIZE, txdesc->ba_size);
 	rt2x00_set_field32(&word, TXWI_W1_WIRELESS_CLI_ID,
 			   test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags) ?
-			   (skbdesc->entry->entry_idx + 1) : 0xff);
+			   txdesc->key_idx : 0xff);
 	rt2x00_set_field32(&word, TXWI_W1_MPDU_TOTAL_BYTE_COUNT,
 			   skb->len - txdesc->l2pad);
 	rt2x00_set_field32(&word, TXWI_W1_PACKETID,
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 71761b343839..73bbec58341e 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -815,6 +815,8 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
 
 	mutex_init(&rt2x00dev->csr_mutex);
 
+	set_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
+
 	/*
 	 * Make room for rt2x00_intf inside the per-interface
 	 * structure ieee80211_vif.
@@ -871,8 +873,6 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
 	rt2x00leds_register(rt2x00dev);
 	rt2x00debug_register(rt2x00dev);
 
-	set_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
-
 	return 0;
 
 exit:
diff --git a/drivers/net/wireless/rt2x00/rt2x00link.c b/drivers/net/wireless/rt2x00/rt2x00link.c
index c64db0ba7f40..c708d0be9155 100644
--- a/drivers/net/wireless/rt2x00/rt2x00link.c
+++ b/drivers/net/wireless/rt2x00/rt2x00link.c
@@ -362,8 +362,9 @@ void rt2x00link_start_tuner(struct rt2x00_dev *rt2x00dev)
 
 	rt2x00link_reset_tuner(rt2x00dev, false);
 
-	ieee80211_queue_delayed_work(rt2x00dev->hw,
-				     &link->work, LINK_TUNE_INTERVAL);
+	if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
+		ieee80211_queue_delayed_work(rt2x00dev->hw,
+					     &link->work, LINK_TUNE_INTERVAL);
 }
 
 void rt2x00link_stop_tuner(struct rt2x00_dev *rt2x00dev)
@@ -469,8 +470,10 @@ static void rt2x00link_tuner(struct work_struct *work)
 	 * Increase tuner counter, and reschedule the next link tuner run.
 	 */
 	link->count++;
-	ieee80211_queue_delayed_work(rt2x00dev->hw,
-				     &link->work, LINK_TUNE_INTERVAL);
+
+	if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
+		ieee80211_queue_delayed_work(rt2x00dev->hw,
+					     &link->work, LINK_TUNE_INTERVAL);
 }
 
 void rt2x00link_register(struct rt2x00_dev *rt2x00dev)
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index 501544882c2c..f02b48a90593 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -47,6 +47,8 @@ int rt2x00usb_vendor_request(struct rt2x00_dev *rt2x00dev,
 	    (requesttype == USB_VENDOR_REQUEST_IN) ?
 	    usb_rcvctrlpipe(usb_dev, 0) : usb_sndctrlpipe(usb_dev, 0);
 
+	if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
+		return -ENODEV;
 
 	for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
 		status = usb_control_msg(usb_dev, pipe, request, requesttype,
@@ -60,8 +62,10 @@ int rt2x00usb_vendor_request(struct rt2x00_dev *rt2x00dev,
 		 * -ENODEV: Device has disappeared, no point continuing.
 		 * All other errors: Try again.
 		 */
-		else if (status == -ENODEV)
+		else if (status == -ENODEV) {
+			clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
 			break;
+		}
 	}
 
 	ERROR(rt2x00dev,
@@ -161,6 +165,9 @@ int rt2x00usb_regbusy_read(struct rt2x00_dev *rt2x00dev,
 {
 	unsigned int i;
 
+	if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
+		return -ENODEV;
+
 	for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
 		rt2x00usb_register_read_lock(rt2x00dev, offset, reg);
 		if (!rt2x00_get_field32(*reg, field))
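Read together, the rt2x00 hunks form one hot-unplug guard: probe sets DEVICE_STATE_PRESENT before anything can schedule work, the USB layer clears it the first time a control transfer returns -ENODEV, and both the link tuner and the register accessors check it before starting more I/O. Compressed into a sketch (dev, hw and link stand in for the real structures):

	/* probe */
	set_bit(DEVICE_STATE_PRESENT, &dev->flags);

	/* vendor request path */
	if (!test_bit(DEVICE_STATE_PRESENT, &dev->flags))
		return -ENODEV;
	status = usb_control_msg(usb_dev, pipe, request, requesttype,
				 value, offset, buffer, buffer_length, timeout);
	if (status == -ENODEV)		/* device is gone, remember that */
		clear_bit(DEVICE_STATE_PRESENT, &dev->flags);

	/* periodic work only reschedules itself while the device is there */
	if (test_bit(DEVICE_STATE_PRESENT, &dev->flags))
		ieee80211_queue_delayed_work(hw, &link->work, LINK_TUNE_INTERVAL);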
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index b8f5ee33445e..14e7bb210075 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -2389,10 +2389,13 @@ static struct usb_device_id rt73usb_device_table[] = {
 	{ USB_DEVICE(0x13b1, 0x0023), USB_DEVICE_DATA(&rt73usb_ops) },
 	{ USB_DEVICE(0x13b1, 0x0028), USB_DEVICE_DATA(&rt73usb_ops) },
 	/* MSI */
+	{ USB_DEVICE(0x0db0, 0x4600), USB_DEVICE_DATA(&rt73usb_ops) },
 	{ USB_DEVICE(0x0db0, 0x6877), USB_DEVICE_DATA(&rt73usb_ops) },
 	{ USB_DEVICE(0x0db0, 0x6874), USB_DEVICE_DATA(&rt73usb_ops) },
 	{ USB_DEVICE(0x0db0, 0xa861), USB_DEVICE_DATA(&rt73usb_ops) },
 	{ USB_DEVICE(0x0db0, 0xa874), USB_DEVICE_DATA(&rt73usb_ops) },
+	/* Ovislink */
+	{ USB_DEVICE(0x1b75, 0x7318), USB_DEVICE_DATA(&rt73usb_ops) },
 	/* Ralink */
 	{ USB_DEVICE(0x04bb, 0x093d), USB_DEVICE_DATA(&rt73usb_ops) },
 	{ USB_DEVICE(0x148f, 0x2573), USB_DEVICE_DATA(&rt73usb_ops) },
@@ -2420,6 +2423,8 @@ static struct usb_device_id rt73usb_device_table[] = {
 	/* Planex */
 	{ USB_DEVICE(0x2019, 0xab01), USB_DEVICE_DATA(&rt73usb_ops) },
 	{ USB_DEVICE(0x2019, 0xab50), USB_DEVICE_DATA(&rt73usb_ops) },
+	/* WideTell */
+	{ USB_DEVICE(0x7167, 0x3840), USB_DEVICE_DATA(&rt73usb_ops) },
 	/* Zcom */
 	{ USB_DEVICE(0x0cde, 0x001c), USB_DEVICE_DATA(&rt73usb_ops) },
 	/* ZyXEL */
diff --git a/drivers/net/wireless/rtl818x/rtl8187_leds.c b/drivers/net/wireless/rtl818x/rtl8187_leds.c
index a1c670fc1552..cf8a4a40fdf6 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_leds.c
+++ b/drivers/net/wireless/rtl818x/rtl8187_leds.c
@@ -210,10 +210,10 @@ void rtl8187_leds_exit(struct ieee80211_hw *dev)
 
 	/* turn the LED off before exiting */
 	ieee80211_queue_delayed_work(dev, &priv->led_off, 0);
-	cancel_delayed_work_sync(&priv->led_off);
-	cancel_delayed_work_sync(&priv->led_on);
 	rtl8187_unregister_led(&priv->led_rx);
 	rtl8187_unregister_led(&priv->led_tx);
+	cancel_delayed_work_sync(&priv->led_off);
+	cancel_delayed_work_sync(&priv->led_on);
 }
 #endif /* def CONFIG_RTL8187_LED */
 
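The rtl8187_leds.c reordering closes a small teardown race: unregistering an LED class device can still invoke its brightness callback, and in this driver that callback queues the led_off work, so cancelling the delayed work before the unregister calls could leave a work item pending after exit. Cancelling last, once both LEDs are gone, guarantees nothing can re-queue the work; in sketch form the final teardown order is:

	ieee80211_queue_delayed_work(dev, &priv->led_off, 0);	/* final "off" */
	rtl8187_unregister_led(&priv->led_rx);
	rtl8187_unregister_led(&priv->led_tx);
	cancel_delayed_work_sync(&priv->led_off);	/* nothing can requeue now */
	cancel_delayed_work_sync(&priv->led_on);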