 drivers/isdn/i4l/isdn_net.h                   |    6
 drivers/net/Kconfig                           |    5
 drivers/net/Makefile                          |    1
 drivers/net/au1000_eth.c                      |    1
 drivers/net/benet/be_cmds.c                   |   33
 drivers/net/benet/be_cmds.h                   |    5
 drivers/net/benet/be_main.c                   |   27
 drivers/net/bonding/bond_main.c               |    8
 drivers/net/can/sja1000/sja1000_of_platform.c |    1
 drivers/net/dm9000.h                          |    2
 drivers/net/e1000e/e1000.h                    |   12
 drivers/net/e1000e/hw.h                       |    2
 drivers/net/e1000e/ich8lan.c                  |  150
 drivers/net/e1000e/phy.c                      |  469
 drivers/net/ethoc.c                           |   21
 drivers/net/fec.c                             |    2
 drivers/net/fec_mpc52xx.c                     |    6
 drivers/net/fec_mpc52xx_phy.c                 |    1
 drivers/net/fs_enet/fs_enet-main.c            |    1
 drivers/net/fs_enet/mii-bitbang.c             |    1
 drivers/net/fs_enet/mii-fec.c                 |    1
 drivers/net/fsl_pq_mdio.c                     |    1
 drivers/net/gianfar.c                         |    4
 drivers/net/ibm_newemac/core.c                |    2
 drivers/net/ifb.c                             |    3
 drivers/net/igb/igb_ethtool.c                 |   35
 drivers/net/igbvf/ethtool.c                   |   30
 drivers/net/ixgbe/ixgbe_ethtool.c             |   22
 drivers/net/ks8851.c                          |   42
 drivers/net/ks8851.h                          |    1
 drivers/net/myri10ge/myri10ge.c               |   17
 drivers/net/netxen/netxen_nic_hdr.h           |    1
 drivers/net/netxen/netxen_nic_hw.c            |   14
 drivers/net/netxen/netxen_nic_init.c          |    8
 drivers/net/netxen/netxen_nic_main.c          |    1
 drivers/net/niu.c                             |    2
 drivers/net/phy/mdio-gpio.c                   |    1
 drivers/net/pppoe.c                           |  129
 drivers/net/r8169.c                           |   13
 drivers/net/sh_eth.c                          |    2
 drivers/net/stmmac/Kconfig                    |   53
 drivers/net/stmmac/Makefile                   |    4
 drivers/net/stmmac/common.h                   |  330
 drivers/net/stmmac/descs.h                    |  163
 drivers/net/stmmac/gmac.c                     |  693
 drivers/net/stmmac/gmac.h                     |  204
 drivers/net/stmmac/mac100.c                   |  517
 drivers/net/stmmac/mac100.h                   |  116
 drivers/net/stmmac/stmmac.h                   |   98
 drivers/net/stmmac/stmmac_ethtool.c           |  395
 drivers/net/stmmac/stmmac_main.c              | 2204
 drivers/net/stmmac/stmmac_mdio.c              |  217
 drivers/net/stmmac/stmmac_timer.c             |  140
 drivers/net/stmmac/stmmac_timer.h             |   41
 drivers/net/virtio_net.c                      |    2
 drivers/net/vmxnet3/vmxnet3_drv.c             |   38
 drivers/net/vmxnet3/vmxnet3_int.h             |    2
 include/net/inet_timewait_sock.h              |    8
 net/bluetooth/hci_sysfs.c                     |    4
 net/bluetooth/l2cap.c                         |    9
 net/core/pktgen.c                             |   15
 net/ipv4/inet_connection_sock.c               |   34
 net/ipv4/ip_sockglue.c                        |    7
 net/ipv4/tcp.c                                |   59
 net/ipv4/tcp_minisocks.c                      |    5
 net/ipv6/ipv6_sockglue.c                      |    6
 net/unix/af_unix.c                            |    2
 67 files changed, 6046 insertions(+), 403 deletions(-)
diff --git a/drivers/isdn/i4l/isdn_net.h b/drivers/isdn/i4l/isdn_net.h
index 74032d0881ef..7511f08effa5 100644
--- a/drivers/isdn/i4l/isdn_net.h
+++ b/drivers/isdn/i4l/isdn_net.h
@@ -83,19 +83,19 @@ static __inline__ isdn_net_local * isdn_net_get_locked_lp(isdn_net_dev *nd)
 
 	spin_lock_irqsave(&nd->queue_lock, flags);
 	lp = nd->queue; /* get lp on top of queue */
-	spin_lock(&nd->queue->xmit_lock);
 	while (isdn_net_lp_busy(nd->queue)) {
-		spin_unlock(&nd->queue->xmit_lock);
 		nd->queue = nd->queue->next;
 		if (nd->queue == lp) { /* not found -- should never happen */
 			lp = NULL;
 			goto errout;
 		}
-		spin_lock(&nd->queue->xmit_lock);
 	}
 	lp = nd->queue;
 	nd->queue = nd->queue->next;
+	spin_unlock_irqrestore(&nd->queue_lock, flags);
+	spin_lock(&lp->xmit_lock);
 	local_bh_disable();
+	return lp;
 errout:
 	spin_unlock_irqrestore(&nd->queue_lock, flags);
 	return lp;
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 04fb8b0ca3e6..e012c2e0825a 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1741,6 +1741,7 @@ config KS8851
 config KS8851_MLL
 	tristate "Micrel KS8851 MLL"
 	depends on HAS_IOMEM
+	select MII
 	help
 	  This platform driver is for Micrel KS8851 Address/data bus
 	  multiplexed network chip.
@@ -2482,6 +2483,8 @@ config S6GMAC
 	  To compile this driver as a module, choose M here. The module
 	  will be called s6gmac.
 
+source "drivers/net/stmmac/Kconfig"
+
 endif # NETDEV_1000
 
 #
@@ -3232,7 +3235,7 @@ config VIRTIO_NET
 
 config VMXNET3
 	tristate "VMware VMXNET3 ethernet driver"
-	depends on PCI && X86
+	depends on PCI && X86 && INET
 	help
 	  This driver supports VMware's vmxnet3 virtual ethernet NIC.
 	  To compile this driver as a module, choose M here: the
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index fc6c8bb92c50..246323d7f161 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -100,6 +100,7 @@ obj-$(CONFIG_VIA_VELOCITY) += via-velocity.o
 obj-$(CONFIG_ADAPTEC_STARFIRE) += starfire.o
 obj-$(CONFIG_RIONET) += rionet.o
 obj-$(CONFIG_SH_ETH) += sh_eth.o
+obj-$(CONFIG_STMMAC_ETH) += stmmac/
 
 #
 # end link order section
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 04f63c77071d..ce6f1ac25df8 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -34,6 +34,7 @@
  *
  *
  */
+#include <linux/capability.h>
 #include <linux/dma-mapping.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 25b6602e464c..827d86b5e70b 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -243,15 +243,26 @@ static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
 
 int be_cmd_POST(struct be_adapter *adapter)
 {
-	u16 stage, error;
+	u16 stage;
+	int status, timeout = 0;
 
-	error = be_POST_stage_get(adapter, &stage);
-	if (error || stage != POST_STAGE_ARMFW_RDY) {
-		dev_err(&adapter->pdev->dev, "POST failed.\n");
-		return -1;
-	}
+	do {
+		status = be_POST_stage_get(adapter, &stage);
+		if (status) {
+			dev_err(&adapter->pdev->dev, "POST error; stage=0x%x\n",
+				stage);
+			return -1;
+		} else if (stage != POST_STAGE_ARMFW_RDY) {
+			set_current_state(TASK_INTERRUPTIBLE);
+			schedule_timeout(2 * HZ);
+			timeout += 2;
+		} else {
+			return 0;
+		}
+	} while (timeout < 20);
 
-	return 0;
+	dev_err(&adapter->pdev->dev, "POST timeout; stage=0x%x\n", stage);
+	return -1;
 }
 
 static inline void *embedded_payload(struct be_mcc_wrb *wrb)
@@ -729,8 +740,8 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
 /* Create an rx filtering policy configuration on an i/f
  * Uses mbox
  */
-int be_cmd_if_create(struct be_adapter *adapter, u32 flags, u8 *mac,
-		bool pmac_invalid, u32 *if_handle, u32 *pmac_id)
+int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
+		u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id)
 {
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_if_create *req;
@@ -746,8 +757,8 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 flags, u8 *mac,
 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
 		OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));
 
-	req->capability_flags = cpu_to_le32(flags);
-	req->enable_flags = cpu_to_le32(flags);
+	req->capability_flags = cpu_to_le32(cap_flags);
+	req->enable_flags = cpu_to_le32(en_flags);
 	req->pmac_invalid = pmac_invalid;
 	if (!pmac_invalid)
 		memcpy(req->mac_addr, mac, ETH_ALEN);
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index a1e78cc3e171..fe9f535eff12 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -753,8 +753,9 @@ extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
 extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
 			u32 if_id, u32 *pmac_id);
 extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id);
-extern int be_cmd_if_create(struct be_adapter *adapter, u32 if_flags, u8 *mac,
-			bool pmac_invalid, u32 *if_handle, u32 *pmac_id);
+extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags,
+			u32 en_flags, u8 *mac, bool pmac_invalid,
+			u32 *if_handle, u32 *pmac_id);
 extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle);
 extern int be_cmd_eq_create(struct be_adapter *adapter,
 		struct be_queue_info *eq, int eq_delay);
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index e0f9d6477184..21b0657de9e8 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -1616,19 +1616,22 @@ static int be_open(struct net_device *netdev)
 static int be_setup(struct be_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
-	u32 if_flags;
+	u32 cap_flags, en_flags;
 	int status;
 
-	if_flags = BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_PROMISCUOUS |
-		BE_IF_FLAGS_MCAST_PROMISCUOUS | BE_IF_FLAGS_UNTAGGED |
-		BE_IF_FLAGS_PASS_L3L4_ERRORS;
-	status = be_cmd_if_create(adapter, if_flags, netdev->dev_addr,
-			false/* pmac_invalid */, &adapter->if_handle,
-			&adapter->pmac_id);
+	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
+			BE_IF_FLAGS_MCAST_PROMISCUOUS |
+			BE_IF_FLAGS_PROMISCUOUS |
+			BE_IF_FLAGS_PASS_L3L4_ERRORS;
+	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
+			BE_IF_FLAGS_PASS_L3L4_ERRORS;
+
+	status = be_cmd_if_create(adapter, cap_flags, en_flags,
+			netdev->dev_addr, false/* pmac_invalid */,
+			&adapter->if_handle, &adapter->pmac_id);
 	if (status != 0)
 		goto do_none;
 
-
 	status = be_tx_queues_create(adapter);
 	if (status != 0)
 		goto if_destroy;
@@ -2051,6 +2054,10 @@ static int be_hw_up(struct be_adapter *adapter)
 	if (status)
 		return status;
 
+	status = be_cmd_reset_function(adapter);
+	if (status)
+		return status;
+
 	status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
 	if (status)
 		return status;
@@ -2104,10 +2111,6 @@ static int __devinit be_probe(struct pci_dev *pdev,
 	if (status)
 		goto free_netdev;
 
-	status = be_cmd_reset_function(adapter);
-	if (status)
-		goto ctrl_clean;
-
 	status = be_stats_init(adapter);
 	if (status)
 		goto ctrl_clean;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index feb03ad0d803..3adbeed2c057 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3704,10 +3704,10 @@ static int bond_xmit_hash_policy_l23(struct sk_buff *skb,
 
 	if (skb->protocol == htons(ETH_P_IP)) {
 		return ((ntohl(iph->saddr ^ iph->daddr) & 0xffff) ^
-			(data->h_dest[5] ^ bond_dev->dev_addr[5])) % count;
+			(data->h_dest[5] ^ data->h_source[5])) % count;
 	}
 
-	return (data->h_dest[5] ^ bond_dev->dev_addr[5]) % count;
+	return (data->h_dest[5] ^ data->h_source[5]) % count;
 }
 
 /*
@@ -3734,7 +3734,7 @@ static int bond_xmit_hash_policy_l34(struct sk_buff *skb,
 
 	}
 
-	return (data->h_dest[5] ^ bond_dev->dev_addr[5]) % count;
+	return (data->h_dest[5] ^ data->h_source[5]) % count;
 }
 
 /*
@@ -3745,7 +3745,7 @@ static int bond_xmit_hash_policy_l2(struct sk_buff *skb,
 {
 	struct ethhdr *data = (struct ethhdr *)skb->data;
 
-	return (data->h_dest[5] ^ bond_dev->dev_addr[5]) % count;
+	return (data->h_dest[5] ^ data->h_source[5]) % count;
 }
 
 /*-------------------------- Device entry points ----------------------------*/
diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c
index 3373560405ba..9dd076a626a5 100644
--- a/drivers/net/can/sja1000/sja1000_of_platform.c
+++ b/drivers/net/can/sja1000/sja1000_of_platform.c
@@ -213,6 +213,7 @@ static struct of_device_id __devinitdata sja1000_ofp_table[] = {
 	{.compatible = "nxp,sja1000"},
 	{},
 };
+MODULE_DEVICE_TABLE(of, sja1000_ofp_table);
 
 static struct of_platform_driver sja1000_ofp_driver = {
 	.owner = THIS_MODULE,
diff --git a/drivers/net/dm9000.h b/drivers/net/dm9000.h
index 80817c2edfb3..fb1c924d79b4 100644
--- a/drivers/net/dm9000.h
+++ b/drivers/net/dm9000.h
@@ -50,7 +50,7 @@
 #define DM9000_RCSR		0x32
 
 #define CHIPR_DM9000A	0x19
-#define CHIPR_DM9000B	0x1B
+#define CHIPR_DM9000B	0x1A
 
 #define DM9000_MRCMDX	0xF0
 #define DM9000_MRCMD	0xF2
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index 1211df9ae883..08a4f9dd20e9 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -518,9 +518,13 @@ extern s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw);
 extern s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw);
 extern s32 e1000e_get_phy_info_igp(struct e1000_hw *hw);
 extern s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data);
+extern s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset,
+					  u16 *data);
 extern s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw);
 extern s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active);
 extern s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data);
+extern s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset,
+					   u16 data);
 extern s32 e1000e_phy_sw_reset(struct e1000_hw *hw);
 extern s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw);
 extern s32 e1000e_get_cfg_done(struct e1000_hw *hw);
@@ -537,7 +541,11 @@ extern s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data);
 extern s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data);
 extern void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl);
 extern s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data);
+extern s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset,
+					u16 data);
 extern s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data);
+extern s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset,
+				       u16 *data);
 extern s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
 			       u32 usec_interval, bool *success);
 extern s32 e1000e_phy_reset_dsp(struct e1000_hw *hw);
@@ -545,7 +553,11 @@ extern s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
 extern s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
 extern s32 e1000e_check_downshift(struct e1000_hw *hw);
 extern s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data);
+extern s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset,
+					u16 *data);
 extern s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data);
+extern s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset,
+					 u16 data);
 extern s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw, bool slow);
 extern s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw);
 extern s32 e1000_copper_link_setup_82577(struct e1000_hw *hw);
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index fd44d9f90769..7b05cf47f7f5 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -764,11 +764,13 @@ struct e1000_phy_operations {
 	s32  (*get_cable_length)(struct e1000_hw *);
 	s32  (*get_phy_info)(struct e1000_hw *);
 	s32  (*read_phy_reg)(struct e1000_hw *, u32, u16 *);
+	s32  (*read_phy_reg_locked)(struct e1000_hw *, u32, u16 *);
 	void (*release_phy)(struct e1000_hw *);
 	s32  (*reset_phy)(struct e1000_hw *);
 	s32  (*set_d0_lplu_state)(struct e1000_hw *, bool);
 	s32  (*set_d3_lplu_state)(struct e1000_hw *, bool);
 	s32  (*write_phy_reg)(struct e1000_hw *, u32, u16);
+	s32  (*write_phy_reg_locked)(struct e1000_hw *, u32, u16);
 	s32  (*cfg_on_link_up)(struct e1000_hw *);
 };
 
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 99df2abf82a9..b6388b9535fd 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -122,6 +122,13 @@
 
 #define HV_LED_CONFIG		PHY_REG(768, 30) /* LED Configuration */
 
+#define SW_FLAG_TIMEOUT		1000 /* SW Semaphore flag timeout in milliseconds */
+
+/* OEM Bits Phy Register */
+#define HV_OEM_BITS		PHY_REG(768, 25)
+#define HV_OEM_BITS_LPLU	0x0004 /* Low Power Link Up */
+#define HV_OEM_BITS_RESTART_AN	0x0400 /* Restart Auto-negotiation */
+
 /* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
 /* Offset 04h HSFSTS */
 union ich8_hws_flash_status {
@@ -200,6 +207,7 @@ static s32 e1000_setup_led_pchlan(struct e1000_hw *hw);
 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw);
 static s32 e1000_led_on_pchlan(struct e1000_hw *hw);
 static s32 e1000_led_off_pchlan(struct e1000_hw *hw);
+static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
 
 static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
 {
@@ -242,7 +250,11 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
 
 	phy->ops.check_polarity = e1000_check_polarity_ife_ich8lan;
 	phy->ops.read_phy_reg = e1000_read_phy_reg_hv;
+	phy->ops.read_phy_reg_locked = e1000_read_phy_reg_hv_locked;
+	phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
+	phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
 	phy->ops.write_phy_reg = e1000_write_phy_reg_hv;
+	phy->ops.write_phy_reg_locked = e1000_write_phy_reg_hv_locked;
 	phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
 
 	phy->id = e1000_phy_unknown;
@@ -303,6 +315,8 @@ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
 	case IGP03E1000_E_PHY_ID:
 		phy->type = e1000_phy_igp_3;
 		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+		phy->ops.read_phy_reg_locked = e1000e_read_phy_reg_igp_locked;
+		phy->ops.write_phy_reg_locked = e1000e_write_phy_reg_igp_locked;
 		break;
 	case IFE_E_PHY_ID:
 	case IFE_PLUS_E_PHY_ID:
@@ -568,12 +582,39 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
 static DEFINE_MUTEX(nvm_mutex);
 
 /**
+ * e1000_acquire_nvm_ich8lan - Acquire NVM mutex
+ * @hw: pointer to the HW structure
+ *
+ * Acquires the mutex for performing NVM operations.
+ **/
+static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
+{
+	mutex_lock(&nvm_mutex);
+
+	return 0;
+}
+
+/**
+ * e1000_release_nvm_ich8lan - Release NVM mutex
+ * @hw: pointer to the HW structure
+ *
+ * Releases the mutex used while performing NVM operations.
+ **/
+static void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
+{
+	mutex_unlock(&nvm_mutex);
+
+	return;
+}
+
+static DEFINE_MUTEX(swflag_mutex);
+
+/**
  * e1000_acquire_swflag_ich8lan - Acquire software control flag
  * @hw: pointer to the HW structure
  *
- * Acquires the software control flag for performing NVM and PHY
- * operations. This is a function pointer entry point only called by
- * read/write routines for the PHY and NVM parts.
+ * Acquires the software control flag for performing PHY and select
+ * MAC CSR accesses.
  **/
 static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
 {
@@ -582,7 +623,7 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
 
 	might_sleep();
 
-	mutex_lock(&nvm_mutex);
+	mutex_lock(&swflag_mutex);
 
 	while (timeout) {
 		extcnf_ctrl = er32(EXTCNF_CTRL);
@@ -599,7 +640,7 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
 		goto out;
 	}
 
-	timeout = PHY_CFG_TIMEOUT * 2;
+	timeout = SW_FLAG_TIMEOUT;
 
 	extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
 	ew32(EXTCNF_CTRL, extcnf_ctrl);
@@ -623,7 +664,7 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
 
 out:
 	if (ret_val)
-		mutex_unlock(&nvm_mutex);
+		mutex_unlock(&swflag_mutex);
 
 	return ret_val;
 }
@@ -632,9 +673,8 @@ out:
  * e1000_release_swflag_ich8lan - Release software control flag
  * @hw: pointer to the HW structure
  *
- * Releases the software control flag for performing NVM and PHY operations.
- * This is a function pointer entry point only called by read/write
- * routines for the PHY and NVM parts.
+ * Releases the software control flag for performing PHY and select
+ * MAC CSR accesses.
  **/
 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
 {
@@ -644,7 +684,9 @@ static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
 	extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
 	ew32(EXTCNF_CTRL, extcnf_ctrl);
 
-	mutex_unlock(&nvm_mutex);
+	mutex_unlock(&swflag_mutex);
+
+	return;
 }
 
 /**
@@ -844,7 +886,7 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
 	u32 i;
 	u32 data, cnf_size, cnf_base_addr, sw_cfg_mask;
 	s32 ret_val;
-	u16 word_addr, reg_data, reg_addr, phy_page = 0;
+	u16 reg, word_addr, reg_data, reg_addr, phy_page = 0;
 
 	ret_val = e1000e_phy_hw_reset_generic(hw);
 	if (ret_val)
@@ -859,6 +901,10 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
 		return ret_val;
 	}
 
+	/* Dummy read to clear the phy wakeup bit after lcd reset */
+	if (hw->mac.type == e1000_pchlan)
+		e1e_rphy(hw, BM_WUC, &reg);
+
 	/*
 	 * Initialize the PHY from the NVM on ICH platforms. This
 	 * is needed due to an issue where the NVM configuration is
@@ -1054,6 +1100,38 @@ static s32 e1000_check_polarity_ife_ich8lan(struct e1000_hw *hw)
 }
 
 /**
+ * e1000_set_lplu_state_pchlan - Set Low Power Link Up state
+ * @hw: pointer to the HW structure
+ * @active: true to enable LPLU, false to disable
+ *
+ * Sets the LPLU state according to the active flag.  For PCH, if OEM write
+ * bit are disabled in the NVM, writing the LPLU bits in the MAC will not set
+ * the phy speed. This function will manually set the LPLU bit and restart
+ * auto-neg as hw would do. D3 and D0 LPLU will call the same function
+ * since it configures the same bit.
+ **/
+static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
+{
+	s32 ret_val = 0;
+	u16 oem_reg;
+
+	ret_val = e1e_rphy(hw, HV_OEM_BITS, &oem_reg);
+	if (ret_val)
+		goto out;
+
+	if (active)
+		oem_reg |= HV_OEM_BITS_LPLU;
+	else
+		oem_reg &= ~HV_OEM_BITS_LPLU;
+
+	oem_reg |= HV_OEM_BITS_RESTART_AN;
+	ret_val = e1e_wphy(hw, HV_OEM_BITS, oem_reg);
+
+out:
+	return ret_val;
+}
+
+/**
  * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
  * @hw: pointer to the HW structure
  * @active: TRUE to enable LPLU, FALSE to disable
@@ -1314,12 +1392,11 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
 	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
 	    (words == 0)) {
 		hw_dbg(hw, "nvm parameter(s) out of bounds\n");
-		return -E1000_ERR_NVM;
+		ret_val = -E1000_ERR_NVM;
+		goto out;
 	}
 
-	ret_val = e1000_acquire_swflag_ich8lan(hw);
-	if (ret_val)
-		goto out;
+	nvm->ops.acquire_nvm(hw);
 
 	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
 	if (ret_val) {
@@ -1345,7 +1422,7 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
 		}
 	}
 
-	e1000_release_swflag_ich8lan(hw);
+	nvm->ops.release_nvm(hw);
 
 out:
 	if (ret_val)
@@ -1603,11 +1680,15 @@ static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
 		return -E1000_ERR_NVM;
 	}
 
+	nvm->ops.acquire_nvm(hw);
+
 	for (i = 0; i < words; i++) {
 		dev_spec->shadow_ram[offset+i].modified = 1;
 		dev_spec->shadow_ram[offset+i].value = data[i];
 	}
 
+	nvm->ops.release_nvm(hw);
+
 	return 0;
 }
 
@@ -1637,9 +1718,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
 	if (nvm->type != e1000_nvm_flash_sw)
 		goto out;
 
-	ret_val = e1000_acquire_swflag_ich8lan(hw);
-	if (ret_val)
-		goto out;
+	nvm->ops.acquire_nvm(hw);
 
 	/*
 	 * We're writing to the opposite bank so if we're on bank 1,
@@ -1657,7 +1736,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
 		old_bank_offset = 0;
 		ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
 		if (ret_val) {
-			e1000_release_swflag_ich8lan(hw);
+			nvm->ops.release_nvm(hw);
 			goto out;
 		}
 	} else {
@@ -1665,7 +1744,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
 		new_bank_offset = 0;
 		ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
 		if (ret_val) {
-			e1000_release_swflag_ich8lan(hw);
+			nvm->ops.release_nvm(hw);
 			goto out;
 		}
 	}
@@ -1723,7 +1802,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
 	if (ret_val) {
 		/* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */
 		hw_dbg(hw, "Flash commit failed.\n");
-		e1000_release_swflag_ich8lan(hw);
+		nvm->ops.release_nvm(hw);
 		goto out;
 	}
 
@@ -1736,7 +1815,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
 		act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
 		ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
 		if (ret_val) {
-			e1000_release_swflag_ich8lan(hw);
+			nvm->ops.release_nvm(hw);
 			goto out;
 		}
 		data &= 0xBFFF;
@@ -1744,7 +1823,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
 							  act_offset * 2 + 1,
 							  (u8)(data >> 8));
 		if (ret_val) {
-			e1000_release_swflag_ich8lan(hw);
+			nvm->ops.release_nvm(hw);
 			goto out;
 		}
 
@@ -1757,7 +1836,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
 	act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
 	ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
 	if (ret_val) {
-		e1000_release_swflag_ich8lan(hw);
+		nvm->ops.release_nvm(hw);
 		goto out;
 	}
 
@@ -1767,7 +1846,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
 		dev_spec->shadow_ram[i].value = 0xFFFF;
 	}
 
-	e1000_release_swflag_ich8lan(hw);
+	nvm->ops.release_nvm(hw);
 
 	/*
 	 * Reload the EEPROM, or else modifications will not appear
@@ -1831,14 +1910,12 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
  **/
 void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw)
 {
+	struct e1000_nvm_info *nvm = &hw->nvm;
 	union ich8_flash_protected_range pr0;
 	union ich8_hws_flash_status hsfsts;
 	u32 gfpreg;
-	s32 ret_val;
 
-	ret_val = e1000_acquire_swflag_ich8lan(hw);
-	if (ret_val)
-		return;
+	nvm->ops.acquire_nvm(hw);
 
 	gfpreg = er32flash(ICH_FLASH_GFPREG);
 
@@ -1859,7 +1936,7 @@ void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw)
 	hsfsts.hsf_status.flockdn = true;
 	ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval);
 
-	e1000_release_swflag_ich8lan(hw);
+	nvm->ops.release_nvm(hw);
 }
 
 /**
@@ -2229,6 +2306,7 @@ static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
  **/
 static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
 {
+	u16 reg;
 	u32 ctrl, icr, kab;
 	s32 ret_val;
 
@@ -2304,6 +2382,9 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
 			hw_dbg(hw, "Auto Read Done did not complete\n");
 		}
 	}
+	/* Dummy read to clear the phy wakeup bit after lcd reset */
+	if (hw->mac.type == e1000_pchlan)
+		e1e_rphy(hw, BM_WUC, &reg);
 
 	/*
 	 * For PCH, this write will make sure that any noise
@@ -2843,9 +2924,8 @@ void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw)
 			 E1000_PHY_CTRL_GBE_DISABLE;
 		ew32(PHY_CTRL, phy_ctrl);
 
-		/* Workaround SWFLAG unexpectedly set during S0->Sx */
 		if (hw->mac.type == e1000_pchlan)
-			udelay(500);
+			e1000_phy_hw_reset_ich8lan(hw);
 	default:
 		break;
 	}
@@ -3113,9 +3193,9 @@ static struct e1000_phy_operations ich8_phy_ops = {
 };
 
 static struct e1000_nvm_operations ich8_nvm_ops = {
-	.acquire_nvm = e1000_acquire_swflag_ich8lan,
+	.acquire_nvm = e1000_acquire_nvm_ich8lan,
 	.read_nvm = e1000_read_nvm_ich8lan,
-	.release_nvm = e1000_release_swflag_ich8lan,
+	.release_nvm = e1000_release_nvm_ich8lan,
 	.update_nvm = e1000_update_nvm_checksum_ich8lan,
 	.valid_led_default = e1000_valid_led_default_ich8lan,
 	.validate_nvm = e1000_validate_nvm_checksum_ich8lan,
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index 994401fd0664..f9d33ab05e97 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -164,16 +164,25 @@ s32 e1000e_get_phy_id(struct e1000_hw *hw)
 		 * MDIC mode. No harm in trying again in this case since
 		 * the PHY ID is unknown at this point anyway
 		 */
+		ret_val = phy->ops.acquire_phy(hw);
+		if (ret_val)
+			goto out;
 		ret_val = e1000_set_mdio_slow_mode_hv(hw, true);
 		if (ret_val)
 			goto out;
+		phy->ops.release_phy(hw);
 
 		retry_count++;
 	}
 out:
 	/* Revert to MDIO fast mode, if applicable */
-	if (retry_count)
+	if (retry_count) {
+		ret_val = phy->ops.acquire_phy(hw);
+		if (ret_val)
+			return ret_val;
 		ret_val = e1000_set_mdio_slow_mode_hv(hw, false);
+		phy->ops.release_phy(hw);
+	}
 
 	return ret_val;
 }
@@ -354,94 +363,173 @@ s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data)
 }
 
 /**
- * e1000e_read_phy_reg_igp - Read igp PHY register
+ * __e1000e_read_phy_reg_igp - Read igp PHY register
  * @hw: pointer to the HW structure
  * @offset: register offset to be read
  * @data: pointer to the read data
+ * @locked: semaphore has already been acquired or not
  *
  * Acquires semaphore, if necessary, then reads the PHY register at offset
- * and storing the retrieved information in data.  Release any acquired
+ * and stores the retrieved information in data.  Release any acquired
  * semaphores before exiting.
  **/
-s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data)
+static s32 __e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data,
+				     bool locked)
 {
-	s32 ret_val;
+	s32 ret_val = 0;
 
-	ret_val = hw->phy.ops.acquire_phy(hw);
-	if (ret_val)
-		return ret_val;
+	if (!locked) {
+		if (!(hw->phy.ops.acquire_phy))
+			goto out;
+
+		ret_val = hw->phy.ops.acquire_phy(hw);
+		if (ret_val)
+			goto out;
+	}
 
 	if (offset > MAX_PHY_MULTI_PAGE_REG) {
 		ret_val = e1000e_write_phy_reg_mdic(hw,
 						    IGP01E1000_PHY_PAGE_SELECT,
 						    (u16)offset);
-		if (ret_val) {
-			hw->phy.ops.release_phy(hw);
-			return ret_val;
-		}
+		if (ret_val)
+			goto release;
 	}
 
 	ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
 					   data);
-
-	hw->phy.ops.release_phy(hw);
 
+release:
+	if (!locked)
+		hw->phy.ops.release_phy(hw);
+out:
 	return ret_val;
 }
 
 /**
+ * e1000e_read_phy_reg_igp - Read igp PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Acquires semaphore then reads the PHY register at offset and stores the
+ * retrieved information in data.
+ * Release the acquired semaphore before exiting.
+ **/
+s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	return __e1000e_read_phy_reg_igp(hw, offset, data, false);
+}
+
+/**
+ * e1000e_read_phy_reg_igp_locked - Read igp PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Reads the PHY register at offset and stores the retrieved information
+ * in data. Assumes semaphore already acquired.
+ **/
+s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	return __e1000e_read_phy_reg_igp(hw, offset, data, true);
+}
+
+/**
  * e1000e_write_phy_reg_igp - Write igp PHY register
  * @hw: pointer to the HW structure
  * @offset: register offset to write to
  * @data: data to write at register offset
+ * @locked: semaphore has already been acquired or not
  *
  * Acquires semaphore, if necessary, then writes the data to PHY register
  * at the offset. Release any acquired semaphores before exiting.
  **/
-s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data)
+static s32 __e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data,
+				      bool locked)
 {
-	s32 ret_val;
+	s32 ret_val = 0;
 
-	ret_val = hw->phy.ops.acquire_phy(hw);
-	if (ret_val)
-		return ret_val;
+	if (!locked) {
+		if (!(hw->phy.ops.acquire_phy))
+			goto out;
+
+		ret_val = hw->phy.ops.acquire_phy(hw);
+		if (ret_val)
+			goto out;
+	}
 
 	if (offset > MAX_PHY_MULTI_PAGE_REG) {
 		ret_val = e1000e_write_phy_reg_mdic(hw,
 						    IGP01E1000_PHY_PAGE_SELECT,
 						    (u16)offset);
-		if (ret_val) {
-			hw->phy.ops.release_phy(hw);
-			return ret_val;
-		}
+		if (ret_val)
+			goto release;
 	}
 
 	ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
 					    data);
 
-	hw->phy.ops.release_phy(hw);
+release:
+	if (!locked)
+		hw->phy.ops.release_phy(hw);
 
+out:
 	return ret_val;
 }
 
 /**
- * e1000e_read_kmrn_reg - Read kumeran register
+ * e1000e_write_phy_reg_igp - Write igp PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Acquires semaphore then writes the data to PHY register
+ * at the offset. Release any acquired semaphores before exiting.
+ **/
+s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	return __e1000e_write_phy_reg_igp(hw, offset, data, false);
+}
+
+/**
+ * e1000e_write_phy_reg_igp_locked - Write igp PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Writes the data to PHY register at the offset.
+ * Assumes semaphore already acquired.
+ **/
+s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	return __e1000e_write_phy_reg_igp(hw, offset, data, true);
+}
+
+/**
+ * __e1000_read_kmrn_reg - Read kumeran register
  * @hw: pointer to the HW structure
  * @offset: register offset to be read
  * @data: pointer to the read data
+ * @locked: semaphore has already been acquired or not
  *
  * Acquires semaphore, if necessary. Then reads the PHY register at offset
  * using the kumeran interface. The information retrieved is stored in data.
  * Release any acquired semaphores before exiting.
  **/
-s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data)
+static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data,
+				 bool locked)
 {
 	u32 kmrnctrlsta;
-	s32 ret_val;
+	s32 ret_val = 0;
 
-	ret_val = hw->phy.ops.acquire_phy(hw);
-	if (ret_val)
-		return ret_val;
+	if (!locked) {
+		if (!(hw->phy.ops.acquire_phy))
+			goto out;
+
+		ret_val = hw->phy.ops.acquire_phy(hw);
+		if (ret_val)
+			goto out;
+	}
 
 	kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
 		       E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
@@ -452,41 +540,111 @@ s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data)
 	kmrnctrlsta = er32(KMRNCTRLSTA);
 	*data = (u16)kmrnctrlsta;
 
-	hw->phy.ops.release_phy(hw);
+	if (!locked)
+		hw->phy.ops.release_phy(hw);
 
+out:
 	return ret_val;
 }
 
 /**
- * e1000e_write_kmrn_reg - Write kumeran register
+ * e1000e_read_kmrn_reg - Read kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Acquires semaphore then reads the PHY register at offset using the
+ * kumeran interface. The information retrieved is stored in data.
+ * Release the acquired semaphore before exiting.
+ **/
+s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	return __e1000_read_kmrn_reg(hw, offset, data, false);
+}
+
+/**
+ * e1000_read_kmrn_reg_locked - Read kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Reads the PHY register at offset using the kumeran interface. The
+ * information retrieved is stored in data.
+ * Assumes semaphore already acquired.
+ **/
+s32 e1000_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	return __e1000_read_kmrn_reg(hw, offset, data, true);
+}
+
+/**
+ * __e1000_write_kmrn_reg - Write kumeran register
  * @hw: pointer to the HW structure
  * @offset: register offset to write to
  * @data: data to write at register offset
+ * @locked: semaphore has already been acquired or not
  *
  * Acquires semaphore, if necessary. Then write the data to PHY register
  * at the offset using the kumeran interface. Release any acquired semaphores
  * before exiting.
  **/
-s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data)
+static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data,
+				  bool locked)
 {
 	u32 kmrnctrlsta;
-	s32 ret_val;
+	s32 ret_val = 0;
 
-	ret_val = hw->phy.ops.acquire_phy(hw);
-	if (ret_val)
-		return ret_val;
+	if (!locked) {
+		if (!(hw->phy.ops.acquire_phy))
+			goto out;
+
+		ret_val = hw->phy.ops.acquire_phy(hw);
+		if (ret_val)
+			goto out;
+	}
 
 	kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
 		       E1000_KMRNCTRLSTA_OFFSET) | data;
 	ew32(KMRNCTRLSTA, kmrnctrlsta);
 
 	udelay(2);
-	hw->phy.ops.release_phy(hw);
 
+	if (!locked)
+		hw->phy.ops.release_phy(hw);
+
+out:
 	return ret_val;
 }
 
 /**
+ * e1000e_write_kmrn_reg - Write kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Acquires semaphore then writes the data to the PHY register at the offset
+ * using the kumeran interface. Release the acquired semaphore before exiting.
+ **/
+s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	return __e1000_write_kmrn_reg(hw, offset, data, false);
+}
+
+/**
+ * e1000_write_kmrn_reg_locked - Write kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Write the data to PHY register at the offset using the kumeran interface.
+ * Assumes semaphore already acquired.
+ **/
+s32 e1000_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	return __e1000_write_kmrn_reg(hw, offset, data, true);
+}
+
+/**
  * e1000_copper_link_setup_82577 - Setup 82577 PHY for copper link
  * @hw: pointer to the HW structure
  *
@@ -2105,6 +2263,10 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
 	u32 page = offset >> IGP_PAGE_SHIFT;
 	u32 page_shift = 0;
 
+	ret_val = hw->phy.ops.acquire_phy(hw);
+	if (ret_val)
+		return ret_val;
+
 	/* Page 800 works differently than the rest so it has its own func */
 	if (page == BM_WUC_PAGE) {
 		ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data,
@@ -2112,10 +2274,6 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
 		goto out;
 	}
 
-	ret_val = hw->phy.ops.acquire_phy(hw);
-	if (ret_val)
-		goto out;
-
 	hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
 
 	if (offset > MAX_PHY_MULTI_PAGE_REG) {
@@ -2135,18 +2293,15 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
 		/* Page is shifted left, PHY expects (page x 32) */
 		ret_val = e1000e_write_phy_reg_mdic(hw, page_select,
 						    (page << page_shift));
-		if (ret_val) {
-			hw->phy.ops.release_phy(hw);
+		if (ret_val)
 			goto out;
-		}
 	}
 
 	ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
 					    data);
 
-	hw->phy.ops.release_phy(hw);
-
 out:
+	hw->phy.ops.release_phy(hw);
 	return ret_val;
 }
 
@@ -2167,6 +2322,10 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
 	u32 page = offset >> IGP_PAGE_SHIFT;
 	u32 page_shift = 0;
 
+	ret_val = hw->phy.ops.acquire_phy(hw);
+	if (ret_val)
+		return ret_val;
+
 	/* Page 800 works differently than the rest so it has its own func */
 	if (page == BM_WUC_PAGE) {
 		ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data,
@@ -2174,10 +2333,6 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
 		goto out;
 	}
 
-	ret_val = hw->phy.ops.acquire_phy(hw);
-	if (ret_val)
-		goto out;
-
 	hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
 
 	if (offset > MAX_PHY_MULTI_PAGE_REG) {
@@ -2197,17 +2352,14 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
 		/* Page is shifted left, PHY expects (page x 32) */
 		ret_val = e1000e_write_phy_reg_mdic(hw, page_select,
 						    (page << page_shift));
-		if (ret_val) {
-			hw->phy.ops.release_phy(hw);
+		if (ret_val)
 			goto out;
-		}
 	}
 
 	ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
 					   data);
-	hw->phy.ops.release_phy(hw);
-
 out:
+	hw->phy.ops.release_phy(hw);
 	return ret_val;
 }
 
@@ -2226,17 +2378,17 @@ s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data)
 	s32 ret_val;
 	u16 page = (u16)(offset >> IGP_PAGE_SHIFT);
 
+	ret_val = hw->phy.ops.acquire_phy(hw);
+	if (ret_val)
+		return ret_val;
+
 	/* Page 800 works differently than the rest so it has its own func */
 	if (page == BM_WUC_PAGE) {
 		ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data,
 							 true);
-		return ret_val;
+		goto out;
 	}
 
-	ret_val = hw->phy.ops.acquire_phy(hw);
-	if (ret_val)
-		return ret_val;
-
 	hw->phy.addr = 1;
 
 	if (offset > MAX_PHY_MULTI_PAGE_REG) {
@@ -2245,16 +2397,14 @@ s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data)
2245 ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT, 2397 ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT,
2246 page); 2398 page);
2247 2399
2248 if (ret_val) { 2400 if (ret_val)
2249 hw->phy.ops.release_phy(hw); 2401 goto out;
2250 return ret_val;
2251 }
2252 } 2402 }
2253 2403
2254 ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, 2404 ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
2255 data); 2405 data);
2406out:
2256 hw->phy.ops.release_phy(hw); 2407 hw->phy.ops.release_phy(hw);
2257
2258 return ret_val; 2408 return ret_val;
2259} 2409}
2260 2410
@@ -2272,17 +2422,17 @@ s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data)
2272 s32 ret_val; 2422 s32 ret_val;
2273 u16 page = (u16)(offset >> IGP_PAGE_SHIFT); 2423 u16 page = (u16)(offset >> IGP_PAGE_SHIFT);
2274 2424
2425 ret_val = hw->phy.ops.acquire_phy(hw);
2426 if (ret_val)
2427 return ret_val;
2428
2275 /* Page 800 works differently than the rest so it has its own func */ 2429 /* Page 800 works differently than the rest so it has its own func */
2276 if (page == BM_WUC_PAGE) { 2430 if (page == BM_WUC_PAGE) {
2277 ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data, 2431 ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data,
2278 false); 2432 false);
2279 return ret_val; 2433 goto out;
2280 } 2434 }
2281 2435
2282 ret_val = hw->phy.ops.acquire_phy(hw);
2283 if (ret_val)
2284 return ret_val;
2285
2286 hw->phy.addr = 1; 2436 hw->phy.addr = 1;
2287 2437
2288 if (offset > MAX_PHY_MULTI_PAGE_REG) { 2438 if (offset > MAX_PHY_MULTI_PAGE_REG) {
@@ -2290,17 +2440,15 @@ s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data)
2290 ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT, 2440 ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT,
2291 page); 2441 page);
2292 2442
2293 if (ret_val) { 2443 if (ret_val)
2294 hw->phy.ops.release_phy(hw); 2444 goto out;
2295 return ret_val;
2296 }
2297 } 2445 }
2298 2446
2299 ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, 2447 ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
2300 data); 2448 data);
2301 2449
2450out:
2302 hw->phy.ops.release_phy(hw); 2451 hw->phy.ops.release_phy(hw);
2303
2304 return ret_val; 2452 return ret_val;
2305} 2453}
2306 2454
@@ -2320,6 +2468,8 @@ s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data)
2320 * 3) Write the address using the address opcode (0x11) 2468 * 3) Write the address using the address opcode (0x11)
2321 * 4) Read or write the data using the data opcode (0x12) 2469 * 4) Read or write the data using the data opcode (0x12)
2322 * 5) Restore 769_17.2 to its original value 2470 * 5) Restore 769_17.2 to its original value
2471 *
2472 * Assumes semaphore already acquired.
2323 **/ 2473 **/
2324static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset, 2474static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
2325 u16 *data, bool read) 2475 u16 *data, bool read)
@@ -2327,20 +2477,12 @@ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
2327 s32 ret_val; 2477 s32 ret_val;
2328 u16 reg = BM_PHY_REG_NUM(offset); 2478 u16 reg = BM_PHY_REG_NUM(offset);
2329 u16 phy_reg = 0; 2479 u16 phy_reg = 0;
2330 u8 phy_acquired = 1;
2331
2332 2480
2333 /* Gig must be disabled for MDIO accesses to page 800 */ 2481 /* Gig must be disabled for MDIO accesses to page 800 */
2334 if ((hw->mac.type == e1000_pchlan) && 2482 if ((hw->mac.type == e1000_pchlan) &&
2335 (!(er32(PHY_CTRL) & E1000_PHY_CTRL_GBE_DISABLE))) 2483 (!(er32(PHY_CTRL) & E1000_PHY_CTRL_GBE_DISABLE)))
2336 hw_dbg(hw, "Attempting to access page 800 while gig enabled\n"); 2484 hw_dbg(hw, "Attempting to access page 800 while gig enabled\n");
2337 2485
2338 ret_val = hw->phy.ops.acquire_phy(hw);
2339 if (ret_val) {
2340 phy_acquired = 0;
2341 goto out;
2342 }
2343
2344 /* All operations in this function are phy address 1 */ 2486 /* All operations in this function are phy address 1 */
2345 hw->phy.addr = 1; 2487 hw->phy.addr = 1;
2346 2488
@@ -2397,8 +2539,6 @@ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
2397 ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg); 2539 ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg);
2398 2540
2399out: 2541out:
2400 if (phy_acquired == 1)
2401 hw->phy.ops.release_phy(hw);
2402 return ret_val; 2542 return ret_val;
2403} 2543}
2404 2544
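The helper above now assumes its caller already holds the PHY semaphore; the page 800 access protocol itself is unchanged: save and set bit 769_17.2, write the register address using the address opcode (0x11), move the data using the data opcode (0x12), then restore 769_17.2. A compilable sketch of that sequence with stubbed MDIO accessors follows; mdic_read/mdic_write and the constants are illustrative stand-ins, not the driver's API.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the MDIO accessors; not the driver's API. */
static int mdic_write(uint32_t reg, uint16_t val)
{
	printf("wr reg %u = 0x%04x\n", reg, val);
	return 0;
}

static int mdic_read(uint32_t reg, uint16_t *val)
{
	(void)reg;
	*val = 0;
	return 0;
}

#define WUC_ENABLE_REG   17      /* page 769, register 17 */
#define WUC_ENABLE_BIT   (1 << 2)
#define WUC_ADDR_OPCODE  0x11
#define WUC_DATA_OPCODE  0x12

/* Caller is assumed to hold the PHY semaphore, as the helper now documents. */
static int access_wakeup_reg(uint16_t reg, uint16_t *data, int read)
{
	uint16_t saved;
	int ret;

	ret = mdic_read(WUC_ENABLE_REG, &saved);
	if (ret)
		return ret;
	ret = mdic_write(WUC_ENABLE_REG, saved | WUC_ENABLE_BIT);
	if (ret)
		return ret;
	ret = mdic_write(WUC_ADDR_OPCODE, reg);	/* 3) write the address */
	if (!ret) {
		if (read)			/* 4) move the data */
			ret = mdic_read(WUC_DATA_OPCODE, data);
		else
			ret = mdic_write(WUC_DATA_OPCODE, *data);
	}
	mdic_write(WUC_ENABLE_REG, saved);	/* 5) restore 769_17 */
	return ret;
}

int main(void)
{
	uint16_t v = 0x1234;
	return access_wakeup_reg(3, &v, 0);
}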
@@ -2439,52 +2579,63 @@ static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active)
2439 return 0; 2579 return 0;
2440} 2580}
2441 2581
2582/**
2583 * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
2584 * @hw: pointer to the HW structure
2585 * @slow: true for slow mode, false for normal mode
2586 *
2587 * Assumes semaphore already acquired.
2588 **/
2442s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw, bool slow) 2589s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw, bool slow)
2443{ 2590{
2444 s32 ret_val = 0; 2591 s32 ret_val = 0;
2445 u16 data = 0; 2592 u16 data = 0;
2446 2593
2447 ret_val = hw->phy.ops.acquire_phy(hw);
2448 if (ret_val)
2449 return ret_val;
2450
2451 /* Set MDIO mode - page 769, register 16: 0x2580==slow, 0x2180==fast */ 2594 /* Set MDIO mode - page 769, register 16: 0x2580==slow, 0x2180==fast */
2452 hw->phy.addr = 1; 2595 hw->phy.addr = 1;
2453 ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 2596 ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
2454 (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT)); 2597 (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT));
2455 if (ret_val) { 2598 if (ret_val)
2456 hw->phy.ops.release_phy(hw); 2599 goto out;
2457 return ret_val; 2600
2458 }
2459 ret_val = e1000e_write_phy_reg_mdic(hw, BM_CS_CTRL1, 2601 ret_val = e1000e_write_phy_reg_mdic(hw, BM_CS_CTRL1,
2460 (0x2180 | (slow << 10))); 2602 (0x2180 | (slow << 10)));
2603 if (ret_val)
2604 goto out;
2461 2605
2462 /* dummy read when reverting to fast mode - throw away result */ 2606 /* dummy read when reverting to fast mode - throw away result */
2463 if (!slow) 2607 if (!slow)
2464 e1000e_read_phy_reg_mdic(hw, BM_CS_CTRL1, &data); 2608 ret_val = e1000e_read_phy_reg_mdic(hw, BM_CS_CTRL1, &data);
2465
2466 hw->phy.ops.release_phy(hw);
2467 2609
2610out:
2468 return ret_val; 2611 return ret_val;
2469} 2612}
2470 2613
2471/** 2614/**
2472 * e1000_read_phy_reg_hv - Read HV PHY register 2615 * __e1000_read_phy_reg_hv - Read HV PHY register
2473 * @hw: pointer to the HW structure 2616 * @hw: pointer to the HW structure
2474 * @offset: register offset to be read 2617 * @offset: register offset to be read
2475 * @data: pointer to the read data 2618 * @data: pointer to the read data
2619 * @locked: semaphore has already been acquired or not
2476 * 2620 *
2477 * Acquires semaphore, if necessary, then reads the PHY register at offset 2621 * Acquires semaphore, if necessary, then reads the PHY register at offset
2478 * and storing the retrieved information in data. Release any acquired 2622 * and stores the retrieved information in data. Release any acquired
2479 * semaphore before exiting. 2623 * semaphore before exiting.
2480 **/ 2624 **/
2481s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data) 2625static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
2626 bool locked)
2482{ 2627{
2483 s32 ret_val; 2628 s32 ret_val;
2484 u16 page = BM_PHY_REG_PAGE(offset); 2629 u16 page = BM_PHY_REG_PAGE(offset);
2485 u16 reg = BM_PHY_REG_NUM(offset); 2630 u16 reg = BM_PHY_REG_NUM(offset);
2486 bool in_slow_mode = false; 2631 bool in_slow_mode = false;
2487 2632
2633 if (!locked) {
2634 ret_val = hw->phy.ops.acquire_phy(hw);
2635 if (ret_val)
2636 return ret_val;
2637 }
2638
2488 /* Workaround failure in MDIO access while cable is disconnected */ 2639 /* Workaround failure in MDIO access while cable is disconnected */
2489 if ((hw->phy.type == e1000_phy_82577) && 2640 if ((hw->phy.type == e1000_phy_82577) &&
2490 !(er32(STATUS) & E1000_STATUS_LU)) { 2641 !(er32(STATUS) & E1000_STATUS_LU)) {
@@ -2508,10 +2659,6 @@ s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data)
2508 goto out; 2659 goto out;
2509 } 2660 }
2510 2661
2511 ret_val = hw->phy.ops.acquire_phy(hw);
2512 if (ret_val)
2513 goto out;
2514
2515 hw->phy.addr = e1000_get_phy_addr_for_hv_page(page); 2662 hw->phy.addr = e1000_get_phy_addr_for_hv_page(page);
2516 2663
2517 if (page == HV_INTC_FC_PAGE_START) 2664 if (page == HV_INTC_FC_PAGE_START)
@@ -2529,42 +2676,76 @@ s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data)
2529 ret_val = e1000e_write_phy_reg_mdic(hw, 2676 ret_val = e1000e_write_phy_reg_mdic(hw,
2530 IGP01E1000_PHY_PAGE_SELECT, 2677 IGP01E1000_PHY_PAGE_SELECT,
2531 (page << IGP_PAGE_SHIFT)); 2678 (page << IGP_PAGE_SHIFT));
2532 if (ret_val) {
2533 hw->phy.ops.release_phy(hw);
2534 goto out;
2535 }
2536 hw->phy.addr = phy_addr; 2679 hw->phy.addr = phy_addr;
2537 } 2680 }
2538 } 2681 }
2539 2682
2540 ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, 2683 ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
2541 data); 2684 data);
2542 hw->phy.ops.release_phy(hw);
2543
2544out: 2685out:
2545 /* Revert to MDIO fast mode, if applicable */ 2686 /* Revert to MDIO fast mode, if applicable */
2546 if ((hw->phy.type == e1000_phy_82577) && in_slow_mode) 2687 if ((hw->phy.type == e1000_phy_82577) && in_slow_mode)
2547 ret_val = e1000_set_mdio_slow_mode_hv(hw, false); 2688 ret_val = e1000_set_mdio_slow_mode_hv(hw, false);
2548 2689
2690 if (!locked)
2691 hw->phy.ops.release_phy(hw);
2692
2549 return ret_val; 2693 return ret_val;
2550} 2694}
2551 2695
2552/** 2696/**
2553 * e1000_write_phy_reg_hv - Write HV PHY register 2697 * e1000_read_phy_reg_hv - Read HV PHY register
2698 * @hw: pointer to the HW structure
2699 * @offset: register offset to be read
2700 * @data: pointer to the read data
2701 *
2702 * Acquires semaphore then reads the PHY register at offset and stores
2703 * the retrieved information in data. Release the acquired semaphore
2704 * before exiting.
2705 **/
2706s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data)
2707{
2708 return __e1000_read_phy_reg_hv(hw, offset, data, false);
2709}
2710
2711/**
2712 * e1000_read_phy_reg_hv_locked - Read HV PHY register
2713 * @hw: pointer to the HW structure
2714 * @offset: register offset to be read
2715 * @data: pointer to the read data
2716 *
2717 * Reads the PHY register at offset and stores the retrieved information
2718 * in data. Assumes semaphore already acquired.
2719 **/
2720s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 *data)
2721{
2722 return __e1000_read_phy_reg_hv(hw, offset, data, true);
2723}
2724
2725/**
2726 * __e1000_write_phy_reg_hv - Write HV PHY register
2554 * @hw: pointer to the HW structure 2727 * @hw: pointer to the HW structure
2555 * @offset: register offset to write to 2728 * @offset: register offset to write to
2556 * @data: data to write at register offset 2729 * @data: data to write at register offset
2730 * @locked: semaphore has already been acquired or not
2557 * 2731 *
2558 * Acquires semaphore, if necessary, then writes the data to PHY register 2732 * Acquires semaphore, if necessary, then writes the data to PHY register
2559 * at the offset. Release any acquired semaphores before exiting. 2733 * at the offset. Release any acquired semaphores before exiting.
2560 **/ 2734 **/
2561s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data) 2735static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
2736 bool locked)
2562{ 2737{
2563 s32 ret_val; 2738 s32 ret_val;
2564 u16 page = BM_PHY_REG_PAGE(offset); 2739 u16 page = BM_PHY_REG_PAGE(offset);
2565 u16 reg = BM_PHY_REG_NUM(offset); 2740 u16 reg = BM_PHY_REG_NUM(offset);
2566 bool in_slow_mode = false; 2741 bool in_slow_mode = false;
2567 2742
2743 if (!locked) {
2744 ret_val = hw->phy.ops.acquire_phy(hw);
2745 if (ret_val)
2746 return ret_val;
2747 }
2748
2568 /* Workaround failure in MDIO access while cable is disconnected */ 2749 /* Workaround failure in MDIO access while cable is disconnected */
2569 if ((hw->phy.type == e1000_phy_82577) && 2750 if ((hw->phy.type == e1000_phy_82577) &&
2570 !(er32(STATUS) & E1000_STATUS_LU)) { 2751 !(er32(STATUS) & E1000_STATUS_LU)) {
@@ -2588,10 +2769,6 @@ s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data)
2588 goto out; 2769 goto out;
2589 } 2770 }
2590 2771
2591 ret_val = hw->phy.ops.acquire_phy(hw);
2592 if (ret_val)
2593 goto out;
2594
2595 hw->phy.addr = e1000_get_phy_addr_for_hv_page(page); 2772 hw->phy.addr = e1000_get_phy_addr_for_hv_page(page);
2596 2773
2597 if (page == HV_INTC_FC_PAGE_START) 2774 if (page == HV_INTC_FC_PAGE_START)
@@ -2607,15 +2784,10 @@ s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data)
2607 ((MAX_PHY_REG_ADDRESS & reg) == 0) && 2784 ((MAX_PHY_REG_ADDRESS & reg) == 0) &&
2608 (data & (1 << 11))) { 2785 (data & (1 << 11))) {
2609 u16 data2 = 0x7EFF; 2786 u16 data2 = 0x7EFF;
2610 hw->phy.ops.release_phy(hw);
2611 ret_val = e1000_access_phy_debug_regs_hv(hw, (1 << 6) | 0x3, 2787 ret_val = e1000_access_phy_debug_regs_hv(hw, (1 << 6) | 0x3,
2612 &data2, false); 2788 &data2, false);
2613 if (ret_val) 2789 if (ret_val)
2614 goto out; 2790 goto out;
2615
2616 ret_val = hw->phy.ops.acquire_phy(hw);
2617 if (ret_val)
2618 goto out;
2619 } 2791 }
2620 2792
2621 if (reg > MAX_PHY_MULTI_PAGE_REG) { 2793 if (reg > MAX_PHY_MULTI_PAGE_REG) {
@@ -2630,27 +2802,53 @@ s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data)
2630 ret_val = e1000e_write_phy_reg_mdic(hw, 2802 ret_val = e1000e_write_phy_reg_mdic(hw,
2631 IGP01E1000_PHY_PAGE_SELECT, 2803 IGP01E1000_PHY_PAGE_SELECT,
2632 (page << IGP_PAGE_SHIFT)); 2804 (page << IGP_PAGE_SHIFT));
2633 if (ret_val) {
2634 hw->phy.ops.release_phy(hw);
2635 goto out;
2636 }
2637 hw->phy.addr = phy_addr; 2805 hw->phy.addr = phy_addr;
2638 } 2806 }
2639 } 2807 }
2640 2808
2641 ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, 2809 ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
2642 data); 2810 data);
2643 hw->phy.ops.release_phy(hw);
2644 2811
2645out: 2812out:
2646 /* Revert to MDIO fast mode, if applicable */ 2813 /* Revert to MDIO fast mode, if applicable */
2647 if ((hw->phy.type == e1000_phy_82577) && in_slow_mode) 2814 if ((hw->phy.type == e1000_phy_82577) && in_slow_mode)
2648 ret_val = e1000_set_mdio_slow_mode_hv(hw, false); 2815 ret_val = e1000_set_mdio_slow_mode_hv(hw, false);
2649 2816
2817 if (!locked)
2818 hw->phy.ops.release_phy(hw);
2819
2650 return ret_val; 2820 return ret_val;
2651} 2821}
2652 2822
2653/** 2823/**
2824 * e1000_write_phy_reg_hv - Write HV PHY register
2825 * @hw: pointer to the HW structure
2826 * @offset: register offset to write to
2827 * @data: data to write at register offset
2828 *
2829 * Acquires semaphore then writes the data to PHY register at the offset.
2830 * Release the acquired semaphore before exiting.
2831 **/
2832s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data)
2833{
2834 return __e1000_write_phy_reg_hv(hw, offset, data, false);
2835}
2836
2837/**
2838 * e1000_write_phy_reg_hv_locked - Write HV PHY register
2839 * @hw: pointer to the HW structure
2840 * @offset: register offset to write to
2841 * @data: data to write at register offset
2842 *
2843 * Writes the data to PHY register at the offset. Assumes semaphore
2844 * already acquired.
2845 **/
2846s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 data)
2847{
2848 return __e1000_write_phy_reg_hv(hw, offset, data, true);
2849}
2850
2851/**
2654 * e1000_get_phy_addr_for_hv_page - Get PHY address based on page 2852
2655 * @page: page to be accessed 2853 * @page: page to be accessed
2656 **/ 2854 **/
@@ -2671,10 +2869,9 @@ static u32 e1000_get_phy_addr_for_hv_page(u32 page)
2671 * @data: pointer to the data to be read or written 2869 * @data: pointer to the data to be read or written
2672 * @read: determines if the operation is a read or a write 2870
2673 * 2871 *
2674 * Acquires semaphore, if necessary, then reads the PHY register at offset 2872 * Reads the PHY register at offset and stores the retrieved information
2675 * and storing the retrieved information in data. Release any acquired 2873 * in data. Assumes semaphore already acquired. Note that the procedure
2676 * semaphores before exiting. Note that the procedure to read these regs 2874 * to read these regs uses the address port and data port to read/write.
2677 * uses the address port and data port to read/write.
2678 **/ 2875 **/
2679static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset, 2876static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
2680 u16 *data, bool read) 2877 u16 *data, bool read)
@@ -2682,20 +2879,12 @@ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
2682 s32 ret_val; 2879 s32 ret_val;
2683 u32 addr_reg = 0; 2880 u32 addr_reg = 0;
2684 u32 data_reg = 0; 2881 u32 data_reg = 0;
2685 u8 phy_acquired = 1;
2686 2882
2687 /* This takes care of the difference with desktop vs mobile phy */ 2883 /* This takes care of the difference with desktop vs mobile phy */
2688 addr_reg = (hw->phy.type == e1000_phy_82578) ? 2884 addr_reg = (hw->phy.type == e1000_phy_82578) ?
2689 I82578_ADDR_REG : I82577_ADDR_REG; 2885 I82578_ADDR_REG : I82577_ADDR_REG;
2690 data_reg = addr_reg + 1; 2886 data_reg = addr_reg + 1;
2691 2887
2692 ret_val = hw->phy.ops.acquire_phy(hw);
2693 if (ret_val) {
2694 hw_dbg(hw, "Could not acquire PHY\n");
2695 phy_acquired = 0;
2696 goto out;
2697 }
2698
2699 /* All operations in this function are phy address 2 */ 2888 /* All operations in this function are phy address 2 */
2700 hw->phy.addr = 2; 2889 hw->phy.addr = 2;
2701 2890
@@ -2718,8 +2907,6 @@ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
2718 } 2907 }
2719 2908
2720out: 2909out:
2721 if (phy_acquired == 1)
2722 hw->phy.ops.release_phy(hw);
2723 return ret_val; 2910 return ret_val;
2724} 2911}
2725 2912
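The net effect of the phy.c rework above is one pattern applied throughout: each exported accessor becomes a thin wrapper around a double-underscore worker taking a locked flag, so the PHY semaphore is acquired and released exactly once at the outermost level, and new _locked variants serve callers that already hold it. A minimal user-space sketch of that shape, with a pthread mutex standing in for the hardware semaphore and all names illustrative:

#include <pthread.h>

static pthread_mutex_t phy_sem = PTHREAD_MUTEX_INITIALIZER;

static int __read_reg(unsigned offset, unsigned short *data, int locked)
{
	int ret = 0;

	if (!locked)
		pthread_mutex_lock(&phy_sem);

	/* ... the real MDIO access for offset happens here ... */
	(void)offset;
	*data = 0;

	if (!locked)
		pthread_mutex_unlock(&phy_sem);
	return ret;
}

/* Public variant: acquires and releases the semaphore itself. */
int read_reg(unsigned offset, unsigned short *data)
{
	return __read_reg(offset, data, 0);
}

/* _locked variant: caller already holds the semaphore. */
int read_reg_locked(unsigned offset, unsigned short *data)
{
	return __read_reg(offset, data, 1);
}

int main(void)
{
	unsigned short v;
	return read_reg(0, &v);
}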
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index a25f8ed8109d..f1c565282d58 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -222,24 +222,25 @@ struct ethoc_bd {
222 u32 addr; 222 u32 addr;
223}; 223};
224 224
225static u32 ethoc_read(struct ethoc *dev, loff_t offset) 225static inline u32 ethoc_read(struct ethoc *dev, loff_t offset)
226{ 226{
227 return ioread32(dev->iobase + offset); 227 return ioread32(dev->iobase + offset);
228} 228}
229 229
230static void ethoc_write(struct ethoc *dev, loff_t offset, u32 data) 230static inline void ethoc_write(struct ethoc *dev, loff_t offset, u32 data)
231{ 231{
232 iowrite32(data, dev->iobase + offset); 232 iowrite32(data, dev->iobase + offset);
233} 233}
234 234
235static void ethoc_read_bd(struct ethoc *dev, int index, struct ethoc_bd *bd) 235static inline void ethoc_read_bd(struct ethoc *dev, int index,
236 struct ethoc_bd *bd)
236{ 237{
237 loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd)); 238 loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd));
238 bd->stat = ethoc_read(dev, offset + 0); 239 bd->stat = ethoc_read(dev, offset + 0);
239 bd->addr = ethoc_read(dev, offset + 4); 240 bd->addr = ethoc_read(dev, offset + 4);
240} 241}
241 242
242static void ethoc_write_bd(struct ethoc *dev, int index, 243static inline void ethoc_write_bd(struct ethoc *dev, int index,
243 const struct ethoc_bd *bd) 244 const struct ethoc_bd *bd)
244{ 245{
245 loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd)); 246 loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd));
@@ -247,33 +248,33 @@ static void ethoc_write_bd(struct ethoc *dev, int index,
247 ethoc_write(dev, offset + 4, bd->addr); 248 ethoc_write(dev, offset + 4, bd->addr);
248} 249}
249 250
250static void ethoc_enable_irq(struct ethoc *dev, u32 mask) 251static inline void ethoc_enable_irq(struct ethoc *dev, u32 mask)
251{ 252{
252 u32 imask = ethoc_read(dev, INT_MASK); 253 u32 imask = ethoc_read(dev, INT_MASK);
253 imask |= mask; 254 imask |= mask;
254 ethoc_write(dev, INT_MASK, imask); 255 ethoc_write(dev, INT_MASK, imask);
255} 256}
256 257
257static void ethoc_disable_irq(struct ethoc *dev, u32 mask) 258static inline void ethoc_disable_irq(struct ethoc *dev, u32 mask)
258{ 259{
259 u32 imask = ethoc_read(dev, INT_MASK); 260 u32 imask = ethoc_read(dev, INT_MASK);
260 imask &= ~mask; 261 imask &= ~mask;
261 ethoc_write(dev, INT_MASK, imask); 262 ethoc_write(dev, INT_MASK, imask);
262} 263}
263 264
264static void ethoc_ack_irq(struct ethoc *dev, u32 mask) 265static inline void ethoc_ack_irq(struct ethoc *dev, u32 mask)
265{ 266{
266 ethoc_write(dev, INT_SOURCE, mask); 267 ethoc_write(dev, INT_SOURCE, mask);
267} 268}
268 269
269static void ethoc_enable_rx_and_tx(struct ethoc *dev) 270static inline void ethoc_enable_rx_and_tx(struct ethoc *dev)
270{ 271{
271 u32 mode = ethoc_read(dev, MODER); 272 u32 mode = ethoc_read(dev, MODER);
272 mode |= MODER_RXEN | MODER_TXEN; 273 mode |= MODER_RXEN | MODER_TXEN;
273 ethoc_write(dev, MODER, mode); 274 ethoc_write(dev, MODER, mode);
274} 275}
275 276
276static void ethoc_disable_rx_and_tx(struct ethoc *dev) 277static inline void ethoc_disable_rx_and_tx(struct ethoc *dev)
277{ 278{
278 u32 mode = ethoc_read(dev, MODER); 279 u32 mode = ethoc_read(dev, MODER);
279 mode &= ~(MODER_RXEN | MODER_TXEN); 280 mode &= ~(MODER_RXEN | MODER_TXEN);
@@ -507,7 +508,7 @@ static irqreturn_t ethoc_interrupt(int irq, void *dev_id)
507 return IRQ_NONE; 508 return IRQ_NONE;
508 } 509 }
509 510
510 ethoc_ack_irq(priv, INT_MASK_ALL); 511 ethoc_ack_irq(priv, pending);
511 512
512 if (pending & INT_MASK_BUSY) { 513 if (pending & INT_MASK_BUSY) {
513 dev_err(&dev->dev, "packet dropped\n"); 514 dev_err(&dev->dev, "packet dropped\n");
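Acking INT_SOURCE with pending rather than INT_MASK_ALL clears only the events this pass actually observed, so a bit that asserts between the read and the acknowledge is kept and re-raises the interrupt. A small sketch of the read/ack discipline on a write-1-to-clear status register (register layout assumed):

#include <stdint.h>

/* Write-1-to-clear status register and its mask (layout assumed). */
static volatile uint32_t int_source;
static volatile uint32_t int_mask = 0xffffffffu;

static void irq_handler(void)
{
	uint32_t pending = int_source & int_mask;

	if (!pending)
		return;		/* not our interrupt */

	/* Ack exactly what we saw; a bit raised after the read above
	 * keeps its status and re-asserts the interrupt line. */
	int_source = pending;	/* models the write-1-to-clear MMIO store */

	/* ... handle each bit in pending ... */
}

int main(void)
{
	int_source = 0x5;
	irq_handler();
	return 0;
}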
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 29234380e6c6..16a1d58419d9 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -1654,7 +1654,7 @@ static const struct net_device_ops fec_netdev_ops = {
1654 * 1654 *
1655 * index is only used in legacy code 1655 * index is only used in legacy code
1656 */ 1656 */
1657int __init fec_enet_init(struct net_device *dev, int index) 1657static int fec_enet_init(struct net_device *dev, int index)
1658{ 1658{
1659 struct fec_enet_private *fep = netdev_priv(dev); 1659 struct fec_enet_private *fep = netdev_priv(dev);
1660 struct bufdesc *cbd_base; 1660 struct bufdesc *cbd_base;
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
index c40113f58963..66dace6d324f 100644
--- a/drivers/net/fec_mpc52xx.c
+++ b/drivers/net/fec_mpc52xx.c
@@ -759,12 +759,6 @@ static void mpc52xx_fec_reset(struct net_device *dev)
759 759
760 mpc52xx_fec_hw_init(dev); 760 mpc52xx_fec_hw_init(dev);
761 761
762 if (priv->phydev) {
763 phy_stop(priv->phydev);
764 phy_write(priv->phydev, MII_BMCR, BMCR_RESET);
765 phy_start(priv->phydev);
766 }
767
768 bcom_fec_rx_reset(priv->rx_dmatsk); 762 bcom_fec_rx_reset(priv->rx_dmatsk);
769 bcom_fec_tx_reset(priv->tx_dmatsk); 763 bcom_fec_tx_reset(priv->tx_dmatsk);
770 764
diff --git a/drivers/net/fec_mpc52xx_phy.c b/drivers/net/fec_mpc52xx_phy.c
index 31e6d62b785d..ee0f3c6d3f88 100644
--- a/drivers/net/fec_mpc52xx_phy.c
+++ b/drivers/net/fec_mpc52xx_phy.c
@@ -155,6 +155,7 @@ static struct of_device_id mpc52xx_fec_mdio_match[] = {
155 { .compatible = "mpc5200b-fec-phy", }, 155 { .compatible = "mpc5200b-fec-phy", },
156 {} 156 {}
157}; 157};
158MODULE_DEVICE_TABLE(of, mpc52xx_fec_mdio_match);
158 159
159struct of_platform_driver mpc52xx_fec_mdio_driver = { 160struct of_platform_driver mpc52xx_fec_mdio_driver = {
160 .name = "mpc5200b-fec-phy", 161 .name = "mpc5200b-fec-phy",
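This hunk, like the similar ones in the files below, exports the driver's OF match table as module alias data so depmod records the compatible strings and udev can autoload the module when a matching device-tree node appears. The general shape, with an illustrative compatible string:

#include <linux/module.h>
#include <linux/of.h>

static struct of_device_id example_match[] = {
	{ .compatible = "vendor,example-device", },	/* illustrative */
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, example_match);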
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index 2bc2d2b20644..ec2f5034457f 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -1110,6 +1110,7 @@ static struct of_device_id fs_enet_match[] = {
1110#endif 1110#endif
1111 {} 1111 {}
1112}; 1112};
1113MODULE_DEVICE_TABLE(of, fs_enet_match);
1113 1114
1114static struct of_platform_driver fs_enet_driver = { 1115static struct of_platform_driver fs_enet_driver = {
1115 .name = "fs_enet", 1116 .name = "fs_enet",
diff --git a/drivers/net/fs_enet/mii-bitbang.c b/drivers/net/fs_enet/mii-bitbang.c
index 93b481b0e3c7..24ff9f43a62b 100644
--- a/drivers/net/fs_enet/mii-bitbang.c
+++ b/drivers/net/fs_enet/mii-bitbang.c
@@ -221,6 +221,7 @@ static struct of_device_id fs_enet_mdio_bb_match[] = {
221 }, 221 },
222 {}, 222 {},
223}; 223};
224MODULE_DEVICE_TABLE(of, fs_enet_mdio_bb_match);
224 225
225static struct of_platform_driver fs_enet_bb_mdio_driver = { 226static struct of_platform_driver fs_enet_bb_mdio_driver = {
226 .name = "fsl-bb-mdio", 227 .name = "fsl-bb-mdio",
diff --git a/drivers/net/fs_enet/mii-fec.c b/drivers/net/fs_enet/mii-fec.c
index a2d69c1cd07e..96eba4280c5c 100644
--- a/drivers/net/fs_enet/mii-fec.c
+++ b/drivers/net/fs_enet/mii-fec.c
@@ -219,6 +219,7 @@ static struct of_device_id fs_enet_mdio_fec_match[] = {
219#endif 219#endif
220 {}, 220 {},
221}; 221};
222MODULE_DEVICE_TABLE(of, fs_enet_mdio_fec_match);
222 223
223static struct of_platform_driver fs_enet_fec_mdio_driver = { 224static struct of_platform_driver fs_enet_fec_mdio_driver = {
224 .name = "fsl-fec-mdio", 225 .name = "fsl-fec-mdio",
diff --git a/drivers/net/fsl_pq_mdio.c b/drivers/net/fsl_pq_mdio.c
index d167090248e2..6ac464866972 100644
--- a/drivers/net/fsl_pq_mdio.c
+++ b/drivers/net/fsl_pq_mdio.c
@@ -407,6 +407,7 @@ static struct of_device_id fsl_pq_mdio_match[] = {
407 }, 407 },
408 {}, 408 {},
409}; 409};
410MODULE_DEVICE_TABLE(of, fsl_pq_mdio_match);
410 411
411static struct of_platform_driver fsl_pq_mdio_driver = { 412static struct of_platform_driver fsl_pq_mdio_driver = {
412 .name = "fsl-pq_mdio", 413 .name = "fsl-pq_mdio",
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index c6f6d3b7f4df..f7141865869d 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -2397,9 +2397,6 @@ static irqreturn_t gfar_error(int irq, void *dev_id)
2397 return IRQ_HANDLED; 2397 return IRQ_HANDLED;
2398} 2398}
2399 2399
2400/* work with hotplug and coldplug */
2401MODULE_ALIAS("platform:fsl-gianfar");
2402
2403static struct of_device_id gfar_match[] = 2400static struct of_device_id gfar_match[] =
2404{ 2401{
2405 { 2402 {
@@ -2408,6 +2405,7 @@ static struct of_device_id gfar_match[] =
2408 }, 2405 },
2409 {}, 2406 {},
2410}; 2407};
2408MODULE_DEVICE_TABLE(of, gfar_match);
2411 2409
2412/* Structure for a device driver */ 2410/* Structure for a device driver */
2413static struct of_platform_driver gfar_driver = { 2411static struct of_platform_driver gfar_driver = {
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index c5d92ec176d0..af117c626e73 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -24,6 +24,7 @@
24 * 24 *
25 */ 25 */
26 26
27#include <linux/module.h>
27#include <linux/sched.h> 28#include <linux/sched.h>
28#include <linux/string.h> 29#include <linux/string.h>
29#include <linux/errno.h> 30#include <linux/errno.h>
@@ -2990,6 +2991,7 @@ static struct of_device_id emac_match[] =
2990 }, 2991 },
2991 {}, 2992 {},
2992}; 2993};
2994MODULE_DEVICE_TABLE(of, emac_match);
2993 2995
2994static struct of_platform_driver emac_driver = { 2996static struct of_platform_driver emac_driver = {
2995 .name = "emac", 2997 .name = "emac",
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index 801f088c134f..030913f8bd26 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -98,12 +98,13 @@ static void ri_tasklet(unsigned long dev)
98 stats->tx_packets++; 98 stats->tx_packets++;
99 stats->tx_bytes +=skb->len; 99 stats->tx_bytes +=skb->len;
100 100
101 skb->dev = __dev_get_by_index(&init_net, skb->iif); 101 skb->dev = dev_get_by_index(&init_net, skb->iif);
102 if (!skb->dev) { 102 if (!skb->dev) {
103 dev_kfree_skb(skb); 103 dev_kfree_skb(skb);
104 stats->tx_dropped++; 104 stats->tx_dropped++;
105 break; 105 break;
106 } 106 }
107 dev_put(skb->dev);
107 skb->iif = _dev->ifindex; 108 skb->iif = _dev->ifindex;
108 109
109 if (from & AT_EGRESS) { 110 if (from & AT_EGRESS) {
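The ifb fix replaces __dev_get_by_index(), which is only safe under RTNL, with dev_get_by_index(), which takes a reference that is dropped with dev_put() once the skb has been retargeted; the tasklet runs outside RTNL, so the unreferenced lookup could race with device unregistration. A kernel-context sketch of the pattern (retarget_skb() is an illustrative wrapper, not driver code):

#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Retarget an skb from a context that does not hold RTNL.
 * dev_get_by_index() pins the device against unregistration;
 * dev_put() drops our lookup reference once it is assigned. */
static int retarget_skb(struct sk_buff *skb, int ifindex)
{
	struct net_device *dev = dev_get_by_index(&init_net, ifindex);

	if (!dev) {
		dev_kfree_skb(skb);
		return -ENODEV;
	}
	skb->dev = dev;
	dev_put(dev);
	return 0;
}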
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index a6da32f25a83..dafb25bfd9e1 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -739,7 +739,7 @@ static int igb_set_ringparam(struct net_device *netdev,
739{ 739{
740 struct igb_adapter *adapter = netdev_priv(netdev); 740 struct igb_adapter *adapter = netdev_priv(netdev);
741 struct igb_ring *temp_ring; 741 struct igb_ring *temp_ring;
742 int i, err; 742 int i, err = 0;
743 u32 new_rx_count, new_tx_count; 743 u32 new_rx_count, new_tx_count;
744 744
745 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) 745 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
@@ -759,18 +759,30 @@ static int igb_set_ringparam(struct net_device *netdev,
759 return 0; 759 return 0;
760 } 760 }
761 761
762 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
763 msleep(1);
764
765 if (!netif_running(adapter->netdev)) {
766 for (i = 0; i < adapter->num_tx_queues; i++)
767 adapter->tx_ring[i].count = new_tx_count;
768 for (i = 0; i < adapter->num_rx_queues; i++)
769 adapter->rx_ring[i].count = new_rx_count;
770 adapter->tx_ring_count = new_tx_count;
771 adapter->rx_ring_count = new_rx_count;
772 goto clear_reset;
773 }
774
762 if (adapter->num_tx_queues > adapter->num_rx_queues) 775 if (adapter->num_tx_queues > adapter->num_rx_queues)
763 temp_ring = vmalloc(adapter->num_tx_queues * sizeof(struct igb_ring)); 776 temp_ring = vmalloc(adapter->num_tx_queues * sizeof(struct igb_ring));
764 else 777 else
765 temp_ring = vmalloc(adapter->num_rx_queues * sizeof(struct igb_ring)); 778 temp_ring = vmalloc(adapter->num_rx_queues * sizeof(struct igb_ring));
766 if (!temp_ring)
767 return -ENOMEM;
768 779
769 while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) 780 if (!temp_ring) {
770 msleep(1); 781 err = -ENOMEM;
782 goto clear_reset;
783 }
771 784
772 if (netif_running(adapter->netdev)) 785 igb_down(adapter);
773 igb_down(adapter);
774 786
775 /* 787 /*
776 * We can't just free everything and then setup again, 788 * We can't just free everything and then setup again,
@@ -827,14 +839,11 @@ static int igb_set_ringparam(struct net_device *netdev,
827 839
828 adapter->rx_ring_count = new_rx_count; 840 adapter->rx_ring_count = new_rx_count;
829 } 841 }
830
831 err = 0;
832err_setup: 842err_setup:
833 if (netif_running(adapter->netdev)) 843 igb_up(adapter);
834 igb_up(adapter);
835
836 clear_bit(__IGB_RESETTING, &adapter->state);
837 vfree(temp_ring); 844 vfree(temp_ring);
845clear_reset:
846 clear_bit(__IGB_RESETTING, &adapter->state);
838 return err; 847 return err;
839} 848}
840 849
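The reworked set_ringparam paths (here and in the igbvf and ixgbe hunks below) share one structure: take the RESETTING bit first, short-circuit to just storing the new counts when the interface is down, and route every exit through labels that free the temporary rings before clearing the bit, so no early return can leave the flag set. A user-space sketch of that control flow, with names illustrative:

#include <errno.h>
#include <stdlib.h>

struct adapter {
	int resetting;
	int running;
	int tx_count, rx_count;
};

static int set_ring_count(struct adapter *a, int tx, int rx)
{
	void *tmp = NULL;
	int err = 0;

	/* test_and_set_bit(__IGB_RESETTING, ...) analogue */
	while (__sync_lock_test_and_set(&a->resetting, 1))
		;	/* the driver msleep(1)s here */

	if (!a->running) {	/* interface down: just store the counts */
		a->tx_count = tx;
		a->rx_count = rx;
		goto clear_reset;
	}

	tmp = malloc(4096);	/* temp_ring analogue */
	if (!tmp) {
		err = -ENOMEM;
		goto clear_reset;
	}

	/* ... down the interface, swap the rings, bring it back up ... */

	free(tmp);
clear_reset:
	__sync_lock_release(&a->resetting);	/* clear_bit analogue */
	return err;
}

int main(void)
{
	struct adapter a = { 0, 0, 256, 256 };
	return set_ring_count(&a, 512, 512);
}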
diff --git a/drivers/net/igbvf/ethtool.c b/drivers/net/igbvf/ethtool.c
index bc606f8b61aa..8afff07ff559 100644
--- a/drivers/net/igbvf/ethtool.c
+++ b/drivers/net/igbvf/ethtool.c
@@ -279,7 +279,7 @@ static int igbvf_set_ringparam(struct net_device *netdev,
279{ 279{
280 struct igbvf_adapter *adapter = netdev_priv(netdev); 280 struct igbvf_adapter *adapter = netdev_priv(netdev);
281 struct igbvf_ring *temp_ring; 281 struct igbvf_ring *temp_ring;
282 int err; 282 int err = 0;
283 u32 new_rx_count, new_tx_count; 283 u32 new_rx_count, new_tx_count;
284 284
285 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) 285 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
@@ -299,15 +299,22 @@ static int igbvf_set_ringparam(struct net_device *netdev,
299 return 0; 299 return 0;
300 } 300 }
301 301
302 temp_ring = vmalloc(sizeof(struct igbvf_ring));
303 if (!temp_ring)
304 return -ENOMEM;
305
306 while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state)) 302 while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
307 msleep(1); 303 msleep(1);
308 304
309 if (netif_running(adapter->netdev)) 305 if (!netif_running(adapter->netdev)) {
310 igbvf_down(adapter); 306 adapter->tx_ring->count = new_tx_count;
307 adapter->rx_ring->count = new_rx_count;
308 goto clear_reset;
309 }
310
311 temp_ring = vmalloc(sizeof(struct igbvf_ring));
312 if (!temp_ring) {
313 err = -ENOMEM;
314 goto clear_reset;
315 }
316
317 igbvf_down(adapter);
311 318
312 /* 319 /*
313 * We can't just free everything and then setup again, 320 * We can't just free everything and then setup again,
@@ -339,14 +346,11 @@ static int igbvf_set_ringparam(struct net_device *netdev,
339 346
340 memcpy(adapter->rx_ring, temp_ring,sizeof(struct igbvf_ring)); 347 memcpy(adapter->rx_ring, temp_ring,sizeof(struct igbvf_ring));
341 } 348 }
342
343 err = 0;
344err_setup: 349err_setup:
345 if (netif_running(adapter->netdev)) 350 igbvf_up(adapter);
346 igbvf_up(adapter);
347
348 clear_bit(__IGBVF_RESETTING, &adapter->state);
349 vfree(temp_ring); 351 vfree(temp_ring);
352clear_reset:
353 clear_bit(__IGBVF_RESETTING, &adapter->state);
350 return err; 354 return err;
351} 355}
352 356
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 08eccf418c67..9d2cc833691b 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -806,7 +806,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
806{ 806{
807 struct ixgbe_adapter *adapter = netdev_priv(netdev); 807 struct ixgbe_adapter *adapter = netdev_priv(netdev);
808 struct ixgbe_ring *temp_tx_ring, *temp_rx_ring; 808 struct ixgbe_ring *temp_tx_ring, *temp_rx_ring;
809 int i, err; 809 int i, err = 0;
810 u32 new_rx_count, new_tx_count; 810 u32 new_rx_count, new_tx_count;
811 bool need_update = false; 811 bool need_update = false;
812 812
@@ -830,6 +830,16 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
830 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) 830 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
831 msleep(1); 831 msleep(1);
832 832
833 if (!netif_running(adapter->netdev)) {
834 for (i = 0; i < adapter->num_tx_queues; i++)
835 adapter->tx_ring[i].count = new_tx_count;
836 for (i = 0; i < adapter->num_rx_queues; i++)
837 adapter->rx_ring[i].count = new_rx_count;
838 adapter->tx_ring_count = new_tx_count;
839 adapter->rx_ring_count = new_rx_count;
840 goto err_setup;
841 }
842
833 temp_tx_ring = kcalloc(adapter->num_tx_queues, 843 temp_tx_ring = kcalloc(adapter->num_tx_queues,
834 sizeof(struct ixgbe_ring), GFP_KERNEL); 844 sizeof(struct ixgbe_ring), GFP_KERNEL);
835 if (!temp_tx_ring) { 845 if (!temp_tx_ring) {
@@ -887,8 +897,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
887 897
888 /* if rings need to be updated, here's the place to do it in one shot */ 898 /* if rings need to be updated, here's the place to do it in one shot */
889 if (need_update) { 899 if (need_update) {
890 if (netif_running(netdev)) 900 ixgbe_down(adapter);
891 ixgbe_down(adapter);
892 901
893 /* tx */ 902 /* tx */
894 if (new_tx_count != adapter->tx_ring_count) { 903 if (new_tx_count != adapter->tx_ring_count) {
@@ -905,13 +914,8 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
905 temp_rx_ring = NULL; 914 temp_rx_ring = NULL;
906 adapter->rx_ring_count = new_rx_count; 915 adapter->rx_ring_count = new_rx_count;
907 } 916 }
908 }
909
910 /* success! */
911 err = 0;
912 if (netif_running(netdev))
913 ixgbe_up(adapter); 917 ixgbe_up(adapter);
914 918 }
915err_setup: 919err_setup:
916 clear_bit(__IXGBE_RESETTING, &adapter->state); 920 clear_bit(__IXGBE_RESETTING, &adapter->state);
917 return err; 921 return err;
diff --git a/drivers/net/ks8851.c b/drivers/net/ks8851.c
index 237835864357..a23f739d222f 100644
--- a/drivers/net/ks8851.c
+++ b/drivers/net/ks8851.c
@@ -171,6 +171,36 @@ static void ks8851_wrreg16(struct ks8851_net *ks, unsigned reg, unsigned val)
171} 171}
172 172
173/** 173/**
174 * ks8851_wrreg8 - write 8bit register value to chip
175 * @ks: The chip state
176 * @reg: The register address
177 * @val: The value to write
178 *
179 * Issue a write to put the value @val into the register specified in @reg.
180 */
181static void ks8851_wrreg8(struct ks8851_net *ks, unsigned reg, unsigned val)
182{
183 struct spi_transfer *xfer = &ks->spi_xfer1;
184 struct spi_message *msg = &ks->spi_msg1;
185 __le16 txb[2];
186 int ret;
187 int bit;
188
189 bit = 1 << (reg & 3);
190
191 txb[0] = cpu_to_le16(MK_OP(bit, reg) | KS_SPIOP_WR);
192 txb[1] = val;
193
194 xfer->tx_buf = txb;
195 xfer->rx_buf = NULL;
196 xfer->len = 3;
197
198 ret = spi_sync(ks->spidev, msg);
199 if (ret < 0)
200 ks_err(ks, "spi_sync() failed\n");
201}
202
203/**
174 * ks8851_rx_1msg - select whether to use one or two messages for spi read 204 * ks8851_rx_1msg - select whether to use one or two messages for spi read
175 * @ks: The device structure 205 * @ks: The device structure
176 * 206 *
@@ -322,13 +352,12 @@ static void ks8851_soft_reset(struct ks8851_net *ks, unsigned op)
322static int ks8851_write_mac_addr(struct net_device *dev) 352static int ks8851_write_mac_addr(struct net_device *dev)
323{ 353{
324 struct ks8851_net *ks = netdev_priv(dev); 354 struct ks8851_net *ks = netdev_priv(dev);
325 u16 *mcp = (u16 *)dev->dev_addr; 355 int i;
326 356
327 mutex_lock(&ks->lock); 357 mutex_lock(&ks->lock);
328 358
329 ks8851_wrreg16(ks, KS_MARL, mcp[0]); 359 for (i = 0; i < ETH_ALEN; i++)
330 ks8851_wrreg16(ks, KS_MARM, mcp[1]); 360 ks8851_wrreg8(ks, KS_MAR(i), dev->dev_addr[i]);
331 ks8851_wrreg16(ks, KS_MARH, mcp[2]);
332 361
333 mutex_unlock(&ks->lock); 362 mutex_unlock(&ks->lock);
334 363
@@ -951,7 +980,7 @@ static void ks8851_set_rx_mode(struct net_device *dev)
951 mcptr = mcptr->next; 980 mcptr = mcptr->next;
952 } 981 }
953 982
954 rxctrl.rxcr1 = RXCR1_RXME | RXCR1_RXAE | RXCR1_RXPAFMA; 983 rxctrl.rxcr1 = RXCR1_RXME | RXCR1_RXPAFMA;
955 } else { 984 } else {
956 /* just accept broadcast / unicast */ 985 /* just accept broadcast / unicast */
957 rxctrl.rxcr1 = RXCR1_RXPAFMA; 986 rxctrl.rxcr1 = RXCR1_RXPAFMA;
@@ -1239,6 +1268,9 @@ static int __devinit ks8851_probe(struct spi_device *spi)
1239 ndev->netdev_ops = &ks8851_netdev_ops; 1268 ndev->netdev_ops = &ks8851_netdev_ops;
1240 ndev->irq = spi->irq; 1269 ndev->irq = spi->irq;
1241 1270
1271 /* issue a global soft reset to reset the device. */
1272 ks8851_soft_reset(ks, GRR_GSR);
1273
1242 /* simple check for a valid chip being connected to the bus */ 1274 /* simple check for a valid chip being connected to the bus */
1243 1275
1244 if ((ks8851_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) { 1276 if ((ks8851_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) {
diff --git a/drivers/net/ks8851.h b/drivers/net/ks8851.h
index 85abe147afbf..f52c312cc356 100644
--- a/drivers/net/ks8851.h
+++ b/drivers/net/ks8851.h
@@ -16,6 +16,7 @@
16#define CCR_32PIN (1 << 0) 16#define CCR_32PIN (1 << 0)
17 17
18/* MAC address registers */ 18/* MAC address registers */
19#define KS_MAR(_m) 0x15 - (_m)
19#define KS_MARL 0x10 20#define KS_MARL 0x10
20#define KS_MARM 0x12 21#define KS_MARM 0x12
21#define KS_MARH 0x14 22#define KS_MARH 0x14
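KS_MAR(_m) maps dev_addr bytes 0 through 5 onto chip addresses 0x15 down to 0x10, so the first octet of the MAC lands in the top byte of MARH and the byte-at-a-time ks8851_wrreg8() loop sidesteps any dependence on how the 16-bit register writes pack their bytes. A quick check of the mapping:

#include <stdio.h>

#define KS_MAR(_m)	(0x15 - (_m))	/* as defined in the hunk above */
#define ETH_ALEN	6

int main(void)
{
	unsigned char addr[ETH_ALEN] = { 0x00, 0x10, 0xa1, 0x86, 0x95, 0x11 };
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		printf("dev_addr[%d] = %02x -> reg 0x%02x\n",
		       i, addr[i], KS_MAR(i));
	/* walks registers 0x15, 0x14, ... 0x10: MARH down to MARL */
	return 0;
}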
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 29c9fe2951e0..5319db9901d8 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -75,7 +75,7 @@
75#include "myri10ge_mcp.h" 75#include "myri10ge_mcp.h"
76#include "myri10ge_mcp_gen_header.h" 76#include "myri10ge_mcp_gen_header.h"
77 77
78#define MYRI10GE_VERSION_STR "1.5.0-1.432" 78#define MYRI10GE_VERSION_STR "1.5.1-1.451"
79 79
80MODULE_DESCRIPTION("Myricom 10G driver (10GbE)"); 80MODULE_DESCRIPTION("Myricom 10G driver (10GbE)");
81MODULE_AUTHOR("Maintainer: help@myri.com"); 81MODULE_AUTHOR("Maintainer: help@myri.com");
@@ -1623,10 +1623,21 @@ myri10ge_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
1623 return 0; 1623 return 0;
1624 } 1624 }
1625 } 1625 }
1626 if (*ptr == 'R' || *ptr == 'Q') { 1626 if (*ptr == '2')
1627 /* We've found either an XFP or quad ribbon fiber */ 1627 ptr++;
1628 if (*ptr == 'R' || *ptr == 'Q' || *ptr == 'S') {
1629 /* We've found either an XFP, quad ribbon fiber, or SFP+ */
1628 cmd->port = PORT_FIBRE; 1630 cmd->port = PORT_FIBRE;
1631 cmd->supported |= SUPPORTED_FIBRE;
1632 cmd->advertising |= ADVERTISED_FIBRE;
1633 } else {
1634 cmd->port = PORT_OTHER;
1629 } 1635 }
1636 if (*ptr == 'R' || *ptr == 'S')
1637 cmd->transceiver = XCVR_EXTERNAL;
1638 else
1639 cmd->transceiver = XCVR_INTERNAL;
1640
1630 return 0; 1641 return 0;
1631} 1642}
1632 1643
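The updated parsing walks the product code to the connector character, skips a leading '2', and now classifies 'S' (SFP+) alongside 'R' (XFP) and 'Q' (quad ribbon fiber) as fibre ports, reporting 'R' and 'S' as external transceivers. A standalone sketch of the classification; the product-code layout assumed here is for illustration only:

#include <stdio.h>

struct link_info { const char *port, *xcvr; };

static struct link_info classify(const char *connector)
{
	struct link_info li = { "OTHER", "INTERNAL" };
	const char *p = connector;

	if (*p == '2')		/* dual-port prefix, per the hunk */
		p++;
	if (*p == 'R' || *p == 'Q' || *p == 'S')	/* XFP, ribbon, SFP+ */
		li.port = "FIBRE";
	if (*p == 'R' || *p == 'S')
		li.xcvr = "EXTERNAL";
	return li;
}

int main(void)
{
	struct link_info li = classify("S");	/* SFP+ */
	printf("port=%s xcvr=%s\n", li.port, li.xcvr);
	return 0;
}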
diff --git a/drivers/net/netxen/netxen_nic_hdr.h b/drivers/net/netxen/netxen_nic_hdr.h
index 7386a7cce2ba..a39155d61bad 100644
--- a/drivers/net/netxen/netxen_nic_hdr.h
+++ b/drivers/net/netxen/netxen_nic_hdr.h
@@ -419,6 +419,7 @@ enum {
419#define NETXEN_CRB_ROMUSB \ 419#define NETXEN_CRB_ROMUSB \
420 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_ROMUSB) 420 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_ROMUSB)
421#define NETXEN_CRB_I2Q NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_I2Q) 421#define NETXEN_CRB_I2Q NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_I2Q)
422#define NETXEN_CRB_I2C0 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_I2C0)
422#define NETXEN_CRB_SMB NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_SMB) 423#define NETXEN_CRB_SMB NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_SMB)
423#define NETXEN_CRB_MAX NETXEN_PCI_CRB_WINDOW(64) 424#define NETXEN_CRB_MAX NETXEN_PCI_CRB_WINDOW(64)
424 425
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c
index e43cbbd5bec1..69205ace16eb 100644
--- a/drivers/net/netxen/netxen_nic_hw.c
+++ b/drivers/net/netxen/netxen_nic_hw.c
@@ -1778,22 +1778,16 @@ netxen_setup_hwops(struct netxen_adapter *adapter)
1778 1778
1779int netxen_nic_get_board_info(struct netxen_adapter *adapter) 1779int netxen_nic_get_board_info(struct netxen_adapter *adapter)
1780{ 1780{
1781 int offset, board_type, magic, header_version; 1781 int offset, board_type, magic;
1782 struct pci_dev *pdev = adapter->pdev; 1782 struct pci_dev *pdev = adapter->pdev;
1783 1783
1784 offset = NX_FW_MAGIC_OFFSET; 1784 offset = NX_FW_MAGIC_OFFSET;
1785 if (netxen_rom_fast_read(adapter, offset, &magic)) 1785 if (netxen_rom_fast_read(adapter, offset, &magic))
1786 return -EIO; 1786 return -EIO;
1787 1787
1788 offset = NX_HDR_VERSION_OFFSET; 1788 if (magic != NETXEN_BDINFO_MAGIC) {
1789 if (netxen_rom_fast_read(adapter, offset, &header_version)) 1789 dev_err(&pdev->dev, "invalid board config, magic=%08x\n",
1790 return -EIO; 1790 magic);
1791
1792 if (magic != NETXEN_BDINFO_MAGIC ||
1793 header_version != NETXEN_BDINFO_VERSION) {
1794 dev_err(&pdev->dev,
1795 "invalid board config, magic=%08x, version=%08x\n",
1796 magic, header_version);
1797 return -EIO; 1791 return -EIO;
1798 } 1792 }
1799 1793
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index d8c4b70e35ba..27d20cbae0aa 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -514,6 +514,8 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter)
514 continue; 514 continue;
515 515
516 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { 516 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
517 if (off == (NETXEN_CRB_I2C0 + 0x1c))
518 continue;
517 /* do not reset PCI */ 519 /* do not reset PCI */
518 if (off == (ROMUSB_GLB + 0xbc)) 520 if (off == (ROMUSB_GLB + 0xbc))
519 continue; 521 continue;
@@ -537,12 +539,6 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter)
537 continue; 539 continue;
538 } 540 }
539 541
540 if (off == NETXEN_ADDR_ERROR) {
541 printk(KERN_ERR "%s: Err: Unknown addr: 0x%08x\n",
542 netxen_nic_driver_name, buf[i].addr);
543 continue;
544 }
545
546 init_delay = 1; 542 init_delay = 1;
547 /* After writing this register, HW needs time for CRB */ 543 /* After writing this register, HW needs time for CRB */
548 /* to quiet down (else crb_window returns 0xffffffff) */ 544 /* to quiet down (else crb_window returns 0xffffffff) */
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 1071f090a124..c2bdfd3c7aad 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -1925,6 +1925,7 @@ static void netxen_tx_timeout_task(struct work_struct *work)
1925 1925
1926request_reset: 1926request_reset:
1927 adapter->need_fw_reset = 1; 1927 adapter->need_fw_reset = 1;
1928 clear_bit(__NX_RESETTING, &adapter->state);
1928} 1929}
1929 1930
1930struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev) 1931struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev)
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 1d1e657991d2..5506f870037f 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -3545,7 +3545,7 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
3545 rp->rcr_index = index; 3545 rp->rcr_index = index;
3546 3546
3547 skb_reserve(skb, NET_IP_ALIGN); 3547 skb_reserve(skb, NET_IP_ALIGN);
3548 __pskb_pull_tail(skb, min(len, NIU_RXPULL_MAX)); 3548 __pskb_pull_tail(skb, min(len, VLAN_ETH_HLEN));
3549 3549
3550 rp->rx_packets++; 3550 rp->rx_packets++;
3551 rp->rx_bytes += skb->len; 3551 rp->rx_bytes += skb->len;
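Pulling min(len, VLAN_ETH_HLEN) copies just enough for a possibly VLAN-tagged Ethernet header into the skb's linear area, which is all the header parsing needs, instead of up to NIU_RXPULL_MAX bytes of payload. VLAN_ETH_HLEN works out to 18 bytes:

#include <stdio.h>

#define ETH_HLEN	14	/* dst + src + ethertype */
#define VLAN_HLEN	4	/* 802.1Q tag */
#define VLAN_ETH_HLEN	(ETH_HLEN + VLAN_HLEN)

int main(void)
{
	unsigned lens[] = { 10, 18, 60, 1514 };
	unsigned i;

	for (i = 0; i < 4; i++) {
		unsigned pull = lens[i] < VLAN_ETH_HLEN ? lens[i]
							: VLAN_ETH_HLEN;
		printf("len=%4u -> pull %u bytes into linear area\n",
		       lens[i], pull);
	}
	return 0;
}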
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 250e10f2c35b..8659d341e769 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -238,6 +238,7 @@ static struct of_device_id mdio_ofgpio_match[] = {
238 }, 238 },
239 {}, 239 {},
240}; 240};
241MODULE_DEVICE_TABLE(of, mdio_ofgpio_match);
241 242
242static struct of_platform_driver mdio_ofgpio_driver = { 243static struct of_platform_driver mdio_ofgpio_driver = {
243 .name = "mdio-gpio", 244 .name = "mdio-gpio",
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index 7cbf6f9b51de..2559991eea6a 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -111,9 +111,6 @@ struct pppoe_net {
111 rwlock_t hash_lock; 111 rwlock_t hash_lock;
112}; 112};
113 113
114/* to eliminate a race btw pppoe_flush_dev and pppoe_release */
115static DEFINE_SPINLOCK(flush_lock);
116
117/* 114/*
118 * PPPoE could be in the following stages: 115 * PPPoE could be in the following stages:
119 * 1) Discovery stage (to obtain remote MAC and Session ID) 116 * 1) Discovery stage (to obtain remote MAC and Session ID)
@@ -303,45 +300,48 @@ static void pppoe_flush_dev(struct net_device *dev)
303 write_lock_bh(&pn->hash_lock); 300 write_lock_bh(&pn->hash_lock);
304 for (i = 0; i < PPPOE_HASH_SIZE; i++) { 301 for (i = 0; i < PPPOE_HASH_SIZE; i++) {
305 struct pppox_sock *po = pn->hash_table[i]; 302 struct pppox_sock *po = pn->hash_table[i];
303 struct sock *sk;
306 304
307 while (po != NULL) { 305 while (po) {
308 struct sock *sk; 306 while (po && po->pppoe_dev != dev) {
309 if (po->pppoe_dev != dev) {
310 po = po->next; 307 po = po->next;
311 continue;
312 } 308 }
309
310 if (!po)
311 break;
312
313 sk = sk_pppox(po); 313 sk = sk_pppox(po);
314 spin_lock(&flush_lock);
315 po->pppoe_dev = NULL;
316 spin_unlock(&flush_lock);
317 dev_put(dev);
318 314
319 /* We always grab the socket lock, followed by the 315 /* We always grab the socket lock, followed by the
320 * hash_lock, in that order. Since we should 316 * hash_lock, in that order. Since we should hold the
321 * hold the sock lock while doing any unbinding, 317 * sock lock while doing any unbinding, we need to
322 * we need to release the lock we're holding. 318 * release the lock we're holding. Hold a reference to
323 * Hold a reference to the sock so it doesn't disappear 319 * the sock so it doesn't disappear as we're jumping
324 * as we're jumping between locks. 320 * between locks.
325 */ 321 */
326 322
327 sock_hold(sk); 323 sock_hold(sk);
328
329 write_unlock_bh(&pn->hash_lock); 324 write_unlock_bh(&pn->hash_lock);
330 lock_sock(sk); 325 lock_sock(sk);
331 326
332 if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) { 327 if (po->pppoe_dev == dev
328 && sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
333 pppox_unbind_sock(sk); 329 pppox_unbind_sock(sk);
334 sk->sk_state = PPPOX_ZOMBIE; 330 sk->sk_state = PPPOX_ZOMBIE;
335 sk->sk_state_change(sk); 331 sk->sk_state_change(sk);
332 po->pppoe_dev = NULL;
333 dev_put(dev);
336 } 334 }
337 335
338 release_sock(sk); 336 release_sock(sk);
339 sock_put(sk); 337 sock_put(sk);
340 338
341 /* Restart scan at the beginning of this hash chain. 339 /* Restart the process from the start of the current
342 * While the lock was dropped the chain contents may 340 * hash chain. We dropped locks so the world may have
343 * have changed. 341 * changed from underneath us.
344 */ 342 */
343
344 BUG_ON(pppoe_pernet(dev_net(dev)) == NULL);
345 write_lock_bh(&pn->hash_lock); 345 write_lock_bh(&pn->hash_lock);
346 po = pn->hash_table[i]; 346 po = pn->hash_table[i];
347 } 347 }
@@ -388,11 +388,16 @@ static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb)
388 struct pppox_sock *po = pppox_sk(sk); 388 struct pppox_sock *po = pppox_sk(sk);
389 struct pppox_sock *relay_po; 389 struct pppox_sock *relay_po;
390 390
391 /* Backlog receive. Semantics of backlog rcv preclude any code from
392 * executing in lock_sock()/release_sock() bounds; meaning sk->sk_state
393 * can't change.
394 */
395
391 if (sk->sk_state & PPPOX_BOUND) { 396 if (sk->sk_state & PPPOX_BOUND) {
392 ppp_input(&po->chan, skb); 397 ppp_input(&po->chan, skb);
393 } else if (sk->sk_state & PPPOX_RELAY) { 398 } else if (sk->sk_state & PPPOX_RELAY) {
394 relay_po = get_item_by_addr(dev_net(po->pppoe_dev), 399 relay_po = get_item_by_addr(sock_net(sk),
395 &po->pppoe_relay); 400 &po->pppoe_relay);
396 if (relay_po == NULL) 401 if (relay_po == NULL)
397 goto abort_kfree; 402 goto abort_kfree;
398 403
@@ -447,6 +452,10 @@ static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev,
447 goto drop; 452 goto drop;
448 453
449 pn = pppoe_pernet(dev_net(dev)); 454 pn = pppoe_pernet(dev_net(dev));
455
456 /* Note that get_item does a sock_hold(), so sk_pppox(po)
457 * is known to be safe.
458 */
450 po = get_item(pn, ph->sid, eth_hdr(skb)->h_source, dev->ifindex); 459 po = get_item(pn, ph->sid, eth_hdr(skb)->h_source, dev->ifindex);
451 if (!po) 460 if (!po)
452 goto drop; 461 goto drop;
@@ -561,6 +570,7 @@ static int pppoe_release(struct socket *sock)
561 struct sock *sk = sock->sk; 570 struct sock *sk = sock->sk;
562 struct pppox_sock *po; 571 struct pppox_sock *po;
563 struct pppoe_net *pn; 572 struct pppoe_net *pn;
573 struct net *net = NULL;
564 574
565 if (!sk) 575 if (!sk)
566 return 0; 576 return 0;
@@ -571,44 +581,28 @@ static int pppoe_release(struct socket *sock)
571 return -EBADF; 581 return -EBADF;
572 } 582 }
573 583
584 po = pppox_sk(sk);
585
586 if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
587 dev_put(po->pppoe_dev);
588 po->pppoe_dev = NULL;
589 }
590
574 pppox_unbind_sock(sk); 591 pppox_unbind_sock(sk);
575 592
576 /* Signal the death of the socket. */ 593 /* Signal the death of the socket. */
577 sk->sk_state = PPPOX_DEAD; 594 sk->sk_state = PPPOX_DEAD;
578 595
579 /* 596 net = sock_net(sk);
580 * pppoe_flush_dev could lead to a race with 597 pn = pppoe_pernet(net);
581 * this routine so we use flush_lock to eliminate
582 * such a case (we only need per-net specific data)
583 */
584 spin_lock(&flush_lock);
585 po = pppox_sk(sk);
586 if (!po->pppoe_dev) {
587 spin_unlock(&flush_lock);
588 goto out;
589 }
590 pn = pppoe_pernet(dev_net(po->pppoe_dev));
591 spin_unlock(&flush_lock);
592 598
593 /* 599 /*
594 * protect "po" from concurrent updates 600 * protect "po" from concurrent updates
595 * on pppoe_flush_dev 601 * on pppoe_flush_dev
596 */ 602 */
597 write_lock_bh(&pn->hash_lock); 603 delete_item(pn, po->pppoe_pa.sid, po->pppoe_pa.remote,
604 po->pppoe_ifindex);
598 605
599 po = pppox_sk(sk);
600 if (stage_session(po->pppoe_pa.sid))
601 __delete_item(pn, po->pppoe_pa.sid, po->pppoe_pa.remote,
602 po->pppoe_ifindex);
603
604 if (po->pppoe_dev) {
605 dev_put(po->pppoe_dev);
606 po->pppoe_dev = NULL;
607 }
608
609 write_unlock_bh(&pn->hash_lock);
610
611out:
612 sock_orphan(sk); 606 sock_orphan(sk);
613 sock->sk = NULL; 607 sock->sk = NULL;
614 608
@@ -625,8 +619,9 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
625 struct sock *sk = sock->sk; 619 struct sock *sk = sock->sk;
626 struct sockaddr_pppox *sp = (struct sockaddr_pppox *)uservaddr; 620 struct sockaddr_pppox *sp = (struct sockaddr_pppox *)uservaddr;
627 struct pppox_sock *po = pppox_sk(sk); 621 struct pppox_sock *po = pppox_sk(sk);
628 struct net_device *dev; 622 struct net_device *dev = NULL;
629 struct pppoe_net *pn; 623 struct pppoe_net *pn;
624 struct net *net = NULL;
630 int error; 625 int error;
631 626
632 lock_sock(sk); 627 lock_sock(sk);
@@ -652,12 +647,14 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
652 /* Delete the old binding */ 647 /* Delete the old binding */
653 if (stage_session(po->pppoe_pa.sid)) { 648 if (stage_session(po->pppoe_pa.sid)) {
654 pppox_unbind_sock(sk); 649 pppox_unbind_sock(sk);
650 pn = pppoe_pernet(sock_net(sk));
651 delete_item(pn, po->pppoe_pa.sid,
652 po->pppoe_pa.remote, po->pppoe_ifindex);
655 if (po->pppoe_dev) { 653 if (po->pppoe_dev) {
656 pn = pppoe_pernet(dev_net(po->pppoe_dev));
657 delete_item(pn, po->pppoe_pa.sid,
658 po->pppoe_pa.remote, po->pppoe_ifindex);
659 dev_put(po->pppoe_dev); 654 dev_put(po->pppoe_dev);
655 po->pppoe_dev = NULL;
660 } 656 }
657
661 memset(sk_pppox(po) + 1, 0, 658 memset(sk_pppox(po) + 1, 0,
662 sizeof(struct pppox_sock) - sizeof(struct sock)); 659 sizeof(struct pppox_sock) - sizeof(struct sock));
663 sk->sk_state = PPPOX_NONE; 660 sk->sk_state = PPPOX_NONE;
@@ -666,16 +663,15 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
666 /* Re-bind in session stage only */ 663 /* Re-bind in session stage only */
667 if (stage_session(sp->sa_addr.pppoe.sid)) { 664 if (stage_session(sp->sa_addr.pppoe.sid)) {
668 error = -ENODEV; 665 error = -ENODEV;
669 dev = dev_get_by_name(sock_net(sk), sp->sa_addr.pppoe.dev); 666 net = sock_net(sk);
667 dev = dev_get_by_name(net, sp->sa_addr.pppoe.dev);
670 if (!dev) 668 if (!dev)
671 goto end; 669 goto err_put;
672 670
673 po->pppoe_dev = dev; 671 po->pppoe_dev = dev;
674 po->pppoe_ifindex = dev->ifindex; 672 po->pppoe_ifindex = dev->ifindex;
675 pn = pppoe_pernet(dev_net(dev)); 673 pn = pppoe_pernet(net);
676 write_lock_bh(&pn->hash_lock);
677 if (!(dev->flags & IFF_UP)) { 674 if (!(dev->flags & IFF_UP)) {
678 write_unlock_bh(&pn->hash_lock);
679 goto err_put; 675 goto err_put;
680 } 676 }
681 677
@@ -683,6 +679,7 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
683 &sp->sa_addr.pppoe, 679 &sp->sa_addr.pppoe,
684 sizeof(struct pppoe_addr)); 680 sizeof(struct pppoe_addr));
685 681
682 write_lock_bh(&pn->hash_lock);
686 error = __set_item(pn, po); 683 error = __set_item(pn, po);
687 write_unlock_bh(&pn->hash_lock); 684 write_unlock_bh(&pn->hash_lock);
688 if (error < 0) 685 if (error < 0)
@@ -696,8 +693,11 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
696 po->chan.ops = &pppoe_chan_ops; 693 po->chan.ops = &pppoe_chan_ops;
697 694
698 error = ppp_register_net_channel(dev_net(dev), &po->chan); 695 error = ppp_register_net_channel(dev_net(dev), &po->chan);
699 if (error) 696 if (error) {
697 delete_item(pn, po->pppoe_pa.sid,
698 po->pppoe_pa.remote, po->pppoe_ifindex);
700 goto err_put; 699 goto err_put;
700 }
701 701
702 sk->sk_state = PPPOX_CONNECTED; 702 sk->sk_state = PPPOX_CONNECTED;
703 } 703 }
@@ -915,6 +915,14 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb)
915 struct pppoe_hdr *ph; 915 struct pppoe_hdr *ph;
916 int data_len = skb->len; 916 int data_len = skb->len;
917 917
918 /* The higher-level PPP code (ppp_unregister_channel()) ensures the PPP
919 * xmit operations conclude prior to an unregistration call. Thus
920 * sk->sk_state cannot change, so we don't need to do lock_sock().
921 * But, we also can't do a lock_sock since that introduces a potential
922 * deadlock as we'd reverse the lock ordering used when calling
923 * ppp_unregister_channel().
924 */
925
918 if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) 926 if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
919 goto abort; 927 goto abort;
920 928
@@ -944,7 +952,6 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb)
944 po->pppoe_pa.remote, NULL, data_len); 952 po->pppoe_pa.remote, NULL, data_len);
945 953
946 dev_queue_xmit(skb); 954 dev_queue_xmit(skb);
947
948 return 1; 955 return 1;
949 956
950abort: 957abort:
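
The locking comment added to __pppoe_xmit() above describes a classic
AB-BA inversion. A minimal user-space sketch of the hazard (illustrative
only; sock_lock/chan_lock are hypothetical stand-ins for the socket lock
and the PPP channel lock):

	#include <pthread.h>

	static pthread_mutex_t sock_lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_mutex_t chan_lock = PTHREAD_MUTEX_INITIALIZER;

	/* Unregister path: socket lock first, then channel lock. */
	static void unregister_path(void)
	{
		pthread_mutex_lock(&sock_lock);
		pthread_mutex_lock(&chan_lock);
		/* ... tear the channel down ... */
		pthread_mutex_unlock(&chan_lock);
		pthread_mutex_unlock(&sock_lock);
	}

	/* If the xmit path locked in the opposite order, two threads could
	 * each hold one lock and wait forever on the other; that is why
	 * __pppoe_xmit() relies on the caller's guarantee instead of
	 * calling lock_sock(). */
	static void xmit_path_wrong(void)
	{
		pthread_mutex_lock(&chan_lock);
		pthread_mutex_lock(&sock_lock);	/* AB-BA: may deadlock */
		pthread_mutex_unlock(&sock_lock);
		pthread_mutex_unlock(&chan_lock);
	}
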
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 7d9fc06ceb98..1f7946c7d4e8 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -1029,7 +1029,10 @@ static void rtl8169_vlan_rx_register(struct net_device *dev,
1029 1029
1030 spin_lock_irqsave(&tp->lock, flags); 1030 spin_lock_irqsave(&tp->lock, flags);
1031 tp->vlgrp = grp; 1031 tp->vlgrp = grp;
1032 if (tp->vlgrp) 1032 /*
1033 * Do not disable RxVlan on 8110SCd.
1034 */
1035 if (tp->vlgrp || (tp->mac_version == RTL_GIGA_MAC_VER_05))
1033 tp->cp_cmd |= RxVlan; 1036 tp->cp_cmd |= RxVlan;
1034 else 1037 else
1035 tp->cp_cmd &= ~RxVlan; 1038 tp->cp_cmd &= ~RxVlan;
@@ -3197,6 +3200,14 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3197 } 3200 }
3198 3201
3199 rtl8169_init_phy(dev, tp); 3202 rtl8169_init_phy(dev, tp);
3203
3204 /*
3205 * Pretend we are using VLANs; this bypasses a nasty bug where
3206 * interrupts stop flowing under high load on 8110SCd controllers.
3207 */
3208 if (tp->mac_version == RTL_GIGA_MAC_VER_05)
3209 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | RxVlan);
3210
3200 device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL); 3211 device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);
3201 3212
3202out: 3213out:
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index 161181a4b3d6..5783f50d18e9 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -31,6 +31,8 @@
31#include <linux/cache.h> 31#include <linux/cache.h>
32#include <linux/io.h> 32#include <linux/io.h>
33#include <linux/pm_runtime.h> 33#include <linux/pm_runtime.h>
34#include <asm/cacheflush.h>
35
34#include "sh_eth.h" 36#include "sh_eth.h"
35 37
36/* There is CPU dependent code */ 38/* There is CPU dependent code */
diff --git a/drivers/net/stmmac/Kconfig b/drivers/net/stmmac/Kconfig
new file mode 100644
index 000000000000..35eaa5251d7f
--- /dev/null
+++ b/drivers/net/stmmac/Kconfig
@@ -0,0 +1,53 @@
1config STMMAC_ETH
2 tristate "STMicroelectronics 10/100/1000 Ethernet driver"
3 select MII
4 select PHYLIB
5 depends on NETDEVICES && CPU_SUBTYPE_ST40
6 help
7 This is the driver for the ST MAC 10/100/1000 on-chip Ethernet
8 controllers. ST Ethernet IPs are built around a Synopsys IP Core.
9
10if STMMAC_ETH
11
12config STMMAC_DA
13 bool "STMMAC DMA arbitration scheme"
14 default n
15 help
16 When this option is selected, RX has priority over TX (Gigabit
17 Ethernet devices only).
18 By default, the DMA arbitration scheme is round-robin
19 (rx:tx priority 1:1).
20
21config STMMAC_DUAL_MAC
22 bool "STMMAC: dual mac support (EXPERIMENTAL)"
23 default n
24 depends on EXPERIMENTAL && STMMAC_ETH && !STMMAC_TIMER
25 help
26 Some ST SoCs (for example the stx7141 and stx7200c2) have two
27 Ethernet controllers. This option turns on the second Ethernet
28 device on these platforms.
29
30config STMMAC_TIMER
31 bool "STMMAC Timer optimisation"
32 default n
33 help
34 Use an external timer to mitigate the number of network
35 interrupts.
36
37choice
38 prompt "Select Timer device"
39 depends on STMMAC_TIMER
40
41config STMMAC_TMU_TIMER
42 bool "TMU channel 2"
43 depends on CPU_SH4
44 help
45
46config STMMAC_RTC_TIMER
47 bool "Real time clock"
48 depends on RTC_CLASS
49 help
50
51endchoice
52
53endif
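
Note on STMMAC_DA: the option maps onto the DA bit that gmac_dma_init()
programs into DMA_BUS_MODE further down. The hardware also supports a
weighted round-robin between the two extremes (the rx_tx_priority_ratio
values defined in gmac.h below, which this patch does not wire up); a
hypothetical sketch:

	u32 value = readl(ioaddr + DMA_BUS_MODE);

	value &= ~DMA_BUS_PR_RATIO_MASK;
	value |= double_ratio;		/* serve RX 2:1 over TX */
	value &= ~DMA_BUS_MODE_DA;	/* weighted RR, not strict priority */
	writel(value, ioaddr + DMA_BUS_MODE);
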
diff --git a/drivers/net/stmmac/Makefile b/drivers/net/stmmac/Makefile
new file mode 100644
index 000000000000..b2d7a5564dfa
--- /dev/null
+++ b/drivers/net/stmmac/Makefile
@@ -0,0 +1,4 @@
1obj-$(CONFIG_STMMAC_ETH) += stmmac.o
2stmmac-$(CONFIG_STMMAC_TIMER) += stmmac_timer.o
3stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o \
4 mac100.o gmac.o $(stmmac-y)
diff --git a/drivers/net/stmmac/common.h b/drivers/net/stmmac/common.h
new file mode 100644
index 000000000000..e49e5188e887
--- /dev/null
+++ b/drivers/net/stmmac/common.h
@@ -0,0 +1,330 @@
1/*******************************************************************************
2 STMMAC Common Header File
3
4 Copyright (C) 2007-2009 STMicroelectronics Ltd
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
23*******************************************************************************/
24
25#include "descs.h"
26#include <linux/io.h>
27
28/* *********************************************
 29 DMA Control and Status Register (CSR) Mapping
30 * *********************************************/
31#define DMA_BUS_MODE 0x00001000 /* Bus Mode */
32#define DMA_XMT_POLL_DEMAND 0x00001004 /* Transmit Poll Demand */
33#define DMA_RCV_POLL_DEMAND 0x00001008 /* Received Poll Demand */
34#define DMA_RCV_BASE_ADDR 0x0000100c /* Receive List Base */
35#define DMA_TX_BASE_ADDR 0x00001010 /* Transmit List Base */
36#define DMA_STATUS 0x00001014 /* Status Register */
37#define DMA_CONTROL 0x00001018 /* Ctrl (Operational Mode) */
38#define DMA_INTR_ENA 0x0000101c /* Interrupt Enable */
39#define DMA_MISSED_FRAME_CTR 0x00001020 /* Missed Frame Counter */
40#define DMA_CUR_TX_BUF_ADDR 0x00001050 /* Current Host Tx Buffer */
41#define DMA_CUR_RX_BUF_ADDR 0x00001054 /* Current Host Rx Buffer */
42
43/* ********************************
44 DMA Control register defines
45 * ********************************/
46#define DMA_CONTROL_ST 0x00002000 /* Start/Stop Transmission */
47#define DMA_CONTROL_SR 0x00000002 /* Start/Stop Receive */
48
49/* **************************************
50 DMA Interrupt Enable register defines
51 * **************************************/
52/**** NORMAL INTERRUPT ****/
53#define DMA_INTR_ENA_NIE 0x00010000 /* Normal Summary */
54#define DMA_INTR_ENA_TIE 0x00000001 /* Transmit Interrupt */
55#define DMA_INTR_ENA_TUE 0x00000004 /* Transmit Buffer Unavailable */
56#define DMA_INTR_ENA_RIE 0x00000040 /* Receive Interrupt */
57#define DMA_INTR_ENA_ERE 0x00004000 /* Early Receive */
58
59#define DMA_INTR_NORMAL (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \
60 DMA_INTR_ENA_TIE)
61
62/**** ABNORMAL INTERRUPT ****/
63#define DMA_INTR_ENA_AIE 0x00008000 /* Abnormal Summary */
64#define DMA_INTR_ENA_FBE 0x00002000 /* Fatal Bus Error */
65#define DMA_INTR_ENA_ETE 0x00000400 /* Early Transmit */
66#define DMA_INTR_ENA_RWE 0x00000200 /* Receive Watchdog */
67#define DMA_INTR_ENA_RSE 0x00000100 /* Receive Stopped */
68#define DMA_INTR_ENA_RUE 0x00000080 /* Receive Buffer Unavailable */
69#define DMA_INTR_ENA_UNE 0x00000020 /* Tx Underflow */
70#define DMA_INTR_ENA_OVE 0x00000010 /* Receive Overflow */
71#define DMA_INTR_ENA_TJE 0x00000008 /* Transmit Jabber */
72#define DMA_INTR_ENA_TSE 0x00000002 /* Transmit Stopped */
73
74#define DMA_INTR_ABNORMAL (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \
75 DMA_INTR_ENA_UNE)
76
77/* DMA default interrupt mask */
78#define DMA_INTR_DEFAULT_MASK (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL)
79
80/* ****************************
81 * DMA Status register defines
82 * ****************************/
83#define DMA_STATUS_GPI 0x10000000 /* PMT interrupt */
84#define DMA_STATUS_GMI 0x08000000 /* MMC interrupt */
85#define DMA_STATUS_GLI 0x04000000 /* GMAC Line interface int. */
88#define DMA_STATUS_EB_MASK 0x00380000 /* Error Bits Mask */
89#define DMA_STATUS_EB_TX_ABORT 0x00080000 /* Error Bits - TX Abort */
90#define DMA_STATUS_EB_RX_ABORT 0x00100000 /* Error Bits - RX Abort */
91#define DMA_STATUS_TS_MASK 0x00700000 /* Transmit Process State */
92#define DMA_STATUS_TS_SHIFT 20
93#define DMA_STATUS_RS_MASK 0x000e0000 /* Receive Process State */
94#define DMA_STATUS_RS_SHIFT 17
95#define DMA_STATUS_NIS 0x00010000 /* Normal Interrupt Summary */
96#define DMA_STATUS_AIS 0x00008000 /* Abnormal Interrupt Summary */
97#define DMA_STATUS_ERI 0x00004000 /* Early Receive Interrupt */
98#define DMA_STATUS_FBI 0x00002000 /* Fatal Bus Error Interrupt */
99#define DMA_STATUS_ETI 0x00000400 /* Early Transmit Interrupt */
100#define DMA_STATUS_RWT 0x00000200 /* Receive Watchdog Timeout */
101#define DMA_STATUS_RPS 0x00000100 /* Receive Process Stopped */
102#define DMA_STATUS_RU 0x00000080 /* Receive Buffer Unavailable */
103#define DMA_STATUS_RI 0x00000040 /* Receive Interrupt */
104#define DMA_STATUS_UNF 0x00000020 /* Transmit Underflow */
105#define DMA_STATUS_OVF 0x00000010 /* Receive Overflow */
106#define DMA_STATUS_TJT 0x00000008 /* Transmit Jabber Timeout */
107#define DMA_STATUS_TU 0x00000004 /* Transmit Buffer Unavailable */
108#define DMA_STATUS_TPS 0x00000002 /* Transmit Process Stopped */
109#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */
110
111/* Other defines */
112#define HASH_TABLE_SIZE 64
113#define PAUSE_TIME 0x200
114
115/* Flow Control defines */
116#define FLOW_OFF 0
117#define FLOW_RX 1
118#define FLOW_TX 2
119#define FLOW_AUTO (FLOW_TX | FLOW_RX)
120
121/* DMA STORE-AND-FORWARD Operation Mode */
122#define SF_DMA_MODE 1
123
124#define HW_CSUM 1
125#define NO_HW_CSUM 0
126
127/* GMAC TX FIFO is 8K, Rx FIFO is 16K */
128#define BUF_SIZE_16KiB 16384
129#define BUF_SIZE_8KiB 8192
130#define BUF_SIZE_4KiB 4096
131#define BUF_SIZE_2KiB 2048
132
133/* Power Down and WOL */
134#define PMT_NOT_SUPPORTED 0
135#define PMT_SUPPORTED 1
136
137/* Common MAC defines */
138#define MAC_CTRL_REG 0x00000000 /* MAC Control */
139#define MAC_ENABLE_TX 0x00000008 /* Transmitter Enable */
140#define MAC_RNABLE_RX 0x00000004 /* Receiver Enable */
141
142/* MAC Management Counters register */
143#define MMC_CONTROL 0x00000100 /* MMC Control */
144#define MMC_HIGH_INTR 0x00000104 /* MMC High Interrupt */
145#define MMC_LOW_INTR 0x00000108 /* MMC Low Interrupt */
146#define MMC_HIGH_INTR_MASK 0x0000010c /* MMC High Interrupt Mask */
147#define MMC_LOW_INTR_MASK 0x00000110 /* MMC Low Interrupt Mask */
148
149#define MMC_CONTROL_MAX_FRM_MASK 0x0003ff8 /* Maximum Frame Size */
150#define MMC_CONTROL_MAX_FRM_SHIFT 3
151#define MMC_CONTROL_MAX_FRAME 0x7FF
152
153struct stmmac_extra_stats {
154 /* Transmit errors */
155 unsigned long tx_underflow ____cacheline_aligned;
156 unsigned long tx_carrier;
157 unsigned long tx_losscarrier;
158 unsigned long tx_heartbeat;
159 unsigned long tx_deferred;
160 unsigned long tx_vlan;
161 unsigned long tx_jabber;
162 unsigned long tx_frame_flushed;
163 unsigned long tx_payload_error;
164 unsigned long tx_ip_header_error;
165 /* Receive errors */
166 unsigned long rx_desc;
167 unsigned long rx_partial;
168 unsigned long rx_runt;
169 unsigned long rx_toolong;
170 unsigned long rx_collision;
171 unsigned long rx_crc;
172 unsigned long rx_lenght;
173 unsigned long rx_mii;
174 unsigned long rx_multicast;
175 unsigned long rx_gmac_overflow;
176 unsigned long rx_watchdog;
177 unsigned long da_rx_filter_fail;
178 unsigned long sa_rx_filter_fail;
179 unsigned long rx_missed_cntr;
180 unsigned long rx_overflow_cntr;
181 unsigned long rx_vlan;
182 /* Tx/Rx IRQ errors */
183 unsigned long tx_undeflow_irq;
184 unsigned long tx_process_stopped_irq;
185 unsigned long tx_jabber_irq;
186 unsigned long rx_overflow_irq;
187 unsigned long rx_buf_unav_irq;
188 unsigned long rx_process_stopped_irq;
189 unsigned long rx_watchdog_irq;
190 unsigned long tx_early_irq;
191 unsigned long fatal_bus_error_irq;
192 /* Extra info */
193 unsigned long threshold;
194 unsigned long tx_pkt_n;
195 unsigned long rx_pkt_n;
196 unsigned long poll_n;
197 unsigned long sched_timer_n;
198 unsigned long normal_irq_n;
199};
200
201/* GMAC core can compute the checksums in HW. */
202enum rx_frame_status {
203 good_frame = 0,
204 discard_frame = 1,
205 csum_none = 2,
206};
207
208static inline void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6],
209 unsigned int high, unsigned int low)
210{
211 unsigned long data;
212
213 data = (addr[5] << 8) | addr[4];
214 writel(data, ioaddr + high);
215 data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
216 writel(data, ioaddr + low);
217
218 return;
219}
220
221static inline void stmmac_get_mac_addr(unsigned long ioaddr,
222 unsigned char *addr, unsigned int high,
223 unsigned int low)
224{
225 unsigned int hi_addr, lo_addr;
226
227 /* Read the MAC address from the hardware */
228 hi_addr = readl(ioaddr + high);
229 lo_addr = readl(ioaddr + low);
230
231 /* Extract the MAC address from the high and low words */
232 addr[0] = lo_addr & 0xff;
233 addr[1] = (lo_addr >> 8) & 0xff;
234 addr[2] = (lo_addr >> 16) & 0xff;
235 addr[3] = (lo_addr >> 24) & 0xff;
236 addr[4] = hi_addr & 0xff;
237 addr[5] = (hi_addr >> 8) & 0xff;
238
239 return;
240}
241
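A worked example of the packing in stmmac_set_mac_addr() (values follow
directly from the code above): for the address 00:11:22:33:44:55,

	/* high word: (addr[5] << 8) | addr[4]
	 *          = (0x55 << 8) | 0x44                       = 0x00005544
	 * low word:  (addr[3] << 24) | (addr[2] << 16) |
	 *            (addr[1] << 8)  |  addr[0]               = 0x33221100
	 * stmmac_get_mac_addr() inverts exactly this, so a write followed
	 * by a read round-trips all six bytes. */
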
242struct stmmac_ops {
243 /* MAC core initialization */
244 void (*core_init) (unsigned long ioaddr) ____cacheline_aligned;
245 /* DMA core initialization */
246 int (*dma_init) (unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx);
247 /* Dump MAC registers */
248 void (*dump_mac_regs) (unsigned long ioaddr);
249 /* Dump DMA registers */
250 void (*dump_dma_regs) (unsigned long ioaddr);
251 /* Set tx/rx threshold in the csr6 register
252 * An invalid value enables the store-and-forward mode */
253 void (*dma_mode) (unsigned long ioaddr, int txmode, int rxmode);
254 /* To track extra statistic (if supported) */
255 void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x,
256 unsigned long ioaddr);
257 /* RX descriptor ring initialization */
258 void (*init_rx_desc) (struct dma_desc *p, unsigned int ring_size,
259 int disable_rx_ic);
260 /* TX descriptor ring initialization */
261 void (*init_tx_desc) (struct dma_desc *p, unsigned int ring_size);
262
263 /* Invoked by the xmit function to prepare the tx descriptor */
264 void (*prepare_tx_desc) (struct dma_desc *p, int is_fs, int len,
265 int csum_flag);
266 /* Set/get the owner of the descriptor */
267 void (*set_tx_owner) (struct dma_desc *p);
268 int (*get_tx_owner) (struct dma_desc *p);
269 /* Invoked by the xmit function to close the tx descriptor */
270 void (*close_tx_desc) (struct dma_desc *p);
271 /* Clean the tx descriptor as soon as the tx irq is received */
272 void (*release_tx_desc) (struct dma_desc *p);
273 /* Clear interrupt on tx frame completion. When this bit is
274 * set an interrupt happens as soon as the frame is transmitted */
275 void (*clear_tx_ic) (struct dma_desc *p);
276 /* Last tx segment reports the transmit status */
277 int (*get_tx_ls) (struct dma_desc *p);
278 /* Return the transmit status looking at the TDES1 */
279 int (*tx_status) (void *data, struct stmmac_extra_stats *x,
280 struct dma_desc *p, unsigned long ioaddr);
281 /* Get the buffer size from the descriptor */
282 int (*get_tx_len) (struct dma_desc *p);
283 /* Handle extra events on specific interrupts hw dependent */
284 void (*host_irq_status) (unsigned long ioaddr);
285 int (*get_rx_owner) (struct dma_desc *p);
286 void (*set_rx_owner) (struct dma_desc *p);
287 /* Get the receive frame size */
288 int (*get_rx_frame_len) (struct dma_desc *p);
289 /* Return the reception status looking at the RDES1 */
290 int (*rx_status) (void *data, struct stmmac_extra_stats *x,
291 struct dma_desc *p);
292 /* Multicast filter setting */
293 void (*set_filter) (struct net_device *dev);
294 /* Flow control setting */
295 void (*flow_ctrl) (unsigned long ioaddr, unsigned int duplex,
296 unsigned int fc, unsigned int pause_time);
297 /* Set power management mode (e.g. magic frame) */
298 void (*pmt) (unsigned long ioaddr, unsigned long mode);
299 /* Set/Get Unicast MAC addresses */
300 void (*set_umac_addr) (unsigned long ioaddr, unsigned char *addr,
301 unsigned int reg_n);
302 void (*get_umac_addr) (unsigned long ioaddr, unsigned char *addr,
303 unsigned int reg_n);
304};
305
306struct mac_link {
307 int port;
308 int duplex;
309 int speed;
310};
311
312struct mii_regs {
313 unsigned int addr; /* MII Address */
314 unsigned int data; /* MII Data */
315};
316
317struct hw_cap {
318 unsigned int version; /* Core Version register (GMAC) */
319 unsigned int pmt; /* Power-Down mode (GMAC) */
320 struct mac_link link;
321 struct mii_regs mii;
322};
323
324struct mac_device_info {
325 struct hw_cap hw;
326 struct stmmac_ops *ops;
327};
328
329struct mac_device_info *gmac_setup(unsigned long addr);
330struct mac_device_info *mac100_setup(unsigned long addr);
diff --git a/drivers/net/stmmac/descs.h b/drivers/net/stmmac/descs.h
new file mode 100644
index 000000000000..6d2a0b2f5e57
--- /dev/null
+++ b/drivers/net/stmmac/descs.h
@@ -0,0 +1,163 @@
1/*******************************************************************************
2 Header File to describe the DMA descriptors
3 Use enhanced descriptors in case of GMAC Cores.
4
5 This program is free software; you can redistribute it and/or modify it
6 under the terms and conditions of the GNU General Public License,
7 version 2, as published by the Free Software Foundation.
8
9 This program is distributed in the hope it will be useful, but WITHOUT
10 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 more details.
13
14 You should have received a copy of the GNU General Public License along with
15 this program; if not, write to the Free Software Foundation, Inc.,
16 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17
18 The full GNU General Public License is included in this distribution in
19 the file called "COPYING".
20
21 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
22*******************************************************************************/
23struct dma_desc {
24 /* Receive descriptor */
25 union {
26 struct {
27 /* RDES0 */
28 u32 reserved1:1;
29 u32 crc_error:1;
30 u32 dribbling:1;
31 u32 mii_error:1;
32 u32 receive_watchdog:1;
33 u32 frame_type:1;
34 u32 collision:1;
35 u32 frame_too_long:1;
36 u32 last_descriptor:1;
37 u32 first_descriptor:1;
38 u32 multicast_frame:1;
39 u32 run_frame:1;
40 u32 length_error:1;
41 u32 partial_frame_error:1;
42 u32 descriptor_error:1;
43 u32 error_summary:1;
44 u32 frame_length:14;
45 u32 filtering_fail:1;
46 u32 own:1;
47 /* RDES1 */
48 u32 buffer1_size:11;
49 u32 buffer2_size:11;
50 u32 reserved2:2;
51 u32 second_address_chained:1;
52 u32 end_ring:1;
53 u32 reserved3:5;
54 u32 disable_ic:1;
55 } rx;
56 struct {
57 /* RDES0 */
58 u32 payload_csum_error:1;
59 u32 crc_error:1;
60 u32 dribbling:1;
61 u32 error_gmii:1;
62 u32 receive_watchdog:1;
63 u32 frame_type:1;
64 u32 late_collision:1;
65 u32 ipc_csum_error:1;
66 u32 last_descriptor:1;
67 u32 first_descriptor:1;
68 u32 vlan_tag:1;
69 u32 overflow_error:1;
70 u32 length_error:1;
71 u32 sa_filter_fail:1;
72 u32 descriptor_error:1;
73 u32 error_summary:1;
74 u32 frame_length:14;
75 u32 da_filter_fail:1;
76 u32 own:1;
77 /* RDES1 */
78 u32 buffer1_size:13;
79 u32 reserved1:1;
80 u32 second_address_chained:1;
81 u32 end_ring:1;
82 u32 buffer2_size:13;
83 u32 reserved2:2;
84 u32 disable_ic:1;
85 } erx; /* -- enhanced -- */
86
87 /* Transmit descriptor */
88 struct {
89 /* TDES0 */
90 u32 deferred:1;
91 u32 underflow_error:1;
92 u32 excessive_deferral:1;
93 u32 collision_count:4;
94 u32 heartbeat_fail:1;
95 u32 excessive_collisions:1;
96 u32 late_collision:1;
97 u32 no_carrier:1;
98 u32 loss_carrier:1;
99 u32 reserved1:3;
100 u32 error_summary:1;
101 u32 reserved2:15;
102 u32 own:1;
103 /* TDES1 */
104 u32 buffer1_size:11;
105 u32 buffer2_size:11;
106 u32 reserved3:1;
107 u32 disable_padding:1;
108 u32 second_address_chained:1;
109 u32 end_ring:1;
110 u32 crc_disable:1;
111 u32 reserved4:2;
112 u32 first_segment:1;
113 u32 last_segment:1;
114 u32 interrupt:1;
115 } tx;
116 struct {
117 /* TDES0 */
118 u32 deferred:1;
119 u32 underflow_error:1;
120 u32 excessive_deferral:1;
121 u32 collision_count:4;
122 u32 vlan_frame:1;
123 u32 excessive_collisions:1;
124 u32 late_collision:1;
125 u32 no_carrier:1;
126 u32 loss_carrier:1;
127 u32 payload_error:1;
128 u32 frame_flushed:1;
129 u32 jabber_timeout:1;
130 u32 error_summary:1;
131 u32 ip_header_error:1;
132 u32 time_stamp_status:1;
133 u32 reserved1:2;
134 u32 second_address_chained:1;
135 u32 end_ring:1;
136 u32 checksum_insertion:2;
137 u32 reserved2:1;
138 u32 time_stamp_enable:1;
139 u32 disable_padding:1;
140 u32 crc_disable:1;
141 u32 first_segment:1;
142 u32 last_segment:1;
143 u32 interrupt:1;
144 u32 own:1;
145 /* TDES1 */
146 u32 buffer1_size:13;
147 u32 reserved3:3;
148 u32 buffer2_size:13;
149 u32 reserved4:3;
150 } etx; /* -- enhanced -- */
151 } des01;
152 unsigned int des2;
153 unsigned int des3;
154};
155
156/* Transmit checksum insertion control */
157enum tdes_csum_insertion {
158 cic_disabled = 0, /* Checksum Insertion Control */
159 cic_only_ip = 1, /* Only IP header */
160 cic_no_pseudoheader = 2, /* IP header but pseudoheader
161 * is not calculated */
162 cic_full = 3, /* IP header and pseudoheader */
163};
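
Everything above relies on C bitfields, whose exact bit placement is
ABI-dependent (the driver targets little-endian SH4, per the Kconfig
dependency). A cheap compile-time guard one could add (a sketch, not
part of the patch, assuming descs.h is on the include path):

	#include "descs.h"

	/* des01 is two 32-bit words, plus des2/des3: 16 bytes total.
	 * Compilation fails if a bitfield accidentally widens the union. */
	typedef char dma_desc_is_16_bytes[(sizeof(struct dma_desc) == 16) ? 1 : -1];
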
diff --git a/drivers/net/stmmac/gmac.c b/drivers/net/stmmac/gmac.c
new file mode 100644
index 000000000000..b624bb5bae0a
--- /dev/null
+++ b/drivers/net/stmmac/gmac.c
@@ -0,0 +1,693 @@
1/*******************************************************************************
2 This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
3 DWC Ether MAC 10/100/1000 Universal version 3.41a has been used for
4 developing this code.
5
6 Copyright (C) 2007-2009 STMicroelectronics Ltd
7
8 This program is free software; you can redistribute it and/or modify it
9 under the terms and conditions of the GNU General Public License,
10 version 2, as published by the Free Software Foundation.
11
12 This program is distributed in the hope it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 more details.
16
17 You should have received a copy of the GNU General Public License along with
18 this program; if not, write to the Free Software Foundation, Inc.,
19 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
20
21 The full GNU General Public License is included in this distribution in
22 the file called "COPYING".
23
24 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
25*******************************************************************************/
26
27#include <linux/netdevice.h>
28#include <linux/crc32.h>
29#include <linux/mii.h>
30#include <linux/phy.h>
31
32#include "stmmac.h"
33#include "gmac.h"
34
35#undef GMAC_DEBUG
36/*#define GMAC_DEBUG*/
37#undef FRAME_FILTER_DEBUG
38/*#define FRAME_FILTER_DEBUG*/
39#ifdef GMAC_DEBUG
40#define DBG(fmt, args...) printk(fmt, ## args)
41#else
42#define DBG(fmt, args...) do { } while (0)
43#endif
44
45static void gmac_dump_regs(unsigned long ioaddr)
46{
47 int i;
48 pr_info("\t----------------------------------------------\n"
49 "\t GMAC registers (base addr = 0x%8x)\n"
50 "\t----------------------------------------------\n",
51 (unsigned int)ioaddr);
52
53 for (i = 0; i < 55; i++) {
54 int offset = i * 4;
55 pr_info("\tReg No. %d (offset 0x%x): 0x%08x\n", i,
56 offset, readl(ioaddr + offset));
57 }
58 return;
59}
60
61static int gmac_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx)
62{
63 u32 value = readl(ioaddr + DMA_BUS_MODE);
64 /* DMA SW reset */
65 value |= DMA_BUS_MODE_SFT_RESET;
66 writel(value, ioaddr + DMA_BUS_MODE);
67 do {} while ((readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET));
68
69 value = /* DMA_BUS_MODE_FB | */ DMA_BUS_MODE_4PBL |
70 ((pbl << DMA_BUS_MODE_PBL_SHIFT) |
71 (pbl << DMA_BUS_MODE_RPBL_SHIFT));
72
73#ifdef CONFIG_STMMAC_DA
74 value |= DMA_BUS_MODE_DA; /* Rx has priority over tx */
75#endif
76 writel(value, ioaddr + DMA_BUS_MODE);
77
78 /* Mask interrupts by writing to CSR7 */
79 writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
80
81 /* The base address of the RX/TX descriptor lists must be written into
82 * DMA CSR3 and CSR4, respectively. */
83 writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR);
84 writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR);
85
86 return 0;
87}
88
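One caveat in gmac_dma_init() above: the do {} while () poll on
DMA_BUS_MODE_SFT_RESET spins forever if the MAC clock is not running. A
bounded variant (a sketch only, not what the patch does; assumes
<linux/delay.h> and <linux/errno.h>):

	static int gmac_dma_sw_reset(unsigned long ioaddr)
	{
		int limit = 10000;

		writel(readl(ioaddr + DMA_BUS_MODE) | DMA_BUS_MODE_SFT_RESET,
		       ioaddr + DMA_BUS_MODE);

		/* The reset bit is self-clearing; poll with a timeout. */
		while (readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET) {
			if (--limit < 0)
				return -EBUSY;
			udelay(1);
		}
		return 0;
	}
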
89/* Transmit FIFO flush operation */
90static void gmac_flush_tx_fifo(unsigned long ioaddr)
91{
92 u32 csr6 = readl(ioaddr + DMA_CONTROL);
93 writel((csr6 | DMA_CONTROL_FTF), ioaddr + DMA_CONTROL);
94
95 do {} while ((readl(ioaddr + DMA_CONTROL) & DMA_CONTROL_FTF));
96}
97
98static void gmac_dma_operation_mode(unsigned long ioaddr, int txmode,
99 int rxmode)
100{
101 u32 csr6 = readl(ioaddr + DMA_CONTROL);
102
103 if (txmode == SF_DMA_MODE) {
104 DBG(KERN_DEBUG "GMAC: enabling TX store and forward mode\n");
105 /* Transmit COE type 2 cannot be done in cut-through mode. */
106 csr6 |= DMA_CONTROL_TSF;
 107 /* Operating on the second frame increases the performance,
 108 * especially when transmit store-and-forward is used.*/
109 csr6 |= DMA_CONTROL_OSF;
110 } else {
111 DBG(KERN_DEBUG "GMAC: disabling TX store and forward mode"
112 " (threshold = %d)\n", txmode);
113 csr6 &= ~DMA_CONTROL_TSF;
114 csr6 &= DMA_CONTROL_TC_TX_MASK;
 115 /* Set the transmit threshold */
116 if (txmode <= 32)
117 csr6 |= DMA_CONTROL_TTC_32;
118 else if (txmode <= 64)
119 csr6 |= DMA_CONTROL_TTC_64;
120 else if (txmode <= 128)
121 csr6 |= DMA_CONTROL_TTC_128;
122 else if (txmode <= 192)
123 csr6 |= DMA_CONTROL_TTC_192;
124 else
125 csr6 |= DMA_CONTROL_TTC_256;
126 }
127
128 if (rxmode == SF_DMA_MODE) {
129 DBG(KERN_DEBUG "GMAC: enabling RX store and forward mode\n");
130 csr6 |= DMA_CONTROL_RSF;
131 } else {
132 DBG(KERN_DEBUG "GMAC: disabling RX store and forward mode"
133 " (threshold = %d)\n", rxmode);
134 csr6 &= ~DMA_CONTROL_RSF;
135 csr6 &= DMA_CONTROL_TC_RX_MASK;
136 if (rxmode <= 32)
137 csr6 |= DMA_CONTROL_RTC_32;
138 else if (rxmode <= 64)
139 csr6 |= DMA_CONTROL_RTC_64;
140 else if (rxmode <= 96)
141 csr6 |= DMA_CONTROL_RTC_96;
142 else
143 csr6 |= DMA_CONTROL_RTC_128;
144 }
145
146 writel(csr6, ioaddr + DMA_CONTROL);
147 return;
148}
149
150/* Not yet implemented --- no RMON module */
151static void gmac_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
152 unsigned long ioaddr)
153{
154 return;
155}
156
157static void gmac_dump_dma_regs(unsigned long ioaddr)
158{
159 int i;
160 pr_info(" DMA registers\n");
161 for (i = 0; i < 22; i++) {
162 if ((i < 9) || (i > 17)) {
163 int offset = i * 4;
164 pr_err("\t Reg No. %d (offset 0x%x): 0x%08x\n", i,
165 (DMA_BUS_MODE + offset),
166 readl(ioaddr + DMA_BUS_MODE + offset));
167 }
168 }
169 return;
170}
171
172static int gmac_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
173 struct dma_desc *p, unsigned long ioaddr)
174{
175 int ret = 0;
176 struct net_device_stats *stats = (struct net_device_stats *)data;
177
178 if (unlikely(p->des01.etx.error_summary)) {
179 DBG(KERN_ERR "GMAC TX error... 0x%08x\n", p->des01.etx);
180 if (unlikely(p->des01.etx.jabber_timeout)) {
181 DBG(KERN_ERR "\tjabber_timeout error\n");
182 x->tx_jabber++;
183 }
184
185 if (unlikely(p->des01.etx.frame_flushed)) {
186 DBG(KERN_ERR "\tframe_flushed error\n");
187 x->tx_frame_flushed++;
188 gmac_flush_tx_fifo(ioaddr);
189 }
190
191 if (unlikely(p->des01.etx.loss_carrier)) {
192 DBG(KERN_ERR "\tloss_carrier error\n");
193 x->tx_losscarrier++;
194 stats->tx_carrier_errors++;
195 }
196 if (unlikely(p->des01.etx.no_carrier)) {
197 DBG(KERN_ERR "\tno_carrier error\n");
198 x->tx_carrier++;
199 stats->tx_carrier_errors++;
200 }
201 if (unlikely(p->des01.etx.late_collision)) {
202 DBG(KERN_ERR "\tlate_collision error\n");
203 stats->collisions += p->des01.etx.collision_count;
204 }
205 if (unlikely(p->des01.etx.excessive_collisions)) {
206 DBG(KERN_ERR "\texcessive_collisions\n");
207 stats->collisions += p->des01.etx.collision_count;
208 }
209 if (unlikely(p->des01.etx.excessive_deferral)) {
210 DBG(KERN_INFO "\texcessive tx_deferral\n");
211 x->tx_deferred++;
212 }
213
214 if (unlikely(p->des01.etx.underflow_error)) {
215 DBG(KERN_ERR "\tunderflow error\n");
216 gmac_flush_tx_fifo(ioaddr);
217 x->tx_underflow++;
218 }
219
220 if (unlikely(p->des01.etx.ip_header_error)) {
221 DBG(KERN_ERR "\tTX IP header csum error\n");
222 x->tx_ip_header_error++;
223 }
224
225 if (unlikely(p->des01.etx.payload_error)) {
226 DBG(KERN_ERR "\tAddr/Payload csum error\n");
227 x->tx_payload_error++;
228 gmac_flush_tx_fifo(ioaddr);
229 }
230
231 ret = -1;
232 }
233
234 if (unlikely(p->des01.etx.deferred)) {
235 DBG(KERN_INFO "GMAC TX status: tx deferred\n");
236 x->tx_deferred++;
237 }
238#ifdef STMMAC_VLAN_TAG_USED
239 if (p->des01.etx.vlan_frame) {
240 DBG(KERN_INFO "GMAC TX status: VLAN frame\n");
241 x->tx_vlan++;
242 }
243#endif
244
245 return ret;
246}
247
248static int gmac_get_tx_len(struct dma_desc *p)
249{
250 return p->des01.etx.buffer1_size;
251}
252
253static int gmac_coe_rdes0(int ipc_err, int type, int payload_err)
254{
255 int ret = good_frame;
256 u32 status = (type << 2 | ipc_err << 1 | payload_err) & 0x7;
257
258 /* bits 5 7 0 | Frame status
259 * ----------------------------------------------------------
 260 * 0 0 0 | IEEE 802.3 Type frame (length < 1536 octets)
 261 * 1 0 0 | IPv4/6 no CSUM errors.
 262 * 1 0 1 | IPv4/6 CSUM PAYLOAD error
 263 * 1 1 0 | IPv4/6 CSUM IP HR error
 264 * 1 1 1 | IPv4/6 IP PAYLOAD AND HEADER errors
265 * 0 0 1 | IPv4/6 unsupported IP PAYLOAD
266 * 0 1 1 | COE bypassed.. no IPv4/6 frame
267 * 0 1 0 | Reserved.
268 */
269 if (status == 0x0) {
270 DBG(KERN_INFO "RX Des0 status: IEEE 802.3 Type frame.\n");
271 ret = good_frame;
272 } else if (status == 0x4) {
 273 DBG(KERN_INFO "RX Des0 status: IPv4/6 no CSUM errors.\n");
274 ret = good_frame;
275 } else if (status == 0x5) {
276 DBG(KERN_ERR "RX Des0 status: IPv4/6 Payload Error.\n");
277 ret = csum_none;
278 } else if (status == 0x6) {
279 DBG(KERN_ERR "RX Des0 status: IPv4/6 Header Error.\n");
280 ret = csum_none;
281 } else if (status == 0x7) {
282 DBG(KERN_ERR
283 "RX Des0 status: IPv4/6 Header and Payload Error.\n");
284 ret = csum_none;
285 } else if (status == 0x1) {
286 DBG(KERN_ERR
287 "RX Des0 status: IPv4/6 unsupported IP PAYLOAD.\n");
288 ret = discard_frame;
289 } else if (status == 0x3) {
290 DBG(KERN_ERR "RX Des0 status: No IPv4, IPv6 frame.\n");
291 ret = discard_frame;
292 }
293 return ret;
294}
295
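A worked example of the table in gmac_coe_rdes0(): an IPv4 frame whose
payload checksum failed has frame_type = 1, ipc_csum_error = 0 and
payload_csum_error = 1, so status = (1 << 2) | (0 << 1) | 1 = 0x5 and
the frame is passed up with csum_none, letting the stack re-verify the
checksum in software:

	int s = gmac_coe_rdes0(0 /* ipc_err */, 1 /* type */,
			       1 /* payload_err */);
	/* s == csum_none */
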
296static int gmac_get_rx_frame_status(void *data, struct stmmac_extra_stats *x,
297 struct dma_desc *p)
298{
299 int ret = good_frame;
300 struct net_device_stats *stats = (struct net_device_stats *)data;
301
302 if (unlikely(p->des01.erx.error_summary)) {
303 DBG(KERN_ERR "GMAC RX Error Summary... 0x%08x\n", p->des01.erx);
304 if (unlikely(p->des01.erx.descriptor_error)) {
305 DBG(KERN_ERR "\tdescriptor error\n");
306 x->rx_desc++;
307 stats->rx_length_errors++;
308 }
309 if (unlikely(p->des01.erx.overflow_error)) {
310 DBG(KERN_ERR "\toverflow error\n");
311 x->rx_gmac_overflow++;
312 }
313
314 if (unlikely(p->des01.erx.ipc_csum_error))
315 DBG(KERN_ERR "\tIPC Csum Error/Giant frame\n");
316
317 if (unlikely(p->des01.erx.late_collision)) {
318 DBG(KERN_ERR "\tlate_collision error\n");
 319 stats->collisions++;
321 }
322 if (unlikely(p->des01.erx.receive_watchdog)) {
323 DBG(KERN_ERR "\treceive_watchdog error\n");
324 x->rx_watchdog++;
325 }
326 if (unlikely(p->des01.erx.error_gmii)) {
327 DBG(KERN_ERR "\tReceive Error\n");
328 x->rx_mii++;
329 }
330 if (unlikely(p->des01.erx.crc_error)) {
331 DBG(KERN_ERR "\tCRC error\n");
332 x->rx_crc++;
333 stats->rx_crc_errors++;
334 }
335 ret = discard_frame;
336 }
337
338 /* After a payload csum error, the ES bit is set.
 339 * This doesn't match the information reported in the databook.
 340 * In any case, we need to work out whether the HW csum computation
 341 * is OK and report that to the upper layers. */
342 ret = gmac_coe_rdes0(p->des01.erx.ipc_csum_error,
343 p->des01.erx.frame_type, p->des01.erx.payload_csum_error);
344
345 if (unlikely(p->des01.erx.dribbling)) {
346 DBG(KERN_ERR "GMAC RX: dribbling error\n");
347 ret = discard_frame;
348 }
349 if (unlikely(p->des01.erx.sa_filter_fail)) {
350 DBG(KERN_ERR "GMAC RX : Source Address filter fail\n");
351 x->sa_rx_filter_fail++;
352 ret = discard_frame;
353 }
354 if (unlikely(p->des01.erx.da_filter_fail)) {
355 DBG(KERN_ERR "GMAC RX : Destination Address filter fail\n");
356 x->da_rx_filter_fail++;
357 ret = discard_frame;
358 }
359 if (unlikely(p->des01.erx.length_error)) {
360 DBG(KERN_ERR "GMAC RX: length_error error\n");
361 x->rx_lenght++;
362 ret = discard_frame;
363 }
364#ifdef STMMAC_VLAN_TAG_USED
365 if (p->des01.erx.vlan_tag) {
366 DBG(KERN_INFO "GMAC RX: VLAN frame tagged\n");
367 x->rx_vlan++;
368 }
369#endif
370 return ret;
371}
372
373static void gmac_irq_status(unsigned long ioaddr)
374{
375 u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
376
 377 /* Events the driver does not use (e.g. MMC interrupts) are not handled. */
378 if ((intr_status & mmc_tx_irq))
379 DBG(KERN_DEBUG "GMAC: MMC tx interrupt: 0x%08x\n",
380 readl(ioaddr + GMAC_MMC_TX_INTR));
381 if (unlikely(intr_status & mmc_rx_irq))
382 DBG(KERN_DEBUG "GMAC: MMC rx interrupt: 0x%08x\n",
383 readl(ioaddr + GMAC_MMC_RX_INTR));
384 if (unlikely(intr_status & mmc_rx_csum_offload_irq))
385 DBG(KERN_DEBUG "GMAC: MMC rx csum offload: 0x%08x\n",
386 readl(ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD));
387 if (unlikely(intr_status & pmt_irq)) {
388 DBG(KERN_DEBUG "GMAC: received Magic frame\n");
389 /* clear the PMT bits 5 and 6 by reading the PMT
390 * status register. */
391 readl(ioaddr + GMAC_PMT);
392 }
393
394 return;
395}
396
397static void gmac_core_init(unsigned long ioaddr)
398{
399 u32 value = readl(ioaddr + GMAC_CONTROL);
400 value |= GMAC_CORE_INIT;
401 writel(value, ioaddr + GMAC_CONTROL);
402
403 /* STBus Bridge Configuration */
404 /*writel(0xc5608, ioaddr + 0x00007000);*/
405
406 /* Freeze MMC counters */
407 writel(0x8, ioaddr + GMAC_MMC_CTRL);
408 /* Mask GMAC interrupts */
409 writel(0x207, ioaddr + GMAC_INT_MASK);
410
411#ifdef STMMAC_VLAN_TAG_USED
412 /* Tag detection without filtering */
413 writel(0x0, ioaddr + GMAC_VLAN_TAG);
414#endif
415 return;
416}
417
418static void gmac_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
419 unsigned int reg_n)
420{
421 stmmac_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
422 GMAC_ADDR_LOW(reg_n));
423}
424
425static void gmac_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
426 unsigned int reg_n)
427{
428 stmmac_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
429 GMAC_ADDR_LOW(reg_n));
430}
431
432static void gmac_set_filter(struct net_device *dev)
433{
434 unsigned long ioaddr = dev->base_addr;
435 unsigned int value = 0;
436
437 DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n",
438 __func__, dev->mc_count, dev->uc_count);
439
440 if (dev->flags & IFF_PROMISC)
441 value = GMAC_FRAME_FILTER_PR;
442 else if ((dev->mc_count > HASH_TABLE_SIZE)
443 || (dev->flags & IFF_ALLMULTI)) {
444 value = GMAC_FRAME_FILTER_PM; /* pass all multi */
445 writel(0xffffffff, ioaddr + GMAC_HASH_HIGH);
446 writel(0xffffffff, ioaddr + GMAC_HASH_LOW);
447 } else if (dev->mc_count > 0) {
448 int i;
449 u32 mc_filter[2];
450 struct dev_mc_list *mclist;
451
452 /* Hash filter for multicast */
453 value = GMAC_FRAME_FILTER_HMC;
454
455 memset(mc_filter, 0, sizeof(mc_filter));
456 for (i = 0, mclist = dev->mc_list;
457 mclist && i < dev->mc_count; i++, mclist = mclist->next) {
458 /* The upper 6 bits of the calculated CRC are used to
 459 index the contents of the hash table */
460 int bit_nr =
461 bitrev32(~crc32_le(~0, mclist->dmi_addr, 6)) >> 26;
462 /* The most significant bit determines the register to
463 * use (H/L) while the other 5 bits determine the bit
464 * within the register. */
465 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
466 }
467 writel(mc_filter[0], ioaddr + GMAC_HASH_LOW);
468 writel(mc_filter[1], ioaddr + GMAC_HASH_HIGH);
469 }
470
471 /* Handle multiple unicast addresses (perfect filtering)*/
472 if (dev->uc_count > GMAC_MAX_UNICAST_ADDRESSES)
 473 /* Switch to promiscuous mode if more than 16 addrs
 474 are required */
475 value |= GMAC_FRAME_FILTER_PR;
476 else {
477 int i;
478 struct dev_addr_list *uc_ptr = dev->uc_list;
479
480 for (i = 0; i < dev->uc_count; i++) {
481 gmac_set_umac_addr(ioaddr, uc_ptr->da_addr,
482 i + 1);
483
484 DBG(KERN_INFO "\t%d "
485 "- Unicast addr %02x:%02x:%02x:%02x:%02x:"
486 "%02x\n", i + 1,
487 uc_ptr->da_addr[0], uc_ptr->da_addr[1],
488 uc_ptr->da_addr[2], uc_ptr->da_addr[3],
489 uc_ptr->da_addr[4], uc_ptr->da_addr[5]);
490 uc_ptr = uc_ptr->next;
491 }
492 }
493
494#ifdef FRAME_FILTER_DEBUG
495 /* Enable Receive all mode (to debug filtering_fail errors) */
496 value |= GMAC_FRAME_FILTER_RA;
497#endif
498 writel(value, ioaddr + GMAC_FRAME_FILTER);
499
500 DBG(KERN_INFO "\tFrame Filter reg: 0x%08x\n\tHash regs: "
501 "HI 0x%08x, LO 0x%08x\n", readl(ioaddr + GMAC_FRAME_FILTER),
502 readl(ioaddr + GMAC_HASH_HIGH), readl(ioaddr + GMAC_HASH_LOW));
503
504 return;
505}
506
507static void gmac_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
508 unsigned int fc, unsigned int pause_time)
509{
510 unsigned int flow = 0;
511
512 DBG(KERN_DEBUG "GMAC Flow-Control:\n");
513 if (fc & FLOW_RX) {
514 DBG(KERN_DEBUG "\tReceive Flow-Control ON\n");
515 flow |= GMAC_FLOW_CTRL_RFE;
516 }
517 if (fc & FLOW_TX) {
518 DBG(KERN_DEBUG "\tTransmit Flow-Control ON\n");
519 flow |= GMAC_FLOW_CTRL_TFE;
520 }
521
522 if (duplex) {
523 DBG(KERN_DEBUG "\tduplex mode: pause time: %d\n", pause_time);
524 flow |= (pause_time << GMAC_FLOW_CTRL_PT_SHIFT);
525 }
526
527 writel(flow, ioaddr + GMAC_FLOW_CTRL);
528 return;
529}
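A worked example of the register value written by gmac_flow_ctrl():
full duplex with FLOW_AUTO and the default PAUSE_TIME (0x200, from
common.h) gives:

	/* gmac_flow_ctrl(ioaddr, 1, FLOW_AUTO, PAUSE_TIME) writes
	 *   GMAC_FLOW_CTRL_RFE | GMAC_FLOW_CTRL_TFE | (0x200 << 16)
	 * = 0x00000004        | 0x00000002         | 0x02000000
	 * = 0x02000006 to the GMAC_FLOW_CTRL register. */
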
530
531static void gmac_pmt(unsigned long ioaddr, unsigned long mode)
532{
533 unsigned int pmt = 0;
534
535 if (mode == WAKE_MAGIC) {
536 DBG(KERN_DEBUG "GMAC: WOL Magic frame\n");
537 pmt |= power_down | magic_pkt_en;
538 } else if (mode == WAKE_UCAST) {
539 DBG(KERN_DEBUG "GMAC: WOL on global unicast\n");
540 pmt |= global_unicast;
541 }
542
543 writel(pmt, ioaddr + GMAC_PMT);
544 return;
545}
546
547static void gmac_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
548 int disable_rx_ic)
549{
550 int i;
551 for (i = 0; i < ring_size; i++) {
552 p->des01.erx.own = 1;
553 p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1;
554 /* To support jumbo frames */
555 p->des01.erx.buffer2_size = BUF_SIZE_8KiB - 1;
556 if (i == ring_size - 1)
557 p->des01.erx.end_ring = 1;
558 if (disable_rx_ic)
559 p->des01.erx.disable_ic = 1;
560 p++;
561 }
562 return;
563}
564
565static void gmac_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
566{
567 int i;
568
569 for (i = 0; i < ring_size; i++) {
570 p->des01.etx.own = 0;
571 if (i == ring_size - 1)
572 p->des01.etx.end_ring = 1;
573 p++;
574 }
575
576 return;
577}
578
579static int gmac_get_tx_owner(struct dma_desc *p)
580{
581 return p->des01.etx.own;
582}
583
584static int gmac_get_rx_owner(struct dma_desc *p)
585{
586 return p->des01.erx.own;
587}
588
589static void gmac_set_tx_owner(struct dma_desc *p)
590{
591 p->des01.etx.own = 1;
592}
593
594static void gmac_set_rx_owner(struct dma_desc *p)
595{
596 p->des01.erx.own = 1;
597}
598
599static int gmac_get_tx_ls(struct dma_desc *p)
600{
601 return p->des01.etx.last_segment;
602}
603
604static void gmac_release_tx_desc(struct dma_desc *p)
605{
606 int ter = p->des01.etx.end_ring;
607
608 memset(p, 0, sizeof(struct dma_desc));
609 p->des01.etx.end_ring = ter;
610
611 return;
612}
613
614static void gmac_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
615 int csum_flag)
616{
617 p->des01.etx.first_segment = is_fs;
618 if (unlikely(len > BUF_SIZE_4KiB)) {
619 p->des01.etx.buffer1_size = BUF_SIZE_4KiB;
620 p->des01.etx.buffer2_size = len - BUF_SIZE_4KiB;
621 } else {
622 p->des01.etx.buffer1_size = len;
623 }
624 if (likely(csum_flag))
625 p->des01.etx.checksum_insertion = cic_full;
626}
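The two-buffer split above is what lets the 13-bit size fields carry
jumbo frames; for example a hypothetical 6000-byte frame splits as:

	gmac_prepare_tx_desc(p, 1 /* first segment */, 6000, 1 /* HW csum */);
	/* p->des01.etx.buffer1_size == 4096 (BUF_SIZE_4KiB),
	 * p->des01.etx.buffer2_size == 1904,
	 * p->des01.etx.checksum_insertion == cic_full */
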
627
628static void gmac_clear_tx_ic(struct dma_desc *p)
629{
630 p->des01.etx.interrupt = 0;
631}
632
633static void gmac_close_tx_desc(struct dma_desc *p)
634{
635 p->des01.etx.last_segment = 1;
636 p->des01.etx.interrupt = 1;
637}
638
639static int gmac_get_rx_frame_len(struct dma_desc *p)
640{
641 return p->des01.erx.frame_length;
642}
643
644struct stmmac_ops gmac_driver = {
645 .core_init = gmac_core_init,
646 .dump_mac_regs = gmac_dump_regs,
647 .dma_init = gmac_dma_init,
648 .dump_dma_regs = gmac_dump_dma_regs,
649 .dma_mode = gmac_dma_operation_mode,
650 .dma_diagnostic_fr = gmac_dma_diagnostic_fr,
651 .tx_status = gmac_get_tx_frame_status,
652 .rx_status = gmac_get_rx_frame_status,
653 .get_tx_len = gmac_get_tx_len,
654 .set_filter = gmac_set_filter,
655 .flow_ctrl = gmac_flow_ctrl,
656 .pmt = gmac_pmt,
657 .init_rx_desc = gmac_init_rx_desc,
658 .init_tx_desc = gmac_init_tx_desc,
659 .get_tx_owner = gmac_get_tx_owner,
660 .get_rx_owner = gmac_get_rx_owner,
661 .release_tx_desc = gmac_release_tx_desc,
662 .prepare_tx_desc = gmac_prepare_tx_desc,
663 .clear_tx_ic = gmac_clear_tx_ic,
664 .close_tx_desc = gmac_close_tx_desc,
665 .get_tx_ls = gmac_get_tx_ls,
666 .set_tx_owner = gmac_set_tx_owner,
667 .set_rx_owner = gmac_set_rx_owner,
668 .get_rx_frame_len = gmac_get_rx_frame_len,
669 .host_irq_status = gmac_irq_status,
670 .set_umac_addr = gmac_set_umac_addr,
671 .get_umac_addr = gmac_get_umac_addr,
672};
673
674struct mac_device_info *gmac_setup(unsigned long ioaddr)
675{
676 struct mac_device_info *mac;
677 u32 uid = readl(ioaddr + GMAC_VERSION);
678
679 pr_info("\tGMAC - user ID: 0x%x, Synopsys ID: 0x%x\n",
680 ((uid & 0x0000ff00) >> 8), (uid & 0x000000ff));
681
682 mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
683
684 mac->ops = &gmac_driver;
685 mac->hw.pmt = PMT_SUPPORTED;
686 mac->hw.link.port = GMAC_CONTROL_PS;
687 mac->hw.link.duplex = GMAC_CONTROL_DM;
688 mac->hw.link.speed = GMAC_CONTROL_FES;
689 mac->hw.mii.addr = GMAC_MII_ADDR;
690 mac->hw.mii.data = GMAC_MII_DATA;
691
692 return mac;
693}
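A reviewer note on gmac_setup(): kzalloc() can fail, and the result is
dereferenced unconditionally (the sizeof(const struct ...) is legal but
unusual). A defensive sketch of the allocation (not what the patch
does):

	mac = kzalloc(sizeof(struct mac_device_info), GFP_KERNEL);
	if (!mac)
		return NULL;	/* callers must then handle a NULL return */
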
diff --git a/drivers/net/stmmac/gmac.h b/drivers/net/stmmac/gmac.h
new file mode 100644
index 000000000000..684a363120a9
--- /dev/null
+++ b/drivers/net/stmmac/gmac.h
@@ -0,0 +1,204 @@
1/*******************************************************************************
2 Copyright (C) 2007-2009 STMicroelectronics Ltd
3
4 This program is free software; you can redistribute it and/or modify it
5 under the terms and conditions of the GNU General Public License,
6 version 2, as published by the Free Software Foundation.
7
8 This program is distributed in the hope it will be useful, but WITHOUT
9 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 more details.
12
13 You should have received a copy of the GNU General Public License along with
14 this program; if not, write to the Free Software Foundation, Inc.,
15 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16
17 The full GNU General Public License is included in this distribution in
18 the file called "COPYING".
19
20 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
21*******************************************************************************/
22
23#define GMAC_CONTROL 0x00000000 /* Configuration */
24#define GMAC_FRAME_FILTER 0x00000004 /* Frame Filter */
25#define GMAC_HASH_HIGH 0x00000008 /* Multicast Hash Table High */
26#define GMAC_HASH_LOW 0x0000000c /* Multicast Hash Table Low */
27#define GMAC_MII_ADDR 0x00000010 /* MII Address */
28#define GMAC_MII_DATA 0x00000014 /* MII Data */
29#define GMAC_FLOW_CTRL 0x00000018 /* Flow Control */
30#define GMAC_VLAN_TAG 0x0000001c /* VLAN Tag */
31#define GMAC_VERSION 0x00000020 /* GMAC CORE Version */
32#define GMAC_WAKEUP_FILTER 0x00000028 /* Wake-up Frame Filter */
33
34#define GMAC_INT_STATUS 0x00000038 /* interrupt status register */
35enum gmac_irq_status {
36 time_stamp_irq = 0x0200,
37 mmc_rx_csum_offload_irq = 0x0080,
38 mmc_tx_irq = 0x0040,
39 mmc_rx_irq = 0x0020,
40 mmc_irq = 0x0010,
41 pmt_irq = 0x0008,
42 pcs_ane_irq = 0x0004,
43 pcs_link_irq = 0x0002,
44 rgmii_irq = 0x0001,
45};
46#define GMAC_INT_MASK 0x0000003c /* interrupt mask register */
47
48/* PMT Control and Status */
49#define GMAC_PMT 0x0000002c
50enum power_event {
51 pointer_reset = 0x80000000,
52 global_unicast = 0x00000200,
53 wake_up_rx_frame = 0x00000040,
54 magic_frame = 0x00000020,
55 wake_up_frame_en = 0x00000004,
56 magic_pkt_en = 0x00000002,
57 power_down = 0x00000001,
58};
59
60/* GMAC HW ADDR regs */
61#define GMAC_ADDR_HIGH(reg) (0x00000040+(reg * 8))
62#define GMAC_ADDR_LOW(reg) (0x00000044+(reg * 8))
63#define GMAC_MAX_UNICAST_ADDRESSES 16
64
65#define GMAC_AN_CTRL 0x000000c0 /* AN control */
66#define GMAC_AN_STATUS 0x000000c4 /* AN status */
67#define GMAC_ANE_ADV 0x000000c8 /* Auto-Neg. Advertisement */
 68#define GMAC_ANE_LINK 0x000000cc /* Auto-Neg. link partner ability */
69#define GMAC_ANE_EXP 0x000000d0 /* ANE expansion */
70#define GMAC_TBI 0x000000d4 /* TBI extend status */
71#define GMAC_GMII_STATUS 0x000000d8 /* S/R-GMII status */
72
73/* GMAC Configuration defines */
74#define GMAC_CONTROL_TC 0x01000000 /* Transmit Conf. in RGMII/SGMII */
75#define GMAC_CONTROL_WD 0x00800000 /* Disable Watchdog on receive */
76#define GMAC_CONTROL_JD 0x00400000 /* Jabber disable */
77#define GMAC_CONTROL_BE 0x00200000 /* Frame Burst Enable */
78#define GMAC_CONTROL_JE 0x00100000 /* Jumbo frame */
79enum inter_frame_gap {
80 GMAC_CONTROL_IFG_88 = 0x00040000,
81 GMAC_CONTROL_IFG_80 = 0x00020000,
82 GMAC_CONTROL_IFG_40 = 0x000e0000,
83};
84#define GMAC_CONTROL_DCRS 0x00010000 /* Disable carrier sense during tx */
 85#define GMAC_CONTROL_PS 0x00008000 /* Port Select 0:GMII 1:MII */
86#define GMAC_CONTROL_FES 0x00004000 /* Speed 0:10 1:100 */
87#define GMAC_CONTROL_DO 0x00002000 /* Disable Rx Own */
88#define GMAC_CONTROL_LM 0x00001000 /* Loop-back mode */
89#define GMAC_CONTROL_DM 0x00000800 /* Duplex Mode */
90#define GMAC_CONTROL_IPC 0x00000400 /* Checksum Offload */
91#define GMAC_CONTROL_DR 0x00000200 /* Disable Retry */
92#define GMAC_CONTROL_LUD 0x00000100 /* Link up/down */
93#define GMAC_CONTROL_ACS 0x00000080 /* Automatic Pad Stripping */
94#define GMAC_CONTROL_DC 0x00000010 /* Deferral Check */
95#define GMAC_CONTROL_TE 0x00000008 /* Transmitter Enable */
96#define GMAC_CONTROL_RE 0x00000004 /* Receiver Enable */
97
98#define GMAC_CORE_INIT (GMAC_CONTROL_JD | GMAC_CONTROL_PS | GMAC_CONTROL_ACS | \
99 GMAC_CONTROL_IPC | GMAC_CONTROL_JE | GMAC_CONTROL_BE)
100
101/* GMAC Frame Filter defines */
102#define GMAC_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */
103#define GMAC_FRAME_FILTER_HUC 0x00000002 /* Hash Unicast */
104#define GMAC_FRAME_FILTER_HMC 0x00000004 /* Hash Multicast */
105#define GMAC_FRAME_FILTER_DAIF 0x00000008 /* DA Inverse Filtering */
106#define GMAC_FRAME_FILTER_PM 0x00000010 /* Pass all multicast */
107#define GMAC_FRAME_FILTER_DBF 0x00000020 /* Disable Broadcast frames */
108#define GMAC_FRAME_FILTER_SAIF 0x00000100 /* Inverse Filtering */
109#define GMAC_FRAME_FILTER_SAF 0x00000200 /* Source Address Filter */
110#define GMAC_FRAME_FILTER_HPF 0x00000400 /* Hash or perfect Filter */
111#define GMAC_FRAME_FILTER_RA 0x80000000 /* Receive all mode */
112/* GMII ADDR defines */
113#define GMAC_MII_ADDR_WRITE 0x00000002 /* MII Write */
114#define GMAC_MII_ADDR_BUSY 0x00000001 /* MII Busy */
115/* GMAC FLOW CTRL defines */
116#define GMAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */
117#define GMAC_FLOW_CTRL_PT_SHIFT 16
118#define GMAC_FLOW_CTRL_RFE 0x00000004 /* Rx Flow Control Enable */
119#define GMAC_FLOW_CTRL_TFE 0x00000002 /* Tx Flow Control Enable */
120#define GMAC_FLOW_CTRL_FCB_BPA 0x00000001 /* Flow Control Busy ... */
121
122/*--- DMA BLOCK defines ---*/
123/* DMA Bus Mode register defines */
124#define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */
125#define DMA_BUS_MODE_DA 0x00000002 /* Arbitration scheme */
126#define DMA_BUS_MODE_DSL_MASK 0x0000007c /* Descriptor Skip Length */
127#define DMA_BUS_MODE_DSL_SHIFT 2 /* (in DWORDS) */
128/* Programmable burst length (passed through platform) */
129#define DMA_BUS_MODE_PBL_MASK 0x00003f00 /* Programmable Burst Len */
130#define DMA_BUS_MODE_PBL_SHIFT 8
131
132enum rx_tx_priority_ratio {
133 double_ratio = 0x00004000, /*2:1 */
134 triple_ratio = 0x00008000, /*3:1 */
135 quadruple_ratio = 0x0000c000, /*4:1 */
136};
137
138#define DMA_BUS_MODE_FB 0x00010000 /* Fixed burst */
139#define DMA_BUS_MODE_RPBL_MASK 0x003e0000 /* Rx-Programmable Burst Len */
140#define DMA_BUS_MODE_RPBL_SHIFT 17
141#define DMA_BUS_MODE_USP 0x00800000
142#define DMA_BUS_MODE_4PBL 0x01000000
143#define DMA_BUS_MODE_AAL 0x02000000
144
145/* DMA Control and Status Register (CSR) Mapping */
146#define DMA_HOST_TX_DESC 0x00001048 /* Current Host Tx descriptor */
147#define DMA_HOST_RX_DESC 0x0000104c /* Current Host Rx descriptor */
148/* DMA Bus Mode register defines */
149#define DMA_BUS_PR_RATIO_MASK 0x0000c000 /* Rx/Tx priority ratio */
150#define DMA_BUS_PR_RATIO_SHIFT 14
151#define DMA_BUS_FB 0x00010000 /* Fixed Burst */
152
153/* DMA operation mode defines (start/stop tx/rx are placed in common header)*/
154#define DMA_CONTROL_DT 0x04000000 /* Disable Drop TCP/IP csum error */
155#define DMA_CONTROL_RSF 0x02000000 /* Receive Store and Forward */
156#define DMA_CONTROL_DFF 0x01000000 /* Disable flushing */
157/* Threshold for Activating the FC */
158enum rfa {
159 act_full_minus_1 = 0x00800000,
160 act_full_minus_2 = 0x00800200,
161 act_full_minus_3 = 0x00800400,
162 act_full_minus_4 = 0x00800600,
163};
164/* Threshold for Deactivating the FC */
165enum rfd {
166 deac_full_minus_1 = 0x00400000,
167 deac_full_minus_2 = 0x00400800,
168 deac_full_minus_3 = 0x00401000,
169 deac_full_minus_4 = 0x00401800,
170};
171#define DMA_CONTROL_TSF 0x00200000 /* Transmit Store and Forward */
172#define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */
173
174enum ttc_control {
175 DMA_CONTROL_TTC_64 = 0x00000000,
176 DMA_CONTROL_TTC_128 = 0x00004000,
177 DMA_CONTROL_TTC_192 = 0x00008000,
178 DMA_CONTROL_TTC_256 = 0x0000c000,
179 DMA_CONTROL_TTC_40 = 0x00010000,
180 DMA_CONTROL_TTC_32 = 0x00014000,
181 DMA_CONTROL_TTC_24 = 0x00018000,
182 DMA_CONTROL_TTC_16 = 0x0001c000,
183};
184#define DMA_CONTROL_TC_TX_MASK 0xfffe3fff
185
186#define DMA_CONTROL_EFC 0x00000100
187#define DMA_CONTROL_FEF 0x00000080
188#define DMA_CONTROL_FUF 0x00000040
189
190enum rtc_control {
191 DMA_CONTROL_RTC_64 = 0x00000000,
192 DMA_CONTROL_RTC_32 = 0x00000008,
193 DMA_CONTROL_RTC_96 = 0x00000010,
194 DMA_CONTROL_RTC_128 = 0x00000018,
195};
196#define DMA_CONTROL_TC_RX_MASK 0xffffffe7
197
198#define DMA_CONTROL_OSF 0x00000004 /* Operate on second frame */
199
200/* MMC registers offset */
201#define GMAC_MMC_CTRL 0x100
202#define GMAC_MMC_RX_INTR 0x104
203#define GMAC_MMC_TX_INTR 0x108
204#define GMAC_MMC_RX_CSUM_OFFLOAD 0x208
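
For reference, the perfect-filter slots used by gmac_set_filter() above
start at 1, leaving slot 0 for the primary MAC address; programming
extra unicast address n (1..15) goes through the common helper:

	stmmac_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(n), GMAC_ADDR_LOW(n));
	/* n = 1  -> registers 0x48 / 0x4c
	 * n = 15 -> registers 0xb8 / 0xbc */
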
diff --git a/drivers/net/stmmac/mac100.c b/drivers/net/stmmac/mac100.c
new file mode 100644
index 000000000000..625171b6062b
--- /dev/null
+++ b/drivers/net/stmmac/mac100.c
@@ -0,0 +1,517 @@
1/*******************************************************************************
2 This is the driver for the MAC 10/100 on-chip Ethernet controller
3 currently tested on all the ST boards based on STb7109 and stx7200 SoCs.
4
5 DWC Ether MAC 10/100 Universal version 4.0 has been used for developing
6 this code.
7
8 Copyright (C) 2007-2009 STMicroelectronics Ltd
9
10 This program is free software; you can redistribute it and/or modify it
11 under the terms and conditions of the GNU General Public License,
12 version 2, as published by the Free Software Foundation.
13
14 This program is distributed in the hope it will be useful, but WITHOUT
15 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 more details.
18
19 You should have received a copy of the GNU General Public License along with
20 this program; if not, write to the Free Software Foundation, Inc.,
21 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
22
23 The full GNU General Public License is included in this distribution in
24 the file called "COPYING".
25
26 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
27*******************************************************************************/
28
29#include <linux/netdevice.h>
30#include <linux/crc32.h>
31#include <linux/mii.h>
32#include <linux/phy.h>
33
34#include "common.h"
35#include "mac100.h"
36
37#undef MAC100_DEBUG
38/*#define MAC100_DEBUG*/
39#ifdef MAC100_DEBUG
40#define DBG(fmt, args...) printk(fmt, ## args)
41#else
42#define DBG(fmt, args...) do { } while (0)
43#endif
44
45static void mac100_core_init(unsigned long ioaddr)
46{
47 u32 value = readl(ioaddr + MAC_CONTROL);
48
49 writel((value | MAC_CORE_INIT), ioaddr + MAC_CONTROL);
50
51#ifdef STMMAC_VLAN_TAG_USED
52 writel(ETH_P_8021Q, ioaddr + MAC_VLAN1);
53#endif
54 return;
55}
56
57static void mac100_dump_mac_regs(unsigned long ioaddr)
58{
59 pr_info("\t----------------------------------------------\n"
60 "\t MAC100 CSR (base addr = 0x%8x)\n"
61 "\t----------------------------------------------\n",
62 (unsigned int)ioaddr);
63 pr_info("\tcontrol reg (offset 0x%x): 0x%08x\n", MAC_CONTROL,
64 readl(ioaddr + MAC_CONTROL));
65 pr_info("\taddr HI (offset 0x%x): 0x%08x\n ", MAC_ADDR_HIGH,
66 readl(ioaddr + MAC_ADDR_HIGH));
67 pr_info("\taddr LO (offset 0x%x): 0x%08x\n", MAC_ADDR_LOW,
68 readl(ioaddr + MAC_ADDR_LOW));
69 pr_info("\tmulticast hash HI (offset 0x%x): 0x%08x\n",
70 MAC_HASH_HIGH, readl(ioaddr + MAC_HASH_HIGH));
71 pr_info("\tmulticast hash LO (offset 0x%x): 0x%08x\n",
72 MAC_HASH_LOW, readl(ioaddr + MAC_HASH_LOW));
73 pr_info("\tflow control (offset 0x%x): 0x%08x\n",
74 MAC_FLOW_CTRL, readl(ioaddr + MAC_FLOW_CTRL));
75 pr_info("\tVLAN1 tag (offset 0x%x): 0x%08x\n", MAC_VLAN1,
76 readl(ioaddr + MAC_VLAN1));
77 pr_info("\tVLAN2 tag (offset 0x%x): 0x%08x\n", MAC_VLAN2,
78 readl(ioaddr + MAC_VLAN2));
79 pr_info("\n\tMAC management counter registers\n");
80 pr_info("\t MMC crtl (offset 0x%x): 0x%08x\n",
81 MMC_CONTROL, readl(ioaddr + MMC_CONTROL));
82 pr_info("\t MMC High Interrupt (offset 0x%x): 0x%08x\n",
83 MMC_HIGH_INTR, readl(ioaddr + MMC_HIGH_INTR));
84 pr_info("\t MMC Low Interrupt (offset 0x%x): 0x%08x\n",
85 MMC_LOW_INTR, readl(ioaddr + MMC_LOW_INTR));
86 pr_info("\t MMC High Interrupt Mask (offset 0x%x): 0x%08x\n",
87 MMC_HIGH_INTR_MASK, readl(ioaddr + MMC_HIGH_INTR_MASK));
88 pr_info("\t MMC Low Interrupt Mask (offset 0x%x): 0x%08x\n",
89 MMC_LOW_INTR_MASK, readl(ioaddr + MMC_LOW_INTR_MASK));
90 return;
91}
92
93static int mac100_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
94 u32 dma_rx)
95{
96 u32 value = readl(ioaddr + DMA_BUS_MODE);
97 /* DMA SW reset */
98 value |= DMA_BUS_MODE_SFT_RESET;
99 writel(value, ioaddr + DMA_BUS_MODE);
100 do {} while ((readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET));
101
102 /* Enable Application Access by writing to DMA CSR0 */
103 writel(DMA_BUS_MODE_DEFAULT | (pbl << DMA_BUS_MODE_PBL_SHIFT),
104 ioaddr + DMA_BUS_MODE);
105
106 /* Mask interrupts by writing to CSR7 */
107 writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
108
109 /* The base address of the RX/TX descriptor lists must be written into
110 * DMA CSR3 and CSR4, respectively. */
111 writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR);
112 writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR);
113
114 return 0;
115}
116
117/* Store and Forward capability is not used at all.
118 * The transmit threshold can be programmed by
119 * setting the TTC bits in the DMA control register.*/
120static void mac100_dma_operation_mode(unsigned long ioaddr, int txmode,
121 int rxmode)
122{
123 u32 csr6 = readl(ioaddr + DMA_CONTROL);
124
125 if (txmode <= 32)
126 csr6 |= DMA_CONTROL_TTC_32;
127 else if (txmode <= 64)
128 csr6 |= DMA_CONTROL_TTC_64;
129 else
130 csr6 |= DMA_CONTROL_TTC_128;
131
132 writel(csr6, ioaddr + DMA_CONTROL);
133
134 return;
135}
136
137static void mac100_dump_dma_regs(unsigned long ioaddr)
138{
139 int i;
140
141 DBG(KERN_DEBUG "MAC100 DMA CSR \n");
142 for (i = 0; i < 9; i++)
143 pr_debug("\t CSR%d (offset 0x%x): 0x%08x\n", i,
144 (DMA_BUS_MODE + i * 4),
145 readl(ioaddr + DMA_BUS_MODE + i * 4));
146 DBG(KERN_DEBUG "\t CSR20 (offset 0x%x): 0x%08x\n",
147 DMA_CUR_TX_BUF_ADDR, readl(ioaddr + DMA_CUR_TX_BUF_ADDR));
148 DBG(KERN_DEBUG "\t CSR21 (offset 0x%x): 0x%08x\n",
149 DMA_CUR_RX_BUF_ADDR, readl(ioaddr + DMA_CUR_RX_BUF_ADDR));
150 return;
151}
152
153/* The DMA controller has two counters to track the number of
154 missed receive frames. */
155static void mac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
156 unsigned long ioaddr)
157{
158 struct net_device_stats *stats = (struct net_device_stats *)data;
159 u32 csr8 = readl(ioaddr + DMA_MISSED_FRAME_CTR);
160
161 if (unlikely(csr8)) {
162 if (csr8 & DMA_MISSED_FRAME_OVE) {
163 stats->rx_over_errors += 0x800;
164 x->rx_overflow_cntr += 0x800;
165 } else {
166 unsigned int ove_cntr;
167 ove_cntr = ((csr8 & DMA_MISSED_FRAME_OVE_CNTR) >> 17);
168 stats->rx_over_errors += ove_cntr;
169 x->rx_overflow_cntr += ove_cntr;
170 }
171
172 if (csr8 & DMA_MISSED_FRAME_OVE_M) {
173 stats->rx_missed_errors += 0xffff;
174 x->rx_missed_cntr += 0xffff;
175 } else {
176 unsigned int miss_f = (csr8 & DMA_MISSED_FRAME_M_CNTR);
177 stats->rx_missed_errors += miss_f;
178 x->rx_missed_cntr += miss_f;
179 }
180 }
181 return;
182}
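/* A worked decode of CSR8: with csr8 == 0x0004000a both overflow bits
 * are clear, so the code above adds (0x0004000a &
 * DMA_MISSED_FRAME_OVE_CNTR) >> 17 = 2 overflow events and
 * (0x0004000a & DMA_MISSED_FRAME_M_CNTR) = 10 missed frames; when a
 * hardware counter has itself wrapped, the saturation values 0x800
 * and 0xffff are accumulated instead. */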
183
184static int mac100_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
185 struct dma_desc *p, unsigned long ioaddr)
186{
187 int ret = 0;
188 struct net_device_stats *stats = (struct net_device_stats *)data;
189
190 if (unlikely(p->des01.tx.error_summary)) {
191 if (unlikely(p->des01.tx.underflow_error)) {
192 x->tx_underflow++;
193 stats->tx_fifo_errors++;
194 }
195 if (unlikely(p->des01.tx.no_carrier)) {
196 x->tx_carrier++;
197 stats->tx_carrier_errors++;
198 }
199 if (unlikely(p->des01.tx.loss_carrier)) {
200 x->tx_losscarrier++;
201 stats->tx_carrier_errors++;
202 }
203 if (unlikely((p->des01.tx.excessive_deferral) ||
204 (p->des01.tx.excessive_collisions) ||
205 (p->des01.tx.late_collision)))
206 stats->collisions += p->des01.tx.collision_count;
207 ret = -1;
208 }
209 if (unlikely(p->des01.tx.heartbeat_fail)) {
210 x->tx_heartbeat++;
211 stats->tx_heartbeat_errors++;
212 ret = -1;
213 }
214 if (unlikely(p->des01.tx.deferred))
215 x->tx_deferred++;
216
217 return ret;
218}
219
220static int mac100_get_tx_len(struct dma_desc *p)
221{
222 return p->des01.tx.buffer1_size;
223}
224
225/* This function checks each incoming frame for errors
226 * and, if required, updates the multicast statistics.
227 * On success, it returns csum_none because the device
228 * is not able to compute the checksum in HW. */
229static int mac100_get_rx_frame_status(void *data, struct stmmac_extra_stats *x,
230 struct dma_desc *p)
231{
232 int ret = csum_none;
233 struct net_device_stats *stats = (struct net_device_stats *)data;
234
235 if (unlikely(p->des01.rx.last_descriptor == 0)) {
236 pr_warning("mac100 Error: Oversized Ethernet "
237 "frame spanned multiple buffers\n");
238 stats->rx_length_errors++;
239 return discard_frame;
240 }
241
242 if (unlikely(p->des01.rx.error_summary)) {
243 if (unlikely(p->des01.rx.descriptor_error))
244 x->rx_desc++;
245 if (unlikely(p->des01.rx.partial_frame_error))
246 x->rx_partial++;
247 if (unlikely(p->des01.rx.run_frame))
248 x->rx_runt++;
249 if (unlikely(p->des01.rx.frame_too_long))
250 x->rx_toolong++;
251 if (unlikely(p->des01.rx.collision)) {
252 x->rx_collision++;
253 stats->collisions++;
254 }
255 if (unlikely(p->des01.rx.crc_error)) {
256 x->rx_crc++;
257 stats->rx_crc_errors++;
258 }
259 ret = discard_frame;
260 }
261 if (unlikely(p->des01.rx.dribbling))
262 ret = discard_frame;
263
264 if (unlikely(p->des01.rx.length_error)) {
265 x->rx_lenght++;
266 ret = discard_frame;
267 }
268 if (unlikely(p->des01.rx.mii_error)) {
269 x->rx_mii++;
270 ret = discard_frame;
271 }
272 if (p->des01.rx.multicast_frame) {
273 x->rx_multicast++;
274 stats->multicast++;
275 }
276 return ret;
277}
278
279static void mac100_irq_status(unsigned long ioaddr)
280{
281 return;
282}
283
284static void mac100_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
285 unsigned int reg_n)
286{
287 stmmac_set_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
288}
289
290static void mac100_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
291 unsigned int reg_n)
292{
293 stmmac_get_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
294}
295
296static void mac100_set_filter(struct net_device *dev)
297{
298 unsigned long ioaddr = dev->base_addr;
299 u32 value = readl(ioaddr + MAC_CONTROL);
300
301 if (dev->flags & IFF_PROMISC) {
302 value |= MAC_CONTROL_PR;
303 value &= ~(MAC_CONTROL_PM | MAC_CONTROL_IF | MAC_CONTROL_HO |
304 MAC_CONTROL_HP);
305 } else if ((dev->mc_count > HASH_TABLE_SIZE)
306 || (dev->flags & IFF_ALLMULTI)) {
307 value |= MAC_CONTROL_PM;
308 value &= ~(MAC_CONTROL_PR | MAC_CONTROL_IF | MAC_CONTROL_HO);
309 writel(0xffffffff, ioaddr + MAC_HASH_HIGH);
310 writel(0xffffffff, ioaddr + MAC_HASH_LOW);
311 } else if (dev->mc_count == 0) { /* no multicast */
312 value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR | MAC_CONTROL_IF |
313 MAC_CONTROL_HO | MAC_CONTROL_HP);
314 } else {
315 int i;
316 u32 mc_filter[2];
317 struct dev_mc_list *mclist;
318
319 /* Perfect filter mode for physical address and Hash
320 filter for multicast */
321 value |= MAC_CONTROL_HP;
322 value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR | MAC_CONTROL_IF
323 | MAC_CONTROL_HO);
324
325 memset(mc_filter, 0, sizeof(mc_filter));
326 for (i = 0, mclist = dev->mc_list;
327 mclist && i < dev->mc_count; i++, mclist = mclist->next) {
328			/* The upper 6 bits of the calculated CRC are used to
329			 * index the contents of the hash table */
330 int bit_nr =
331 ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
332 /* The most significant bit determines the register to
333 * use (H/L) while the other 5 bits determine the bit
334 * within the register. */
335 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
336 }
337 writel(mc_filter[0], ioaddr + MAC_HASH_LOW);
338 writel(mc_filter[1], ioaddr + MAC_HASH_HIGH);
339 }
340
341 writel(value, ioaddr + MAC_CONTROL);
342
343 DBG(KERN_INFO "%s: CTRL reg: 0x%08x Hash regs: "
344 "HI 0x%08x, LO 0x%08x\n",
345 __func__, readl(ioaddr + MAC_CONTROL),
346 readl(ioaddr + MAC_HASH_HIGH), readl(ioaddr + MAC_HASH_LOW));
347 return;
348}
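/* A worked example of the hashing above: if ether_crc() returned
 * 0xe0000000 for some multicast address, then bit_nr = 0xe0000000 >> 26
 * = 56, bit_nr >> 5 = 1 selects mc_filter[1] (written to MAC_HASH_HIGH)
 * and 56 & 31 = 24 is the bit set within that register. */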
349
350static void mac100_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
351 unsigned int fc, unsigned int pause_time)
352{
353 unsigned int flow = MAC_FLOW_CTRL_ENABLE;
354
355 if (duplex)
356 flow |= (pause_time << MAC_FLOW_CTRL_PT_SHIFT);
357 writel(flow, ioaddr + MAC_FLOW_CTRL);
358
359 return;
360}
361
362/* No PMT module supported in our SoC for the Ethernet Controller. */
363static void mac100_pmt(unsigned long ioaddr, unsigned long mode)
364{
365 return;
366}
367
368static void mac100_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
369 int disable_rx_ic)
370{
371 int i;
372 for (i = 0; i < ring_size; i++) {
373 p->des01.rx.own = 1;
374 p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1;
375 if (i == ring_size - 1)
376 p->des01.rx.end_ring = 1;
377 if (disable_rx_ic)
378 p->des01.rx.disable_ic = 1;
379 p++;
380 }
381 return;
382}
383
384static void mac100_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
385{
386 int i;
387 for (i = 0; i < ring_size; i++) {
388 p->des01.tx.own = 0;
389 if (i == ring_size - 1)
390 p->des01.tx.end_ring = 1;
391 p++;
392 }
393 return;
394}
395
396static int mac100_get_tx_owner(struct dma_desc *p)
397{
398 return p->des01.tx.own;
399}
400
401static int mac100_get_rx_owner(struct dma_desc *p)
402{
403 return p->des01.rx.own;
404}
405
406static void mac100_set_tx_owner(struct dma_desc *p)
407{
408 p->des01.tx.own = 1;
409}
410
411static void mac100_set_rx_owner(struct dma_desc *p)
412{
413 p->des01.rx.own = 1;
414}
415
416static int mac100_get_tx_ls(struct dma_desc *p)
417{
418 return p->des01.tx.last_segment;
419}
420
421static void mac100_release_tx_desc(struct dma_desc *p)
422{
423 int ter = p->des01.tx.end_ring;
424
425 /* clean field used within the xmit */
426 p->des01.tx.first_segment = 0;
427 p->des01.tx.last_segment = 0;
428 p->des01.tx.buffer1_size = 0;
429
430 /* clean status reported */
431 p->des01.tx.error_summary = 0;
432 p->des01.tx.underflow_error = 0;
433 p->des01.tx.no_carrier = 0;
434 p->des01.tx.loss_carrier = 0;
435 p->des01.tx.excessive_deferral = 0;
436 p->des01.tx.excessive_collisions = 0;
437 p->des01.tx.late_collision = 0;
438 p->des01.tx.heartbeat_fail = 0;
439 p->des01.tx.deferred = 0;
440
441 /* set termination field */
442 p->des01.tx.end_ring = ter;
443
444 return;
445}
446
447static void mac100_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
448 int csum_flag)
449{
450 p->des01.tx.first_segment = is_fs;
451 p->des01.tx.buffer1_size = len;
452}
453
454static void mac100_clear_tx_ic(struct dma_desc *p)
455{
456 p->des01.tx.interrupt = 0;
457}
458
459static void mac100_close_tx_desc(struct dma_desc *p)
460{
461 p->des01.tx.last_segment = 1;
462 p->des01.tx.interrupt = 1;
463}
464
465static int mac100_get_rx_frame_len(struct dma_desc *p)
466{
467 return p->des01.rx.frame_length;
468}
469
470struct stmmac_ops mac100_driver = {
471 .core_init = mac100_core_init,
472 .dump_mac_regs = mac100_dump_mac_regs,
473 .dma_init = mac100_dma_init,
474 .dump_dma_regs = mac100_dump_dma_regs,
475 .dma_mode = mac100_dma_operation_mode,
476 .dma_diagnostic_fr = mac100_dma_diagnostic_fr,
477 .tx_status = mac100_get_tx_frame_status,
478 .rx_status = mac100_get_rx_frame_status,
479 .get_tx_len = mac100_get_tx_len,
480 .set_filter = mac100_set_filter,
481 .flow_ctrl = mac100_flow_ctrl,
482 .pmt = mac100_pmt,
483 .init_rx_desc = mac100_init_rx_desc,
484 .init_tx_desc = mac100_init_tx_desc,
485 .get_tx_owner = mac100_get_tx_owner,
486 .get_rx_owner = mac100_get_rx_owner,
487 .release_tx_desc = mac100_release_tx_desc,
488 .prepare_tx_desc = mac100_prepare_tx_desc,
489 .clear_tx_ic = mac100_clear_tx_ic,
490 .close_tx_desc = mac100_close_tx_desc,
491 .get_tx_ls = mac100_get_tx_ls,
492 .set_tx_owner = mac100_set_tx_owner,
493 .set_rx_owner = mac100_set_rx_owner,
494 .get_rx_frame_len = mac100_get_rx_frame_len,
495 .host_irq_status = mac100_irq_status,
496 .set_umac_addr = mac100_set_umac_addr,
497 .get_umac_addr = mac100_get_umac_addr,
498};
499
500struct mac_device_info *mac100_setup(unsigned long ioaddr)
501{
502 struct mac_device_info *mac;
503
504	mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
	if (!mac)
		return NULL;
505
506 pr_info("\tMAC 10/100\n");
507
508 mac->ops = &mac100_driver;
509 mac->hw.pmt = PMT_NOT_SUPPORTED;
510 mac->hw.link.port = MAC_CONTROL_PS;
511 mac->hw.link.duplex = MAC_CONTROL_F;
512 mac->hw.link.speed = 0;
513 mac->hw.mii.addr = MAC_MII_ADDR;
514 mac->hw.mii.data = MAC_MII_DATA;
515
516 return mac;
517}
diff --git a/drivers/net/stmmac/mac100.h b/drivers/net/stmmac/mac100.h
new file mode 100644
index 000000000000..0f8f110d004a
--- /dev/null
+++ b/drivers/net/stmmac/mac100.h
@@ -0,0 +1,116 @@
1/*******************************************************************************
2 MAC 10/100 Header File
3
4 Copyright (C) 2007-2009 STMicroelectronics Ltd
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
23*******************************************************************************/
24
25/*----------------------------------------------------------------------------
26 * MAC BLOCK defines
27 *---------------------------------------------------------------------------*/
28/* MAC CSR offset */
29#define MAC_CONTROL 0x00000000 /* MAC Control */
30#define MAC_ADDR_HIGH 0x00000004 /* MAC Address High */
31#define MAC_ADDR_LOW 0x00000008 /* MAC Address Low */
32#define MAC_HASH_HIGH 0x0000000c /* Multicast Hash Table High */
33#define MAC_HASH_LOW 0x00000010 /* Multicast Hash Table Low */
34#define MAC_MII_ADDR 0x00000014 /* MII Address */
35#define MAC_MII_DATA 0x00000018 /* MII Data */
36#define MAC_FLOW_CTRL 0x0000001c /* Flow Control */
37#define MAC_VLAN1 0x00000020 /* VLAN1 Tag */
38#define MAC_VLAN2 0x00000024 /* VLAN2 Tag */
39
40/* MAC CTRL defines */
41#define MAC_CONTROL_RA 0x80000000 /* Receive All Mode */
42#define MAC_CONTROL_BLE 0x40000000 /* Endian Mode */
43#define MAC_CONTROL_HBD 0x10000000 /* Heartbeat Disable */
44#define MAC_CONTROL_PS 0x08000000 /* Port Select */
45#define MAC_CONTROL_DRO 0x00800000 /* Disable Receive Own */
46#define MAC_CONTROL_EXT_LOOPBACK 0x00400000 /* Reserved (ext loopback?) */
47#define MAC_CONTROL_OM 0x00200000 /* Loopback Operating Mode */
48#define MAC_CONTROL_F 0x00100000 /* Full Duplex Mode */
49#define MAC_CONTROL_PM 0x00080000 /* Pass All Multicast */
50#define MAC_CONTROL_PR 0x00040000 /* Promiscuous Mode */
51#define MAC_CONTROL_IF 0x00020000 /* Inverse Filtering */
52#define MAC_CONTROL_PB 0x00010000 /* Pass Bad Frames */
53#define MAC_CONTROL_HO 0x00008000 /* Hash Only Filtering Mode */
54#define MAC_CONTROL_HP 0x00002000 /* Hash/Perfect Filtering Mode */
55#define MAC_CONTROL_LCC 0x00001000 /* Late Collision Control */
56#define MAC_CONTROL_DBF 0x00000800 /* Disable Broadcast Frames */
57#define MAC_CONTROL_DRTY 0x00000400 /* Disable Retry */
58#define MAC_CONTROL_ASTP 0x00000100 /* Automatic Pad Stripping */
59#define MAC_CONTROL_BOLMT_10 0x00000000 /* Back Off Limit 10 */
60#define MAC_CONTROL_BOLMT_8 0x00000040 /* Back Off Limit 8 */
61#define MAC_CONTROL_BOLMT_4 0x00000080 /* Back Off Limit 4 */
62#define MAC_CONTROL_BOLMT_1 0x000000c0 /* Back Off Limit 1 */
63#define MAC_CONTROL_DC 0x00000020 /* Deferral Check */
64#define MAC_CONTROL_TE 0x00000008 /* Transmitter Enable */
65#define MAC_CONTROL_RE 0x00000004 /* Receiver Enable */
66
67#define MAC_CORE_INIT (MAC_CONTROL_HBD | MAC_CONTROL_ASTP)
68
69/* MAC FLOW CTRL defines */
70#define MAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */
71#define MAC_FLOW_CTRL_PT_SHIFT 16
72#define MAC_FLOW_CTRL_PASS 0x00000004 /* Pass Control Frames */
73#define MAC_FLOW_CTRL_ENABLE 0x00000002 /* Flow Control Enable */
74#define MAC_FLOW_CTRL_PAUSE 0x00000001 /* Flow Control Busy ... */
75
76/* MII ADDR defines */
77#define MAC_MII_ADDR_WRITE 0x00000002 /* MII Write */
78#define MAC_MII_ADDR_BUSY 0x00000001 /* MII Busy */
79
80/*----------------------------------------------------------------------------
81 * DMA BLOCK defines
82 *---------------------------------------------------------------------------*/
83
84/* DMA Bus Mode register defines */
85#define DMA_BUS_MODE_DBO 0x00100000 /* Descriptor Byte Ordering */
86#define DMA_BUS_MODE_BLE 0x00000080 /* Big Endian/Little Endian */
87#define DMA_BUS_MODE_PBL_MASK 0x00003f00 /* Programmable Burst Len */
88#define DMA_BUS_MODE_PBL_SHIFT 8
89#define DMA_BUS_MODE_DSL_MASK 0x0000007c /* Descriptor Skip Length */
90#define DMA_BUS_MODE_DSL_SHIFT 2 /* (in DWORDS) */
91#define DMA_BUS_MODE_BAR_BUS 0x00000002 /* Bar-Bus Arbitration */
92#define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */
93#define DMA_BUS_MODE_DEFAULT 0x00000000
94
95/* DMA Control register defines */
96#define DMA_CONTROL_SF 0x00200000 /* Store And Forward */
97
98/* Transmit Threshold Control */
99enum ttc_control {
100 DMA_CONTROL_TTC_DEFAULT = 0x00000000, /* Threshold is 32 DWORDS */
101 DMA_CONTROL_TTC_64 = 0x00004000, /* Threshold is 64 DWORDS */
102 DMA_CONTROL_TTC_128 = 0x00008000, /* Threshold is 128 DWORDS */
103 DMA_CONTROL_TTC_256 = 0x0000c000, /* Threshold is 256 DWORDS */
104 DMA_CONTROL_TTC_18 = 0x00400000, /* Threshold is 18 DWORDS */
105 DMA_CONTROL_TTC_24 = 0x00404000, /* Threshold is 24 DWORDS */
106 DMA_CONTROL_TTC_32 = 0x00408000, /* Threshold is 32 DWORDS */
107 DMA_CONTROL_TTC_40 = 0x0040c000, /* Threshold is 40 DWORDS */
108 DMA_CONTROL_SE = 0x00000008, /* Stop On Empty */
109 DMA_CONTROL_OSF = 0x00000004, /* Operate On 2nd Frame */
110};
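/* Note that the TTC encodings combine two CSR6 fields: bits [15:14]
 * select the base threshold while bit 22 switches to the 18/24/32/40
 * DWORD set, e.g. DMA_CONTROL_TTC_32 = 0x00400000 | 0x00008000. */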
111
112/* STMAC110 DMA Missed Frame Counter register defines */
113#define DMA_MISSED_FRAME_OVE	0x10000000	/* FIFO Overflow */
114#define DMA_MISSED_FRAME_OVE_CNTR 0x0ffe0000 /* Overflow Frame Counter */
115#define DMA_MISSED_FRAME_OVE_M 0x00010000 /* Missed Frame Overflow */
116#define DMA_MISSED_FRAME_M_CNTR	0x0000ffff	/* Missed Frame Counter */
diff --git a/drivers/net/stmmac/stmmac.h b/drivers/net/stmmac/stmmac.h
new file mode 100644
index 000000000000..6d2eae3040e5
--- /dev/null
+++ b/drivers/net/stmmac/stmmac.h
@@ -0,0 +1,98 @@
1/*******************************************************************************
2 Copyright (C) 2007-2009 STMicroelectronics Ltd
3
4 This program is free software; you can redistribute it and/or modify it
5 under the terms and conditions of the GNU General Public License,
6 version 2, as published by the Free Software Foundation.
7
8 This program is distributed in the hope it will be useful, but WITHOUT
9 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 more details.
12
13 You should have received a copy of the GNU General Public License along with
14 this program; if not, write to the Free Software Foundation, Inc.,
15 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16
17 The full GNU General Public License is included in this distribution in
18 the file called "COPYING".
19
20 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
21*******************************************************************************/
22
23#define DRV_MODULE_VERSION "Oct_09"
24
25#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
26#define STMMAC_VLAN_TAG_USED
27#include <linux/if_vlan.h>
28#endif
29
30#include "common.h"
31#ifdef CONFIG_STMMAC_TIMER
32#include "stmmac_timer.h"
33#endif
34
35struct stmmac_priv {
36 /* Frequently used values are kept adjacent for cache effect */
37 struct dma_desc *dma_tx ____cacheline_aligned;
38 dma_addr_t dma_tx_phy;
39 struct sk_buff **tx_skbuff;
40 unsigned int cur_tx;
41 unsigned int dirty_tx;
42 unsigned int dma_tx_size;
43 int tx_coe;
44 int tx_coalesce;
45
46 struct dma_desc *dma_rx ;
47 unsigned int cur_rx;
48 unsigned int dirty_rx;
49 struct sk_buff **rx_skbuff;
50 dma_addr_t *rx_skbuff_dma;
51 struct sk_buff_head rx_recycle;
52
53 struct net_device *dev;
54 int is_gmac;
55 dma_addr_t dma_rx_phy;
56 unsigned int dma_rx_size;
57 int rx_csum;
58 unsigned int dma_buf_sz;
59 struct device *device;
60 struct mac_device_info *mac_type;
61
62 struct stmmac_extra_stats xstats;
63 struct napi_struct napi;
64
65 phy_interface_t phy_interface;
66 int pbl;
67 int bus_id;
68 int phy_addr;
69 int phy_mask;
70 int (*phy_reset) (void *priv);
71 void (*fix_mac_speed) (void *priv, unsigned int speed);
72 void *bsp_priv;
73
74 int phy_irq;
75 struct phy_device *phydev;
76 int oldlink;
77 int speed;
78 int oldduplex;
79 unsigned int flow_ctrl;
80 unsigned int pause;
81 struct mii_bus *mii;
82
83 u32 msg_enable;
84 spinlock_t lock;
85 int wolopts;
86 int wolenabled;
87 int shutdown;
88#ifdef CONFIG_STMMAC_TIMER
89 struct stmmac_timer *tm;
90#endif
91#ifdef STMMAC_VLAN_TAG_USED
92 struct vlan_group *vlgrp;
93#endif
94};
95
96extern int stmmac_mdio_unregister(struct net_device *ndev);
97extern int stmmac_mdio_register(struct net_device *ndev);
98extern void stmmac_set_ethtool_ops(struct net_device *netdev);
diff --git a/drivers/net/stmmac/stmmac_ethtool.c b/drivers/net/stmmac/stmmac_ethtool.c
new file mode 100644
index 000000000000..694ebe6a0758
--- /dev/null
+++ b/drivers/net/stmmac/stmmac_ethtool.c
@@ -0,0 +1,395 @@
1/*******************************************************************************
2 STMMAC Ethtool support
3
4 Copyright (C) 2007-2009 STMicroelectronics Ltd
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
23*******************************************************************************/
24
25#include <linux/etherdevice.h>
26#include <linux/ethtool.h>
27#include <linux/mii.h>
28#include <linux/phy.h>
29
30#include "stmmac.h"
31
32#define REG_SPACE_SIZE 0x1054
33#define MAC100_ETHTOOL_NAME "st_mac100"
34#define GMAC_ETHTOOL_NAME "st_gmac"
35
36struct stmmac_stats {
37 char stat_string[ETH_GSTRING_LEN];
38 int sizeof_stat;
39 int stat_offset;
40};
41
42#define STMMAC_STAT(m) \
43 { #m, FIELD_SIZEOF(struct stmmac_extra_stats, m), \
44 offsetof(struct stmmac_priv, xstats.m)}
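/* For example, STMMAC_STAT(tx_underflow) expands to
 * { "tx_underflow", FIELD_SIZEOF(struct stmmac_extra_stats, tx_underflow),
 *   offsetof(struct stmmac_priv, xstats.tx_underflow) },
 * giving each table entry the name, size and offset that
 * stmmac_get_ethtool_stats() later uses to copy the counter out. */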
45
46static const struct stmmac_stats stmmac_gstrings_stats[] = {
47 STMMAC_STAT(tx_underflow),
48 STMMAC_STAT(tx_carrier),
49 STMMAC_STAT(tx_losscarrier),
50 STMMAC_STAT(tx_heartbeat),
51 STMMAC_STAT(tx_deferred),
52 STMMAC_STAT(tx_vlan),
53 STMMAC_STAT(rx_vlan),
54 STMMAC_STAT(tx_jabber),
55 STMMAC_STAT(tx_frame_flushed),
56 STMMAC_STAT(tx_payload_error),
57 STMMAC_STAT(tx_ip_header_error),
58 STMMAC_STAT(rx_desc),
59 STMMAC_STAT(rx_partial),
60 STMMAC_STAT(rx_runt),
61 STMMAC_STAT(rx_toolong),
62 STMMAC_STAT(rx_collision),
63 STMMAC_STAT(rx_crc),
64 STMMAC_STAT(rx_lenght),
65 STMMAC_STAT(rx_mii),
66 STMMAC_STAT(rx_multicast),
67 STMMAC_STAT(rx_gmac_overflow),
68 STMMAC_STAT(rx_watchdog),
69 STMMAC_STAT(da_rx_filter_fail),
70 STMMAC_STAT(sa_rx_filter_fail),
71 STMMAC_STAT(rx_missed_cntr),
72 STMMAC_STAT(rx_overflow_cntr),
73 STMMAC_STAT(tx_undeflow_irq),
74 STMMAC_STAT(tx_process_stopped_irq),
75 STMMAC_STAT(tx_jabber_irq),
76 STMMAC_STAT(rx_overflow_irq),
77 STMMAC_STAT(rx_buf_unav_irq),
78 STMMAC_STAT(rx_process_stopped_irq),
79 STMMAC_STAT(rx_watchdog_irq),
80 STMMAC_STAT(tx_early_irq),
81 STMMAC_STAT(fatal_bus_error_irq),
82 STMMAC_STAT(threshold),
83 STMMAC_STAT(tx_pkt_n),
84 STMMAC_STAT(rx_pkt_n),
85 STMMAC_STAT(poll_n),
86 STMMAC_STAT(sched_timer_n),
87 STMMAC_STAT(normal_irq_n),
88};
89#define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats)
90
91void stmmac_ethtool_getdrvinfo(struct net_device *dev,
92 struct ethtool_drvinfo *info)
93{
94 struct stmmac_priv *priv = netdev_priv(dev);
95
96 if (!priv->is_gmac)
97 strcpy(info->driver, MAC100_ETHTOOL_NAME);
98 else
99 strcpy(info->driver, GMAC_ETHTOOL_NAME);
100
101 strcpy(info->version, DRV_MODULE_VERSION);
102 info->fw_version[0] = '\0';
103 info->n_stats = STMMAC_STATS_LEN;
104 return;
105}
106
107int stmmac_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
108{
109 struct stmmac_priv *priv = netdev_priv(dev);
110 struct phy_device *phy = priv->phydev;
111 int rc;
112 if (phy == NULL) {
113 pr_err("%s: %s: PHY is not registered\n",
114 __func__, dev->name);
115 return -ENODEV;
116 }
117 if (!netif_running(dev)) {
118 pr_err("%s: interface is disabled: we cannot track "
119 "link speed / duplex setting\n", dev->name);
120 return -EBUSY;
121 }
122 cmd->transceiver = XCVR_INTERNAL;
123 spin_lock_irq(&priv->lock);
124 rc = phy_ethtool_gset(phy, cmd);
125 spin_unlock_irq(&priv->lock);
126 return rc;
127}
128
129int stmmac_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
130{
131 struct stmmac_priv *priv = netdev_priv(dev);
132 struct phy_device *phy = priv->phydev;
133 int rc;
134
135 spin_lock(&priv->lock);
136 rc = phy_ethtool_sset(phy, cmd);
137 spin_unlock(&priv->lock);
138
139 return rc;
140}
141
142u32 stmmac_ethtool_getmsglevel(struct net_device *dev)
143{
144 struct stmmac_priv *priv = netdev_priv(dev);
145 return priv->msg_enable;
146}
147
148void stmmac_ethtool_setmsglevel(struct net_device *dev, u32 level)
149{
150 struct stmmac_priv *priv = netdev_priv(dev);
151 priv->msg_enable = level;
152
153}
154
155int stmmac_check_if_running(struct net_device *dev)
156{
157 if (!netif_running(dev))
158 return -EBUSY;
159 return 0;
160}
161
162int stmmac_ethtool_get_regs_len(struct net_device *dev)
163{
164 return REG_SPACE_SIZE;
165}
166
167void stmmac_ethtool_gregs(struct net_device *dev,
168 struct ethtool_regs *regs, void *space)
169{
170 int i;
171 u32 *reg_space = (u32 *) space;
172
173 struct stmmac_priv *priv = netdev_priv(dev);
174
175 memset(reg_space, 0x0, REG_SPACE_SIZE);
176
177 if (!priv->is_gmac) {
178 /* MAC registers */
179 for (i = 0; i < 12; i++)
180 reg_space[i] = readl(dev->base_addr + (i * 4));
181 /* DMA registers */
182 for (i = 0; i < 9; i++)
183 reg_space[i + 12] =
184 readl(dev->base_addr + (DMA_BUS_MODE + (i * 4)));
185 reg_space[22] = readl(dev->base_addr + DMA_CUR_TX_BUF_ADDR);
186 reg_space[23] = readl(dev->base_addr + DMA_CUR_RX_BUF_ADDR);
187 } else {
188 /* MAC registers */
189 for (i = 0; i < 55; i++)
190 reg_space[i] = readl(dev->base_addr + (i * 4));
191 /* DMA registers */
192 for (i = 0; i < 22; i++)
193 reg_space[i + 55] =
194 readl(dev->base_addr + (DMA_BUS_MODE + (i * 4)));
195 }
196
197 return;
198}
199
200int stmmac_ethtool_set_tx_csum(struct net_device *netdev, u32 data)
201{
202 if (data)
203 netdev->features |= NETIF_F_HW_CSUM;
204 else
205 netdev->features &= ~NETIF_F_HW_CSUM;
206
207 return 0;
208}
209
210u32 stmmac_ethtool_get_rx_csum(struct net_device *dev)
211{
212 struct stmmac_priv *priv = netdev_priv(dev);
213
214 return priv->rx_csum;
215}
216
217static void
218stmmac_get_pauseparam(struct net_device *netdev,
219 struct ethtool_pauseparam *pause)
220{
221 struct stmmac_priv *priv = netdev_priv(netdev);
222
223 spin_lock(&priv->lock);
224
225 pause->rx_pause = 0;
226 pause->tx_pause = 0;
227 pause->autoneg = priv->phydev->autoneg;
228
229 if (priv->flow_ctrl & FLOW_RX)
230 pause->rx_pause = 1;
231 if (priv->flow_ctrl & FLOW_TX)
232 pause->tx_pause = 1;
233
234 spin_unlock(&priv->lock);
235 return;
236}
237
238static int
239stmmac_set_pauseparam(struct net_device *netdev,
240 struct ethtool_pauseparam *pause)
241{
242 struct stmmac_priv *priv = netdev_priv(netdev);
243 struct phy_device *phy = priv->phydev;
244 int new_pause = FLOW_OFF;
245 int ret = 0;
246
247 spin_lock(&priv->lock);
248
249 if (pause->rx_pause)
250 new_pause |= FLOW_RX;
251 if (pause->tx_pause)
252 new_pause |= FLOW_TX;
253
254 priv->flow_ctrl = new_pause;
255
256 if (phy->autoneg) {
257 if (netif_running(netdev)) {
258 struct ethtool_cmd cmd;
259 /* auto-negotiation automatically restarted */
260 cmd.cmd = ETHTOOL_NWAY_RST;
261 cmd.supported = phy->supported;
262 cmd.advertising = phy->advertising;
263 cmd.autoneg = phy->autoneg;
264 cmd.speed = phy->speed;
265 cmd.duplex = phy->duplex;
266 cmd.phy_address = phy->addr;
267 ret = phy_ethtool_sset(phy, &cmd);
268 }
269 } else {
270 unsigned long ioaddr = netdev->base_addr;
271 priv->mac_type->ops->flow_ctrl(ioaddr, phy->duplex,
272 priv->flow_ctrl, priv->pause);
273 }
274 spin_unlock(&priv->lock);
275 return ret;
276}
277
278static void stmmac_get_ethtool_stats(struct net_device *dev,
279 struct ethtool_stats *dummy, u64 *data)
280{
281 struct stmmac_priv *priv = netdev_priv(dev);
282 unsigned long ioaddr = dev->base_addr;
283 int i;
284
285 /* Update HW stats if supported */
286 priv->mac_type->ops->dma_diagnostic_fr(&dev->stats, &priv->xstats,
287 ioaddr);
288
289 for (i = 0; i < STMMAC_STATS_LEN; i++) {
290 char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset;
291 data[i] = (stmmac_gstrings_stats[i].sizeof_stat ==
292 sizeof(u64)) ? (*(u64 *)p) : (*(u32 *)p);
293 }
294
295 return;
296}
297
298static int stmmac_get_sset_count(struct net_device *netdev, int sset)
299{
300 switch (sset) {
301 case ETH_SS_STATS:
302 return STMMAC_STATS_LEN;
303 default:
304 return -EOPNOTSUPP;
305 }
306}
307
308static void stmmac_get_strings(struct net_device *dev, u32 stringset, u8 *data)
309{
310 int i;
311 u8 *p = data;
312
313 switch (stringset) {
314 case ETH_SS_STATS:
315 for (i = 0; i < STMMAC_STATS_LEN; i++) {
316 memcpy(p, stmmac_gstrings_stats[i].stat_string,
317 ETH_GSTRING_LEN);
318 p += ETH_GSTRING_LEN;
319 }
320 break;
321 default:
322 WARN_ON(1);
323 break;
324 }
325 return;
326}
327
328/* Currently only support WOL through Magic packet. */
329static void stmmac_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
330{
331 struct stmmac_priv *priv = netdev_priv(dev);
332
333 spin_lock_irq(&priv->lock);
334 if (priv->wolenabled == PMT_SUPPORTED) {
335 wol->supported = WAKE_MAGIC;
336 wol->wolopts = priv->wolopts;
337 }
338 spin_unlock_irq(&priv->lock);
339}
340
341static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
342{
343 struct stmmac_priv *priv = netdev_priv(dev);
344 u32 support = WAKE_MAGIC;
345
346 if (priv->wolenabled == PMT_NOT_SUPPORTED)
347 return -EINVAL;
348
349 if (wol->wolopts & ~support)
350 return -EINVAL;
351
352 if (wol->wolopts == 0)
353 device_set_wakeup_enable(priv->device, 0);
354 else
355 device_set_wakeup_enable(priv->device, 1);
356
357 spin_lock_irq(&priv->lock);
358 priv->wolopts = wol->wolopts;
359 spin_unlock_irq(&priv->lock);
360
361 return 0;
362}
363
364static struct ethtool_ops stmmac_ethtool_ops = {
365 .begin = stmmac_check_if_running,
366 .get_drvinfo = stmmac_ethtool_getdrvinfo,
367 .get_settings = stmmac_ethtool_getsettings,
368 .set_settings = stmmac_ethtool_setsettings,
369 .get_msglevel = stmmac_ethtool_getmsglevel,
370 .set_msglevel = stmmac_ethtool_setmsglevel,
371 .get_regs = stmmac_ethtool_gregs,
372 .get_regs_len = stmmac_ethtool_get_regs_len,
373 .get_link = ethtool_op_get_link,
374 .get_rx_csum = stmmac_ethtool_get_rx_csum,
375 .get_tx_csum = ethtool_op_get_tx_csum,
376 .set_tx_csum = stmmac_ethtool_set_tx_csum,
377 .get_sg = ethtool_op_get_sg,
378 .set_sg = ethtool_op_set_sg,
379 .get_pauseparam = stmmac_get_pauseparam,
380 .set_pauseparam = stmmac_set_pauseparam,
381 .get_ethtool_stats = stmmac_get_ethtool_stats,
382 .get_strings = stmmac_get_strings,
383 .get_wol = stmmac_get_wol,
384 .set_wol = stmmac_set_wol,
385 .get_sset_count = stmmac_get_sset_count,
386#ifdef NETIF_F_TSO
387 .get_tso = ethtool_op_get_tso,
388 .set_tso = ethtool_op_set_tso,
389#endif
390};
391
392void stmmac_set_ethtool_ops(struct net_device *netdev)
393{
394 SET_ETHTOOL_OPS(netdev, &stmmac_ethtool_ops);
395}
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
new file mode 100644
index 000000000000..c2f14dc9ba28
--- /dev/null
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -0,0 +1,2204 @@
1/*******************************************************************************
2 This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
3 ST Ethernet IPs are built around a Synopsys IP Core.
4
5 Copyright (C) 2007-2009 STMicroelectronics Ltd
6
7 This program is free software; you can redistribute it and/or modify it
8 under the terms and conditions of the GNU General Public License,
9 version 2, as published by the Free Software Foundation.
10
11 This program is distributed in the hope it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
15
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, write to the Free Software Foundation, Inc.,
18 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19
20 The full GNU General Public License is included in this distribution in
21 the file called "COPYING".
22
23 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
24
25 Documentation available at:
26 http://www.stlinux.com
27 Support available at:
28 https://bugzilla.stlinux.com/
29*******************************************************************************/
30
31#include <linux/module.h>
32#include <linux/init.h>
33#include <linux/kernel.h>
34#include <linux/interrupt.h>
35#include <linux/netdevice.h>
36#include <linux/etherdevice.h>
37#include <linux/platform_device.h>
38#include <linux/ip.h>
39#include <linux/tcp.h>
40#include <linux/skbuff.h>
41#include <linux/ethtool.h>
42#include <linux/if_ether.h>
43#include <linux/crc32.h>
44#include <linux/mii.h>
45#include <linux/phy.h>
46#include <linux/if_vlan.h>
47#include <linux/dma-mapping.h>
48#include <linux/stm/soc.h>
49#include "stmmac.h"
50
51#define STMMAC_RESOURCE_NAME "stmmaceth"
52#define PHY_RESOURCE_NAME "stmmacphy"
53
54#undef STMMAC_DEBUG
55/*#define STMMAC_DEBUG*/
56#ifdef STMMAC_DEBUG
57#define DBG(nlevel, klevel, fmt, args...) \
58 ((void)(netif_msg_##nlevel(priv) && \
59 printk(KERN_##klevel fmt, ## args)))
60#else
61#define DBG(nlevel, klevel, fmt, args...) do { } while (0)
62#endif
63
64#undef STMMAC_RX_DEBUG
65/*#define STMMAC_RX_DEBUG*/
66#ifdef STMMAC_RX_DEBUG
67#define RX_DBG(fmt, args...) printk(fmt, ## args)
68#else
69#define RX_DBG(fmt, args...) do { } while (0)
70#endif
71
72#undef STMMAC_XMIT_DEBUG
73/*#define STMMAC_XMIT_DEBUG*/
74#ifdef STMMAC_XMIT_DEBUG
75#define TX_DBG(fmt, args...) printk(fmt, ## args)
76#else
77#define TX_DBG(fmt, args...) do { } while (0)
78#endif
79
80#define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x)
81#define JUMBO_LEN 9000
82
83/* Module parameters */
84#define TX_TIMEO 5000 /* default 5 seconds */
85static int watchdog = TX_TIMEO;
86module_param(watchdog, int, S_IRUGO | S_IWUSR);
87MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds");
88
89static int debug = -1; /* -1: default, 0: no output, 16: all */
90module_param(debug, int, S_IRUGO | S_IWUSR);
91MODULE_PARM_DESC(debug, "Message Level (0: no output, 16: all)");
92
93static int phyaddr = -1;
94module_param(phyaddr, int, S_IRUGO);
95MODULE_PARM_DESC(phyaddr, "Physical device address");
96
97#define DMA_TX_SIZE 256
98static int dma_txsize = DMA_TX_SIZE;
99module_param(dma_txsize, int, S_IRUGO | S_IWUSR);
100MODULE_PARM_DESC(dma_txsize, "Number of descriptors in the TX list");
101
102#define DMA_RX_SIZE 256
103static int dma_rxsize = DMA_RX_SIZE;
104module_param(dma_rxsize, int, S_IRUGO | S_IWUSR);
105MODULE_PARM_DESC(dma_rxsize, "Number of descriptors in the RX list");
106
107static int flow_ctrl = FLOW_OFF;
108module_param(flow_ctrl, int, S_IRUGO | S_IWUSR);
109MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
110
111static int pause = PAUSE_TIME;
112module_param(pause, int, S_IRUGO | S_IWUSR);
113MODULE_PARM_DESC(pause, "Flow Control Pause Time");
114
115#define TC_DEFAULT 64
116static int tc = TC_DEFAULT;
117module_param(tc, int, S_IRUGO | S_IWUSR);
118MODULE_PARM_DESC(tc, "DMA threshold control value");
119
120#define RX_NO_COALESCE 1 /* Always interrupt on completion */
121#define TX_NO_COALESCE -1 /* No moderation by default */
122
123/* Take care when tuning this parameter; consider both the
124 * hardware capability and the impact on network stability/performance.
125 * Many tests showed that a latency of ~4ms seems to be good enough. */
126#ifdef CONFIG_STMMAC_TIMER
127#define DEFAULT_PERIODIC_RATE 256
128static int tmrate = DEFAULT_PERIODIC_RATE;
129module_param(tmrate, int, S_IRUGO | S_IWUSR);
130MODULE_PARM_DESC(tmrate, "External timer freq. (default: 256Hz)");
131#endif
132
133#define DMA_BUFFER_SIZE BUF_SIZE_2KiB
134static int buf_sz = DMA_BUFFER_SIZE;
135module_param(buf_sz, int, S_IRUGO | S_IWUSR);
136MODULE_PARM_DESC(buf_sz, "DMA buffer size");
137
138/* In case of Gigabit Ethernet, we can enable/disable the COE for the
139 * transmit HW checksum computation.
140 * Note that, if tx csum is off in HW, SG will still be supported. */
141static int tx_coe = HW_CSUM;
142module_param(tx_coe, int, S_IRUGO | S_IWUSR);
143MODULE_PARM_DESC(tx_coe, "GMAC COE type 2 [on/off]");
144
145static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
146 NETIF_MSG_LINK | NETIF_MSG_IFUP |
147 NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
148
149static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
150static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev);
151
152/**
153 * stmmac_verify_args - verify the driver parameters.
154 * Description: it verifies whether any invalid parameter was passed
155 * to the driver; invalid parameters are replaced with the default values.
156 */
157static void stmmac_verify_args(void)
158{
159 if (unlikely(watchdog < 0))
160 watchdog = TX_TIMEO;
161 if (unlikely(dma_rxsize < 0))
162 dma_rxsize = DMA_RX_SIZE;
163 if (unlikely(dma_txsize < 0))
164 dma_txsize = DMA_TX_SIZE;
165 if (unlikely((buf_sz < DMA_BUFFER_SIZE) || (buf_sz > BUF_SIZE_16KiB)))
166 buf_sz = DMA_BUFFER_SIZE;
167 if (unlikely(flow_ctrl > 1))
168 flow_ctrl = FLOW_AUTO;
169 else if (likely(flow_ctrl < 0))
170 flow_ctrl = FLOW_OFF;
171 if (unlikely((pause < 0) || (pause > 0xffff)))
172 pause = PAUSE_TIME;
173
174 return;
175}
176
177#if defined(STMMAC_XMIT_DEBUG) || defined(STMMAC_RX_DEBUG)
178static void print_pkt(unsigned char *buf, int len)
179{
180 int j;
181 pr_info("len = %d byte, buf addr: 0x%p", len, buf);
182 for (j = 0; j < len; j++) {
183 if ((j % 16) == 0)
184 pr_info("\n %03x:", j);
185 pr_info(" %02x", buf[j]);
186 }
187 pr_info("\n");
188 return;
189}
190#endif
191
192/* minimum number of free TX descriptors required to wake up TX process */
193#define STMMAC_TX_THRESH(x) (x->dma_tx_size/4)
194
195static inline u32 stmmac_tx_avail(struct stmmac_priv *priv)
196{
197 return priv->dirty_tx + priv->dma_tx_size - priv->cur_tx - 1;
198}
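/* cur_tx and dirty_tx are free-running indexes (reduced modulo the
 * ring size only when a descriptor is actually addressed), so e.g.
 * with dma_tx_size = 256, dirty_tx = 10 and cur_tx = 13 there are
 * 10 + 256 - 13 - 1 = 252 descriptors still available. */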
199
200/**
201 * stmmac_adjust_link
202 * @dev: net device structure
203 * Description: it adjusts the link parameters.
204 */
205static void stmmac_adjust_link(struct net_device *dev)
206{
207 struct stmmac_priv *priv = netdev_priv(dev);
208 struct phy_device *phydev = priv->phydev;
209 unsigned long ioaddr = dev->base_addr;
210 unsigned long flags;
211 int new_state = 0;
212 unsigned int fc = priv->flow_ctrl, pause_time = priv->pause;
213
214 if (phydev == NULL)
215 return;
216
217 DBG(probe, DEBUG, "stmmac_adjust_link: called. address %d link %d\n",
218 phydev->addr, phydev->link);
219
220 spin_lock_irqsave(&priv->lock, flags);
221 if (phydev->link) {
222 u32 ctrl = readl(ioaddr + MAC_CTRL_REG);
223
224 /* Now we make sure that we can be in full duplex mode.
225 * If not, we operate in half-duplex mode. */
226 if (phydev->duplex != priv->oldduplex) {
227 new_state = 1;
228 if (!(phydev->duplex))
229 ctrl &= ~priv->mac_type->hw.link.duplex;
230 else
231 ctrl |= priv->mac_type->hw.link.duplex;
232 priv->oldduplex = phydev->duplex;
233 }
234 /* Flow Control operation */
235 if (phydev->pause)
236 priv->mac_type->ops->flow_ctrl(ioaddr, phydev->duplex,
237 fc, pause_time);
238
239 if (phydev->speed != priv->speed) {
240 new_state = 1;
241 switch (phydev->speed) {
242 case 1000:
243 if (likely(priv->is_gmac))
244 ctrl &= ~priv->mac_type->hw.link.port;
245 break;
246 case 100:
247 case 10:
248 if (priv->is_gmac) {
249 ctrl |= priv->mac_type->hw.link.port;
250				if (phydev->speed == SPEED_100)
251					ctrl |= priv->mac_type->hw.link.speed;
252				else
253					ctrl &= ~(priv->mac_type->hw.link.speed);
259 } else {
260 ctrl &= ~priv->mac_type->hw.link.port;
261 }
262 priv->fix_mac_speed(priv->bsp_priv,
263 phydev->speed);
264 break;
265 default:
266 if (netif_msg_link(priv))
267 pr_warning("%s: Speed (%d) is not 10"
268 " or 100!\n", dev->name, phydev->speed);
269 break;
270 }
271
272 priv->speed = phydev->speed;
273 }
274
275 writel(ctrl, ioaddr + MAC_CTRL_REG);
276
277 if (!priv->oldlink) {
278 new_state = 1;
279 priv->oldlink = 1;
280 }
281 } else if (priv->oldlink) {
282 new_state = 1;
283 priv->oldlink = 0;
284 priv->speed = 0;
285 priv->oldduplex = -1;
286 }
287
288 if (new_state && netif_msg_link(priv))
289 phy_print_status(phydev);
290
291 spin_unlock_irqrestore(&priv->lock, flags);
292
293 DBG(probe, DEBUG, "stmmac_adjust_link: exiting\n");
294}
295
296/**
297 * stmmac_init_phy - PHY initialization
298 * @dev: net device structure
299 * Description: it initializes the driver's PHY state, and attaches the PHY
300 * to the mac driver.
301 * Return value:
302 * 0 on success
303 */
304static int stmmac_init_phy(struct net_device *dev)
305{
306 struct stmmac_priv *priv = netdev_priv(dev);
307 struct phy_device *phydev;
308 char phy_id[BUS_ID_SIZE]; /* PHY to connect */
309 char bus_id[BUS_ID_SIZE];
310
311 priv->oldlink = 0;
312 priv->speed = 0;
313 priv->oldduplex = -1;
314
315 if (priv->phy_addr == -1) {
316 /* We don't have a PHY, so do nothing */
317 return 0;
318 }
319
320 snprintf(bus_id, MII_BUS_ID_SIZE, "%x", priv->bus_id);
321 snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, bus_id, priv->phy_addr);
322 pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id);
323
324 phydev = phy_connect(dev, phy_id, &stmmac_adjust_link, 0,
325 priv->phy_interface);
326
327 if (IS_ERR(phydev)) {
328 pr_err("%s: Could not attach to PHY\n", dev->name);
329 return PTR_ERR(phydev);
330 }
331
332 /*
333 * Broken HW is sometimes missing the pull-up resistor on the
334 * MDIO line, which results in reads to non-existent devices returning
335 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
336 * device as well.
337 * Note: phydev->phy_id is the result of reading the UID PHY registers.
338 */
339 if (phydev->phy_id == 0) {
340 phy_disconnect(phydev);
341 return -ENODEV;
342 }
343 pr_debug("stmmac_init_phy: %s: attached to PHY (UID 0x%x)"
344 " Link = %d\n", dev->name, phydev->phy_id, phydev->link);
345
346 priv->phydev = phydev;
347
348 return 0;
349}
350
351static inline void stmmac_mac_enable_rx(unsigned long ioaddr)
352{
353 u32 value = readl(ioaddr + MAC_CTRL_REG);
354 value |= MAC_RNABLE_RX;
355 /* Set the RE (receive enable bit into the MAC CTRL register). */
356 writel(value, ioaddr + MAC_CTRL_REG);
357}
358
359static inline void stmmac_mac_enable_tx(unsigned long ioaddr)
360{
361 u32 value = readl(ioaddr + MAC_CTRL_REG);
362 value |= MAC_ENABLE_TX;
363 /* Set the TE (transmit enable bit into the MAC CTRL register). */
364 writel(value, ioaddr + MAC_CTRL_REG);
365}
366
367static inline void stmmac_mac_disable_rx(unsigned long ioaddr)
368{
369 u32 value = readl(ioaddr + MAC_CTRL_REG);
370 value &= ~MAC_RNABLE_RX;
371 writel(value, ioaddr + MAC_CTRL_REG);
372}
373
374static inline void stmmac_mac_disable_tx(unsigned long ioaddr)
375{
376 u32 value = readl(ioaddr + MAC_CTRL_REG);
377 value &= ~MAC_ENABLE_TX;
378 writel(value, ioaddr + MAC_CTRL_REG);
379}
380
381/**
382 * display_ring
383 * @p: pointer to the ring.
384 * @size: size of the ring.
385 * Description: display all the descriptors within the ring.
386 */
387static void display_ring(struct dma_desc *p, int size)
388{
389 struct tmp_s {
390 u64 a;
391 unsigned int b;
392 unsigned int c;
393 };
394 int i;
395 for (i = 0; i < size; i++) {
396 struct tmp_s *x = (struct tmp_s *)(p + i);
397 pr_info("\t%d [0x%x]: DES0=0x%x DES1=0x%x BUF1=0x%x BUF2=0x%x",
398 i, (unsigned int)virt_to_phys(&p[i]),
399 (unsigned int)(x->a), (unsigned int)((x->a) >> 32),
400 x->b, x->c);
401 pr_info("\n");
402 }
403}
404
405/**
406 * init_dma_desc_rings - init the RX/TX descriptor rings
407 * @dev: net device structure
408 * Description: this function initializes the DMA RX/TX descriptors
409 * and allocates the socket buffers.
410 */
411static void init_dma_desc_rings(struct net_device *dev)
412{
413 int i;
414 struct stmmac_priv *priv = netdev_priv(dev);
415 struct sk_buff *skb;
416 unsigned int txsize = priv->dma_tx_size;
417 unsigned int rxsize = priv->dma_rx_size;
418 unsigned int bfsize = priv->dma_buf_sz;
419 int buff2_needed = 0;
420 int dis_ic = 0;
421
422#ifdef CONFIG_STMMAC_TIMER
423 /* Using Timers disable interrupts on completion for the reception */
424 dis_ic = 1;
425#endif
426	/* Set the buffer size according to the MTU;
427	 * in the case of jumbo frames we need to bump up the buffer sizes.
428	 */
429 if (unlikely(dev->mtu >= BUF_SIZE_8KiB))
430 bfsize = BUF_SIZE_16KiB;
431 else if (unlikely(dev->mtu >= BUF_SIZE_4KiB))
432 bfsize = BUF_SIZE_8KiB;
433 else if (unlikely(dev->mtu >= BUF_SIZE_2KiB))
434 bfsize = BUF_SIZE_4KiB;
435 else if (unlikely(dev->mtu >= DMA_BUFFER_SIZE))
436 bfsize = BUF_SIZE_2KiB;
437 else
438 bfsize = DMA_BUFFER_SIZE;
439
440	/* If the MTU exceeds 8k, use the second buffer in the chain */
441 if (bfsize >= BUF_SIZE_8KiB)
442 buff2_needed = 1;
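	/* For example, a standard 1500 byte MTU keeps bfsize at
	 * DMA_BUFFER_SIZE (2KiB) with a single buffer per descriptor,
	 * while a 9000 byte jumbo MTU selects BUF_SIZE_16KiB and, being
	 * above BUF_SIZE_8KiB, the second buffer as well. */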
443
444 DBG(probe, INFO, "stmmac: txsize %d, rxsize %d, bfsize %d\n",
445 txsize, rxsize, bfsize);
446
447 priv->rx_skbuff_dma = kmalloc(rxsize * sizeof(dma_addr_t), GFP_KERNEL);
448 priv->rx_skbuff =
449 kmalloc(sizeof(struct sk_buff *) * rxsize, GFP_KERNEL);
450 priv->dma_rx =
451 (struct dma_desc *)dma_alloc_coherent(priv->device,
452 rxsize *
453 sizeof(struct dma_desc),
454 &priv->dma_rx_phy,
455 GFP_KERNEL);
456 priv->tx_skbuff = kmalloc(sizeof(struct sk_buff *) * txsize,
457 GFP_KERNEL);
458 priv->dma_tx =
459 (struct dma_desc *)dma_alloc_coherent(priv->device,
460 txsize *
461 sizeof(struct dma_desc),
462 &priv->dma_tx_phy,
463 GFP_KERNEL);
464
465 if ((priv->dma_rx == NULL) || (priv->dma_tx == NULL)) {
466 pr_err("%s:ERROR allocating the DMA Tx/Rx desc\n", __func__);
467 return;
468 }
469
470 DBG(probe, INFO, "stmmac (%s) DMA desc rings: virt addr (Rx %p, "
471 "Tx %p)\n\tDMA phy addr (Rx 0x%08x, Tx 0x%08x)\n",
472 dev->name, priv->dma_rx, priv->dma_tx,
473 (unsigned int)priv->dma_rx_phy, (unsigned int)priv->dma_tx_phy);
474
475 /* RX INITIALIZATION */
476 DBG(probe, INFO, "stmmac: SKB addresses:\n"
477 "skb\t\tskb data\tdma data\n");
478
479 for (i = 0; i < rxsize; i++) {
480 struct dma_desc *p = priv->dma_rx + i;
481
482 skb = netdev_alloc_skb_ip_align(dev, bfsize);
483 if (unlikely(skb == NULL)) {
484 pr_err("%s: Rx init fails; skb is NULL\n", __func__);
485 break;
486 }
487 priv->rx_skbuff[i] = skb;
488 priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
489 bfsize, DMA_FROM_DEVICE);
490
491 p->des2 = priv->rx_skbuff_dma[i];
492 if (unlikely(buff2_needed))
493 p->des3 = p->des2 + BUF_SIZE_8KiB;
494 DBG(probe, INFO, "[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i],
495 priv->rx_skbuff[i]->data, priv->rx_skbuff_dma[i]);
496 }
497 priv->cur_rx = 0;
498 priv->dirty_rx = (unsigned int)(i - rxsize);
499 priv->dma_buf_sz = bfsize;
500 buf_sz = bfsize;
501
502 /* TX INITIALIZATION */
503 for (i = 0; i < txsize; i++) {
504 priv->tx_skbuff[i] = NULL;
505 priv->dma_tx[i].des2 = 0;
506 }
507 priv->dirty_tx = 0;
508 priv->cur_tx = 0;
509
510 /* Clear the Rx/Tx descriptors */
511 priv->mac_type->ops->init_rx_desc(priv->dma_rx, rxsize, dis_ic);
512 priv->mac_type->ops->init_tx_desc(priv->dma_tx, txsize);
513
514 if (netif_msg_hw(priv)) {
515 pr_info("RX descriptor ring:\n");
516 display_ring(priv->dma_rx, rxsize);
517 pr_info("TX descriptor ring:\n");
518 display_ring(priv->dma_tx, txsize);
519 }
520 return;
521}
522
523static void dma_free_rx_skbufs(struct stmmac_priv *priv)
524{
525 int i;
526
527 for (i = 0; i < priv->dma_rx_size; i++) {
528 if (priv->rx_skbuff[i]) {
529 dma_unmap_single(priv->device, priv->rx_skbuff_dma[i],
530 priv->dma_buf_sz, DMA_FROM_DEVICE);
531 dev_kfree_skb_any(priv->rx_skbuff[i]);
532 }
533 priv->rx_skbuff[i] = NULL;
534 }
535 return;
536}
537
538static void dma_free_tx_skbufs(struct stmmac_priv *priv)
539{
540 int i;
541
542 for (i = 0; i < priv->dma_tx_size; i++) {
543 if (priv->tx_skbuff[i] != NULL) {
544 struct dma_desc *p = priv->dma_tx + i;
545 if (p->des2)
546 dma_unmap_single(priv->device, p->des2,
547 priv->mac_type->ops->get_tx_len(p),
548 DMA_TO_DEVICE);
549 dev_kfree_skb_any(priv->tx_skbuff[i]);
550 priv->tx_skbuff[i] = NULL;
551 }
552 }
553 return;
554}
555
556static void free_dma_desc_resources(struct stmmac_priv *priv)
557{
558 /* Release the DMA TX/RX socket buffers */
559 dma_free_rx_skbufs(priv);
560 dma_free_tx_skbufs(priv);
561
562 /* Free the region of consistent memory previously allocated for
563 * the DMA */
564 dma_free_coherent(priv->device,
565 priv->dma_tx_size * sizeof(struct dma_desc),
566 priv->dma_tx, priv->dma_tx_phy);
567 dma_free_coherent(priv->device,
568 priv->dma_rx_size * sizeof(struct dma_desc),
569 priv->dma_rx, priv->dma_rx_phy);
570 kfree(priv->rx_skbuff_dma);
571 kfree(priv->rx_skbuff);
572 kfree(priv->tx_skbuff);
573
574 return;
575}
576
577/**
578 * stmmac_dma_start_tx
579 * @ioaddr: device I/O address
580 * Description: this function starts the DMA tx process.
581 */
582static void stmmac_dma_start_tx(unsigned long ioaddr)
583{
584 u32 value = readl(ioaddr + DMA_CONTROL);
585 value |= DMA_CONTROL_ST;
586 writel(value, ioaddr + DMA_CONTROL);
587 return;
588}
589
590static void stmmac_dma_stop_tx(unsigned long ioaddr)
591{
592 u32 value = readl(ioaddr + DMA_CONTROL);
593 value &= ~DMA_CONTROL_ST;
594 writel(value, ioaddr + DMA_CONTROL);
595 return;
596}
597
598/**
599 * stmmac_dma_start_rx
600 * @ioaddr: device I/O address
601 * Description: this function starts the DMA rx process.
602 */
603static void stmmac_dma_start_rx(unsigned long ioaddr)
604{
605 u32 value = readl(ioaddr + DMA_CONTROL);
606 value |= DMA_CONTROL_SR;
607 writel(value, ioaddr + DMA_CONTROL);
608
609 return;
610}
611
612static void stmmac_dma_stop_rx(unsigned long ioaddr)
613{
614 u32 value = readl(ioaddr + DMA_CONTROL);
615 value &= ~DMA_CONTROL_SR;
616 writel(value, ioaddr + DMA_CONTROL);
617
618 return;
619}
620
621/**
622 * stmmac_dma_operation_mode - HW DMA operation mode
623 * @priv : pointer to the private device structure.
624 * Description: it sets the DMA operation mode: tx/rx DMA thresholds
625 * or Store-And-Forward capability. It also verifies the COE for the
626 * transmission in case of Giga ETH.
627 */
628static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
629{
630 if (!priv->is_gmac) {
631 /* MAC 10/100 */
632 priv->mac_type->ops->dma_mode(priv->dev->base_addr, tc, 0);
633 priv->tx_coe = NO_HW_CSUM;
634 } else {
635 if ((priv->dev->mtu <= ETH_DATA_LEN) && (tx_coe)) {
636 priv->mac_type->ops->dma_mode(priv->dev->base_addr,
637 SF_DMA_MODE, SF_DMA_MODE);
638 tc = SF_DMA_MODE;
639 priv->tx_coe = HW_CSUM;
640 } else {
641 /* Checksum computation is performed in software. */
642 priv->mac_type->ops->dma_mode(priv->dev->base_addr, tc,
643 SF_DMA_MODE);
644 priv->tx_coe = NO_HW_CSUM;
645 }
646 }
647 tx_coe = priv->tx_coe;
648
649 return;
650}
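/* In practice, a GMAC running with a standard MTU and tx_coe enabled
 * ends up in store-and-forward mode with HW checksumming, while jumbo
 * MTUs (or tx_coe off) fall back to threshold mode with the checksum
 * done in software; the 10/100 core always uses the software path. */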
651
652#ifdef STMMAC_DEBUG
653/**
654 * show_tx_process_state
655 * @status: tx descriptor status field
656 * Description: it shows the Transmit Process State for CSR5[22:20]
657 */
658static void show_tx_process_state(unsigned int status)
659{
660 unsigned int state;
661 state = (status & DMA_STATUS_TS_MASK) >> DMA_STATUS_TS_SHIFT;
662
663 switch (state) {
664 case 0:
665 pr_info("- TX (Stopped): Reset or Stop command\n");
666 break;
667 case 1:
668		pr_info("- TX (Running): Fetching the Tx desc\n");
669 break;
670 case 2:
671 pr_info("- TX (Running): Waiting for end of tx\n");
672 break;
673 case 3:
674 pr_info("- TX (Running): Reading the data "
675 "and queuing the data into the Tx buf\n");
676 break;
677 case 6:
678 pr_info("- TX (Suspended): Tx Buff Underflow "
679 "or an unavailable Transmit descriptor\n");
680 break;
681 case 7:
682 pr_info("- TX (Running): Closing Tx descriptor\n");
683 break;
684 default:
685 break;
686 }
687 return;
688}
689
690/**
691 * show_rx_process_state
692 * @status: rx descriptor status field
693 * Description: it shows the Receive Process State for CSR5[19:17]
694 */
695static void show_rx_process_state(unsigned int status)
696{
697 unsigned int state;
698 state = (status & DMA_STATUS_RS_MASK) >> DMA_STATUS_RS_SHIFT;
699
700 switch (state) {
701 case 0:
702 pr_info("- RX (Stopped): Reset or Stop command\n");
703 break;
704 case 1:
705 pr_info("- RX (Running): Fetching the Rx desc\n");
706 break;
707 case 2:
708		pr_info("- RX (Running): Checking for end of pkt\n");
709 break;
710 case 3:
711 pr_info("- RX (Running): Waiting for Rx pkt\n");
712 break;
713 case 4:
714 pr_info("- RX (Suspended): Unavailable Rx buf\n");
715 break;
716 case 5:
717 pr_info("- RX (Running): Closing Rx descriptor\n");
718 break;
719 case 6:
720		pr_info("- RX (Running): Flushing the current frame"
721 " from the Rx buf\n");
722 break;
723 case 7:
724 pr_info("- RX (Running): Queuing the Rx frame"
725 " from the Rx buf into memory\n");
726 break;
727 default:
728 break;
729 }
730 return;
731}
732#endif
733
734/**
735 * stmmac_tx:
736 * @priv: private driver structure
737 * Description: it reclaims resources after transmission completes.
738 */
739static void stmmac_tx(struct stmmac_priv *priv)
740{
741 unsigned int txsize = priv->dma_tx_size;
742 unsigned long ioaddr = priv->dev->base_addr;
743
744 while (priv->dirty_tx != priv->cur_tx) {
745 int last;
746 unsigned int entry = priv->dirty_tx % txsize;
747 struct sk_buff *skb = priv->tx_skbuff[entry];
748 struct dma_desc *p = priv->dma_tx + entry;
749
750 /* Check if the descriptor is owned by the DMA. */
751 if (priv->mac_type->ops->get_tx_owner(p))
752 break;
753
754 /* Verify tx error by looking at the last segment */
755 last = priv->mac_type->ops->get_tx_ls(p);
756 if (likely(last)) {
757 int tx_error =
758 priv->mac_type->ops->tx_status(&priv->dev->stats,
759 &priv->xstats,
760 p, ioaddr);
761 if (likely(tx_error == 0)) {
762 priv->dev->stats.tx_packets++;
763 priv->xstats.tx_pkt_n++;
764 } else
765 priv->dev->stats.tx_errors++;
766 }
767 TX_DBG("%s: curr %d, dirty %d\n", __func__,
768 priv->cur_tx, priv->dirty_tx);
769
770 if (likely(p->des2))
771 dma_unmap_single(priv->device, p->des2,
772 priv->mac_type->ops->get_tx_len(p),
773 DMA_TO_DEVICE);
774 if (unlikely(p->des3))
775 p->des3 = 0;
776
777 if (likely(skb != NULL)) {
778 /*
779 * If there's room in the queue (limit it to size)
780 * we add this skb back into the pool,
781 * if it's the right size.
782 */
783 if ((skb_queue_len(&priv->rx_recycle) <
784 priv->dma_rx_size) &&
785 skb_recycle_check(skb, priv->dma_buf_sz))
786 __skb_queue_head(&priv->rx_recycle, skb);
787 else
788 dev_kfree_skb(skb);
789
790 priv->tx_skbuff[entry] = NULL;
791 }
792
793 priv->mac_type->ops->release_tx_desc(p);
794
795 entry = (++priv->dirty_tx) % txsize;
796 }
797 if (unlikely(netif_queue_stopped(priv->dev) &&
798 stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv))) {
799 netif_tx_lock(priv->dev);
800 if (netif_queue_stopped(priv->dev) &&
801 stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv)) {
802 TX_DBG("%s: restart transmit\n", __func__);
803 netif_wake_queue(priv->dev);
804 }
805 netif_tx_unlock(priv->dev);
806 }
807 return;
808}
809
810static inline void stmmac_enable_irq(struct stmmac_priv *priv)
811{
812#ifndef CONFIG_STMMAC_TIMER
813 writel(DMA_INTR_DEFAULT_MASK, priv->dev->base_addr + DMA_INTR_ENA);
814#else
815 priv->tm->timer_start(tmrate);
816#endif
817}
818
819static inline void stmmac_disable_irq(struct stmmac_priv *priv)
820{
821#ifndef CONFIG_STMMAC_TIMER
822 writel(0, priv->dev->base_addr + DMA_INTR_ENA);
823#else
824 priv->tm->timer_stop();
825#endif
826}
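/* When CONFIG_STMMAC_TIMER is set, the two helpers above leave the DMA
 * interrupts masked and pace the RX/TX processing with the external
 * timer instead, which is how the driver mitigates interrupts. */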
827
828static int stmmac_has_work(struct stmmac_priv *priv)
829{
830 unsigned int has_work = 0;
831 int rxret, tx_work = 0;
832
833 rxret = priv->mac_type->ops->get_rx_owner(priv->dma_rx +
834 (priv->cur_rx % priv->dma_rx_size));
835
836 if (priv->dirty_tx != priv->cur_tx)
837 tx_work = 1;
838
839 if (likely(!rxret || tx_work))
840 has_work = 1;
841
842 return has_work;
843}
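/* In other words, there is work to do when the next RX descriptor has
 * been handed back to the CPU (its owner bit is cleared) or when
 * completed TX descriptors are still waiting to be reclaimed. */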
844
845static inline void _stmmac_schedule(struct stmmac_priv *priv)
846{
847 if (likely(stmmac_has_work(priv))) {
848 stmmac_disable_irq(priv);
849 napi_schedule(&priv->napi);
850 }
851}
852
853#ifdef CONFIG_STMMAC_TIMER
854void stmmac_schedule(struct net_device *dev)
855{
856 struct stmmac_priv *priv = netdev_priv(dev);
857
858 priv->xstats.sched_timer_n++;
859
860 _stmmac_schedule(priv);
861
862 return;
863}
864
865static void stmmac_no_timer_started(unsigned int x)
866{
867}
868
869static void stmmac_no_timer_stopped(void)
870{
871}
872#endif
873
874/**
875 * stmmac_tx_err:
876 * @priv: pointer to the private device structure
877 * Description: it cleans the descriptors and restarts the transmission
878 * in case of errors.
879 */
880static void stmmac_tx_err(struct stmmac_priv *priv)
881{
882 netif_stop_queue(priv->dev);
883
884 stmmac_dma_stop_tx(priv->dev->base_addr);
885 dma_free_tx_skbufs(priv);
886 priv->mac_type->ops->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
887 priv->dirty_tx = 0;
888 priv->cur_tx = 0;
889 stmmac_dma_start_tx(priv->dev->base_addr);
890
891 priv->dev->stats.tx_errors++;
892 netif_wake_queue(priv->dev);
893
894 return;
895}
896
897/**
898 * stmmac_dma_interrupt - Interrupt handler for the driver
899 * @dev: net device structure
900 * Description: Interrupt handler for the driver (DMA).
901 */
902static void stmmac_dma_interrupt(struct net_device *dev)
903{
904 unsigned long ioaddr = dev->base_addr;
905 struct stmmac_priv *priv = netdev_priv(dev);
906 /* read the status register (CSR5) */
907 u32 intr_status = readl(ioaddr + DMA_STATUS);
908
909 DBG(intr, INFO, "%s: [CSR5: 0x%08x]\n", __func__, intr_status);
910
911#ifdef STMMAC_DEBUG
912 /* It displays the DMA transmit process state (CSR5 register) */
913 if (netif_msg_tx_done(priv))
914 show_tx_process_state(intr_status);
915 if (netif_msg_rx_status(priv))
916 show_rx_process_state(intr_status);
917#endif
918 /* ABNORMAL interrupts */
919 if (unlikely(intr_status & DMA_STATUS_AIS)) {
920 DBG(intr, INFO, "CSR5[15] DMA ABNORMAL IRQ: ");
921 if (unlikely(intr_status & DMA_STATUS_UNF)) {
922 DBG(intr, INFO, "transmit underflow\n");
923 if (unlikely(tc != SF_DMA_MODE)
924 && (tc <= 256)) {
925 /* Try to bump up the threshold */
926 tc += 64;
927 priv->mac_type->ops->dma_mode(ioaddr, tc,
928 SF_DMA_MODE);
929 priv->xstats.threshold = tc;
930 }
931 stmmac_tx_err(priv);
932 priv->xstats.tx_undeflow_irq++;
933 }
934 if (unlikely(intr_status & DMA_STATUS_TJT)) {
935 DBG(intr, INFO, "transmit jabber\n");
936 priv->xstats.tx_jabber_irq++;
937 }
938 if (unlikely(intr_status & DMA_STATUS_OVF)) {
939 DBG(intr, INFO, "recv overflow\n");
940 priv->xstats.rx_overflow_irq++;
941 }
942 if (unlikely(intr_status & DMA_STATUS_RU)) {
943 DBG(intr, INFO, "receive buffer unavailable\n");
944 priv->xstats.rx_buf_unav_irq++;
945 }
946 if (unlikely(intr_status & DMA_STATUS_RPS)) {
947 DBG(intr, INFO, "receive process stopped\n");
948 priv->xstats.rx_process_stopped_irq++;
949 }
950 if (unlikely(intr_status & DMA_STATUS_RWT)) {
951 DBG(intr, INFO, "receive watchdog\n");
952 priv->xstats.rx_watchdog_irq++;
953 }
954 if (unlikely(intr_status & DMA_STATUS_ETI)) {
955 DBG(intr, INFO, "transmit early interrupt\n");
956 priv->xstats.tx_early_irq++;
957 }
958 if (unlikely(intr_status & DMA_STATUS_TPS)) {
959 DBG(intr, INFO, "transmit process stopped\n");
960 priv->xstats.tx_process_stopped_irq++;
961 stmmac_tx_err(priv);
962 }
963 if (unlikely(intr_status & DMA_STATUS_FBI)) {
964 DBG(intr, INFO, "fatal bus error\n");
965 priv->xstats.fatal_bus_error_irq++;
966 stmmac_tx_err(priv);
967 }
968 }
969
970 /* TX/RX NORMAL interrupts */
971 if (intr_status & DMA_STATUS_NIS) {
972 priv->xstats.normal_irq_n++;
973 if (likely((intr_status & DMA_STATUS_RI) ||
974 (intr_status & (DMA_STATUS_TI))))
975 _stmmac_schedule(priv);
976 }
977
978 /* Optional hardware blocks, interrupts should be disabled */
979 if (unlikely(intr_status &
980 (DMA_STATUS_GPI | DMA_STATUS_GMI | DMA_STATUS_GLI)))
981 pr_info("%s: unexpected status %08x\n", __func__, intr_status);
982
 983	/* Clear the interrupts by writing a logic 1 to CSR5[16:0] */
984 writel((intr_status & 0x1ffff), ioaddr + DMA_STATUS);
985
986 DBG(intr, INFO, "\n\n");
987
988 return;
989}
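
/*
 * Worked example of the underflow handling above (illustrative values):
 * starting from a threshold of tc = 64 bytes, successive TX underflows
 * bump the store-and-forward threshold by 64 while tc <= 256, i.e.
 * 64 -> 128 -> 192 -> 256 -> 320; after that (or when SF_DMA_MODE is in
 * use) only stmmac_tx_err() is invoked to restart the engine.
 */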
990
991/**
992 * stmmac_open - open entry point of the driver
993 * @dev : pointer to the device structure.
994 * Description:
995 * This function is the open entry point of the driver.
996 * Return value:
997 * 0 on success and an appropriate (-)ve integer as defined in errno.h
998 * file on failure.
999 */
1000static int stmmac_open(struct net_device *dev)
1001{
1002 struct stmmac_priv *priv = netdev_priv(dev);
1003 unsigned long ioaddr = dev->base_addr;
1004 int ret;
1005
1006	/* Check that the MAC address is valid.  If it's not, fall back
1007	 * to a randomly generated one. A proper address can be set with
1008	 * the following Linux command:
1009	 * ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx */
1010 if (!is_valid_ether_addr(dev->dev_addr)) {
1011 random_ether_addr(dev->dev_addr);
1012 pr_warning("%s: generated random MAC address %pM\n", dev->name,
1013 dev->dev_addr);
1014 }
1015
1016 stmmac_verify_args();
1017
1018 ret = stmmac_init_phy(dev);
1019 if (unlikely(ret)) {
1020 pr_err("%s: Cannot attach to PHY (error: %d)\n", __func__, ret);
1021 return ret;
1022 }
1023
1024 /* Request the IRQ lines */
1025 ret = request_irq(dev->irq, &stmmac_interrupt,
1026 IRQF_SHARED, dev->name, dev);
1027 if (unlikely(ret < 0)) {
1028 pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n",
1029 __func__, dev->irq, ret);
1030 return ret;
1031 }
1032
1033#ifdef CONFIG_STMMAC_TIMER
1034	priv->tm = kmalloc(sizeof(struct stmmac_timer), GFP_KERNEL);
1035	if (unlikely(priv->tm == NULL)) {
1036		pr_err("%s: ERROR: timer memory alloc failed\n", __func__);
1037 return -ENOMEM;
1038 }
1039 priv->tm->freq = tmrate;
1040
1041	/* Test whether the HW timer can actually be used.
1042	 * In case of failure, continue without the timer. */
1043 if (unlikely((stmmac_open_ext_timer(dev, priv->tm)) < 0)) {
1044 pr_warning("stmmaceth: cannot attach the HW timer\n");
1045 tmrate = 0;
1046 priv->tm->freq = 0;
1047 priv->tm->timer_start = stmmac_no_timer_started;
1048 priv->tm->timer_stop = stmmac_no_timer_stopped;
1049 }
1050#endif
1051
1052 /* Create and initialize the TX/RX descriptors chains. */
1053 priv->dma_tx_size = STMMAC_ALIGN(dma_txsize);
1054 priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize);
1055 priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
1056 init_dma_desc_rings(dev);
1057
1058 /* DMA initialization and SW reset */
1059 if (unlikely(priv->mac_type->ops->dma_init(ioaddr,
1060 priv->pbl, priv->dma_tx_phy, priv->dma_rx_phy) < 0)) {
1061
1062 pr_err("%s: DMA initialization failed\n", __func__);
1063 return -1;
1064 }
1065
1066 /* Copy the MAC addr into the HW */
1067 priv->mac_type->ops->set_umac_addr(ioaddr, dev->dev_addr, 0);
1068 /* Initialize the MAC Core */
1069 priv->mac_type->ops->core_init(ioaddr);
1070
1071 priv->shutdown = 0;
1072
1073 /* Initialise the MMC (if present) to disable all interrupts. */
1074 writel(0xffffffff, ioaddr + MMC_HIGH_INTR_MASK);
1075 writel(0xffffffff, ioaddr + MMC_LOW_INTR_MASK);
1076
1077 /* Enable the MAC Rx/Tx */
1078 stmmac_mac_enable_rx(ioaddr);
1079 stmmac_mac_enable_tx(ioaddr);
1080
1081 /* Set the HW DMA mode and the COE */
1082 stmmac_dma_operation_mode(priv);
1083
1084 /* Extra statistics */
1085 memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
1086 priv->xstats.threshold = tc;
1087
1088 /* Start the ball rolling... */
1089 DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name);
1090 stmmac_dma_start_tx(ioaddr);
1091 stmmac_dma_start_rx(ioaddr);
1092
1093#ifdef CONFIG_STMMAC_TIMER
1094 priv->tm->timer_start(tmrate);
1095#endif
1096 /* Dump DMA/MAC registers */
1097 if (netif_msg_hw(priv)) {
1098 priv->mac_type->ops->dump_mac_regs(ioaddr);
1099 priv->mac_type->ops->dump_dma_regs(ioaddr);
1100 }
1101
1102 if (priv->phydev)
1103 phy_start(priv->phydev);
1104
1105 napi_enable(&priv->napi);
1106 skb_queue_head_init(&priv->rx_recycle);
1107 netif_start_queue(dev);
1108 return 0;
1109}
1110
1111/**
1112 * stmmac_release - close entry point of the driver
1113 * @dev : device pointer.
1114 * Description:
1115 * This is the stop entry point of the driver.
1116 */
1117static int stmmac_release(struct net_device *dev)
1118{
1119 struct stmmac_priv *priv = netdev_priv(dev);
1120
1121 /* Stop and disconnect the PHY */
1122 if (priv->phydev) {
1123 phy_stop(priv->phydev);
1124 phy_disconnect(priv->phydev);
1125 priv->phydev = NULL;
1126 }
1127
1128 netif_stop_queue(dev);
1129
1130#ifdef CONFIG_STMMAC_TIMER
1131 /* Stop and release the timer */
1132 stmmac_close_ext_timer();
1133 if (priv->tm != NULL)
1134 kfree(priv->tm);
1135#endif
1136 napi_disable(&priv->napi);
1137 skb_queue_purge(&priv->rx_recycle);
1138
1139 /* Free the IRQ lines */
1140 free_irq(dev->irq, dev);
1141
1142 /* Stop TX/RX DMA and clear the descriptors */
1143 stmmac_dma_stop_tx(dev->base_addr);
1144 stmmac_dma_stop_rx(dev->base_addr);
1145
1146 /* Release and free the Rx/Tx resources */
1147 free_dma_desc_resources(priv);
1148
1149 /* Disable the MAC core */
1150 stmmac_mac_disable_tx(dev->base_addr);
1151 stmmac_mac_disable_rx(dev->base_addr);
1152
1153 netif_carrier_off(dev);
1154
1155 return 0;
1156}
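
/*
 * Note: stmmac_release() above tears things down in roughly the reverse
 * order of stmmac_open(): PHY, queue, timer and NAPI first, then the
 * IRQ, the DMA engines, the descriptor memory and finally the MAC core,
 * so the interface quiesces before its resources disappear.
 */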
1157
1158/*
1159 * To perform emulated hardware segmentation on skb.
1160 */
1161static int stmmac_sw_tso(struct stmmac_priv *priv, struct sk_buff *skb)
1162{
1163 struct sk_buff *segs, *curr_skb;
1164 int gso_segs = skb_shinfo(skb)->gso_segs;
1165
1166 /* Estimate the number of fragments in the worst case */
1167 if (unlikely(stmmac_tx_avail(priv) < gso_segs)) {
1168 netif_stop_queue(priv->dev);
1169 TX_DBG(KERN_ERR "%s: TSO BUG! Tx Ring full when queue awake\n",
1170 __func__);
1171 if (stmmac_tx_avail(priv) < gso_segs)
1172 return NETDEV_TX_BUSY;
1173
1174 netif_wake_queue(priv->dev);
1175 }
1176 TX_DBG("\tstmmac_sw_tso: segmenting: skb %p (len %d)\n",
1177 skb, skb->len);
1178
1179 segs = skb_gso_segment(skb, priv->dev->features & ~NETIF_F_TSO);
1180 if (unlikely(IS_ERR(segs)))
1181 goto sw_tso_end;
1182
1183 do {
1184 curr_skb = segs;
1185 segs = segs->next;
1186		TX_DBG("\t\tcurrent skb->len: %d, *curr %p, "
1187		       "*next %p\n", curr_skb->len, curr_skb, segs);
1188 curr_skb->next = NULL;
1189 stmmac_xmit(curr_skb, priv->dev);
1190 } while (segs);
1191
1192sw_tso_end:
1193 dev_kfree_skb(skb);
1194
1195 return NETDEV_TX_OK;
1196}
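
/*
 * Note on the software TSO fallback above: skb_gso_segment() returns a
 * singly linked list of already-segmented skbs, each of which is pushed
 * through the regular stmmac_xmit() path. As a rough worked example, a
 * 64 KiB GSO skb with gso_size 1448 splits into about 46 segments,
 * which is why the ring-space check at the top uses gso_segs instead of
 * the usual nfrags + 1.
 */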
1197
1198static unsigned int stmmac_handle_jumbo_frames(struct sk_buff *skb,
1199 struct net_device *dev,
1200 int csum_insertion)
1201{
1202 struct stmmac_priv *priv = netdev_priv(dev);
1203 unsigned int nopaged_len = skb_headlen(skb);
1204 unsigned int txsize = priv->dma_tx_size;
1205 unsigned int entry = priv->cur_tx % txsize;
1206 struct dma_desc *desc = priv->dma_tx + entry;
1207
1208 if (nopaged_len > BUF_SIZE_8KiB) {
1209
1210 int buf2_size = nopaged_len - BUF_SIZE_8KiB;
1211
1212 desc->des2 = dma_map_single(priv->device, skb->data,
1213 BUF_SIZE_8KiB, DMA_TO_DEVICE);
1214 desc->des3 = desc->des2 + BUF_SIZE_4KiB;
1215 priv->mac_type->ops->prepare_tx_desc(desc, 1, BUF_SIZE_8KiB,
1216 csum_insertion);
1217
1218 entry = (++priv->cur_tx) % txsize;
1219 desc = priv->dma_tx + entry;
1220
1221 desc->des2 = dma_map_single(priv->device,
1222 skb->data + BUF_SIZE_8KiB,
1223 buf2_size, DMA_TO_DEVICE);
1224 desc->des3 = desc->des2 + BUF_SIZE_4KiB;
1225 priv->mac_type->ops->prepare_tx_desc(desc, 0,
1226 buf2_size, csum_insertion);
1227 priv->mac_type->ops->set_tx_owner(desc);
1228 priv->tx_skbuff[entry] = NULL;
1229 } else {
1230 desc->des2 = dma_map_single(priv->device, skb->data,
1231 nopaged_len, DMA_TO_DEVICE);
1232 desc->des3 = desc->des2 + BUF_SIZE_4KiB;
1233 priv->mac_type->ops->prepare_tx_desc(desc, 1, nopaged_len,
1234 csum_insertion);
1235 }
1236 return entry;
1237}
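
/*
 * Worked example for the jumbo path above (illustrative sizes): each
 * descriptor addresses two buffers, des2 and des3 = des2 + 4 KiB, so
 * one descriptor covers up to BUF_SIZE_8KiB of linear data. A
 * 9000-byte frame is therefore split into 8192 bytes on the first
 * descriptor and the remaining 808 bytes on a second one.
 */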
1238
1239/**
1240 * stmmac_xmit:
1241 * @skb : the socket buffer
1242 * @dev : device pointer
1243 * Description : Tx entry point of the driver.
1244 */
1245static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1246{
1247 struct stmmac_priv *priv = netdev_priv(dev);
1248 unsigned int txsize = priv->dma_tx_size;
1249 unsigned int entry;
1250 int i, csum_insertion = 0;
1251 int nfrags = skb_shinfo(skb)->nr_frags;
1252 struct dma_desc *desc, *first;
1253
1254 if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
1255 if (!netif_queue_stopped(dev)) {
1256 netif_stop_queue(dev);
1257 /* This is a hard error, log it. */
1258 pr_err("%s: BUG! Tx Ring full when queue awake\n",
1259 __func__);
1260 }
1261 return NETDEV_TX_BUSY;
1262 }
1263
1264 entry = priv->cur_tx % txsize;
1265
1266#ifdef STMMAC_XMIT_DEBUG
1267 if ((skb->len > ETH_FRAME_LEN) || nfrags)
1268 pr_info("stmmac xmit:\n"
1269 "\tskb addr %p - len: %d - nopaged_len: %d\n"
1270 "\tn_frags: %d - ip_summed: %d - %s gso\n",
1271 skb, skb->len, skb_headlen(skb), nfrags, skb->ip_summed,
1272 !skb_is_gso(skb) ? "isn't" : "is");
1273#endif
1274
1275 if (unlikely(skb_is_gso(skb)))
1276 return stmmac_sw_tso(priv, skb);
1277
1278 if (likely((skb->ip_summed == CHECKSUM_PARTIAL))) {
1279 if (likely(priv->tx_coe == NO_HW_CSUM))
1280 skb_checksum_help(skb);
1281 else
1282 csum_insertion = 1;
1283 }
1284
1285 desc = priv->dma_tx + entry;
1286 first = desc;
1287
1288#ifdef STMMAC_XMIT_DEBUG
1289 if ((nfrags > 0) || (skb->len > ETH_FRAME_LEN))
1290 pr_debug("stmmac xmit: skb len: %d, nopaged_len: %d,\n"
1291 "\t\tn_frags: %d, ip_summed: %d\n",
1292 skb->len, skb_headlen(skb), nfrags, skb->ip_summed);
1293#endif
1294 priv->tx_skbuff[entry] = skb;
1295 if (unlikely(skb->len >= BUF_SIZE_4KiB)) {
1296 entry = stmmac_handle_jumbo_frames(skb, dev, csum_insertion);
1297 desc = priv->dma_tx + entry;
1298 } else {
1299 unsigned int nopaged_len = skb_headlen(skb);
1300 desc->des2 = dma_map_single(priv->device, skb->data,
1301 nopaged_len, DMA_TO_DEVICE);
1302 priv->mac_type->ops->prepare_tx_desc(desc, 1, nopaged_len,
1303 csum_insertion);
1304 }
1305
1306 for (i = 0; i < nfrags; i++) {
1307 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1308 int len = frag->size;
1309
1310 entry = (++priv->cur_tx) % txsize;
1311 desc = priv->dma_tx + entry;
1312
1313 TX_DBG("\t[entry %d] segment len: %d\n", entry, len);
1314 desc->des2 = dma_map_page(priv->device, frag->page,
1315 frag->page_offset,
1316 len, DMA_TO_DEVICE);
1317 priv->tx_skbuff[entry] = NULL;
1318 priv->mac_type->ops->prepare_tx_desc(desc, 0, len,
1319 csum_insertion);
1320 priv->mac_type->ops->set_tx_owner(desc);
1321 }
1322
1323	/* Interrupt on completion only for the latest segment */
1324 priv->mac_type->ops->close_tx_desc(desc);
1325#ifdef CONFIG_STMMAC_TIMER
1326	/* Clear the IC (interrupt on completion) bit when using timers */
1327 priv->mac_type->ops->clear_tx_ic(desc);
1328#endif
1329	/* To avoid a race condition, hand the first descriptor to the DMA last */
1330 priv->mac_type->ops->set_tx_owner(first);
1331
1332 priv->cur_tx++;
1333
1334#ifdef STMMAC_XMIT_DEBUG
1335 if (netif_msg_pktdata(priv)) {
1336 pr_info("stmmac xmit: current=%d, dirty=%d, entry=%d, "
1337 "first=%p, nfrags=%d\n",
1338 (priv->cur_tx % txsize), (priv->dirty_tx % txsize),
1339 entry, first, nfrags);
1340 display_ring(priv->dma_tx, txsize);
1341 pr_info(">>> frame to be transmitted: ");
1342 print_pkt(skb->data, skb->len);
1343 }
1344#endif
1345 if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
1346 TX_DBG("%s: stop transmitted packets\n", __func__);
1347 netif_stop_queue(dev);
1348 }
1349
1350 dev->stats.tx_bytes += skb->len;
1351
1352	/* CSR1 enables the transmit DMA to check for new descriptors */
1353 writel(1, dev->base_addr + DMA_XMT_POLL_DEMAND);
1354
1355 return NETDEV_TX_OK;
1356}
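
/*
 * Ordering note for stmmac_xmit() above: every descriptor after the
 * first has its owner bit set as soon as it is prepared, while the
 * first descriptor is handed to the DMA only at the very end via
 * set_tx_owner(first). Should the DMA poll the ring mid-way, it stops
 * at the still CPU-owned head instead of transmitting a half-built
 * frame -- that is the race the comment above refers to.
 */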
1357
1358static inline void stmmac_rx_refill(struct stmmac_priv *priv)
1359{
1360 unsigned int rxsize = priv->dma_rx_size;
1361 int bfsize = priv->dma_buf_sz;
1362 struct dma_desc *p = priv->dma_rx;
1363
1364 for (; priv->cur_rx - priv->dirty_rx > 0; priv->dirty_rx++) {
1365 unsigned int entry = priv->dirty_rx % rxsize;
1366 if (likely(priv->rx_skbuff[entry] == NULL)) {
1367 struct sk_buff *skb;
1368
1369 skb = __skb_dequeue(&priv->rx_recycle);
1370 if (skb == NULL)
1371 skb = netdev_alloc_skb_ip_align(priv->dev,
1372 bfsize);
1373
1374 if (unlikely(skb == NULL))
1375 break;
1376
1377 priv->rx_skbuff[entry] = skb;
1378 priv->rx_skbuff_dma[entry] =
1379 dma_map_single(priv->device, skb->data, bfsize,
1380 DMA_FROM_DEVICE);
1381
1382 (p + entry)->des2 = priv->rx_skbuff_dma[entry];
1383 if (unlikely(priv->is_gmac)) {
1384 if (bfsize >= BUF_SIZE_8KiB)
1385 (p + entry)->des3 =
1386 (p + entry)->des2 + BUF_SIZE_8KiB;
1387 }
1388 RX_DBG(KERN_INFO "\trefill entry #%d\n", entry);
1389 }
1390 priv->mac_type->ops->set_rx_owner(p + entry);
1391 }
1392 return;
1393}
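
/*
 * The rx_recycle pool used above is a bounded free-list: skbs reclaimed
 * on TX completion (see stmmac_tx) are parked there when
 * skb_recycle_check() finds them clean and large enough, letting the
 * refill path skip netdev_alloc_skb_ip_align() on the fast path. The
 * pool is capped at dma_rx_size entries, so it never holds more than
 * one extra ring's worth of buffers.
 */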
1394
1395static int stmmac_rx(struct stmmac_priv *priv, int limit)
1396{
1397 unsigned int rxsize = priv->dma_rx_size;
1398 unsigned int entry = priv->cur_rx % rxsize;
1399 unsigned int next_entry;
1400 unsigned int count = 0;
1401 struct dma_desc *p = priv->dma_rx + entry;
1402 struct dma_desc *p_next;
1403
1404#ifdef STMMAC_RX_DEBUG
1405 if (netif_msg_hw(priv)) {
1406 pr_debug(">>> stmmac_rx: descriptor ring:\n");
1407 display_ring(priv->dma_rx, rxsize);
1408 }
1409#endif
1411 while (!priv->mac_type->ops->get_rx_owner(p)) {
1412 int status;
1413
1414 if (count >= limit)
1415 break;
1416
1417 count++;
1418
1419 next_entry = (++priv->cur_rx) % rxsize;
1420 p_next = priv->dma_rx + next_entry;
1421 prefetch(p_next);
1422
1423 /* read the status of the incoming frame */
1424 status = (priv->mac_type->ops->rx_status(&priv->dev->stats,
1425 &priv->xstats, p));
1426 if (unlikely(status == discard_frame))
1427 priv->dev->stats.rx_errors++;
1428 else {
1429 struct sk_buff *skb;
1430 /* Length should omit the CRC */
1431 int frame_len =
1432 priv->mac_type->ops->get_rx_frame_len(p) - 4;
1433
1434#ifdef STMMAC_RX_DEBUG
1435 if (frame_len > ETH_FRAME_LEN)
1436 pr_debug("\tRX frame size %d, COE status: %d\n",
1437 frame_len, status);
1438
1439 if (netif_msg_hw(priv))
1440 pr_debug("\tdesc: %p [entry %d] buff=0x%x\n",
1441 p, entry, p->des2);
1442#endif
1443 skb = priv->rx_skbuff[entry];
1444 if (unlikely(!skb)) {
1445 pr_err("%s: Inconsistent Rx descriptor chain\n",
1446 priv->dev->name);
1447 priv->dev->stats.rx_dropped++;
1448 break;
1449 }
1450 prefetch(skb->data - NET_IP_ALIGN);
1451 priv->rx_skbuff[entry] = NULL;
1452
1453 skb_put(skb, frame_len);
1454 dma_unmap_single(priv->device,
1455 priv->rx_skbuff_dma[entry],
1456 priv->dma_buf_sz, DMA_FROM_DEVICE);
1457#ifdef STMMAC_RX_DEBUG
1458 if (netif_msg_pktdata(priv)) {
1459 pr_info(" frame received (%dbytes)", frame_len);
1460 print_pkt(skb->data, frame_len);
1461 }
1462#endif
1463 skb->protocol = eth_type_trans(skb, priv->dev);
1464
1465 if (unlikely(status == csum_none)) {
1466 /* always for the old mac 10/100 */
1467 skb->ip_summed = CHECKSUM_NONE;
1468 netif_receive_skb(skb);
1469 } else {
1470 skb->ip_summed = CHECKSUM_UNNECESSARY;
1471 napi_gro_receive(&priv->napi, skb);
1472 }
1473
1474 priv->dev->stats.rx_packets++;
1475 priv->dev->stats.rx_bytes += frame_len;
1476 priv->dev->last_rx = jiffies;
1477 }
1478 entry = next_entry;
1479 p = p_next; /* use prefetched values */
1480 }
1481
1482 stmmac_rx_refill(priv);
1483
1484 priv->xstats.rx_pkt_n += count;
1485
1486 return count;
1487}
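
/*
 * Worked example for the length handling above: the MAC reports frame
 * lengths including the 4-byte FCS, so a minimum-sized Ethernet frame
 * shows up as 64 bytes in the descriptor and get_rx_frame_len() - 4
 * delivers the expected 60 bytes (14-byte header + 46-byte payload) to
 * the stack.
 */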
1488
1489/**
1490 * stmmac_poll - stmmac poll method (NAPI)
1491 * @napi : pointer to the napi structure.
1492 * @budget : maximum number of packets that the current CPU can receive from
1493 * all interfaces.
1494 * Description :
1495 * This function implements the reception process.
1496 * It also runs the TX completion path.
1497 */
1498static int stmmac_poll(struct napi_struct *napi, int budget)
1499{
1500 struct stmmac_priv *priv = container_of(napi, struct stmmac_priv, napi);
1501 int work_done = 0;
1502
1503 priv->xstats.poll_n++;
1504 stmmac_tx(priv);
1505 work_done = stmmac_rx(priv, budget);
1506
1507 if (work_done < budget) {
1508 napi_complete(napi);
1509 stmmac_enable_irq(priv);
1510 }
1511 return work_done;
1512}
1513
1514/**
1515 * stmmac_tx_timeout
1516 * @dev : Pointer to net device structure
1517 * Description: this function is called when a packet transmission fails to
1518 * complete within a reasonable time. The driver will mark the error in the
1519 * netdev structure and arrange for the device to be reset to a sane state
1520 * in order to transmit a new packet.
1521 */
1522static void stmmac_tx_timeout(struct net_device *dev)
1523{
1524 struct stmmac_priv *priv = netdev_priv(dev);
1525
1526 /* Clear Tx resources and restart transmitting again */
1527 stmmac_tx_err(priv);
1528 return;
1529}
1530
1531/* Configuration changes (passed on by ifconfig) */
1532static int stmmac_config(struct net_device *dev, struct ifmap *map)
1533{
1534 if (dev->flags & IFF_UP) /* can't act on a running interface */
1535 return -EBUSY;
1536
1537 /* Don't allow changing the I/O address */
1538 if (map->base_addr != dev->base_addr) {
1539 pr_warning("%s: can't change I/O address\n", dev->name);
1540 return -EOPNOTSUPP;
1541 }
1542
1543 /* Don't allow changing the IRQ */
1544 if (map->irq != dev->irq) {
1545 pr_warning("%s: can't change IRQ number %d\n",
1546 dev->name, dev->irq);
1547 return -EOPNOTSUPP;
1548 }
1549
1550 /* ignore other fields */
1551 return 0;
1552}
1553
1554/**
1555 * stmmac_multicast_list - entry point for multicast addressing
1556 * @dev : pointer to the device structure
1557 * Description:
1558 * This function is a driver entry point which gets called by the kernel
1559 * whenever multicast addresses must be enabled/disabled.
1560 * Return value:
1561 * void.
1562 */
1563static void stmmac_multicast_list(struct net_device *dev)
1564{
1565 struct stmmac_priv *priv = netdev_priv(dev);
1566
1567 spin_lock(&priv->lock);
1568 priv->mac_type->ops->set_filter(dev);
1569 spin_unlock(&priv->lock);
1570 return;
1571}
1572
1573/**
1574 * stmmac_change_mtu - entry point to change MTU size for the device.
1575 * @dev : device pointer.
1576 * @new_mtu : the new MTU size for the device.
1577 * Description: the Maximum Transfer Unit (MTU) is used by the network layer
1578 * to drive packet transmission. Ethernet has an MTU of 1500 octets
1579 * (ETH_DATA_LEN). This value can be changed with ifconfig.
1580 * Return value:
1581 * 0 on success and an appropriate (-)ve integer as defined in errno.h
1582 * file on failure.
1583 */
1584static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
1585{
1586 struct stmmac_priv *priv = netdev_priv(dev);
1587 int max_mtu;
1588
1589 if (netif_running(dev)) {
1590 pr_err("%s: must be stopped to change its MTU\n", dev->name);
1591 return -EBUSY;
1592 }
1593
1594 if (priv->is_gmac)
1595 max_mtu = JUMBO_LEN;
1596 else
1597 max_mtu = ETH_DATA_LEN;
1598
1599 if ((new_mtu < 46) || (new_mtu > max_mtu)) {
1600 pr_err("%s: invalid MTU, max MTU is: %d\n", dev->name, max_mtu);
1601 return -EINVAL;
1602 }
1603
1604 dev->mtu = new_mtu;
1605
1606 return 0;
1607}
1608
1609static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
1610{
1611 struct net_device *dev = (struct net_device *)dev_id;
1612	struct stmmac_priv *priv;
1613
1614	if (unlikely(!dev)) {
1615		pr_err("%s: invalid dev pointer\n", __func__);
1616		return IRQ_NONE;
1617	}
	priv = netdev_priv(dev);
1618
1619 if (priv->is_gmac) {
1620 unsigned long ioaddr = dev->base_addr;
1621 /* To handle GMAC own interrupts */
1622 priv->mac_type->ops->host_irq_status(ioaddr);
1623 }
1624 stmmac_dma_interrupt(dev);
1625
1626 return IRQ_HANDLED;
1627}
1628
1629#ifdef CONFIG_NET_POLL_CONTROLLER
1630/* Polling receive - used by NETCONSOLE and other diagnostic tools
1631 * to allow network I/O with interrupts disabled. */
1632static void stmmac_poll_controller(struct net_device *dev)
1633{
1634 disable_irq(dev->irq);
1635 stmmac_interrupt(dev->irq, dev);
1636 enable_irq(dev->irq);
1637}
1638#endif
1639
1640/**
1641 * stmmac_ioctl - Entry point for the Ioctl
1642 * @dev: Device pointer.
1643 * @rq: An IOCTL specific structure, that can contain a pointer to
1644 * a proprietary structure used to pass information to the driver.
1645 * @cmd: IOCTL command
1646 * Description:
1647 * Currently no special functionality is supported in IOCTL; only
1648 * phy_mii_ioctl(...) can be invoked.
1649 */
1650static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1651{
1652 struct stmmac_priv *priv = netdev_priv(dev);
1653 int ret = -EOPNOTSUPP;
1654
1655 if (!netif_running(dev))
1656 return -EINVAL;
1657
1658 switch (cmd) {
1659 case SIOCGMIIPHY:
1660 case SIOCGMIIREG:
1661 case SIOCSMIIREG:
1662 if (!priv->phydev)
1663 return -EINVAL;
1664
1665 spin_lock(&priv->lock);
1666 ret = phy_mii_ioctl(priv->phydev, if_mii(rq), cmd);
1667 spin_unlock(&priv->lock);
1668 default:
1669 break;
1670 }
1671 return ret;
1672}
1673
1674#ifdef STMMAC_VLAN_TAG_USED
1675static void stmmac_vlan_rx_register(struct net_device *dev,
1676 struct vlan_group *grp)
1677{
1678 struct stmmac_priv *priv = netdev_priv(dev);
1679
1680 DBG(probe, INFO, "%s: Setting vlgrp to %p\n", dev->name, grp);
1681
1682 spin_lock(&priv->lock);
1683 priv->vlgrp = grp;
1684 spin_unlock(&priv->lock);
1685
1686 return;
1687}
1688#endif
1689
1690static const struct net_device_ops stmmac_netdev_ops = {
1691 .ndo_open = stmmac_open,
1692 .ndo_start_xmit = stmmac_xmit,
1693 .ndo_stop = stmmac_release,
1694 .ndo_change_mtu = stmmac_change_mtu,
1695 .ndo_set_multicast_list = stmmac_multicast_list,
1696 .ndo_tx_timeout = stmmac_tx_timeout,
1697 .ndo_do_ioctl = stmmac_ioctl,
1698 .ndo_set_config = stmmac_config,
1699#ifdef STMMAC_VLAN_TAG_USED
1700 .ndo_vlan_rx_register = stmmac_vlan_rx_register,
1701#endif
1702#ifdef CONFIG_NET_POLL_CONTROLLER
1703 .ndo_poll_controller = stmmac_poll_controller,
1704#endif
1705 .ndo_set_mac_address = eth_mac_addr,
1706};
1707
1708/**
1709 * stmmac_probe - Initialization of the adapter .
1710 * @dev : device pointer
1711 * Description: The function initializes the network device structure for
1712 * the STMMAC driver. It also calls the low level routines
1713 * in order to init the HW (i.e. the DMA engine)
1714 */
1715static int stmmac_probe(struct net_device *dev)
1716{
1717 int ret = 0;
1718 struct stmmac_priv *priv = netdev_priv(dev);
1719
1720 ether_setup(dev);
1721
1722 dev->netdev_ops = &stmmac_netdev_ops;
1723 stmmac_set_ethtool_ops(dev);
1724
1725 dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA);
1726 dev->watchdog_timeo = msecs_to_jiffies(watchdog);
1727#ifdef STMMAC_VLAN_TAG_USED
1728 /* Both mac100 and gmac support receive VLAN tag detection */
1729 dev->features |= NETIF_F_HW_VLAN_RX;
1730#endif
1731 priv->msg_enable = netif_msg_init(debug, default_msg_level);
1732
1733 if (priv->is_gmac)
1734 priv->rx_csum = 1;
1735
1736 if (flow_ctrl)
1737 priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
1738
1739 priv->pause = pause;
1740 netif_napi_add(dev, &priv->napi, stmmac_poll, 64);
1741
1742 /* Get the MAC address */
1743 priv->mac_type->ops->get_umac_addr(dev->base_addr, dev->dev_addr, 0);
1744
1745 if (!is_valid_ether_addr(dev->dev_addr))
1746		pr_warning("\tno valid MAC address; "
1747			   "please, use ifconfig or nwhwconfig!\n");
1748
1749 ret = register_netdev(dev);
1750 if (ret) {
1751 pr_err("%s: ERROR %i registering the device\n",
1752 __func__, ret);
1753 return -ENODEV;
1754 }
1755
1756 DBG(probe, DEBUG, "%s: Scatter/Gather: %s - HW checksums: %s\n",
1757 dev->name, (dev->features & NETIF_F_SG) ? "on" : "off",
1758 (dev->features & NETIF_F_HW_CSUM) ? "on" : "off");
1759
1760 spin_lock_init(&priv->lock);
1761
1762 return ret;
1763}
1764
1765/**
1766 * stmmac_mac_device_setup
1767 * @dev : device pointer
1768 * Description: select and initialise the mac device (mac100 or Gmac).
1769 */
1770static int stmmac_mac_device_setup(struct net_device *dev)
1771{
1772 struct stmmac_priv *priv = netdev_priv(dev);
1773 unsigned long ioaddr = dev->base_addr;
1774
1775 struct mac_device_info *device;
1776
1777 if (priv->is_gmac)
1778 device = gmac_setup(ioaddr);
1779 else
1780 device = mac100_setup(ioaddr);
1781
1782 if (!device)
1783 return -ENOMEM;
1784
1785 priv->mac_type = device;
1786
1787 priv->wolenabled = priv->mac_type->hw.pmt; /* PMT supported */
1788 if (priv->wolenabled == PMT_SUPPORTED)
1789 priv->wolopts = WAKE_MAGIC; /* Magic Frame */
1790
1791 return 0;
1792}
1793
1794static int stmmacphy_dvr_probe(struct platform_device *pdev)
1795{
1796 struct plat_stmmacphy_data *plat_dat;
1797 plat_dat = (struct plat_stmmacphy_data *)((pdev->dev).platform_data);
1798
1799 pr_debug("stmmacphy_dvr_probe: added phy for bus %d\n",
1800 plat_dat->bus_id);
1801
1802 return 0;
1803}
1804
1805static int stmmacphy_dvr_remove(struct platform_device *pdev)
1806{
1807 return 0;
1808}
1809
1810static struct platform_driver stmmacphy_driver = {
1811 .driver = {
1812 .name = PHY_RESOURCE_NAME,
1813 },
1814 .probe = stmmacphy_dvr_probe,
1815 .remove = stmmacphy_dvr_remove,
1816};
1817
1818/**
1819 * stmmac_associate_phy
1820 * @dev: pointer to device structure
1821 * @data: points to the private structure.
1822 * Description: Scans through all the PHYs we have registered and checks if
1823 * any are associated with our MAC. If so, then just fill in
1824 * the blanks in our local context structure
1825 */
1826static int stmmac_associate_phy(struct device *dev, void *data)
1827{
1828 struct stmmac_priv *priv = (struct stmmac_priv *)data;
1829 struct plat_stmmacphy_data *plat_dat;
1830
1831 plat_dat = (struct plat_stmmacphy_data *)(dev->platform_data);
1832
1833 DBG(probe, DEBUG, "%s: checking phy for bus %d\n", __func__,
1834 plat_dat->bus_id);
1835
1836 /* Check that this phy is for the MAC being initialised */
1837 if (priv->bus_id != plat_dat->bus_id)
1838 return 0;
1839
1840 /* OK, this PHY is connected to the MAC.
1841 Go ahead and get the parameters */
1842 DBG(probe, DEBUG, "%s: OK. Found PHY config\n", __func__);
1843 priv->phy_irq =
1844 platform_get_irq_byname(to_platform_device(dev), "phyirq");
1845 DBG(probe, DEBUG, "%s: PHY irq on bus %d is %d\n", __func__,
1846 plat_dat->bus_id, priv->phy_irq);
1847
1848 /* Override with kernel parameters if supplied XXX CRS XXX
1849 * this needs to have multiple instances */
1850 if ((phyaddr >= 0) && (phyaddr <= 31))
1851 plat_dat->phy_addr = phyaddr;
1852
1853 priv->phy_addr = plat_dat->phy_addr;
1854 priv->phy_mask = plat_dat->phy_mask;
1855 priv->phy_interface = plat_dat->interface;
1856 priv->phy_reset = plat_dat->phy_reset;
1857
1858 DBG(probe, DEBUG, "%s: exiting\n", __func__);
1859 return 1; /* forces exit of driver_for_each_device() */
1860}
1861
1862/**
1863 * stmmac_dvr_probe
1864 * @pdev: platform device pointer
1865 * Description: the driver is initialized through platform_device.
1866 */
1867static int stmmac_dvr_probe(struct platform_device *pdev)
1868{
1869 int ret = 0;
1870 struct resource *res;
1871 unsigned int *addr = NULL;
1872 struct net_device *ndev = NULL;
1873 struct stmmac_priv *priv;
1874 struct plat_stmmacenet_data *plat_dat;
1875
1876 pr_info("STMMAC driver:\n\tplatform registration... ");
1877 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1878 if (!res) {
1879 ret = -ENODEV;
1880 goto out;
1881 }
1882 pr_info("done!\n");
1883
1884	if (!request_mem_region(res->start, resource_size(res),
1885				pdev->name)) {
1886		pr_err("%s: ERROR: memory allocation failed, "
1887		       "cannot get the I/O addr 0x%x\n",
1888 __func__, (unsigned int)res->start);
1889 ret = -EBUSY;
1890 goto out;
1891 }
1892
1893	addr = ioremap(res->start, resource_size(res));
1894	if (!addr) {
1895		pr_err("%s: ERROR: memory mapping failed\n", __func__);
1896 ret = -ENOMEM;
1897 goto out;
1898 }
1899
1900 ndev = alloc_etherdev(sizeof(struct stmmac_priv));
1901 if (!ndev) {
1902 pr_err("%s: ERROR: allocating the device\n", __func__);
1903 ret = -ENOMEM;
1904 goto out;
1905 }
1906
1907 SET_NETDEV_DEV(ndev, &pdev->dev);
1908
1909 /* Get the MAC information */
1910 ndev->irq = platform_get_irq_byname(pdev, "macirq");
1911	if (ndev->irq < 0) {
1912 pr_err("%s: ERROR: MAC IRQ configuration "
1913 "information not found\n", __func__);
1914 ret = -ENODEV;
1915 goto out;
1916 }
1917
1918 priv = netdev_priv(ndev);
1919 priv->device = &(pdev->dev);
1920 priv->dev = ndev;
1921 plat_dat = (struct plat_stmmacenet_data *)((pdev->dev).platform_data);
1922 priv->bus_id = plat_dat->bus_id;
1923 priv->pbl = plat_dat->pbl; /* TLI */
1924 priv->is_gmac = plat_dat->has_gmac; /* GMAC is on board */
1925
1926 platform_set_drvdata(pdev, ndev);
1927
1928 /* Set the I/O base addr */
1929 ndev->base_addr = (unsigned long)addr;
1930
1931	/* MAC HW device detection */
1932 ret = stmmac_mac_device_setup(ndev);
1933 if (ret < 0)
1934 goto out;
1935
1936 /* Network Device Registration */
1937 ret = stmmac_probe(ndev);
1938 if (ret < 0)
1939 goto out;
1940
1941 /* associate a PHY - it is provided by another platform bus */
1942 if (!driver_for_each_device
1943 (&(stmmacphy_driver.driver), NULL, (void *)priv,
1944 stmmac_associate_phy)) {
1945 pr_err("No PHY device is associated with this MAC!\n");
1946 ret = -ENODEV;
1947 goto out;
1948 }
1949
1950 priv->fix_mac_speed = plat_dat->fix_mac_speed;
1951 priv->bsp_priv = plat_dat->bsp_priv;
1952
1953 pr_info("\t%s - (dev. name: %s - id: %d, IRQ #%d\n"
1954 "\tIO base addr: 0x%08x)\n", ndev->name, pdev->name,
1955 pdev->id, ndev->irq, (unsigned int)addr);
1956
1957 /* MDIO bus Registration */
1958 pr_debug("\tMDIO bus (id: %d)...", priv->bus_id);
1959 ret = stmmac_mdio_register(ndev);
1960 if (ret < 0)
1961 goto out;
1962 pr_debug("registered!\n");
1963
1964out:
1965 if (ret < 0) {
1966 platform_set_drvdata(pdev, NULL);
1967		release_mem_region(res->start, resource_size(res));
1968 if (addr != NULL)
1969 iounmap(addr);
1970 }
1971
1972 return ret;
1973}
1974
1975/**
1976 * stmmac_dvr_remove
1977 * @pdev: platform device pointer
1978 * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
1979 * changes the link status, releases the DMA descriptor rings,
1980 * unregisters the MDIO bus and unmaps the allocated memory.
1981 */
1982static int stmmac_dvr_remove(struct platform_device *pdev)
1983{
1984 struct net_device *ndev = platform_get_drvdata(pdev);
1985 struct resource *res;
1986
1987	pr_info("%s:\n\tremoving driver\n", __func__);
1988
1989 stmmac_dma_stop_rx(ndev->base_addr);
1990 stmmac_dma_stop_tx(ndev->base_addr);
1991
1992 stmmac_mac_disable_rx(ndev->base_addr);
1993 stmmac_mac_disable_tx(ndev->base_addr);
1994
1995 netif_carrier_off(ndev);
1996
1997 stmmac_mdio_unregister(ndev);
1998
1999 platform_set_drvdata(pdev, NULL);
2000 unregister_netdev(ndev);
2001
2002 iounmap((void *)ndev->base_addr);
2003 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2004	release_mem_region(res->start, resource_size(res));
2005
2006 free_netdev(ndev);
2007
2008 return 0;
2009}
2010
2011#ifdef CONFIG_PM
2012static int stmmac_suspend(struct platform_device *pdev, pm_message_t state)
2013{
2014 struct net_device *dev = platform_get_drvdata(pdev);
2015 struct stmmac_priv *priv = netdev_priv(dev);
2016 int dis_ic = 0;
2017
2018 if (!dev || !netif_running(dev))
2019 return 0;
2020
2021 spin_lock(&priv->lock);
2022
2023 if (state.event == PM_EVENT_SUSPEND) {
2024 netif_device_detach(dev);
2025 netif_stop_queue(dev);
2026 if (priv->phydev)
2027 phy_stop(priv->phydev);
2028
2029#ifdef CONFIG_STMMAC_TIMER
2030 priv->tm->timer_stop();
2031 dis_ic = 1;
2032#endif
2033 napi_disable(&priv->napi);
2034
2035 /* Stop TX/RX DMA */
2036 stmmac_dma_stop_tx(dev->base_addr);
2037 stmmac_dma_stop_rx(dev->base_addr);
2038 /* Clear the Rx/Tx descriptors */
2039 priv->mac_type->ops->init_rx_desc(priv->dma_rx,
2040 priv->dma_rx_size, dis_ic);
2041 priv->mac_type->ops->init_tx_desc(priv->dma_tx,
2042 priv->dma_tx_size);
2043
2044 stmmac_mac_disable_tx(dev->base_addr);
2045
2046 if (device_may_wakeup(&(pdev->dev))) {
2047 /* Enable Power down mode by programming the PMT regs */
2048 if (priv->wolenabled == PMT_SUPPORTED)
2049 priv->mac_type->ops->pmt(dev->base_addr,
2050 priv->wolopts);
2051 } else {
2052 stmmac_mac_disable_rx(dev->base_addr);
2053 }
2054 } else {
2055 priv->shutdown = 1;
2056		/* Although this can appear slightly redundant, it actually
2057		 * speeds up the standby operation and guarantees that the
2058		 * driver keeps working when hibernating to disk. */
2059 stmmac_release(dev);
2060 }
2061
2062 spin_unlock(&priv->lock);
2063 return 0;
2064}
2065
2066static int stmmac_resume(struct platform_device *pdev)
2067{
2068 struct net_device *dev = platform_get_drvdata(pdev);
2069 struct stmmac_priv *priv = netdev_priv(dev);
2070 unsigned long ioaddr = dev->base_addr;
2071
2072 if (!netif_running(dev))
2073 return 0;
2074
2075 spin_lock(&priv->lock);
2076
2077 if (priv->shutdown) {
2078 /* Re-open the interface and re-init the MAC/DMA
2079 and the rings. */
2080 stmmac_open(dev);
2081 goto out_resume;
2082 }
2083
2084	/* The Power Down bit in the PM register is cleared
2085	 * automatically as soon as a magic packet or a Wake-up frame
2086	 * is received. It is still better to clear it manually,
2087	 * because it can cause problems when resuming from another
2088	 * device (e.g. a serial console). */
2089 if (device_may_wakeup(&(pdev->dev)))
2090 if (priv->wolenabled == PMT_SUPPORTED)
2091 priv->mac_type->ops->pmt(dev->base_addr, 0);
2092
2093 netif_device_attach(dev);
2094
2095 /* Enable the MAC and DMA */
2096 stmmac_mac_enable_rx(ioaddr);
2097 stmmac_mac_enable_tx(ioaddr);
2098 stmmac_dma_start_tx(ioaddr);
2099 stmmac_dma_start_rx(ioaddr);
2100
2101#ifdef CONFIG_STMMAC_TIMER
2102 priv->tm->timer_start(tmrate);
2103#endif
2104 napi_enable(&priv->napi);
2105
2106 if (priv->phydev)
2107 phy_start(priv->phydev);
2108
2109 netif_start_queue(dev);
2110
2111out_resume:
2112 spin_unlock(&priv->lock);
2113 return 0;
2114}
2115#endif
2116
2117static struct platform_driver stmmac_driver = {
2118 .driver = {
2119 .name = STMMAC_RESOURCE_NAME,
2120 },
2121 .probe = stmmac_dvr_probe,
2122 .remove = stmmac_dvr_remove,
2123#ifdef CONFIG_PM
2124 .suspend = stmmac_suspend,
2125 .resume = stmmac_resume,
2126#endif
2127
2128};
2129
2130/**
2131 * stmmac_init_module - Entry point for the driver
2132 * Description: This function is the entry point for the driver.
2133 */
2134static int __init stmmac_init_module(void)
2135{
2136 int ret;
2137
2138 if (platform_driver_register(&stmmacphy_driver)) {
2139		pr_err("stmmac: cannot register the PHY platform driver\n");
2140 return -ENODEV;
2141 }
2142
2143 ret = platform_driver_register(&stmmac_driver);
2144 return ret;
2145}
2146
2147/**
2148 * stmmac_cleanup_module - Cleanup routine for the driver
2149 * Description: This function is the cleanup routine for the driver.
2150 */
2151static void __exit stmmac_cleanup_module(void)
2152{
2153 platform_driver_unregister(&stmmacphy_driver);
2154 platform_driver_unregister(&stmmac_driver);
2155}
2156
2157#ifndef MODULE
2158static int __init stmmac_cmdline_opt(char *str)
2159{
2160 char *opt;
2161
2162 if (!str || !*str)
2163 return -EINVAL;
2164 while ((opt = strsep(&str, ",")) != NULL) {
2165 if (!strncmp(opt, "debug:", 6))
2166 strict_strtoul(opt + 6, 0, (unsigned long *)&debug);
2167 else if (!strncmp(opt, "phyaddr:", 8))
2168 strict_strtoul(opt + 8, 0, (unsigned long *)&phyaddr);
2169 else if (!strncmp(opt, "dma_txsize:", 11))
2170 strict_strtoul(opt + 11, 0,
2171 (unsigned long *)&dma_txsize);
2172 else if (!strncmp(opt, "dma_rxsize:", 11))
2173 strict_strtoul(opt + 11, 0,
2174 (unsigned long *)&dma_rxsize);
2175 else if (!strncmp(opt, "buf_sz:", 7))
2176 strict_strtoul(opt + 7, 0, (unsigned long *)&buf_sz);
2177 else if (!strncmp(opt, "tc:", 3))
2178 strict_strtoul(opt + 3, 0, (unsigned long *)&tc);
2179 else if (!strncmp(opt, "tx_coe:", 7))
2180 strict_strtoul(opt + 7, 0, (unsigned long *)&tx_coe);
2181 else if (!strncmp(opt, "watchdog:", 9))
2182 strict_strtoul(opt + 9, 0, (unsigned long *)&watchdog);
2183 else if (!strncmp(opt, "flow_ctrl:", 10))
2184 strict_strtoul(opt + 10, 0,
2185 (unsigned long *)&flow_ctrl);
2186 else if (!strncmp(opt, "pause:", 6))
2187 strict_strtoul(opt + 6, 0, (unsigned long *)&pause);
2188#ifdef CONFIG_STMMAC_TIMER
2189 else if (!strncmp(opt, "tmrate:", 7))
2190 strict_strtoul(opt + 7, 0, (unsigned long *)&tmrate);
2191#endif
2192 }
2193 return 0;
2194}
2195
2196__setup("stmmaceth=", stmmac_cmdline_opt);
2197#endif
2198
2199module_init(stmmac_init_module);
2200module_exit(stmmac_cleanup_module);
2201
2202MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet driver");
2203MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
2204MODULE_LICENSE("GPL");
diff --git a/drivers/net/stmmac/stmmac_mdio.c b/drivers/net/stmmac/stmmac_mdio.c
new file mode 100644
index 000000000000..8498552a22fc
--- /dev/null
+++ b/drivers/net/stmmac/stmmac_mdio.c
@@ -0,0 +1,217 @@
1/*******************************************************************************
2 STMMAC Ethernet Driver -- MDIO bus implementation
3 Provides Bus interface for MII registers
4
5 Copyright (C) 2007-2009 STMicroelectronics Ltd
6
7 This program is free software; you can redistribute it and/or modify it
8 under the terms and conditions of the GNU General Public License,
9 version 2, as published by the Free Software Foundation.
10
11 This program is distributed in the hope it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
15
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, write to the Free Software Foundation, Inc.,
18 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19
20 The full GNU General Public License is included in this distribution in
21 the file called "COPYING".
22
23 Author: Carl Shaw <carl.shaw@st.com>
24 Maintainer: Giuseppe Cavallaro <peppe.cavallaro@st.com>
25*******************************************************************************/
26
27#include <linux/netdevice.h>
28#include <linux/mii.h>
29#include <linux/phy.h>
30
31#include "stmmac.h"
32
33#define MII_BUSY 0x00000001
34#define MII_WRITE 0x00000002
35
36/**
37 * stmmac_mdio_read
38 * @bus: points to the mii_bus structure
39 * @phyaddr: MII addr reg bits 15-11
40 * @phyreg: MII addr reg bits 10-6
41 * Description: it reads data from the MII register from within the phy device.
 42 * For the 7111 GMAC, we must set bit 0 in the MII address register while
43 * accessing the PHY registers.
44 * Fortunately, it seems this has no drawback for the 7109 MAC.
45 */
46static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
47{
48 struct net_device *ndev = bus->priv;
49 struct stmmac_priv *priv = netdev_priv(ndev);
50 unsigned long ioaddr = ndev->base_addr;
51 unsigned int mii_address = priv->mac_type->hw.mii.addr;
52 unsigned int mii_data = priv->mac_type->hw.mii.data;
53
54 int data;
55 u16 regValue = (((phyaddr << 11) & (0x0000F800)) |
56 ((phyreg << 6) & (0x000007C0)));
57 regValue |= MII_BUSY; /* in case of GMAC */
58
59 do {} while (((readl(ioaddr + mii_address)) & MII_BUSY) == 1);
60 writel(regValue, ioaddr + mii_address);
61 do {} while (((readl(ioaddr + mii_address)) & MII_BUSY) == 1);
62
63 /* Read the data from the MII data register */
64 data = (int)readl(ioaddr + mii_data);
65
66 return data;
67}
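
/*
 * Worked example of the address encoding above (illustrative values):
 * reading register 2 (MII_PHYSID1) of the PHY at address 1 gives
 *
 *	regValue = (1 << 11) | (2 << 6) | MII_BUSY
 *	         = 0x0800   | 0x0080  | 0x0001  = 0x0881
 *
 * i.e. bits 15-11 carry the PHY address, bits 10-6 the register number
 * and bit 0 the busy flag required by the GMAC.
 */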
68
69/**
70 * stmmac_mdio_write
71 * @bus: points to the mii_bus structure
72 * @phyaddr: MII addr reg bits 15-11
73 * @phyreg: MII addr reg bits 10-6
74 * @phydata: phy data
75 * Description: it writes the data into the MII register from within the device.
76 */
77static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
78 u16 phydata)
79{
80 struct net_device *ndev = bus->priv;
81 struct stmmac_priv *priv = netdev_priv(ndev);
82 unsigned long ioaddr = ndev->base_addr;
83 unsigned int mii_address = priv->mac_type->hw.mii.addr;
84 unsigned int mii_data = priv->mac_type->hw.mii.data;
85
86 u16 value =
87 (((phyaddr << 11) & (0x0000F800)) | ((phyreg << 6) & (0x000007C0)))
88 | MII_WRITE;
89
90 value |= MII_BUSY;
91
92 /* Wait until any existing MII operation is complete */
93 do {} while (((readl(ioaddr + mii_address)) & MII_BUSY) == 1);
94
95 /* Set the MII address register to write */
96 writel(phydata, ioaddr + mii_data);
97 writel(value, ioaddr + mii_address);
98
99 /* Wait until any existing MII operation is complete */
100 do {} while (((readl(ioaddr + mii_address)) & MII_BUSY) == 1);
101
102 return 0;
103}
104
105/**
106 * stmmac_mdio_reset
107 * @bus: points to the mii_bus structure
108 * Description: reset the MII bus
109 */
110static int stmmac_mdio_reset(struct mii_bus *bus)
111{
112 struct net_device *ndev = bus->priv;
113 struct stmmac_priv *priv = netdev_priv(ndev);
114 unsigned long ioaddr = ndev->base_addr;
115 unsigned int mii_address = priv->mac_type->hw.mii.addr;
116
117 if (priv->phy_reset) {
118 pr_debug("stmmac_mdio_reset: calling phy_reset\n");
119 priv->phy_reset(priv->bsp_priv);
120 }
121
122 /* This is a workaround for problems with the STE101P PHY.
123 * It doesn't complete its reset until at least one clock cycle
124 * on MDC, so perform a dummy mdio read.
125 */
126 writel(0, ioaddr + mii_address);
127
128 return 0;
129}
130
131/**
132 * stmmac_mdio_register
133 * @ndev: net device structure
134 * Description: it registers the MII bus
135 */
136int stmmac_mdio_register(struct net_device *ndev)
137{
138 int err = 0;
139 struct mii_bus *new_bus;
140 int *irqlist;
141 struct stmmac_priv *priv = netdev_priv(ndev);
142 int addr, found;
143
144 new_bus = mdiobus_alloc();
145 if (new_bus == NULL)
146 return -ENOMEM;
147
148 irqlist = kzalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
149 if (irqlist == NULL) {
150 err = -ENOMEM;
151 goto irqlist_alloc_fail;
152 }
153
154 /* Assign IRQ to phy at address phy_addr */
155 if (priv->phy_addr != -1)
156 irqlist[priv->phy_addr] = priv->phy_irq;
157
158 new_bus->name = "STMMAC MII Bus";
159 new_bus->read = &stmmac_mdio_read;
160 new_bus->write = &stmmac_mdio_write;
161 new_bus->reset = &stmmac_mdio_reset;
162 snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", priv->bus_id);
163 new_bus->priv = ndev;
164 new_bus->irq = irqlist;
165 new_bus->phy_mask = priv->phy_mask;
166 new_bus->parent = priv->device;
167 err = mdiobus_register(new_bus);
168 if (err != 0) {
169 pr_err("%s: Cannot register as MDIO bus\n", new_bus->name);
170 goto bus_register_fail;
171 }
172
173 priv->mii = new_bus;
174
175 found = 0;
176 for (addr = 0; addr < 32; addr++) {
177 struct phy_device *phydev = new_bus->phy_map[addr];
178 if (phydev) {
179 if (priv->phy_addr == -1) {
180 priv->phy_addr = addr;
181 phydev->irq = priv->phy_irq;
182 irqlist[addr] = priv->phy_irq;
183 }
184 pr_info("%s: PHY ID %08x at %d IRQ %d (%s)%s\n",
185 ndev->name, phydev->phy_id, addr,
186 phydev->irq, dev_name(&phydev->dev),
187 (addr == priv->phy_addr) ? " active" : "");
188 found = 1;
189 }
190 }
191
192 if (!found)
193 pr_warning("%s: No PHY found\n", ndev->name);
194
195 return 0;
196bus_register_fail:
197 kfree(irqlist);
198irqlist_alloc_fail:
199	mdiobus_free(new_bus);
200 return err;
201}
202
203/**
204 * stmmac_mdio_unregister
205 * @ndev: net device structure
206 * Description: it unregisters the MII bus
207 */
208int stmmac_mdio_unregister(struct net_device *ndev)
209{
210 struct stmmac_priv *priv = netdev_priv(ndev);
211
212 mdiobus_unregister(priv->mii);
213 priv->mii->priv = NULL;
214	mdiobus_free(priv->mii);
215
216 return 0;
217}
diff --git a/drivers/net/stmmac/stmmac_timer.c b/drivers/net/stmmac/stmmac_timer.c
new file mode 100644
index 000000000000..b838c6582077
--- /dev/null
+++ b/drivers/net/stmmac/stmmac_timer.c
@@ -0,0 +1,140 @@
1/*******************************************************************************
2 STMMAC external timer support.
3
4 Copyright (C) 2007-2009 STMicroelectronics Ltd
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
23*******************************************************************************/
24
25#include <linux/kernel.h>
26#include <linux/etherdevice.h>
27#include "stmmac_timer.h"
28
29static void stmmac_timer_handler(void *data)
30{
31 struct net_device *dev = (struct net_device *)data;
32
33 stmmac_schedule(dev);
34
35 return;
36}
37
38#define STMMAC_TIMER_MSG(timer, freq) \
39printk(KERN_INFO "stmmac_timer: %s Timer ON (freq %dHz)\n", timer, freq);
40
41#if defined(CONFIG_STMMAC_RTC_TIMER)
42#include <linux/rtc.h>
43static struct rtc_device *stmmac_rtc;
44static rtc_task_t stmmac_task;
45
46static void stmmac_rtc_start(unsigned int new_freq)
47{
48 rtc_irq_set_freq(stmmac_rtc, &stmmac_task, new_freq);
49 rtc_irq_set_state(stmmac_rtc, &stmmac_task, 1);
50 return;
51}
52
53static void stmmac_rtc_stop(void)
54{
55 rtc_irq_set_state(stmmac_rtc, &stmmac_task, 0);
56 return;
57}
58
59int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm)
60{
61 stmmac_task.private_data = dev;
62 stmmac_task.func = stmmac_timer_handler;
63
64 stmmac_rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE);
65 if (stmmac_rtc == NULL) {
 66		pr_err("open rtc device failed\n");
67 return -ENODEV;
68 }
69
70 rtc_irq_register(stmmac_rtc, &stmmac_task);
71
 72	/* Fails if periodic mode is not supported by the RTC */
73 if ((rtc_irq_set_freq(stmmac_rtc, &stmmac_task, tm->freq) < 0)) {
 74		pr_err("set periodic failed\n");
75 rtc_irq_unregister(stmmac_rtc, &stmmac_task);
76 rtc_class_close(stmmac_rtc);
77 return -1;
78 }
79
80 STMMAC_TIMER_MSG(CONFIG_RTC_HCTOSYS_DEVICE, tm->freq);
81
82 tm->timer_start = stmmac_rtc_start;
83 tm->timer_stop = stmmac_rtc_stop;
84
85 return 0;
86}
87
88int stmmac_close_ext_timer(void)
89{
90 rtc_irq_set_state(stmmac_rtc, &stmmac_task, 0);
91 rtc_irq_unregister(stmmac_rtc, &stmmac_task);
92 rtc_class_close(stmmac_rtc);
93 return 0;
94}
95
96#elif defined(CONFIG_STMMAC_TMU_TIMER)
97#include <linux/clk.h>
98#define TMU_CHANNEL "tmu2_clk"
99static struct clk *timer_clock;
100
101static void stmmac_tmu_start(unsigned int new_freq)
102{
103 clk_set_rate(timer_clock, new_freq);
104 clk_enable(timer_clock);
105 return;
106}
107
108static void stmmac_tmu_stop(void)
109{
110 clk_disable(timer_clock);
111 return;
112}
113
114int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm)
115{
116 timer_clock = clk_get(NULL, TMU_CHANNEL);
117
118 if (timer_clock == NULL)
119 return -1;
120
121 if (tmu2_register_user(stmmac_timer_handler, (void *)dev) < 0) {
122 timer_clock = NULL;
123 return -1;
124 }
125
126 STMMAC_TIMER_MSG("TMU2", tm->freq);
127 tm->timer_start = stmmac_tmu_start;
128 tm->timer_stop = stmmac_tmu_stop;
129
130 return 0;
131}
132
133int stmmac_close_ext_timer(void)
134{
135 clk_disable(timer_clock);
136 tmu2_unregister_user();
137 clk_put(timer_clock);
138 return 0;
139}
140#endif
diff --git a/drivers/net/stmmac/stmmac_timer.h b/drivers/net/stmmac/stmmac_timer.h
new file mode 100644
index 000000000000..f795cae33725
--- /dev/null
+++ b/drivers/net/stmmac/stmmac_timer.h
@@ -0,0 +1,41 @@
1/*******************************************************************************
2 STMMAC external timer Header File.
3
4 Copyright (C) 2007-2009 STMicroelectronics Ltd
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
23*******************************************************************************/
24
25struct stmmac_timer {
26 void (*timer_start) (unsigned int new_freq);
27 void (*timer_stop) (void);
28 unsigned int freq;
29};
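
/* Hedged usage sketch (not compiled; flow assumed from the callers in
 * stmmac_main.c): once stmmac_open_ext_timer() has filled in the ops,
 * the caller no longer cares which hardware backs them:
 */
#if 0
	struct stmmac_timer tm = { .freq = 256 };

	if (stmmac_open_ext_timer(dev, &tm) == 0) {
		tm.timer_start(tm.freq);	/* arm a 256 Hz mitigation tick */
		/* ... run ... */
		tm.timer_stop();		/* quiesce it again */
		stmmac_close_ext_timer();	/* and release the device */
	}
#endif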
30
31/* Open the HW timer device and return 0 in case of success */
32int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm);
33/* Stop the timer and release it */
34int stmmac_close_ext_timer(void);
35/* Function used for scheduling task within the stmmac */
36void stmmac_schedule(struct net_device *dev);
37
38#if defined(CONFIG_STMMAC_TMU_TIMER)
39extern int tmu2_register_user(void *fnt, void *data);
40extern void tmu2_unregister_user(void);
41#endif
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 556512dc6072..e78486504d3a 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -451,7 +451,7 @@ static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
451 vi->dev->stats.tx_bytes += skb->len; 451 vi->dev->stats.tx_bytes += skb->len;
452 vi->dev->stats.tx_packets++; 452 vi->dev->stats.tx_packets++;
453 tot_sgs += skb_vnet_hdr(skb)->num_sg; 453 tot_sgs += skb_vnet_hdr(skb)->num_sg;
454 kfree_skb(skb); 454 dev_kfree_skb_any(skb);
455 } 455 }
456 return tot_sgs; 456 return tot_sgs;
457} 457}
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 44fb0c5a2800..004353a46af0 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -481,7 +481,8 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
481 } 481 }
482 rq->uncommitted[ring_idx] += num_allocated; 482 rq->uncommitted[ring_idx] += num_allocated;
483 483
484 dprintk(KERN_ERR "alloc_rx_buf: %d allocated, next2fill %u, next2comp " 484 dev_dbg(&adapter->netdev->dev,
485 "alloc_rx_buf: %d allocated, next2fill %u, next2comp "
485 "%u, uncommited %u\n", num_allocated, ring->next2fill, 486 "%u, uncommited %u\n", num_allocated, ring->next2fill,
486 ring->next2comp, rq->uncommitted[ring_idx]); 487 ring->next2comp, rq->uncommitted[ring_idx]);
487 488
@@ -539,7 +540,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
539 tbi = tq->buf_info + tq->tx_ring.next2fill; 540 tbi = tq->buf_info + tq->tx_ring.next2fill;
540 tbi->map_type = VMXNET3_MAP_NONE; 541 tbi->map_type = VMXNET3_MAP_NONE;
541 542
542 dprintk(KERN_ERR "txd[%u]: 0x%Lx 0x%x 0x%x\n", 543 dev_dbg(&adapter->netdev->dev,
544 "txd[%u]: 0x%Lx 0x%x 0x%x\n",
543 tq->tx_ring.next2fill, ctx->sop_txd->txd.addr, 545 tq->tx_ring.next2fill, ctx->sop_txd->txd.addr,
544 ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]); 546 ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
545 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); 547 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
@@ -572,7 +574,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
572 gdesc->dword[2] = dw2 | buf_size; 574 gdesc->dword[2] = dw2 | buf_size;
573 gdesc->dword[3] = 0; 575 gdesc->dword[3] = 0;
574 576
575 dprintk(KERN_ERR "txd[%u]: 0x%Lx 0x%x 0x%x\n", 577 dev_dbg(&adapter->netdev->dev,
578 "txd[%u]: 0x%Lx 0x%x 0x%x\n",
576 tq->tx_ring.next2fill, gdesc->txd.addr, 579 tq->tx_ring.next2fill, gdesc->txd.addr,
577 gdesc->dword[2], gdesc->dword[3]); 580 gdesc->dword[2], gdesc->dword[3]);
578 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); 581 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
@@ -600,7 +603,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
600 gdesc->dword[2] = dw2 | frag->size; 603 gdesc->dword[2] = dw2 | frag->size;
601 gdesc->dword[3] = 0; 604 gdesc->dword[3] = 0;
602 605
603 dprintk(KERN_ERR "txd[%u]: 0x%llu %u %u\n", 606 dev_dbg(&adapter->netdev->dev,
607 "txd[%u]: 0x%llu %u %u\n",
604 tq->tx_ring.next2fill, gdesc->txd.addr, 608 tq->tx_ring.next2fill, gdesc->txd.addr,
605 gdesc->dword[2], gdesc->dword[3]); 609 gdesc->dword[2], gdesc->dword[3]);
606 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); 610 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
@@ -697,7 +701,8 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
697 tdd = tq->data_ring.base + tq->tx_ring.next2fill; 701 tdd = tq->data_ring.base + tq->tx_ring.next2fill;
698 702
699 memcpy(tdd->data, skb->data, ctx->copy_size); 703 memcpy(tdd->data, skb->data, ctx->copy_size);
700 dprintk(KERN_ERR "copy %u bytes to dataRing[%u]\n", 704 dev_dbg(&adapter->netdev->dev,
705 "copy %u bytes to dataRing[%u]\n",
701 ctx->copy_size, tq->tx_ring.next2fill); 706 ctx->copy_size, tq->tx_ring.next2fill);
702 return 1; 707 return 1;
703 708
@@ -808,7 +813,8 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
808 813
809 if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) { 814 if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
810 tq->stats.tx_ring_full++; 815 tq->stats.tx_ring_full++;
811 dprintk(KERN_ERR "tx queue stopped on %s, next2comp %u" 816 dev_dbg(&adapter->netdev->dev,
817 "tx queue stopped on %s, next2comp %u"
812 " next2fill %u\n", adapter->netdev->name, 818 " next2fill %u\n", adapter->netdev->name,
813 tq->tx_ring.next2comp, tq->tx_ring.next2fill); 819 tq->tx_ring.next2comp, tq->tx_ring.next2fill);
814 820
@@ -853,7 +859,8 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
853 859
854 /* finally flips the GEN bit of the SOP desc */ 860 /* finally flips the GEN bit of the SOP desc */
855 gdesc->dword[2] ^= VMXNET3_TXD_GEN; 861 gdesc->dword[2] ^= VMXNET3_TXD_GEN;
856 dprintk(KERN_ERR "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n", 862 dev_dbg(&adapter->netdev->dev,
863 "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
857 (u32)((union Vmxnet3_GenericDesc *)ctx.sop_txd - 864 (u32)((union Vmxnet3_GenericDesc *)ctx.sop_txd -
858 tq->tx_ring.base), gdesc->txd.addr, gdesc->dword[2], 865 tq->tx_ring.base), gdesc->txd.addr, gdesc->dword[2],
859 gdesc->dword[3]); 866 gdesc->dword[3]);
@@ -990,7 +997,8 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
990 if (unlikely(rcd->len == 0)) { 997 if (unlikely(rcd->len == 0)) {
991 /* Pretend the rx buffer is skipped. */ 998 /* Pretend the rx buffer is skipped. */
992 BUG_ON(!(rcd->sop && rcd->eop)); 999 BUG_ON(!(rcd->sop && rcd->eop));
993 dprintk(KERN_ERR "rxRing[%u][%u] 0 length\n", 1000 dev_dbg(&adapter->netdev->dev,
1001 "rxRing[%u][%u] 0 length\n",
994 ring_idx, idx); 1002 ring_idx, idx);
995 goto rcd_done; 1003 goto rcd_done;
996 } 1004 }
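
Note on the hunks above: they convert the driver's private dprintk(KERN_ERR ...) macro to dev_dbg(). A minimal sketch of the pattern, assuming a generic driver with an embedded net_device (the function and index names here are illustrative, not from the patch):

#include <linux/device.h>	/* dev_dbg() */
#include <linux/netdevice.h>

static void example_tx_trace(struct net_device *netdev, unsigned int idx)
{
	/* dev_dbg() is compiled out unless DEBUG or dynamic debug is
	 * enabled, and prefixes output with the device name; the old
	 * dprintk(KERN_ERR ...) logged unconditionally at error
	 * severity. */
	dev_dbg(&netdev->dev, "txd[%u] filled\n", idx);
}

Passing &adapter->netdev->dev is also why vmxnet3_int.h gains #include <linux/device.h> further down.
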
@@ -1314,9 +1322,11 @@ vmxnet3_netpoll(struct net_device *netdev)
1314 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 1322 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1315 int irq; 1323 int irq;
1316 1324
1325#ifdef CONFIG_PCI_MSI
1317 if (adapter->intr.type == VMXNET3_IT_MSIX) 1326 if (adapter->intr.type == VMXNET3_IT_MSIX)
1318 irq = adapter->intr.msix_entries[0].vector; 1327 irq = adapter->intr.msix_entries[0].vector;
1319 else 1328 else
1329#endif
1320 irq = adapter->pdev->irq; 1330 irq = adapter->pdev->irq;
1321 1331
1322 disable_irq(irq); 1332 disable_irq(irq);
@@ -1330,12 +1340,15 @@ vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
1330{ 1340{
1331 int err; 1341 int err;
1332 1342
1343#ifdef CONFIG_PCI_MSI
1333 if (adapter->intr.type == VMXNET3_IT_MSIX) { 1344 if (adapter->intr.type == VMXNET3_IT_MSIX) {
1334 /* we only use 1 MSI-X vector */ 1345 /* we only use 1 MSI-X vector */
1335 err = request_irq(adapter->intr.msix_entries[0].vector, 1346 err = request_irq(adapter->intr.msix_entries[0].vector,
1336 vmxnet3_intr, 0, adapter->netdev->name, 1347 vmxnet3_intr, 0, adapter->netdev->name,
1337 adapter->netdev); 1348 adapter->netdev);
1338 } else if (adapter->intr.type == VMXNET3_IT_MSI) { 1349 } else
1350#endif
1351 if (adapter->intr.type == VMXNET3_IT_MSI) {
1339 err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0, 1352 err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
1340 adapter->netdev->name, adapter->netdev); 1353 adapter->netdev->name, adapter->netdev);
1341 } else { 1354 } else {
@@ -1376,6 +1389,7 @@ vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
1376 adapter->intr.num_intrs <= 0); 1389 adapter->intr.num_intrs <= 0);
1377 1390
1378 switch (adapter->intr.type) { 1391 switch (adapter->intr.type) {
1392#ifdef CONFIG_PCI_MSI
1379 case VMXNET3_IT_MSIX: 1393 case VMXNET3_IT_MSIX:
1380 { 1394 {
1381 int i; 1395 int i;
@@ -1385,6 +1399,7 @@ vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
1385 adapter->netdev); 1399 adapter->netdev);
1386 break; 1400 break;
1387 } 1401 }
1402#endif
1388 case VMXNET3_IT_MSI: 1403 case VMXNET3_IT_MSI:
1389 free_irq(adapter->pdev->irq, adapter->netdev); 1404 free_irq(adapter->pdev->irq, adapter->netdev);
1390 break; 1405 break;
@@ -1676,7 +1691,8 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
1676 int err; 1691 int err;
1677 u32 ret; 1692 u32 ret;
1678 1693
1679 dprintk(KERN_ERR "%s: skb_buf_size %d, rx_buf_per_pkt %d, ring sizes" 1694 dev_dbg(&adapter->netdev->dev,
1695 "%s: skb_buf_size %d, rx_buf_per_pkt %d, ring sizes"
1680 " %u %u %u\n", adapter->netdev->name, adapter->skb_buf_size, 1696 " %u %u %u\n", adapter->netdev->name, adapter->skb_buf_size,
1681 adapter->rx_buf_per_pkt, adapter->tx_queue.tx_ring.size, 1697 adapter->rx_buf_per_pkt, adapter->tx_queue.tx_ring.size,
1682 adapter->rx_queue.rx_ring[0].size, 1698 adapter->rx_queue.rx_ring[0].size,
@@ -2134,6 +2150,7 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
2134 if (adapter->intr.type == VMXNET3_IT_AUTO) { 2150 if (adapter->intr.type == VMXNET3_IT_AUTO) {
2135 int err; 2151 int err;
2136 2152
2153#ifdef CONFIG_PCI_MSI
2137 adapter->intr.msix_entries[0].entry = 0; 2154 adapter->intr.msix_entries[0].entry = 0;
2138 err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries, 2155 err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries,
2139 VMXNET3_LINUX_MAX_MSIX_VECT); 2156 VMXNET3_LINUX_MAX_MSIX_VECT);
@@ -2142,6 +2159,7 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
2142 adapter->intr.type = VMXNET3_IT_MSIX; 2159 adapter->intr.type = VMXNET3_IT_MSIX;
2143 return; 2160 return;
2144 } 2161 }
2162#endif
2145 2163
2146 err = pci_enable_msi(adapter->pdev); 2164 err = pci_enable_msi(adapter->pdev);
2147 if (!err) { 2165 if (!err) {
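
The remaining vmxnet3 hunks fence all MSI-X use behind CONFIG_PCI_MSI, since pci_enable_msix() and the msix_entries[] array exist only in kernels built with that option. A condensed sketch of the guard pattern (vmxnet3_try_msix() is a hypothetical stand-in for the pci_enable_msix() sequence in the hunk above):

static void example_alloc_intr(struct vmxnet3_adapter *adapter)
{
#ifdef CONFIG_PCI_MSI
	/* Only reference MSI-X symbols when the kernel provides them;
	 * otherwise fall straight through to plain MSI. */
	if (vmxnet3_try_msix(adapter)) {
		adapter->intr.type = VMXNET3_IT_MSIX;
		return;
	}
#endif
	if (!pci_enable_msi(adapter->pdev))
		adapter->intr.type = VMXNET3_IT_MSI;
}
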
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 6bb91576e999..3c0d70d58111 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -30,6 +30,7 @@
30#include <linux/types.h> 30#include <linux/types.h>
31#include <linux/ethtool.h> 31#include <linux/ethtool.h>
32#include <linux/delay.h> 32#include <linux/delay.h>
33#include <linux/device.h>
33#include <linux/netdevice.h> 34#include <linux/netdevice.h>
34#include <linux/pci.h> 35#include <linux/pci.h>
35#include <linux/ethtool.h> 36#include <linux/ethtool.h>
@@ -59,7 +60,6 @@
59#include <linux/if_vlan.h> 60#include <linux/if_vlan.h>
60#include <linux/if_arp.h> 61#include <linux/if_arp.h>
61#include <linux/inetdevice.h> 62#include <linux/inetdevice.h>
62#include <linux/dst.h>
63 63
64#include "vmxnet3_defs.h" 64#include "vmxnet3_defs.h"
65 65
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index 37f3aea074a5..773b10fa38e4 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -130,11 +130,11 @@ struct inet_timewait_sock {
130 __u16 tw_num; 130 __u16 tw_num;
131 kmemcheck_bitfield_begin(flags); 131 kmemcheck_bitfield_begin(flags);
132 /* And these are ours. */ 132 /* And these are ours. */
133 __u8 tw_ipv6only:1, 133 unsigned int tw_ipv6only : 1,
134 tw_transparent:1; 134 tw_transparent : 1,
135 /* 14 bits hole, try to pack */ 135 tw_pad : 14, /* 14 bits hole */
136 tw_ipv6_offset : 16;
136 kmemcheck_bitfield_end(flags); 137 kmemcheck_bitfield_end(flags);
137 __u16 tw_ipv6_offset;
138 unsigned long tw_ttd; 138 unsigned long tw_ttd;
139 struct inet_bind_bucket *tw_tb; 139 struct inet_bind_bucket *tw_tb;
140 struct hlist_node tw_death_node; 140 struct hlist_node tw_death_node;
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 7f939ce29801..2bc6f6a8de68 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -92,6 +92,8 @@ static void add_conn(struct work_struct *work)
92 92
93 dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle); 93 dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle);
94 94
95 dev_set_drvdata(&conn->dev, conn);
96
95 if (device_add(&conn->dev) < 0) { 97 if (device_add(&conn->dev) < 0) {
96 BT_ERR("Failed to register connection device"); 98 BT_ERR("Failed to register connection device");
97 return; 99 return;
@@ -144,8 +146,6 @@ void hci_conn_init_sysfs(struct hci_conn *conn)
144 conn->dev.class = bt_class; 146 conn->dev.class = bt_class;
145 conn->dev.parent = &hdev->dev; 147 conn->dev.parent = &hdev->dev;
146 148
147 dev_set_drvdata(&conn->dev, conn);
148
149 device_initialize(&conn->dev); 149 device_initialize(&conn->dev);
150 150
151 INIT_WORK(&conn->work_add, add_conn); 151 INIT_WORK(&conn->work_add, add_conn);
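
The hci_sysfs hunks move dev_set_drvdata() from hci_conn_init_sysfs() into add_conn(), immediately before device_add(). The rule being enforced: once device_add() returns, sysfs attribute callbacks may run, so drvdata must already be valid. A minimal sketch (my_conn is a hypothetical type):

static void example_add(struct device *dev, struct my_conn *conn)
{
	dev_set_drvdata(dev, conn);	/* must precede device_add() */
	if (device_add(dev) < 0)
		pr_err("device_add failed\n");
}
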
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index 4b66bd579f4a..d65101d92ee5 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -555,12 +555,12 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
555 555
556 conn->feat_mask = 0; 556 conn->feat_mask = 0;
557 557
558 setup_timer(&conn->info_timer, l2cap_info_timeout,
559 (unsigned long) conn);
560
561 spin_lock_init(&conn->lock); 558 spin_lock_init(&conn->lock);
562 rwlock_init(&conn->chan_list.lock); 559 rwlock_init(&conn->chan_list.lock);
563 560
561 setup_timer(&conn->info_timer, l2cap_info_timeout,
562 (unsigned long) conn);
563
564 conn->disc_reason = 0x13; 564 conn->disc_reason = 0x13;
565 565
566 return conn; 566 return conn;
@@ -783,6 +783,9 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent)
783 /* Default config options */ 783 /* Default config options */
784 pi->conf_len = 0; 784 pi->conf_len = 0;
785 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO; 785 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
786 skb_queue_head_init(TX_QUEUE(sk));
787 skb_queue_head_init(SREJ_QUEUE(sk));
788 INIT_LIST_HEAD(SREJ_LIST(sk));
786} 789}
787 790
788static struct proto l2cap_proto = { 791static struct proto l2cap_proto = {
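
The l2cap_sock_init() hunk initializes the ERTM transmit and SREJ queues at socket-init time, so they are valid before any ERTM code path touches them. For reference, skb_queue_head_init() sets up both the list head and the queue's spinlock; a generic sketch (struct and field names illustrative):

#include <linux/skbuff.h>

struct example_sock_data {
	struct sk_buff_head tx_q;
};

static void example_init(struct example_sock_data *d)
{
	/* Using the queue before this runs means touching an
	 * uninitialized spinlock and list head. */
	skb_queue_head_init(&d->tx_q);
}
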
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 1da0e038df78..5ce017bf4afa 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -335,6 +335,7 @@ struct pktgen_dev {
335 __u32 cur_src_mac_offset; 335 __u32 cur_src_mac_offset;
336 __be32 cur_saddr; 336 __be32 cur_saddr;
337 __be32 cur_daddr; 337 __be32 cur_daddr;
338 __u16 ip_id;
338 __u16 cur_udp_dst; 339 __u16 cur_udp_dst;
339 __u16 cur_udp_src; 340 __u16 cur_udp_src;
340 __u16 cur_queue_map; 341 __u16 cur_queue_map;
@@ -2630,6 +2631,8 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
2630 iph->protocol = IPPROTO_UDP; /* UDP */ 2631 iph->protocol = IPPROTO_UDP; /* UDP */
2631 iph->saddr = pkt_dev->cur_saddr; 2632 iph->saddr = pkt_dev->cur_saddr;
2632 iph->daddr = pkt_dev->cur_daddr; 2633 iph->daddr = pkt_dev->cur_daddr;
2634 iph->id = htons(pkt_dev->ip_id);
2635 pkt_dev->ip_id++;
2633 iph->frag_off = 0; 2636 iph->frag_off = 0;
2634 iplen = 20 + 8 + datalen; 2637 iplen = 20 + 8 + datalen;
2635 iph->tot_len = htons(iplen); 2638 iph->tot_len = htons(iplen);
@@ -2641,24 +2644,26 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
2641 skb->dev = odev; 2644 skb->dev = odev;
2642 skb->pkt_type = PACKET_HOST; 2645 skb->pkt_type = PACKET_HOST;
2643 2646
2644 if (pkt_dev->nfrags <= 0) 2647 if (pkt_dev->nfrags <= 0) {
2645 pgh = (struct pktgen_hdr *)skb_put(skb, datalen); 2648 pgh = (struct pktgen_hdr *)skb_put(skb, datalen);
2646 else { 2649 memset(pgh + 1, 0, datalen - sizeof(struct pktgen_hdr));
2650 } else {
2647 int frags = pkt_dev->nfrags; 2651 int frags = pkt_dev->nfrags;
2648 int i; 2652 int i, len;
2649 2653
2650 pgh = (struct pktgen_hdr *)(((char *)(udph)) + 8); 2654 pgh = (struct pktgen_hdr *)(((char *)(udph)) + 8);
2651 2655
2652 if (frags > MAX_SKB_FRAGS) 2656 if (frags > MAX_SKB_FRAGS)
2653 frags = MAX_SKB_FRAGS; 2657 frags = MAX_SKB_FRAGS;
2654 if (datalen > frags * PAGE_SIZE) { 2658 if (datalen > frags * PAGE_SIZE) {
2655 skb_put(skb, datalen - frags * PAGE_SIZE); 2659 len = datalen - frags * PAGE_SIZE;
2660 memset(skb_put(skb, len), 0, len);
2656 datalen = frags * PAGE_SIZE; 2661 datalen = frags * PAGE_SIZE;
2657 } 2662 }
2658 2663
2659 i = 0; 2664 i = 0;
2660 while (datalen > 0) { 2665 while (datalen > 0) {
2661 struct page *page = alloc_pages(GFP_KERNEL, 0); 2666 struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
2662 skb_shinfo(skb)->frags[i].page = page; 2667 skb_shinfo(skb)->frags[i].page = page;
2663 skb_shinfo(skb)->frags[i].page_offset = 0; 2668 skb_shinfo(skb)->frags[i].page_offset = 0;
2664 skb_shinfo(skb)->frags[i].size = 2669 skb_shinfo(skb)->frags[i].size =
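
The pktgen change zeroes every byte handed to the device: memset() for linear skb data and __GFP_ZERO for fragment pages, so generated packets no longer carry stale kernel memory onto the wire; it also stamps each packet with a sequential IP ID (pkt_dev->ip_id). The core of the fix as a fragment, following the pattern above:

	/* skb_put() reserves tail room but does not clear it. */
	len = datalen - frags * PAGE_SIZE;
	memset(skb_put(skb, len), 0, len);

	/* Fragment pages come pre-zeroed from the allocator. */
	page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
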
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index f6a0af759932..26fb50e91311 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -447,6 +447,28 @@ extern int sysctl_tcp_synack_retries;
447 447
448EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add); 448EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);
449 449
450/* Decide when to expire the request and when to resend SYN-ACK */
451static inline void syn_ack_recalc(struct request_sock *req, const int thresh,
452 const int max_retries,
453 const u8 rskq_defer_accept,
454 int *expire, int *resend)
455{
456 if (!rskq_defer_accept) {
457 *expire = req->retrans >= thresh;
458 *resend = 1;
459 return;
460 }
461 *expire = req->retrans >= thresh &&
462 (!inet_rsk(req)->acked || req->retrans >= max_retries);
463 /*
464 * Do not resend while waiting for data after ACK,
465 * start to resend on end of deferring period to give
466 * last chance for data or ACK to create established socket.
467 */
468 *resend = !inet_rsk(req)->acked ||
469 req->retrans >= rskq_defer_accept - 1;
470}
471
450void inet_csk_reqsk_queue_prune(struct sock *parent, 472void inet_csk_reqsk_queue_prune(struct sock *parent,
451 const unsigned long interval, 473 const unsigned long interval,
452 const unsigned long timeout, 474 const unsigned long timeout,
@@ -502,9 +524,15 @@ void inet_csk_reqsk_queue_prune(struct sock *parent,
502 reqp=&lopt->syn_table[i]; 524 reqp=&lopt->syn_table[i];
503 while ((req = *reqp) != NULL) { 525 while ((req = *reqp) != NULL) {
504 if (time_after_eq(now, req->expires)) { 526 if (time_after_eq(now, req->expires)) {
505 if ((req->retrans < thresh || 527 int expire = 0, resend = 0;
506 (inet_rsk(req)->acked && req->retrans < max_retries)) 528
507 && !req->rsk_ops->rtx_syn_ack(parent, req)) { 529 syn_ack_recalc(req, thresh, max_retries,
530 queue->rskq_defer_accept,
531 &expire, &resend);
532 if (!expire &&
533 (!resend ||
534 !req->rsk_ops->rtx_syn_ack(parent, req) ||
535 inet_rsk(req)->acked)) {
508 unsigned long timeo; 536 unsigned long timeo;
509 537
510 if (req->retrans++ == 0) 538 if (req->retrans++ == 0)
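
The new syn_ack_recalc() helper centralizes the TCP_DEFER_ACCEPT expiry/resend decision that was previously an inline condition in the prune loop. A user-space transcription with one worked call (values illustrative):

#include <stdio.h>

struct req { int retrans; int acked; };

static void syn_ack_recalc(const struct req *req, int thresh,
			   int max_retries, int defer_accept,
			   int *expire, int *resend)
{
	if (!defer_accept) {
		*expire = req->retrans >= thresh;
		*resend = 1;
		return;
	}
	*expire = req->retrans >= thresh &&
		  (!req->acked || req->retrans >= max_retries);
	/* Do not resend while waiting for data after the bare ACK;
	 * resend once the deferring period is about to end. */
	*resend = !req->acked || req->retrans >= defer_accept - 1;
}

int main(void)
{
	struct req r = { .retrans = 3, .acked = 1 };
	int expire, resend;

	/* defer_accept = 5: an ACKed request is neither expired nor
	 * resent until retrans reaches defer_accept - 1 = 4. */
	syn_ack_recalc(&r, 5, 8, 5, &expire, &resend);
	printf("expire=%d resend=%d\n", expire, resend); /* 0 0 */
	return 0;
}
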
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 2445fedec0b8..a72f43ce33be 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -634,17 +634,16 @@ static int do_ip_setsockopt(struct sock *sk, int level,
634 break; 634 break;
635 } 635 }
636 dev = ip_dev_find(sock_net(sk), mreq.imr_address.s_addr); 636 dev = ip_dev_find(sock_net(sk), mreq.imr_address.s_addr);
637 if (dev) { 637 if (dev)
638 mreq.imr_ifindex = dev->ifindex; 638 mreq.imr_ifindex = dev->ifindex;
639 dev_put(dev);
640 }
641 } else 639 } else
642 dev = __dev_get_by_index(sock_net(sk), mreq.imr_ifindex); 640 dev = dev_get_by_index(sock_net(sk), mreq.imr_ifindex);
643 641
644 642
645 err = -EADDRNOTAVAIL; 643 err = -EADDRNOTAVAIL;
646 if (!dev) 644 if (!dev)
647 break; 645 break;
646 dev_put(dev);
648 647
649 err = -EINVAL; 648 err = -EINVAL;
650 if (sk->sk_bound_dev_if && 649 if (sk->sk_bound_dev_if &&
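
The ip_sockglue fix swaps __dev_get_by_index(), which returns an unreferenced pointer and is only safe under RTNL, for dev_get_by_index(), which takes a reference the caller must release. Since only an existence check is needed here, dev_put() follows immediately. The pattern, as a fragment:

	struct net_device *dev;

	dev = dev_get_by_index(net, ifindex);	/* holds a reference */
	if (!dev)
		return -ENODEV;
	dev_put(dev);	/* existence confirmed; drop the reference */

The ipv6_sockglue.c hunk further down applies the same conversion.
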
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 206a291dff03..e0cfa633680a 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -326,6 +326,43 @@ void tcp_enter_memory_pressure(struct sock *sk)
326 326
327EXPORT_SYMBOL(tcp_enter_memory_pressure); 327EXPORT_SYMBOL(tcp_enter_memory_pressure);
328 328
329/* Convert seconds to retransmits based on initial and max timeout */
330static u8 secs_to_retrans(int seconds, int timeout, int rto_max)
331{
332 u8 res = 0;
333
334 if (seconds > 0) {
335 int period = timeout;
336
337 res = 1;
338 while (seconds > period && res < 255) {
339 res++;
340 timeout <<= 1;
341 if (timeout > rto_max)
342 timeout = rto_max;
343 period += timeout;
344 }
345 }
346 return res;
347}
348
349/* Convert retransmits to seconds based on initial and max timeout */
350static int retrans_to_secs(u8 retrans, int timeout, int rto_max)
351{
352 int period = 0;
353
354 if (retrans > 0) {
355 period = timeout;
356 while (--retrans) {
357 timeout <<= 1;
358 if (timeout > rto_max)
359 timeout = rto_max;
360 period += timeout;
361 }
362 }
363 return period;
364}
365
329/* 366/*
330 * Wait for a TCP event. 367 * Wait for a TCP event.
331 * 368 *
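
secs_to_retrans() and retrans_to_secs() replace the open-coded loops in the TCP_DEFER_ACCEPT set/getsockopt paths below. A worked example, transcribing secs_to_retrans() to user space (TCP_TIMEOUT_INIT/HZ = 3 and TCP_RTO_MAX/HZ = 120 under common configurations):

#include <stdio.h>

static unsigned char secs_to_retrans(int seconds, int timeout, int rto_max)
{
	unsigned char res = 0;

	if (seconds > 0) {
		int period = timeout;

		res = 1;
		while (seconds > period && res < 255) {
			res++;
			timeout <<= 1;
			if (timeout > rto_max)
				timeout = rto_max;
			period += timeout;
		}
	}
	return res;
}

int main(void)
{
	/* Retransmit windows end at 3, 9, 21, 45, ... seconds, so a
	 * TCP_DEFER_ACCEPT of 10 s maps to 3 retransmits; reading the
	 * option back via retrans_to_secs() then reports 21 s. */
	printf("%u\n", secs_to_retrans(10, 3, 120)); /* prints 3 */
	return 0;
}
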
@@ -1405,7 +1442,9 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1405 goto found_ok_skb; 1442 goto found_ok_skb;
1406 if (tcp_hdr(skb)->fin) 1443 if (tcp_hdr(skb)->fin)
1407 goto found_fin_ok; 1444 goto found_fin_ok;
1408 WARN_ON(!(flags & MSG_PEEK)); 1445 WARN(!(flags & MSG_PEEK), KERN_INFO "recvmsg bug 2: "
1446 "copied %X seq %X\n", *seq,
1447 TCP_SKB_CB(skb)->seq);
1409 } 1448 }
1410 1449
1411 /* Well, if we have backlog, try to process it now yet. */ 1450 /* Well, if we have backlog, try to process it now yet. */
@@ -2163,16 +2202,10 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
2163 break; 2202 break;
2164 2203
2165 case TCP_DEFER_ACCEPT: 2204 case TCP_DEFER_ACCEPT:
2166 icsk->icsk_accept_queue.rskq_defer_accept = 0; 2205 /* Translate value in seconds to number of retransmits */
2167 if (val > 0) { 2206 icsk->icsk_accept_queue.rskq_defer_accept =
2168 /* Translate value in seconds to number of 2207 secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ,
2169 * retransmits */ 2208 TCP_RTO_MAX / HZ);
2170 while (icsk->icsk_accept_queue.rskq_defer_accept < 32 &&
2171 val > ((TCP_TIMEOUT_INIT / HZ) <<
2172 icsk->icsk_accept_queue.rskq_defer_accept))
2173 icsk->icsk_accept_queue.rskq_defer_accept++;
2174 icsk->icsk_accept_queue.rskq_defer_accept++;
2175 }
2176 break; 2209 break;
2177 2210
2178 case TCP_WINDOW_CLAMP: 2211 case TCP_WINDOW_CLAMP:
@@ -2353,8 +2386,8 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
2353 val = (val ? : sysctl_tcp_fin_timeout) / HZ; 2386 val = (val ? : sysctl_tcp_fin_timeout) / HZ;
2354 break; 2387 break;
2355 case TCP_DEFER_ACCEPT: 2388 case TCP_DEFER_ACCEPT:
2356 val = !icsk->icsk_accept_queue.rskq_defer_accept ? 0 : 2389 val = retrans_to_secs(icsk->icsk_accept_queue.rskq_defer_accept,
2357 ((TCP_TIMEOUT_INIT / HZ) << (icsk->icsk_accept_queue.rskq_defer_accept - 1)); 2390 TCP_TIMEOUT_INIT / HZ, TCP_RTO_MAX / HZ);
2358 break; 2391 break;
2359 case TCP_WINDOW_CLAMP: 2392 case TCP_WINDOW_CLAMP:
2360 val = tp->window_clamp; 2393 val = tp->window_clamp;
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index e320afea07fc..4c03598ed924 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -641,10 +641,9 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
641 if (!(flg & TCP_FLAG_ACK)) 641 if (!(flg & TCP_FLAG_ACK))
642 return NULL; 642 return NULL;
643 643
644 /* If TCP_DEFER_ACCEPT is set, drop bare ACK. */ 644 /* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
645 if (inet_csk(sk)->icsk_accept_queue.rskq_defer_accept && 645 if (req->retrans < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
646 TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) { 646 TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
647 inet_csk(sk)->icsk_accept_queue.rskq_defer_accept--;
648 inet_rsk(req)->acked = 1; 647 inet_rsk(req)->acked = 1;
649 return NULL; 648 return NULL;
650 } 649 }
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 68566de4bcc5..430454ee5ead 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -497,13 +497,17 @@ done:
497 goto e_inval; 497 goto e_inval;
498 498
499 if (val) { 499 if (val) {
500 struct net_device *dev;
501
500 if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != val) 502 if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != val)
501 goto e_inval; 503 goto e_inval;
502 504
503 if (__dev_get_by_index(net, val) == NULL) { 505 dev = dev_get_by_index(net, val);
506 if (!dev) {
504 retv = -ENODEV; 507 retv = -ENODEV;
505 break; 508 break;
506 } 509 }
510 dev_put(dev);
507 } 511 }
508 np->mcast_oif = val; 512 np->mcast_oif = val;
509 retv = 0; 513 retv = 0;
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 0f133c5a8d3c..3291902f0b88 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1074,6 +1074,8 @@ restart:
1074 err = -ECONNREFUSED; 1074 err = -ECONNREFUSED;
1075 if (other->sk_state != TCP_LISTEN) 1075 if (other->sk_state != TCP_LISTEN)
1076 goto out_unlock; 1076 goto out_unlock;
1077 if (other->sk_shutdown & RCV_SHUTDOWN)
1078 goto out_unlock;
1077 1079
1078 if (unix_recvq_full(other)) { 1080 if (unix_recvq_full(other)) {
1079 err = -EAGAIN; 1081 err = -EAGAIN;