-rw-r--r--  Documentation/networking/ip-sysctl.txt | 4
-rw-r--r--  drivers/bluetooth/btmrvl_main.c | 9
-rw-r--r--  drivers/net/bonding/bond_main.c | 3
-rw-r--r--  drivers/net/can/usb/usb_8dev.c | 5
-rw-r--r--  drivers/net/ethernet/atheros/Kconfig | 18
-rw-r--r--  drivers/net/ethernet/atheros/Makefile | 1
-rw-r--r--  drivers/net/ethernet/atheros/alx/Makefile | 3
-rw-r--r--  drivers/net/ethernet/atheros/alx/alx.h | 114
-rw-r--r--  drivers/net/ethernet/atheros/alx/ethtool.c | 272
-rw-r--r--  drivers/net/ethernet/atheros/alx/hw.c | 1226
-rw-r--r--  drivers/net/ethernet/atheros/alx/hw.h | 499
-rw-r--r--  drivers/net/ethernet/atheros/alx/main.c | 1625
-rw-r--r--  drivers/net/ethernet/atheros/alx/reg.h | 810
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 36
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 14
-rw-r--r--  drivers/net/ethernet/marvell/mv643xx_eth.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/pxa168_eth.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 3
-rw-r--r--  drivers/net/ethernet/octeon/octeon_mgmt.c | 31
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c | 2
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 38
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.h | 2
-rw-r--r--  drivers/net/ethernet/sfc/efx.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/common.h | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 66
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 5
-rw-r--r--  drivers/net/ethernet/ti/davinci_cpdma.c | 7
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 4
-rw-r--r--  drivers/net/macvtap.c | 6
-rw-r--r--  drivers/net/tun.c | 6
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 8
-rw-r--r--  drivers/net/vxlan.c | 40
-rw-r--r--  drivers/net/wan/dlci.c | 26
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_main.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/xmit.c | 6
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c | 4
-rw-r--r--  drivers/net/wireless/brcm80211/brcmsmac/main.c | 17
-rw-r--r--  drivers/net/wireless/iwlegacy/3945-rs.c | 1
-rw-r--r--  drivers/net/wireless/iwlegacy/4965-rs.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/rs.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/rxon.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-drv.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/rs.c | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/tx.c | 3
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800lib.c | 29
-rw-r--r--  include/linux/if_vlan.h | 2
-rw-r--r--  include/linux/netdevice.h | 1
-rw-r--r--  include/linux/skbuff.h | 1
-rw-r--r--  include/uapi/linux/Kbuild | 1
-rw-r--r--  net/bluetooth/hci_core.c | 15
-rw-r--r--  net/bluetooth/l2cap_core.c | 5
-rw-r--r--  net/bridge/br_multicast.c | 5
-rw-r--r--  net/core/dev.c | 34
-rw-r--r--  net/core/dev_ioctl.c | 19
-rw-r--r--  net/core/ethtool.c | 6
-rw-r--r--  net/core/skbuff.c | 20
-rw-r--r--  net/core/sock.c | 17
-rw-r--r--  net/ipv4/gre.c | 2
-rw-r--r--  net/ipv4/netfilter/ipt_ULOG.c | 12
-rw-r--r--  net/ipv4/tcp_ipv4.c | 4
-rw-r--r--  net/ipv6/addrconf.c | 12
-rw-r--r--  net/ipv6/ip6_output.c | 13
-rw-r--r--  net/ipv6/ndisc.c | 2
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c | 2
-rw-r--r--  net/key/af_key.c | 2
-rw-r--r--  net/mac80211/cfg.c | 6
-rw-r--r--  net/mac80211/ieee80211_i.h | 5
-rw-r--r--  net/mac80211/mlme.c | 87
-rw-r--r--  net/mac80211/rate.c | 2
-rw-r--r--  net/mac80211/util.c | 4
-rw-r--r--  net/netfilter/ipvs/ip_vs_core.c | 3
-rw-r--r--  net/netfilter/nf_conntrack_labels.c | 2
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c | 1
-rw-r--r--  net/netfilter/nf_nat_sip.c | 3
-rw-r--r--  net/netfilter/xt_TCPMSS.c | 25
-rw-r--r--  net/netfilter/xt_TCPOPTSTRIP.c | 6
-rw-r--r--  net/wireless/nl80211.c | 11
77 files changed, 5065 insertions, 231 deletions
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index f98ca633b528..3458d6343e01 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -420,10 +420,10 @@ tcp_synack_retries - INTEGER
 	for a passive TCP connection will happen after 63seconds.
 
 tcp_syncookies - BOOLEAN
-	Only valid when the kernel was compiled with CONFIG_SYNCOOKIES
+	Only valid when the kernel was compiled with CONFIG_SYN_COOKIES
 	Send out syncookies when the syn backlog queue of a socket
 	overflows. This is to prevent against the common 'SYN flood attack'
-	Default: FALSE
+	Default: 1
 
 	Note, that syncookies is fallback facility.
 	It MUST NOT be used to help highly loaded servers to stand
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index 3a4343b3bd6d..9a9f51875df5 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -498,6 +498,10 @@ static int btmrvl_service_main_thread(void *data)
 		add_wait_queue(&thread->wait_q, &wait);
 
 		set_current_state(TASK_INTERRUPTIBLE);
+		if (kthread_should_stop()) {
+			BT_DBG("main_thread: break from main thread");
+			break;
+		}
 
 		if (adapter->wakeup_tries ||
 		    ((!adapter->int_count) &&
@@ -513,11 +517,6 @@ static int btmrvl_service_main_thread(void *data)
 
 		BT_DBG("main_thread woke up");
 
-		if (kthread_should_stop()) {
-			BT_DBG("main_thread: break from main thread");
-			break;
-		}
-
 		spin_lock_irqsave(&priv->driver_lock, flags);
 		if (adapter->int_count) {
 			adapter->int_count = 0;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 02d9ae7d527e..f97569613526 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2413,7 +2413,8 @@ static void bond_miimon_commit(struct bonding *bond)
 
 		pr_info("%s: link status definitely up for interface %s, %u Mbps %s duplex.\n",
 			bond->dev->name, slave->dev->name,
-			slave->speed, slave->duplex ? "full" : "half");
+			slave->speed == SPEED_UNKNOWN ? 0 : slave->speed,
+			slave->duplex ? "full" : "half");
 
 		/* notify ad that the link status has changed */
 		if (bond->params.mode == BOND_MODE_8023AD)
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index 6e15ef08f301..cbd388eea682 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -977,7 +977,7 @@ static int usb_8dev_probe(struct usb_interface *intf,
 	err = usb_8dev_cmd_version(priv, &version);
 	if (err) {
 		netdev_err(netdev, "can't get firmware version\n");
-		goto cleanup_cmd_msg_buffer;
+		goto cleanup_unregister_candev;
 	} else {
 		netdev_info(netdev,
 			    "firmware: %d.%d, hardware: %d.%d\n",
@@ -989,6 +989,9 @@ static int usb_8dev_probe(struct usb_interface *intf,
 
 	return 0;
 
+cleanup_unregister_candev:
+	unregister_netdev(priv->netdev);
+
 cleanup_cmd_msg_buffer:
 	kfree(priv->cmd_msg_buffer);
 
diff --git a/drivers/net/ethernet/atheros/Kconfig b/drivers/net/ethernet/atheros/Kconfig
index 36d6abd1cfff..ad6aa1e98348 100644
--- a/drivers/net/ethernet/atheros/Kconfig
+++ b/drivers/net/ethernet/atheros/Kconfig
@@ -67,4 +67,22 @@ config ATL1C
 	  To compile this driver as a module, choose M here. The module
 	  will be called atl1c.
 
+config ALX
+	tristate "Qualcomm Atheros AR816x/AR817x support"
+	depends on PCI
+	select CRC32
+	select NET_CORE
+	select MDIO
+	help
+	  This driver supports the Qualcomm Atheros L1F ethernet adapter,
+	  i.e. the following chipsets:
+
+	    1969:1091 - AR8161 Gigabit Ethernet
+	    1969:1090 - AR8162 Fast Ethernet
+	    1969:10A1 - AR8171 Gigabit Ethernet
+	    1969:10A0 - AR8172 Fast Ethernet
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called alx.
+
 endif # NET_VENDOR_ATHEROS
diff --git a/drivers/net/ethernet/atheros/Makefile b/drivers/net/ethernet/atheros/Makefile
index e7e76fb576ff..5cf1c65bbce9 100644
--- a/drivers/net/ethernet/atheros/Makefile
+++ b/drivers/net/ethernet/atheros/Makefile
@@ -6,3 +6,4 @@ obj-$(CONFIG_ATL1) += atlx/
 obj-$(CONFIG_ATL2) += atlx/
 obj-$(CONFIG_ATL1E) += atl1e/
 obj-$(CONFIG_ATL1C) += atl1c/
+obj-$(CONFIG_ALX) += alx/
diff --git a/drivers/net/ethernet/atheros/alx/Makefile b/drivers/net/ethernet/atheros/alx/Makefile
new file mode 100644
index 000000000000..5901fa407d52
--- /dev/null
+++ b/drivers/net/ethernet/atheros/alx/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_ALX) += alx.o
+alx-objs := main.o ethtool.o hw.o
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/ethernet/atheros/alx/alx.h b/drivers/net/ethernet/atheros/alx/alx.h
new file mode 100644
index 000000000000..50b3ae2b143d
--- /dev/null
+++ b/drivers/net/ethernet/atheros/alx/alx.h
@@ -0,0 +1,114 @@
1/*
2 * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
3 *
4 * This file is free software: you may copy, redistribute and/or modify it
5 * under the terms of the GNU General Public License as published by the
6 * Free Software Foundation, either version 2 of the License, or (at your
7 * option) any later version.
8 *
9 * This file is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 *
17 * This file incorporates work covered by the following copyright and
18 * permission notice:
19 *
20 * Copyright (c) 2012 Qualcomm Atheros, Inc.
21 *
22 * Permission to use, copy, modify, and/or distribute this software for any
23 * purpose with or without fee is hereby granted, provided that the above
24 * copyright notice and this permission notice appear in all copies.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
27 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
28 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
29 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
30 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
31 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
32 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
33 */
34
35#ifndef _ALX_H_
36#define _ALX_H_
37
38#include <linux/types.h>
39#include <linux/etherdevice.h>
40#include <linux/dma-mapping.h>
41#include <linux/spinlock.h>
42#include "hw.h"
43
44#define ALX_WATCHDOG_TIME (5 * HZ)
45
46struct alx_buffer {
47 struct sk_buff *skb;
48 DEFINE_DMA_UNMAP_ADDR(dma);
49 DEFINE_DMA_UNMAP_LEN(size);
50};
51
52struct alx_rx_queue {
53 struct alx_rrd *rrd;
54 dma_addr_t rrd_dma;
55
56 struct alx_rfd *rfd;
57 dma_addr_t rfd_dma;
58
59 struct alx_buffer *bufs;
60
61 u16 write_idx, read_idx;
62 u16 rrd_read_idx;
63};
64#define ALX_RX_ALLOC_THRESH 32
65
66struct alx_tx_queue {
67 struct alx_txd *tpd;
68 dma_addr_t tpd_dma;
69 struct alx_buffer *bufs;
70 u16 write_idx, read_idx;
71};
72
73#define ALX_DEFAULT_TX_WORK 128
74
75enum alx_device_quirks {
76 ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG = BIT(0),
77};
78
79struct alx_priv {
80 struct net_device *dev;
81
82 struct alx_hw hw;
83
84 /* all descriptor memory */
85 struct {
86 dma_addr_t dma;
87 void *virt;
88 int size;
89 } descmem;
90
91 /* protect int_mask updates */
92 spinlock_t irq_lock;
93 u32 int_mask;
94
95 int tx_ringsz;
96 int rx_ringsz;
97 int rxbuf_size;
98
99 struct napi_struct napi;
100 struct alx_tx_queue txq;
101 struct alx_rx_queue rxq;
102
103 struct work_struct link_check_wk;
104 struct work_struct reset_wk;
105
106 u16 msg_enable;
107
108 bool msi;
109};
110
111extern const struct ethtool_ops alx_ethtool_ops;
112extern const char alx_drv_name[];
113
114#endif
diff --git a/drivers/net/ethernet/atheros/alx/ethtool.c b/drivers/net/ethernet/atheros/alx/ethtool.c
new file mode 100644
index 000000000000..6fa2aec2bc81
--- /dev/null
+++ b/drivers/net/ethernet/atheros/alx/ethtool.c
@@ -0,0 +1,272 @@
1/*
2 * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
3 *
4 * This file is free software: you may copy, redistribute and/or modify it
5 * under the terms of the GNU General Public License as published by the
6 * Free Software Foundation, either version 2 of the License, or (at your
7 * option) any later version.
8 *
9 * This file is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 *
17 * This file incorporates work covered by the following copyright and
18 * permission notice:
19 *
20 * Copyright (c) 2012 Qualcomm Atheros, Inc.
21 *
22 * Permission to use, copy, modify, and/or distribute this software for any
23 * purpose with or without fee is hereby granted, provided that the above
24 * copyright notice and this permission notice appear in all copies.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
27 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
28 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
29 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
30 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
31 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
32 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
33 */
34
35#include <linux/pci.h>
36#include <linux/ip.h>
37#include <linux/tcp.h>
38#include <linux/netdevice.h>
39#include <linux/etherdevice.h>
40#include <linux/ethtool.h>
41#include <linux/mdio.h>
42#include <linux/interrupt.h>
43#include <asm/byteorder.h>
44
45#include "alx.h"
46#include "reg.h"
47#include "hw.h"
48
49
50static int alx_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
51{
52 struct alx_priv *alx = netdev_priv(netdev);
53 struct alx_hw *hw = &alx->hw;
54
55 ecmd->supported = SUPPORTED_10baseT_Half |
56 SUPPORTED_10baseT_Full |
57 SUPPORTED_100baseT_Half |
58 SUPPORTED_100baseT_Full |
59 SUPPORTED_Autoneg |
60 SUPPORTED_TP |
61 SUPPORTED_Pause;
62 if (alx_hw_giga(hw))
63 ecmd->supported |= SUPPORTED_1000baseT_Full;
64
65 ecmd->advertising = ADVERTISED_TP;
66 if (hw->adv_cfg & ADVERTISED_Autoneg)
67 ecmd->advertising |= hw->adv_cfg;
68
69 ecmd->port = PORT_TP;
70 ecmd->phy_address = 0;
71 if (hw->adv_cfg & ADVERTISED_Autoneg)
72 ecmd->autoneg = AUTONEG_ENABLE;
73 else
74 ecmd->autoneg = AUTONEG_DISABLE;
75 ecmd->transceiver = XCVR_INTERNAL;
76
77 if (hw->flowctrl & ALX_FC_ANEG && hw->adv_cfg & ADVERTISED_Autoneg) {
78 if (hw->flowctrl & ALX_FC_RX) {
79 ecmd->advertising |= ADVERTISED_Pause;
80
81 if (!(hw->flowctrl & ALX_FC_TX))
82 ecmd->advertising |= ADVERTISED_Asym_Pause;
83 } else if (hw->flowctrl & ALX_FC_TX) {
84 ecmd->advertising |= ADVERTISED_Asym_Pause;
85 }
86 }
87
88 if (hw->link_speed != SPEED_UNKNOWN) {
89 ethtool_cmd_speed_set(ecmd,
90 hw->link_speed - hw->link_speed % 10);
91 ecmd->duplex = hw->link_speed % 10;
92 } else {
93 ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
94 ecmd->duplex = DUPLEX_UNKNOWN;
95 }
96
97 return 0;
98}
99
100static int alx_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
101{
102 struct alx_priv *alx = netdev_priv(netdev);
103 struct alx_hw *hw = &alx->hw;
104 u32 adv_cfg;
105
106 ASSERT_RTNL();
107
108 if (ecmd->autoneg == AUTONEG_ENABLE) {
109 if (ecmd->advertising & ADVERTISED_1000baseT_Half)
110 return -EINVAL;
111 adv_cfg = ecmd->advertising | ADVERTISED_Autoneg;
112 } else {
113 int speed = ethtool_cmd_speed(ecmd);
114
115 switch (speed + ecmd->duplex) {
116 case SPEED_10 + DUPLEX_HALF:
117 adv_cfg = ADVERTISED_10baseT_Half;
118 break;
119 case SPEED_10 + DUPLEX_FULL:
120 adv_cfg = ADVERTISED_10baseT_Full;
121 break;
122 case SPEED_100 + DUPLEX_HALF:
123 adv_cfg = ADVERTISED_100baseT_Half;
124 break;
125 case SPEED_100 + DUPLEX_FULL:
126 adv_cfg = ADVERTISED_100baseT_Full;
127 break;
128 default:
129 return -EINVAL;
130 }
131 }
132
133 hw->adv_cfg = adv_cfg;
134 return alx_setup_speed_duplex(hw, adv_cfg, hw->flowctrl);
135}
136
137static void alx_get_pauseparam(struct net_device *netdev,
138 struct ethtool_pauseparam *pause)
139{
140 struct alx_priv *alx = netdev_priv(netdev);
141 struct alx_hw *hw = &alx->hw;
142
143 if (hw->flowctrl & ALX_FC_ANEG &&
144 hw->adv_cfg & ADVERTISED_Autoneg)
145 pause->autoneg = AUTONEG_ENABLE;
146 else
147 pause->autoneg = AUTONEG_DISABLE;
148
149 if (hw->flowctrl & ALX_FC_TX)
150 pause->tx_pause = 1;
151 else
152 pause->tx_pause = 0;
153
154 if (hw->flowctrl & ALX_FC_RX)
155 pause->rx_pause = 1;
156 else
157 pause->rx_pause = 0;
158}
159
160
161static int alx_set_pauseparam(struct net_device *netdev,
162 struct ethtool_pauseparam *pause)
163{
164 struct alx_priv *alx = netdev_priv(netdev);
165 struct alx_hw *hw = &alx->hw;
166 int err = 0;
167 bool reconfig_phy = false;
168 u8 fc = 0;
169
170 if (pause->tx_pause)
171 fc |= ALX_FC_TX;
172 if (pause->rx_pause)
173 fc |= ALX_FC_RX;
174 if (pause->autoneg)
175 fc |= ALX_FC_ANEG;
176
177 ASSERT_RTNL();
178
179 /* restart auto-neg for auto-mode */
180 if (hw->adv_cfg & ADVERTISED_Autoneg) {
181 if (!((fc ^ hw->flowctrl) & ALX_FC_ANEG))
182 reconfig_phy = true;
183 if (fc & hw->flowctrl & ALX_FC_ANEG &&
184 (fc ^ hw->flowctrl) & (ALX_FC_RX | ALX_FC_TX))
185 reconfig_phy = true;
186 }
187
188 if (reconfig_phy) {
189 err = alx_setup_speed_duplex(hw, hw->adv_cfg, fc);
190 return err;
191 }
192
193 /* flow control on mac */
194 if ((fc ^ hw->flowctrl) & (ALX_FC_RX | ALX_FC_TX))
195 alx_cfg_mac_flowcontrol(hw, fc);
196
197 hw->flowctrl = fc;
198
199 return 0;
200}
201
202static u32 alx_get_msglevel(struct net_device *netdev)
203{
204 struct alx_priv *alx = netdev_priv(netdev);
205
206 return alx->msg_enable;
207}
208
209static void alx_set_msglevel(struct net_device *netdev, u32 data)
210{
211 struct alx_priv *alx = netdev_priv(netdev);
212
213 alx->msg_enable = data;
214}
215
216static void alx_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
217{
218 struct alx_priv *alx = netdev_priv(netdev);
219 struct alx_hw *hw = &alx->hw;
220
221 wol->supported = WAKE_MAGIC | WAKE_PHY;
222 wol->wolopts = 0;
223
224 if (hw->sleep_ctrl & ALX_SLEEP_WOL_MAGIC)
225 wol->wolopts |= WAKE_MAGIC;
226 if (hw->sleep_ctrl & ALX_SLEEP_WOL_PHY)
227 wol->wolopts |= WAKE_PHY;
228}
229
230static int alx_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
231{
232 struct alx_priv *alx = netdev_priv(netdev);
233 struct alx_hw *hw = &alx->hw;
234
235 if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE |
236 WAKE_UCAST | WAKE_BCAST | WAKE_MCAST))
237 return -EOPNOTSUPP;
238
239 hw->sleep_ctrl = 0;
240
241 if (wol->wolopts & WAKE_MAGIC)
242 hw->sleep_ctrl |= ALX_SLEEP_WOL_MAGIC;
243 if (wol->wolopts & WAKE_PHY)
244 hw->sleep_ctrl |= ALX_SLEEP_WOL_PHY;
245
246 device_set_wakeup_enable(&alx->hw.pdev->dev, hw->sleep_ctrl);
247
248 return 0;
249}
250
251static void alx_get_drvinfo(struct net_device *netdev,
252 struct ethtool_drvinfo *drvinfo)
253{
254 struct alx_priv *alx = netdev_priv(netdev);
255
256 strlcpy(drvinfo->driver, alx_drv_name, sizeof(drvinfo->driver));
257 strlcpy(drvinfo->bus_info, pci_name(alx->hw.pdev),
258 sizeof(drvinfo->bus_info));
259}
260
261const struct ethtool_ops alx_ethtool_ops = {
262 .get_settings = alx_get_settings,
263 .set_settings = alx_set_settings,
264 .get_pauseparam = alx_get_pauseparam,
265 .set_pauseparam = alx_set_pauseparam,
266 .get_drvinfo = alx_get_drvinfo,
267 .get_msglevel = alx_get_msglevel,
268 .set_msglevel = alx_set_msglevel,
269 .get_wol = alx_get_wol,
270 .set_wol = alx_set_wol,
271 .get_link = ethtool_op_get_link,
272};
diff --git a/drivers/net/ethernet/atheros/alx/hw.c b/drivers/net/ethernet/atheros/alx/hw.c
new file mode 100644
index 000000000000..220a16ad0e49
--- /dev/null
+++ b/drivers/net/ethernet/atheros/alx/hw.c
@@ -0,0 +1,1226 @@
1/*
2 * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
3 *
4 * This file is free software: you may copy, redistribute and/or modify it
5 * under the terms of the GNU General Public License as published by the
6 * Free Software Foundation, either version 2 of the License, or (at your
7 * option) any later version.
8 *
9 * This file is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 *
17 * This file incorporates work covered by the following copyright and
18 * permission notice:
19 *
20 * Copyright (c) 2012 Qualcomm Atheros, Inc.
21 *
22 * Permission to use, copy, modify, and/or distribute this software for any
23 * purpose with or without fee is hereby granted, provided that the above
24 * copyright notice and this permission notice appear in all copies.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
27 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
28 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
29 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
30 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
31 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
32 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
33 */
34#include <linux/etherdevice.h>
35#include <linux/delay.h>
36#include <linux/pci.h>
37#include <linux/mdio.h>
38#include "reg.h"
39#include "hw.h"
40
41static inline bool alx_is_rev_a(u8 rev)
42{
43 return rev == ALX_REV_A0 || rev == ALX_REV_A1;
44}
45
46static int alx_wait_mdio_idle(struct alx_hw *hw)
47{
48 u32 val;
49 int i;
50
51 for (i = 0; i < ALX_MDIO_MAX_AC_TO; i++) {
52 val = alx_read_mem32(hw, ALX_MDIO);
53 if (!(val & ALX_MDIO_BUSY))
54 return 0;
55 udelay(10);
56 }
57
58 return -ETIMEDOUT;
59}
60
61static int alx_read_phy_core(struct alx_hw *hw, bool ext, u8 dev,
62 u16 reg, u16 *phy_data)
63{
64 u32 val, clk_sel;
65 int err;
66
67 *phy_data = 0;
68
69 /* use slow clock when it's in hibernation status */
70 clk_sel = hw->link_speed != SPEED_UNKNOWN ?
71 ALX_MDIO_CLK_SEL_25MD4 :
72 ALX_MDIO_CLK_SEL_25MD128;
73
74 if (ext) {
75 val = dev << ALX_MDIO_EXTN_DEVAD_SHIFT |
76 reg << ALX_MDIO_EXTN_REG_SHIFT;
77 alx_write_mem32(hw, ALX_MDIO_EXTN, val);
78
79 val = ALX_MDIO_SPRES_PRMBL | ALX_MDIO_START |
80 ALX_MDIO_MODE_EXT | ALX_MDIO_OP_READ |
81 clk_sel << ALX_MDIO_CLK_SEL_SHIFT;
82 } else {
83 val = ALX_MDIO_SPRES_PRMBL |
84 clk_sel << ALX_MDIO_CLK_SEL_SHIFT |
85 reg << ALX_MDIO_REG_SHIFT |
86 ALX_MDIO_START | ALX_MDIO_OP_READ;
87 }
88 alx_write_mem32(hw, ALX_MDIO, val);
89
90 err = alx_wait_mdio_idle(hw);
91 if (err)
92 return err;
93 val = alx_read_mem32(hw, ALX_MDIO);
94 *phy_data = ALX_GET_FIELD(val, ALX_MDIO_DATA);
95 return 0;
96}
97
98static int alx_write_phy_core(struct alx_hw *hw, bool ext, u8 dev,
99 u16 reg, u16 phy_data)
100{
101 u32 val, clk_sel;
102
103 /* use slow clock when it's in hibernation status */
104 clk_sel = hw->link_speed != SPEED_UNKNOWN ?
105 ALX_MDIO_CLK_SEL_25MD4 :
106 ALX_MDIO_CLK_SEL_25MD128;
107
108 if (ext) {
109 val = dev << ALX_MDIO_EXTN_DEVAD_SHIFT |
110 reg << ALX_MDIO_EXTN_REG_SHIFT;
111 alx_write_mem32(hw, ALX_MDIO_EXTN, val);
112
113 val = ALX_MDIO_SPRES_PRMBL |
114 clk_sel << ALX_MDIO_CLK_SEL_SHIFT |
115 phy_data << ALX_MDIO_DATA_SHIFT |
116 ALX_MDIO_START | ALX_MDIO_MODE_EXT;
117 } else {
118 val = ALX_MDIO_SPRES_PRMBL |
119 clk_sel << ALX_MDIO_CLK_SEL_SHIFT |
120 reg << ALX_MDIO_REG_SHIFT |
121 phy_data << ALX_MDIO_DATA_SHIFT |
122 ALX_MDIO_START;
123 }
124 alx_write_mem32(hw, ALX_MDIO, val);
125
126 return alx_wait_mdio_idle(hw);
127}
128
129static int __alx_read_phy_reg(struct alx_hw *hw, u16 reg, u16 *phy_data)
130{
131 return alx_read_phy_core(hw, false, 0, reg, phy_data);
132}
133
134static int __alx_write_phy_reg(struct alx_hw *hw, u16 reg, u16 phy_data)
135{
136 return alx_write_phy_core(hw, false, 0, reg, phy_data);
137}
138
139static int __alx_read_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 *pdata)
140{
141 return alx_read_phy_core(hw, true, dev, reg, pdata);
142}
143
144static int __alx_write_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 data)
145{
146 return alx_write_phy_core(hw, true, dev, reg, data);
147}
148
149static int __alx_read_phy_dbg(struct alx_hw *hw, u16 reg, u16 *pdata)
150{
151 int err;
152
153 err = __alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, reg);
154 if (err)
155 return err;
156
157 return __alx_read_phy_reg(hw, ALX_MII_DBG_DATA, pdata);
158}
159
160static int __alx_write_phy_dbg(struct alx_hw *hw, u16 reg, u16 data)
161{
162 int err;
163
164 err = __alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, reg);
165 if (err)
166 return err;
167
168 return __alx_write_phy_reg(hw, ALX_MII_DBG_DATA, data);
169}
170
171int alx_read_phy_reg(struct alx_hw *hw, u16 reg, u16 *phy_data)
172{
173 int err;
174
175 spin_lock(&hw->mdio_lock);
176 err = __alx_read_phy_reg(hw, reg, phy_data);
177 spin_unlock(&hw->mdio_lock);
178
179 return err;
180}
181
182int alx_write_phy_reg(struct alx_hw *hw, u16 reg, u16 phy_data)
183{
184 int err;
185
186 spin_lock(&hw->mdio_lock);
187 err = __alx_write_phy_reg(hw, reg, phy_data);
188 spin_unlock(&hw->mdio_lock);
189
190 return err;
191}
192
193int alx_read_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 *pdata)
194{
195 int err;
196
197 spin_lock(&hw->mdio_lock);
198 err = __alx_read_phy_ext(hw, dev, reg, pdata);
199 spin_unlock(&hw->mdio_lock);
200
201 return err;
202}
203
204int alx_write_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 data)
205{
206 int err;
207
208 spin_lock(&hw->mdio_lock);
209 err = __alx_write_phy_ext(hw, dev, reg, data);
210 spin_unlock(&hw->mdio_lock);
211
212 return err;
213}
214
215static int alx_read_phy_dbg(struct alx_hw *hw, u16 reg, u16 *pdata)
216{
217 int err;
218
219 spin_lock(&hw->mdio_lock);
220 err = __alx_read_phy_dbg(hw, reg, pdata);
221 spin_unlock(&hw->mdio_lock);
222
223 return err;
224}
225
226static int alx_write_phy_dbg(struct alx_hw *hw, u16 reg, u16 data)
227{
228 int err;
229
230 spin_lock(&hw->mdio_lock);
231 err = __alx_write_phy_dbg(hw, reg, data);
232 spin_unlock(&hw->mdio_lock);
233
234 return err;
235}
236
237static u16 alx_get_phy_config(struct alx_hw *hw)
238{
239 u32 val;
240 u16 phy_val;
241
242 val = alx_read_mem32(hw, ALX_PHY_CTRL);
243 /* phy in reset */
244 if ((val & ALX_PHY_CTRL_DSPRST_OUT) == 0)
245 return ALX_DRV_PHY_UNKNOWN;
246
247 val = alx_read_mem32(hw, ALX_DRV);
248 val = ALX_GET_FIELD(val, ALX_DRV_PHY);
249 if (ALX_DRV_PHY_UNKNOWN == val)
250 return ALX_DRV_PHY_UNKNOWN;
251
252 alx_read_phy_reg(hw, ALX_MII_DBG_ADDR, &phy_val);
253 if (ALX_PHY_INITED == phy_val)
254 return val;
255
256 return ALX_DRV_PHY_UNKNOWN;
257}
258
259static bool alx_wait_reg(struct alx_hw *hw, u32 reg, u32 wait, u32 *val)
260{
261 u32 read;
262 int i;
263
264 for (i = 0; i < ALX_SLD_MAX_TO; i++) {
265 read = alx_read_mem32(hw, reg);
266 if ((read & wait) == 0) {
267 if (val)
268 *val = read;
269 return true;
270 }
271 mdelay(1);
272 }
273
274 return false;
275}
276
277static bool alx_read_macaddr(struct alx_hw *hw, u8 *addr)
278{
279 u32 mac0, mac1;
280
281 mac0 = alx_read_mem32(hw, ALX_STAD0);
282 mac1 = alx_read_mem32(hw, ALX_STAD1);
283
284 /* addr should be big-endian */
285 *(__be32 *)(addr + 2) = cpu_to_be32(mac0);
286 *(__be16 *)addr = cpu_to_be16(mac1);
287
288 return is_valid_ether_addr(addr);
289}
290
291int alx_get_perm_macaddr(struct alx_hw *hw, u8 *addr)
292{
293 u32 val;
294
295 /* try to get it from register first */
296 if (alx_read_macaddr(hw, addr))
297 return 0;
298
299 /* try to load from efuse */
300 if (!alx_wait_reg(hw, ALX_SLD, ALX_SLD_STAT | ALX_SLD_START, &val))
301 return -EIO;
302 alx_write_mem32(hw, ALX_SLD, val | ALX_SLD_START);
303 if (!alx_wait_reg(hw, ALX_SLD, ALX_SLD_START, NULL))
304 return -EIO;
305 if (alx_read_macaddr(hw, addr))
306 return 0;
307
308 /* try to load from flash/eeprom (if present) */
309 val = alx_read_mem32(hw, ALX_EFLD);
310 if (val & (ALX_EFLD_F_EXIST | ALX_EFLD_E_EXIST)) {
311 if (!alx_wait_reg(hw, ALX_EFLD,
312 ALX_EFLD_STAT | ALX_EFLD_START, &val))
313 return -EIO;
314 alx_write_mem32(hw, ALX_EFLD, val | ALX_EFLD_START);
315 if (!alx_wait_reg(hw, ALX_EFLD, ALX_EFLD_START, NULL))
316 return -EIO;
317 if (alx_read_macaddr(hw, addr))
318 return 0;
319 }
320
321 return -EIO;
322}
323
324void alx_set_macaddr(struct alx_hw *hw, const u8 *addr)
325{
326 u32 val;
327
328 /* for example: 00-0B-6A-F6-00-DC * STAD0=6AF600DC, STAD1=000B */
329 val = be32_to_cpu(*(__be32 *)(addr + 2));
330 alx_write_mem32(hw, ALX_STAD0, val);
331 val = be16_to_cpu(*(__be16 *)addr);
332 alx_write_mem32(hw, ALX_STAD1, val);
333}
334
335static void alx_enable_osc(struct alx_hw *hw)
336{
337 u32 val;
338
339 /* rising edge */
340 val = alx_read_mem32(hw, ALX_MISC);
341 alx_write_mem32(hw, ALX_MISC, val & ~ALX_MISC_INTNLOSC_OPEN);
342 alx_write_mem32(hw, ALX_MISC, val | ALX_MISC_INTNLOSC_OPEN);
343}
344
345static void alx_reset_osc(struct alx_hw *hw, u8 rev)
346{
347 u32 val, val2;
348
349 /* clear Internal OSC settings, switching OSC by hw itself */
350 val = alx_read_mem32(hw, ALX_MISC3);
351 alx_write_mem32(hw, ALX_MISC3,
352 (val & ~ALX_MISC3_25M_BY_SW) |
353 ALX_MISC3_25M_NOTO_INTNL);
354
355 /* 25M clk from chipset may be unstable 1s after de-assert of
356 * PERST, driver need re-calibrate before enter Sleep for WoL
357 */
358 val = alx_read_mem32(hw, ALX_MISC);
359 if (rev >= ALX_REV_B0) {
360 /* restore over current protection def-val,
361 * this val could be reset by MAC-RST
362 */
363 ALX_SET_FIELD(val, ALX_MISC_PSW_OCP, ALX_MISC_PSW_OCP_DEF);
364 /* a 0->1 change will update the internal val of osc */
365 val &= ~ALX_MISC_INTNLOSC_OPEN;
366 alx_write_mem32(hw, ALX_MISC, val);
367 alx_write_mem32(hw, ALX_MISC, val | ALX_MISC_INTNLOSC_OPEN);
368 /* hw will automatically dis OSC after cab. */
369 val2 = alx_read_mem32(hw, ALX_MSIC2);
370 val2 &= ~ALX_MSIC2_CALB_START;
371 alx_write_mem32(hw, ALX_MSIC2, val2);
372 alx_write_mem32(hw, ALX_MSIC2, val2 | ALX_MSIC2_CALB_START);
373 } else {
374 val &= ~ALX_MISC_INTNLOSC_OPEN;
375 /* disable isolate for rev A devices */
376 if (alx_is_rev_a(rev))
377 val &= ~ALX_MISC_ISO_EN;
378
379 alx_write_mem32(hw, ALX_MISC, val | ALX_MISC_INTNLOSC_OPEN);
380 alx_write_mem32(hw, ALX_MISC, val);
381 }
382
383 udelay(20);
384}
385
386static int alx_stop_mac(struct alx_hw *hw)
387{
388 u32 rxq, txq, val;
389 u16 i;
390
391 rxq = alx_read_mem32(hw, ALX_RXQ0);
392 alx_write_mem32(hw, ALX_RXQ0, rxq & ~ALX_RXQ0_EN);
393 txq = alx_read_mem32(hw, ALX_TXQ0);
394 alx_write_mem32(hw, ALX_TXQ0, txq & ~ALX_TXQ0_EN);
395
396 udelay(40);
397
398 hw->rx_ctrl &= ~(ALX_MAC_CTRL_RX_EN | ALX_MAC_CTRL_TX_EN);
399 alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
400
401 for (i = 0; i < ALX_DMA_MAC_RST_TO; i++) {
402 val = alx_read_mem32(hw, ALX_MAC_STS);
403 if (!(val & ALX_MAC_STS_IDLE))
404 return 0;
405 udelay(10);
406 }
407
408 return -ETIMEDOUT;
409}
410
411int alx_reset_mac(struct alx_hw *hw)
412{
413 u32 val, pmctrl;
414 int i, ret;
415 u8 rev;
416 bool a_cr;
417
418 pmctrl = 0;
419 rev = alx_hw_revision(hw);
420 a_cr = alx_is_rev_a(rev) && alx_hw_with_cr(hw);
421
422 /* disable all interrupts, RXQ/TXQ */
423 alx_write_mem32(hw, ALX_MSIX_MASK, 0xFFFFFFFF);
424 alx_write_mem32(hw, ALX_IMR, 0);
425 alx_write_mem32(hw, ALX_ISR, ALX_ISR_DIS);
426
427 ret = alx_stop_mac(hw);
428 if (ret)
429 return ret;
430
431 /* mac reset workaroud */
432 alx_write_mem32(hw, ALX_RFD_PIDX, 1);
433
434 /* dis l0s/l1 before mac reset */
435 if (a_cr) {
436 pmctrl = alx_read_mem32(hw, ALX_PMCTRL);
437 if (pmctrl & (ALX_PMCTRL_L1_EN | ALX_PMCTRL_L0S_EN))
438 alx_write_mem32(hw, ALX_PMCTRL,
439 pmctrl & ~(ALX_PMCTRL_L1_EN |
440 ALX_PMCTRL_L0S_EN));
441 }
442
443 /* reset whole mac safely */
444 val = alx_read_mem32(hw, ALX_MASTER);
445 alx_write_mem32(hw, ALX_MASTER,
446 val | ALX_MASTER_DMA_MAC_RST | ALX_MASTER_OOB_DIS);
447
448 /* make sure it's real idle */
449 udelay(10);
450 for (i = 0; i < ALX_DMA_MAC_RST_TO; i++) {
451 val = alx_read_mem32(hw, ALX_RFD_PIDX);
452 if (val == 0)
453 break;
454 udelay(10);
455 }
456 for (; i < ALX_DMA_MAC_RST_TO; i++) {
457 val = alx_read_mem32(hw, ALX_MASTER);
458 if ((val & ALX_MASTER_DMA_MAC_RST) == 0)
459 break;
460 udelay(10);
461 }
462 if (i == ALX_DMA_MAC_RST_TO)
463 return -EIO;
464 udelay(10);
465
466 if (a_cr) {
467 alx_write_mem32(hw, ALX_MASTER, val | ALX_MASTER_PCLKSEL_SRDS);
468 /* restore l0s / l1 */
469 if (pmctrl & (ALX_PMCTRL_L1_EN | ALX_PMCTRL_L0S_EN))
470 alx_write_mem32(hw, ALX_PMCTRL, pmctrl);
471 }
472
473 alx_reset_osc(hw, rev);
474
475 /* clear Internal OSC settings, switching OSC by hw itself,
476 * disable isolate for rev A devices
477 */
478 val = alx_read_mem32(hw, ALX_MISC3);
479 alx_write_mem32(hw, ALX_MISC3,
480 (val & ~ALX_MISC3_25M_BY_SW) |
481 ALX_MISC3_25M_NOTO_INTNL);
482 val = alx_read_mem32(hw, ALX_MISC);
483 val &= ~ALX_MISC_INTNLOSC_OPEN;
484 if (alx_is_rev_a(rev))
485 val &= ~ALX_MISC_ISO_EN;
486 alx_write_mem32(hw, ALX_MISC, val);
487 udelay(20);
488
489 /* driver control speed/duplex, hash-alg */
490 alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
491
492 val = alx_read_mem32(hw, ALX_SERDES);
493 alx_write_mem32(hw, ALX_SERDES,
494 val | ALX_SERDES_MACCLK_SLWDWN |
495 ALX_SERDES_PHYCLK_SLWDWN);
496
497 return 0;
498}
499
500void alx_reset_phy(struct alx_hw *hw)
501{
502 int i;
503 u32 val;
504 u16 phy_val;
505
506 /* (DSP)reset PHY core */
507 val = alx_read_mem32(hw, ALX_PHY_CTRL);
508 val &= ~(ALX_PHY_CTRL_DSPRST_OUT | ALX_PHY_CTRL_IDDQ |
509 ALX_PHY_CTRL_GATE_25M | ALX_PHY_CTRL_POWER_DOWN |
510 ALX_PHY_CTRL_CLS);
511 val |= ALX_PHY_CTRL_RST_ANALOG;
512
513 val |= (ALX_PHY_CTRL_HIB_PULSE | ALX_PHY_CTRL_HIB_EN);
514 alx_write_mem32(hw, ALX_PHY_CTRL, val);
515 udelay(10);
516 alx_write_mem32(hw, ALX_PHY_CTRL, val | ALX_PHY_CTRL_DSPRST_OUT);
517
518 for (i = 0; i < ALX_PHY_CTRL_DSPRST_TO; i++)
519 udelay(10);
520
521 /* phy power saving & hib */
522 alx_write_phy_dbg(hw, ALX_MIIDBG_LEGCYPS, ALX_LEGCYPS_DEF);
523 alx_write_phy_dbg(hw, ALX_MIIDBG_SYSMODCTRL,
524 ALX_SYSMODCTRL_IECHOADJ_DEF);
525 alx_write_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_VDRVBIAS,
526 ALX_VDRVBIAS_DEF);
527
528 /* EEE advertisement */
529 val = alx_read_mem32(hw, ALX_LPI_CTRL);
530 alx_write_mem32(hw, ALX_LPI_CTRL, val & ~ALX_LPI_CTRL_EN);
531 alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_LOCAL_EEEADV, 0);
532
533 /* phy power saving */
534 alx_write_phy_dbg(hw, ALX_MIIDBG_TST10BTCFG, ALX_TST10BTCFG_DEF);
535 alx_write_phy_dbg(hw, ALX_MIIDBG_SRDSYSMOD, ALX_SRDSYSMOD_DEF);
536 alx_write_phy_dbg(hw, ALX_MIIDBG_TST100BTCFG, ALX_TST100BTCFG_DEF);
537 alx_write_phy_dbg(hw, ALX_MIIDBG_ANACTRL, ALX_ANACTRL_DEF);
538 alx_read_phy_dbg(hw, ALX_MIIDBG_GREENCFG2, &phy_val);
539 alx_write_phy_dbg(hw, ALX_MIIDBG_GREENCFG2,
540 phy_val & ~ALX_GREENCFG2_GATE_DFSE_EN);
541 /* rtl8139c, 120m issue */
542 alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_NLP78,
543 ALX_MIIEXT_NLP78_120M_DEF);
544 alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_S3DIG10,
545 ALX_MIIEXT_S3DIG10_DEF);
546
547 if (hw->lnk_patch) {
548 /* Turn off half amplitude */
549 alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL3,
550 &phy_val);
551 alx_write_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL3,
552 phy_val | ALX_CLDCTRL3_BP_CABLE1TH_DET_GT);
553 /* Turn off Green feature */
554 alx_read_phy_dbg(hw, ALX_MIIDBG_GREENCFG2, &phy_val);
555 alx_write_phy_dbg(hw, ALX_MIIDBG_GREENCFG2,
556 phy_val | ALX_GREENCFG2_BP_GREEN);
557 /* Turn off half Bias */
558 alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL5,
559 &phy_val);
560 alx_write_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL5,
561 phy_val | ALX_CLDCTRL5_BP_VD_HLFBIAS);
562 }
563
564 /* set phy interrupt mask */
565 alx_write_phy_reg(hw, ALX_MII_IER, ALX_IER_LINK_UP | ALX_IER_LINK_DOWN);
566}
567
568#define ALX_PCI_CMD (PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY | PCI_COMMAND_IO)
569
570void alx_reset_pcie(struct alx_hw *hw)
571{
572 u8 rev = alx_hw_revision(hw);
573 u32 val;
574 u16 val16;
575
576 /* Workaround for PCI problem when BIOS sets MMRBC incorrectly. */
577 pci_read_config_word(hw->pdev, PCI_COMMAND, &val16);
578 if (!(val16 & ALX_PCI_CMD) || (val16 & PCI_COMMAND_INTX_DISABLE)) {
579 val16 = (val16 | ALX_PCI_CMD) & ~PCI_COMMAND_INTX_DISABLE;
580 pci_write_config_word(hw->pdev, PCI_COMMAND, val16);
581 }
582
583 /* clear WoL setting/status */
584 val = alx_read_mem32(hw, ALX_WOL0);
585 alx_write_mem32(hw, ALX_WOL0, 0);
586
587 val = alx_read_mem32(hw, ALX_PDLL_TRNS1);
588 alx_write_mem32(hw, ALX_PDLL_TRNS1, val & ~ALX_PDLL_TRNS1_D3PLLOFF_EN);
589
590 /* mask some pcie error bits */
591 val = alx_read_mem32(hw, ALX_UE_SVRT);
592 val &= ~(ALX_UE_SVRT_DLPROTERR | ALX_UE_SVRT_FCPROTERR);
593 alx_write_mem32(hw, ALX_UE_SVRT, val);
594
595 /* wol 25M & pclk */
596 val = alx_read_mem32(hw, ALX_MASTER);
597 if (alx_is_rev_a(rev) && alx_hw_with_cr(hw)) {
598 if ((val & ALX_MASTER_WAKEN_25M) == 0 ||
599 (val & ALX_MASTER_PCLKSEL_SRDS) == 0)
600 alx_write_mem32(hw, ALX_MASTER,
601 val | ALX_MASTER_PCLKSEL_SRDS |
602 ALX_MASTER_WAKEN_25M);
603 } else {
604 if ((val & ALX_MASTER_WAKEN_25M) == 0 ||
605 (val & ALX_MASTER_PCLKSEL_SRDS) != 0)
606 alx_write_mem32(hw, ALX_MASTER,
607 (val & ~ALX_MASTER_PCLKSEL_SRDS) |
608 ALX_MASTER_WAKEN_25M);
609 }
610
611 /* ASPM setting */
612 alx_enable_aspm(hw, true, true);
613
614 udelay(10);
615}
616
617void alx_start_mac(struct alx_hw *hw)
618{
619 u32 mac, txq, rxq;
620
621 rxq = alx_read_mem32(hw, ALX_RXQ0);
622 alx_write_mem32(hw, ALX_RXQ0, rxq | ALX_RXQ0_EN);
623 txq = alx_read_mem32(hw, ALX_TXQ0);
624 alx_write_mem32(hw, ALX_TXQ0, txq | ALX_TXQ0_EN);
625
626 mac = hw->rx_ctrl;
627 if (hw->link_speed % 10 == DUPLEX_FULL)
628 mac |= ALX_MAC_CTRL_FULLD;
629 else
630 mac &= ~ALX_MAC_CTRL_FULLD;
631 ALX_SET_FIELD(mac, ALX_MAC_CTRL_SPEED,
632 hw->link_speed >= SPEED_1000 ? ALX_MAC_CTRL_SPEED_1000 :
633 ALX_MAC_CTRL_SPEED_10_100);
634 mac |= ALX_MAC_CTRL_TX_EN | ALX_MAC_CTRL_RX_EN;
635 hw->rx_ctrl = mac;
636 alx_write_mem32(hw, ALX_MAC_CTRL, mac);
637}
638
639void alx_cfg_mac_flowcontrol(struct alx_hw *hw, u8 fc)
640{
641 if (fc & ALX_FC_RX)
642 hw->rx_ctrl |= ALX_MAC_CTRL_RXFC_EN;
643 else
644 hw->rx_ctrl &= ~ALX_MAC_CTRL_RXFC_EN;
645
646 if (fc & ALX_FC_TX)
647 hw->rx_ctrl |= ALX_MAC_CTRL_TXFC_EN;
648 else
649 hw->rx_ctrl &= ~ALX_MAC_CTRL_TXFC_EN;
650
651 alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
652}
653
654void alx_enable_aspm(struct alx_hw *hw, bool l0s_en, bool l1_en)
655{
656 u32 pmctrl;
657 u8 rev = alx_hw_revision(hw);
658
659 pmctrl = alx_read_mem32(hw, ALX_PMCTRL);
660
661 ALX_SET_FIELD(pmctrl, ALX_PMCTRL_LCKDET_TIMER,
662 ALX_PMCTRL_LCKDET_TIMER_DEF);
663 pmctrl |= ALX_PMCTRL_RCVR_WT_1US |
664 ALX_PMCTRL_L1_CLKSW_EN |
665 ALX_PMCTRL_L1_SRDSRX_PWD;
666 ALX_SET_FIELD(pmctrl, ALX_PMCTRL_L1REQ_TO, ALX_PMCTRL_L1REG_TO_DEF);
667 ALX_SET_FIELD(pmctrl, ALX_PMCTRL_L1_TIMER, ALX_PMCTRL_L1_TIMER_16US);
668 pmctrl &= ~(ALX_PMCTRL_L1_SRDS_EN |
669 ALX_PMCTRL_L1_SRDSPLL_EN |
670 ALX_PMCTRL_L1_BUFSRX_EN |
671 ALX_PMCTRL_SADLY_EN |
672 ALX_PMCTRL_HOTRST_WTEN|
673 ALX_PMCTRL_L0S_EN |
674 ALX_PMCTRL_L1_EN |
675 ALX_PMCTRL_ASPM_FCEN |
676 ALX_PMCTRL_TXL1_AFTER_L0S |
677 ALX_PMCTRL_RXL1_AFTER_L0S);
678 if (alx_is_rev_a(rev) && alx_hw_with_cr(hw))
679 pmctrl |= ALX_PMCTRL_L1_SRDS_EN | ALX_PMCTRL_L1_SRDSPLL_EN;
680
681 if (l0s_en)
682 pmctrl |= (ALX_PMCTRL_L0S_EN | ALX_PMCTRL_ASPM_FCEN);
683 if (l1_en)
684 pmctrl |= (ALX_PMCTRL_L1_EN | ALX_PMCTRL_ASPM_FCEN);
685
686 alx_write_mem32(hw, ALX_PMCTRL, pmctrl);
687}
688
689
690static u32 ethadv_to_hw_cfg(struct alx_hw *hw, u32 ethadv_cfg)
691{
692 u32 cfg = 0;
693
694 if (ethadv_cfg & ADVERTISED_Autoneg) {
695 cfg |= ALX_DRV_PHY_AUTO;
696 if (ethadv_cfg & ADVERTISED_10baseT_Half)
697 cfg |= ALX_DRV_PHY_10;
698 if (ethadv_cfg & ADVERTISED_10baseT_Full)
699 cfg |= ALX_DRV_PHY_10 | ALX_DRV_PHY_DUPLEX;
700 if (ethadv_cfg & ADVERTISED_100baseT_Half)
701 cfg |= ALX_DRV_PHY_100;
702 if (ethadv_cfg & ADVERTISED_100baseT_Full)
703 cfg |= ALX_DRV_PHY_100 | ALX_DRV_PHY_DUPLEX;
704 if (ethadv_cfg & ADVERTISED_1000baseT_Half)
705 cfg |= ALX_DRV_PHY_1000;
706 if (ethadv_cfg & ADVERTISED_1000baseT_Full)
707 cfg |= ALX_DRV_PHY_100 | ALX_DRV_PHY_DUPLEX;
708 if (ethadv_cfg & ADVERTISED_Pause)
709 cfg |= ADVERTISE_PAUSE_CAP;
710 if (ethadv_cfg & ADVERTISED_Asym_Pause)
711 cfg |= ADVERTISE_PAUSE_ASYM;
712 } else {
713 switch (ethadv_cfg) {
714 case ADVERTISED_10baseT_Half:
715 cfg |= ALX_DRV_PHY_10;
716 break;
717 case ADVERTISED_100baseT_Half:
718 cfg |= ALX_DRV_PHY_100;
719 break;
720 case ADVERTISED_10baseT_Full:
721 cfg |= ALX_DRV_PHY_10 | ALX_DRV_PHY_DUPLEX;
722 break;
723 case ADVERTISED_100baseT_Full:
724 cfg |= ALX_DRV_PHY_100 | ALX_DRV_PHY_DUPLEX;
725 break;
726 }
727 }
728
729 return cfg;
730}
731
732int alx_setup_speed_duplex(struct alx_hw *hw, u32 ethadv, u8 flowctrl)
733{
734 u16 adv, giga, cr;
735 u32 val;
736 int err = 0;
737
738 alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, 0);
739 val = alx_read_mem32(hw, ALX_DRV);
740 ALX_SET_FIELD(val, ALX_DRV_PHY, 0);
741
742 if (ethadv & ADVERTISED_Autoneg) {
743 adv = ADVERTISE_CSMA;
744 adv |= ethtool_adv_to_mii_adv_t(ethadv);
745
746 if (flowctrl & ALX_FC_ANEG) {
747 if (flowctrl & ALX_FC_RX) {
748 adv |= ADVERTISED_Pause;
749 if (!(flowctrl & ALX_FC_TX))
750 adv |= ADVERTISED_Asym_Pause;
751 } else if (flowctrl & ALX_FC_TX) {
752 adv |= ADVERTISED_Asym_Pause;
753 }
754 }
755 giga = 0;
756 if (alx_hw_giga(hw))
757 giga = ethtool_adv_to_mii_ctrl1000_t(ethadv);
758
759 cr = BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART;
760
761 if (alx_write_phy_reg(hw, MII_ADVERTISE, adv) ||
762 alx_write_phy_reg(hw, MII_CTRL1000, giga) ||
763 alx_write_phy_reg(hw, MII_BMCR, cr))
764 err = -EBUSY;
765 } else {
766 cr = BMCR_RESET;
767 if (ethadv == ADVERTISED_100baseT_Half ||
768 ethadv == ADVERTISED_100baseT_Full)
769 cr |= BMCR_SPEED100;
770 if (ethadv == ADVERTISED_10baseT_Full ||
771 ethadv == ADVERTISED_100baseT_Full)
772 cr |= BMCR_FULLDPLX;
773
774 err = alx_write_phy_reg(hw, MII_BMCR, cr);
775 }
776
777 if (!err) {
778 alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, ALX_PHY_INITED);
779 val |= ethadv_to_hw_cfg(hw, ethadv);
780 }
781
782 alx_write_mem32(hw, ALX_DRV, val);
783
784 return err;
785}
786
787
788void alx_post_phy_link(struct alx_hw *hw)
789{
790 u16 phy_val, len, agc;
791 u8 revid = alx_hw_revision(hw);
792 bool adj_th = revid == ALX_REV_B0;
793 int speed;
794
795 if (hw->link_speed == SPEED_UNKNOWN)
796 speed = SPEED_UNKNOWN;
797 else
798 speed = hw->link_speed - hw->link_speed % 10;
799
800 if (revid != ALX_REV_B0 && !alx_is_rev_a(revid))
801 return;
802
803 /* 1000BT/AZ, wrong cable length */
804 if (speed != SPEED_UNKNOWN) {
805 alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL6,
806 &phy_val);
807 len = ALX_GET_FIELD(phy_val, ALX_CLDCTRL6_CAB_LEN);
808 alx_read_phy_dbg(hw, ALX_MIIDBG_AGC, &phy_val);
809 agc = ALX_GET_FIELD(phy_val, ALX_AGC_2_VGA);
810
811 if ((speed == SPEED_1000 &&
812 (len > ALX_CLDCTRL6_CAB_LEN_SHORT1G ||
813 (len == 0 && agc > ALX_AGC_LONG1G_LIMT))) ||
814 (speed == SPEED_100 &&
815 (len > ALX_CLDCTRL6_CAB_LEN_SHORT100M ||
816 (len == 0 && agc > ALX_AGC_LONG100M_LIMT)))) {
817 alx_write_phy_dbg(hw, ALX_MIIDBG_AZ_ANADECT,
818 ALX_AZ_ANADECT_LONG);
819 alx_read_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
820 &phy_val);
821 alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
822 phy_val | ALX_AFE_10BT_100M_TH);
823 } else {
824 alx_write_phy_dbg(hw, ALX_MIIDBG_AZ_ANADECT,
825 ALX_AZ_ANADECT_DEF);
826 alx_read_phy_ext(hw, ALX_MIIEXT_ANEG,
827 ALX_MIIEXT_AFE, &phy_val);
828 alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
829 phy_val & ~ALX_AFE_10BT_100M_TH);
830 }
831
832 /* threshold adjust */
833 if (adj_th && hw->lnk_patch) {
834 if (speed == SPEED_100) {
835 alx_write_phy_dbg(hw, ALX_MIIDBG_MSE16DB,
836 ALX_MSE16DB_UP);
837 } else if (speed == SPEED_1000) {
838 /*
839 * Giga link threshold, raise the tolerance of
840 * noise 50%
841 */
842 alx_read_phy_dbg(hw, ALX_MIIDBG_MSE20DB,
843 &phy_val);
844 ALX_SET_FIELD(phy_val, ALX_MSE20DB_TH,
845 ALX_MSE20DB_TH_HI);
846 alx_write_phy_dbg(hw, ALX_MIIDBG_MSE20DB,
847 phy_val);
848 }
849 }
850 } else {
851 alx_read_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
852 &phy_val);
853 alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
854 phy_val & ~ALX_AFE_10BT_100M_TH);
855
856 if (adj_th && hw->lnk_patch) {
857 alx_write_phy_dbg(hw, ALX_MIIDBG_MSE16DB,
858 ALX_MSE16DB_DOWN);
859 alx_read_phy_dbg(hw, ALX_MIIDBG_MSE20DB, &phy_val);
860 ALX_SET_FIELD(phy_val, ALX_MSE20DB_TH,
861 ALX_MSE20DB_TH_DEF);
862 alx_write_phy_dbg(hw, ALX_MIIDBG_MSE20DB, phy_val);
863 }
864 }
865}
866
867
868/* NOTE:
869 * 1. phy link must be established before calling this function
870 * 2. wol option (pattern,magic,link,etc.) is configed before call it.
871 */
872int alx_pre_suspend(struct alx_hw *hw, int speed)
873{
874 u32 master, mac, phy, val;
875 int err = 0;
876
877 master = alx_read_mem32(hw, ALX_MASTER);
878 master &= ~ALX_MASTER_PCLKSEL_SRDS;
879 mac = hw->rx_ctrl;
880 /* 10/100 half */
881 ALX_SET_FIELD(mac, ALX_MAC_CTRL_SPEED, ALX_MAC_CTRL_SPEED_10_100);
882 mac &= ~(ALX_MAC_CTRL_FULLD | ALX_MAC_CTRL_RX_EN | ALX_MAC_CTRL_TX_EN);
883
884 phy = alx_read_mem32(hw, ALX_PHY_CTRL);
885 phy &= ~(ALX_PHY_CTRL_DSPRST_OUT | ALX_PHY_CTRL_CLS);
886 phy |= ALX_PHY_CTRL_RST_ANALOG | ALX_PHY_CTRL_HIB_PULSE |
887 ALX_PHY_CTRL_HIB_EN;
888
889 /* without any activity */
890 if (!(hw->sleep_ctrl & ALX_SLEEP_ACTIVE)) {
891 err = alx_write_phy_reg(hw, ALX_MII_IER, 0);
892 if (err)
893 return err;
894 phy |= ALX_PHY_CTRL_IDDQ | ALX_PHY_CTRL_POWER_DOWN;
895 } else {
896 if (hw->sleep_ctrl & (ALX_SLEEP_WOL_MAGIC | ALX_SLEEP_CIFS))
897 mac |= ALX_MAC_CTRL_RX_EN | ALX_MAC_CTRL_BRD_EN;
898 if (hw->sleep_ctrl & ALX_SLEEP_CIFS)
899 mac |= ALX_MAC_CTRL_TX_EN;
900 if (speed % 10 == DUPLEX_FULL)
901 mac |= ALX_MAC_CTRL_FULLD;
902 if (speed >= SPEED_1000)
903 ALX_SET_FIELD(mac, ALX_MAC_CTRL_SPEED,
904 ALX_MAC_CTRL_SPEED_1000);
905 phy |= ALX_PHY_CTRL_DSPRST_OUT;
906 err = alx_write_phy_ext(hw, ALX_MIIEXT_ANEG,
907 ALX_MIIEXT_S3DIG10,
908 ALX_MIIEXT_S3DIG10_SL);
909 if (err)
910 return err;
911 }
912
913 alx_enable_osc(hw);
914 hw->rx_ctrl = mac;
915 alx_write_mem32(hw, ALX_MASTER, master);
916 alx_write_mem32(hw, ALX_MAC_CTRL, mac);
917 alx_write_mem32(hw, ALX_PHY_CTRL, phy);
918
919 /* set val of PDLL D3PLLOFF */
920 val = alx_read_mem32(hw, ALX_PDLL_TRNS1);
921 val |= ALX_PDLL_TRNS1_D3PLLOFF_EN;
922 alx_write_mem32(hw, ALX_PDLL_TRNS1, val);
923
924 return 0;
925}
926
927bool alx_phy_configured(struct alx_hw *hw)
928{
929 u32 cfg, hw_cfg;
930
931 cfg = ethadv_to_hw_cfg(hw, hw->adv_cfg);
932 cfg = ALX_GET_FIELD(cfg, ALX_DRV_PHY);
933 hw_cfg = alx_get_phy_config(hw);
934
935 if (hw_cfg == ALX_DRV_PHY_UNKNOWN)
936 return false;
937
938 return cfg == hw_cfg;
939}
940
941int alx_get_phy_link(struct alx_hw *hw, int *speed)
942{
943 struct pci_dev *pdev = hw->pdev;
944 u16 bmsr, giga;
945 int err;
946
947 err = alx_read_phy_reg(hw, MII_BMSR, &bmsr);
948 if (err)
949 return err;
950
951 err = alx_read_phy_reg(hw, MII_BMSR, &bmsr);
952 if (err)
953 return err;
954
955 if (!(bmsr & BMSR_LSTATUS)) {
956 *speed = SPEED_UNKNOWN;
957 return 0;
958 }
959
960 /* speed/duplex result is saved in PHY Specific Status Register */
961 err = alx_read_phy_reg(hw, ALX_MII_GIGA_PSSR, &giga);
962 if (err)
963 return err;
964
965 if (!(giga & ALX_GIGA_PSSR_SPD_DPLX_RESOLVED))
966 goto wrong_speed;
967
968 switch (giga & ALX_GIGA_PSSR_SPEED) {
969 case ALX_GIGA_PSSR_1000MBS:
970 *speed = SPEED_1000;
971 break;
972 case ALX_GIGA_PSSR_100MBS:
973 *speed = SPEED_100;
974 break;
975 case ALX_GIGA_PSSR_10MBS:
976 *speed = SPEED_10;
977 break;
978 default:
979 goto wrong_speed;
980 }
981
982 *speed += (giga & ALX_GIGA_PSSR_DPLX) ? DUPLEX_FULL : DUPLEX_HALF;
983 return 1;
984
985wrong_speed:
986 dev_err(&pdev->dev, "invalid PHY speed/duplex: 0x%x\n", giga);
987 return -EINVAL;
988}
989
990int alx_clear_phy_intr(struct alx_hw *hw)
991{
992 u16 isr;
993
994 /* clear interrupt status by reading it */
995 return alx_read_phy_reg(hw, ALX_MII_ISR, &isr);
996}
997
998int alx_config_wol(struct alx_hw *hw)
999{
1000 u32 wol = 0;
1001 int err = 0;
1002
1003 /* turn on magic packet event */
1004 if (hw->sleep_ctrl & ALX_SLEEP_WOL_MAGIC)
1005 wol |= ALX_WOL0_MAGIC_EN | ALX_WOL0_PME_MAGIC_EN;
1006
1007 /* turn on link up event */
1008 if (hw->sleep_ctrl & ALX_SLEEP_WOL_PHY) {
1009 wol |= ALX_WOL0_LINK_EN | ALX_WOL0_PME_LINK;
1010 /* only link up can wake up */
1011 err = alx_write_phy_reg(hw, ALX_MII_IER, ALX_IER_LINK_UP);
1012 }
1013 alx_write_mem32(hw, ALX_WOL0, wol);
1014
1015 return err;
1016}
1017
1018void alx_disable_rss(struct alx_hw *hw)
1019{
1020 u32 ctrl = alx_read_mem32(hw, ALX_RXQ0);
1021
1022 ctrl &= ~ALX_RXQ0_RSS_HASH_EN;
1023 alx_write_mem32(hw, ALX_RXQ0, ctrl);
1024}
1025
1026void alx_configure_basic(struct alx_hw *hw)
1027{
1028 u32 val, raw_mtu, max_payload;
1029 u16 val16;
1030 u8 chip_rev = alx_hw_revision(hw);
1031
1032 alx_set_macaddr(hw, hw->mac_addr);
1033
1034 alx_write_mem32(hw, ALX_CLK_GATE, ALX_CLK_GATE_ALL);
1035
1036 /* idle timeout to switch clk_125M */
1037 if (chip_rev >= ALX_REV_B0)
1038 alx_write_mem32(hw, ALX_IDLE_DECISN_TIMER,
1039 ALX_IDLE_DECISN_TIMER_DEF);
1040
1041 alx_write_mem32(hw, ALX_SMB_TIMER, hw->smb_timer * 500UL);
1042
1043 val = alx_read_mem32(hw, ALX_MASTER);
1044 val |= ALX_MASTER_IRQMOD2_EN |
1045 ALX_MASTER_IRQMOD1_EN |
1046 ALX_MASTER_SYSALVTIMER_EN;
1047 alx_write_mem32(hw, ALX_MASTER, val);
1048 alx_write_mem32(hw, ALX_IRQ_MODU_TIMER,
1049 (hw->imt >> 1) << ALX_IRQ_MODU_TIMER1_SHIFT);
1050 /* intr re-trig timeout */
1051 alx_write_mem32(hw, ALX_INT_RETRIG, ALX_INT_RETRIG_TO);
1052 /* tpd threshold to trig int */
1053 alx_write_mem32(hw, ALX_TINT_TPD_THRSHLD, hw->ith_tpd);
1054 alx_write_mem32(hw, ALX_TINT_TIMER, hw->imt);
1055
1056 raw_mtu = hw->mtu + ETH_HLEN;
1057 alx_write_mem32(hw, ALX_MTU, raw_mtu + 8);
1058 if (raw_mtu > ALX_MTU_JUMBO_TH)
1059 hw->rx_ctrl &= ~ALX_MAC_CTRL_FAST_PAUSE;
1060
1061 if ((raw_mtu + 8) < ALX_TXQ1_JUMBO_TSO_TH)
1062 val = (raw_mtu + 8 + 7) >> 3;
1063 else
1064 val = ALX_TXQ1_JUMBO_TSO_TH >> 3;
1065 alx_write_mem32(hw, ALX_TXQ1, val | ALX_TXQ1_ERRLGPKT_DROP_EN);
1066
1067 max_payload = pcie_get_readrq(hw->pdev) >> 8;
1068 /*
1069 * if BIOS had changed the default dma read max length,
1070 * restore it to default value
1071 */
1072 if (max_payload < ALX_DEV_CTRL_MAXRRS_MIN)
1073 pcie_set_readrq(hw->pdev, 128 << ALX_DEV_CTRL_MAXRRS_MIN);
1074
1075 val = ALX_TXQ_TPD_BURSTPREF_DEF << ALX_TXQ0_TPD_BURSTPREF_SHIFT |
1076 ALX_TXQ0_MODE_ENHANCE | ALX_TXQ0_LSO_8023_EN |
1077 ALX_TXQ0_SUPT_IPOPT |
1078 ALX_TXQ_TXF_BURST_PREF_DEF << ALX_TXQ0_TXF_BURST_PREF_SHIFT;
1079 alx_write_mem32(hw, ALX_TXQ0, val);
1080 val = ALX_TXQ_TPD_BURSTPREF_DEF << ALX_HQTPD_Q1_NUMPREF_SHIFT |
1081 ALX_TXQ_TPD_BURSTPREF_DEF << ALX_HQTPD_Q2_NUMPREF_SHIFT |
1082 ALX_TXQ_TPD_BURSTPREF_DEF << ALX_HQTPD_Q3_NUMPREF_SHIFT |
1083 ALX_HQTPD_BURST_EN;
1084 alx_write_mem32(hw, ALX_HQTPD, val);
1085
1086 /* rxq, flow control */
1087 val = alx_read_mem32(hw, ALX_SRAM5);
1088 val = ALX_GET_FIELD(val, ALX_SRAM_RXF_LEN) << 3;
1089 if (val > ALX_SRAM_RXF_LEN_8K) {
1090 val16 = ALX_MTU_STD_ALGN >> 3;
1091 val = (val - ALX_RXQ2_RXF_FLOW_CTRL_RSVD) >> 3;
1092 } else {
1093 val16 = ALX_MTU_STD_ALGN >> 3;
1094 val = (val - ALX_MTU_STD_ALGN) >> 3;
1095 }
1096 alx_write_mem32(hw, ALX_RXQ2,
1097 val16 << ALX_RXQ2_RXF_XOFF_THRESH_SHIFT |
1098 val << ALX_RXQ2_RXF_XON_THRESH_SHIFT);
1099 val = ALX_RXQ0_NUM_RFD_PREF_DEF << ALX_RXQ0_NUM_RFD_PREF_SHIFT |
1100 ALX_RXQ0_RSS_MODE_DIS << ALX_RXQ0_RSS_MODE_SHIFT |
1101 ALX_RXQ0_IDT_TBL_SIZE_DEF << ALX_RXQ0_IDT_TBL_SIZE_SHIFT |
1102 ALX_RXQ0_RSS_HSTYP_ALL | ALX_RXQ0_RSS_HASH_EN |
1103 ALX_RXQ0_IPV6_PARSE_EN;
1104
1105 if (alx_hw_giga(hw))
1106 ALX_SET_FIELD(val, ALX_RXQ0_ASPM_THRESH,
1107 ALX_RXQ0_ASPM_THRESH_100M);
1108
1109 alx_write_mem32(hw, ALX_RXQ0, val);
1110
1111 val = alx_read_mem32(hw, ALX_DMA);
1112 val = ALX_DMA_RORDER_MODE_OUT << ALX_DMA_RORDER_MODE_SHIFT |
1113 ALX_DMA_RREQ_PRI_DATA |
1114 max_payload << ALX_DMA_RREQ_BLEN_SHIFT |
1115 ALX_DMA_WDLY_CNT_DEF << ALX_DMA_WDLY_CNT_SHIFT |
1116 ALX_DMA_RDLY_CNT_DEF << ALX_DMA_RDLY_CNT_SHIFT |
1117 (hw->dma_chnl - 1) << ALX_DMA_RCHNL_SEL_SHIFT;
1118 alx_write_mem32(hw, ALX_DMA, val);
1119
1120 /* default multi-tx-q weights */
1121 val = ALX_WRR_PRI_RESTRICT_NONE << ALX_WRR_PRI_SHIFT |
1122 4 << ALX_WRR_PRI0_SHIFT |
1123 4 << ALX_WRR_PRI1_SHIFT |
1124 4 << ALX_WRR_PRI2_SHIFT |
1125 4 << ALX_WRR_PRI3_SHIFT;
1126 alx_write_mem32(hw, ALX_WRR, val);
1127}
1128
1129static inline u32 alx_speed_to_ethadv(int speed)
1130{
1131 switch (speed) {
1132 case SPEED_1000 + DUPLEX_FULL:
1133 return ADVERTISED_1000baseT_Full;
1134 case SPEED_100 + DUPLEX_FULL:
1135 return ADVERTISED_100baseT_Full;
1136 case SPEED_100 + DUPLEX_HALF:
1137 return ADVERTISED_10baseT_Half;
1138 case SPEED_10 + DUPLEX_FULL:
1139 return ADVERTISED_10baseT_Full;
1140 case SPEED_10 + DUPLEX_HALF:
1141 return ADVERTISED_10baseT_Half;
1142 default:
1143 return 0;
1144 }
1145}
1146
1147int alx_select_powersaving_speed(struct alx_hw *hw, int *speed)
1148{
1149 int i, err, spd;
1150 u16 lpa;
1151
1152 err = alx_get_phy_link(hw, &spd);
1153 if (err < 0)
1154 return err;
1155
1156 if (spd == SPEED_UNKNOWN)
1157 return 0;
1158
1159 err = alx_read_phy_reg(hw, MII_LPA, &lpa);
1160 if (err)
1161 return err;
1162
1163 if (!(lpa & LPA_LPACK)) {
1164 *speed = spd;
1165 return 0;
1166 }
1167
1168 if (lpa & LPA_10FULL)
1169 *speed = SPEED_10 + DUPLEX_FULL;
1170 else if (lpa & LPA_10HALF)
1171 *speed = SPEED_10 + DUPLEX_HALF;
1172 else if (lpa & LPA_100FULL)
1173 *speed = SPEED_100 + DUPLEX_FULL;
1174 else
1175 *speed = SPEED_100 + DUPLEX_HALF;
1176
1177 if (*speed != spd) {
1178 err = alx_write_phy_reg(hw, ALX_MII_IER, 0);
1179 if (err)
1180 return err;
1181 err = alx_setup_speed_duplex(hw,
1182 alx_speed_to_ethadv(*speed) |
1183 ADVERTISED_Autoneg,
1184 ALX_FC_ANEG | ALX_FC_RX |
1185 ALX_FC_TX);
1186 if (err)
1187 return err;
1188
1189 /* wait for linkup */
1190 for (i = 0; i < ALX_MAX_SETUP_LNK_CYCLE; i++) {
1191 int speed2;
1192
1193 msleep(100);
1194
1195 err = alx_get_phy_link(hw, &speed2);
1196 if (err < 0)
1197 return err;
1198 if (speed2 != SPEED_UNKNOWN)
1199 break;
1200 }
1201 if (i == ALX_MAX_SETUP_LNK_CYCLE)
1202 return -ETIMEDOUT;
1203 }
1204
1205 return 0;
1206}
1207
1208bool alx_get_phy_info(struct alx_hw *hw)
1209{
1210 u16 devs1, devs2;
1211
1212 if (alx_read_phy_reg(hw, MII_PHYSID1, &hw->phy_id[0]) ||
1213 alx_read_phy_reg(hw, MII_PHYSID2, &hw->phy_id[1]))
1214 return false;
1215
1216 /* since we haven't PMA/PMD status2 register, we can't
1217 * use mdio45_probe function for prtad and mmds.
1218 * use fixed MMD3 to get mmds.
1219 */
1220 if (alx_read_phy_ext(hw, 3, MDIO_DEVS1, &devs1) ||
1221 alx_read_phy_ext(hw, 3, MDIO_DEVS2, &devs2))
1222 return false;
1223 hw->mdio.mmds = devs1 | devs2 << 16;
1224
1225 return true;
1226}
diff --git a/drivers/net/ethernet/atheros/alx/hw.h b/drivers/net/ethernet/atheros/alx/hw.h
new file mode 100644
index 000000000000..65e723d2172a
--- /dev/null
+++ b/drivers/net/ethernet/atheros/alx/hw.h
@@ -0,0 +1,499 @@
1/*
2 * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
3 *
4 * This file is free software: you may copy, redistribute and/or modify it
5 * under the terms of the GNU General Public License as published by the
6 * Free Software Foundation, either version 2 of the License, or (at your
7 * option) any later version.
8 *
9 * This file is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 *
17 * This file incorporates work covered by the following copyright and
18 * permission notice:
19 *
20 * Copyright (c) 2012 Qualcomm Atheros, Inc.
21 *
22 * Permission to use, copy, modify, and/or distribute this software for any
23 * purpose with or without fee is hereby granted, provided that the above
24 * copyright notice and this permission notice appear in all copies.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
27 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
28 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
29 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
30 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
31 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
32 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
33 */
34
35#ifndef ALX_HW_H_
36#define ALX_HW_H_
37#include <linux/types.h>
38#include <linux/mdio.h>
39#include <linux/pci.h>
40#include "reg.h"
41
42/* Transmit Packet Descriptor, contains 4 32-bit words.
43 *
44 * 31 16 0
45 * +----------------+----------------+
46 * | vlan-tag | buf length |
47 * +----------------+----------------+
48 * | Word 1 |
49 * +----------------+----------------+
50 * | Word 2: buf addr lo |
51 * +----------------+----------------+
52 * | Word 3: buf addr hi |
53 * +----------------+----------------+
54 *
55 * Word 2 and 3 combine to form a 64-bit buffer address
56 *
57 * Word 1 has three forms, depending on the state of bits 8/12/13:
58 * if bit8 == '1', the layout is only for custom checksum offload.
59 * if bit8 == '0' && bit12 == '1' && bit13 == '1', the *FIRST* descriptor
60 * of the skb is special for LSO V2: Word 2 becomes the total skb length
61 * and Word 3 is meaningless.
62 * otherwise, the layout is for a general skb, or for ip/tcp/udp
63 * checksum or LSO (TSO) offload.
64 *
65 * Here is the depiction:
66 *
67 * 0-+ 0-+
68 * 1 | 1 |
69 * 2 | 2 |
70 * 3 | Payload offset 3 | L4 header offset
71 * 4 | (7:0) 4 | (7:0)
72 * 5 | 5 |
73 * 6 | 6 |
74 * 7-+ 7-+
75 * 8 Custom csum enable = 1 8 Custom csum enable = 0
76 * 9 General IPv4 checksum 9 General IPv4 checksum
77 * 10 General TCP checksum 10 General TCP checksum
78 * 11 General UDP checksum 11 General UDP checksum
79 * 12 Large Send Segment enable 12 Large Send Segment enable
80 * 13 Large Send Segment type 13 Large Send Segment type
81 * 14 VLAN tagged 14 VLAN tagged
82 * 15 Insert VLAN tag 15 Insert VLAN tag
83 * 16 IPv4 packet 16 IPv4 packet
84 * 17 Ethernet frame type 17 Ethernet frame type
85 * 18-+ 18-+
86 * 19 | 19 |
87 * 20 | 20 |
88 * 21 | Custom csum offset 21 |
89 * 22 | (25:18) 22 |
90 * 23 | 23 | MSS (30:18)
91 * 24 | 24 |
92 * 25-+ 25 |
93 * 26-+ 26 |
94 * 27 | 27 |
95 * 28 | Reserved 28 |
96 * 29 | 29 |
97 * 30-+ 30-+
98 * 31 End of packet 31 End of packet
99 */
100struct alx_txd {
101 __le16 len;
102 __le16 vlan_tag;
103 __le32 word1;
104 union {
105 __le64 addr;
106 struct {
107 __le32 pkt_len;
108 __le32 resvd;
109 } l;
110 } adrl;
111} __packed;
112
113/* tpd word 1 */
114#define TPD_CXSUMSTART_MASK 0x00FF
115#define TPD_CXSUMSTART_SHIFT 0
116#define TPD_L4HDROFFSET_MASK 0x00FF
117#define TPD_L4HDROFFSET_SHIFT 0
118#define TPD_CXSUM_EN_MASK 0x0001
119#define TPD_CXSUM_EN_SHIFT 8
120#define TPD_IP_XSUM_MASK 0x0001
121#define TPD_IP_XSUM_SHIFT 9
122#define TPD_TCP_XSUM_MASK 0x0001
123#define TPD_TCP_XSUM_SHIFT 10
124#define TPD_UDP_XSUM_MASK 0x0001
125#define TPD_UDP_XSUM_SHIFT 11
126#define TPD_LSO_EN_MASK 0x0001
127#define TPD_LSO_EN_SHIFT 12
128#define TPD_LSO_V2_MASK 0x0001
129#define TPD_LSO_V2_SHIFT 13
130#define TPD_VLTAGGED_MASK 0x0001
131#define TPD_VLTAGGED_SHIFT 14
132#define TPD_INS_VLTAG_MASK 0x0001
133#define TPD_INS_VLTAG_SHIFT 15
134#define TPD_IPV4_MASK 0x0001
135#define TPD_IPV4_SHIFT 16
136#define TPD_ETHTYPE_MASK 0x0001
137#define TPD_ETHTYPE_SHIFT 17
138#define TPD_CXSUMOFFSET_MASK 0x00FF
139#define TPD_CXSUMOFFSET_SHIFT 18
140#define TPD_MSS_MASK 0x1FFF
141#define TPD_MSS_SHIFT 18
142#define TPD_EOP_MASK 0x0001
143#define TPD_EOP_SHIFT 31
144
145#define DESC_GET(_x, _name) ((_x) >> _name##SHIFT & _name##MASK)
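/* Illustrative use of these shift/mask pairs (this mirrors alx_tx_csum()
 * in main.c rather than adding a new API): building word1 for a custom
 * checksum descriptor, and reading a field back out:
 *
 *	tpd->word1 |= cpu_to_le32((cso >> 1) << TPD_CXSUMSTART_SHIFT);
 *	tpd->word1 |= cpu_to_le32((css >> 1) << TPD_CXSUMOFFSET_SHIFT);
 *	tpd->word1 |= cpu_to_le32(1 << TPD_CXSUM_EN_SHIFT);
 *
 *	mss = DESC_GET(le32_to_cpu(tpd->word1), TPD_MSS_);
 */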
146
147/* Receive Free Descriptor */
148struct alx_rfd {
149	__le64 addr;		/* data buffer address; the length is
150				 * configured via a register --- every
151				 * buffer has the same size
152				 */
153} __packed;
154
155/* Receive Return Descriptor, contains 4 32-bit words.
156 *
157 * 31 16 0
158 * +----------------+----------------+
159 * | Word 0 |
160 * +----------------+----------------+
161 * | Word 1: RSS Hash value |
162 * +----------------+----------------+
163 * | Word 2 |
164 * +----------------+----------------+
165 * | Word 3 |
166 * +----------------+----------------+
167 *
168 * Word 0 depiction & Word 2 depiction:
169 *
170 * 0--+ 0--+
171 * 1 | 1 |
172 * 2 | 2 |
173 * 3 | 3 |
174 * 4 | 4 |
175 * 5 | 5 |
176 * 6 | 6 |
177 * 7 | IP payload checksum 7 | VLAN tag
178 * 8 | (15:0) 8 | (15:0)
179 * 9 | 9 |
180 * 10 | 10 |
181 * 11 | 11 |
182 * 12 | 12 |
183 * 13 | 13 |
184 * 14 | 14 |
185 * 15-+ 15-+
186 * 16-+ 16-+
187 * 17 | Number of RFDs 17 |
188 * 18 | (19:16) 18 |
189 * 19-+ 19 | Protocol ID
190 * 20-+ 20 | (23:16)
191 * 21 | 21 |
192 * 22 | 22 |
193 * 23 | 23-+
194 * 24 | 24 | Reserved
195 * 25 | Start index of RFD-ring 25-+
196 * 26 | (31:20) 26 | RSS Q-num (27:25)
197 * 27 | 27-+
198 * 28 | 28-+
199 * 29 | 29 | RSS Hash algorithm
200 * 30 | 30 | (31:28)
201 * 31-+ 31-+
202 *
203 * Word 3 depiction:
204 *
205 * 0--+
206 * 1 |
207 * 2 |
208 * 3 |
209 * 4 |
210 * 5 |
211 * 6 |
212 * 7 | Packet length (include FCS)
213 * 8 | (13:0)
214 * 9 |
215 * 10 |
216 * 11 |
217 * 12 |
218 * 13-+
219 * 14 L4 Header checksum error
220 * 15 IPv4 checksum error
221 * 16 VLAN tagged
222 * 17-+
223 * 18 | Protocol ID (19:17)
224 * 19-+
225 * 20 Receive error summary
226 * 21 FCS(CRC) error
227 * 22 Frame alignment error
228 * 23 Truncated packet
229 * 24 Runt packet
230 * 25 Incomplete packet due to insufficient rx-desc
231 * 26 Broadcast packet
232 * 27 Multicast packet
233 * 28 Ethernet type (EII or 802.3)
234 * 29 FIFO overflow
235 * 30 Length error (for 802.3, length field mismatch with actual len)
236 * 31 Updated, indicates to the driver that this RRD has been refreshed.
237 */
238struct alx_rrd {
239 __le32 word0;
240 __le32 rss_hash;
241 __le32 word2;
242 __le32 word3;
243} __packed;
244
245/* rrd word 0 */
246#define RRD_XSUM_MASK 0xFFFF
247#define RRD_XSUM_SHIFT 0
248#define RRD_NOR_MASK 0x000F
249#define RRD_NOR_SHIFT 16
250#define RRD_SI_MASK 0x0FFF
251#define RRD_SI_SHIFT 20
252
253/* rrd word 2 */
254#define RRD_VLTAG_MASK 0xFFFF
255#define RRD_VLTAG_SHIFT 0
256#define RRD_PID_MASK 0x00FF
257#define RRD_PID_SHIFT 16
258/* non-ip packet */
259#define RRD_PID_NONIP 0
260/* ipv4(only) */
261#define RRD_PID_IPV4 1
262/* tcp/ipv6 */
263#define RRD_PID_IPV6TCP 2
264/* tcp/ipv4 */
265#define RRD_PID_IPV4TCP 3
266/* udp/ipv6 */
267#define RRD_PID_IPV6UDP 4
268/* udp/ipv4 */
269#define RRD_PID_IPV4UDP 5
270/* ipv6(only) */
271#define RRD_PID_IPV6 6
272/* LLDP packet */
273#define RRD_PID_LLDP 7
274/* 1588 packet */
275#define RRD_PID_1588 8
276#define RRD_RSSQ_MASK 0x0007
277#define RRD_RSSQ_SHIFT 25
278#define RRD_RSSALG_MASK 0x000F
279#define RRD_RSSALG_SHIFT 28
280#define RRD_RSSALG_TCPV6 0x1
281#define RRD_RSSALG_IPV6 0x2
282#define RRD_RSSALG_TCPV4 0x4
283#define RRD_RSSALG_IPV4 0x8
284
285/* rrd word 3 */
286#define RRD_PKTLEN_MASK 0x3FFF
287#define RRD_PKTLEN_SHIFT 0
288#define RRD_ERR_L4_MASK 0x0001
289#define RRD_ERR_L4_SHIFT 14
290#define RRD_ERR_IPV4_MASK 0x0001
291#define RRD_ERR_IPV4_SHIFT 15
292#define RRD_VLTAGGED_MASK 0x0001
293#define RRD_VLTAGGED_SHIFT 16
294#define RRD_OLD_PID_MASK 0x0007
295#define RRD_OLD_PID_SHIFT 17
296#define RRD_ERR_RES_MASK 0x0001
297#define RRD_ERR_RES_SHIFT 20
298#define RRD_ERR_FCS_MASK 0x0001
299#define RRD_ERR_FCS_SHIFT 21
300#define RRD_ERR_FAE_MASK 0x0001
301#define RRD_ERR_FAE_SHIFT 22
302#define RRD_ERR_TRUNC_MASK 0x0001
303#define RRD_ERR_TRUNC_SHIFT 23
304#define RRD_ERR_RUNT_MASK 0x0001
305#define RRD_ERR_RUNT_SHIFT 24
306#define RRD_ERR_ICMP_MASK 0x0001
307#define RRD_ERR_ICMP_SHIFT 25
308#define RRD_BCAST_MASK 0x0001
309#define RRD_BCAST_SHIFT 26
310#define RRD_MCAST_MASK 0x0001
311#define RRD_MCAST_SHIFT 27
312#define RRD_ETHTYPE_MASK 0x0001
313#define RRD_ETHTYPE_SHIFT 28
314#define RRD_ERR_FIFOV_MASK 0x0001
315#define RRD_ERR_FIFOV_SHIFT 29
316#define RRD_ERR_LEN_MASK 0x0001
317#define RRD_ERR_LEN_SHIFT 30
318#define RRD_UPDATED_MASK 0x0001
319#define RRD_UPDATED_SHIFT 31
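/* Illustrative sketch of how the RX path consumes an RRD (this mirrors
 * alx_clean_rx_irq() in main.c; the hardware sets the Updated bit once
 * the descriptor is ready, and the reported length still includes FCS):
 *
 *	if (rrd->word3 & cpu_to_le32(1 << RRD_UPDATED_SHIFT)) {
 *		len = ALX_GET_FIELD(le32_to_cpu(rrd->word3), RRD_PKTLEN);
 *		pid = ALX_GET_FIELD(le32_to_cpu(rrd->word2), RRD_PID);
 *	}
 */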
320
321
322#define ALX_MAX_SETUP_LNK_CYCLE 50
323
324/* for FlowControl */
325#define ALX_FC_RX 0x01
326#define ALX_FC_TX 0x02
327#define ALX_FC_ANEG 0x04
328
329/* for sleep control */
330#define ALX_SLEEP_WOL_PHY 0x00000001
331#define ALX_SLEEP_WOL_MAGIC 0x00000002
332#define ALX_SLEEP_CIFS 0x00000004
333#define ALX_SLEEP_ACTIVE (ALX_SLEEP_WOL_PHY | \
334 ALX_SLEEP_WOL_MAGIC | \
335 ALX_SLEEP_CIFS)
336
337/* for RSS hash type */
338#define ALX_RSS_HASH_TYPE_IPV4 0x1
339#define ALX_RSS_HASH_TYPE_IPV4_TCP 0x2
340#define ALX_RSS_HASH_TYPE_IPV6 0x4
341#define ALX_RSS_HASH_TYPE_IPV6_TCP 0x8
342#define ALX_RSS_HASH_TYPE_ALL (ALX_RSS_HASH_TYPE_IPV4 | \
343 ALX_RSS_HASH_TYPE_IPV4_TCP | \
344 ALX_RSS_HASH_TYPE_IPV6 | \
345 ALX_RSS_HASH_TYPE_IPV6_TCP)
346#define ALX_DEF_RXBUF_SIZE 1536
347#define ALX_MAX_JUMBO_PKT_SIZE (9*1024)
348#define ALX_MAX_TSO_PKT_SIZE (7*1024)
349#define ALX_MAX_FRAME_SIZE ALX_MAX_JUMBO_PKT_SIZE
350#define ALX_MIN_FRAME_SIZE 68
351#define ALX_RAW_MTU(_mtu) (_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)
352
353#define ALX_MAX_RX_QUEUES 8
354#define ALX_MAX_TX_QUEUES 4
355#define ALX_MAX_HANDLED_INTRS 5
356
357#define ALX_ISR_MISC (ALX_ISR_PCIE_LNKDOWN | \
358 ALX_ISR_DMAW | \
359 ALX_ISR_DMAR | \
360 ALX_ISR_SMB | \
361 ALX_ISR_MANU | \
362 ALX_ISR_TIMER)
363
364#define ALX_ISR_FATAL (ALX_ISR_PCIE_LNKDOWN | \
365 ALX_ISR_DMAW | ALX_ISR_DMAR)
366
367#define ALX_ISR_ALERT (ALX_ISR_RXF_OV | \
368 ALX_ISR_TXF_UR | \
369 ALX_ISR_RFD_UR)
370
371#define ALX_ISR_ALL_QUEUES (ALX_ISR_TX_Q0 | \
372 ALX_ISR_TX_Q1 | \
373 ALX_ISR_TX_Q2 | \
374 ALX_ISR_TX_Q3 | \
375 ALX_ISR_RX_Q0 | \
376 ALX_ISR_RX_Q1 | \
377 ALX_ISR_RX_Q2 | \
378 ALX_ISR_RX_Q3 | \
379 ALX_ISR_RX_Q4 | \
380 ALX_ISR_RX_Q5 | \
381 ALX_ISR_RX_Q6 | \
382 ALX_ISR_RX_Q7)
383
384/* maximum interrupt vectors for msix */
385#define ALX_MAX_MSIX_INTRS 16
386
387#define ALX_GET_FIELD(_data, _field) \
388 (((_data) >> _field ## _SHIFT) & _field ## _MASK)
389
390#define ALX_SET_FIELD(_data, _field, _value) do { \
391 (_data) &= ~(_field ## _MASK << _field ## _SHIFT); \
392 (_data) |= ((_value) & _field ## _MASK) << _field ## _SHIFT;\
393 } while (0)
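/* Example usage of the field helpers (hypothetical local variables, same
 * pattern as the rest of the driver):
 *
 *	u32 word1 = 0;
 *	ALX_SET_FIELD(word1, TPD_MSS, mss);
 *	mss = ALX_GET_FIELD(word1, TPD_MSS);
 */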
394
395struct alx_hw {
396 struct pci_dev *pdev;
397 u8 __iomem *hw_addr;
398
399 /* current & permanent mac addr */
400 u8 mac_addr[ETH_ALEN];
401 u8 perm_addr[ETH_ALEN];
402
403 u16 mtu;
404 u16 imt;
405 u8 dma_chnl;
406 u8 max_dma_chnl;
407 /* tpd threshold to trig INT */
408 u32 ith_tpd;
409 u32 rx_ctrl;
410 u32 mc_hash[2];
411
412 u32 smb_timer;
413 /* SPEED_* + DUPLEX_*, SPEED_UNKNOWN if link is down */
414 int link_speed;
415
416 /* auto-neg advertisement or force mode config */
417 u32 adv_cfg;
418 u8 flowctrl;
419
420 u32 sleep_ctrl;
421
422 spinlock_t mdio_lock;
423 struct mdio_if_info mdio;
424 u16 phy_id[2];
425
426 /* PHY link patch flag */
427 bool lnk_patch;
428};
429
430static inline int alx_hw_revision(struct alx_hw *hw)
431{
432 return hw->pdev->revision >> ALX_PCI_REVID_SHIFT;
433}
434
435static inline bool alx_hw_with_cr(struct alx_hw *hw)
436{
437 return hw->pdev->revision & 1;
438}
439
440static inline bool alx_hw_giga(struct alx_hw *hw)
441{
442 return hw->pdev->device & 1;
443}
444
445static inline void alx_write_mem8(struct alx_hw *hw, u32 reg, u8 val)
446{
447 writeb(val, hw->hw_addr + reg);
448}
449
450static inline void alx_write_mem16(struct alx_hw *hw, u32 reg, u16 val)
451{
452 writew(val, hw->hw_addr + reg);
453}
454
455static inline u16 alx_read_mem16(struct alx_hw *hw, u32 reg)
456{
457 return readw(hw->hw_addr + reg);
458}
459
460static inline void alx_write_mem32(struct alx_hw *hw, u32 reg, u32 val)
461{
462 writel(val, hw->hw_addr + reg);
463}
464
465static inline u32 alx_read_mem32(struct alx_hw *hw, u32 reg)
466{
467 return readl(hw->hw_addr + reg);
468}
469
470static inline void alx_post_write(struct alx_hw *hw)
471{
472 readl(hw->hw_addr);
473}
474
475int alx_get_perm_macaddr(struct alx_hw *hw, u8 *addr);
476void alx_reset_phy(struct alx_hw *hw);
477void alx_reset_pcie(struct alx_hw *hw);
478void alx_enable_aspm(struct alx_hw *hw, bool l0s_en, bool l1_en);
479int alx_setup_speed_duplex(struct alx_hw *hw, u32 ethadv, u8 flowctrl);
480void alx_post_phy_link(struct alx_hw *hw);
481int alx_pre_suspend(struct alx_hw *hw, int speed);
482int alx_read_phy_reg(struct alx_hw *hw, u16 reg, u16 *phy_data);
483int alx_write_phy_reg(struct alx_hw *hw, u16 reg, u16 phy_data);
484int alx_read_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 *pdata);
485int alx_write_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 data);
486int alx_get_phy_link(struct alx_hw *hw, int *speed);
487int alx_clear_phy_intr(struct alx_hw *hw);
488int alx_config_wol(struct alx_hw *hw);
489void alx_cfg_mac_flowcontrol(struct alx_hw *hw, u8 fc);
490void alx_start_mac(struct alx_hw *hw);
491int alx_reset_mac(struct alx_hw *hw);
492void alx_set_macaddr(struct alx_hw *hw, const u8 *addr);
493bool alx_phy_configured(struct alx_hw *hw);
494void alx_configure_basic(struct alx_hw *hw);
495void alx_disable_rss(struct alx_hw *hw);
496int alx_select_powersaving_speed(struct alx_hw *hw, int *speed);
497bool alx_get_phy_info(struct alx_hw *hw);
498
499#endif
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
new file mode 100644
index 000000000000..418de8b13165
--- /dev/null
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -0,0 +1,1625 @@
1/*
2 * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
3 *
4 * This file is free software: you may copy, redistribute and/or modify it
5 * under the terms of the GNU General Public License as published by the
6 * Free Software Foundation, either version 2 of the License, or (at your
7 * option) any later version.
8 *
9 * This file is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 *
17 * This file incorporates work covered by the following copyright and
18 * permission notice:
19 *
20 * Copyright (c) 2012 Qualcomm Atheros, Inc.
21 *
22 * Permission to use, copy, modify, and/or distribute this software for any
23 * purpose with or without fee is hereby granted, provided that the above
24 * copyright notice and this permission notice appear in all copies.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
27 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
28 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
29 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
30 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
31 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
32 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
33 */
34
35#include <linux/module.h>
36#include <linux/pci.h>
37#include <linux/interrupt.h>
38#include <linux/ip.h>
39#include <linux/ipv6.h>
40#include <linux/if_vlan.h>
41#include <linux/mdio.h>
42#include <linux/aer.h>
43#include <linux/bitops.h>
44#include <linux/netdevice.h>
45#include <linux/etherdevice.h>
46#include <net/ip6_checksum.h>
47#include <linux/crc32.h>
48#include "alx.h"
49#include "hw.h"
50#include "reg.h"
51
52const char alx_drv_name[] = "alx";
53
54
55static void alx_free_txbuf(struct alx_priv *alx, int entry)
56{
57 struct alx_buffer *txb = &alx->txq.bufs[entry];
58
59 if (dma_unmap_len(txb, size)) {
60 dma_unmap_single(&alx->hw.pdev->dev,
61 dma_unmap_addr(txb, dma),
62 dma_unmap_len(txb, size),
63 DMA_TO_DEVICE);
64 dma_unmap_len_set(txb, size, 0);
65 }
66
67 if (txb->skb) {
68 dev_kfree_skb_any(txb->skb);
69 txb->skb = NULL;
70 }
71}
72
73static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
74{
75 struct alx_rx_queue *rxq = &alx->rxq;
76 struct sk_buff *skb;
77 struct alx_buffer *cur_buf;
78 dma_addr_t dma;
79 u16 cur, next, count = 0;
80
81 next = cur = rxq->write_idx;
82 if (++next == alx->rx_ringsz)
83 next = 0;
84 cur_buf = &rxq->bufs[cur];
85
86 while (!cur_buf->skb && next != rxq->read_idx) {
87 struct alx_rfd *rfd = &rxq->rfd[cur];
88
89 skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size, gfp);
90 if (!skb)
91 break;
92 dma = dma_map_single(&alx->hw.pdev->dev,
93 skb->data, alx->rxbuf_size,
94 DMA_FROM_DEVICE);
95 if (dma_mapping_error(&alx->hw.pdev->dev, dma)) {
96 dev_kfree_skb(skb);
97 break;
98 }
99
100 /* Unfortunately, RX descriptor buffers must be 4-byte
101 * aligned, so we can't use IP alignment.
102 */
103 if (WARN_ON(dma & 3)) {
104 dev_kfree_skb(skb);
105 break;
106 }
107
108 cur_buf->skb = skb;
109 dma_unmap_len_set(cur_buf, size, alx->rxbuf_size);
110 dma_unmap_addr_set(cur_buf, dma, dma);
111 rfd->addr = cpu_to_le64(dma);
112
113 cur = next;
114 if (++next == alx->rx_ringsz)
115 next = 0;
116 cur_buf = &rxq->bufs[cur];
117 count++;
118 }
119
120 if (count) {
121 /* flush all updates before updating hardware */
122 wmb();
123 rxq->write_idx = cur;
124 alx_write_mem16(&alx->hw, ALX_RFD_PIDX, cur);
125 }
126
127 return count;
128}
129
130static inline int alx_tpd_avail(struct alx_priv *alx)
131{
132 struct alx_tx_queue *txq = &alx->txq;
133
134 if (txq->write_idx >= txq->read_idx)
135 return alx->tx_ringsz + txq->read_idx - txq->write_idx - 1;
136 return txq->read_idx - txq->write_idx - 1;
137}
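/* Worked example of the ring math above (illustrative numbers): with
 * tx_ringsz == 256, read_idx == 10 and write_idx == 250 the queue has
 * wrapped, so 256 + 10 - 250 - 1 == 15 descriptors remain free; one
 * slot is always left unused so that read_idx == write_idx can only
 * mean "ring empty".
 */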
138
139static bool alx_clean_tx_irq(struct alx_priv *alx)
140{
141 struct alx_tx_queue *txq = &alx->txq;
142 u16 hw_read_idx, sw_read_idx;
143 unsigned int total_bytes = 0, total_packets = 0;
144 int budget = ALX_DEFAULT_TX_WORK;
145
146 sw_read_idx = txq->read_idx;
147 hw_read_idx = alx_read_mem16(&alx->hw, ALX_TPD_PRI0_CIDX);
148
149 if (sw_read_idx != hw_read_idx) {
150 while (sw_read_idx != hw_read_idx && budget > 0) {
151 struct sk_buff *skb;
152
153 skb = txq->bufs[sw_read_idx].skb;
154 if (skb) {
155 total_bytes += skb->len;
156 total_packets++;
157 budget--;
158 }
159
160 alx_free_txbuf(alx, sw_read_idx);
161
162 if (++sw_read_idx == alx->tx_ringsz)
163 sw_read_idx = 0;
164 }
165 txq->read_idx = sw_read_idx;
166
167 netdev_completed_queue(alx->dev, total_packets, total_bytes);
168 }
169
170 if (netif_queue_stopped(alx->dev) && netif_carrier_ok(alx->dev) &&
171 alx_tpd_avail(alx) > alx->tx_ringsz/4)
172 netif_wake_queue(alx->dev);
173
174 return sw_read_idx == hw_read_idx;
175}
176
177static void alx_schedule_link_check(struct alx_priv *alx)
178{
179 schedule_work(&alx->link_check_wk);
180}
181
182static void alx_schedule_reset(struct alx_priv *alx)
183{
184 schedule_work(&alx->reset_wk);
185}
186
187static bool alx_clean_rx_irq(struct alx_priv *alx, int budget)
188{
189 struct alx_rx_queue *rxq = &alx->rxq;
190 struct alx_rrd *rrd;
191 struct alx_buffer *rxb;
192 struct sk_buff *skb;
193 u16 length, rfd_cleaned = 0;
194
195 while (budget > 0) {
196 rrd = &rxq->rrd[rxq->rrd_read_idx];
197 if (!(rrd->word3 & cpu_to_le32(1 << RRD_UPDATED_SHIFT)))
198 break;
199 rrd->word3 &= ~cpu_to_le32(1 << RRD_UPDATED_SHIFT);
200
201 if (ALX_GET_FIELD(le32_to_cpu(rrd->word0),
202 RRD_SI) != rxq->read_idx ||
203 ALX_GET_FIELD(le32_to_cpu(rrd->word0),
204 RRD_NOR) != 1) {
205 alx_schedule_reset(alx);
206 return 0;
207 }
208
209 rxb = &rxq->bufs[rxq->read_idx];
210 dma_unmap_single(&alx->hw.pdev->dev,
211 dma_unmap_addr(rxb, dma),
212 dma_unmap_len(rxb, size),
213 DMA_FROM_DEVICE);
214 dma_unmap_len_set(rxb, size, 0);
215 skb = rxb->skb;
216 rxb->skb = NULL;
217
218 if (rrd->word3 & cpu_to_le32(1 << RRD_ERR_RES_SHIFT) ||
219 rrd->word3 & cpu_to_le32(1 << RRD_ERR_LEN_SHIFT)) {
220 rrd->word3 = 0;
221 dev_kfree_skb_any(skb);
222 goto next_pkt;
223 }
224
225 length = ALX_GET_FIELD(le32_to_cpu(rrd->word3),
226 RRD_PKTLEN) - ETH_FCS_LEN;
227 skb_put(skb, length);
228 skb->protocol = eth_type_trans(skb, alx->dev);
229
230 skb_checksum_none_assert(skb);
231 if (alx->dev->features & NETIF_F_RXCSUM &&
232 !(rrd->word3 & (cpu_to_le32(1 << RRD_ERR_L4_SHIFT) |
233 cpu_to_le32(1 << RRD_ERR_IPV4_SHIFT)))) {
234 switch (ALX_GET_FIELD(le32_to_cpu(rrd->word2),
235 RRD_PID)) {
236 case RRD_PID_IPV6UDP:
237 case RRD_PID_IPV4UDP:
238 case RRD_PID_IPV4TCP:
239 case RRD_PID_IPV6TCP:
240 skb->ip_summed = CHECKSUM_UNNECESSARY;
241 break;
242 }
243 }
244
245 napi_gro_receive(&alx->napi, skb);
246 budget--;
247
248next_pkt:
249 if (++rxq->read_idx == alx->rx_ringsz)
250 rxq->read_idx = 0;
251 if (++rxq->rrd_read_idx == alx->rx_ringsz)
252 rxq->rrd_read_idx = 0;
253
254 if (++rfd_cleaned > ALX_RX_ALLOC_THRESH)
255 rfd_cleaned -= alx_refill_rx_ring(alx, GFP_ATOMIC);
256 }
257
258 if (rfd_cleaned)
259 alx_refill_rx_ring(alx, GFP_ATOMIC);
260
261 return budget > 0;
262}
263
264static int alx_poll(struct napi_struct *napi, int budget)
265{
266 struct alx_priv *alx = container_of(napi, struct alx_priv, napi);
267 struct alx_hw *hw = &alx->hw;
268 bool complete = true;
269 unsigned long flags;
270
271 complete = alx_clean_tx_irq(alx) &&
272 alx_clean_rx_irq(alx, budget);
273
274 if (!complete)
275 return 1;
276
277 napi_complete(&alx->napi);
278
279 /* enable interrupt */
280 spin_lock_irqsave(&alx->irq_lock, flags);
281 alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0;
282 alx_write_mem32(hw, ALX_IMR, alx->int_mask);
283 spin_unlock_irqrestore(&alx->irq_lock, flags);
284
285 alx_post_write(hw);
286
287 return 0;
288}
289
290static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr)
291{
292 struct alx_hw *hw = &alx->hw;
293 bool write_int_mask = false;
294
295 spin_lock(&alx->irq_lock);
296
297 /* ACK interrupt */
298 alx_write_mem32(hw, ALX_ISR, intr | ALX_ISR_DIS);
299 intr &= alx->int_mask;
300
301 if (intr & ALX_ISR_FATAL) {
302 netif_warn(alx, hw, alx->dev,
303 "fatal interrupt 0x%x, resetting\n", intr);
304 alx_schedule_reset(alx);
305 goto out;
306 }
307
308 if (intr & ALX_ISR_ALERT)
309 netdev_warn(alx->dev, "alert interrupt: 0x%x\n", intr);
310
311 if (intr & ALX_ISR_PHY) {
312		/* suppress the PHY interrupt: its source is internal
313		 * to the PHY, and the interrupt status can only be
314		 * cleared once that internal status has been cleared.
315		 */
316 alx->int_mask &= ~ALX_ISR_PHY;
317 write_int_mask = true;
318 alx_schedule_link_check(alx);
319 }
320
321 if (intr & (ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0)) {
322 napi_schedule(&alx->napi);
323 /* mask rx/tx interrupt, enable them when napi complete */
324 alx->int_mask &= ~ALX_ISR_ALL_QUEUES;
325 write_int_mask = true;
326 }
327
328 if (write_int_mask)
329 alx_write_mem32(hw, ALX_IMR, alx->int_mask);
330
331 alx_write_mem32(hw, ALX_ISR, 0);
332
333 out:
334 spin_unlock(&alx->irq_lock);
335 return IRQ_HANDLED;
336}
337
338static irqreturn_t alx_intr_msi(int irq, void *data)
339{
340 struct alx_priv *alx = data;
341
342 return alx_intr_handle(alx, alx_read_mem32(&alx->hw, ALX_ISR));
343}
344
345static irqreturn_t alx_intr_legacy(int irq, void *data)
346{
347 struct alx_priv *alx = data;
348 struct alx_hw *hw = &alx->hw;
349 u32 intr;
350
351 intr = alx_read_mem32(hw, ALX_ISR);
352
353 if (intr & ALX_ISR_DIS || !(intr & alx->int_mask))
354 return IRQ_NONE;
355
356 return alx_intr_handle(alx, intr);
357}
358
359static void alx_init_ring_ptrs(struct alx_priv *alx)
360{
361 struct alx_hw *hw = &alx->hw;
362 u32 addr_hi = ((u64)alx->descmem.dma) >> 32;
363
364 alx->rxq.read_idx = 0;
365 alx->rxq.write_idx = 0;
366 alx->rxq.rrd_read_idx = 0;
367 alx_write_mem32(hw, ALX_RX_BASE_ADDR_HI, addr_hi);
368 alx_write_mem32(hw, ALX_RRD_ADDR_LO, alx->rxq.rrd_dma);
369 alx_write_mem32(hw, ALX_RRD_RING_SZ, alx->rx_ringsz);
370 alx_write_mem32(hw, ALX_RFD_ADDR_LO, alx->rxq.rfd_dma);
371 alx_write_mem32(hw, ALX_RFD_RING_SZ, alx->rx_ringsz);
372 alx_write_mem32(hw, ALX_RFD_BUF_SZ, alx->rxbuf_size);
373
374 alx->txq.read_idx = 0;
375 alx->txq.write_idx = 0;
376 alx_write_mem32(hw, ALX_TX_BASE_ADDR_HI, addr_hi);
377 alx_write_mem32(hw, ALX_TPD_PRI0_ADDR_LO, alx->txq.tpd_dma);
378 alx_write_mem32(hw, ALX_TPD_RING_SZ, alx->tx_ringsz);
379
380 /* load these pointers into the chip */
381 alx_write_mem32(hw, ALX_SRAM9, ALX_SRAM_LOAD_PTR);
382}
383
384static void alx_free_txring_buf(struct alx_priv *alx)
385{
386 struct alx_tx_queue *txq = &alx->txq;
387 int i;
388
389 if (!txq->bufs)
390 return;
391
392 for (i = 0; i < alx->tx_ringsz; i++)
393 alx_free_txbuf(alx, i);
394
395 memset(txq->bufs, 0, alx->tx_ringsz * sizeof(struct alx_buffer));
396 memset(txq->tpd, 0, alx->tx_ringsz * sizeof(struct alx_txd));
397 txq->write_idx = 0;
398 txq->read_idx = 0;
399
400 netdev_reset_queue(alx->dev);
401}
402
403static void alx_free_rxring_buf(struct alx_priv *alx)
404{
405 struct alx_rx_queue *rxq = &alx->rxq;
406 struct alx_buffer *cur_buf;
407 u16 i;
408
409 if (rxq == NULL)
410 return;
411
412 for (i = 0; i < alx->rx_ringsz; i++) {
413 cur_buf = rxq->bufs + i;
414 if (cur_buf->skb) {
415 dma_unmap_single(&alx->hw.pdev->dev,
416 dma_unmap_addr(cur_buf, dma),
417 dma_unmap_len(cur_buf, size),
418 DMA_FROM_DEVICE);
419 dev_kfree_skb(cur_buf->skb);
420 cur_buf->skb = NULL;
421 dma_unmap_len_set(cur_buf, size, 0);
422 dma_unmap_addr_set(cur_buf, dma, 0);
423 }
424 }
425
426 rxq->write_idx = 0;
427 rxq->read_idx = 0;
428 rxq->rrd_read_idx = 0;
429}
430
431static void alx_free_buffers(struct alx_priv *alx)
432{
433 alx_free_txring_buf(alx);
434 alx_free_rxring_buf(alx);
435}
436
437static int alx_reinit_rings(struct alx_priv *alx)
438{
439 alx_free_buffers(alx);
440
441 alx_init_ring_ptrs(alx);
442
443 if (!alx_refill_rx_ring(alx, GFP_KERNEL))
444 return -ENOMEM;
445
446 return 0;
447}
448
449static void alx_add_mc_addr(struct alx_hw *hw, const u8 *addr, u32 *mc_hash)
450{
451 u32 crc32, bit, reg;
452
453 crc32 = ether_crc(ETH_ALEN, addr);
454 reg = (crc32 >> 31) & 0x1;
455 bit = (crc32 >> 26) & 0x1F;
456
457 mc_hash[reg] |= BIT(bit);
458}
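/* Illustrative reading of the hash above: the top CRC bit selects one of
 * the two 32-bit hash-table registers (ALX_HASH_TBL0/1) and CRC bits
 * 30..26 select which of its 32 bits to set, so every multicast address
 * maps to exactly one bit of the 64-bit filter.
 */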
459
460static void __alx_set_rx_mode(struct net_device *netdev)
461{
462 struct alx_priv *alx = netdev_priv(netdev);
463 struct alx_hw *hw = &alx->hw;
464 struct netdev_hw_addr *ha;
465 u32 mc_hash[2] = {};
466
467 if (!(netdev->flags & IFF_ALLMULTI)) {
468 netdev_for_each_mc_addr(ha, netdev)
469 alx_add_mc_addr(hw, ha->addr, mc_hash);
470
471 alx_write_mem32(hw, ALX_HASH_TBL0, mc_hash[0]);
472 alx_write_mem32(hw, ALX_HASH_TBL1, mc_hash[1]);
473 }
474
475 hw->rx_ctrl &= ~(ALX_MAC_CTRL_MULTIALL_EN | ALX_MAC_CTRL_PROMISC_EN);
476 if (netdev->flags & IFF_PROMISC)
477 hw->rx_ctrl |= ALX_MAC_CTRL_PROMISC_EN;
478 if (netdev->flags & IFF_ALLMULTI)
479 hw->rx_ctrl |= ALX_MAC_CTRL_MULTIALL_EN;
480
481 alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
482}
483
484static void alx_set_rx_mode(struct net_device *netdev)
485{
486 __alx_set_rx_mode(netdev);
487}
488
489static int alx_set_mac_address(struct net_device *netdev, void *data)
490{
491 struct alx_priv *alx = netdev_priv(netdev);
492 struct alx_hw *hw = &alx->hw;
493 struct sockaddr *addr = data;
494
495 if (!is_valid_ether_addr(addr->sa_data))
496 return -EADDRNOTAVAIL;
497
498 if (netdev->addr_assign_type & NET_ADDR_RANDOM)
499 netdev->addr_assign_type ^= NET_ADDR_RANDOM;
500
501 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
502 memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
503 alx_set_macaddr(hw, hw->mac_addr);
504
505 return 0;
506}
507
508static int alx_alloc_descriptors(struct alx_priv *alx)
509{
510 alx->txq.bufs = kcalloc(alx->tx_ringsz,
511 sizeof(struct alx_buffer),
512 GFP_KERNEL);
513 if (!alx->txq.bufs)
514 return -ENOMEM;
515
516 alx->rxq.bufs = kcalloc(alx->rx_ringsz,
517 sizeof(struct alx_buffer),
518 GFP_KERNEL);
519 if (!alx->rxq.bufs)
520 goto out_free;
521
522 /* physical tx/rx ring descriptors
523 *
524 * Allocate them as a single chunk because they must not cross a
525 * 4G boundary (hardware has a single register for high 32 bits
526 * of addresses only)
527 */
528 alx->descmem.size = sizeof(struct alx_txd) * alx->tx_ringsz +
529 sizeof(struct alx_rrd) * alx->rx_ringsz +
530 sizeof(struct alx_rfd) * alx->rx_ringsz;
531 alx->descmem.virt = dma_zalloc_coherent(&alx->hw.pdev->dev,
532 alx->descmem.size,
533 &alx->descmem.dma,
534 GFP_KERNEL);
535 if (!alx->descmem.virt)
536 goto out_free;
537
538 alx->txq.tpd = (void *)alx->descmem.virt;
539 alx->txq.tpd_dma = alx->descmem.dma;
540
541 /* alignment requirement for next block */
542 BUILD_BUG_ON(sizeof(struct alx_txd) % 8);
543
544 alx->rxq.rrd =
545 (void *)((u8 *)alx->descmem.virt +
546 sizeof(struct alx_txd) * alx->tx_ringsz);
547 alx->rxq.rrd_dma = alx->descmem.dma +
548 sizeof(struct alx_txd) * alx->tx_ringsz;
549
550 /* alignment requirement for next block */
551 BUILD_BUG_ON(sizeof(struct alx_rrd) % 8);
552
553 alx->rxq.rfd =
554 (void *)((u8 *)alx->descmem.virt +
555 sizeof(struct alx_txd) * alx->tx_ringsz +
556 sizeof(struct alx_rrd) * alx->rx_ringsz);
557 alx->rxq.rfd_dma = alx->descmem.dma +
558 sizeof(struct alx_txd) * alx->tx_ringsz +
559 sizeof(struct alx_rrd) * alx->rx_ringsz;
560
561 return 0;
562out_free:
563 kfree(alx->txq.bufs);
564 kfree(alx->rxq.bufs);
565 return -ENOMEM;
566}
567
568static int alx_alloc_rings(struct alx_priv *alx)
569{
570 int err;
571
572 err = alx_alloc_descriptors(alx);
573 if (err)
574 return err;
575
576 alx->int_mask &= ~ALX_ISR_ALL_QUEUES;
577 alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0;
578 alx->tx_ringsz = alx->tx_ringsz;
579
580 netif_napi_add(alx->dev, &alx->napi, alx_poll, 64);
581
582 alx_reinit_rings(alx);
583 return 0;
584}
585
586static void alx_free_rings(struct alx_priv *alx)
587{
588 netif_napi_del(&alx->napi);
589 alx_free_buffers(alx);
590
591 kfree(alx->txq.bufs);
592 kfree(alx->rxq.bufs);
593
594 dma_free_coherent(&alx->hw.pdev->dev,
595 alx->descmem.size,
596 alx->descmem.virt,
597 alx->descmem.dma);
598}
599
600static void alx_config_vector_mapping(struct alx_priv *alx)
601{
602 struct alx_hw *hw = &alx->hw;
603
604 alx_write_mem32(hw, ALX_MSI_MAP_TBL1, 0);
605 alx_write_mem32(hw, ALX_MSI_MAP_TBL2, 0);
606 alx_write_mem32(hw, ALX_MSI_ID_MAP, 0);
607}
608
609static void alx_irq_enable(struct alx_priv *alx)
610{
611 struct alx_hw *hw = &alx->hw;
612
613 /* level-1 interrupt switch */
614 alx_write_mem32(hw, ALX_ISR, 0);
615 alx_write_mem32(hw, ALX_IMR, alx->int_mask);
616 alx_post_write(hw);
617}
618
619static void alx_irq_disable(struct alx_priv *alx)
620{
621 struct alx_hw *hw = &alx->hw;
622
623 alx_write_mem32(hw, ALX_ISR, ALX_ISR_DIS);
624 alx_write_mem32(hw, ALX_IMR, 0);
625 alx_post_write(hw);
626
627 synchronize_irq(alx->hw.pdev->irq);
628}
629
630static int alx_request_irq(struct alx_priv *alx)
631{
632 struct pci_dev *pdev = alx->hw.pdev;
633 struct alx_hw *hw = &alx->hw;
634 int err;
635 u32 msi_ctrl;
636
637 msi_ctrl = (hw->imt >> 1) << ALX_MSI_RETRANS_TM_SHIFT;
638
639 if (!pci_enable_msi(alx->hw.pdev)) {
640 alx->msi = true;
641
642 alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER,
643 msi_ctrl | ALX_MSI_MASK_SEL_LINE);
644 err = request_irq(pdev->irq, alx_intr_msi, 0,
645 alx->dev->name, alx);
646 if (!err)
647 goto out;
648 /* fall back to legacy interrupt */
649 pci_disable_msi(alx->hw.pdev);
650 }
651
652 alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER, 0);
653 err = request_irq(pdev->irq, alx_intr_legacy, IRQF_SHARED,
654 alx->dev->name, alx);
655out:
656 if (!err)
657 alx_config_vector_mapping(alx);
658 return err;
659}
660
661static void alx_free_irq(struct alx_priv *alx)
662{
663 struct pci_dev *pdev = alx->hw.pdev;
664
665 free_irq(pdev->irq, alx);
666
667 if (alx->msi) {
668 pci_disable_msi(alx->hw.pdev);
669 alx->msi = false;
670 }
671}
672
673static int alx_identify_hw(struct alx_priv *alx)
674{
675 struct alx_hw *hw = &alx->hw;
676 int rev = alx_hw_revision(hw);
677
678 if (rev > ALX_REV_C0)
679 return -EINVAL;
680
681 hw->max_dma_chnl = rev >= ALX_REV_B0 ? 4 : 2;
682
683 return 0;
684}
685
686static int alx_init_sw(struct alx_priv *alx)
687{
688 struct pci_dev *pdev = alx->hw.pdev;
689 struct alx_hw *hw = &alx->hw;
690 int err;
691
692 err = alx_identify_hw(alx);
693 if (err) {
694 dev_err(&pdev->dev, "unrecognized chip, aborting\n");
695 return err;
696 }
697
698 alx->hw.lnk_patch =
699 pdev->device == ALX_DEV_ID_AR8161 &&
700 pdev->subsystem_vendor == PCI_VENDOR_ID_ATTANSIC &&
701 pdev->subsystem_device == 0x0091 &&
702 pdev->revision == 0;
703
704 hw->smb_timer = 400;
705 hw->mtu = alx->dev->mtu;
706 alx->rxbuf_size = ALIGN(ALX_RAW_MTU(hw->mtu), 8);
707 alx->tx_ringsz = 256;
708 alx->rx_ringsz = 512;
709 hw->sleep_ctrl = ALX_SLEEP_WOL_MAGIC | ALX_SLEEP_WOL_PHY;
710 hw->imt = 200;
711 alx->int_mask = ALX_ISR_MISC;
712 hw->dma_chnl = hw->max_dma_chnl;
713 hw->ith_tpd = alx->tx_ringsz / 3;
714 hw->link_speed = SPEED_UNKNOWN;
715 hw->adv_cfg = ADVERTISED_Autoneg |
716 ADVERTISED_10baseT_Half |
717 ADVERTISED_10baseT_Full |
718 ADVERTISED_100baseT_Full |
719 ADVERTISED_100baseT_Half |
720 ADVERTISED_1000baseT_Full;
721 hw->flowctrl = ALX_FC_ANEG | ALX_FC_RX | ALX_FC_TX;
722
723 hw->rx_ctrl = ALX_MAC_CTRL_WOLSPED_SWEN |
724 ALX_MAC_CTRL_MHASH_ALG_HI5B |
725 ALX_MAC_CTRL_BRD_EN |
726 ALX_MAC_CTRL_PCRCE |
727 ALX_MAC_CTRL_CRCE |
728 ALX_MAC_CTRL_RXFC_EN |
729 ALX_MAC_CTRL_TXFC_EN |
730 7 << ALX_MAC_CTRL_PRMBLEN_SHIFT;
731
732 return err;
733}
734
735
736static netdev_features_t alx_fix_features(struct net_device *netdev,
737 netdev_features_t features)
738{
739 if (netdev->mtu > ALX_MAX_TSO_PKT_SIZE)
740 features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
741
742 return features;
743}
744
745static void alx_netif_stop(struct alx_priv *alx)
746{
747 alx->dev->trans_start = jiffies;
748 if (netif_carrier_ok(alx->dev)) {
749 netif_carrier_off(alx->dev);
750 netif_tx_disable(alx->dev);
751 napi_disable(&alx->napi);
752 }
753}
754
755static void alx_halt(struct alx_priv *alx)
756{
757 struct alx_hw *hw = &alx->hw;
758
759 alx_netif_stop(alx);
760 hw->link_speed = SPEED_UNKNOWN;
761
762 alx_reset_mac(hw);
763
764 /* disable l0s/l1 */
765 alx_enable_aspm(hw, false, false);
766 alx_irq_disable(alx);
767 alx_free_buffers(alx);
768}
769
770static void alx_configure(struct alx_priv *alx)
771{
772 struct alx_hw *hw = &alx->hw;
773
774 alx_configure_basic(hw);
775 alx_disable_rss(hw);
776 __alx_set_rx_mode(alx->dev);
777
778 alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
779}
780
781static void alx_activate(struct alx_priv *alx)
782{
783	/* hardware settings were lost, restore them */
784 alx_reinit_rings(alx);
785 alx_configure(alx);
786
787 /* clear old interrupts */
788 alx_write_mem32(&alx->hw, ALX_ISR, ~(u32)ALX_ISR_DIS);
789
790 alx_irq_enable(alx);
791
792 alx_schedule_link_check(alx);
793}
794
795static void alx_reinit(struct alx_priv *alx)
796{
797 ASSERT_RTNL();
798
799 alx_halt(alx);
800 alx_activate(alx);
801}
802
803static int alx_change_mtu(struct net_device *netdev, int mtu)
804{
805 struct alx_priv *alx = netdev_priv(netdev);
806 int max_frame = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
807
808 if ((max_frame < ALX_MIN_FRAME_SIZE) ||
809 (max_frame > ALX_MAX_FRAME_SIZE))
810 return -EINVAL;
811
812 if (netdev->mtu == mtu)
813 return 0;
814
815 netdev->mtu = mtu;
816 alx->hw.mtu = mtu;
817 alx->rxbuf_size = mtu > ALX_DEF_RXBUF_SIZE ?
818 ALIGN(max_frame, 8) : ALX_DEF_RXBUF_SIZE;
819 netdev_update_features(netdev);
820 if (netif_running(netdev))
821 alx_reinit(alx);
822 return 0;
823}
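/* Sizing example (uses the standard kernel constants ETH_HLEN == 14,
 * ETH_FCS_LEN == 4 and VLAN_HLEN == 4): an MTU of 1500 gives
 * max_frame == 1522, which still fits the default 1536-byte receive
 * buffer, so rxbuf_size only grows (to max_frame aligned up to 8 bytes)
 * once the MTU itself exceeds ALX_DEF_RXBUF_SIZE.
 */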
824
825static void alx_netif_start(struct alx_priv *alx)
826{
827 netif_tx_wake_all_queues(alx->dev);
828 napi_enable(&alx->napi);
829 netif_carrier_on(alx->dev);
830}
831
832static int __alx_open(struct alx_priv *alx, bool resume)
833{
834 int err;
835
836 if (!resume)
837 netif_carrier_off(alx->dev);
838
839 err = alx_alloc_rings(alx);
840 if (err)
841 return err;
842
843 alx_configure(alx);
844
845 err = alx_request_irq(alx);
846 if (err)
847 goto out_free_rings;
848
849 /* clear old interrupts */
850 alx_write_mem32(&alx->hw, ALX_ISR, ~(u32)ALX_ISR_DIS);
851
852 alx_irq_enable(alx);
853
854 if (!resume)
855 netif_tx_start_all_queues(alx->dev);
856
857 alx_schedule_link_check(alx);
858 return 0;
859
860out_free_rings:
861 alx_free_rings(alx);
862 return err;
863}
864
865static void __alx_stop(struct alx_priv *alx)
866{
867 alx_halt(alx);
868 alx_free_irq(alx);
869 alx_free_rings(alx);
870}
871
872static const char *alx_speed_desc(u16 speed)
873{
874 switch (speed) {
875 case SPEED_1000 + DUPLEX_FULL:
876 return "1 Gbps Full";
877 case SPEED_100 + DUPLEX_FULL:
878 return "100 Mbps Full";
879 case SPEED_100 + DUPLEX_HALF:
880 return "100 Mbps Half";
881 case SPEED_10 + DUPLEX_FULL:
882 return "10 Mbps Full";
883 case SPEED_10 + DUPLEX_HALF:
884 return "10 Mbps Half";
885 default:
886 return "Unknown speed";
887 }
888}
889
890static void alx_check_link(struct alx_priv *alx)
891{
892 struct alx_hw *hw = &alx->hw;
893 unsigned long flags;
894 int speed, old_speed;
895 int err;
896
897 /* clear PHY internal interrupt status, otherwise the main
898 * interrupt status will be asserted forever
899 */
900 alx_clear_phy_intr(hw);
901
902 err = alx_get_phy_link(hw, &speed);
903 if (err < 0)
904 goto reset;
905
906 spin_lock_irqsave(&alx->irq_lock, flags);
907 alx->int_mask |= ALX_ISR_PHY;
908 alx_write_mem32(hw, ALX_IMR, alx->int_mask);
909 spin_unlock_irqrestore(&alx->irq_lock, flags);
910
911 old_speed = hw->link_speed;
912
913 if (old_speed == speed)
914 return;
915 hw->link_speed = speed;
916
917 if (speed != SPEED_UNKNOWN) {
918 netif_info(alx, link, alx->dev,
919 "NIC Up: %s\n", alx_speed_desc(speed));
920 alx_post_phy_link(hw);
921 alx_enable_aspm(hw, true, true);
922 alx_start_mac(hw);
923
924 if (old_speed == SPEED_UNKNOWN)
925 alx_netif_start(alx);
926 } else {
927 /* link is now down */
928 alx_netif_stop(alx);
929 netif_info(alx, link, alx->dev, "Link Down\n");
930 err = alx_reset_mac(hw);
931 if (err)
932 goto reset;
933 alx_irq_disable(alx);
934
935 /* MAC reset causes all HW settings to be lost, restore all */
936 err = alx_reinit_rings(alx);
937 if (err)
938 goto reset;
939 alx_configure(alx);
940 alx_enable_aspm(hw, false, true);
941 alx_post_phy_link(hw);
942 alx_irq_enable(alx);
943 }
944
945 return;
946
947reset:
948 alx_schedule_reset(alx);
949}
950
951static int alx_open(struct net_device *netdev)
952{
953 return __alx_open(netdev_priv(netdev), false);
954}
955
956static int alx_stop(struct net_device *netdev)
957{
958 __alx_stop(netdev_priv(netdev));
959 return 0;
960}
961
962static int __alx_shutdown(struct pci_dev *pdev, bool *wol_en)
963{
964 struct alx_priv *alx = pci_get_drvdata(pdev);
965 struct net_device *netdev = alx->dev;
966 struct alx_hw *hw = &alx->hw;
967 int err, speed;
968
969 netif_device_detach(netdev);
970
971 if (netif_running(netdev))
972 __alx_stop(alx);
973
974#ifdef CONFIG_PM_SLEEP
975 err = pci_save_state(pdev);
976 if (err)
977 return err;
978#endif
979
980 err = alx_select_powersaving_speed(hw, &speed);
981 if (err)
982 return err;
983 err = alx_clear_phy_intr(hw);
984 if (err)
985 return err;
986 err = alx_pre_suspend(hw, speed);
987 if (err)
988 return err;
989 err = alx_config_wol(hw);
990 if (err)
991 return err;
992
993 *wol_en = false;
994 if (hw->sleep_ctrl & ALX_SLEEP_ACTIVE) {
995 netif_info(alx, wol, netdev,
996 "wol: ctrl=%X, speed=%X\n",
997 hw->sleep_ctrl, speed);
998 device_set_wakeup_enable(&pdev->dev, true);
999 *wol_en = true;
1000 }
1001
1002 pci_disable_device(pdev);
1003
1004 return 0;
1005}
1006
1007static void alx_shutdown(struct pci_dev *pdev)
1008{
1009 int err;
1010 bool wol_en;
1011
1012 err = __alx_shutdown(pdev, &wol_en);
1013 if (!err) {
1014 pci_wake_from_d3(pdev, wol_en);
1015 pci_set_power_state(pdev, PCI_D3hot);
1016 } else {
1017 dev_err(&pdev->dev, "shutdown fail %d\n", err);
1018 }
1019}
1020
1021static void alx_link_check(struct work_struct *work)
1022{
1023 struct alx_priv *alx;
1024
1025 alx = container_of(work, struct alx_priv, link_check_wk);
1026
1027 rtnl_lock();
1028 alx_check_link(alx);
1029 rtnl_unlock();
1030}
1031
1032static void alx_reset(struct work_struct *work)
1033{
1034 struct alx_priv *alx = container_of(work, struct alx_priv, reset_wk);
1035
1036 rtnl_lock();
1037 alx_reinit(alx);
1038 rtnl_unlock();
1039}
1040
1041static int alx_tx_csum(struct sk_buff *skb, struct alx_txd *first)
1042{
1043 u8 cso, css;
1044
1045 if (skb->ip_summed != CHECKSUM_PARTIAL)
1046 return 0;
1047
1048 cso = skb_checksum_start_offset(skb);
1049 if (cso & 1)
1050 return -EINVAL;
1051
1052 css = cso + skb->csum_offset;
1053 first->word1 |= cpu_to_le32((cso >> 1) << TPD_CXSUMSTART_SHIFT);
1054 first->word1 |= cpu_to_le32((css >> 1) << TPD_CXSUMOFFSET_SHIFT);
1055 first->word1 |= cpu_to_le32(1 << TPD_CXSUM_EN_SHIFT);
1056
1057 return 0;
1058}
1059
1060static int alx_map_tx_skb(struct alx_priv *alx, struct sk_buff *skb)
1061{
1062 struct alx_tx_queue *txq = &alx->txq;
1063 struct alx_txd *tpd, *first_tpd;
1064 dma_addr_t dma;
1065 int maplen, f, first_idx = txq->write_idx;
1066
1067 first_tpd = &txq->tpd[txq->write_idx];
1068 tpd = first_tpd;
1069
1070 maplen = skb_headlen(skb);
1071 dma = dma_map_single(&alx->hw.pdev->dev, skb->data, maplen,
1072 DMA_TO_DEVICE);
1073 if (dma_mapping_error(&alx->hw.pdev->dev, dma))
1074 goto err_dma;
1075
1076 dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen);
1077 dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma);
1078
1079 tpd->adrl.addr = cpu_to_le64(dma);
1080 tpd->len = cpu_to_le16(maplen);
1081
1082 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
1083 struct skb_frag_struct *frag;
1084
1085 frag = &skb_shinfo(skb)->frags[f];
1086
1087 if (++txq->write_idx == alx->tx_ringsz)
1088 txq->write_idx = 0;
1089 tpd = &txq->tpd[txq->write_idx];
1090
1091 tpd->word1 = first_tpd->word1;
1092
1093 maplen = skb_frag_size(frag);
1094 dma = skb_frag_dma_map(&alx->hw.pdev->dev, frag, 0,
1095 maplen, DMA_TO_DEVICE);
1096 if (dma_mapping_error(&alx->hw.pdev->dev, dma))
1097 goto err_dma;
1098 dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen);
1099 dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma);
1100
1101 tpd->adrl.addr = cpu_to_le64(dma);
1102 tpd->len = cpu_to_le16(maplen);
1103 }
1104
1105 /* last TPD, set EOP flag and store skb */
1106 tpd->word1 |= cpu_to_le32(1 << TPD_EOP_SHIFT);
1107 txq->bufs[txq->write_idx].skb = skb;
1108
1109 if (++txq->write_idx == alx->tx_ringsz)
1110 txq->write_idx = 0;
1111
1112 return 0;
1113
1114err_dma:
1115 f = first_idx;
1116 while (f != txq->write_idx) {
1117 alx_free_txbuf(alx, f);
1118 if (++f == alx->tx_ringsz)
1119 f = 0;
1120 }
1121 return -ENOMEM;
1122}
1123
1124static netdev_tx_t alx_start_xmit(struct sk_buff *skb,
1125 struct net_device *netdev)
1126{
1127 struct alx_priv *alx = netdev_priv(netdev);
1128 struct alx_tx_queue *txq = &alx->txq;
1129 struct alx_txd *first;
1130 int tpdreq = skb_shinfo(skb)->nr_frags + 1;
1131
1132 if (alx_tpd_avail(alx) < tpdreq) {
1133 netif_stop_queue(alx->dev);
1134 goto drop;
1135 }
1136
1137 first = &txq->tpd[txq->write_idx];
1138 memset(first, 0, sizeof(*first));
1139
1140 if (alx_tx_csum(skb, first))
1141 goto drop;
1142
1143 if (alx_map_tx_skb(alx, skb) < 0)
1144 goto drop;
1145
1146 netdev_sent_queue(alx->dev, skb->len);
1147
1148 /* flush updates before updating hardware */
1149 wmb();
1150 alx_write_mem16(&alx->hw, ALX_TPD_PRI0_PIDX, txq->write_idx);
1151
1152 if (alx_tpd_avail(alx) < alx->tx_ringsz/8)
1153 netif_stop_queue(alx->dev);
1154
1155 return NETDEV_TX_OK;
1156
1157drop:
1158 dev_kfree_skb(skb);
1159 return NETDEV_TX_OK;
1160}
1161
1162static void alx_tx_timeout(struct net_device *dev)
1163{
1164 struct alx_priv *alx = netdev_priv(dev);
1165
1166 alx_schedule_reset(alx);
1167}
1168
1169static int alx_mdio_read(struct net_device *netdev,
1170 int prtad, int devad, u16 addr)
1171{
1172 struct alx_priv *alx = netdev_priv(netdev);
1173 struct alx_hw *hw = &alx->hw;
1174 u16 val;
1175 int err;
1176
1177 if (prtad != hw->mdio.prtad)
1178 return -EINVAL;
1179
1180 if (devad == MDIO_DEVAD_NONE)
1181 err = alx_read_phy_reg(hw, addr, &val);
1182 else
1183 err = alx_read_phy_ext(hw, devad, addr, &val);
1184
1185 if (err)
1186 return err;
1187 return val;
1188}
1189
1190static int alx_mdio_write(struct net_device *netdev,
1191 int prtad, int devad, u16 addr, u16 val)
1192{
1193 struct alx_priv *alx = netdev_priv(netdev);
1194 struct alx_hw *hw = &alx->hw;
1195
1196 if (prtad != hw->mdio.prtad)
1197 return -EINVAL;
1198
1199 if (devad == MDIO_DEVAD_NONE)
1200 return alx_write_phy_reg(hw, addr, val);
1201
1202 return alx_write_phy_ext(hw, devad, addr, val);
1203}
1204
1205static int alx_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1206{
1207 struct alx_priv *alx = netdev_priv(netdev);
1208
1209 if (!netif_running(netdev))
1210 return -EAGAIN;
1211
1212 return mdio_mii_ioctl(&alx->hw.mdio, if_mii(ifr), cmd);
1213}
1214
1215#ifdef CONFIG_NET_POLL_CONTROLLER
1216static void alx_poll_controller(struct net_device *netdev)
1217{
1218 struct alx_priv *alx = netdev_priv(netdev);
1219
1220 if (alx->msi)
1221 alx_intr_msi(0, alx);
1222 else
1223 alx_intr_legacy(0, alx);
1224}
1225#endif
1226
1227static const struct net_device_ops alx_netdev_ops = {
1228 .ndo_open = alx_open,
1229 .ndo_stop = alx_stop,
1230 .ndo_start_xmit = alx_start_xmit,
1231 .ndo_set_rx_mode = alx_set_rx_mode,
1232 .ndo_validate_addr = eth_validate_addr,
1233 .ndo_set_mac_address = alx_set_mac_address,
1234 .ndo_change_mtu = alx_change_mtu,
1235 .ndo_do_ioctl = alx_ioctl,
1236 .ndo_tx_timeout = alx_tx_timeout,
1237 .ndo_fix_features = alx_fix_features,
1238#ifdef CONFIG_NET_POLL_CONTROLLER
1239 .ndo_poll_controller = alx_poll_controller,
1240#endif
1241};
1242
1243static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1244{
1245 struct net_device *netdev;
1246 struct alx_priv *alx;
1247 struct alx_hw *hw;
1248 bool phy_configured;
1249 int bars, pm_cap, err;
1250
1251 err = pci_enable_device_mem(pdev);
1252 if (err)
1253 return err;
1254
1255 /* The alx chip can DMA to 64-bit addresses, but it uses a single
1256 * shared register for the high 32 bits, so only a single, aligned,
1257 * 4 GB physical address range can be used for descriptors.
1258 */
1259 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
1260 !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
1261 dev_dbg(&pdev->dev, "DMA to 64-BIT addresses\n");
1262 } else {
1263 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
1264 if (err) {
1265 err = dma_set_coherent_mask(&pdev->dev,
1266 DMA_BIT_MASK(32));
1267 if (err) {
1268 dev_err(&pdev->dev,
1269 "No usable DMA config, aborting\n");
1270 goto out_pci_disable;
1271 }
1272 }
1273 }
1274
1275 bars = pci_select_bars(pdev, IORESOURCE_MEM);
1276 err = pci_request_selected_regions(pdev, bars, alx_drv_name);
1277 if (err) {
1278 dev_err(&pdev->dev,
1279 "pci_request_selected_regions failed(bars:%d)\n", bars);
1280 goto out_pci_disable;
1281 }
1282
1283 pci_enable_pcie_error_reporting(pdev);
1284 pci_set_master(pdev);
1285
1286 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
1287 if (pm_cap == 0) {
1288 dev_err(&pdev->dev,
1289 "Can't find power management capability, aborting\n");
1290 err = -EIO;
1291 goto out_pci_release;
1292 }
1293
1294 err = pci_set_power_state(pdev, PCI_D0);
1295 if (err)
1296 goto out_pci_release;
1297
1298 netdev = alloc_etherdev(sizeof(*alx));
1299 if (!netdev) {
1300 err = -ENOMEM;
1301 goto out_pci_release;
1302 }
1303
1304 SET_NETDEV_DEV(netdev, &pdev->dev);
1305 alx = netdev_priv(netdev);
1306 alx->dev = netdev;
1307 alx->hw.pdev = pdev;
1308 alx->msg_enable = NETIF_MSG_LINK | NETIF_MSG_HW | NETIF_MSG_IFUP |
1309 NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR | NETIF_MSG_WOL;
1310 hw = &alx->hw;
1311 pci_set_drvdata(pdev, alx);
1312
1313 hw->hw_addr = pci_ioremap_bar(pdev, 0);
1314 if (!hw->hw_addr) {
1315 dev_err(&pdev->dev, "cannot map device registers\n");
1316 err = -EIO;
1317 goto out_free_netdev;
1318 }
1319
1320 netdev->netdev_ops = &alx_netdev_ops;
1321 SET_ETHTOOL_OPS(netdev, &alx_ethtool_ops);
1322 netdev->irq = pdev->irq;
1323 netdev->watchdog_timeo = ALX_WATCHDOG_TIME;
1324
1325 if (ent->driver_data & ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG)
1326 pdev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
1327
1328 err = alx_init_sw(alx);
1329 if (err) {
1330 dev_err(&pdev->dev, "net device private data init failed\n");
1331 goto out_unmap;
1332 }
1333
1334 alx_reset_pcie(hw);
1335
1336 phy_configured = alx_phy_configured(hw);
1337
1338 if (!phy_configured)
1339 alx_reset_phy(hw);
1340
1341 err = alx_reset_mac(hw);
1342 if (err) {
1343 dev_err(&pdev->dev, "MAC Reset failed, error = %d\n", err);
1344 goto out_unmap;
1345 }
1346
1347 /* setup link to put it in a known good starting state */
1348 if (!phy_configured) {
1349 err = alx_setup_speed_duplex(hw, hw->adv_cfg, hw->flowctrl);
1350 if (err) {
1351 dev_err(&pdev->dev,
1352 "failed to configure PHY speed/duplex (err=%d)\n",
1353 err);
1354 goto out_unmap;
1355 }
1356 }
1357
1358 netdev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
1359
1360 if (alx_get_perm_macaddr(hw, hw->perm_addr)) {
1361 dev_warn(&pdev->dev,
1362 "Invalid permanent address programmed, using random one\n");
1363 eth_hw_addr_random(netdev);
1364 memcpy(hw->perm_addr, netdev->dev_addr, netdev->addr_len);
1365 }
1366
1367 memcpy(hw->mac_addr, hw->perm_addr, ETH_ALEN);
1368 memcpy(netdev->dev_addr, hw->mac_addr, ETH_ALEN);
1369 memcpy(netdev->perm_addr, hw->perm_addr, ETH_ALEN);
1370
1371 hw->mdio.prtad = 0;
1372 hw->mdio.mmds = 0;
1373 hw->mdio.dev = netdev;
1374 hw->mdio.mode_support = MDIO_SUPPORTS_C45 |
1375 MDIO_SUPPORTS_C22 |
1376 MDIO_EMULATE_C22;
1377 hw->mdio.mdio_read = alx_mdio_read;
1378 hw->mdio.mdio_write = alx_mdio_write;
1379
1380 if (!alx_get_phy_info(hw)) {
1381 dev_err(&pdev->dev, "failed to identify PHY\n");
1382 err = -EIO;
1383 goto out_unmap;
1384 }
1385
1386 INIT_WORK(&alx->link_check_wk, alx_link_check);
1387 INIT_WORK(&alx->reset_wk, alx_reset);
1388 spin_lock_init(&alx->hw.mdio_lock);
1389 spin_lock_init(&alx->irq_lock);
1390
1391 netif_carrier_off(netdev);
1392
1393 err = register_netdev(netdev);
1394 if (err) {
1395 dev_err(&pdev->dev, "register netdevice failed\n");
1396 goto out_unmap;
1397 }
1398
1399 device_set_wakeup_enable(&pdev->dev, hw->sleep_ctrl);
1400
1401 netdev_info(netdev,
1402 "Qualcomm Atheros AR816x/AR817x Ethernet [%pM]\n",
1403 netdev->dev_addr);
1404
1405 return 0;
1406
1407out_unmap:
1408 iounmap(hw->hw_addr);
1409out_free_netdev:
1410 free_netdev(netdev);
1411out_pci_release:
1412 pci_release_selected_regions(pdev, bars);
1413out_pci_disable:
1414 pci_disable_device(pdev);
1415 return err;
1416}
1417
1418static void alx_remove(struct pci_dev *pdev)
1419{
1420 struct alx_priv *alx = pci_get_drvdata(pdev);
1421 struct alx_hw *hw = &alx->hw;
1422
1423 cancel_work_sync(&alx->link_check_wk);
1424 cancel_work_sync(&alx->reset_wk);
1425
1426 /* restore permanent mac address */
1427 alx_set_macaddr(hw, hw->perm_addr);
1428
1429 unregister_netdev(alx->dev);
1430 iounmap(hw->hw_addr);
1431 pci_release_selected_regions(pdev,
1432 pci_select_bars(pdev, IORESOURCE_MEM));
1433
1434 pci_disable_pcie_error_reporting(pdev);
1435 pci_disable_device(pdev);
1436 pci_set_drvdata(pdev, NULL);
1437
1438 free_netdev(alx->dev);
1439}
1440
1441#ifdef CONFIG_PM_SLEEP
1442static int alx_suspend(struct device *dev)
1443{
1444 struct pci_dev *pdev = to_pci_dev(dev);
1445 int err;
1446 bool wol_en;
1447
1448 err = __alx_shutdown(pdev, &wol_en);
1449 if (err) {
1450 dev_err(&pdev->dev, "shutdown fail in suspend %d\n", err);
1451 return err;
1452 }
1453
1454 if (wol_en) {
1455 pci_prepare_to_sleep(pdev);
1456 } else {
1457 pci_wake_from_d3(pdev, false);
1458 pci_set_power_state(pdev, PCI_D3hot);
1459 }
1460
1461 return 0;
1462}
1463
1464static int alx_resume(struct device *dev)
1465{
1466 struct pci_dev *pdev = to_pci_dev(dev);
1467 struct alx_priv *alx = pci_get_drvdata(pdev);
1468 struct net_device *netdev = alx->dev;
1469 struct alx_hw *hw = &alx->hw;
1470 int err;
1471
1472 pci_set_power_state(pdev, PCI_D0);
1473 pci_restore_state(pdev);
1474 pci_save_state(pdev);
1475
1476 pci_enable_wake(pdev, PCI_D3hot, 0);
1477 pci_enable_wake(pdev, PCI_D3cold, 0);
1478
1479 hw->link_speed = SPEED_UNKNOWN;
1480 alx->int_mask = ALX_ISR_MISC;
1481
1482 alx_reset_pcie(hw);
1483 alx_reset_phy(hw);
1484
1485 err = alx_reset_mac(hw);
1486 if (err) {
1487 netif_err(alx, hw, alx->dev,
1488 "resume:reset_mac fail %d\n", err);
1489 return -EIO;
1490 }
1491
1492 err = alx_setup_speed_duplex(hw, hw->adv_cfg, hw->flowctrl);
1493 if (err) {
1494 netif_err(alx, hw, alx->dev,
1495 "resume:setup_speed_duplex fail %d\n", err);
1496 return -EIO;
1497 }
1498
1499 if (netif_running(netdev)) {
1500 err = __alx_open(alx, true);
1501 if (err)
1502 return err;
1503 }
1504
1505 netif_device_attach(netdev);
1506
1507 return err;
1508}
1509#endif
1510
1511static pci_ers_result_t alx_pci_error_detected(struct pci_dev *pdev,
1512 pci_channel_state_t state)
1513{
1514 struct alx_priv *alx = pci_get_drvdata(pdev);
1515 struct net_device *netdev = alx->dev;
1516 pci_ers_result_t rc = PCI_ERS_RESULT_NEED_RESET;
1517
1518 dev_info(&pdev->dev, "pci error detected\n");
1519
1520 rtnl_lock();
1521
1522 if (netif_running(netdev)) {
1523 netif_device_detach(netdev);
1524 alx_halt(alx);
1525 }
1526
1527 if (state == pci_channel_io_perm_failure)
1528 rc = PCI_ERS_RESULT_DISCONNECT;
1529 else
1530 pci_disable_device(pdev);
1531
1532 rtnl_unlock();
1533
1534 return rc;
1535}
1536
1537static pci_ers_result_t alx_pci_error_slot_reset(struct pci_dev *pdev)
1538{
1539 struct alx_priv *alx = pci_get_drvdata(pdev);
1540 struct alx_hw *hw = &alx->hw;
1541 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
1542
1543 dev_info(&pdev->dev, "pci error slot reset\n");
1544
1545 rtnl_lock();
1546
1547 if (pci_enable_device(pdev)) {
1548 dev_err(&pdev->dev, "Failed to re-enable PCI device after reset\n");
1549 goto out;
1550 }
1551
1552 pci_set_master(pdev);
1553 pci_enable_wake(pdev, PCI_D3hot, 0);
1554 pci_enable_wake(pdev, PCI_D3cold, 0);
1555
1556 alx_reset_pcie(hw);
1557 if (!alx_reset_mac(hw))
1558 rc = PCI_ERS_RESULT_RECOVERED;
1559out:
1560 pci_cleanup_aer_uncorrect_error_status(pdev);
1561
1562 rtnl_unlock();
1563
1564 return rc;
1565}
1566
1567static void alx_pci_error_resume(struct pci_dev *pdev)
1568{
1569 struct alx_priv *alx = pci_get_drvdata(pdev);
1570 struct net_device *netdev = alx->dev;
1571
1572 dev_info(&pdev->dev, "pci error resume\n");
1573
1574 rtnl_lock();
1575
1576 if (netif_running(netdev)) {
1577 alx_activate(alx);
1578 netif_device_attach(netdev);
1579 }
1580
1581 rtnl_unlock();
1582}
1583
1584static const struct pci_error_handlers alx_err_handlers = {
1585 .error_detected = alx_pci_error_detected,
1586 .slot_reset = alx_pci_error_slot_reset,
1587 .resume = alx_pci_error_resume,
1588};
1589
1590#ifdef CONFIG_PM_SLEEP
1591static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume);
1592#define ALX_PM_OPS (&alx_pm_ops)
1593#else
1594#define ALX_PM_OPS NULL
1595#endif
1596
1597static DEFINE_PCI_DEVICE_TABLE(alx_pci_tbl) = {
1598 { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8161),
1599 .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
1600 { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2200),
1601 .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
1602 { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8162),
1603 .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
1604 { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8171) },
1605 { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8172) },
1606 {}
1607};
1608
1609static struct pci_driver alx_driver = {
1610 .name = alx_drv_name,
1611 .id_table = alx_pci_tbl,
1612 .probe = alx_probe,
1613 .remove = alx_remove,
1614 .shutdown = alx_shutdown,
1615 .err_handler = &alx_err_handlers,
1616 .driver.pm = ALX_PM_OPS,
1617};
1618
1619module_pci_driver(alx_driver);
1620MODULE_DEVICE_TABLE(pci, alx_pci_tbl);
1621MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
1622MODULE_AUTHOR("Qualcomm Corporation, <nic-devel@qualcomm.com>");
1623MODULE_DESCRIPTION(
1624 "Qualcomm Atheros(R) AR816x/AR817x PCI-E Ethernet Network Driver");
1625MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/atheros/alx/reg.h b/drivers/net/ethernet/atheros/alx/reg.h
new file mode 100644
index 000000000000..e4358c98bc4e
--- /dev/null
+++ b/drivers/net/ethernet/atheros/alx/reg.h
@@ -0,0 +1,810 @@
1/*
2 * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
3 *
4 * This file is free software: you may copy, redistribute and/or modify it
5 * under the terms of the GNU General Public License as published by the
6 * Free Software Foundation, either version 2 of the License, or (at your
7 * option) any later version.
8 *
9 * This file is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 *
17 * This file incorporates work covered by the following copyright and
18 * permission notice:
19 *
20 * Copyright (c) 2012 Qualcomm Atheros, Inc.
21 *
22 * Permission to use, copy, modify, and/or distribute this software for any
23 * purpose with or without fee is hereby granted, provided that the above
24 * copyright notice and this permission notice appear in all copies.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
27 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
28 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
29 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
30 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
31 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
32 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
33 */
34
35#ifndef ALX_REG_H
36#define ALX_REG_H
37
38#define ALX_DEV_ID_AR8161 0x1091
39#define ALX_DEV_ID_E2200 0xe091
40#define ALX_DEV_ID_AR8162 0x1090
41#define ALX_DEV_ID_AR8171 0x10A1
42#define ALX_DEV_ID_AR8172 0x10A0
43
44/* rev definition,
45 * bit(0): with xD support
46 * bit(1): with Card Reader function
47 * bit(7:2): real revision
48 */
49#define ALX_PCI_REVID_SHIFT 3
50#define ALX_REV_A0 0
51#define ALX_REV_A1 1
52#define ALX_REV_B0 2
53#define ALX_REV_C0 3
54
55#define ALX_DEV_CTRL 0x0060
56#define ALX_DEV_CTRL_MAXRRS_MIN 2
57
58#define ALX_MSIX_MASK 0x0090
59
60#define ALX_UE_SVRT 0x010C
61#define ALX_UE_SVRT_FCPROTERR BIT(13)
62#define ALX_UE_SVRT_DLPROTERR BIT(4)
63
64/* eeprom & flash load register */
65#define ALX_EFLD 0x0204
66#define ALX_EFLD_F_EXIST BIT(10)
67#define ALX_EFLD_E_EXIST BIT(9)
68#define ALX_EFLD_STAT BIT(5)
69#define ALX_EFLD_START BIT(0)
70
71/* eFuse load register */
72#define ALX_SLD 0x0218
73#define ALX_SLD_STAT BIT(12)
74#define ALX_SLD_START BIT(11)
75#define ALX_SLD_MAX_TO 100
76
77#define ALX_PDLL_TRNS1 0x1104
78#define ALX_PDLL_TRNS1_D3PLLOFF_EN BIT(11)
79
80#define ALX_PMCTRL 0x12F8
81#define ALX_PMCTRL_HOTRST_WTEN BIT(31)
82/* bit30: L0s/L1 controlled by MAC based on throughput(setting in 15A0) */
83#define ALX_PMCTRL_ASPM_FCEN BIT(30)
84#define ALX_PMCTRL_SADLY_EN BIT(29)
85#define ALX_PMCTRL_LCKDET_TIMER_MASK 0xF
86#define ALX_PMCTRL_LCKDET_TIMER_SHIFT 24
87#define ALX_PMCTRL_LCKDET_TIMER_DEF 0xC
88/* bit[23:20] if pm_request_l1 time > @, then enter L0s not L1 */
89#define ALX_PMCTRL_L1REQ_TO_MASK 0xF
90#define ALX_PMCTRL_L1REQ_TO_SHIFT 20
91#define ALX_PMCTRL_L1REG_TO_DEF 0xF
92#define ALX_PMCTRL_TXL1_AFTER_L0S BIT(19)
93#define ALX_PMCTRL_L1_TIMER_MASK 0x7
94#define ALX_PMCTRL_L1_TIMER_SHIFT 16
95#define ALX_PMCTRL_L1_TIMER_16US 4
96#define ALX_PMCTRL_RCVR_WT_1US BIT(15)
97/* bit13: enable pcie clk switch in L1 state */
98#define ALX_PMCTRL_L1_CLKSW_EN BIT(13)
99#define ALX_PMCTRL_L0S_EN BIT(12)
100#define ALX_PMCTRL_RXL1_AFTER_L0S BIT(11)
101#define ALX_PMCTRL_L1_BUFSRX_EN BIT(7)
102/* bit6: power down serdes RX */
103#define ALX_PMCTRL_L1_SRDSRX_PWD BIT(6)
104#define ALX_PMCTRL_L1_SRDSPLL_EN BIT(5)
105#define ALX_PMCTRL_L1_SRDS_EN BIT(4)
106#define ALX_PMCTRL_L1_EN BIT(3)
107
108/*******************************************************/
109/* following registers are mapped only to memory space */
110/*******************************************************/
111
112#define ALX_MASTER 0x1400
 113/* bit12: 1:always select pclk from serdes, not sw to 25M */
114#define ALX_MASTER_PCLKSEL_SRDS BIT(12)
 115/* bit11: irq modulation for rx */
116#define ALX_MASTER_IRQMOD2_EN BIT(11)
 117/* bit10: irq modulation for tx/rx */
118#define ALX_MASTER_IRQMOD1_EN BIT(10)
119#define ALX_MASTER_SYSALVTIMER_EN BIT(7)
120#define ALX_MASTER_OOB_DIS BIT(6)
121/* bit5: wakeup without pcie clk */
122#define ALX_MASTER_WAKEN_25M BIT(5)
123/* bit0: MAC & DMA reset */
124#define ALX_MASTER_DMA_MAC_RST BIT(0)
125#define ALX_DMA_MAC_RST_TO 50
126
127#define ALX_IRQ_MODU_TIMER 0x1408
128#define ALX_IRQ_MODU_TIMER1_MASK 0xFFFF
129#define ALX_IRQ_MODU_TIMER1_SHIFT 0
130
131#define ALX_PHY_CTRL 0x140C
132#define ALX_PHY_CTRL_100AB_EN BIT(17)
133/* bit14: affect MAC & PHY, go to low power sts */
134#define ALX_PHY_CTRL_POWER_DOWN BIT(14)
135/* bit13: 1:pll always ON, 0:can switch in lpw */
136#define ALX_PHY_CTRL_PLL_ON BIT(13)
137#define ALX_PHY_CTRL_RST_ANALOG BIT(12)
138#define ALX_PHY_CTRL_HIB_PULSE BIT(11)
139#define ALX_PHY_CTRL_HIB_EN BIT(10)
140#define ALX_PHY_CTRL_IDDQ BIT(7)
141#define ALX_PHY_CTRL_GATE_25M BIT(5)
142#define ALX_PHY_CTRL_LED_MODE BIT(2)
143/* bit0: out of dsp RST state */
144#define ALX_PHY_CTRL_DSPRST_OUT BIT(0)
145#define ALX_PHY_CTRL_DSPRST_TO 80
146#define ALX_PHY_CTRL_CLS (ALX_PHY_CTRL_LED_MODE | \
147 ALX_PHY_CTRL_100AB_EN | \
148 ALX_PHY_CTRL_PLL_ON)
149
150#define ALX_MAC_STS 0x1410
151#define ALX_MAC_STS_TXQ_BUSY BIT(3)
152#define ALX_MAC_STS_RXQ_BUSY BIT(2)
153#define ALX_MAC_STS_TXMAC_BUSY BIT(1)
154#define ALX_MAC_STS_RXMAC_BUSY BIT(0)
155#define ALX_MAC_STS_IDLE (ALX_MAC_STS_TXQ_BUSY | \
156 ALX_MAC_STS_RXQ_BUSY | \
157 ALX_MAC_STS_TXMAC_BUSY | \
158 ALX_MAC_STS_RXMAC_BUSY)
159
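ALX_MAC_STS_IDLE groups the four busy flags so a caller can wait for the TX/RX MACs and queues to drain before a reset. A minimal polling sketch, assuming the driver's usual MMIO read helper alx_read_mem32() and a bounded busy-wait (the iteration count and delay below are illustrative, not the driver's exact values):

	/* returns true once all MAC/queue busy bits have cleared */
	static bool alx_mac_idle_sketch(struct alx_hw *hw)
	{
		int i;

		for (i = 0; i < 100; i++) {
			if (!(alx_read_mem32(hw, ALX_MAC_STS) & ALX_MAC_STS_IDLE))
				return true;
			udelay(10);
		}
		return false;
	}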
160#define ALX_MDIO 0x1414
161#define ALX_MDIO_MODE_EXT BIT(30)
162#define ALX_MDIO_BUSY BIT(27)
163#define ALX_MDIO_CLK_SEL_MASK 0x7
164#define ALX_MDIO_CLK_SEL_SHIFT 24
165#define ALX_MDIO_CLK_SEL_25MD4 0
166#define ALX_MDIO_CLK_SEL_25MD128 7
167#define ALX_MDIO_START BIT(23)
168#define ALX_MDIO_SPRES_PRMBL BIT(22)
169/* bit21: 1:read,0:write */
170#define ALX_MDIO_OP_READ BIT(21)
171#define ALX_MDIO_REG_MASK 0x1F
172#define ALX_MDIO_REG_SHIFT 16
173#define ALX_MDIO_DATA_MASK 0xFFFF
174#define ALX_MDIO_DATA_SHIFT 0
175#define ALX_MDIO_MAX_AC_TO 120
176
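The ALX_MDIO fields are composed into a single command word: pick a clock divider, encode the PHY register number and the direction, set ALX_MDIO_START, then poll ALX_MDIO_BUSY until the result lands in the low 16 bits. A hedged sketch of an internal-PHY register read built from the definitions above (the helpers alx_read_mem32()/alx_write_mem32() and the timeout handling are assumptions, not necessarily the driver's exact API):

	static int alx_mdio_read_sketch(struct alx_hw *hw, u16 reg, u16 *val)
	{
		u32 cmd;
		int i;

		cmd = ALX_MDIO_SPRES_PRMBL | ALX_MDIO_START | ALX_MDIO_OP_READ |
		      ALX_MDIO_CLK_SEL_25MD4 << ALX_MDIO_CLK_SEL_SHIFT |
		      (reg & ALX_MDIO_REG_MASK) << ALX_MDIO_REG_SHIFT;
		alx_write_mem32(hw, ALX_MDIO, cmd);

		/* bounded wait for the access to complete */
		for (i = 0; i < ALX_MDIO_MAX_AC_TO; i++) {
			cmd = alx_read_mem32(hw, ALX_MDIO);
			if (!(cmd & ALX_MDIO_BUSY)) {
				*val = (cmd >> ALX_MDIO_DATA_SHIFT) & ALX_MDIO_DATA_MASK;
				return 0;
			}
			udelay(10);
		}
		return -ETIMEDOUT;
	}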
177#define ALX_MDIO_EXTN 0x1448
178#define ALX_MDIO_EXTN_DEVAD_MASK 0x1F
179#define ALX_MDIO_EXTN_DEVAD_SHIFT 16
180#define ALX_MDIO_EXTN_REG_MASK 0xFFFF
181#define ALX_MDIO_EXTN_REG_SHIFT 0
182
183#define ALX_SERDES 0x1424
184#define ALX_SERDES_PHYCLK_SLWDWN BIT(18)
185#define ALX_SERDES_MACCLK_SLWDWN BIT(17)
186
187#define ALX_LPI_CTRL 0x1440
188#define ALX_LPI_CTRL_EN BIT(0)
189
190/* for B0+, bit[13..] for C0+ */
191#define ALX_HRTBT_EXT_CTRL 0x1AD0
192#define L1F_HRTBT_EXT_CTRL_PERIOD_HIGH_MASK 0x3F
193#define L1F_HRTBT_EXT_CTRL_PERIOD_HIGH_SHIFT 24
194#define L1F_HRTBT_EXT_CTRL_SWOI_STARTUP_PKT_EN BIT(23)
195#define L1F_HRTBT_EXT_CTRL_IOAC_2_FRAGMENTED BIT(22)
196#define L1F_HRTBT_EXT_CTRL_IOAC_1_FRAGMENTED BIT(21)
197#define L1F_HRTBT_EXT_CTRL_IOAC_1_KEEPALIVE_EN BIT(20)
198#define L1F_HRTBT_EXT_CTRL_IOAC_1_HAS_VLAN BIT(19)
199#define L1F_HRTBT_EXT_CTRL_IOAC_1_IS_8023 BIT(18)
200#define L1F_HRTBT_EXT_CTRL_IOAC_1_IS_IPV6 BIT(17)
201#define L1F_HRTBT_EXT_CTRL_IOAC_2_KEEPALIVE_EN BIT(16)
202#define L1F_HRTBT_EXT_CTRL_IOAC_2_HAS_VLAN BIT(15)
203#define L1F_HRTBT_EXT_CTRL_IOAC_2_IS_8023 BIT(14)
204#define L1F_HRTBT_EXT_CTRL_IOAC_2_IS_IPV6 BIT(13)
205#define ALX_HRTBT_EXT_CTRL_NS_EN BIT(12)
206#define ALX_HRTBT_EXT_CTRL_FRAG_LEN_MASK 0xFF
207#define ALX_HRTBT_EXT_CTRL_FRAG_LEN_SHIFT 4
208#define ALX_HRTBT_EXT_CTRL_IS_8023 BIT(3)
209#define ALX_HRTBT_EXT_CTRL_IS_IPV6 BIT(2)
210#define ALX_HRTBT_EXT_CTRL_WAKEUP_EN BIT(1)
211#define ALX_HRTBT_EXT_CTRL_ARP_EN BIT(0)
212
213#define ALX_HRTBT_REM_IPV4_ADDR 0x1AD4
214#define ALX_HRTBT_HOST_IPV4_ADDR 0x1478
215#define ALX_HRTBT_REM_IPV6_ADDR3 0x1AD8
216#define ALX_HRTBT_REM_IPV6_ADDR2 0x1ADC
217#define ALX_HRTBT_REM_IPV6_ADDR1 0x1AE0
218#define ALX_HRTBT_REM_IPV6_ADDR0 0x1AE4
219
220/* 1B8C ~ 1B94 for C0+ */
221#define ALX_SWOI_ACER_CTRL 0x1B8C
222#define ALX_SWOI_ORIG_ACK_NAK_EN BIT(20)
223#define ALX_SWOI_ORIG_ACK_NAK_PKT_LEN_MASK 0XFF
224#define ALX_SWOI_ORIG_ACK_NAK_PKT_LEN_SHIFT 12
225#define ALX_SWOI_ORIG_ACK_ADDR_MASK 0XFFF
226#define ALX_SWOI_ORIG_ACK_ADDR_SHIFT 0
227
228#define ALX_SWOI_IOAC_CTRL_2 0x1B90
229#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_FRAG_LEN_MASK 0xFF
230#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_FRAG_LEN_SHIFT 24
231#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_PKT_LEN_MASK 0xFFF
232#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_PKT_LEN_SHIFT 12
233#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_HDR_ADDR_MASK 0xFFF
234#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_HDR_ADDR_SHIFT 0
235
236#define ALX_SWOI_IOAC_CTRL_3 0x1B94
237#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_FRAG_LEN_MASK 0xFF
238#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_FRAG_LEN_SHIFT 24
239#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_PKT_LEN_MASK 0xFFF
240#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_PKT_LEN_SHIFT 12
241#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_HDR_ADDR_MASK 0xFFF
242#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_HDR_ADDR_SHIFT 0
243
244/* for B0 */
245#define ALX_IDLE_DECISN_TIMER 0x1474
246/* 1ms */
247#define ALX_IDLE_DECISN_TIMER_DEF 0x400
248
249#define ALX_MAC_CTRL 0x1480
250#define ALX_MAC_CTRL_FAST_PAUSE BIT(31)
251#define ALX_MAC_CTRL_WOLSPED_SWEN BIT(30)
252/* bit29: 1:legacy(hi5b), 0:marvl(lo5b)*/
253#define ALX_MAC_CTRL_MHASH_ALG_HI5B BIT(29)
254#define ALX_MAC_CTRL_BRD_EN BIT(26)
255#define ALX_MAC_CTRL_MULTIALL_EN BIT(25)
256#define ALX_MAC_CTRL_SPEED_MASK 0x3
257#define ALX_MAC_CTRL_SPEED_SHIFT 20
258#define ALX_MAC_CTRL_SPEED_10_100 1
259#define ALX_MAC_CTRL_SPEED_1000 2
260#define ALX_MAC_CTRL_PROMISC_EN BIT(15)
261#define ALX_MAC_CTRL_VLANSTRIP BIT(14)
262#define ALX_MAC_CTRL_PRMBLEN_MASK 0xF
263#define ALX_MAC_CTRL_PRMBLEN_SHIFT 10
264#define ALX_MAC_CTRL_PCRCE BIT(7)
265#define ALX_MAC_CTRL_CRCE BIT(6)
266#define ALX_MAC_CTRL_FULLD BIT(5)
267#define ALX_MAC_CTRL_RXFC_EN BIT(3)
268#define ALX_MAC_CTRL_TXFC_EN BIT(2)
269#define ALX_MAC_CTRL_RX_EN BIT(1)
270#define ALX_MAC_CTRL_TX_EN BIT(0)
271
272#define ALX_STAD0 0x1488
273#define ALX_STAD1 0x148C
274
275#define ALX_HASH_TBL0 0x1490
276#define ALX_HASH_TBL1 0x1494
277
278#define ALX_MTU 0x149C
279#define ALX_MTU_JUMBO_TH 1514
280#define ALX_MTU_STD_ALGN 1536
281
282#define ALX_SRAM5 0x1524
283#define ALX_SRAM_RXF_LEN_MASK 0xFFF
284#define ALX_SRAM_RXF_LEN_SHIFT 0
285#define ALX_SRAM_RXF_LEN_8K (8*1024)
286
287#define ALX_SRAM9 0x1534
288#define ALX_SRAM_LOAD_PTR BIT(0)
289
290#define ALX_RX_BASE_ADDR_HI 0x1540
291
292#define ALX_TX_BASE_ADDR_HI 0x1544
293
294#define ALX_RFD_ADDR_LO 0x1550
295#define ALX_RFD_RING_SZ 0x1560
296#define ALX_RFD_BUF_SZ 0x1564
297
298#define ALX_RRD_ADDR_LO 0x1568
299#define ALX_RRD_RING_SZ 0x1578
300
301/* pri3: highest, pri0: lowest */
302#define ALX_TPD_PRI3_ADDR_LO 0x14E4
303#define ALX_TPD_PRI2_ADDR_LO 0x14E0
304#define ALX_TPD_PRI1_ADDR_LO 0x157C
305#define ALX_TPD_PRI0_ADDR_LO 0x1580
306
307/* producer index is 16bit */
308#define ALX_TPD_PRI3_PIDX 0x1618
309#define ALX_TPD_PRI2_PIDX 0x161A
310#define ALX_TPD_PRI1_PIDX 0x15F0
311#define ALX_TPD_PRI0_PIDX 0x15F2
312
313/* consumer index is 16bit */
314#define ALX_TPD_PRI3_CIDX 0x161C
315#define ALX_TPD_PRI2_CIDX 0x161E
316#define ALX_TPD_PRI1_CIDX 0x15F4
317#define ALX_TPD_PRI0_CIDX 0x15F6
318
319#define ALX_TPD_RING_SZ 0x1584
320
321#define ALX_TXQ0 0x1590
322#define ALX_TXQ0_TXF_BURST_PREF_MASK 0xFFFF
323#define ALX_TXQ0_TXF_BURST_PREF_SHIFT 16
324#define ALX_TXQ_TXF_BURST_PREF_DEF 0x200
325#define ALX_TXQ0_LSO_8023_EN BIT(7)
326#define ALX_TXQ0_MODE_ENHANCE BIT(6)
327#define ALX_TXQ0_EN BIT(5)
328#define ALX_TXQ0_SUPT_IPOPT BIT(4)
329#define ALX_TXQ0_TPD_BURSTPREF_MASK 0xF
330#define ALX_TXQ0_TPD_BURSTPREF_SHIFT 0
331#define ALX_TXQ_TPD_BURSTPREF_DEF 5
332
333#define ALX_TXQ1 0x1594
334/* bit11: drop large packet, len > (rfd buf) */
335#define ALX_TXQ1_ERRLGPKT_DROP_EN BIT(11)
336#define ALX_TXQ1_JUMBO_TSO_TH (7*1024)
337
338#define ALX_RXQ0 0x15A0
339#define ALX_RXQ0_EN BIT(31)
340#define ALX_RXQ0_RSS_HASH_EN BIT(29)
341#define ALX_RXQ0_RSS_MODE_MASK 0x3
342#define ALX_RXQ0_RSS_MODE_SHIFT 26
343#define ALX_RXQ0_RSS_MODE_DIS 0
344#define ALX_RXQ0_RSS_MODE_MQMI 3
345#define ALX_RXQ0_NUM_RFD_PREF_MASK 0x3F
346#define ALX_RXQ0_NUM_RFD_PREF_SHIFT 20
347#define ALX_RXQ0_NUM_RFD_PREF_DEF 8
348#define ALX_RXQ0_IDT_TBL_SIZE_MASK 0x1FF
349#define ALX_RXQ0_IDT_TBL_SIZE_SHIFT 8
350#define ALX_RXQ0_IDT_TBL_SIZE_DEF 0x100
351#define ALX_RXQ0_IDT_TBL_SIZE_NORMAL 128
352#define ALX_RXQ0_IPV6_PARSE_EN BIT(7)
353#define ALX_RXQ0_RSS_HSTYP_MASK 0xF
354#define ALX_RXQ0_RSS_HSTYP_SHIFT 2
355#define ALX_RXQ0_RSS_HSTYP_IPV6_TCP_EN BIT(5)
356#define ALX_RXQ0_RSS_HSTYP_IPV6_EN BIT(4)
357#define ALX_RXQ0_RSS_HSTYP_IPV4_TCP_EN BIT(3)
358#define ALX_RXQ0_RSS_HSTYP_IPV4_EN BIT(2)
359#define ALX_RXQ0_RSS_HSTYP_ALL (ALX_RXQ0_RSS_HSTYP_IPV6_TCP_EN | \
360 ALX_RXQ0_RSS_HSTYP_IPV4_TCP_EN | \
361 ALX_RXQ0_RSS_HSTYP_IPV6_EN | \
362 ALX_RXQ0_RSS_HSTYP_IPV4_EN)
363#define ALX_RXQ0_ASPM_THRESH_MASK 0x3
364#define ALX_RXQ0_ASPM_THRESH_SHIFT 0
365#define ALX_RXQ0_ASPM_THRESH_100M 3
366
367#define ALX_RXQ2 0x15A8
368#define ALX_RXQ2_RXF_XOFF_THRESH_MASK 0xFFF
369#define ALX_RXQ2_RXF_XOFF_THRESH_SHIFT 16
370#define ALX_RXQ2_RXF_XON_THRESH_MASK 0xFFF
371#define ALX_RXQ2_RXF_XON_THRESH_SHIFT 0
372/* Size = tx-packet(1522) + IPG(12) + SOF(8) + 64(Pause) + IPG(12) + SOF(8) +
373 * rx-packet(1522) + delay-of-link(64)
374 * = 3212.
375 */
376#define ALX_RXQ2_RXF_FLOW_CTRL_RSVD 3212
377
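The 3212-byte reserve is simply the sum of the terms in the comment above; spelled out with hypothetical helper constants (not part of this header) the arithmetic is:

	#define FC_FRAME_MAX	1522	/* one maximum-size tagged frame */
	#define FC_IFG		12	/* inter-frame gap */
	#define FC_SOF		8	/* preamble + start-of-frame */
	#define FC_PAUSE	64	/* the pause frame itself */
	#define FC_LINK_DELAY	64	/* delay of link */

	/* 1522 + 12 + 8 + 64 + 12 + 8 + 1522 + 64 == 3212 == ALX_RXQ2_RXF_FLOW_CTRL_RSVD */
	#define FC_RSVD	(FC_FRAME_MAX + FC_IFG + FC_SOF + FC_PAUSE + \
			 FC_IFG + FC_SOF + FC_FRAME_MAX + FC_LINK_DELAY)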
378#define ALX_DMA 0x15C0
379#define ALX_DMA_RCHNL_SEL_MASK 0x3
380#define ALX_DMA_RCHNL_SEL_SHIFT 26
381#define ALX_DMA_WDLY_CNT_MASK 0xF
382#define ALX_DMA_WDLY_CNT_SHIFT 16
383#define ALX_DMA_WDLY_CNT_DEF 4
384#define ALX_DMA_RDLY_CNT_MASK 0x1F
385#define ALX_DMA_RDLY_CNT_SHIFT 11
386#define ALX_DMA_RDLY_CNT_DEF 15
387/* bit10: 0:tpd with pri, 1: data */
388#define ALX_DMA_RREQ_PRI_DATA BIT(10)
389#define ALX_DMA_RREQ_BLEN_MASK 0x7
390#define ALX_DMA_RREQ_BLEN_SHIFT 4
391#define ALX_DMA_RORDER_MODE_MASK 0x7
392#define ALX_DMA_RORDER_MODE_SHIFT 0
393#define ALX_DMA_RORDER_MODE_OUT 4
394
395#define ALX_WOL0 0x14A0
396#define ALX_WOL0_PME_LINK BIT(5)
397#define ALX_WOL0_LINK_EN BIT(4)
398#define ALX_WOL0_PME_MAGIC_EN BIT(3)
399#define ALX_WOL0_MAGIC_EN BIT(2)
400
401#define ALX_RFD_PIDX 0x15E0
402
403#define ALX_RFD_CIDX 0x15F8
404
405/* MIB */
406#define ALX_MIB_BASE 0x1700
407#define ALX_MIB_RX_OK (ALX_MIB_BASE + 0)
408#define ALX_MIB_RX_ERRADDR (ALX_MIB_BASE + 92)
409#define ALX_MIB_TX_OK (ALX_MIB_BASE + 96)
410#define ALX_MIB_TX_MCCNT (ALX_MIB_BASE + 192)
411
412#define ALX_RX_STATS_BIN ALX_MIB_RX_OK
413#define ALX_RX_STATS_END ALX_MIB_RX_ERRADDR
414#define ALX_TX_STATS_BIN ALX_MIB_TX_OK
415#define ALX_TX_STATS_END ALX_MIB_TX_MCCNT
416
417#define ALX_ISR 0x1600
418#define ALX_ISR_DIS BIT(31)
419#define ALX_ISR_RX_Q7 BIT(30)
420#define ALX_ISR_RX_Q6 BIT(29)
421#define ALX_ISR_RX_Q5 BIT(28)
422#define ALX_ISR_RX_Q4 BIT(27)
423#define ALX_ISR_PCIE_LNKDOWN BIT(26)
424#define ALX_ISR_RX_Q3 BIT(19)
425#define ALX_ISR_RX_Q2 BIT(18)
426#define ALX_ISR_RX_Q1 BIT(17)
427#define ALX_ISR_RX_Q0 BIT(16)
428#define ALX_ISR_TX_Q0 BIT(15)
429#define ALX_ISR_PHY BIT(12)
430#define ALX_ISR_DMAW BIT(10)
431#define ALX_ISR_DMAR BIT(9)
432#define ALX_ISR_TXF_UR BIT(8)
433#define ALX_ISR_TX_Q3 BIT(7)
434#define ALX_ISR_TX_Q2 BIT(6)
435#define ALX_ISR_TX_Q1 BIT(5)
436#define ALX_ISR_RFD_UR BIT(4)
437#define ALX_ISR_RXF_OV BIT(3)
438#define ALX_ISR_MANU BIT(2)
439#define ALX_ISR_TIMER BIT(1)
440#define ALX_ISR_SMB BIT(0)
441
442#define ALX_IMR 0x1604
443
 444/* re-send assert msg if SW does not respond */
445#define ALX_INT_RETRIG 0x1608
446/* 40ms */
447#define ALX_INT_RETRIG_TO 20000
448
449#define ALX_SMB_TIMER 0x15C4
450
451#define ALX_TINT_TPD_THRSHLD 0x15C8
452
453#define ALX_TINT_TIMER 0x15CC
454
455#define ALX_CLK_GATE 0x1814
456#define ALX_CLK_GATE_RXMAC BIT(5)
457#define ALX_CLK_GATE_TXMAC BIT(4)
458#define ALX_CLK_GATE_RXQ BIT(3)
459#define ALX_CLK_GATE_TXQ BIT(2)
460#define ALX_CLK_GATE_DMAR BIT(1)
461#define ALX_CLK_GATE_DMAW BIT(0)
462#define ALX_CLK_GATE_ALL (ALX_CLK_GATE_RXMAC | \
463 ALX_CLK_GATE_TXMAC | \
464 ALX_CLK_GATE_RXQ | \
465 ALX_CLK_GATE_TXQ | \
466 ALX_CLK_GATE_DMAR | \
467 ALX_CLK_GATE_DMAW)
468
469/* interop between drivers */
470#define ALX_DRV 0x1804
471#define ALX_DRV_PHY_AUTO BIT(28)
472#define ALX_DRV_PHY_1000 BIT(27)
473#define ALX_DRV_PHY_100 BIT(26)
474#define ALX_DRV_PHY_10 BIT(25)
475#define ALX_DRV_PHY_DUPLEX BIT(24)
476/* bit23: adv Pause */
477#define ALX_DRV_PHY_PAUSE BIT(23)
478/* bit22: adv Asym Pause */
479#define ALX_DRV_PHY_MASK 0xFF
480#define ALX_DRV_PHY_SHIFT 21
481#define ALX_DRV_PHY_UNKNOWN 0
482
483/* flag of phy inited */
484#define ALX_PHY_INITED 0x003F
485
486/* reg 1830 ~ 186C for C0+, 16 bit map patterns and wake packet detection */
487#define ALX_WOL_CTRL2 0x1830
488#define ALX_WOL_CTRL2_DATA_STORE BIT(3)
489#define ALX_WOL_CTRL2_PTRN_EVT BIT(2)
490#define ALX_WOL_CTRL2_PME_PTRN_EN BIT(1)
491#define ALX_WOL_CTRL2_PTRN_EN BIT(0)
492
493#define ALX_WOL_CTRL3 0x1834
494#define ALX_WOL_CTRL3_PTRN_ADDR_MASK 0xFFFFF
495#define ALX_WOL_CTRL3_PTRN_ADDR_SHIFT 0
496
497#define ALX_WOL_CTRL4 0x1838
498#define ALX_WOL_CTRL4_PT15_MATCH BIT(31)
499#define ALX_WOL_CTRL4_PT14_MATCH BIT(30)
500#define ALX_WOL_CTRL4_PT13_MATCH BIT(29)
501#define ALX_WOL_CTRL4_PT12_MATCH BIT(28)
502#define ALX_WOL_CTRL4_PT11_MATCH BIT(27)
503#define ALX_WOL_CTRL4_PT10_MATCH BIT(26)
504#define ALX_WOL_CTRL4_PT9_MATCH BIT(25)
505#define ALX_WOL_CTRL4_PT8_MATCH BIT(24)
506#define ALX_WOL_CTRL4_PT7_MATCH BIT(23)
507#define ALX_WOL_CTRL4_PT6_MATCH BIT(22)
508#define ALX_WOL_CTRL4_PT5_MATCH BIT(21)
509#define ALX_WOL_CTRL4_PT4_MATCH BIT(20)
510#define ALX_WOL_CTRL4_PT3_MATCH BIT(19)
511#define ALX_WOL_CTRL4_PT2_MATCH BIT(18)
512#define ALX_WOL_CTRL4_PT1_MATCH BIT(17)
513#define ALX_WOL_CTRL4_PT0_MATCH BIT(16)
514#define ALX_WOL_CTRL4_PT15_EN BIT(15)
515#define ALX_WOL_CTRL4_PT14_EN BIT(14)
516#define ALX_WOL_CTRL4_PT13_EN BIT(13)
517#define ALX_WOL_CTRL4_PT12_EN BIT(12)
518#define ALX_WOL_CTRL4_PT11_EN BIT(11)
519#define ALX_WOL_CTRL4_PT10_EN BIT(10)
520#define ALX_WOL_CTRL4_PT9_EN BIT(9)
521#define ALX_WOL_CTRL4_PT8_EN BIT(8)
522#define ALX_WOL_CTRL4_PT7_EN BIT(7)
523#define ALX_WOL_CTRL4_PT6_EN BIT(6)
524#define ALX_WOL_CTRL4_PT5_EN BIT(5)
525#define ALX_WOL_CTRL4_PT4_EN BIT(4)
526#define ALX_WOL_CTRL4_PT3_EN BIT(3)
527#define ALX_WOL_CTRL4_PT2_EN BIT(2)
528#define ALX_WOL_CTRL4_PT1_EN BIT(1)
529#define ALX_WOL_CTRL4_PT0_EN BIT(0)
530
531#define ALX_WOL_CTRL5 0x183C
532#define ALX_WOL_CTRL5_PT3_LEN_MASK 0xFF
533#define ALX_WOL_CTRL5_PT3_LEN_SHIFT 24
534#define ALX_WOL_CTRL5_PT2_LEN_MASK 0xFF
535#define ALX_WOL_CTRL5_PT2_LEN_SHIFT 16
536#define ALX_WOL_CTRL5_PT1_LEN_MASK 0xFF
537#define ALX_WOL_CTRL5_PT1_LEN_SHIFT 8
538#define ALX_WOL_CTRL5_PT0_LEN_MASK 0xFF
539#define ALX_WOL_CTRL5_PT0_LEN_SHIFT 0
540
541#define ALX_WOL_CTRL6 0x1840
542#define ALX_WOL_CTRL5_PT7_LEN_MASK 0xFF
543#define ALX_WOL_CTRL5_PT7_LEN_SHIFT 24
544#define ALX_WOL_CTRL5_PT6_LEN_MASK 0xFF
545#define ALX_WOL_CTRL5_PT6_LEN_SHIFT 16
546#define ALX_WOL_CTRL5_PT5_LEN_MASK 0xFF
547#define ALX_WOL_CTRL5_PT5_LEN_SHIFT 8
548#define ALX_WOL_CTRL5_PT4_LEN_MASK 0xFF
549#define ALX_WOL_CTRL5_PT4_LEN_SHIFT 0
550
551#define ALX_WOL_CTRL7 0x1844
552#define ALX_WOL_CTRL5_PT11_LEN_MASK 0xFF
553#define ALX_WOL_CTRL5_PT11_LEN_SHIFT 24
554#define ALX_WOL_CTRL5_PT10_LEN_MASK 0xFF
555#define ALX_WOL_CTRL5_PT10_LEN_SHIFT 16
556#define ALX_WOL_CTRL5_PT9_LEN_MASK 0xFF
557#define ALX_WOL_CTRL5_PT9_LEN_SHIFT 8
558#define ALX_WOL_CTRL5_PT8_LEN_MASK 0xFF
559#define ALX_WOL_CTRL5_PT8_LEN_SHIFT 0
560
561#define ALX_WOL_CTRL8 0x1848
562#define ALX_WOL_CTRL5_PT15_LEN_MASK 0xFF
563#define ALX_WOL_CTRL5_PT15_LEN_SHIFT 24
564#define ALX_WOL_CTRL5_PT14_LEN_MASK 0xFF
565#define ALX_WOL_CTRL5_PT14_LEN_SHIFT 16
566#define ALX_WOL_CTRL5_PT13_LEN_MASK 0xFF
567#define ALX_WOL_CTRL5_PT13_LEN_SHIFT 8
568#define ALX_WOL_CTRL5_PT12_LEN_MASK 0xFF
569#define ALX_WOL_CTRL5_PT12_LEN_SHIFT 0
570
571#define ALX_ACER_FIXED_PTN0 0x1850
572#define ALX_ACER_FIXED_PTN0_MASK 0xFFFFFFFF
573#define ALX_ACER_FIXED_PTN0_SHIFT 0
574
575#define ALX_ACER_FIXED_PTN1 0x1854
576#define ALX_ACER_FIXED_PTN1_MASK 0xFFFF
577#define ALX_ACER_FIXED_PTN1_SHIFT 0
578
579#define ALX_ACER_RANDOM_NUM0 0x1858
580#define ALX_ACER_RANDOM_NUM0_MASK 0xFFFFFFFF
581#define ALX_ACER_RANDOM_NUM0_SHIFT 0
582
583#define ALX_ACER_RANDOM_NUM1 0x185C
584#define ALX_ACER_RANDOM_NUM1_MASK 0xFFFFFFFF
585#define ALX_ACER_RANDOM_NUM1_SHIFT 0
586
587#define ALX_ACER_RANDOM_NUM2 0x1860
588#define ALX_ACER_RANDOM_NUM2_MASK 0xFFFFFFFF
589#define ALX_ACER_RANDOM_NUM2_SHIFT 0
590
591#define ALX_ACER_RANDOM_NUM3 0x1864
592#define ALX_ACER_RANDOM_NUM3_MASK 0xFFFFFFFF
593#define ALX_ACER_RANDOM_NUM3_SHIFT 0
594
595#define ALX_ACER_MAGIC 0x1868
596#define ALX_ACER_MAGIC_EN BIT(31)
597#define ALX_ACER_MAGIC_PME_EN BIT(30)
598#define ALX_ACER_MAGIC_MATCH BIT(29)
599#define ALX_ACER_MAGIC_FF_CHECK BIT(10)
600#define ALX_ACER_MAGIC_RAN_LEN_MASK 0x1F
601#define ALX_ACER_MAGIC_RAN_LEN_SHIFT 5
602#define ALX_ACER_MAGIC_FIX_LEN_MASK 0x1F
603#define ALX_ACER_MAGIC_FIX_LEN_SHIFT 0
604
605#define ALX_ACER_TIMER 0x186C
606#define ALX_ACER_TIMER_EN BIT(31)
607#define ALX_ACER_TIMER_PME_EN BIT(30)
608#define ALX_ACER_TIMER_MATCH BIT(29)
609#define ALX_ACER_TIMER_THRES_MASK 0x1FFFF
610#define ALX_ACER_TIMER_THRES_SHIFT 0
611#define ALX_ACER_TIMER_THRES_DEF 1
612
613/* RSS definitions */
614#define ALX_RSS_KEY0 0x14B0
615#define ALX_RSS_KEY1 0x14B4
616#define ALX_RSS_KEY2 0x14B8
617#define ALX_RSS_KEY3 0x14BC
618#define ALX_RSS_KEY4 0x14C0
619#define ALX_RSS_KEY5 0x14C4
620#define ALX_RSS_KEY6 0x14C8
621#define ALX_RSS_KEY7 0x14CC
622#define ALX_RSS_KEY8 0x14D0
623#define ALX_RSS_KEY9 0x14D4
624
625#define ALX_RSS_IDT_TBL0 0x1B00
626
627#define ALX_MSI_MAP_TBL1 0x15D0
628#define ALX_MSI_MAP_TBL1_TXQ1_SHIFT 20
629#define ALX_MSI_MAP_TBL1_TXQ0_SHIFT 16
630#define ALX_MSI_MAP_TBL1_RXQ3_SHIFT 12
631#define ALX_MSI_MAP_TBL1_RXQ2_SHIFT 8
632#define ALX_MSI_MAP_TBL1_RXQ1_SHIFT 4
633#define ALX_MSI_MAP_TBL1_RXQ0_SHIFT 0
634
635#define ALX_MSI_MAP_TBL2 0x15D8
636#define ALX_MSI_MAP_TBL2_TXQ3_SHIFT 20
637#define ALX_MSI_MAP_TBL2_TXQ2_SHIFT 16
638#define ALX_MSI_MAP_TBL2_RXQ7_SHIFT 12
639#define ALX_MSI_MAP_TBL2_RXQ6_SHIFT 8
640#define ALX_MSI_MAP_TBL2_RXQ5_SHIFT 4
641#define ALX_MSI_MAP_TBL2_RXQ4_SHIFT 0
642
643#define ALX_MSI_ID_MAP 0x15D4
644
645#define ALX_MSI_RETRANS_TIMER 0x1920
646/* bit16: 1:line,0:standard */
647#define ALX_MSI_MASK_SEL_LINE BIT(16)
648#define ALX_MSI_RETRANS_TM_MASK 0xFFFF
649#define ALX_MSI_RETRANS_TM_SHIFT 0
650
651/* CR DMA ctrl */
652
653/* TX QoS */
654#define ALX_WRR 0x1938
655#define ALX_WRR_PRI_MASK 0x3
656#define ALX_WRR_PRI_SHIFT 29
657#define ALX_WRR_PRI_RESTRICT_NONE 3
658#define ALX_WRR_PRI3_MASK 0x1F
659#define ALX_WRR_PRI3_SHIFT 24
660#define ALX_WRR_PRI2_MASK 0x1F
661#define ALX_WRR_PRI2_SHIFT 16
662#define ALX_WRR_PRI1_MASK 0x1F
663#define ALX_WRR_PRI1_SHIFT 8
664#define ALX_WRR_PRI0_MASK 0x1F
665#define ALX_WRR_PRI0_SHIFT 0
666
667#define ALX_HQTPD 0x193C
668#define ALX_HQTPD_BURST_EN BIT(31)
669#define ALX_HQTPD_Q3_NUMPREF_MASK 0xF
670#define ALX_HQTPD_Q3_NUMPREF_SHIFT 8
671#define ALX_HQTPD_Q2_NUMPREF_MASK 0xF
672#define ALX_HQTPD_Q2_NUMPREF_SHIFT 4
673#define ALX_HQTPD_Q1_NUMPREF_MASK 0xF
674#define ALX_HQTPD_Q1_NUMPREF_SHIFT 0
675
676#define ALX_MISC 0x19C0
677#define ALX_MISC_PSW_OCP_MASK 0x7
678#define ALX_MISC_PSW_OCP_SHIFT 21
679#define ALX_MISC_PSW_OCP_DEF 0x7
680#define ALX_MISC_ISO_EN BIT(12)
681#define ALX_MISC_INTNLOSC_OPEN BIT(3)
682
683#define ALX_MSIC2 0x19C8
684#define ALX_MSIC2_CALB_START BIT(0)
685
686#define ALX_MISC3 0x19CC
687/* bit1: 1:Software control 25M */
688#define ALX_MISC3_25M_BY_SW BIT(1)
689/* bit0: 25M switch to intnl OSC */
690#define ALX_MISC3_25M_NOTO_INTNL BIT(0)
691
692/* MSIX tbl in memory space */
693#define ALX_MSIX_ENTRY_BASE 0x2000
694
695/********************* PHY regs definition ***************************/
696
697/* PHY Specific Status Register */
698#define ALX_MII_GIGA_PSSR 0x11
699#define ALX_GIGA_PSSR_SPD_DPLX_RESOLVED 0x0800
700#define ALX_GIGA_PSSR_DPLX 0x2000
701#define ALX_GIGA_PSSR_SPEED 0xC000
702#define ALX_GIGA_PSSR_10MBS 0x0000
703#define ALX_GIGA_PSSR_100MBS 0x4000
704#define ALX_GIGA_PSSR_1000MBS 0x8000
705
706/* PHY Interrupt Enable Register */
707#define ALX_MII_IER 0x12
708#define ALX_IER_LINK_UP 0x0400
709#define ALX_IER_LINK_DOWN 0x0800
710
711/* PHY Interrupt Status Register */
712#define ALX_MII_ISR 0x13
713
714#define ALX_MII_DBG_ADDR 0x1D
715#define ALX_MII_DBG_DATA 0x1E
716
717/***************************** debug port *************************************/
718
719#define ALX_MIIDBG_ANACTRL 0x00
720#define ALX_ANACTRL_DEF 0x02EF
721
722#define ALX_MIIDBG_SYSMODCTRL 0x04
723/* en half bias */
724#define ALX_SYSMODCTRL_IECHOADJ_DEF 0xBB8B
725
726#define ALX_MIIDBG_SRDSYSMOD 0x05
727#define ALX_SRDSYSMOD_DEEMP_EN 0x0040
728#define ALX_SRDSYSMOD_DEF 0x2C46
729
730#define ALX_MIIDBG_HIBNEG 0x0B
731#define ALX_HIBNEG_PSHIB_EN 0x8000
732#define ALX_HIBNEG_HIB_PSE 0x1000
733#define ALX_HIBNEG_DEF 0xBC40
734#define ALX_HIBNEG_NOHIB (ALX_HIBNEG_DEF & \
735 ~(ALX_HIBNEG_PSHIB_EN | ALX_HIBNEG_HIB_PSE))
736
737#define ALX_MIIDBG_TST10BTCFG 0x12
738#define ALX_TST10BTCFG_DEF 0x4C04
739
740#define ALX_MIIDBG_AZ_ANADECT 0x15
741#define ALX_AZ_ANADECT_DEF 0x3220
742#define ALX_AZ_ANADECT_LONG 0x3210
743
744#define ALX_MIIDBG_MSE16DB 0x18
745#define ALX_MSE16DB_UP 0x05EA
746#define ALX_MSE16DB_DOWN 0x02EA
747
748#define ALX_MIIDBG_MSE20DB 0x1C
749#define ALX_MSE20DB_TH_MASK 0x7F
750#define ALX_MSE20DB_TH_SHIFT 2
751#define ALX_MSE20DB_TH_DEF 0x2E
752#define ALX_MSE20DB_TH_HI 0x54
753
754#define ALX_MIIDBG_AGC 0x23
755#define ALX_AGC_2_VGA_MASK 0x3FU
756#define ALX_AGC_2_VGA_SHIFT 8
757#define ALX_AGC_LONG1G_LIMT 40
758#define ALX_AGC_LONG100M_LIMT 44
759
760#define ALX_MIIDBG_LEGCYPS 0x29
761#define ALX_LEGCYPS_EN 0x8000
762#define ALX_LEGCYPS_DEF 0x129D
763
764#define ALX_MIIDBG_TST100BTCFG 0x36
765#define ALX_TST100BTCFG_DEF 0xE12C
766
767#define ALX_MIIDBG_GREENCFG 0x3B
768#define ALX_GREENCFG_DEF 0x7078
769
770#define ALX_MIIDBG_GREENCFG2 0x3D
771#define ALX_GREENCFG2_BP_GREEN 0x8000
772#define ALX_GREENCFG2_GATE_DFSE_EN 0x0080
773
774/******* dev 3 *********/
775#define ALX_MIIEXT_PCS 3
776
777#define ALX_MIIEXT_CLDCTRL3 0x8003
778#define ALX_CLDCTRL3_BP_CABLE1TH_DET_GT 0x8000
779
780#define ALX_MIIEXT_CLDCTRL5 0x8005
781#define ALX_CLDCTRL5_BP_VD_HLFBIAS 0x4000
782
783#define ALX_MIIEXT_CLDCTRL6 0x8006
784#define ALX_CLDCTRL6_CAB_LEN_MASK 0xFF
785#define ALX_CLDCTRL6_CAB_LEN_SHIFT 0
786#define ALX_CLDCTRL6_CAB_LEN_SHORT1G 116
787#define ALX_CLDCTRL6_CAB_LEN_SHORT100M 152
788
789#define ALX_MIIEXT_VDRVBIAS 0x8062
790#define ALX_VDRVBIAS_DEF 0x3
791
792/********* dev 7 **********/
793#define ALX_MIIEXT_ANEG 7
794
795#define ALX_MIIEXT_LOCAL_EEEADV 0x3C
796#define ALX_LOCAL_EEEADV_1000BT 0x0004
797#define ALX_LOCAL_EEEADV_100BT 0x0002
798
799#define ALX_MIIEXT_AFE 0x801A
800#define ALX_AFE_10BT_100M_TH 0x0040
801
802#define ALX_MIIEXT_S3DIG10 0x8023
803/* bit0: 1:bypass 10BT rx fifo, 0:original 10BT rx */
804#define ALX_MIIEXT_S3DIG10_SL 0x0001
805#define ALX_MIIEXT_S3DIG10_DEF 0
806
807#define ALX_MIIEXT_NLP78 0x8027
808#define ALX_MIIEXT_NLP78_120M_DEF 0x8A05
809
810#endif
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index c777b9013164..a13463e8a2c3 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -744,6 +744,9 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum)
744 status = tg3_ape_read32(tp, gnt + off); 744 status = tg3_ape_read32(tp, gnt + off);
745 if (status == bit) 745 if (status == bit)
746 break; 746 break;
747 if (pci_channel_offline(tp->pdev))
748 break;
749
747 udelay(10); 750 udelay(10);
748 } 751 }
749 752
@@ -1635,6 +1638,9 @@ static void tg3_wait_for_event_ack(struct tg3 *tp)
1635 for (i = 0; i < delay_cnt; i++) { 1638 for (i = 0; i < delay_cnt; i++) {
1636 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT)) 1639 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1637 break; 1640 break;
1641 if (pci_channel_offline(tp->pdev))
1642 break;
1643
1638 udelay(8); 1644 udelay(8);
1639 } 1645 }
1640} 1646}
@@ -1813,6 +1819,9 @@ static int tg3_poll_fw(struct tg3 *tp)
1813 for (i = 0; i < 200; i++) { 1819 for (i = 0; i < 200; i++) {
1814 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE) 1820 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1815 return 0; 1821 return 0;
1822 if (pci_channel_offline(tp->pdev))
1823 return -ENODEV;
1824
1816 udelay(100); 1825 udelay(100);
1817 } 1826 }
1818 return -ENODEV; 1827 return -ENODEV;
@@ -1823,6 +1832,15 @@ static int tg3_poll_fw(struct tg3 *tp)
1823 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val); 1832 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1824 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1) 1833 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1825 break; 1834 break;
1835 if (pci_channel_offline(tp->pdev)) {
1836 if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1837 tg3_flag_set(tp, NO_FWARE_REPORTED);
1838 netdev_info(tp->dev, "No firmware running\n");
1839 }
1840
1841 break;
1842 }
1843
1826 udelay(10); 1844 udelay(10);
1827 } 1845 }
1828 1846
@@ -3520,6 +3538,8 @@ static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3520 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT); 3538 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3521 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT) 3539 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3522 break; 3540 break;
3541 if (pci_channel_offline(tp->pdev))
3542 return -EBUSY;
3523 } 3543 }
3524 3544
3525 return (i == iters) ? -EBUSY : 0; 3545 return (i == iters) ? -EBUSY : 0;
@@ -8589,6 +8609,14 @@ static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, boo
8589 tw32_f(ofs, val); 8609 tw32_f(ofs, val);
8590 8610
8591 for (i = 0; i < MAX_WAIT_CNT; i++) { 8611 for (i = 0; i < MAX_WAIT_CNT; i++) {
8612 if (pci_channel_offline(tp->pdev)) {
8613 dev_err(&tp->pdev->dev,
8614 "tg3_stop_block device offline, "
8615 "ofs=%lx enable_bit=%x\n",
8616 ofs, enable_bit);
8617 return -ENODEV;
8618 }
8619
8592 udelay(100); 8620 udelay(100);
8593 val = tr32(ofs); 8621 val = tr32(ofs);
8594 if ((val & enable_bit) == 0) 8622 if ((val & enable_bit) == 0)
@@ -8612,6 +8640,13 @@ static int tg3_abort_hw(struct tg3 *tp, bool silent)
8612 8640
8613 tg3_disable_ints(tp); 8641 tg3_disable_ints(tp);
8614 8642
8643 if (pci_channel_offline(tp->pdev)) {
8644 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8645 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8646 err = -ENODEV;
8647 goto err_no_dev;
8648 }
8649
8615 tp->rx_mode &= ~RX_MODE_ENABLE; 8650 tp->rx_mode &= ~RX_MODE_ENABLE;
8616 tw32_f(MAC_RX_MODE, tp->rx_mode); 8651 tw32_f(MAC_RX_MODE, tp->rx_mode);
8617 udelay(10); 8652 udelay(10);
@@ -8660,6 +8695,7 @@ static int tg3_abort_hw(struct tg3 *tp, bool silent)
8660 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent); 8695 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8661 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent); 8696 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8662 8697
8698err_no_dev:
8663 for (i = 0; i < tp->irq_cnt; i++) { 8699 for (i = 0; i < tp->irq_cnt; i++) {
8664 struct tg3_napi *tnapi = &tp->napi[i]; 8700 struct tg3_napi *tnapi = &tp->napi[i];
8665 if (tnapi->hw_status) 8701 if (tnapi->hw_status)
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index a667015be22a..d48099f03b7f 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -516,6 +516,7 @@ fec_restart(struct net_device *ndev, int duplex)
516 /* Set MII speed */ 516 /* Set MII speed */
517 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); 517 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
518 518
519#if !defined(CONFIG_M5272)
519 /* set RX checksum */ 520 /* set RX checksum */
520 val = readl(fep->hwp + FEC_RACC); 521 val = readl(fep->hwp + FEC_RACC);
521 if (fep->csum_flags & FLAG_RX_CSUM_ENABLED) 522 if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
@@ -523,6 +524,7 @@ fec_restart(struct net_device *ndev, int duplex)
523 else 524 else
524 val &= ~FEC_RACC_OPTIONS; 525 val &= ~FEC_RACC_OPTIONS;
525 writel(val, fep->hwp + FEC_RACC); 526 writel(val, fep->hwp + FEC_RACC);
527#endif
526 528
527 /* 529 /*
528 * The phy interface and speed need to get configured 530 * The phy interface and speed need to get configured
@@ -575,6 +577,7 @@ fec_restart(struct net_device *ndev, int duplex)
575#endif 577#endif
576 } 578 }
577 579
580#if !defined(CONFIG_M5272)
578 /* enable pause frame*/ 581 /* enable pause frame*/
579 if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) || 582 if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) ||
580 ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) && 583 ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) &&
@@ -592,6 +595,7 @@ fec_restart(struct net_device *ndev, int duplex)
592 } else { 595 } else {
593 rcntl &= ~FEC_ENET_FCE; 596 rcntl &= ~FEC_ENET_FCE;
594 } 597 }
598#endif /* !defined(CONFIG_M5272) */
595 599
596 writel(rcntl, fep->hwp + FEC_R_CNTRL); 600 writel(rcntl, fep->hwp + FEC_R_CNTRL);
597 601
@@ -1205,7 +1209,9 @@ static int fec_enet_mii_probe(struct net_device *ndev)
1205 /* mask with MAC supported features */ 1209 /* mask with MAC supported features */
1206 if (id_entry->driver_data & FEC_QUIRK_HAS_GBIT) { 1210 if (id_entry->driver_data & FEC_QUIRK_HAS_GBIT) {
1207 phy_dev->supported &= PHY_GBIT_FEATURES; 1211 phy_dev->supported &= PHY_GBIT_FEATURES;
1212#if !defined(CONFIG_M5272)
1208 phy_dev->supported |= SUPPORTED_Pause; 1213 phy_dev->supported |= SUPPORTED_Pause;
1214#endif
1209 } 1215 }
1210 else 1216 else
1211 phy_dev->supported &= PHY_BASIC_FEATURES; 1217 phy_dev->supported &= PHY_BASIC_FEATURES;
@@ -1390,6 +1396,8 @@ static int fec_enet_get_ts_info(struct net_device *ndev,
1390 } 1396 }
1391} 1397}
1392 1398
1399#if !defined(CONFIG_M5272)
1400
1393static void fec_enet_get_pauseparam(struct net_device *ndev, 1401static void fec_enet_get_pauseparam(struct net_device *ndev,
1394 struct ethtool_pauseparam *pause) 1402 struct ethtool_pauseparam *pause)
1395{ 1403{
@@ -1436,9 +1444,13 @@ static int fec_enet_set_pauseparam(struct net_device *ndev,
1436 return 0; 1444 return 0;
1437} 1445}
1438 1446
1447#endif /* !defined(CONFIG_M5272) */
1448
1439static const struct ethtool_ops fec_enet_ethtool_ops = { 1449static const struct ethtool_ops fec_enet_ethtool_ops = {
1450#if !defined(CONFIG_M5272)
1440 .get_pauseparam = fec_enet_get_pauseparam, 1451 .get_pauseparam = fec_enet_get_pauseparam,
1441 .set_pauseparam = fec_enet_set_pauseparam, 1452 .set_pauseparam = fec_enet_set_pauseparam,
1453#endif
1442 .get_settings = fec_enet_get_settings, 1454 .get_settings = fec_enet_get_settings,
1443 .set_settings = fec_enet_set_settings, 1455 .set_settings = fec_enet_set_settings,
1444 .get_drvinfo = fec_enet_get_drvinfo, 1456 .get_drvinfo = fec_enet_get_drvinfo,
@@ -1874,10 +1886,12 @@ fec_probe(struct platform_device *pdev)
1874 /* setup board info structure */ 1886 /* setup board info structure */
1875 fep = netdev_priv(ndev); 1887 fep = netdev_priv(ndev);
1876 1888
1889#if !defined(CONFIG_M5272)
1877 /* default enable pause frame auto negotiation */ 1890 /* default enable pause frame auto negotiation */
1878 if (pdev->id_entry && 1891 if (pdev->id_entry &&
1879 (pdev->id_entry->driver_data & FEC_QUIRK_HAS_GBIT)) 1892 (pdev->id_entry->driver_data & FEC_QUIRK_HAS_GBIT))
1880 fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG; 1893 fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
1894#endif
1881 1895
1882 fep->hwp = devm_request_and_ioremap(&pdev->dev, r); 1896 fep->hwp = devm_request_and_ioremap(&pdev->dev, r);
1883 fep->pdev = pdev; 1897 fep->pdev = pdev;
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 2ad1494efbb3..d1cbfb12c1ca 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1757,7 +1757,7 @@ static int rxq_init(struct mv643xx_eth_private *mp, int index)
1757 memset(rxq->rx_desc_area, 0, size); 1757 memset(rxq->rx_desc_area, 0, size);
1758 1758
1759 rxq->rx_desc_area_size = size; 1759 rxq->rx_desc_area_size = size;
1760 rxq->rx_skb = kmalloc_array(rxq->rx_ring_size, sizeof(*rxq->rx_skb), 1760 rxq->rx_skb = kcalloc(rxq->rx_ring_size, sizeof(*rxq->rx_skb),
1761 GFP_KERNEL); 1761 GFP_KERNEL);
1762 if (rxq->rx_skb == NULL) 1762 if (rxq->rx_skb == NULL)
1763 goto out_free; 1763 goto out_free;
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 339bb323cb0c..1c8af8ba08d9 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1015,7 +1015,7 @@ static int rxq_init(struct net_device *dev)
1015 int rx_desc_num = pep->rx_ring_size; 1015 int rx_desc_num = pep->rx_ring_size;
1016 1016
1017 /* Allocate RX skb rings */ 1017 /* Allocate RX skb rings */
1018 pep->rx_skb = kmalloc(sizeof(*pep->rx_skb) * pep->rx_ring_size, 1018 pep->rx_skb = kzalloc(sizeof(*pep->rx_skb) * pep->rx_ring_size,
1019 GFP_KERNEL); 1019 GFP_KERNEL);
1020 if (!pep->rx_skb) 1020 if (!pep->rx_skb)
1021 return -ENOMEM; 1021 return -ENOMEM;
@@ -1076,7 +1076,7 @@ static int txq_init(struct net_device *dev)
1076 int size = 0, i = 0; 1076 int size = 0, i = 0;
1077 int tx_desc_num = pep->tx_ring_size; 1077 int tx_desc_num = pep->tx_ring_size;
1078 1078
1079 pep->tx_skb = kmalloc(sizeof(*pep->tx_skb) * pep->tx_ring_size, 1079 pep->tx_skb = kzalloc(sizeof(*pep->tx_skb) * pep->tx_ring_size,
1080 GFP_KERNEL); 1080 GFP_KERNEL);
1081 if (!pep->tx_skb) 1081 if (!pep->tx_skb)
1082 return -ENOMEM; 1082 return -ENOMEM;
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 2f4a26039e80..8a434997a0df 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -632,6 +632,9 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
632 dev->caps.cqe_size = 32; 632 dev->caps.cqe_size = 32;
633 } 633 }
634 634
635 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
636 mlx4_warn(dev, "Timestamping is not supported in slave mode.\n");
637
635 slave_adjust_steering_mode(dev, &dev_cap, &hca_param); 638 slave_adjust_steering_mode(dev, &dev_cap, &hca_param);
636 639
637 return 0; 640 return 0;
diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c
index 921729f9c85c..91a8a5d28037 100644
--- a/drivers/net/ethernet/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/octeon/octeon_mgmt.c
@@ -46,17 +46,25 @@
46union mgmt_port_ring_entry { 46union mgmt_port_ring_entry {
47 u64 d64; 47 u64 d64;
48 struct { 48 struct {
49 u64 reserved_62_63:2; 49#define RING_ENTRY_CODE_DONE 0xf
50#define RING_ENTRY_CODE_MORE 0x10
51#ifdef __BIG_ENDIAN_BITFIELD
52 u64 reserved_62_63:2;
50 /* Length of the buffer/packet in bytes */ 53 /* Length of the buffer/packet in bytes */
51 u64 len:14; 54 u64 len:14;
52 /* For TX, signals that the packet should be timestamped */ 55 /* For TX, signals that the packet should be timestamped */
53 u64 tstamp:1; 56 u64 tstamp:1;
54 /* The RX error code */ 57 /* The RX error code */
55 u64 code:7; 58 u64 code:7;
56#define RING_ENTRY_CODE_DONE 0xf
57#define RING_ENTRY_CODE_MORE 0x10
58 /* Physical address of the buffer */ 59 /* Physical address of the buffer */
59 u64 addr:40; 60 u64 addr:40;
61#else
62 u64 addr:40;
63 u64 code:7;
64 u64 tstamp:1;
65 u64 len:14;
66 u64 reserved_62_63:2;
67#endif
60 } s; 68 } s;
61}; 69};
62 70
@@ -1141,10 +1149,13 @@ static int octeon_mgmt_open(struct net_device *netdev)
1141 /* For compensation state to lock. */ 1149 /* For compensation state to lock. */
1142 ndelay(1040 * NS_PER_PHY_CLK); 1150 ndelay(1040 * NS_PER_PHY_CLK);
1143 1151
1144 /* Some Ethernet switches cannot handle standard 1152 /* Default Interframe Gaps are too small. Recommended
1145 * Interframe Gap, increase to 16 bytes. 1153 * workaround is.
1154 *
1155 * AGL_GMX_TX_IFG[IFG1]=14
1156 * AGL_GMX_TX_IFG[IFG2]=10
1146 */ 1157 */
1147 cvmx_write_csr(CVMX_AGL_GMX_TX_IFG, 0x88); 1158 cvmx_write_csr(CVMX_AGL_GMX_TX_IFG, 0xae);
1148 } 1159 }
1149 1160
1150 octeon_mgmt_rx_fill_ring(netdev); 1161 octeon_mgmt_rx_fill_ring(netdev);
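For reference, the replacement AGL_GMX_TX_IFG value packs the two gaps from the new comment into one byte, IFG1 in the low nibble and IFG2 in the high nibble (layout inferred from the values used here, so treat it as an assumption): (10 << 4) | 14 == 0xae, whereas the old 0x88 encoded IFG1 = IFG2 = 8.

	/* IFG1 = 14, IFG2 = 10 -> 0xae; 0x88 was the former 8/8 setting */
	cvmx_write_csr(CVMX_AGL_GMX_TX_IFG, (10 << 4) | 14);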
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index 43562c256379..6acf82b9f018 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -642,7 +642,7 @@ void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter)
642 qlcnic_83xx_config_intrpt(adapter, 0); 642 qlcnic_83xx_config_intrpt(adapter, 0);
643 } 643 }
644 /* Allow dma queues to drain after context reset */ 644 /* Allow dma queues to drain after context reset */
645 msleep(20); 645 mdelay(20);
646 } 646 }
647} 647}
648 648
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 5e3982fc5398..e29fe8dbd226 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -380,8 +380,9 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
380 .eesipr_value = 0x01ff009f, 380 .eesipr_value = 0x01ff009f,
381 381
382 .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, 382 .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
383 .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE | 383 .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
384 EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI, 384 EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
385 EESR_ECI,
385 .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE, 386 .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,
386 387
387 .apr = 1, 388 .apr = 1,
@@ -427,8 +428,9 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
427 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x01ff009f, 428 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x01ff009f,
428 429
429 .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, 430 .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
430 .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE | 431 .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
431 EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI, 432 EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
433 EESR_ECI,
432 .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE, 434 .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,
433 435
434 .apr = 1, 436 .apr = 1,
@@ -478,8 +480,9 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
478 .rmcr_value = 0x00000001, 480 .rmcr_value = 0x00000001,
479 481
480 .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, 482 .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
481 .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE | 483 .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
482 EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI, 484 EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
485 EESR_ECI,
483 .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE, 486 .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,
484 487
485 .apr = 1, 488 .apr = 1,
@@ -592,9 +595,9 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data_giga = {
592 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, 595 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
593 596
594 .tx_check = EESR_TC1 | EESR_FTC, 597 .tx_check = EESR_TC1 | EESR_FTC,
595 .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \ 598 .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
596 EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \ 599 EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
597 EESR_ECI, 600 EESR_TDE | EESR_ECI,
598 .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \ 601 .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
599 EESR_TFE, 602 EESR_TFE,
600 .fdr_value = 0x0000072f, 603 .fdr_value = 0x0000072f,
@@ -674,9 +677,9 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
674 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, 677 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
675 678
676 .tx_check = EESR_TC1 | EESR_FTC, 679 .tx_check = EESR_TC1 | EESR_FTC,
677 .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \ 680 .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
678 EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \ 681 EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
679 EESR_ECI, 682 EESR_TDE | EESR_ECI,
680 .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \ 683 .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
681 EESR_TFE, 684 EESR_TFE,
682 685
@@ -811,9 +814,9 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
811 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, 814 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
812 815
813 .tx_check = EESR_TC1 | EESR_FTC, 816 .tx_check = EESR_TC1 | EESR_FTC,
814 .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \ 817 .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
815 EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \ 818 EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
816 EESR_ECI, 819 EESR_TDE | EESR_ECI,
817 .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \ 820 .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
818 EESR_TFE, 821 EESR_TFE,
819 822
@@ -1549,11 +1552,12 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
1549 1552
1550ignore_link: 1553ignore_link:
1551 if (intr_status & EESR_TWB) { 1554 if (intr_status & EESR_TWB) {
1552 /* Write buck end. unused write back interrupt */ 1555 /* Unused write back interrupt */
1553 if (intr_status & EESR_TABT) /* Transmit Abort int */ 1556 if (intr_status & EESR_TABT) { /* Transmit Abort int */
1554 ndev->stats.tx_aborted_errors++; 1557 ndev->stats.tx_aborted_errors++;
1555 if (netif_msg_tx_err(mdp)) 1558 if (netif_msg_tx_err(mdp))
1556 dev_err(&ndev->dev, "Transmit Abort\n"); 1559 dev_err(&ndev->dev, "Transmit Abort\n");
1560 }
1557 } 1561 }
1558 1562
1559 if (intr_status & EESR_RABT) { 1563 if (intr_status & EESR_RABT) {
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 1ddc9f235bcb..62689a5823be 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -253,7 +253,7 @@ enum EESR_BIT {
253 253
254#define DEFAULT_TX_CHECK (EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | \ 254#define DEFAULT_TX_CHECK (EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | \
255 EESR_RTO) 255 EESR_RTO)
256#define DEFAULT_EESR_ERR_CHECK (EESR_TWB | EESR_TABT | EESR_RABT | \ 256#define DEFAULT_EESR_ERR_CHECK (EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | \
257 EESR_RDE | EESR_RFRMER | EESR_ADE | \ 257 EESR_RDE | EESR_RFRMER | EESR_ADE | \
258 EESR_TFE | EESR_TDE | EESR_ECI) 258 EESR_TFE | EESR_TDE | EESR_ECI)
259#define DEFAULT_TX_ERROR_CHECK (EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | \ 259#define DEFAULT_TX_ERROR_CHECK (EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | \
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 39e4cb39de29..4a14a940c65e 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -2139,7 +2139,7 @@ show_phy_type(struct device *dev, struct device_attribute *attr, char *buf)
2139 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); 2139 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
2140 return sprintf(buf, "%d\n", efx->phy_type); 2140 return sprintf(buf, "%d\n", efx->phy_type);
2141} 2141}
2142static DEVICE_ATTR(phy_type, 0644, show_phy_type, NULL); 2142static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL);
2143 2143
2144static int efx_register_netdev(struct efx_nic *efx) 2144static int efx_register_netdev(struct efx_nic *efx)
2145{ 2145{
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 7788fbe44f0a..95176979b2d2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -297,8 +297,8 @@ struct dma_features {
297#define MAC_RNABLE_RX 0x00000004 /* Receiver Enable */ 297#define MAC_RNABLE_RX 0x00000004 /* Receiver Enable */
298 298
299/* Default LPI timers */ 299/* Default LPI timers */
300#define STMMAC_DEFAULT_LIT_LS_TIMER 0x3E8 300#define STMMAC_DEFAULT_LIT_LS 0x3E8
301#define STMMAC_DEFAULT_TWT_LS_TIMER 0x0 301#define STMMAC_DEFAULT_TWT_LS 0x0
302 302
303#define STMMAC_CHAIN_MODE 0x1 303#define STMMAC_CHAIN_MODE 0x1
304#define STMMAC_RING_MODE 0x2 304#define STMMAC_RING_MODE 0x2
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index ee919ca8b8a0..e9eab29db7be 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -130,7 +130,7 @@ static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
130static int eee_timer = STMMAC_DEFAULT_LPI_TIMER; 130static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
131module_param(eee_timer, int, S_IRUGO | S_IWUSR); 131module_param(eee_timer, int, S_IRUGO | S_IWUSR);
132MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec"); 132MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
133#define STMMAC_LPI_TIMER(x) (jiffies + msecs_to_jiffies(x)) 133#define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
134 134
135/* By default the driver will use the ring mode to manage tx and rx descriptors 135/* By default the driver will use the ring mode to manage tx and rx descriptors
136 * but passing this value so user can force to use the chain instead of the ring 136 * but passing this value so user can force to use the chain instead of the ring
@@ -288,7 +288,7 @@ static void stmmac_eee_ctrl_timer(unsigned long arg)
288 struct stmmac_priv *priv = (struct stmmac_priv *)arg; 288 struct stmmac_priv *priv = (struct stmmac_priv *)arg;
289 289
290 stmmac_enable_eee_mode(priv); 290 stmmac_enable_eee_mode(priv);
291 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_TIMER(eee_timer)); 291 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
292} 292}
293 293
294/** 294/**
@@ -304,22 +304,34 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
304{ 304{
305 bool ret = false; 305 bool ret = false;
306 306
307 /* Using PCS we cannot dial with the phy registers at this stage
308 * so we do not support extra feature like EEE.
309 */
310 if ((priv->pcs == STMMAC_PCS_RGMII) || (priv->pcs == STMMAC_PCS_TBI) ||
311 (priv->pcs == STMMAC_PCS_RTBI))
312 goto out;
313
307 /* MAC core supports the EEE feature. */ 314 /* MAC core supports the EEE feature. */
308 if (priv->dma_cap.eee) { 315 if (priv->dma_cap.eee) {
309 /* Check if the PHY supports EEE */ 316 /* Check if the PHY supports EEE */
310 if (phy_init_eee(priv->phydev, 1)) 317 if (phy_init_eee(priv->phydev, 1))
311 goto out; 318 goto out;
312 319
313 priv->eee_active = 1; 320 if (!priv->eee_active) {
314 init_timer(&priv->eee_ctrl_timer); 321 priv->eee_active = 1;
315 priv->eee_ctrl_timer.function = stmmac_eee_ctrl_timer; 322 init_timer(&priv->eee_ctrl_timer);
316 priv->eee_ctrl_timer.data = (unsigned long)priv; 323 priv->eee_ctrl_timer.function = stmmac_eee_ctrl_timer;
317 priv->eee_ctrl_timer.expires = STMMAC_LPI_TIMER(eee_timer); 324 priv->eee_ctrl_timer.data = (unsigned long)priv;
318 add_timer(&priv->eee_ctrl_timer); 325 priv->eee_ctrl_timer.expires = STMMAC_LPI_T(eee_timer);
319 326 add_timer(&priv->eee_ctrl_timer);
320 priv->hw->mac->set_eee_timer(priv->ioaddr, 327
321 STMMAC_DEFAULT_LIT_LS_TIMER, 328 priv->hw->mac->set_eee_timer(priv->ioaddr,
322 priv->tx_lpi_timer); 329 STMMAC_DEFAULT_LIT_LS,
330 priv->tx_lpi_timer);
331 } else
332 /* Set HW EEE according to the speed */
333 priv->hw->mac->set_eee_pls(priv->ioaddr,
334 priv->phydev->link);
323 335
324 pr_info("stmmac: Energy-Efficient Ethernet initialized\n"); 336 pr_info("stmmac: Energy-Efficient Ethernet initialized\n");
325 337
@@ -329,20 +341,6 @@ out:
329 return ret; 341 return ret;
330} 342}
331 343
332/**
333 * stmmac_eee_adjust: adjust HW EEE according to the speed
334 * @priv: driver private structure
335 * Description:
336 * When the EEE has been already initialised we have to
337 * modify the PLS bit in the LPI ctrl & status reg according
338 * to the PHY link status. For this reason.
339 */
340static void stmmac_eee_adjust(struct stmmac_priv *priv)
341{
342 if (priv->eee_enabled)
343 priv->hw->mac->set_eee_pls(priv->ioaddr, priv->phydev->link);
344}
345
346/* stmmac_get_tx_hwtstamp: get HW TX timestamps 344/* stmmac_get_tx_hwtstamp: get HW TX timestamps
347 * @priv: driver private structure 345 * @priv: driver private structure
348 * @entry : descriptor index to be used. 346 * @entry : descriptor index to be used.
@@ -769,7 +767,10 @@ static void stmmac_adjust_link(struct net_device *dev)
769 if (new_state && netif_msg_link(priv)) 767 if (new_state && netif_msg_link(priv))
770 phy_print_status(phydev); 768 phy_print_status(phydev);
771 769
772 stmmac_eee_adjust(priv); 770 /* At this stage, it could be needed to setup the EEE or adjust some
771 * MAC related HW registers.
772 */
773 priv->eee_enabled = stmmac_eee_init(priv);
773 774
774 spin_unlock_irqrestore(&priv->lock, flags); 775 spin_unlock_irqrestore(&priv->lock, flags);
775 776
@@ -1277,7 +1278,7 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
1277 1278
1278 if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) { 1279 if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1279 stmmac_enable_eee_mode(priv); 1280 stmmac_enable_eee_mode(priv);
1280 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_TIMER(eee_timer)); 1281 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1281 } 1282 }
1282 spin_unlock(&priv->tx_lock); 1283 spin_unlock(&priv->tx_lock);
1283} 1284}
@@ -1671,14 +1672,9 @@ static int stmmac_open(struct net_device *dev)
1671 if (priv->phydev) 1672 if (priv->phydev)
1672 phy_start(priv->phydev); 1673 phy_start(priv->phydev);
1673 1674
1674 priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS_TIMER; 1675 priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
1675 1676
1676 /* Using PCS we cannot dial with the phy registers at this stage 1677 priv->eee_enabled = stmmac_eee_init(priv);
1677 * so we do not support extra feature like EEE.
1678 */
1679 if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
1680 priv->pcs != STMMAC_PCS_RTBI)
1681 priv->eee_enabled = stmmac_eee_init(priv);
1682 1678
1683 stmmac_init_tx_coalesce(priv); 1679 stmmac_init_tx_coalesce(priv);
1684 1680
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 21a5b291b4b3..d1a769f35f9d 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1679,7 +1679,7 @@ static int cpsw_probe(struct platform_device *pdev)
1679 priv->rx_packet_max = max(rx_packet_max, 128); 1679 priv->rx_packet_max = max(rx_packet_max, 128);
1680 priv->cpts = devm_kzalloc(&pdev->dev, sizeof(struct cpts), GFP_KERNEL); 1680 priv->cpts = devm_kzalloc(&pdev->dev, sizeof(struct cpts), GFP_KERNEL);
1681 priv->irq_enabled = true; 1681 priv->irq_enabled = true;
1682 if (!ndev) { 1682 if (!priv->cpts) {
1683 pr_err("error allocating cpts\n"); 1683 pr_err("error allocating cpts\n");
1684 goto clean_ndev_ret; 1684 goto clean_ndev_ret;
1685 } 1685 }
@@ -1973,9 +1973,12 @@ static int cpsw_suspend(struct device *dev)
1973{ 1973{
1974 struct platform_device *pdev = to_platform_device(dev); 1974 struct platform_device *pdev = to_platform_device(dev);
1975 struct net_device *ndev = platform_get_drvdata(pdev); 1975 struct net_device *ndev = platform_get_drvdata(pdev);
1976 struct cpsw_priv *priv = netdev_priv(ndev);
1976 1977
1977 if (netif_running(ndev)) 1978 if (netif_running(ndev))
1978 cpsw_ndo_stop(ndev); 1979 cpsw_ndo_stop(ndev);
1980 soft_reset("sliver 0", &priv->slaves[0].sliver->soft_reset);
1981 soft_reset("sliver 1", &priv->slaves[1].sliver->soft_reset);
1979 pm_runtime_put_sync(&pdev->dev); 1982 pm_runtime_put_sync(&pdev->dev);
1980 1983
1981 return 0; 1984 return 0;
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 49dfd592ac1e..053c84fd0853 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -705,6 +705,13 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
705 } 705 }
706 706
707 buffer = dma_map_single(ctlr->dev, data, len, chan->dir); 707 buffer = dma_map_single(ctlr->dev, data, len, chan->dir);
708 ret = dma_mapping_error(ctlr->dev, buffer);
709 if (ret) {
710 cpdma_desc_free(ctlr->pool, desc, 1);
711 ret = -EINVAL;
712 goto unlock_ret;
713 }
714
708 mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP; 715 mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
709 cpdma_desc_to_port(chan, mode, directed); 716 cpdma_desc_to_port(chan, mode, directed);
710 717
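
The cpdma hunk adds the mandatory dma_mapping_error() check before the descriptor is handed to hardware. A minimal, hedged sketch of that map-and-verify pattern outside the cpdma code (the helper name and error value are illustrative):

#include <linux/dma-mapping.h>
#include <linux/device.h>

/* Map a buffer for DMA and bail out cleanly if the mapping failed.
 * Returns 0 on success, -EINVAL if the IOMMU/SWIOTLB could not map it.
 */
static int map_tx_buffer(struct device *dev, void *data, size_t len,
			 dma_addr_t *handle)
{
	dma_addr_t buffer;

	buffer = dma_map_single(dev, data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, buffer))
		return -EINVAL;	/* never hand an invalid handle to the HW */

	*handle = buffer;
	return 0;
}
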
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index ab2307b5d9a7..4dccead586be 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -285,7 +285,9 @@ int netvsc_recv_callback(struct hv_device *device_obj,
285 285
286 skb->protocol = eth_type_trans(skb, net); 286 skb->protocol = eth_type_trans(skb, net);
287 skb->ip_summed = CHECKSUM_NONE; 287 skb->ip_summed = CHECKSUM_NONE;
288 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), packet->vlan_tci); 288 if (packet->vlan_tci & VLAN_TAG_PRESENT)
289 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
290 packet->vlan_tci);
289 291
290 net->stats.rx_packets++; 292 net->stats.rx_packets++;
291 net->stats.rx_bytes += packet->total_data_buflen; 293 net->stats.rx_bytes += packet->total_data_buflen;
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 59e9605de316..b6dd6a75919a 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -524,8 +524,10 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
524 return -EMSGSIZE; 524 return -EMSGSIZE;
525 num_pages = get_user_pages_fast(base, size, 0, &page[i]); 525 num_pages = get_user_pages_fast(base, size, 0, &page[i]);
526 if (num_pages != size) { 526 if (num_pages != size) {
527 for (i = 0; i < num_pages; i++) 527 int j;
528 put_page(page[i]); 528
529 for (j = 0; j < num_pages; j++)
530 put_page(page[i + j]);
529 return -EFAULT; 531 return -EFAULT;
530 } 532 }
531 truesize = size * PAGE_SIZE; 533 truesize = size * PAGE_SIZE;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index bfa9bb48e42d..9c61f8734a40 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1010,8 +1010,10 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
1010 return -EMSGSIZE; 1010 return -EMSGSIZE;
1011 num_pages = get_user_pages_fast(base, size, 0, &page[i]); 1011 num_pages = get_user_pages_fast(base, size, 0, &page[i]);
1012 if (num_pages != size) { 1012 if (num_pages != size) {
1013 for (i = 0; i < num_pages; i++) 1013 int j;
1014 put_page(page[i]); 1014
1015 for (j = 0; j < num_pages; j++)
1016 put_page(page[i + j]);
1015 return -EFAULT; 1017 return -EFAULT;
1016 } 1018 }
1017 truesize = size * PAGE_SIZE; 1019 truesize = size * PAGE_SIZE;
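
The macvtap and tun hunks fix the same zerocopy bug: when get_user_pages_fast() pins fewer pages than requested, only the pinned pages may be released, and the array index must not be reused as the cleanup loop counter. A small sketch of the corrected cleanup, assuming the 3.x get_user_pages_fast() signature:

#include <linux/mm.h>

/* Pin 'want' user pages starting at 'base'. On partial success release
 * exactly the pages that were pinned and report -EFAULT to the caller.
 */
static int pin_user_range(unsigned long base, int want, struct page **pages)
{
	int got = get_user_pages_fast(base, want, 0, pages);

	if (got != want) {
		int j;

		for (j = 0; j < got; j++)	/* only what we actually got */
			put_page(pages[j]);
		return -EFAULT;
	}
	return 0;
}
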
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index d095d0d3056b..56459215a22b 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -590,7 +590,13 @@ static const struct usb_device_id products[] = {
590 {QMI_GOBI1K_DEVICE(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */ 590 {QMI_GOBI1K_DEVICE(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */
591 {QMI_GOBI1K_DEVICE(0x04da, 0x250d)}, /* Panasonic Gobi Modem device */ 591 {QMI_GOBI1K_DEVICE(0x04da, 0x250d)}, /* Panasonic Gobi Modem device */
592 {QMI_GOBI1K_DEVICE(0x413c, 0x8172)}, /* Dell Gobi Modem device */ 592 {QMI_GOBI1K_DEVICE(0x413c, 0x8172)}, /* Dell Gobi Modem device */
593 {QMI_GOBI1K_DEVICE(0x1410, 0xa001)}, /* Novatel Gobi Modem device */ 593 {QMI_GOBI1K_DEVICE(0x1410, 0xa001)}, /* Novatel/Verizon USB-1000 */
594 {QMI_GOBI1K_DEVICE(0x1410, 0xa002)}, /* Novatel Gobi Modem device */
595 {QMI_GOBI1K_DEVICE(0x1410, 0xa003)}, /* Novatel Gobi Modem device */
596 {QMI_GOBI1K_DEVICE(0x1410, 0xa004)}, /* Novatel Gobi Modem device */
597 {QMI_GOBI1K_DEVICE(0x1410, 0xa005)}, /* Novatel Gobi Modem device */
598 {QMI_GOBI1K_DEVICE(0x1410, 0xa006)}, /* Novatel Gobi Modem device */
599 {QMI_GOBI1K_DEVICE(0x1410, 0xa007)}, /* Novatel Gobi Modem device */
594 {QMI_GOBI1K_DEVICE(0x0b05, 0x1776)}, /* Asus Gobi Modem device */ 600 {QMI_GOBI1K_DEVICE(0x0b05, 0x1776)}, /* Asus Gobi Modem device */
595 {QMI_GOBI1K_DEVICE(0x19d2, 0xfff3)}, /* ONDA Gobi Modem device */ 601 {QMI_GOBI1K_DEVICE(0x19d2, 0xfff3)}, /* ONDA Gobi Modem device */
596 {QMI_GOBI1K_DEVICE(0x05c6, 0x9001)}, /* Generic Gobi Modem device */ 602 {QMI_GOBI1K_DEVICE(0x05c6, 0x9001)}, /* Generic Gobi Modem device */
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 3b1d2ee7156b..57325f356d4f 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -565,18 +565,22 @@ skip:
565 565
566/* Watch incoming packets to learn mapping between Ethernet address 566/* Watch incoming packets to learn mapping between Ethernet address
567 * and Tunnel endpoint. 567 * and Tunnel endpoint.
568 * Return true if packet is bogus and should be dropped.
568 */ 569 */
569static void vxlan_snoop(struct net_device *dev, 570static bool vxlan_snoop(struct net_device *dev,
570 __be32 src_ip, const u8 *src_mac) 571 __be32 src_ip, const u8 *src_mac)
571{ 572{
572 struct vxlan_dev *vxlan = netdev_priv(dev); 573 struct vxlan_dev *vxlan = netdev_priv(dev);
573 struct vxlan_fdb *f; 574 struct vxlan_fdb *f;
574 int err;
575 575
576 f = vxlan_find_mac(vxlan, src_mac); 576 f = vxlan_find_mac(vxlan, src_mac);
577 if (likely(f)) { 577 if (likely(f)) {
578 if (likely(f->remote.remote_ip == src_ip)) 578 if (likely(f->remote.remote_ip == src_ip))
579 return; 579 return false;
580
581 /* Don't migrate static entries, drop packets */
582 if (f->state & NUD_NOARP)
583 return true;
580 584
581 if (net_ratelimit()) 585 if (net_ratelimit())
582 netdev_info(dev, 586 netdev_info(dev,
@@ -588,14 +592,19 @@ static void vxlan_snoop(struct net_device *dev,
588 } else { 592 } else {
589 /* learned new entry */ 593 /* learned new entry */
590 spin_lock(&vxlan->hash_lock); 594 spin_lock(&vxlan->hash_lock);
591 err = vxlan_fdb_create(vxlan, src_mac, src_ip, 595
592 NUD_REACHABLE, 596 /* close off race between vxlan_flush and incoming packets */
593 NLM_F_EXCL|NLM_F_CREATE, 597 if (netif_running(dev))
594 vxlan->dst_port, 598 vxlan_fdb_create(vxlan, src_mac, src_ip,
595 vxlan->default_dst.remote_vni, 599 NUD_REACHABLE,
596 0, NTF_SELF); 600 NLM_F_EXCL|NLM_F_CREATE,
601 vxlan->dst_port,
602 vxlan->default_dst.remote_vni,
603 0, NTF_SELF);
597 spin_unlock(&vxlan->hash_lock); 604 spin_unlock(&vxlan->hash_lock);
598 } 605 }
606
607 return false;
599} 608}
600 609
601 610
@@ -727,8 +736,9 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
727 vxlan->dev->dev_addr) == 0) 736 vxlan->dev->dev_addr) == 0)
728 goto drop; 737 goto drop;
729 738
730 if (vxlan->flags & VXLAN_F_LEARN) 739 if ((vxlan->flags & VXLAN_F_LEARN) &&
731 vxlan_snoop(skb->dev, oip->saddr, eth_hdr(skb)->h_source); 740 vxlan_snoop(skb->dev, oip->saddr, eth_hdr(skb)->h_source))
741 goto drop;
732 742
733 __skb_tunnel_rx(skb, vxlan->dev); 743 __skb_tunnel_rx(skb, vxlan->dev);
734 skb_reset_network_header(skb); 744 skb_reset_network_header(skb);
@@ -1151,9 +1161,11 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
1151 struct sk_buff *skb1; 1161 struct sk_buff *skb1;
1152 1162
1153 skb1 = skb_clone(skb, GFP_ATOMIC); 1163 skb1 = skb_clone(skb, GFP_ATOMIC);
1154 rc1 = vxlan_xmit_one(skb1, dev, rdst, did_rsc); 1164 if (skb1) {
1155 if (rc == NETDEV_TX_OK) 1165 rc1 = vxlan_xmit_one(skb1, dev, rdst, did_rsc);
1156 rc = rc1; 1166 if (rc == NETDEV_TX_OK)
1167 rc = rc1;
1168 }
1157 } 1169 }
1158 1170
1159 rc1 = vxlan_xmit_one(skb, dev, rdst0, did_rsc); 1171 rc1 = vxlan_xmit_one(skb, dev, rdst0, did_rsc);
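
vxlan_snoop() now doubles as a filter: it still learns source MAC to tunnel-endpoint mappings, but reports "drop this packet" when the source would overwrite a static (NUD_NOARP) entry. A compact sketch of that learn-or-reject decision, using a hypothetical FDB entry type rather than the vxlan one:

#include <linux/types.h>
#include <net/neighbour.h>

struct fdb_entry {
	__be32	remote_ip;
	u16	state;		/* NUD_* flags */
};

/* Decide what to do with a packet whose source MAC matched entry 'f'
 * (or no entry, f == NULL). Returns true if the packet must be dropped.
 */
static bool snoop_source(struct fdb_entry *f, __be32 src_ip)
{
	if (!f)
		return false;		/* unknown MAC: caller may create an entry */
	if (f->remote_ip == src_ip)
		return false;		/* nothing changed */
	if (f->state & NUD_NOARP)
		return true;		/* never migrate a static entry: drop */

	f->remote_ip = src_ip;		/* roaming host: update the mapping */
	return false;
}
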
diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c
index 147614ed86aa..6a8a382c5f4c 100644
--- a/drivers/net/wan/dlci.c
+++ b/drivers/net/wan/dlci.c
@@ -384,21 +384,37 @@ static int dlci_del(struct dlci_add *dlci)
384 struct frad_local *flp; 384 struct frad_local *flp;
385 struct net_device *master, *slave; 385 struct net_device *master, *slave;
386 int err; 386 int err;
387 bool found = false;
388
389 rtnl_lock();
387 390
388 /* validate slave device */ 391 /* validate slave device */
389 master = __dev_get_by_name(&init_net, dlci->devname); 392 master = __dev_get_by_name(&init_net, dlci->devname);
390 if (!master) 393 if (!master) {
391 return -ENODEV; 394 err = -ENODEV;
395 goto out;
396 }
397
398 list_for_each_entry(dlp, &dlci_devs, list) {
399 if (dlp->master == master) {
400 found = true;
401 break;
402 }
403 }
404 if (!found) {
405 err = -ENODEV;
406 goto out;
407 }
392 408
393 if (netif_running(master)) { 409 if (netif_running(master)) {
394 return -EBUSY; 410 err = -EBUSY;
411 goto out;
395 } 412 }
396 413
397 dlp = netdev_priv(master); 414 dlp = netdev_priv(master);
398 slave = dlp->slave; 415 slave = dlp->slave;
399 flp = netdev_priv(slave); 416 flp = netdev_priv(slave);
400 417
401 rtnl_lock();
402 err = (*flp->deassoc)(slave, master); 418 err = (*flp->deassoc)(slave, master);
403 if (!err) { 419 if (!err) {
404 list_del(&dlp->list); 420 list_del(&dlp->list);
@@ -407,8 +423,8 @@ static int dlci_del(struct dlci_add *dlci)
407 423
408 dev_put(slave); 424 dev_put(slave);
409 } 425 }
426out:
410 rtnl_unlock(); 427 rtnl_unlock();
411
412 return err; 428 return err;
413} 429}
414 430
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 0743a47cef8f..62f1b7636c92 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -1174,7 +1174,7 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
1174 mutex_lock(&priv->htc_pm_lock); 1174 mutex_lock(&priv->htc_pm_lock);
1175 1175
1176 priv->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE); 1176 priv->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE);
1177 if (priv->ps_idle) 1177 if (!priv->ps_idle)
1178 chip_reset = true; 1178 chip_reset = true;
1179 1179
1180 mutex_unlock(&priv->htc_pm_lock); 1180 mutex_unlock(&priv->htc_pm_lock);
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 1c9b1bac8b0d..83ab6be3fe6d 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1570,6 +1570,8 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
1570 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) 1570 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1571 return; 1571 return;
1572 1572
1573 rcu_read_lock();
1574
1573 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list); 1575 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
1574 last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list); 1576 last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
1575 1577
@@ -1608,8 +1610,10 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
1608 1610
1609 if (ac == last_ac || 1611 if (ac == last_ac ||
1610 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) 1612 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1611 return; 1613 break;
1612 } 1614 }
1615
1616 rcu_read_unlock();
1613} 1617}
1614 1618
1615/***********/ 1619/***********/
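
The ath9k change wraps the queue walk in an RCU read-side section and converts the early returns into breaks so the unlock is never skipped. A generic sketch of that discipline, with placeholder list and node types:

#include <linux/rculist.h>
#include <linux/rcupdate.h>

struct sched_node {
	struct list_head list;
	bool stop_here;
};

/* Walk an RCU-protected list; every exit path must fall through to
 * rcu_read_unlock(), so use break rather than return inside the walk.
 */
static void walk_sched_list(struct list_head *head)
{
	struct sched_node *n;

	rcu_read_lock();
	list_for_each_entry_rcu(n, head, list) {
		if (n->stop_here)
			break;		/* not return: the unlock still runs */
		/* per-node work goes here */
	}
	rcu_read_unlock();
}
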
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index b98f2235978e..2c593570497c 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -930,6 +930,10 @@ fail:
930 brcmf_fws_del_interface(ifp); 930 brcmf_fws_del_interface(ifp);
931 brcmf_fws_deinit(drvr); 931 brcmf_fws_deinit(drvr);
932 } 932 }
933 if (drvr->iflist[0]) {
934 free_netdev(ifp->ndev);
935 drvr->iflist[0] = NULL;
936 }
933 if (p2p_ifp) { 937 if (p2p_ifp) {
934 free_netdev(p2p_ifp->ndev); 938 free_netdev(p2p_ifp->ndev);
935 drvr->iflist[1] = NULL; 939 drvr->iflist[1] = NULL;
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index 28e7aeedd184..9fd6f2fef11b 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -3074,21 +3074,8 @@ static void brcms_b_antsel_set(struct brcms_hardware *wlc_hw, u32 antsel_avail)
3074 */ 3074 */
3075static bool brcms_c_ps_allowed(struct brcms_c_info *wlc) 3075static bool brcms_c_ps_allowed(struct brcms_c_info *wlc)
3076{ 3076{
3077 /* disallow PS when one of the following global conditions meets */ 3077 /* not supporting PS so always return false for now */
3078 if (!wlc->pub->associated) 3078 return false;
3079 return false;
3080
3081 /* disallow PS when one of these meets when not scanning */
3082 if (wlc->filter_flags & FIF_PROMISC_IN_BSS)
3083 return false;
3084
3085 if (wlc->bsscfg->type == BRCMS_TYPE_AP)
3086 return false;
3087
3088 if (wlc->bsscfg->type == BRCMS_TYPE_ADHOC)
3089 return false;
3090
3091 return true;
3092} 3079}
3093 3080
3094static void brcms_c_statsupd(struct brcms_c_info *wlc) 3081static void brcms_c_statsupd(struct brcms_c_info *wlc)
diff --git a/drivers/net/wireless/iwlegacy/3945-rs.c b/drivers/net/wireless/iwlegacy/3945-rs.c
index c9f197d9ca1e..fe31590a51b2 100644
--- a/drivers/net/wireless/iwlegacy/3945-rs.c
+++ b/drivers/net/wireless/iwlegacy/3945-rs.c
@@ -816,6 +816,7 @@ out:
816 rs_sta->last_txrate_idx = idx; 816 rs_sta->last_txrate_idx = idx;
817 info->control.rates[0].idx = rs_sta->last_txrate_idx; 817 info->control.rates[0].idx = rs_sta->last_txrate_idx;
818 } 818 }
819 info->control.rates[0].count = 1;
819 820
820 D_RATE("leave: %d\n", idx); 821 D_RATE("leave: %d\n", idx);
821} 822}
diff --git a/drivers/net/wireless/iwlegacy/4965-rs.c b/drivers/net/wireless/iwlegacy/4965-rs.c
index 1fc0b227e120..ed3c42a63a43 100644
--- a/drivers/net/wireless/iwlegacy/4965-rs.c
+++ b/drivers/net/wireless/iwlegacy/4965-rs.c
@@ -2268,7 +2268,7 @@ il4965_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta,
2268 info->control.rates[0].flags = 0; 2268 info->control.rates[0].flags = 0;
2269 } 2269 }
2270 info->control.rates[0].idx = rate_idx; 2270 info->control.rates[0].idx = rate_idx;
2271 2271 info->control.rates[0].count = 1;
2272} 2272}
2273 2273
2274static void * 2274static void *
diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.c b/drivers/net/wireless/iwlwifi/dvm/rs.c
index 907bd6e50aad..10fbb176cc8e 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rs.c
@@ -2799,7 +2799,7 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
2799 info->control.rates[0].flags = 0; 2799 info->control.rates[0].flags = 0;
2800 } 2800 }
2801 info->control.rates[0].idx = rate_idx; 2801 info->control.rates[0].idx = rate_idx;
2802 2802 info->control.rates[0].count = 1;
2803} 2803}
2804 2804
2805static void *rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta, 2805static void *rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta,
diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c
index 707446fa00bd..cd1ad0019185 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rxon.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c
@@ -1378,7 +1378,7 @@ static void iwlagn_chain_noise_reset(struct iwl_priv *priv)
1378 struct iwl_chain_noise_data *data = &priv->chain_noise_data; 1378 struct iwl_chain_noise_data *data = &priv->chain_noise_data;
1379 int ret; 1379 int ret;
1380 1380
1381 if (!(priv->calib_disabled & IWL_CHAIN_NOISE_CALIB_DISABLED)) 1381 if (priv->calib_disabled & IWL_CHAIN_NOISE_CALIB_DISABLED)
1382 return; 1382 return;
1383 1383
1384 if ((data->state == IWL_CHAIN_NOISE_ALIVE) && 1384 if ((data->state == IWL_CHAIN_NOISE_ALIVE) &&
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index 39aad9893e0b..40fed1f511e2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -1000,10 +1000,12 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
1000 */ 1000 */
1001 if (load_module) { 1001 if (load_module) {
1002 err = request_module("%s", op->name); 1002 err = request_module("%s", op->name);
1003#ifdef CONFIG_IWLWIFI_OPMODE_MODULAR
1003 if (err) 1004 if (err)
1004 IWL_ERR(drv, 1005 IWL_ERR(drv,
1005 "failed to load module %s (error %d), is dynamic loading enabled?\n", 1006 "failed to load module %s (error %d), is dynamic loading enabled?\n",
1006 op->name, err); 1007 op->name, err);
1008#endif
1007 } 1009 }
1008 return; 1010 return;
1009 1011
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index 55334d542e26..b99fe3163866 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -2546,6 +2546,7 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
2546 info->control.rates[0].flags = 0; 2546 info->control.rates[0].flags = 0;
2547 } 2547 }
2548 info->control.rates[0].idx = rate_idx; 2548 info->control.rates[0].idx = rate_idx;
2549 info->control.rates[0].count = 1;
2549} 2550}
2550 2551
2551static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta, 2552static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta,
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index f212f16502ff..48c1891e3df6 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -180,7 +180,8 @@ static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm,
180 tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE); 180 tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
181 return; 181 return;
182 } else if (ieee80211_is_back_req(fc)) { 182 } else if (ieee80211_is_back_req(fc)) {
183 tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE); 183 tx_cmd->tx_flags |=
184 cpu_to_le32(TX_CMD_FLG_ACK | TX_CMD_FLG_BAR);
184 } 185 }
185 186
186 /* HT rate doesn't make sense for a non data frame */ 187 /* HT rate doesn't make sense for a non data frame */
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index b52d70c75e1a..72f32e5caa4d 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -3027,19 +3027,26 @@ static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
3027 * TODO: we do not use +6 dBm option to do not increase power beyond 3027 * TODO: we do not use +6 dBm option to do not increase power beyond
3028 * regulatory limit, however this could be utilized for devices with 3028 * regulatory limit, however this could be utilized for devices with
3029 * CAPABILITY_POWER_LIMIT. 3029 * CAPABILITY_POWER_LIMIT.
3030 *
3031 * TODO: add different temperature compensation code for RT3290 & RT5390
3032 * to allow BBP_R1 to be used on those chips.
3030 */ 3033 */
3031 rt2800_bbp_read(rt2x00dev, 1, &r1); 3034 if (!rt2x00_rt(rt2x00dev, RT3290) &&
3032 if (delta <= -12) { 3035 !rt2x00_rt(rt2x00dev, RT5390)) {
3033 power_ctrl = 2; 3036 rt2800_bbp_read(rt2x00dev, 1, &r1);
3034 delta += 12; 3037 if (delta <= -12) {
3035 } else if (delta <= -6) { 3038 power_ctrl = 2;
3036 power_ctrl = 1; 3039 delta += 12;
3037 delta += 6; 3040 } else if (delta <= -6) {
3038 } else { 3041 power_ctrl = 1;
3039 power_ctrl = 0; 3042 delta += 6;
3043 } else {
3044 power_ctrl = 0;
3045 }
3046 rt2x00_set_field8(&r1, BBP1_TX_POWER_CTRL, power_ctrl);
3047 rt2800_bbp_write(rt2x00dev, 1, r1);
3040 } 3048 }
3041 rt2x00_set_field8(&r1, BBP1_TX_POWER_CTRL, power_ctrl); 3049
3042 rt2800_bbp_write(rt2x00dev, 1, r1);
3043 offset = TX_PWR_CFG_0; 3050 offset = TX_PWR_CFG_0;
3044 3051
3045 for (i = 0; i < EEPROM_TXPOWER_BYRATE_SIZE; i += 2) { 3052 for (i = 0; i < EEPROM_TXPOWER_BYRATE_SIZE; i += 2) {
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 52bd03b38962..637fa71de0c7 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -44,7 +44,7 @@ struct vlan_hdr {
44 * struct vlan_ethhdr - vlan ethernet header (ethhdr + vlan_hdr) 44 * struct vlan_ethhdr - vlan ethernet header (ethhdr + vlan_hdr)
45 * @h_dest: destination ethernet address 45 * @h_dest: destination ethernet address
46 * @h_source: source ethernet address 46 * @h_source: source ethernet address
47 * @h_vlan_proto: ethernet protocol (always 0x8100) 47 * @h_vlan_proto: ethernet protocol
48 * @h_vlan_TCI: priority and VLAN ID 48 * @h_vlan_TCI: priority and VLAN ID
49 * @h_vlan_encapsulated_proto: packet type ID or len 49 * @h_vlan_encapsulated_proto: packet type ID or len
50 */ 50 */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 60584b185a0c..96e4c21e15e0 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1695,6 +1695,7 @@ extern int init_dummy_netdev(struct net_device *dev);
1695extern struct net_device *dev_get_by_index(struct net *net, int ifindex); 1695extern struct net_device *dev_get_by_index(struct net *net, int ifindex);
1696extern struct net_device *__dev_get_by_index(struct net *net, int ifindex); 1696extern struct net_device *__dev_get_by_index(struct net *net, int ifindex);
1697extern struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex); 1697extern struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
1698extern int netdev_get_name(struct net *net, char *name, int ifindex);
1698extern int dev_restart(struct net_device *dev); 1699extern int dev_restart(struct net_device *dev);
1699#ifdef CONFIG_NETPOLL_TRAP 1700#ifdef CONFIG_NETPOLL_TRAP
1700extern int netpoll_trap(void); 1701extern int netpoll_trap(void);
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 9c676eae3968..dec1748cd002 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -627,6 +627,7 @@ static inline struct rtable *skb_rtable(const struct sk_buff *skb)
627} 627}
628 628
629extern void kfree_skb(struct sk_buff *skb); 629extern void kfree_skb(struct sk_buff *skb);
630extern void kfree_skb_list(struct sk_buff *segs);
630extern void skb_tx_error(struct sk_buff *skb); 631extern void skb_tx_error(struct sk_buff *skb);
631extern void consume_skb(struct sk_buff *skb); 632extern void consume_skb(struct sk_buff *skb);
632extern void __kfree_skb(struct sk_buff *skb); 633extern void __kfree_skb(struct sk_buff *skb);
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index ab5d4992e568..bdc6e87ff3eb 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -261,6 +261,7 @@ header-y += net_dropmon.h
261header-y += net_tstamp.h 261header-y += net_tstamp.h
262header-y += netconf.h 262header-y += netconf.h
263header-y += netdevice.h 263header-y += netdevice.h
264header-y += netlink_diag.h
264header-y += netfilter.h 265header-y += netfilter.h
265header-y += netfilter_arp.h 266header-y += netfilter_arp.h
266header-y += netfilter_bridge.h 267header-y += netfilter_bridge.h
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index d817c932d634..ace5e55fe5a3 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -341,7 +341,6 @@ static void hci_init1_req(struct hci_request *req, unsigned long opt)
341 341
342static void bredr_setup(struct hci_request *req) 342static void bredr_setup(struct hci_request *req)
343{ 343{
344 struct hci_cp_delete_stored_link_key cp;
345 __le16 param; 344 __le16 param;
346 __u8 flt_type; 345 __u8 flt_type;
347 346
@@ -365,10 +364,6 @@ static void bredr_setup(struct hci_request *req)
365 param = __constant_cpu_to_le16(0x7d00); 364 param = __constant_cpu_to_le16(0x7d00);
366 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param); 365 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
367 366
368 bacpy(&cp.bdaddr, BDADDR_ANY);
369 cp.delete_all = 0x01;
370 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
371
372 /* Read page scan parameters */ 367 /* Read page scan parameters */
373 if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) { 368 if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
374 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL); 369 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
@@ -602,6 +597,16 @@ static void hci_init3_req(struct hci_request *req, unsigned long opt)
602 struct hci_dev *hdev = req->hdev; 597 struct hci_dev *hdev = req->hdev;
603 u8 p; 598 u8 p;
604 599
600 /* Only send HCI_Delete_Stored_Link_Key if it is supported */
601 if (hdev->commands[6] & 0x80) {
602 struct hci_cp_delete_stored_link_key cp;
603
604 bacpy(&cp.bdaddr, BDADDR_ANY);
605 cp.delete_all = 0x01;
606 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
607 sizeof(cp), &cp);
608 }
609
605 if (hdev->commands[5] & 0x10) 610 if (hdev->commands[5] & 0x10)
606 hci_setup_link_policy(req); 611 hci_setup_link_policy(req);
607 612
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 24bee07ee4ce..68843a28a7af 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -2852,6 +2852,9 @@ static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2852 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u", 2852 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2853 conn, code, ident, dlen); 2853 conn, code, ident, dlen);
2854 2854
2855 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2856 return NULL;
2857
2855 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen; 2858 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2856 count = min_t(unsigned int, conn->mtu, len); 2859 count = min_t(unsigned int, conn->mtu, len);
2857 2860
@@ -4330,7 +4333,7 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4330 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data; 4333 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4331 u16 type, result; 4334 u16 type, result;
4332 4335
4333 if (cmd_len != sizeof(*rsp)) 4336 if (cmd_len < sizeof(*rsp))
4334 return -EPROTO; 4337 return -EPROTO;
4335 4338
4336 type = __le16_to_cpu(rsp->type); 4339 type = __le16_to_cpu(rsp->type);
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 81f2389f78eb..d6448e35e027 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -465,8 +465,9 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
465 skb_set_transport_header(skb, skb->len); 465 skb_set_transport_header(skb, skb->len);
466 mldq = (struct mld_msg *) icmp6_hdr(skb); 466 mldq = (struct mld_msg *) icmp6_hdr(skb);
467 467
468 interval = ipv6_addr_any(group) ? br->multicast_last_member_interval : 468 interval = ipv6_addr_any(group) ?
469 br->multicast_query_response_interval; 469 br->multicast_query_response_interval :
470 br->multicast_last_member_interval;
470 471
471 mldq->mld_type = ICMPV6_MGM_QUERY; 472 mldq->mld_type = ICMPV6_MGM_QUERY;
472 mldq->mld_code = 0; 473 mldq->mld_code = 0;
diff --git a/net/core/dev.c b/net/core/dev.c
index fc1e289397f5..faebb398fb46 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -792,6 +792,40 @@ struct net_device *dev_get_by_index(struct net *net, int ifindex)
792EXPORT_SYMBOL(dev_get_by_index); 792EXPORT_SYMBOL(dev_get_by_index);
793 793
794/** 794/**
795 * netdev_get_name - get a netdevice name, knowing its ifindex.
796 * @net: network namespace
797 * @name: a pointer to the buffer where the name will be stored.
798 * @ifindex: the ifindex of the interface to get the name from.
799 *
800 * The use of raw_seqcount_begin() and cond_resched() before
801 * retrying is required as we want to give the writers a chance
802 * to complete when CONFIG_PREEMPT is not set.
803 */
804int netdev_get_name(struct net *net, char *name, int ifindex)
805{
806 struct net_device *dev;
807 unsigned int seq;
808
809retry:
810 seq = raw_seqcount_begin(&devnet_rename_seq);
811 rcu_read_lock();
812 dev = dev_get_by_index_rcu(net, ifindex);
813 if (!dev) {
814 rcu_read_unlock();
815 return -ENODEV;
816 }
817
818 strcpy(name, dev->name);
819 rcu_read_unlock();
820 if (read_seqcount_retry(&devnet_rename_seq, seq)) {
821 cond_resched();
822 goto retry;
823 }
824
825 return 0;
826}
827
828/**
795 * dev_getbyhwaddr_rcu - find a device by its hardware address 829 * dev_getbyhwaddr_rcu - find a device by its hardware address
796 * @net: the applicable net namespace 830 * @net: the applicable net namespace
797 * @type: media type of device 831 * @type: media type of device
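
netdev_get_name() packages the devnet_rename_seq retry loop so dev_ioctl.c and sock.c (changed below) can stop open-coding it. A reduced sketch of the seqcount read/retry idiom it is built on; the seqcount, buffer and sizes here are placeholders, and read_seqcount_begin() stands in where the kernel helper prefers raw_seqcount_begin():

#include <linux/seqlock.h>
#include <linux/sched.h>
#include <linux/string.h>

static seqcount_t name_seq;	/* initialized elsewhere with seqcount_init();
				 * writers wrap updates in write_seqcount_begin/end */
static char shared_name[16];

/* Copy shared_name into buf, retrying if a writer raced with us.
 * cond_resched() lets the writer finish on non-preemptible kernels.
 */
static void read_name(char *buf)
{
	unsigned int seq;

	do {
		seq = read_seqcount_begin(&name_seq);
		strcpy(buf, shared_name);
		if (!read_seqcount_retry(&name_seq, seq))
			break;
		cond_resched();
	} while (1);
}
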
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
index 6cc0481faade..5b7d0e1d0664 100644
--- a/net/core/dev_ioctl.c
+++ b/net/core/dev_ioctl.c
@@ -19,9 +19,8 @@
19 19
20static int dev_ifname(struct net *net, struct ifreq __user *arg) 20static int dev_ifname(struct net *net, struct ifreq __user *arg)
21{ 21{
22 struct net_device *dev;
23 struct ifreq ifr; 22 struct ifreq ifr;
24 unsigned seq; 23 int error;
25 24
26 /* 25 /*
27 * Fetch the caller's info block. 26 * Fetch the caller's info block.
@@ -30,19 +29,9 @@ static int dev_ifname(struct net *net, struct ifreq __user *arg)
30 if (copy_from_user(&ifr, arg, sizeof(struct ifreq))) 29 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
31 return -EFAULT; 30 return -EFAULT;
32 31
33retry: 32 error = netdev_get_name(net, ifr.ifr_name, ifr.ifr_ifindex);
34 seq = read_seqcount_begin(&devnet_rename_seq); 33 if (error)
35 rcu_read_lock(); 34 return error;
36 dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
37 if (!dev) {
38 rcu_read_unlock();
39 return -ENODEV;
40 }
41
42 strcpy(ifr.ifr_name, dev->name);
43 rcu_read_unlock();
44 if (read_seqcount_retry(&devnet_rename_seq, seq))
45 goto retry;
46 35
47 if (copy_to_user(arg, &ifr, sizeof(struct ifreq))) 36 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
48 return -EFAULT; 37 return -EFAULT;
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 22efdaa76ebf..ce91766eeca9 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -60,10 +60,10 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN]
60 [NETIF_F_IPV6_CSUM_BIT] = "tx-checksum-ipv6", 60 [NETIF_F_IPV6_CSUM_BIT] = "tx-checksum-ipv6",
61 [NETIF_F_HIGHDMA_BIT] = "highdma", 61 [NETIF_F_HIGHDMA_BIT] = "highdma",
62 [NETIF_F_FRAGLIST_BIT] = "tx-scatter-gather-fraglist", 62 [NETIF_F_FRAGLIST_BIT] = "tx-scatter-gather-fraglist",
63 [NETIF_F_HW_VLAN_CTAG_TX_BIT] = "tx-vlan-ctag-hw-insert", 63 [NETIF_F_HW_VLAN_CTAG_TX_BIT] = "tx-vlan-hw-insert",
64 64
65 [NETIF_F_HW_VLAN_CTAG_RX_BIT] = "rx-vlan-ctag-hw-parse", 65 [NETIF_F_HW_VLAN_CTAG_RX_BIT] = "rx-vlan-hw-parse",
66 [NETIF_F_HW_VLAN_CTAG_FILTER_BIT] = "rx-vlan-ctag-filter", 66 [NETIF_F_HW_VLAN_CTAG_FILTER_BIT] = "rx-vlan-filter",
67 [NETIF_F_HW_VLAN_STAG_TX_BIT] = "tx-vlan-stag-hw-insert", 67 [NETIF_F_HW_VLAN_STAG_TX_BIT] = "tx-vlan-stag-hw-insert",
68 [NETIF_F_HW_VLAN_STAG_RX_BIT] = "rx-vlan-stag-hw-parse", 68 [NETIF_F_HW_VLAN_STAG_RX_BIT] = "rx-vlan-stag-hw-parse",
69 [NETIF_F_HW_VLAN_STAG_FILTER_BIT] = "rx-vlan-stag-filter", 69 [NETIF_F_HW_VLAN_STAG_FILTER_BIT] = "rx-vlan-stag-filter",
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index cfd777bd6bd0..1c1738cc4538 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -483,15 +483,8 @@ EXPORT_SYMBOL(skb_add_rx_frag);
483 483
484static void skb_drop_list(struct sk_buff **listp) 484static void skb_drop_list(struct sk_buff **listp)
485{ 485{
486 struct sk_buff *list = *listp; 486 kfree_skb_list(*listp);
487
488 *listp = NULL; 487 *listp = NULL;
489
490 do {
491 struct sk_buff *this = list;
492 list = list->next;
493 kfree_skb(this);
494 } while (list);
495} 488}
496 489
497static inline void skb_drop_fraglist(struct sk_buff *skb) 490static inline void skb_drop_fraglist(struct sk_buff *skb)
@@ -651,6 +644,17 @@ void kfree_skb(struct sk_buff *skb)
651} 644}
652EXPORT_SYMBOL(kfree_skb); 645EXPORT_SYMBOL(kfree_skb);
653 646
647void kfree_skb_list(struct sk_buff *segs)
648{
649 while (segs) {
650 struct sk_buff *next = segs->next;
651
652 kfree_skb(segs);
653 segs = next;
654 }
655}
656EXPORT_SYMBOL(kfree_skb_list);
657
654/** 658/**
655 * skb_tx_error - report an sk_buff xmit error 659 * skb_tx_error - report an sk_buff xmit error
656 * @skb: buffer that triggered an error 660 * @skb: buffer that triggered an error
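
The new kfree_skb_list() helper saves ->next before freeing each buffer, which is exactly what callers holding a half-built segment chain need on error paths (the gre change further down switches to it). A hedged caller-side sketch with a hypothetical error helper:

#include <linux/skbuff.h>
#include <linux/err.h>

/* Hypothetical error path: a segmentation step produced a chain of
 * skbs linked through ->next, and a later step failed, so the whole
 * chain must be released in one call.
 */
static struct sk_buff *abort_segments(struct sk_buff *segs, int err)
{
	kfree_skb_list(segs);		/* frees every skb on the ->next chain */
	return ERR_PTR(err);
}
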
diff --git a/net/core/sock.c b/net/core/sock.c
index 88868a9d21da..d6d024cfaaaf 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -571,9 +571,7 @@ static int sock_getbindtodevice(struct sock *sk, char __user *optval,
571 int ret = -ENOPROTOOPT; 571 int ret = -ENOPROTOOPT;
572#ifdef CONFIG_NETDEVICES 572#ifdef CONFIG_NETDEVICES
573 struct net *net = sock_net(sk); 573 struct net *net = sock_net(sk);
574 struct net_device *dev;
575 char devname[IFNAMSIZ]; 574 char devname[IFNAMSIZ];
576 unsigned seq;
577 575
578 if (sk->sk_bound_dev_if == 0) { 576 if (sk->sk_bound_dev_if == 0) {
579 len = 0; 577 len = 0;
@@ -584,20 +582,9 @@ static int sock_getbindtodevice(struct sock *sk, char __user *optval,
584 if (len < IFNAMSIZ) 582 if (len < IFNAMSIZ)
585 goto out; 583 goto out;
586 584
587retry: 585 ret = netdev_get_name(net, devname, sk->sk_bound_dev_if);
588 seq = read_seqcount_begin(&devnet_rename_seq); 586 if (ret)
589 rcu_read_lock();
590 dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if);
591 ret = -ENODEV;
592 if (!dev) {
593 rcu_read_unlock();
594 goto out; 587 goto out;
595 }
596
597 strcpy(devname, dev->name);
598 rcu_read_unlock();
599 if (read_seqcount_retry(&devnet_rename_seq, seq))
600 goto retry;
601 588
602 len = strlen(devname) + 1; 589 len = strlen(devname) + 1;
603 590
diff --git a/net/ipv4/gre.c b/net/ipv4/gre.c
index b2e805af9b87..7856d1651d05 100644
--- a/net/ipv4/gre.c
+++ b/net/ipv4/gre.c
@@ -178,7 +178,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
178 178
179 err = __skb_linearize(skb); 179 err = __skb_linearize(skb);
180 if (err) { 180 if (err) {
181 kfree_skb(segs); 181 kfree_skb_list(segs);
182 segs = ERR_PTR(err); 182 segs = ERR_PTR(err);
183 goto out; 183 goto out;
184 } 184 }
diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c
index ff4b781b1056..32b0e978c8e0 100644
--- a/net/ipv4/netfilter/ipt_ULOG.c
+++ b/net/ipv4/netfilter/ipt_ULOG.c
@@ -125,15 +125,16 @@ static void ulog_send(struct ulog_net *ulog, unsigned int nlgroupnum)
125/* timer function to flush queue in flushtimeout time */ 125/* timer function to flush queue in flushtimeout time */
126static void ulog_timer(unsigned long data) 126static void ulog_timer(unsigned long data)
127{ 127{
128 unsigned int groupnum = *((unsigned int *)data);
128 struct ulog_net *ulog = container_of((void *)data, 129 struct ulog_net *ulog = container_of((void *)data,
129 struct ulog_net, 130 struct ulog_net,
130 nlgroup[*(unsigned int *)data]); 131 nlgroup[groupnum]);
131 pr_debug("timer function called, calling ulog_send\n"); 132 pr_debug("timer function called, calling ulog_send\n");
132 133
133 /* lock to protect against somebody modifying our structure 134 /* lock to protect against somebody modifying our structure
134 * from ipt_ulog_target at the same time */ 135 * from ipt_ulog_target at the same time */
135 spin_lock_bh(&ulog->lock); 136 spin_lock_bh(&ulog->lock);
136 ulog_send(ulog, data); 137 ulog_send(ulog, groupnum);
137 spin_unlock_bh(&ulog->lock); 138 spin_unlock_bh(&ulog->lock);
138} 139}
139 140
@@ -407,8 +408,11 @@ static int __net_init ulog_tg_net_init(struct net *net)
407 408
408 spin_lock_init(&ulog->lock); 409 spin_lock_init(&ulog->lock);
409 /* initialize ulog_buffers */ 410 /* initialize ulog_buffers */
410 for (i = 0; i < ULOG_MAXNLGROUPS; i++) 411 for (i = 0; i < ULOG_MAXNLGROUPS; i++) {
411 setup_timer(&ulog->ulog_buffers[i].timer, ulog_timer, i); 412 ulog->nlgroup[i] = i;
413 setup_timer(&ulog->ulog_buffers[i].timer, ulog_timer,
414 (unsigned long)&ulog->nlgroup[i]);
415 }
412 416
413 ulog->nflognl = netlink_kernel_create(net, NETLINK_NFLOG, &cfg); 417 ulog->nflognl = netlink_kernel_create(net, NETLINK_NFLOG, &cfg);
414 if (!ulog->nflognl) 418 if (!ulog->nflognl)
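
The ULOG fix stops using the timer's data word as both an index and a container_of() cookie: each group index is stored in the per-net array and the timer receives a pointer to its own slot. A minimal sketch of that pointer-to-slot pattern with illustrative structure names:

#include <linux/timer.h>
#include <linux/kernel.h>

#define NGROUPS 32

struct mylog_net {
	unsigned int group_id[NGROUPS];
	struct timer_list flush_timer[NGROUPS];
};

static void flush_timer_fn(unsigned long data)
{
	unsigned int *idp = (unsigned int *)data;
	struct mylog_net *ln = container_of(idp, struct mylog_net,
					    group_id[*idp]);

	/* flush queue number *idp belonging to ln here */
	(void)ln;
}

static void init_flush_timers(struct mylog_net *ln)
{
	int i;

	for (i = 0; i < NGROUPS; i++) {
		ln->group_id[i] = i;	/* stable storage the callback can use */
		setup_timer(&ln->flush_timer[i], flush_timer_fn,
			    (unsigned long)&ln->group_id[i]);
	}
}
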
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 719652305a29..7999fc55c83b 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1003,7 +1003,7 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
1003 struct tcp_sock *tp = tcp_sk(sk); 1003 struct tcp_sock *tp = tcp_sk(sk);
1004 struct tcp_md5sig_info *md5sig; 1004 struct tcp_md5sig_info *md5sig;
1005 1005
1006 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&addr, AF_INET); 1006 key = tcp_md5_do_lookup(sk, addr, family);
1007 if (key) { 1007 if (key) {
1008 /* Pre-existing entry - just update that one. */ 1008 /* Pre-existing entry - just update that one. */
1009 memcpy(key->key, newkey, newkeylen); 1009 memcpy(key->key, newkey, newkeylen);
@@ -1048,7 +1048,7 @@ int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
1048 struct tcp_md5sig_key *key; 1048 struct tcp_md5sig_key *key;
1049 struct tcp_md5sig_info *md5sig; 1049 struct tcp_md5sig_info *md5sig;
1050 1050
1051 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&addr, AF_INET); 1051 key = tcp_md5_do_lookup(sk, addr, family);
1052 if (!key) 1052 if (!key)
1053 return -ENOENT; 1053 return -ENOENT;
1054 hlist_del_rcu(&key->node); 1054 hlist_del_rcu(&key->node);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 1bbf744c2cc3..4ab4c38958c6 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -2655,6 +2655,9 @@ static void init_loopback(struct net_device *dev)
2655 if (sp_ifa->flags & (IFA_F_DADFAILED | IFA_F_TENTATIVE)) 2655 if (sp_ifa->flags & (IFA_F_DADFAILED | IFA_F_TENTATIVE))
2656 continue; 2656 continue;
2657 2657
2658 if (sp_ifa->rt)
2659 continue;
2660
2658 sp_rt = addrconf_dst_alloc(idev, &sp_ifa->addr, 0); 2661 sp_rt = addrconf_dst_alloc(idev, &sp_ifa->addr, 0);
2659 2662
2660 /* Failure cases are ignored */ 2663 /* Failure cases are ignored */
@@ -4303,6 +4306,7 @@ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token)
4303 struct inet6_ifaddr *ifp; 4306 struct inet6_ifaddr *ifp;
4304 struct net_device *dev = idev->dev; 4307 struct net_device *dev = idev->dev;
4305 bool update_rs = false; 4308 bool update_rs = false;
4309 struct in6_addr ll_addr;
4306 4310
4307 if (token == NULL) 4311 if (token == NULL)
4308 return -EINVAL; 4312 return -EINVAL;
@@ -4322,11 +4326,9 @@ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token)
4322 4326
4323 write_unlock_bh(&idev->lock); 4327 write_unlock_bh(&idev->lock);
4324 4328
4325 if (!idev->dead && (idev->if_flags & IF_READY)) { 4329 if (!idev->dead && (idev->if_flags & IF_READY) &&
4326 struct in6_addr ll_addr; 4330 !ipv6_get_lladdr(dev, &ll_addr, IFA_F_TENTATIVE |
4327 4331 IFA_F_OPTIMISTIC)) {
4328 ipv6_get_lladdr(dev, &ll_addr, IFA_F_TENTATIVE |
4329 IFA_F_OPTIMISTIC);
4330 4332
4331 /* If we're not ready, then normal ifup will take care 4333 /* If we're not ready, then normal ifup will take care
4332 * of this. Otherwise, we need to request our rs here. 4334 * of this. Otherwise, we need to request our rs here.
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index dae1949019d7..d5d20cde8d92 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -381,9 +381,8 @@ int ip6_forward(struct sk_buff *skb)
381 * cannot be fragmented, because there is no warranty 381 * cannot be fragmented, because there is no warranty
382 * that different fragments will go along one path. --ANK 382 * that different fragments will go along one path. --ANK
383 */ 383 */
384 if (opt->ra) { 384 if (unlikely(opt->flags & IP6SKB_ROUTERALERT)) {
385 u8 *ptr = skb_network_header(skb) + opt->ra; 385 if (ip6_call_ra_chain(skb, ntohs(opt->ra)))
386 if (ip6_call_ra_chain(skb, (ptr[2]<<8) + ptr[3]))
387 return 0; 386 return 0;
388 } 387 }
389 388
@@ -822,11 +821,17 @@ static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
822 const struct flowi6 *fl6) 821 const struct flowi6 *fl6)
823{ 822{
824 struct ipv6_pinfo *np = inet6_sk(sk); 823 struct ipv6_pinfo *np = inet6_sk(sk);
825 struct rt6_info *rt = (struct rt6_info *)dst; 824 struct rt6_info *rt;
826 825
827 if (!dst) 826 if (!dst)
828 goto out; 827 goto out;
829 828
829 if (dst->ops->family != AF_INET6) {
830 dst_release(dst);
831 return NULL;
832 }
833
834 rt = (struct rt6_info *)dst;
830 /* Yes, checking route validity in not connected 835 /* Yes, checking route validity in not connected
831 * case is not very simple. Take into account, 836 * case is not very simple. Take into account,
832 * that we do not support routing by source, TOS, 837 * that we do not support routing by source, TOS,
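
The ip6_sk_dst_check() change refuses to treat a cached dst of the wrong family as a rt6_info. A compact sketch of that guard-before-cast pattern:

#include <linux/socket.h>
#include <net/dst.h>
#include <net/ip6_route.h>

/* Only downcast a cached dst to rt6_info once its family is known to
 * be AF_INET6; otherwise drop the reference and report "no usable dst".
 */
static struct rt6_info *dst_as_rt6(struct dst_entry *dst)
{
	if (!dst)
		return NULL;

	if (dst->ops->family != AF_INET6) {
		dst_release(dst);	/* e.g. an IPv4 route cached on a mapped socket */
		return NULL;
	}

	return (struct rt6_info *)dst;
}
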
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 2712ab22a174..ca4ffcc287f1 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1493,7 +1493,7 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
1493 */ 1493 */
1494 1494
1495 if (ha) 1495 if (ha)
1496 ndisc_fill_addr_option(skb, ND_OPT_TARGET_LL_ADDR, ha); 1496 ndisc_fill_addr_option(buff, ND_OPT_TARGET_LL_ADDR, ha);
1497 1497
1498 /* 1498 /*
1499 * build redirect option and copy skb over to the new packet. 1499 * build redirect option and copy skb over to the new packet.
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index 97bcf2bae857..c9b6a6e6a1e8 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -204,7 +204,7 @@ static unsigned int __ipv6_conntrack_in(struct net *net,
204 if (ct != NULL && !nf_ct_is_untracked(ct)) { 204 if (ct != NULL && !nf_ct_is_untracked(ct)) {
205 help = nfct_help(ct); 205 help = nfct_help(ct);
206 if ((help && help->helper) || !nf_ct_is_confirmed(ct)) { 206 if ((help && help->helper) || !nf_ct_is_confirmed(ct)) {
207 nf_conntrack_get_reasm(skb); 207 nf_conntrack_get_reasm(reasm);
208 NF_HOOK_THRESH(NFPROTO_IPV6, hooknum, reasm, 208 NF_HOOK_THRESH(NFPROTO_IPV6, hooknum, reasm,
209 (struct net_device *)in, 209 (struct net_device *)in,
210 (struct net_device *)out, 210 (struct net_device *)out,
diff --git a/net/key/af_key.c b/net/key/af_key.c
index c5fbd7589681..9da862070dd8 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -1710,6 +1710,7 @@ static int key_notify_sa_flush(const struct km_event *c)
1710 hdr->sadb_msg_version = PF_KEY_V2; 1710 hdr->sadb_msg_version = PF_KEY_V2;
1711 hdr->sadb_msg_errno = (uint8_t) 0; 1711 hdr->sadb_msg_errno = (uint8_t) 0;
1712 hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t)); 1712 hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
1713 hdr->sadb_msg_reserved = 0;
1713 1714
1714 pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net); 1715 pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
1715 1716
@@ -2699,6 +2700,7 @@ static int key_notify_policy_flush(const struct km_event *c)
2699 hdr->sadb_msg_errno = (uint8_t) 0; 2700 hdr->sadb_msg_errno = (uint8_t) 0;
2700 hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC; 2701 hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC;
2701 hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t)); 2702 hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
2703 hdr->sadb_msg_reserved = 0;
2702 pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net); 2704 pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
2703 return 0; 2705 return 0;
2704 2706
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 1a89c80e6407..4fdb306e42e0 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1057,6 +1057,12 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
1057 clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state); 1057 clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state);
1058 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED); 1058 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED);
1059 1059
1060 if (sdata->wdev.cac_started) {
1061 cancel_delayed_work_sync(&sdata->dfs_cac_timer_work);
1062 cfg80211_cac_event(sdata->dev, NL80211_RADAR_CAC_ABORTED,
1063 GFP_KERNEL);
1064 }
1065
1060 drv_stop_ap(sdata->local, sdata); 1066 drv_stop_ap(sdata->local, sdata);
1061 1067
1062 /* free all potentially still buffered bcast frames */ 1068 /* free all potentially still buffered bcast frames */
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 44be28cfc6c4..9ca8e3278cc0 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -1497,10 +1497,11 @@ static inline void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata,
1497 ieee80211_tx_skb_tid(sdata, skb, 7); 1497 ieee80211_tx_skb_tid(sdata, skb, 7);
1498} 1498}
1499 1499
1500u32 ieee802_11_parse_elems_crc(u8 *start, size_t len, bool action, 1500u32 ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
1501 struct ieee802_11_elems *elems, 1501 struct ieee802_11_elems *elems,
1502 u64 filter, u32 crc); 1502 u64 filter, u32 crc);
1503static inline void ieee802_11_parse_elems(u8 *start, size_t len, bool action, 1503static inline void ieee802_11_parse_elems(const u8 *start, size_t len,
1504 bool action,
1504 struct ieee802_11_elems *elems) 1505 struct ieee802_11_elems *elems)
1505{ 1506{
1506 ieee802_11_parse_elems_crc(start, len, action, elems, 0, 0); 1507 ieee802_11_parse_elems_crc(start, len, action, elems, 0, 0);
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index a8c2130c8ba4..741448b30825 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -2522,8 +2522,11 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
2522 u16 capab_info, aid; 2522 u16 capab_info, aid;
2523 struct ieee802_11_elems elems; 2523 struct ieee802_11_elems elems;
2524 struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf; 2524 struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
2525 const struct cfg80211_bss_ies *bss_ies = NULL;
2526 struct ieee80211_mgd_assoc_data *assoc_data = ifmgd->assoc_data;
2525 u32 changed = 0; 2527 u32 changed = 0;
2526 int err; 2528 int err;
2529 bool ret;
2527 2530
2528 /* AssocResp and ReassocResp have identical structure */ 2531 /* AssocResp and ReassocResp have identical structure */
2529 2532
@@ -2555,21 +2558,86 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
2555 ifmgd->aid = aid; 2558 ifmgd->aid = aid;
2556 2559
2557 /* 2560 /*
2561 * Some APs are erroneously not including some information in their
2562 * (re)association response frames. Try to recover by using the data
2563 * from the beacon or probe response. This seems to afflict mobile
2564 * 2G/3G/4G wifi routers, reported models include the "Onda PN51T",
2565 * "Vodafone PocketWiFi 2", "ZTE MF60" and a similar T-Mobile device.
2566 */
2567 if ((assoc_data->wmm && !elems.wmm_param) ||
2568 (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT) &&
2569 (!elems.ht_cap_elem || !elems.ht_operation)) ||
2570 (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT) &&
2571 (!elems.vht_cap_elem || !elems.vht_operation))) {
2572 const struct cfg80211_bss_ies *ies;
2573 struct ieee802_11_elems bss_elems;
2574
2575 rcu_read_lock();
2576 ies = rcu_dereference(cbss->ies);
2577 if (ies)
2578 bss_ies = kmemdup(ies, sizeof(*ies) + ies->len,
2579 GFP_ATOMIC);
2580 rcu_read_unlock();
2581 if (!bss_ies)
2582 return false;
2583
2584 ieee802_11_parse_elems(bss_ies->data, bss_ies->len,
2585 false, &bss_elems);
2586 if (assoc_data->wmm &&
2587 !elems.wmm_param && bss_elems.wmm_param) {
2588 elems.wmm_param = bss_elems.wmm_param;
2589 sdata_info(sdata,
2590 "AP bug: WMM param missing from AssocResp\n");
2591 }
2592
2593 /*
2594 * Also check if we requested HT/VHT, otherwise the AP doesn't
2595 * have to include the IEs in the (re)association response.
2596 */
2597 if (!elems.ht_cap_elem && bss_elems.ht_cap_elem &&
2598 !(ifmgd->flags & IEEE80211_STA_DISABLE_HT)) {
2599 elems.ht_cap_elem = bss_elems.ht_cap_elem;
2600 sdata_info(sdata,
2601 "AP bug: HT capability missing from AssocResp\n");
2602 }
2603 if (!elems.ht_operation && bss_elems.ht_operation &&
2604 !(ifmgd->flags & IEEE80211_STA_DISABLE_HT)) {
2605 elems.ht_operation = bss_elems.ht_operation;
2606 sdata_info(sdata,
2607 "AP bug: HT operation missing from AssocResp\n");
2608 }
2609 if (!elems.vht_cap_elem && bss_elems.vht_cap_elem &&
2610 !(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) {
2611 elems.vht_cap_elem = bss_elems.vht_cap_elem;
2612 sdata_info(sdata,
2613 "AP bug: VHT capa missing from AssocResp\n");
2614 }
2615 if (!elems.vht_operation && bss_elems.vht_operation &&
2616 !(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) {
2617 elems.vht_operation = bss_elems.vht_operation;
2618 sdata_info(sdata,
2619 "AP bug: VHT operation missing from AssocResp\n");
2620 }
2621 }
2622
2623 /*
2558 * We previously checked these in the beacon/probe response, so 2624 * We previously checked these in the beacon/probe response, so
2559 * they should be present here. This is just a safety net. 2625 * they should be present here. This is just a safety net.
2560 */ 2626 */
2561 if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT) && 2627 if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT) &&
2562 (!elems.wmm_param || !elems.ht_cap_elem || !elems.ht_operation)) { 2628 (!elems.wmm_param || !elems.ht_cap_elem || !elems.ht_operation)) {
2563 sdata_info(sdata, 2629 sdata_info(sdata,
2564 "HT AP is missing WMM params or HT capability/operation in AssocResp\n"); 2630 "HT AP is missing WMM params or HT capability/operation\n");
2565 return false; 2631 ret = false;
2632 goto out;
2566 } 2633 }
2567 2634
2568 if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT) && 2635 if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT) &&
2569 (!elems.vht_cap_elem || !elems.vht_operation)) { 2636 (!elems.vht_cap_elem || !elems.vht_operation)) {
2570 sdata_info(sdata, 2637 sdata_info(sdata,
2571 "VHT AP is missing VHT capability/operation in AssocResp\n"); 2638 "VHT AP is missing VHT capability/operation\n");
2572 return false; 2639 ret = false;
2640 goto out;
2573 } 2641 }
2574 2642
2575 mutex_lock(&sdata->local->sta_mtx); 2643 mutex_lock(&sdata->local->sta_mtx);
@@ -2580,7 +2648,8 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
2580 sta = sta_info_get(sdata, cbss->bssid); 2648 sta = sta_info_get(sdata, cbss->bssid);
2581 if (WARN_ON(!sta)) { 2649 if (WARN_ON(!sta)) {
2582 mutex_unlock(&sdata->local->sta_mtx); 2650 mutex_unlock(&sdata->local->sta_mtx);
2583 return false; 2651 ret = false;
2652 goto out;
2584 } 2653 }
2585 2654
2586 sband = local->hw.wiphy->bands[ieee80211_get_sdata_band(sdata)]; 2655 sband = local->hw.wiphy->bands[ieee80211_get_sdata_band(sdata)];
@@ -2633,7 +2702,8 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
2633 sta->sta.addr); 2702 sta->sta.addr);
2634 WARN_ON(__sta_info_destroy(sta)); 2703 WARN_ON(__sta_info_destroy(sta));
2635 mutex_unlock(&sdata->local->sta_mtx); 2704 mutex_unlock(&sdata->local->sta_mtx);
2636 return false; 2705 ret = false;
2706 goto out;
2637 } 2707 }
2638 2708
2639 mutex_unlock(&sdata->local->sta_mtx); 2709 mutex_unlock(&sdata->local->sta_mtx);
@@ -2673,7 +2743,10 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
2673 ieee80211_sta_rx_notify(sdata, (struct ieee80211_hdr *)mgmt); 2743 ieee80211_sta_rx_notify(sdata, (struct ieee80211_hdr *)mgmt);
2674 ieee80211_sta_reset_beacon_monitor(sdata); 2744 ieee80211_sta_reset_beacon_monitor(sdata);
2675 2745
2676 return true; 2746 ret = true;
2747 out:
2748 kfree(bss_ies);
2749 return ret;
2677} 2750}
2678 2751
2679static enum rx_mgmt_action __must_check 2752static enum rx_mgmt_action __must_check
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index d3f414fe67e0..a02bef35b134 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -615,7 +615,7 @@ static void rate_control_apply_mask(struct ieee80211_sub_if_data *sdata,
615 if (rates[i].idx < 0) 615 if (rates[i].idx < 0)
616 break; 616 break;
617 617
618 rate_idx_match_mask(&rates[i], sband, mask, chan_width, 618 rate_idx_match_mask(&rates[i], sband, chan_width, mask,
619 mcs_mask); 619 mcs_mask);
620 } 620 }
621} 621}
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 27e07150eb46..72e6292955bb 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -661,12 +661,12 @@ void ieee80211_queue_delayed_work(struct ieee80211_hw *hw,
661} 661}
662EXPORT_SYMBOL(ieee80211_queue_delayed_work); 662EXPORT_SYMBOL(ieee80211_queue_delayed_work);
663 663
664u32 ieee802_11_parse_elems_crc(u8 *start, size_t len, bool action, 664u32 ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
665 struct ieee802_11_elems *elems, 665 struct ieee802_11_elems *elems,
666 u64 filter, u32 crc) 666 u64 filter, u32 crc)
667{ 667{
668 size_t left = len; 668 size_t left = len;
669 u8 *pos = start; 669 const u8 *pos = start;
670 bool calc_crc = filter != 0; 670 bool calc_crc = filter != 0;
671 DECLARE_BITMAP(seen_elems, 256); 671 DECLARE_BITMAP(seen_elems, 256);
672 const u8 *ie; 672 const u8 *ie;
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 05565d2b3a61..23b8eb53a569 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1442,7 +1442,8 @@ ignore_ipip:
1442 1442
1443 /* do the statistics and put it back */ 1443 /* do the statistics and put it back */
1444 ip_vs_in_stats(cp, skb); 1444 ip_vs_in_stats(cp, skb);
1445 if (IPPROTO_TCP == cih->protocol || IPPROTO_UDP == cih->protocol) 1445 if (IPPROTO_TCP == cih->protocol || IPPROTO_UDP == cih->protocol ||
1446 IPPROTO_SCTP == cih->protocol)
1446 offset += 2 * sizeof(__u16); 1447 offset += 2 * sizeof(__u16);
1447 verdict = ip_vs_icmp_xmit(skb, cp, pp, offset, hooknum, &ciph); 1448 verdict = ip_vs_icmp_xmit(skb, cp, pp, offset, hooknum, &ciph);
1448 1449
diff --git a/net/netfilter/nf_conntrack_labels.c b/net/netfilter/nf_conntrack_labels.c
index 8fe2e99428b7..355d2ef08094 100644
--- a/net/netfilter/nf_conntrack_labels.c
+++ b/net/netfilter/nf_conntrack_labels.c
@@ -45,7 +45,7 @@ int nf_connlabel_set(struct nf_conn *ct, u16 bit)
45 if (test_bit(bit, labels->bits)) 45 if (test_bit(bit, labels->bits))
46 return 0; 46 return 0;
47 47
48 if (test_and_set_bit(bit, labels->bits)) 48 if (!test_and_set_bit(bit, labels->bits))
49 nf_conntrack_event_cache(IPCT_LABEL, ct); 49 nf_conntrack_event_cache(IPCT_LABEL, ct);
50 50
51 return 0; 51 return 0;
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 6d0f8a17c5b7..ecf065f94032 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1825,6 +1825,7 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
1825 nf_conntrack_eventmask_report((1 << IPCT_REPLY) | 1825 nf_conntrack_eventmask_report((1 << IPCT_REPLY) |
1826 (1 << IPCT_ASSURED) | 1826 (1 << IPCT_ASSURED) |
1827 (1 << IPCT_HELPER) | 1827 (1 << IPCT_HELPER) |
1828 (1 << IPCT_LABEL) |
1828 (1 << IPCT_PROTOINFO) | 1829 (1 << IPCT_PROTOINFO) |
1829 (1 << IPCT_NATSEQADJ) | 1830 (1 << IPCT_NATSEQADJ) |
1830 (1 << IPCT_MARK), 1831 (1 << IPCT_MARK),
diff --git a/net/netfilter/nf_nat_sip.c b/net/netfilter/nf_nat_sip.c
index 96ccdf78a29f..dac11f73868e 100644
--- a/net/netfilter/nf_nat_sip.c
+++ b/net/netfilter/nf_nat_sip.c
@@ -230,9 +230,10 @@ static unsigned int nf_nat_sip(struct sk_buff *skb, unsigned int protoff,
230 &ct->tuplehash[!dir].tuple.src.u3, 230 &ct->tuplehash[!dir].tuple.src.u3,
231 false); 231 false);
232 if (!mangle_packet(skb, protoff, dataoff, dptr, datalen, 232 if (!mangle_packet(skb, protoff, dataoff, dptr, datalen,
233 poff, plen, buffer, buflen)) 233 poff, plen, buffer, buflen)) {
234 nf_ct_helper_log(skb, ct, "cannot mangle received"); 234 nf_ct_helper_log(skb, ct, "cannot mangle received");
235 return NF_DROP; 235 return NF_DROP;
236 }
236 } 237 }
237 238
238 /* The rport= parameter (RFC 3581) contains the port number 239 /* The rport= parameter (RFC 3581) contains the port number
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index afaebc766933..7011c71646f0 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -45,17 +45,22 @@ optlen(const u_int8_t *opt, unsigned int offset)
 
 static int
 tcpmss_mangle_packet(struct sk_buff *skb,
-		     const struct xt_tcpmss_info *info,
+		     const struct xt_action_param *par,
 		     unsigned int in_mtu,
 		     unsigned int tcphoff,
 		     unsigned int minlen)
 {
+	const struct xt_tcpmss_info *info = par->targinfo;
 	struct tcphdr *tcph;
 	unsigned int tcplen, i;
 	__be16 oldval;
 	u16 newmss;
 	u8 *opt;
 
+	/* This is a fragment, no TCP header is available */
+	if (par->fragoff != 0)
+		return XT_CONTINUE;
+
 	if (!skb_make_writable(skb, skb->len))
 		return -1;
 
@@ -125,11 +130,17 @@ tcpmss_mangle_packet(struct sk_buff *skb,
 
 	skb_put(skb, TCPOLEN_MSS);
 
-	/* RFC 879 states that the default MSS is 536 without specific
-	 * knowledge that the destination host is prepared to accept larger.
-	 * Since no MSS was provided, we MUST NOT set a value > 536.
+	/*
+	 * IPv4: RFC 1122 states "If an MSS option is not received at
+	 * connection setup, TCP MUST assume a default send MSS of 536".
+	 * IPv6: RFC 2460 states IPv6 has a minimum MTU of 1280 and a minimum
+	 * length IPv6 header of 60, ergo the default MSS value is 1220
+	 * Since no MSS was provided, we must use the default values
 	 */
-	newmss = min(newmss, (u16)536);
+	if (par->family == NFPROTO_IPV4)
+		newmss = min(newmss, (u16)536);
+	else
+		newmss = min(newmss, (u16)1220);
 
 	opt = (u_int8_t *)tcph + sizeof(struct tcphdr);
 	memmove(opt + TCPOLEN_MSS, opt, tcplen - sizeof(struct tcphdr));
@@ -188,7 +199,7 @@ tcpmss_tg4(struct sk_buff *skb, const struct xt_action_param *par)
 	__be16 newlen;
 	int ret;
 
-	ret = tcpmss_mangle_packet(skb, par->targinfo,
+	ret = tcpmss_mangle_packet(skb, par,
 				   tcpmss_reverse_mtu(skb, PF_INET),
 				   iph->ihl * 4,
 				   sizeof(*iph) + sizeof(struct tcphdr));
@@ -217,7 +228,7 @@ tcpmss_tg6(struct sk_buff *skb, const struct xt_action_param *par)
 	tcphoff = ipv6_skip_exthdr(skb, sizeof(*ipv6h), &nexthdr, &frag_off);
 	if (tcphoff < 0)
 		return NF_DROP;
-	ret = tcpmss_mangle_packet(skb, par->targinfo,
+	ret = tcpmss_mangle_packet(skb, par,
 				   tcpmss_reverse_mtu(skb, PF_INET6),
 				   tcphoff,
 				   sizeof(*ipv6h) + sizeof(struct tcphdr));
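Two things change in xt_TCPMSS above: the target now receives the full xt_action_param (so it can bail out on fragments via par->fragoff and branch on par->family), and the clamp applied when no MSS option is present becomes family-aware, 536 for IPv4 per RFC 1122 and 1220 for IPv6, since the minimum IPv6 MTU of 1280 minus 40 bytes of IPv6 header and 20 bytes of TCP header leaves 1220. A standalone sketch of that clamp (the NFPROTO-style constants here are illustrative stand-ins):

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's NFPROTO_* values. */
enum { SKETCH_NFPROTO_IPV4 = 2, SKETCH_NFPROTO_IPV6 = 10 };

static uint16_t clamp_default_mss(uint16_t newmss, int family)
{
	/* IPv4: RFC 1122 default send MSS of 536.
	 * IPv6: minimum MTU 1280 - 40 (IPv6 header) - 20 (TCP header) = 1220. */
	uint16_t cap = (family == SKETCH_NFPROTO_IPV4) ? 536 : 1220;

	return newmss < cap ? newmss : cap;
}

int main(void)
{
	printf("IPv4 cap: %u\n", (unsigned)clamp_default_mss(1400, SKETCH_NFPROTO_IPV4));
	printf("IPv6 cap: %u\n", (unsigned)clamp_default_mss(1400, SKETCH_NFPROTO_IPV6));
	return 0;
}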
diff --git a/net/netfilter/xt_TCPOPTSTRIP.c b/net/netfilter/xt_TCPOPTSTRIP.c
index 1eb1a44bfd3d..b68fa191710f 100644
--- a/net/netfilter/xt_TCPOPTSTRIP.c
+++ b/net/netfilter/xt_TCPOPTSTRIP.c
@@ -48,11 +48,13 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb,
 		return NF_DROP;
 
 	len = skb->len - tcphoff;
-	if (len < (int)sizeof(struct tcphdr) ||
-	    tcp_hdr(skb)->doff * 4 > len)
+	if (len < (int)sizeof(struct tcphdr))
 		return NF_DROP;
 
 	tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
+	if (tcph->doff * 4 > len)
+		return NF_DROP;
+
 	opt = (u_int8_t *)tcph;
 
 	/*
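The xt_TCPOPTSTRIP change splits one combined check in two: first verify that a full fixed TCP header fits in the remaining length, then compute the header pointer from tcphoff and only afterwards read its doff field to check that the advertised options also fit, instead of relying on tcp_hdr(skb). A userspace sketch of that two-step bounds check (raw byte offsets instead of the kernel's struct tcphdr):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define TCP_FIXED_HDR_LEN 20	/* fixed TCP header, no options */

/* Sketch: ensure the fixed header is present before reading the data
 * offset, then verify the options it advertises fit in the buffer. */
static int tcp_header_fits(const uint8_t *tcph, size_t len)
{
	unsigned int doff_words;

	if (len < TCP_FIXED_HDR_LEN)
		return 0;			/* truncated fixed header */
	doff_words = tcph[12] >> 4;		/* data offset, upper nibble */
	if (doff_words * 4 > len)
		return 0;			/* options overrun the buffer */
	return 1;
}

int main(void)
{
	uint8_t hdr[20] = { 0 };

	hdr[12] = 5 << 4;			/* doff = 5 -> 20-byte header */
	printf("fits: %d\n", tcp_header_fits(hdr, sizeof(hdr)));
	return 0;
}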
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index d5aed3bb3945..b14b7e3cb6e6 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -1564,12 +1564,17 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb)
 	struct cfg80211_registered_device *dev;
 	s64 filter_wiphy = -1;
 	bool split = false;
-	struct nlattr **tb = nl80211_fam.attrbuf;
+	struct nlattr **tb;
 	int res;
 
+	/* will be zeroed in nlmsg_parse() */
+	tb = kmalloc(sizeof(*tb) * (NL80211_ATTR_MAX + 1), GFP_KERNEL);
+	if (!tb)
+		return -ENOMEM;
+
 	mutex_lock(&cfg80211_mutex);
 	res = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize,
-			  tb, nl80211_fam.maxattr, nl80211_policy);
+			  tb, NL80211_ATTR_MAX, nl80211_policy);
 	if (res == 0) {
 		split = tb[NL80211_ATTR_SPLIT_WIPHY_DUMP];
 		if (tb[NL80211_ATTR_WIPHY])
@@ -1583,6 +1588,7 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb)
 			netdev = dev_get_by_index(sock_net(skb->sk), ifidx);
 			if (!netdev) {
 				mutex_unlock(&cfg80211_mutex);
+				kfree(tb);
 				return -ENODEV;
 			}
 			if (netdev->ieee80211_ptr) {
@@ -1593,6 +1599,7 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb)
 			dev_put(netdev);
 		}
 	}
+	kfree(tb);
 
 	list_for_each_entry(dev, &cfg80211_rdev_list, list) {
 		if (!net_eq(wiphy_net(&dev->wiphy), sock_net(skb->sk)))
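The nl80211 dump now parses into a buffer allocated per invocation instead of the family-wide nl80211_fam.attrbuf, so concurrent dumps cannot overwrite each other's parsed attributes; the trade-off is that every exit path, including the -ENODEV branch, must free it. A rough userspace sketch of that per-call scratch-buffer pattern (parse_attrs() and lookup_netdev() are hypothetical placeholders):

#include <stdlib.h>
#include <stdio.h>

#define ATTR_MAX 16	/* stand-in for NL80211_ATTR_MAX */

/* Hypothetical helpers standing in for nlmsg_parse() and the netdev lookup. */
static int parse_attrs(void **tb) { (void)tb; return 0; }
static int lookup_netdev(void) { return 1; }

static int dump_wiphy_sketch(void)
{
	/* Each dump gets its own attribute table instead of a shared global. */
	void **tb = calloc(ATTR_MAX + 1, sizeof(*tb));
	int err = 0;

	if (!tb)
		return -1;			/* -ENOMEM in the kernel */
	if (parse_attrs(tb) == 0 && !lookup_netdev()) {
		err = -2;			/* -ENODEV path */
		goto out;			/* still frees the buffer */
	}
	/* ... walk the registered devices here ... */
out:
	free(tb);
	return err;
}

int main(void)
{
	printf("dump result: %d\n", dump_wiphy_sketch());
	return 0;
}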