Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/can/Kconfig | 2
-rw-r--r--  drivers/net/dsa/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/Makefile | 1
-rw-r--r--  drivers/net/ethernet/adi/bfin_mac.c | 3
-rw-r--r--  drivers/net/ethernet/agere/Kconfig | 31
-rw-r--r--  drivers/net/ethernet/agere/Makefile | 5
-rw-r--r--  drivers/net/ethernet/agere/et131x.c | 4121
-rw-r--r--  drivers/net/ethernet/agere/et131x.h | 1433
-rw-r--r--  drivers/net/ethernet/allwinner/sun4i-emac.c | 2
-rw-r--r--  drivers/net/ethernet/altera/altera_tse_main.c | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2.c | 5
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c | 13
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 3
-rw-r--r--  drivers/net/ethernet/cadence/at91_ether.c | 1
-rw-r--r--  drivers/net/ethernet/cadence/macb.c | 11
-rw-r--r--  drivers/net/ethernet/calxeda/xgmac.c | 1
-rw-r--r--  drivers/net/ethernet/davicom/dm9000.c | 3
-rw-r--r--  drivers/net/ethernet/ethoc.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/fec.h | 2
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 208
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k.h | 8
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | 6
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 2
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_hw.h | 5
-rw-r--r--  drivers/net/ethernet/intel/igb/igb.h | 1
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 16
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 3
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 6
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 41
-rw-r--r--  drivers/net/ethernet/lantiq_etop.c | 1
-rw-r--r--  drivers/net/ethernet/marvell/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/marvell/pxa168_eth.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_port.c | 17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 77
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eq.c | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fw.c | 81
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 230
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/qp.c | 60
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/uar.c | 4
-rw-r--r--  drivers/net/ethernet/moxa/moxart_ether.c | 1
-rw-r--r--  drivers/net/ethernet/netx-eth.c | 2
-rw-r--r--  drivers/net/ethernet/nuvoton/w90p910_ether.c | 1
-rw-r--r--  drivers/net/ethernet/nxp/lpc_eth.c | 3
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c | 6
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 5
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c | 10
-rw-r--r--  drivers/net/ethernet/qualcomm/qca_spi.c | 2
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 436
-rw-r--r--  drivers/net/ethernet/smsc/smc911x.c | 3
-rw-r--r--  drivers/net/ethernet/smsc/smc91x.c | 3
-rw-r--r--  drivers/net/ethernet/smsc/smsc911x.c | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 13
-rw-r--r--  drivers/net/ethernet/sun/sunvnet.c | 2
-rw-r--r--  drivers/net/ethernet/wiznet/w5100.c | 1
-rw-r--r--  drivers/net/ethernet/wiznet/w5300.c | 1
-rw-r--r--  drivers/net/ethernet/xilinx/ll_temac_main.c | 1
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 3
-rw-r--r--  drivers/net/irda/Kconfig | 2
-rw-r--r--  drivers/net/macvtap.c | 18
-rw-r--r--  drivers/net/phy/bcm7xxx.c | 28
-rw-r--r--  drivers/net/usb/r8152.c | 88
-rw-r--r--  drivers/net/vxlan.c | 4
69 files changed, 6399 insertions(+), 688 deletions(-)
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index e78d6b32431d..98d73aab52fe 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -65,7 +65,7 @@ config CAN_LEDS
 
 config CAN_AT91
 	tristate "Atmel AT91 onchip CAN controller"
-	depends on ARCH_AT91 || COMPILE_TEST
+	depends on (ARCH_AT91 || COMPILE_TEST) && HAS_IOMEM
 	---help---
 	  This is a driver for the SoC CAN controller in Atmel's AT91SAM9263
 	  and AT91SAM9X5 processors.
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index ea0697eaeff5..9234d808cbb3 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -47,6 +47,7 @@ config NET_DSA_MV88E6171
 
 config NET_DSA_BCM_SF2
 	tristate "Broadcom Starfighter 2 Ethernet switch support"
+	depends on HAS_IOMEM
 	select NET_DSA
 	select NET_DSA_TAG_BRCM
 	select FIXED_PHY if NET_DSA_BCM_SF2=y
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 0005e3792e6d..1ed1fbba5d58 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -20,6 +20,7 @@ config SUNGEM_PHY
 source "drivers/net/ethernet/3com/Kconfig"
 source "drivers/net/ethernet/adaptec/Kconfig"
 source "drivers/net/ethernet/aeroflex/Kconfig"
+source "drivers/net/ethernet/agere/Kconfig"
 source "drivers/net/ethernet/allwinner/Kconfig"
 source "drivers/net/ethernet/alteon/Kconfig"
 source "drivers/net/ethernet/altera/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 153bf2dd9fad..6e0b629e9859 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_NET_VENDOR_3COM) += 3com/
 obj-$(CONFIG_NET_VENDOR_8390) += 8390/
 obj-$(CONFIG_NET_VENDOR_ADAPTEC) += adaptec/
 obj-$(CONFIG_GRETH) += aeroflex/
+obj-$(CONFIG_NET_VENDOR_AGERE) += agere/
 obj-$(CONFIG_NET_VENDOR_ALLWINNER) += allwinner/
 obj-$(CONFIG_NET_VENDOR_ALTEON) += alteon/
 obj-$(CONFIG_ALTERA_TSE) += altera/
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index afa66847e10b..8ed4d3408ef6 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -1692,9 +1692,6 @@ static int bfin_mac_probe(struct platform_device *pdev)
 	lp->vlan1_mask = ETH_P_8021Q | mii_bus_data->vlan1_mask;
 	lp->vlan2_mask = ETH_P_8021Q | mii_bus_data->vlan2_mask;
 
-	/* Fill in the fields of the device structure with ethernet values. */
-	ether_setup(ndev);
-
 	ndev->netdev_ops = &bfin_mac_netdev_ops;
 	ndev->ethtool_ops = &bfin_mac_ethtool_ops;
 
diff --git a/drivers/net/ethernet/agere/Kconfig b/drivers/net/ethernet/agere/Kconfig
new file mode 100644
index 000000000000..63e805de619e
--- /dev/null
+++ b/drivers/net/ethernet/agere/Kconfig
@@ -0,0 +1,31 @@
+#
+# Agere device configuration
+#
+
+config NET_VENDOR_AGERE
+	bool "Agere devices"
+	default y
+	depends on PCI
+	---help---
+	  If you have a network (Ethernet) card belonging to this class, say Y
+	  and read the Ethernet-HOWTO, available from
+	  <http://www.tldp.org/docs.html#howto>.
+
+	  Note that the answer to this question doesn't directly affect the
+	  kernel: saying N will just cause the configurator to skip all
+	  the questions about Agere devices. If you say Y, you will be asked
+	  for your specific card in the following questions.
+
+if NET_VENDOR_AGERE
+
+config ET131X
+	tristate "Agere ET-1310 Gigabit Ethernet support"
+	depends on PCI
+	select PHYLIB
+	---help---
+	  This driver supports Agere ET-1310 ethernet adapters.
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called et131x.
+
+endif # NET_VENDOR_AGERE
diff --git a/drivers/net/ethernet/agere/Makefile b/drivers/net/ethernet/agere/Makefile
new file mode 100644
index 000000000000..027ff9453fe1
--- /dev/null
+++ b/drivers/net/ethernet/agere/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Agere ET-131x ethernet driver
+#
+
+obj-$(CONFIG_ET131X) += et131x.o
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
new file mode 100644
index 000000000000..384dc163851b
--- /dev/null
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -0,0 +1,4121 @@
+/* Agere Systems Inc.
+ * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
+ *
+ * Copyright © 2005 Agere Systems Inc.
+ * All rights reserved.
+ * http://www.agere.com
+ *
+ * Copyright (c) 2011 Mark Einon <mark.einon@gmail.com>
+ *
+ *------------------------------------------------------------------------------
+ *
+ * SOFTWARE LICENSE
+ *
+ * This software is provided subject to the following terms and conditions,
+ * which you should read carefully before using the software. Using this
+ * software indicates your acceptance of these terms and conditions. If you do
+ * not agree with these terms and conditions, do not use the software.
+ *
+ * Copyright © 2005 Agere Systems Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source or binary forms, with or without
+ * modifications, are permitted provided that the following conditions are met:
+ *
+ * . Redistributions of source code must retain the above copyright notice, this
+ *   list of conditions and the following Disclaimer as comments in the code as
+ *   well as in the documentation and/or other materials provided with the
+ *   distribution.
+ *
+ * . Redistributions in binary form must reproduce the above copyright notice,
+ *   this list of conditions and the following Disclaimer in the documentation
+ *   and/or other materials provided with the distribution.
+ *
+ * . Neither the name of Agere Systems Inc. nor the names of the contributors
+ *   may be used to endorse or promote products derived from this software
+ *   without specific prior written permission.
+ *
+ * Disclaimer
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
+ * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
+ * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+
+#include <linux/sched.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/ctype.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/in.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/ioport.h>
+#include <linux/crc32.h>
+#include <linux/random.h>
+#include <linux/phy.h>
+
+#include "et131x.h"
+
+MODULE_AUTHOR("Victor Soriano <vjsoriano@agere.com>");
+MODULE_AUTHOR("Mark Einon <mark.einon@gmail.com>");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("10/100/1000 Base-T Ethernet Driver for the ET1310 by Agere Systems");
+
+/* EEPROM defines */
+#define MAX_NUM_REGISTER_POLLS	1000
+#define MAX_NUM_WRITE_RETRIES	2
+
+/* MAC defines */
+#define COUNTER_WRAP_16_BIT	0x10000
+#define COUNTER_WRAP_12_BIT	0x1000
+
+/* PCI defines */
+#define INTERNAL_MEM_SIZE	0x400	/* 1024 of internal memory */
+#define INTERNAL_MEM_RX_OFFSET	0x1FF	/* 50% Tx, 50% Rx */
+
+/* ISR defines */
+/* For interrupts, normal running is:
+ *	rxdma_xfr_done, phy_interrupt, mac_stat_interrupt,
+ *	watchdog_interrupt & txdma_xfer_done
+ *
+ * In both cases, when flow control is enabled for either Tx or bi-direction,
+ * we additionally enable rx_fbr0_low and rx_fbr1_low, so we know when the
+ * buffer rings are running low.
+ */
+#define INT_MASK_DISABLE	0xffffffff
+
+/* NOTE: Masking out MAC_STAT Interrupt for now...
+ * #define INT_MASK_ENABLE		0xfff6bf17
+ * #define INT_MASK_ENABLE_NO_FLOW	0xfff6bfd7
+ */
+#define INT_MASK_ENABLE		0xfffebf17
+#define INT_MASK_ENABLE_NO_FLOW	0xfffebfd7
+
+/* General defines */
+/* Packet and header sizes */
+#define NIC_MIN_PACKET_SIZE	60
+
+/* Multicast list size */
+#define NIC_MAX_MCAST_LIST	128
+
+/* Supported Filters */
+#define ET131X_PACKET_TYPE_DIRECTED		0x0001
+#define ET131X_PACKET_TYPE_MULTICAST		0x0002
+#define ET131X_PACKET_TYPE_BROADCAST		0x0004
+#define ET131X_PACKET_TYPE_PROMISCUOUS		0x0008
+#define ET131X_PACKET_TYPE_ALL_MULTICAST	0x0010
+
+/* Tx Timeout */
+#define ET131X_TX_TIMEOUT	(1 * HZ)
+#define NIC_SEND_HANG_THRESHOLD	0
+
+/* MP_ADAPTER flags */
+#define FMP_ADAPTER_INTERRUPT_IN_USE	0x00000008
+
+/* MP_SHARED flags */
+#define FMP_ADAPTER_LOWER_POWER		0x00200000
+
+#define FMP_ADAPTER_NON_RECOVER_ERROR	0x00800000
+#define FMP_ADAPTER_HARDWARE_ERROR	0x04000000
+
+#define FMP_ADAPTER_FAIL_SEND_MASK	0x3ff00000
+
+/* Some offsets in PCI config space that are actually used. */
+#define ET1310_PCI_MAC_ADDRESS		0xA4
+#define ET1310_PCI_EEPROM_STATUS	0xB2
+#define ET1310_PCI_ACK_NACK		0xC0
+#define ET1310_PCI_REPLAY		0xC2
+#define ET1310_PCI_L0L1LATENCY		0xCF
+
+/* PCI Product IDs */
+#define ET131X_PCI_DEVICE_ID_GIG	0xED00	/* ET1310 1000 Base-T 8 */
+#define ET131X_PCI_DEVICE_ID_FAST	0xED01	/* ET1310 100 Base-T */
+
+/* Define order of magnitude converter */
+#define NANO_IN_A_MICRO	1000
+
+#define PARM_RX_NUM_BUFS_DEF	4
+#define PARM_RX_TIME_INT_DEF	10
+#define PARM_RX_MEM_END_DEF	0x2bc
+#define PARM_TX_TIME_INT_DEF	40
+#define PARM_TX_NUM_BUFS_DEF	4
+#define PARM_DMA_CACHE_DEF	0
+
+/* RX defines */
+#define FBR_CHUNKS		32
+#define MAX_DESC_PER_RING_RX	1024
+
+/* number of RFDs - default and min */
+#define RFD_LOW_WATER_MARK	40
+#define NIC_DEFAULT_NUM_RFD	1024
+#define NUM_FBRS		2
+
+#define MAX_PACKETS_HANDLED	256
+
+#define ALCATEL_MULTICAST_PKT	0x01000000
+#define ALCATEL_BROADCAST_PKT	0x02000000
+
+/* typedefs for Free Buffer Descriptors */
+struct fbr_desc {
+	u32 addr_lo;
+	u32 addr_hi;
+	u32 word2;	/* Bits 10-31 reserved, 0-9 descriptor */
+};
+
+/* Packet Status Ring Descriptors
+ *
+ * Word 0:
+ *
+ * top 16 bits are from the Alcatel Status Word as enumerated in
+ * PE-MCXMAC Data Sheet IPD DS54 0210-1 (also IPD-DS80 0205-2)
+ *
+ * 0: hp			hash pass
+ * 1: ipa			IP checksum assist
+ * 2: ipp			IP checksum pass
+ * 3: tcpa			TCP checksum assist
+ * 4: tcpp			TCP checksum pass
+ * 5: wol			WOL Event
+ * 6: rxmac_error		RXMAC Error Indicator
+ * 7: drop			Drop packet
+ * 8: ft			Frame Truncated
+ * 9: jp			Jumbo Packet
+ * 10: vp			VLAN Packet
+ * 11-15: unused
+ * 16: asw_prev_pkt_dropped	e.g. IFG too small on previous
+ * 17: asw_RX_DV_event		short receive event detected
+ * 18: asw_false_carrier_event	bad carrier since last good packet
+ * 19: asw_code_err		one or more nibbles signalled as errors
+ * 20: asw_CRC_err		CRC error
+ * 21: asw_len_chk_err		frame length field incorrect
+ * 22: asw_too_long		frame length > 1518 bytes
+ * 23: asw_OK			valid CRC + no code error
+ * 24: asw_multicast		has a multicast address
+ * 25: asw_broadcast		has a broadcast address
+ * 26: asw_dribble_nibble	spurious bits after EOP
+ * 27: asw_control_frame	is a control frame
+ * 28: asw_pause_frame		is a pause frame
+ * 29: asw_unsupported_op	unsupported OP code
+ * 30: asw_VLAN_tag		VLAN tag detected
+ * 31: asw_long_evt		Rx long event
+ *
+ * Word 1:
+ * 0-15: length			length in bytes
+ * 16-25: bi			Buffer Index
+ * 26-27: ri			Ring Index
+ * 28-31: reserved
+ */
+struct pkt_stat_desc {
+	u32 word0;
+	u32 word1;
+};
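For reference, a minimal sketch of how the word1 fields documented above could be unpacked; the masks follow the bit positions listed, and these helper names are illustrative rather than part of the patch (the driver itself uses mask macros from et131x.h):

	/* Illustrative decode of pkt_stat_desc word1, per the layout above. */
	static inline u32 psr_frame_len(const struct pkt_stat_desc *d)
	{
		return d->word1 & 0xFFFF;		/* bits 0-15: length */
	}

	static inline u32 psr_buffer_index(const struct pkt_stat_desc *d)
	{
		return (d->word1 >> 16) & 0x3FF;	/* bits 16-25: bi */
	}

	static inline u32 psr_ring_index(const struct pkt_stat_desc *d)
	{
		return (d->word1 >> 26) & 0x3;		/* bits 26-27: ri */
	}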
+
+/* Typedefs for the RX DMA status word */
+
+/* rx status word 0 holds part of the status bits of the Rx DMA engine
+ * that get copied out to memory by the ET-1310. Word 0 is a 32 bit word
+ * which contains the Free Buffer ring 0 and 1 available offset.
+ *
+ * bit 0-9 FBR1 offset
+ * bit 10 Wrap flag for FBR1
+ * bit 16-25 FBR0 offset
+ * bit 26 Wrap flag for FBR0
+ */
+
+/* RXSTAT_WORD1_t structure holds part of the status bits of the Rx DMA engine
+ * that get copied out to memory by the ET-1310. Word 3 is a 32 bit word
+ * which contains the Packet Status Ring available offset.
+ *
+ * bit 0-15 reserved
+ * bit 16-27 PSRoffset
+ * bit 28 PSRwrap
+ * bit 29-31 unused
+ */
+
+/* struct rx_status_block is a structure representing the status of the Rx
+ * DMA engine; it sits in free memory, and is pointed to by 0x101c / 0x1020
+ */
+struct rx_status_block {
+	u32 word0;
+	u32 word1;
+};
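The same style of decode applies to the status block; a sketch assuming only the word0 layout given above (again illustrative, not driver code):

	/* Illustrative decode of rx_status_block word0. */
	static inline u32 rxstat_fbr1_offset(const struct rx_status_block *s)
	{
		return s->word0 & 0x3FF;		/* bits 0-9 */
	}

	static inline bool rxstat_fbr1_wrap(const struct rx_status_block *s)
	{
		return s->word0 & BIT(10);		/* FBR1 wrap flag */
	}

	static inline u32 rxstat_fbr0_offset(const struct rx_status_block *s)
	{
		return (s->word0 >> 16) & 0x3FF;	/* bits 16-25 */
	}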
+
+/* Structure for look-up table holding free buffer ring pointers, addresses
+ * and state.
+ */
+struct fbr_lookup {
+	void *virt[MAX_DESC_PER_RING_RX];
+	u32 bus_high[MAX_DESC_PER_RING_RX];
+	u32 bus_low[MAX_DESC_PER_RING_RX];
+	void *ring_virtaddr;
+	dma_addr_t ring_physaddr;
+	void *mem_virtaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
+	dma_addr_t mem_physaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
+	u32 local_full;
+	u32 num_entries;
+	dma_addr_t buffsize;
+};
+
+/* struct rx_ring is the structure representing the adaptor's local
+ * reference(s) to the rings
+ */
+struct rx_ring {
+	struct fbr_lookup *fbr[NUM_FBRS];
+	void *ps_ring_virtaddr;
+	dma_addr_t ps_ring_physaddr;
+	u32 local_psr_full;
+	u32 psr_entries;
+
+	struct rx_status_block *rx_status_block;
+	dma_addr_t rx_status_bus;
+
+	struct list_head recv_list;
+	u32 num_ready_recv;
+
+	u32 num_rfd;
+
+	bool unfinished_receives;
+};
+
+/* TX defines */
+/* word 2 of the control bits in the Tx Descriptor ring for the ET-1310
+ *
+ * 0-15: length of packet
+ * 16-27: VLAN tag
+ * 28: VLAN CFI
+ * 29-31: VLAN priority
+ *
+ * word 3 of the control bits in the Tx Descriptor ring for the ET-1310
+ *
+ * 0: last packet in the sequence
+ * 1: first packet in the sequence
+ * 2: interrupt the processor when this pkt sent
+ * 3: Control word - no packet data
+ * 4: Issue half-duplex backpressure : XON/XOFF
+ * 5: send pause frame
+ * 6: Tx frame has error
+ * 7: append CRC
+ * 8: MAC override
+ * 9: pad packet
+ * 10: Packet is a Huge packet
+ * 11: append VLAN tag
+ * 12: IP checksum assist
+ * 13: TCP checksum assist
+ * 14: UDP checksum assist
+ */
+#define TXDESC_FLAG_LASTPKT	0x0001
+#define TXDESC_FLAG_FIRSTPKT	0x0002
+#define TXDESC_FLAG_INTPROC	0x0004
+
+/* struct tx_desc represents each descriptor on the ring */
+struct tx_desc {
+	u32 addr_hi;
+	u32 addr_lo;
+	u32 len_vlan;	/* control words how to xmit the */
+	u32 flags;	/* data (detailed above) */
+};
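To make the control words concrete, here is how a single-fragment frame that should interrupt on completion might be encoded, using only the struct fields and flag macros defined above (a sketch, not driver code):

	static void example_fill_tx_desc(struct tx_desc *desc, dma_addr_t pa,
					 u16 len)
	{
		desc->addr_hi = upper_32_bits(pa);
		desc->addr_lo = lower_32_bits(pa);
		desc->len_vlan = len;			/* bits 0-15: length */
		desc->flags = TXDESC_FLAG_FIRSTPKT |	/* first fragment... */
			      TXDESC_FLAG_LASTPKT |	/* ...and the last */
			      TXDESC_FLAG_INTPROC;	/* interrupt when sent */
	}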
+
+/* The status of the Tx DMA engine; it sits in free memory, and is pointed to
+ * by 0x101c / 0x1020. This is a DMA10 type
+ */
+
+/* TCB (Transmit Control Block: Host Side) */
+struct tcb {
+	struct tcb *next;	/* Next entry in ring */
+	u32 count;		/* Used to spot stuck/lost packets */
+	u32 stale;		/* Used to spot stuck/lost packets */
+	struct sk_buff *skb;	/* Network skb we are tied to */
+	u32 index;		/* Ring indexes */
+	u32 index_start;
+};
+
+/* Structure representing our local reference(s) to the ring */
+struct tx_ring {
+	/* TCB (Transmit Control Block) memory and lists */
+	struct tcb *tcb_ring;
+
+	/* List of TCBs that are ready to be used */
+	struct tcb *tcb_qhead;
+	struct tcb *tcb_qtail;
+
+	/* list of TCBs that are currently being sent. */
+	struct tcb *send_head;
+	struct tcb *send_tail;
+	int used;
+
+	/* The actual descriptor ring */
+	struct tx_desc *tx_desc_ring;
+	dma_addr_t tx_desc_ring_pa;
+
+	/* send_idx indicates where we last wrote to in the descriptor ring. */
+	u32 send_idx;
+
+	/* The location of the write-back status block */
+	u32 *tx_status;
+	dma_addr_t tx_status_pa;
+
+	/* Packets since the last IRQ: used for interrupt coalescing */
+	int since_irq;
+};
+
+/* Do not change these values: if changed, change them in the respective
+ * TXdma and Rxdma engines as well
+ */
+#define NUM_DESC_PER_RING_TX	512	/* TX Do not change these values */
+#define NUM_TCB			64
+
+/* These values are all superseded by registry entries to facilitate tuning.
+ * Once the desired performance has been achieved, the optimal registry values
+ * should be re-populated to these #defines:
+ */
+#define TX_ERROR_PERIOD		1000
+
+#define LO_MARK_PERCENT_FOR_PSR	15
+#define LO_MARK_PERCENT_FOR_RX	15
+
+/* RFD (Receive Frame Descriptor) */
+struct rfd {
+	struct list_head list_node;
+	struct sk_buff *skb;
+	u32 len;	/* total size of receive frame */
+	u16 bufferindex;
+	u8 ringindex;
+};
+
+/* Flow Control */
+#define FLOW_BOTH	0
+#define FLOW_TXONLY	1
+#define FLOW_RXONLY	2
+#define FLOW_NONE	3
+
+/* Struct to define some device statistics */
+struct ce_stats {
+	u32 multicast_pkts_rcvd;
+	u32 rcvd_pkts_dropped;
+
+	u32 tx_underflows;
+	u32 tx_collisions;
+	u32 tx_excessive_collisions;
+	u32 tx_first_collisions;
+	u32 tx_late_collisions;
+	u32 tx_max_pkt_errs;
+	u32 tx_deferred;
+
+	u32 rx_overflows;
+	u32 rx_length_errs;
+	u32 rx_align_errs;
+	u32 rx_crc_errs;
+	u32 rx_code_violations;
+	u32 rx_other_errs;
+
+	u32 interrupt_status;
+};
+
+/* The private adapter structure */
+struct et131x_adapter {
+	struct net_device *netdev;
+	struct pci_dev *pdev;
+	struct mii_bus *mii_bus;
+	struct phy_device *phydev;
+	struct napi_struct napi;
+
+	/* Flags that indicate current state of the adapter */
+	u32 flags;
+
+	/* local link state, to determine if a state change has occurred */
+	int link;
+
+	/* Configuration */
+	u8 rom_addr[ETH_ALEN];
+	u8 addr[ETH_ALEN];
+	bool has_eeprom;
+	u8 eeprom_data[2];
+
+	spinlock_t tcb_send_qlock;	/* protects the tx_ring send tcb list */
+	spinlock_t tcb_ready_qlock;	/* protects the tx_ring ready tcb list */
+	spinlock_t rcv_lock;		/* protects the rx_ring receive list */
+
+	/* Packet Filter and look ahead size */
+	u32 packet_filter;
+
+	/* multicast list */
+	u32 multicast_addr_count;
+	u8 multicast_list[NIC_MAX_MCAST_LIST][ETH_ALEN];
+
+	/* Pointer to the device's PCI register space */
+	struct address_map __iomem *regs;
+
+	/* Registry parameters */
+	u8 wanted_flow;		/* Flow we want for 802.3x flow control */
+	u32 registry_jumbo_packet;	/* Max supported ethernet packet size */
+
+	/* Derived from the registry: */
+	u8 flow;		/* flow control validated by the far-end */
+
+	/* Minimize init-time */
+	struct timer_list error_timer;
+
+	/* puts the PHY into coma mode when booting up with no cable plugged
+	 * in after 5 seconds
+	 */
+	u8 boot_coma;
+
+	/* Tx Memory Variables */
+	struct tx_ring tx_ring;
+
+	/* Rx Memory Variables */
+	struct rx_ring rx_ring;
+
+	struct ce_stats stats;
+};
+
+static int eeprom_wait_ready(struct pci_dev *pdev, u32 *status)
+{
+	u32 reg;
+	int i;
+
+	/* 1. Check LBCIF Status Register for bits 6 & 3:2 all equal to 0 and
+	 * bits 7,1:0 both equal to 1, at least once after reset.
+	 * Subsequent operations need only to check that bits 1:0 are equal
+	 * to 1 prior to starting a single byte read/write
+	 */
+	for (i = 0; i < MAX_NUM_REGISTER_POLLS; i++) {
+		if (pci_read_config_dword(pdev, LBCIF_DWORD1_GROUP, &reg))
+			return -EIO;
+
+		/* I2C idle and Phy Queue Avail both true */
+		if ((reg & 0x3000) == 0x3000) {
+			if (status)
+				*status = reg;
+			return reg & 0xFF;
+		}
+	}
+	return -ETIMEDOUT;
+}
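Note the return convention: on success eeprom_wait_ready() hands back the low byte of the LBCIF dword group rather than plain zero, so a caller can use the result directly as the byte just transferred. A minimal caller fragment:

	u32 status;
	int ret = eeprom_wait_ready(pdev, &status);

	if (ret < 0)
		return ret;	/* -EIO or -ETIMEDOUT */
	/* ret holds the low data byte; status holds the full register */

eeprom_write() below discards that byte, while eeprom_read() stores it through *pdata.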
+
+static int eeprom_write(struct et131x_adapter *adapter, u32 addr, u8 data)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	int index = 0;
+	int retries;
+	int err = 0;
+	int writeok = 0;
+	u32 status;
+	u32 val = 0;
+
+	/* For an EEPROM, an I2C single byte write is defined as a START
+	 * condition followed by the device address, EEPROM address, one byte
+	 * of data and a STOP condition. The STOP condition will trigger the
+	 * EEPROM's internally timed write cycle to the nonvolatile memory.
+	 * All inputs are disabled during this write cycle and the EEPROM will
+	 * not respond to any access until the internal write is complete.
+	 */
+	err = eeprom_wait_ready(pdev, NULL);
+	if (err < 0)
+		return err;
+
+	/* 2. Write to the LBCIF Control Register: bit 7=1, bit 6=1, bit 3=0,
+	 * and bits 1:0 both =0. Bit 5 should be set according to the
+	 * type of EEPROM being accessed (1=two byte addressing, 0=one
+	 * byte addressing).
+	 */
+	if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
+				  LBCIF_CONTROL_LBCIF_ENABLE |
+				  LBCIF_CONTROL_I2C_WRITE))
+		return -EIO;
+
+	/* Prepare EEPROM address for Step 3 */
+	for (retries = 0; retries < MAX_NUM_WRITE_RETRIES; retries++) {
+		if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
+			break;
+		/* Write the data to the LBCIF Data Register (the I2C write
+		 * will begin).
+		 */
+		if (pci_write_config_byte(pdev, LBCIF_DATA_REGISTER, data))
+			break;
+		/* Monitor bit 1:0 of the LBCIF Status Register. When bits
+		 * 1:0 are both equal to 1, the I2C write has completed and the
+		 * internal write cycle of the EEPROM is about to start.
+		 * (bits 1:0 = 01 is a legal state while waiting for both to
+		 * equal 1, but bits 1:0 = 10 is invalid and implies that
+		 * something is broken).
+		 */
+		err = eeprom_wait_ready(pdev, &status);
+		if (err < 0)
+			return 0;
+
+		/* Check bit 3 of the LBCIF Status Register. If equal to 1,
+		 * an error has occurred. Don't break here if we are revision
+		 * 1, this is so we do a blind write for load bug.
+		 */
+		if ((status & LBCIF_STATUS_GENERAL_ERROR) &&
+		    adapter->pdev->revision == 0)
+			break;
+
+		/* Check bit 2 of the LBCIF Status Register. If equal to 1 an
+		 * ACK error has occurred on the address phase of the write.
+		 */
+		if (status & LBCIF_STATUS_ACK_ERROR) {
+			/* This could be due to an actual hardware failure
+			 * or the EEPROM may still be in its internal write
+			 * cycle from a previous write. This write operation
+			 * was ignored and must be repeated later.
+			 */
+			udelay(10);
+			continue;
+		}
+
+		writeok = 1;
+		break;
+	}
+
+	udelay(10);
+
+	while (1) {
+		if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
+					  LBCIF_CONTROL_LBCIF_ENABLE))
+			writeok = 0;
+
+		/* Do read until internal ACK_ERROR goes away meaning write
+		 * completed
+		 */
+		do {
+			pci_write_config_dword(pdev,
+					       LBCIF_ADDRESS_REGISTER,
+					       addr);
+			do {
+				pci_read_config_dword(pdev,
+						      LBCIF_DATA_REGISTER,
+						      &val);
+			} while ((val & 0x00010000) == 0);
+		} while (val & 0x00040000);
+
+		if ((val & 0xFF00) != 0xC000 || index == 10000)
+			break;
+		index++;
+	}
+	return writeok ? 0 : -EIO;
+}
+
+static int eeprom_read(struct et131x_adapter *adapter, u32 addr, u8 *pdata)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	int err;
+	u32 status;
+
+	/* A single byte read is similar to the single byte write, with the
+	 * exception of the data flow:
+	 */
+	err = eeprom_wait_ready(pdev, NULL);
+	if (err < 0)
+		return err;
+	/* Write to the LBCIF Control Register: bit 7=1, bit 6=0, bit 3=0,
+	 * and bits 1:0 both =0. Bit 5 should be set according to the type
+	 * of EEPROM being accessed (1=two byte addressing, 0=one byte
+	 * addressing).
+	 */
+	if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
+				  LBCIF_CONTROL_LBCIF_ENABLE))
+		return -EIO;
+	/* Write the address to the LBCIF Address Register (I2C read will
+	 * begin).
+	 */
+	if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
+		return -EIO;
+	/* Monitor bit 0 of the LBCIF Status Register. When = 1, I2C read
+	 * is complete. (if bit 1 = 1 and bit 0 stays = 0, a hardware failure
+	 * has occurred).
+	 */
+	err = eeprom_wait_ready(pdev, &status);
+	if (err < 0)
+		return err;
+	/* Regardless of error status, read data byte from LBCIF Data
+	 * Register. (The low byte returned by eeprom_wait_ready() above is
+	 * that data byte.)
+	 */
+	*pdata = err;
+
+	return (status & LBCIF_STATUS_ACK_ERROR) ? -EIO : 0;
+}
+
+static int et131x_init_eeprom(struct et131x_adapter *adapter)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	u8 eestatus;
+
+	pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus);
+
+	/* THIS IS A WORKAROUND:
+	 * I need to call this function twice to get my card in a
+	 * LG M1 Express Dual running. I also tried a msleep before this
+	 * function, because I thought there could be some timing conditions,
+	 * but it didn't work. Calling the whole function twice also works.
+	 */
+	if (pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus)) {
+		dev_err(&pdev->dev,
+			"Could not read PCI config space for EEPROM Status\n");
+		return -EIO;
+	}
+
+	/* Determine if the error(s) we care about are present. If they are
+	 * present we need to fail.
+	 */
+	if (eestatus & 0x4C) {
+		int write_failed = 0;
+
+		if (pdev->revision == 0x01) {
+			int i;
+			static const u8 eedata[4] = { 0xFE, 0x13, 0x10, 0xFF };
+
+			/* Re-write the first 4 bytes if we have an eeprom
+			 * present and the revision id is 1, this fixes the
+			 * corruption seen with 1310 B Silicon
+			 */
+			for (i = 0; i < 3; i++)
+				if (eeprom_write(adapter, i, eedata[i]) < 0)
+					write_failed = 1;
+		}
+		if (pdev->revision != 0x01 || write_failed) {
+			dev_err(&pdev->dev,
+				"Fatal EEPROM Status Error - 0x%04x\n",
+				eestatus);
+
+			/* This error could mean that there was an error
+			 * reading the eeprom or that the eeprom doesn't exist.
+			 * We will treat each case the same and not try to
+			 * gather additional information that normally would
+			 * come from the eeprom, like MAC Address
+			 */
+			adapter->has_eeprom = 0;
+			return -EIO;
+		}
+	}
+	adapter->has_eeprom = 1;
+
+	/* Read the EEPROM for information regarding LED behavior. Refer to
+	 * et131x_xcvr_init() for its use.
+	 */
+	eeprom_read(adapter, 0x70, &adapter->eeprom_data[0]);
+	eeprom_read(adapter, 0x71, &adapter->eeprom_data[1]);
+
+	if (adapter->eeprom_data[0] != 0xcd)
+		/* Disable all optional features */
+		adapter->eeprom_data[1] = 0x00;
+
+	return 0;
+}
+
+static void et131x_rx_dma_enable(struct et131x_adapter *adapter)
+{
+	/* Setup the receive dma configuration register for normal operation */
+	u32 csr = ET_RXDMA_CSR_FBR1_ENABLE;
+	struct rx_ring *rx_ring = &adapter->rx_ring;
+
+	if (rx_ring->fbr[1]->buffsize == 4096)
+		csr |= ET_RXDMA_CSR_FBR1_SIZE_LO;
+	else if (rx_ring->fbr[1]->buffsize == 8192)
+		csr |= ET_RXDMA_CSR_FBR1_SIZE_HI;
+	else if (rx_ring->fbr[1]->buffsize == 16384)
+		csr |= ET_RXDMA_CSR_FBR1_SIZE_LO | ET_RXDMA_CSR_FBR1_SIZE_HI;
+
+	csr |= ET_RXDMA_CSR_FBR0_ENABLE;
+	if (rx_ring->fbr[0]->buffsize == 256)
+		csr |= ET_RXDMA_CSR_FBR0_SIZE_LO;
+	else if (rx_ring->fbr[0]->buffsize == 512)
+		csr |= ET_RXDMA_CSR_FBR0_SIZE_HI;
+	else if (rx_ring->fbr[0]->buffsize == 1024)
+		csr |= ET_RXDMA_CSR_FBR0_SIZE_LO | ET_RXDMA_CSR_FBR0_SIZE_HI;
+	writel(csr, &adapter->regs->rxdma.csr);
+
+	csr = readl(&adapter->regs->rxdma.csr);
+	if (csr & ET_RXDMA_CSR_HALT_STATUS) {
+		udelay(5);
+		csr = readl(&adapter->regs->rxdma.csr);
+		if (csr & ET_RXDMA_CSR_HALT_STATUS) {
+			dev_err(&adapter->pdev->dev,
+				"RX Dma failed to exit halt state. CSR 0x%08x\n",
+				csr);
+		}
+	}
+}
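The two size bits per free buffer ring act as a 2-bit code; the branches above imply this mapping for FBR1 (FBR0 is analogous at 256/512/1024, and code 00 is left clear for the remaining supported size):

	/*	buffsize	SIZE_HI	SIZE_LO
	 *	(smallest)	   0	   0
	 *	4096		   0	   1
	 *	8192		   1	   0
	 *	16384		   1	   1
	 */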
+
+static void et131x_rx_dma_disable(struct et131x_adapter *adapter)
+{
+	u32 csr;
+
+	/* Setup the receive dma configuration register */
+	writel(ET_RXDMA_CSR_HALT | ET_RXDMA_CSR_FBR1_ENABLE,
+	       &adapter->regs->rxdma.csr);
+	csr = readl(&adapter->regs->rxdma.csr);
+	if (!(csr & ET_RXDMA_CSR_HALT_STATUS)) {
+		udelay(5);
+		csr = readl(&adapter->regs->rxdma.csr);
+		if (!(csr & ET_RXDMA_CSR_HALT_STATUS))
+			dev_err(&adapter->pdev->dev,
+				"RX Dma failed to enter halt state. CSR 0x%08x\n",
+				csr);
+	}
+}
+
+static void et131x_tx_dma_enable(struct et131x_adapter *adapter)
+{
+	/* Setup the transmit dma configuration register for normal
+	 * operation
+	 */
+	writel(ET_TXDMA_SNGL_EPKT | (PARM_DMA_CACHE_DEF << ET_TXDMA_CACHE_SHIFT),
+	       &adapter->regs->txdma.csr);
+}
+
+static inline void add_10bit(u32 *v, int n)
+{
+	*v = INDEX10(*v + n) | (*v & ET_DMA10_WRAP);
+}
+
+static inline void add_12bit(u32 *v, int n)
+{
+	*v = INDEX12(*v + n) | (*v & ET_DMA12_WRAP);
+}
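A worked example of the helpers above, assuming INDEX10(x) masks the low 10 bits (0x3FF) and ET_DMA10_WRAP is bit 10 (0x400) as defined in et131x.h: the index advances modulo 1024 while the wrap flag is left untouched (callers toggle it themselves when they cross the end of a ring).

	u32 v = 0x400 | 1022;	/* wrap set, index 1022 */

	add_10bit(&v, 3);	/* index becomes (1022 + 3) % 1024 = 1 */
	/* v == 0x401: wrap bit still set, index 1 */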
+
+static void et1310_config_mac_regs1(struct et131x_adapter *adapter)
+{
+	struct mac_regs __iomem *macregs = &adapter->regs->mac;
+	u32 station1;
+	u32 station2;
+	u32 ipg;
+
+	/* First we need to reset everything. Write to MAC configuration
+	 * register 1 to perform reset.
+	 */
+	writel(ET_MAC_CFG1_SOFT_RESET | ET_MAC_CFG1_SIM_RESET |
+	       ET_MAC_CFG1_RESET_RXMC | ET_MAC_CFG1_RESET_TXMC |
+	       ET_MAC_CFG1_RESET_RXFUNC | ET_MAC_CFG1_RESET_TXFUNC,
+	       &macregs->cfg1);
+
+	/* Next let's configure the MAC Inter-packet gap register */
+	ipg = 0x38005860;	/* IPG1 0x38, IPG2 0x58, B2B 0x60 */
+	ipg |= 0x50 << 8;	/* ifg enforce 0x50 */
+	writel(ipg, &macregs->ipg);
+
+	/* Next let's configure the MAC Half Duplex register */
+	/* BEB trunc 0xA, Ex Defer, Rexmit 0xF, Coll 0x37 */
+	writel(0x00A1F037, &macregs->hfdp);
+
+	/* Next let's configure the MAC Interface Control register */
+	writel(0, &macregs->if_ctrl);
+
+	writel(ET_MAC_MIIMGMT_CLK_RST, &macregs->mii_mgmt_cfg);
+
+	/* Next let's configure the MAC Station Address register. These
+	 * values are read from the EEPROM during initialization and stored
+	 * in the adapter structure. We write what is stored in the adapter
+	 * structure to the MAC Station Address registers high and low. This
+	 * station address is used for generating and checking pause control
+	 * packets.
+	 */
+	station2 = (adapter->addr[1] << ET_MAC_STATION_ADDR2_OC2_SHIFT) |
+		   (adapter->addr[0] << ET_MAC_STATION_ADDR2_OC1_SHIFT);
+	station1 = (adapter->addr[5] << ET_MAC_STATION_ADDR1_OC6_SHIFT) |
+		   (adapter->addr[4] << ET_MAC_STATION_ADDR1_OC5_SHIFT) |
+		   (adapter->addr[3] << ET_MAC_STATION_ADDR1_OC4_SHIFT) |
+		   adapter->addr[2];
+	writel(station1, &macregs->station_addr_1);
+	writel(station2, &macregs->station_addr_2);
+
+	/* Max ethernet packet in bytes that will be passed by the mac without
+	 * being truncated. Allow the MAC to pass 4 more than our max packet
+	 * size. This is 4 for the Ethernet CRC.
+	 *
+	 * Packets larger than (registry_jumbo_packet) that do not contain a
+	 * VLAN ID will be dropped by the Rx function.
+	 */
+	writel(adapter->registry_jumbo_packet + 4, &macregs->max_fm_len);
+
+	/* clear out MAC config reset */
+	writel(0, &macregs->cfg1);
+}
+
+static void et1310_config_mac_regs2(struct et131x_adapter *adapter)
+{
+	int32_t delay = 0;
+	struct mac_regs __iomem *mac = &adapter->regs->mac;
+	struct phy_device *phydev = adapter->phydev;
+	u32 cfg1;
+	u32 cfg2;
+	u32 ifctrl;
+	u32 ctl;
+
+	ctl = readl(&adapter->regs->txmac.ctl);
+	cfg1 = readl(&mac->cfg1);
+	cfg2 = readl(&mac->cfg2);
+	ifctrl = readl(&mac->if_ctrl);
+
+	/* Set up the if mode bits */
+	cfg2 &= ~ET_MAC_CFG2_IFMODE_MASK;
+	if (phydev->speed == SPEED_1000) {
+		cfg2 |= ET_MAC_CFG2_IFMODE_1000;
+		ifctrl &= ~ET_MAC_IFCTRL_PHYMODE;
+	} else {
+		cfg2 |= ET_MAC_CFG2_IFMODE_100;
+		ifctrl |= ET_MAC_IFCTRL_PHYMODE;
+	}
+
+	cfg1 |= ET_MAC_CFG1_RX_ENABLE | ET_MAC_CFG1_TX_ENABLE |
+		ET_MAC_CFG1_TX_FLOW;
+
+	cfg1 &= ~(ET_MAC_CFG1_LOOPBACK | ET_MAC_CFG1_RX_FLOW);
+	if (adapter->flow == FLOW_RXONLY || adapter->flow == FLOW_BOTH)
+		cfg1 |= ET_MAC_CFG1_RX_FLOW;
+	writel(cfg1, &mac->cfg1);
+
+	/* Now we need to initialize the MAC Configuration 2 register */
+	/* preamble 7, check length, huge frame off, pad crc, crc enable,
+	 * full duplex off
+	 */
+	cfg2 |= 0x7 << ET_MAC_CFG2_PREAMBLE_SHIFT;
+	cfg2 |= ET_MAC_CFG2_IFMODE_LEN_CHECK;
+	cfg2 |= ET_MAC_CFG2_IFMODE_PAD_CRC;
+	cfg2 |= ET_MAC_CFG2_IFMODE_CRC_ENABLE;
+	cfg2 &= ~ET_MAC_CFG2_IFMODE_HUGE_FRAME;
+	cfg2 &= ~ET_MAC_CFG2_IFMODE_FULL_DPLX;
+
+	if (phydev->duplex == DUPLEX_FULL)
+		cfg2 |= ET_MAC_CFG2_IFMODE_FULL_DPLX;
+
+	ifctrl &= ~ET_MAC_IFCTRL_GHDMODE;
+	if (phydev->duplex == DUPLEX_HALF)
+		ifctrl |= ET_MAC_IFCTRL_GHDMODE;
+
+	writel(ifctrl, &mac->if_ctrl);
+	writel(cfg2, &mac->cfg2);
+
+	do {
+		udelay(10);
+		delay++;
+		cfg1 = readl(&mac->cfg1);
+	} while ((cfg1 & ET_MAC_CFG1_WAIT) != ET_MAC_CFG1_WAIT && delay < 100);
+
+	if (delay == 100) {
+		dev_warn(&adapter->pdev->dev,
+			 "Syncd bits did not respond correctly cfg1 word 0x%08x\n",
+			 cfg1);
+	}
+
+	ctl |= ET_TX_CTRL_TXMAC_ENABLE | ET_TX_CTRL_FC_DISABLE;
+	writel(ctl, &adapter->regs->txmac.ctl);
+
+	if (adapter->flags & FMP_ADAPTER_LOWER_POWER) {
+		et131x_rx_dma_enable(adapter);
+		et131x_tx_dma_enable(adapter);
+	}
+}
+
+static int et1310_in_phy_coma(struct et131x_adapter *adapter)
+{
+	u32 pmcsr = readl(&adapter->regs->global.pm_csr);
+
+	return ET_PM_PHY_SW_COMA & pmcsr ? 1 : 0;
+}
+
+static void et1310_setup_device_for_multicast(struct et131x_adapter *adapter)
+{
+	struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
+	u32 hash1 = 0;
+	u32 hash2 = 0;
+	u32 hash3 = 0;
+	u32 hash4 = 0;
+	u32 pm_csr;
+
+	/* If ET131X_PACKET_TYPE_MULTICAST is specified, then we provision
+	 * the multi-cast LIST. If it is NOT specified, (and "ALL" is not
+	 * specified) then we should pass NO multi-cast addresses to the
+	 * driver.
+	 */
+	if (adapter->packet_filter & ET131X_PACKET_TYPE_MULTICAST) {
+		int i;
+
+		/* Loop through our multicast array and set up the device */
+		for (i = 0; i < adapter->multicast_addr_count; i++) {
+			u32 result;
+
+			result = ether_crc(6, adapter->multicast_list[i]);
+
+			result = (result & 0x3F800000) >> 23;
+
+			if (result < 32) {
+				hash1 |= (1 << result);
+			} else if ((31 < result) && (result < 64)) {
+				result -= 32;
+				hash2 |= (1 << result);
+			} else if ((63 < result) && (result < 96)) {
+				result -= 64;
+				hash3 |= (1 << result);
+			} else {
+				result -= 96;
+				hash4 |= (1 << result);
+			}
+		}
+	}
+
+	/* Write out the new hash to the device */
+	pm_csr = readl(&adapter->regs->global.pm_csr);
+	if (!et1310_in_phy_coma(adapter)) {
+		writel(hash1, &rxmac->multi_hash1);
+		writel(hash2, &rxmac->multi_hash2);
+		writel(hash3, &rxmac->multi_hash3);
+		writel(hash4, &rxmac->multi_hash4);
+	}
+}
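A worked example of the bucket selection above: bits 23-29 of the CRC, i.e. (crc & 0x3F800000) >> 23, form a 7-bit bucket number 0..127, spread across the four 32-bit hash registers. A CRC of 0x1A800000 gives bucket 0x35 = 53, which lands in the second register:

	result = (0x1A800000 & 0x3F800000) >> 23;	/* = 53 */
	/* 32 <= 53 < 64, so: */
	hash2 |= 1 << (53 - 32);			/* bit 21 of multi_hash2 */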
+
+static void et1310_setup_device_for_unicast(struct et131x_adapter *adapter)
+{
+	struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
+	u32 uni_pf1;
+	u32 uni_pf2;
+	u32 uni_pf3;
+	u32 pm_csr;
+
+	/* Set up unicast packet filter reg 3 to be the first two octets of
+	 * the MAC address for both addresses
+	 *
+	 * Set up unicast packet filter reg 2 to be the octets 2 - 5 of the
+	 * MAC address for second address
+	 *
+	 * Set up unicast packet filter reg 1 to be the octets 2 - 5 of the
+	 * MAC address for first address
+	 */
+	uni_pf3 = (adapter->addr[0] << ET_RX_UNI_PF_ADDR2_1_SHIFT) |
+		  (adapter->addr[1] << ET_RX_UNI_PF_ADDR2_2_SHIFT) |
+		  (adapter->addr[0] << ET_RX_UNI_PF_ADDR1_1_SHIFT) |
+		  adapter->addr[1];
+
+	uni_pf2 = (adapter->addr[2] << ET_RX_UNI_PF_ADDR2_3_SHIFT) |
+		  (adapter->addr[3] << ET_RX_UNI_PF_ADDR2_4_SHIFT) |
+		  (adapter->addr[4] << ET_RX_UNI_PF_ADDR2_5_SHIFT) |
+		  adapter->addr[5];
+
+	uni_pf1 = (adapter->addr[2] << ET_RX_UNI_PF_ADDR1_3_SHIFT) |
+		  (adapter->addr[3] << ET_RX_UNI_PF_ADDR1_4_SHIFT) |
+		  (adapter->addr[4] << ET_RX_UNI_PF_ADDR1_5_SHIFT) |
+		  adapter->addr[5];
+
+	pm_csr = readl(&adapter->regs->global.pm_csr);
+	if (!et1310_in_phy_coma(adapter)) {
+		writel(uni_pf1, &rxmac->uni_pf_addr1);
+		writel(uni_pf2, &rxmac->uni_pf_addr2);
+		writel(uni_pf3, &rxmac->uni_pf_addr3);
+	}
+}
+
+static void et1310_config_rxmac_regs(struct et131x_adapter *adapter)
+{
+	struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
+	struct phy_device *phydev = adapter->phydev;
+	u32 sa_lo;
+	u32 sa_hi = 0;
+	u32 pf_ctrl = 0;
+	u32 __iomem *wolw;
+
+	/* Disable the MAC while it is being configured (also disable WOL) */
+	writel(0x8, &rxmac->ctrl);
+
+	/* Initialize WOL to disabled. */
+	writel(0, &rxmac->crc0);
+	writel(0, &rxmac->crc12);
+	writel(0, &rxmac->crc34);
+
+	/* We need to set the WOL mask0 - mask4 next. We initialize it to
+	 * its default values of 0x00000000 because there are no WOL masks
+	 * as of this time.
+	 */
+	for (wolw = &rxmac->mask0_word0; wolw <= &rxmac->mask4_word3; wolw++)
+		writel(0, wolw);
+
+	/* Let's set up the WOL Source Address */
+	sa_lo = (adapter->addr[2] << ET_RX_WOL_LO_SA3_SHIFT) |
+		(adapter->addr[3] << ET_RX_WOL_LO_SA4_SHIFT) |
+		(adapter->addr[4] << ET_RX_WOL_LO_SA5_SHIFT) |
+		adapter->addr[5];
+	writel(sa_lo, &rxmac->sa_lo);
+
+	sa_hi = (u32)(adapter->addr[0] << ET_RX_WOL_HI_SA1_SHIFT) |
+		adapter->addr[1];
+	writel(sa_hi, &rxmac->sa_hi);
+
+	/* Disable all Packet Filtering */
+	writel(0, &rxmac->pf_ctrl);
+
+	/* Let's initialize the Unicast Packet filtering address */
+	if (adapter->packet_filter & ET131X_PACKET_TYPE_DIRECTED) {
+		et1310_setup_device_for_unicast(adapter);
+		pf_ctrl |= ET_RX_PFCTRL_UNICST_FILTER_ENABLE;
+	} else {
+		writel(0, &rxmac->uni_pf_addr1);
+		writel(0, &rxmac->uni_pf_addr2);
+		writel(0, &rxmac->uni_pf_addr3);
+	}
+
+	/* Let's initialize the Multicast hash */
+	if (!(adapter->packet_filter & ET131X_PACKET_TYPE_ALL_MULTICAST)) {
+		pf_ctrl |= ET_RX_PFCTRL_MLTCST_FILTER_ENABLE;
+		et1310_setup_device_for_multicast(adapter);
+	}
+
+	/* Runt packet filtering. Didn't work in version A silicon. */
+	pf_ctrl |= (NIC_MIN_PACKET_SIZE + 4) << ET_RX_PFCTRL_MIN_PKT_SZ_SHIFT;
+	pf_ctrl |= ET_RX_PFCTRL_FRAG_FILTER_ENABLE;
+
+	if (adapter->registry_jumbo_packet > 8192)
+		/* In order to transmit jumbo packets greater than 8k, the
+		 * FIFO between RxMAC and RxDMA needs to be reduced in size
+		 * to (16k - Jumbo packet size). In order to implement this,
+		 * we must use "cut through" mode in the RxMAC, which chops
+		 * packets down into segments which are (max_size * 16). In
+		 * this case we selected 256 bytes, since this is the size of
+		 * the PCI-Express TLP's that the 1310 uses.
+		 *
+		 * seg_en on, fc_en off, size 0x10
+		 */
+		writel(0x41, &rxmac->mcif_ctrl_max_seg);
+	else
+		writel(0, &rxmac->mcif_ctrl_max_seg);
+
+	writel(0, &rxmac->mcif_water_mark);
+	writel(0, &rxmac->mif_ctrl);
+	writel(0, &rxmac->space_avail);
+
+	/* Initialize the mif_ctrl register
+	 * bit 3:  Receive code error. One or more nibbles were signalled as
+	 *         errors during the reception of the packet. Clear this
+	 *         bit in Gigabit, set it in 100Mbit. This was derived
+	 *         experimentally at UNH.
+	 * bit 4:  Receive CRC error. The packet's CRC did not match the
+	 *         internally generated CRC.
+	 * bit 5:  Receive length check error. Indicates that frame length
+	 *         field value in the packet does not match the actual data
+	 *         byte length and is not a type field.
+	 * bit 16: Receive frame truncated.
+	 * bit 17: Drop packet enable
+	 */
+	if (phydev && phydev->speed == SPEED_100)
+		writel(0x30038, &rxmac->mif_ctrl);
+	else
+		writel(0x30030, &rxmac->mif_ctrl);
+
+	/* Finally we initialize RxMac to be enabled & WOL disabled. Packet
+	 * filter is always enabled since it is where the runt packets are
+	 * supposed to be dropped. For version A silicon, runt packet
+	 * dropping doesn't work, so it is disabled in the pf_ctrl register,
+	 * but we still leave the packet filter on.
+	 */
+	writel(pf_ctrl, &rxmac->pf_ctrl);
+	writel(ET_RX_CTRL_RXMAC_ENABLE | ET_RX_CTRL_WOL_DISABLE, &rxmac->ctrl);
+}
+
+static void et1310_config_txmac_regs(struct et131x_adapter *adapter)
+{
+	struct txmac_regs __iomem *txmac = &adapter->regs->txmac;
+
+	/* We need to update the Control Frame Parameters
+	 * cfpt - control frame pause timer set to 64 (0x40)
+	 * cfep - control frame extended pause timer set to 0x0
+	 */
+	if (adapter->flow == FLOW_NONE)
+		writel(0, &txmac->cf_param);
+	else
+		writel(0x40, &txmac->cf_param);
+}
+
+static void et1310_config_macstat_regs(struct et131x_adapter *adapter)
+{
+	struct macstat_regs __iomem *macstat = &adapter->regs->macstat;
+	u32 __iomem *reg;
+
+	/* initialize all the macstat registers to zero on the device */
+	for (reg = &macstat->txrx_0_64_byte_frames;
+	     reg <= &macstat->carry_reg2; reg++)
+		writel(0, reg);
+
+	/* Unmask any counters that we want to track the overflow of.
+	 * Initially this will be all counters. It may become clear later
+	 * that we do not need to track all counters.
+	 */
+	writel(0xFFFFBE32, &macstat->carry_reg1_mask);
+	writel(0xFFFE7E8B, &macstat->carry_reg2_mask);
+}
+
+static int et131x_phy_mii_read(struct et131x_adapter *adapter, u8 addr,
+			       u8 reg, u16 *value)
+{
+	struct mac_regs __iomem *mac = &adapter->regs->mac;
+	int status = 0;
+	u32 delay = 0;
+	u32 mii_addr;
+	u32 mii_cmd;
+	u32 mii_indicator;
+
+	/* Save a local copy of the registers we are dealing with so we can
+	 * set them back
+	 */
+	mii_addr = readl(&mac->mii_mgmt_addr);
+	mii_cmd = readl(&mac->mii_mgmt_cmd);
+
+	/* Stop the current operation */
+	writel(0, &mac->mii_mgmt_cmd);
+
+	/* Set up the register we need to read from on the correct PHY */
+	writel(ET_MAC_MII_ADDR(addr, reg), &mac->mii_mgmt_addr);
+
+	writel(0x1, &mac->mii_mgmt_cmd);
+
+	do {
+		udelay(50);
+		delay++;
+		mii_indicator = readl(&mac->mii_mgmt_indicator);
+	} while ((mii_indicator & ET_MAC_MGMT_WAIT) && delay < 50);
+
+	/* If we hit the max delay, we could not read the register */
+	if (delay == 50) {
+		dev_warn(&adapter->pdev->dev,
+			 "reg 0x%08x could not be read\n", reg);
+		dev_warn(&adapter->pdev->dev, "status is 0x%08x\n",
+			 mii_indicator);
+
+		status = -EIO;
+		goto out;
+	}
+
+	/* If we hit here we were able to read the register and we need to
+	 * return the value to the caller
+	 */
+	*value = readl(&mac->mii_mgmt_stat) & ET_MAC_MIIMGMT_STAT_PHYCRTL_MASK;
+
+out:
+	/* Stop the read operation */
+	writel(0, &mac->mii_mgmt_cmd);
+
+	/* set the registers we touched back to the state at which we entered
+	 * this function
+	 */
+	writel(mii_addr, &mac->mii_mgmt_addr);
+	writel(mii_cmd, &mac->mii_mgmt_cmd);
+
+	return status;
+}
+
+static int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value)
+{
+	struct phy_device *phydev = adapter->phydev;
+
+	if (!phydev)
+		return -EIO;
+
+	return et131x_phy_mii_read(adapter, phydev->addr, reg, value);
+}
+
+static int et131x_mii_write(struct et131x_adapter *adapter, u8 addr, u8 reg,
+			    u16 value)
+{
+	struct mac_regs __iomem *mac = &adapter->regs->mac;
+	int status = 0;
+	u32 delay = 0;
+	u32 mii_addr;
+	u32 mii_cmd;
+	u32 mii_indicator;
+
+	/* Save a local copy of the registers we are dealing with so we can
+	 * set them back
+	 */
+	mii_addr = readl(&mac->mii_mgmt_addr);
+	mii_cmd = readl(&mac->mii_mgmt_cmd);
+
+	/* Stop the current operation */
+	writel(0, &mac->mii_mgmt_cmd);
+
+	/* Set up the register we need to write to on the correct PHY */
+	writel(ET_MAC_MII_ADDR(addr, reg), &mac->mii_mgmt_addr);
+
+	/* Add the value to write to the registers to the mac */
+	writel(value, &mac->mii_mgmt_ctrl);
+
+	do {
+		udelay(50);
+		delay++;
+		mii_indicator = readl(&mac->mii_mgmt_indicator);
+	} while ((mii_indicator & ET_MAC_MGMT_BUSY) && delay < 100);
+
+	/* If we hit the max delay, we could not write the register */
+	if (delay == 100) {
+		u16 tmp;
+
+		dev_warn(&adapter->pdev->dev,
+			 "reg 0x%08x could not be written", reg);
+		dev_warn(&adapter->pdev->dev, "status is 0x%08x\n",
+			 mii_indicator);
+		dev_warn(&adapter->pdev->dev, "command is 0x%08x\n",
+			 readl(&mac->mii_mgmt_cmd));
+
+		et131x_mii_read(adapter, reg, &tmp);
+
+		status = -EIO;
+	}
+	/* Stop the write operation */
+	writel(0, &mac->mii_mgmt_cmd);
+
+	/* set the registers we touched back to the state at which we entered
+	 * this function
+	 */
+	writel(mii_addr, &mac->mii_mgmt_addr);
+	writel(mii_cmd, &mac->mii_mgmt_cmd);
+
+	return status;
+}
+
+static void et1310_phy_read_mii_bit(struct et131x_adapter *adapter,
+				    u16 regnum,
+				    u16 bitnum,
+				    u8 *value)
+{
+	u16 reg;
+	u16 mask = 1 << bitnum;
+
+	et131x_mii_read(adapter, regnum, &reg);
+
+	*value = (reg & mask) >> bitnum;
+}
+
+static void et1310_config_flow_control(struct et131x_adapter *adapter)
+{
+	struct phy_device *phydev = adapter->phydev;
+
+	if (phydev->duplex == DUPLEX_HALF) {
+		adapter->flow = FLOW_NONE;
+	} else {
+		char remote_pause, remote_async_pause;
+
+		et1310_phy_read_mii_bit(adapter, 5, 10, &remote_pause);
+		et1310_phy_read_mii_bit(adapter, 5, 11, &remote_async_pause);
+
+		if (remote_pause && remote_async_pause) {
+			adapter->flow = adapter->wanted_flow;
+		} else if (remote_pause && !remote_async_pause) {
+			if (adapter->wanted_flow == FLOW_BOTH)
+				adapter->flow = FLOW_BOTH;
+			else
+				adapter->flow = FLOW_NONE;
+		} else if (!remote_pause && !remote_async_pause) {
+			adapter->flow = FLOW_NONE;
+		} else {
+			if (adapter->wanted_flow == FLOW_BOTH)
+				adapter->flow = FLOW_RXONLY;
+			else
+				adapter->flow = FLOW_NONE;
+		}
+	}
+}
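Bits 10 and 11 of MII register 5 (the link partner ability register) are the remote PAUSE and asymmetric-pause advertisements, so the resolution above can be summarized as a table:

	/*	remote pause	remote asym	resolved adapter->flow
	 *	     1		     1		wanted_flow as-is
	 *	     1		     0		FLOW_BOTH if wanted, else FLOW_NONE
	 *	     0		     0		FLOW_NONE
	 *	     0		     1		FLOW_RXONLY if FLOW_BOTH wanted, else FLOW_NONE
	 */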
+
+/* et1310_update_macstat_host_counters - Update local copy of the statistics */
+static void et1310_update_macstat_host_counters(struct et131x_adapter *adapter)
+{
+	struct ce_stats *stats = &adapter->stats;
+	struct macstat_regs __iomem *macstat =
+		&adapter->regs->macstat;
+
+	stats->tx_collisions += readl(&macstat->tx_total_collisions);
+	stats->tx_first_collisions += readl(&macstat->tx_single_collisions);
+	stats->tx_deferred += readl(&macstat->tx_deferred);
+	stats->tx_excessive_collisions +=
+		readl(&macstat->tx_multiple_collisions);
+	stats->tx_late_collisions += readl(&macstat->tx_late_collisions);
+	stats->tx_underflows += readl(&macstat->tx_undersize_frames);
+	stats->tx_max_pkt_errs += readl(&macstat->tx_oversize_frames);
+
+	stats->rx_align_errs += readl(&macstat->rx_align_errs);
+	stats->rx_crc_errs += readl(&macstat->rx_code_errs);
+	stats->rcvd_pkts_dropped += readl(&macstat->rx_drops);
+	stats->rx_overflows += readl(&macstat->rx_oversize_packets);
+	stats->rx_code_violations += readl(&macstat->rx_fcs_errs);
+	stats->rx_length_errs += readl(&macstat->rx_frame_len_errs);
+	stats->rx_other_errs += readl(&macstat->rx_fragment_packets);
+}
+
+/* et1310_handle_macstat_interrupt
+ *
+ * One of the MACSTAT counters has wrapped. Update the local copy of
+ * the statistics held in the adapter structure, checking the "wrap"
+ * bit for each counter.
+ */
+static void et1310_handle_macstat_interrupt(struct et131x_adapter *adapter)
+{
+	u32 carry_reg1;
+	u32 carry_reg2;
+
+	/* Read the interrupt bits from the register(s). These are Clear On
+	 * Write.
+	 */
+	carry_reg1 = readl(&adapter->regs->macstat.carry_reg1);
+	carry_reg2 = readl(&adapter->regs->macstat.carry_reg2);
+
+	writel(carry_reg1, &adapter->regs->macstat.carry_reg1);
+	writel(carry_reg2, &adapter->regs->macstat.carry_reg2);
+
+	/* We need to update the host copy of all the MAC_STAT counters.
+	 * For each counter, check its overflow bit. If the overflow bit is
+	 * set, then increment the host version of the count by one complete
+	 * revolution of the counter. This routine is called when the counter
+	 * block indicates that one of the counters has wrapped.
+	 */
+	if (carry_reg1 & (1 << 14))
+		adapter->stats.rx_code_violations += COUNTER_WRAP_16_BIT;
+	if (carry_reg1 & (1 << 8))
+		adapter->stats.rx_align_errs += COUNTER_WRAP_12_BIT;
+	if (carry_reg1 & (1 << 7))
+		adapter->stats.rx_length_errs += COUNTER_WRAP_16_BIT;
+	if (carry_reg1 & (1 << 2))
+		adapter->stats.rx_other_errs += COUNTER_WRAP_16_BIT;
+	if (carry_reg1 & (1 << 6))
+		adapter->stats.rx_crc_errs += COUNTER_WRAP_16_BIT;
+	if (carry_reg1 & (1 << 3))
+		adapter->stats.rx_overflows += COUNTER_WRAP_16_BIT;
+	if (carry_reg1 & (1 << 0))
+		adapter->stats.rcvd_pkts_dropped += COUNTER_WRAP_16_BIT;
+	if (carry_reg2 & (1 << 16))
+		adapter->stats.tx_max_pkt_errs += COUNTER_WRAP_12_BIT;
+	if (carry_reg2 & (1 << 15))
+		adapter->stats.tx_underflows += COUNTER_WRAP_12_BIT;
+	if (carry_reg2 & (1 << 6))
+		adapter->stats.tx_first_collisions += COUNTER_WRAP_12_BIT;
+	if (carry_reg2 & (1 << 8))
+		adapter->stats.tx_deferred += COUNTER_WRAP_12_BIT;
+	if (carry_reg2 & (1 << 5))
+		adapter->stats.tx_excessive_collisions += COUNTER_WRAP_12_BIT;
+	if (carry_reg2 & (1 << 4))
+		adapter->stats.tx_late_collisions += COUNTER_WRAP_12_BIT;
+	if (carry_reg2 & (1 << 2))
+		adapter->stats.tx_collisions += COUNTER_WRAP_12_BIT;
+}
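As a concrete case of the wrap accounting above: the alignment-error counter is 12 bits wide, so when its carry bit (carry_reg1 bit 8) is found set, the hardware count has rolled over from 4095 to 0, and the host copy is advanced by one full revolution:

	adapter->stats.rx_align_errs += COUNTER_WRAP_12_BIT;	/* += 4096 */

The 16-bit counters are handled identically with COUNTER_WRAP_16_BIT (0x10000).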
+
+static int et131x_mdio_read(struct mii_bus *bus, int phy_addr, int reg)
+{
+	struct net_device *netdev = bus->priv;
+	struct et131x_adapter *adapter = netdev_priv(netdev);
+	u16 value;
+	int ret;
+
+	ret = et131x_phy_mii_read(adapter, phy_addr, reg, &value);
+
+	if (ret < 0)
+		return ret;
+
+	return value;
+}
+
+static int et131x_mdio_write(struct mii_bus *bus, int phy_addr,
+			     int reg, u16 value)
+{
+	struct net_device *netdev = bus->priv;
+	struct et131x_adapter *adapter = netdev_priv(netdev);
+
+	return et131x_mii_write(adapter, phy_addr, reg, value);
+}
+
+/* et1310_phy_power_switch - PHY power control
+ * @adapter: device to control
+ * @down: true for off/false for back on
+ *
+ * one hundred, ten, one thousand megs
+ * How would you like to have your LAN accessed
+ * Can't you see that this code processed
+ * Phy power, phy power..
+ */
+static void et1310_phy_power_switch(struct et131x_adapter *adapter, bool down)
+{
+	u16 data;
+	struct phy_device *phydev = adapter->phydev;
+
+	et131x_mii_read(adapter, MII_BMCR, &data);
+	data &= ~BMCR_PDOWN;
+	if (down)
+		data |= BMCR_PDOWN;
+	et131x_mii_write(adapter, phydev->addr, MII_BMCR, data);
+}
+
+/* et131x_xcvr_init - Init the phy if we are setting it into force mode */
+static void et131x_xcvr_init(struct et131x_adapter *adapter)
+{
+	u16 lcr2;
+	struct phy_device *phydev = adapter->phydev;
+
+	/* Set the LED behavior such that LED 1 indicates speed (off =
+	 * 10Mbits, blink = 100Mbits, on = 1000Mbits) and LED 2 indicates
+	 * link and activity (on for link, blink off for activity).
+	 *
+	 * NOTE: Some customizations have been added here for specific
+	 * vendors; The LED behavior is now determined by vendor data in the
+	 * EEPROM. However, the above description is the default.
+	 */
+	if ((adapter->eeprom_data[1] & 0x4) == 0) {
+		et131x_mii_read(adapter, PHY_LED_2, &lcr2);
+
+		lcr2 &= (ET_LED2_LED_100TX | ET_LED2_LED_1000T);
+		lcr2 |= (LED_VAL_LINKON_ACTIVE << LED_LINK_SHIFT);
+
+		if ((adapter->eeprom_data[1] & 0x8) == 0)
+			lcr2 |= (LED_VAL_1000BT_100BTX << LED_TXRX_SHIFT);
+		else
+			lcr2 |= (LED_VAL_LINKON << LED_TXRX_SHIFT);
+
+		et131x_mii_write(adapter, phydev->addr, PHY_LED_2, lcr2);
+	}
+}
+
+/* et131x_configure_global_regs - configure JAGCore global regs */
+static void et131x_configure_global_regs(struct et131x_adapter *adapter)
+{
+	struct global_regs __iomem *regs = &adapter->regs->global;
+
+	writel(0, &regs->rxq_start_addr);
+	writel(INTERNAL_MEM_SIZE - 1, &regs->txq_end_addr);
+
+	if (adapter->registry_jumbo_packet < 2048) {
+		/* Tx / RxDMA and Tx/Rx MAC interfaces have a 1k word
+		 * block of RAM that the driver can split between Tx
+		 * and Rx as it desires. Our default is to split it
+		 * 50/50:
+		 */
+		writel(PARM_RX_MEM_END_DEF, &regs->rxq_end_addr);
+		writel(PARM_RX_MEM_END_DEF + 1, &regs->txq_start_addr);
+	} else if (adapter->registry_jumbo_packet < 8192) {
+		/* For jumbo packets > 2k but < 8k, split 50-50. */
+		writel(INTERNAL_MEM_RX_OFFSET, &regs->rxq_end_addr);
+		writel(INTERNAL_MEM_RX_OFFSET + 1, &regs->txq_start_addr);
+	} else {
+		/* 9216 is the only packet size greater than 8k that
+		 * is available. The Tx buffer has to be big enough
+		 * for one whole packet on the Tx side. We'll make
+		 * the Tx 9408, and give the rest to Rx
+		 */
+		writel(0x01b3, &regs->rxq_end_addr);
+		writel(0x01b4, &regs->txq_start_addr);
+	}
+
+	/* Initialize the loopback register. Disable all loopbacks. */
+	writel(0, &regs->loopback);
+
+	writel(0, &regs->msi_config);
+
+	/* By default, disable the watchdog timer. It will be enabled when
+	 * a packet is queued.
+	 */
+	writel(0, &regs->watchdog_timer);
+}
1537
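/* Sketch of the queue split programmed above. The PARM_/INTERNAL_ constants
 * are defined elsewhere in the driver; the concrete values here are only
 * illustrative, assuming a 1k-word internal RAM split 50/50:
 *
 *	rxq: [rxq_start_addr .. rxq_end_addr]    e.g. [0x000 .. 0x1FF]
 *	txq: [txq_start_addr .. txq_end_addr]    e.g. [0x200 .. 0x3FF]
 *
 * Each branch writes txq_start_addr = rxq_end_addr + 1, so the two queues
 * always tile the internal RAM with no gap and no overlap.
 */
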
1538/* et131x_config_rx_dma_regs - Start of Rx_DMA init sequence */
1539static void et131x_config_rx_dma_regs(struct et131x_adapter *adapter)
1540{
1541 struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
1542 struct rx_ring *rx_local = &adapter->rx_ring;
1543 struct fbr_desc *fbr_entry;
1544 u32 entry;
1545 u32 psr_num_des;
1546 unsigned long flags;
1547 u8 id;
1548
1549 et131x_rx_dma_disable(adapter);
1550
1551 /* Load the completion writeback physical address */
1552 writel(upper_32_bits(rx_local->rx_status_bus), &rx_dma->dma_wb_base_hi);
1553 writel(lower_32_bits(rx_local->rx_status_bus), &rx_dma->dma_wb_base_lo);
1554
1555 memset(rx_local->rx_status_block, 0, sizeof(struct rx_status_block));
1556
1557 /* Set the address and parameters of the packet status ring */
1558 writel(upper_32_bits(rx_local->ps_ring_physaddr), &rx_dma->psr_base_hi);
1559 writel(lower_32_bits(rx_local->ps_ring_physaddr), &rx_dma->psr_base_lo);
1560 writel(rx_local->psr_entries - 1, &rx_dma->psr_num_des);
1561 writel(0, &rx_dma->psr_full_offset);
1562
1563 psr_num_des = readl(&rx_dma->psr_num_des) & ET_RXDMA_PSR_NUM_DES_MASK;
1564 writel((psr_num_des * LO_MARK_PERCENT_FOR_PSR) / 100,
1565 &rx_dma->psr_min_des);
1566
1567 spin_lock_irqsave(&adapter->rcv_lock, flags);
1568
1569 /* These local variables track the PSR in the adapter structure */
1570 rx_local->local_psr_full = 0;
1571
1572 for (id = 0; id < NUM_FBRS; id++) {
1573 u32 __iomem *num_des;
1574 u32 __iomem *full_offset;
1575 u32 __iomem *min_des;
1576 u32 __iomem *base_hi;
1577 u32 __iomem *base_lo;
1578 struct fbr_lookup *fbr = rx_local->fbr[id];
1579
1580 if (id == 0) {
1581 num_des = &rx_dma->fbr0_num_des;
1582 full_offset = &rx_dma->fbr0_full_offset;
1583 min_des = &rx_dma->fbr0_min_des;
1584 base_hi = &rx_dma->fbr0_base_hi;
1585 base_lo = &rx_dma->fbr0_base_lo;
1586 } else {
1587 num_des = &rx_dma->fbr1_num_des;
1588 full_offset = &rx_dma->fbr1_full_offset;
1589 min_des = &rx_dma->fbr1_min_des;
1590 base_hi = &rx_dma->fbr1_base_hi;
1591 base_lo = &rx_dma->fbr1_base_lo;
1592 }
1593
1594 /* Now's the best time to initialize FBR contents */
1595 fbr_entry = fbr->ring_virtaddr;
1596 for (entry = 0; entry < fbr->num_entries; entry++) {
1597 fbr_entry->addr_hi = fbr->bus_high[entry];
1598 fbr_entry->addr_lo = fbr->bus_low[entry];
1599 fbr_entry->word2 = entry;
1600 fbr_entry++;
1601 }
1602
1603 /* Set the address and parameters of this free buffer ring */
1604 writel(upper_32_bits(fbr->ring_physaddr), base_hi);
1605 writel(lower_32_bits(fbr->ring_physaddr), base_lo);
1606 writel(fbr->num_entries - 1, num_des);
1607 writel(ET_DMA10_WRAP, full_offset);
1608
1609 /* This variable tracks this free buffer ring's full position,
1610 * so it has to match the full_offset value written above.
1611 */
1612 fbr->local_full = ET_DMA10_WRAP;
1613 writel(((fbr->num_entries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
1614 min_des);
1615 }
1616
1617 /* Program the number of packets we will receive before generating an
1618 * interrupt.
1619 * For version B silicon, this value gets updated once autoneg is
1620 * complete.
1621 */
1622 writel(PARM_RX_NUM_BUFS_DEF, &rx_dma->num_pkt_done);
1623
1624 /* The "time_done" is not working correctly to coalesce interrupts
1625 * after a given time period, but rather is giving us an interrupt
1626 * regardless of whether we have received packets.
1627 * This value gets updated once autoneg is complete.
1628 */
1629 writel(PARM_RX_TIME_INT_DEF, &rx_dma->max_pkt_time);
1630
1631 spin_unlock_irqrestore(&adapter->rcv_lock, flags);
1632}
1633
1634/* et131x_config_tx_dma_regs - Set up the tx dma section of the JAGCore.
1635 *
1636 * Configure the transmit engine with the ring buffers we have created
1637 * and prepare it for use.
1638 */
1639static void et131x_config_tx_dma_regs(struct et131x_adapter *adapter)
1640{
1641 struct txdma_regs __iomem *txdma = &adapter->regs->txdma;
1642 struct tx_ring *tx_ring = &adapter->tx_ring;
1643
1644 /* Load the hardware with the start of the transmit descriptor ring. */
1645 writel(upper_32_bits(tx_ring->tx_desc_ring_pa), &txdma->pr_base_hi);
1646 writel(lower_32_bits(tx_ring->tx_desc_ring_pa), &txdma->pr_base_lo);
1647
1648 /* Initialise the transmit DMA engine */
1649 writel(NUM_DESC_PER_RING_TX - 1, &txdma->pr_num_des);
1650
1651 /* Load the completion writeback physical address */
1652 writel(upper_32_bits(tx_ring->tx_status_pa), &txdma->dma_wb_base_hi);
1653 writel(lower_32_bits(tx_ring->tx_status_pa), &txdma->dma_wb_base_lo);
1654
1655 *tx_ring->tx_status = 0;
1656
1657 writel(0, &txdma->service_request);
1658 tx_ring->send_idx = 0;
1659}
1660
1661/* et131x_adapter_setup - Set the adapter up as per cassini+ documentation */
1662static void et131x_adapter_setup(struct et131x_adapter *adapter)
1663{
1664 et131x_configure_global_regs(adapter);
1665 et1310_config_mac_regs1(adapter);
1666
1667 /* Configure the MMC registers */
1668 /* All we need to do is initialize the Memory Control Register */
1669 writel(ET_MMC_ENABLE, &adapter->regs->mmc.mmc_ctrl);
1670
1671 et1310_config_rxmac_regs(adapter);
1672 et1310_config_txmac_regs(adapter);
1673
1674 et131x_config_rx_dma_regs(adapter);
1675 et131x_config_tx_dma_regs(adapter);
1676
1677 et1310_config_macstat_regs(adapter);
1678
1679 et1310_phy_power_switch(adapter, 0);
1680 et131x_xcvr_init(adapter);
1681}
1682
1683/* et131x_soft_reset - Issue soft reset to the hardware, complete for ET1310 */
1684static void et131x_soft_reset(struct et131x_adapter *adapter)
1685{
1686 u32 reg;
1687
1688 /* Disable MAC Core */
1689 reg = ET_MAC_CFG1_SOFT_RESET | ET_MAC_CFG1_SIM_RESET |
1690 ET_MAC_CFG1_RESET_RXMC | ET_MAC_CFG1_RESET_TXMC |
1691 ET_MAC_CFG1_RESET_RXFUNC | ET_MAC_CFG1_RESET_TXFUNC;
1692 writel(reg, &adapter->regs->mac.cfg1);
1693
1694 reg = ET_RESET_ALL;
1695 writel(reg, &adapter->regs->global.sw_reset);
1696
1697 reg = ET_MAC_CFG1_RESET_RXMC | ET_MAC_CFG1_RESET_TXMC |
1698 ET_MAC_CFG1_RESET_RXFUNC | ET_MAC_CFG1_RESET_TXFUNC;
1699 writel(reg, &adapter->regs->mac.cfg1);
1700 writel(0, &adapter->regs->mac.cfg1);
1701}
1702
1703static void et131x_enable_interrupts(struct et131x_adapter *adapter)
1704{
1705 u32 mask;
1706
1707 if (adapter->flow == FLOW_TXONLY || adapter->flow == FLOW_BOTH)
1708 mask = INT_MASK_ENABLE;
1709 else
1710 mask = INT_MASK_ENABLE_NO_FLOW;
1711
1712 writel(mask, &adapter->regs->global.int_mask);
1713}
1714
1715static void et131x_disable_interrupts(struct et131x_adapter *adapter)
1716{
1717 writel(INT_MASK_DISABLE, &adapter->regs->global.int_mask);
1718}
1719
1720static void et131x_tx_dma_disable(struct et131x_adapter *adapter)
1721{
1722 /* Setup the transmit dma configuration register */
1723 writel(ET_TXDMA_CSR_HALT | ET_TXDMA_SNGL_EPKT,
1724 &adapter->regs->txdma.csr);
1725}
1726
1727static void et131x_enable_txrx(struct net_device *netdev)
1728{
1729 struct et131x_adapter *adapter = netdev_priv(netdev);
1730
1731 et131x_rx_dma_enable(adapter);
1732 et131x_tx_dma_enable(adapter);
1733
1734 if (adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE)
1735 et131x_enable_interrupts(adapter);
1736
1737 netif_start_queue(netdev);
1738}
1739
1740static void et131x_disable_txrx(struct net_device *netdev)
1741{
1742 struct et131x_adapter *adapter = netdev_priv(netdev);
1743
1744 netif_stop_queue(netdev);
1745
1746 et131x_rx_dma_disable(adapter);
1747 et131x_tx_dma_disable(adapter);
1748
1749 et131x_disable_interrupts(adapter);
1750}
1751
1752static void et131x_init_send(struct et131x_adapter *adapter)
1753{
1754 int i;
1755 struct tx_ring *tx_ring = &adapter->tx_ring;
1756 struct tcb *tcb = tx_ring->tcb_ring;
1757
1758 tx_ring->tcb_qhead = tcb;
1759
1760 memset(tcb, 0, sizeof(struct tcb) * NUM_TCB);
1761
1762 for (i = 0; i < NUM_TCB; i++) {
1763 tcb->next = tcb + 1;
1764 tcb++;
1765 }
1766
1767 tcb--;
1768 tx_ring->tcb_qtail = tcb;
1769 tcb->next = NULL;
1770 /* Current send queue should now be empty */
1771 tx_ring->send_head = NULL;
1772 tx_ring->send_tail = NULL;
1773}
1774
1775/* et1310_enable_phy_coma
1776 *
1777 * The driver receives a PHY status change interrupt while in D0
1778 * and checks that phy_status is down; if so:
1779 *
1780 * -- gate off JAGCore;
1781 * -- set gigE PHY in Coma mode
1782 * -- wake on phy_interrupt; Perform software reset JAGCore,
1783 * re-initialize jagcore and gigE PHY
1784 */
1785static void et1310_enable_phy_coma(struct et131x_adapter *adapter)
1786{
1787 u32 pmcsr = readl(&adapter->regs->global.pm_csr);
1788
1789 /* Stop sending packets. */
1790 adapter->flags |= FMP_ADAPTER_LOWER_POWER;
1791
1792 /* Wait for outstanding Receive packets */
1793 et131x_disable_txrx(adapter->netdev);
1794
1795 /* Gate off JAGCore 3 clock domains */
1796 pmcsr &= ~ET_PMCSR_INIT;
1797 writel(pmcsr, &adapter->regs->global.pm_csr);
1798
1799 /* Program gigE PHY into Coma mode */
1800 pmcsr |= ET_PM_PHY_SW_COMA;
1801 writel(pmcsr, &adapter->regs->global.pm_csr);
1802}
1803
1804static void et1310_disable_phy_coma(struct et131x_adapter *adapter)
1805{
1806 u32 pmcsr;
1807
1808 pmcsr = readl(&adapter->regs->global.pm_csr);
1809
1810 /* Disable phy_sw_coma register and re-enable JAGCore clocks */
1811 pmcsr |= ET_PMCSR_INIT;
1812 pmcsr &= ~ET_PM_PHY_SW_COMA;
1813 writel(pmcsr, &adapter->regs->global.pm_csr);
1814
1815 /* Restore the GbE PHY speed and duplex modes;
1816 * Reset JAGCore; re-configure and initialize JAGCore and gigE PHY
1817 */
1818
1819 /* Re-initialize the send structures */
1820 et131x_init_send(adapter);
1821
1822 /* Bring the device back to the state it was during init prior to
1823 * autonegotiation being complete. This way, when we get the auto-neg
1824 * complete interrupt, we can complete init by calling ConfigMacREGS2.
1825 */
1826 et131x_soft_reset(adapter);
1827
1828 et131x_adapter_setup(adapter);
1829
1830 /* Allow Tx to restart */
1831 adapter->flags &= ~FMP_ADAPTER_LOWER_POWER;
1832
1833 et131x_enable_txrx(adapter->netdev);
1834}
1835
1836static inline u32 bump_free_buff_ring(u32 *free_buff_ring, u32 limit)
1837{
1838 u32 tmp_free_buff_ring = *free_buff_ring;
1839
1840 tmp_free_buff_ring++;
1841 /* This works for all cases where limit < 1024. The 1023 case
1842 * works because 1023++ is 1024 which means the if condition is not
1843 * taken but the carry of the bit into the wrap bit toggles the wrap
1844 * value correctly
1845 */
1846 if ((tmp_free_buff_ring & ET_DMA10_MASK) > limit) {
1847 tmp_free_buff_ring &= ~ET_DMA10_MASK;
1848 tmp_free_buff_ring ^= ET_DMA10_WRAP;
1849 }
1850 /* For the 1023 case */
1851 tmp_free_buff_ring &= (ET_DMA10_MASK | ET_DMA10_WRAP);
1852 *free_buff_ring = tmp_free_buff_ring;
1853 return tmp_free_buff_ring;
1854}
1855
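/* Worked example of the 10-bit offset + wrap-bit encoding handled above,
 * assuming ET_DMA10_MASK == 0x3FF and ET_DMA10_WRAP == 0x400 (the encoding
 * that INDEX10()/add_10bit() elsewhere in this file also rely on):
 *
 *	u32 idx = 0x3FF;                  offset 1023, wrap bit clear
 *	bump_free_buff_ring(&idx, 1023);  idx == 0x400: offset 0, wrap set
 *	bump_free_buff_ring(&idx, 1023);  idx == 0x401: offset 1, wrap set
 *
 * The wrap bit is what lets producer and consumer tell a full ring from an
 * empty one when their offsets are equal.
 */
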
1856/* et131x_rx_dma_memory_alloc
1857 *
1858 * Allocates both Free Buffer Rings (and the buffers they point to),
1859 * as well as the Packet Status Ring.
1860 */
1861static int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
1862{
1863 u8 id;
1864 u32 i, j;
1865 u32 bufsize;
1866 u32 psr_size;
1867 u32 fbr_chunksize;
1868 struct rx_ring *rx_ring = &adapter->rx_ring;
1869 struct fbr_lookup *fbr;
1870
1871 /* Alloc memory for the lookup table */
1872 rx_ring->fbr[0] = kzalloc(sizeof(*fbr), GFP_KERNEL);
1873 if (rx_ring->fbr[0] == NULL)
1874 return -ENOMEM;
1875 rx_ring->fbr[1] = kzalloc(sizeof(*fbr), GFP_KERNEL);
1876 if (rx_ring->fbr[1] == NULL)
1877 return -ENOMEM;
1878
1879 /* The first thing we will do is configure the sizes of the buffer
1880 * rings. These will change based on jumbo packet support. Larger
1881 * jumbo packets increase the size of each entry in FBR0 and the
1882 * number of entries in FBR0, while at the same time decreasing the
1883 * number of entries in FBR1.
1884 *
1885 * FBR1 holds "large" frames, FBR0 holds "small" frames. If FBR1
1886 * entries are huge in order to accommodate a "jumbo" frame, then it
1887 * will have fewer entries. Conversely, FBR0 will now be relied upon
1888 * to carry more "normal" frames, thus its entry size also increases
1889 * and the number of entries goes up too (since it now carries
1890 * "small" + "regular" packets).
1891 *
1892 * In this scheme, we try to maintain 512 entries between the two
1893 * rings. Also, FBR1's total memory stays roughly constant - when its
1894 * entry size doubles, its number of entries halves. FBR0 grows, though.
1895 */
1896 if (adapter->registry_jumbo_packet < 2048) {
1897 rx_ring->fbr[0]->buffsize = 256;
1898 rx_ring->fbr[0]->num_entries = 512;
1899 rx_ring->fbr[1]->buffsize = 2048;
1900 rx_ring->fbr[1]->num_entries = 512;
1901 } else if (adapter->registry_jumbo_packet < 4096) {
1902 rx_ring->fbr[0]->buffsize = 512;
1903 rx_ring->fbr[0]->num_entries = 1024;
1904 rx_ring->fbr[1]->buffsize = 4096;
1905 rx_ring->fbr[1]->num_entries = 512;
1906 } else {
1907 rx_ring->fbr[0]->buffsize = 1024;
1908 rx_ring->fbr[0]->num_entries = 768;
1909 rx_ring->fbr[1]->buffsize = 16384;
1910 rx_ring->fbr[1]->num_entries = 128;
1911 }
1912
1913 rx_ring->psr_entries = rx_ring->fbr[0]->num_entries +
1914 rx_ring->fbr[1]->num_entries;
1915
1916 for (id = 0; id < NUM_FBRS; id++) {
1917 fbr = rx_ring->fbr[id];
1918 /* Allocate an area of memory for Free Buffer Ring */
1919 bufsize = sizeof(struct fbr_desc) * fbr->num_entries;
1920 fbr->ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
1921 bufsize,
1922 &fbr->ring_physaddr,
1923 GFP_KERNEL);
1924 if (!fbr->ring_virtaddr) {
1925 dev_err(&adapter->pdev->dev,
1926 "Cannot alloc memory for Free Buffer Ring %d\n",
1927 id);
1928 return -ENOMEM;
1929 }
1930 }
1931
1932 for (id = 0; id < NUM_FBRS; id++) {
1933 fbr = rx_ring->fbr[id];
1934 fbr_chunksize = (FBR_CHUNKS * fbr->buffsize);
1935
1936 for (i = 0; i < fbr->num_entries / FBR_CHUNKS; i++) {
1937 dma_addr_t fbr_physaddr;
1938
1939 fbr->mem_virtaddrs[i] = dma_alloc_coherent(
1940 &adapter->pdev->dev, fbr_chunksize,
1941 &fbr->mem_physaddrs[i],
1942 GFP_KERNEL);
1943
1944 if (!fbr->mem_virtaddrs[i]) {
1945 dev_err(&adapter->pdev->dev,
1946 "Could not alloc memory\n");
1947 return -ENOMEM;
1948 }
1949
1950 /* Track the physical address as we carve this chunk into buffers */
1951 fbr_physaddr = fbr->mem_physaddrs[i];
1952
1953 for (j = 0; j < FBR_CHUNKS; j++) {
1954 u32 k = (i * FBR_CHUNKS) + j;
1955
1956 /* Save the Virtual address of this index for
1957 * quick access later
1958 */
1959 fbr->virt[k] = (u8 *)fbr->mem_virtaddrs[i] +
1960 (j * fbr->buffsize);
1961
1962 /* now store the physical address in the
1963 * descriptor so the device can access it
1964 */
1965 fbr->bus_high[k] = upper_32_bits(fbr_physaddr);
1966 fbr->bus_low[k] = lower_32_bits(fbr_physaddr);
1967 fbr_physaddr += fbr->buffsize;
1968 }
1969 }
1970 }
1971
1972 /* Allocate an area of memory for FIFO of Packet Status ring entries */
1973 psr_size = sizeof(struct pkt_stat_desc) * rx_ring->psr_entries;
1974
1975 rx_ring->ps_ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
1976 psr_size,
1977 &rx_ring->ps_ring_physaddr,
1978 GFP_KERNEL);
1979
1980 if (!rx_ring->ps_ring_virtaddr) {
1981 dev_err(&adapter->pdev->dev,
1982 "Cannot alloc memory for Packet Status Ring\n");
1983 return -ENOMEM;
1984 }
1985
1986 /* Allocate an area of memory for writeback of status information */
1987 rx_ring->rx_status_block = dma_alloc_coherent(&adapter->pdev->dev,
1988 sizeof(struct rx_status_block),
1989 &rx_ring->rx_status_bus,
1990 GFP_KERNEL);
1991 if (!rx_ring->rx_status_block) {
1992 dev_err(&adapter->pdev->dev,
1993 "Cannot alloc memory for Status Block\n");
1994 return -ENOMEM;
1995 }
1996 rx_ring->num_rfd = NIC_DEFAULT_NUM_RFD;
1997
1998 /* The RFDs are going to be put on lists later on, so initialize the
1999 * lists now.
2000 */
2001 INIT_LIST_HEAD(&rx_ring->recv_list);
2002 return 0;
2003}
2004
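/* Back-of-the-envelope totals for the sizing table above (each ring's
 * footprint is simply buffsize * num_entries):
 *
 *	standard:      FBR0 256 B * 512  = 128 KiB, FBR1 2 KiB  * 512 = 1 MiB
 *	jumbo < 4096:  FBR0 512 B * 1024 = 512 KiB, FBR1 4 KiB  * 512 = 2 MiB
 *	jumbo >= 4096: FBR0 1 KiB * 768  = 768 KiB, FBR1 16 KiB * 128 = 2 MiB
 *
 * psr_entries is the sum of both entry counts, so the packet status ring
 * can describe one entry per outstanding receive buffer.
 */
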
2005static void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
2006{
2007 u8 id;
2008 u32 ii;
2009 u32 bufsize;
2010 u32 psr_size;
2011 struct rfd *rfd;
2012 struct rx_ring *rx_ring = &adapter->rx_ring;
2013 struct fbr_lookup *fbr;
2014
2015 /* Free RFDs and associated packet descriptors */
2016 WARN_ON(rx_ring->num_ready_recv != rx_ring->num_rfd);
2017
2018 while (!list_empty(&rx_ring->recv_list)) {
2019 rfd = list_entry(rx_ring->recv_list.next,
2020 struct rfd, list_node);
2021
2022 list_del(&rfd->list_node);
2023 rfd->skb = NULL;
2024 kfree(rfd);
2025 }
2026
2027 /* Free Free Buffer Rings */
2028 for (id = 0; id < NUM_FBRS; id++) {
2029 fbr = rx_ring->fbr[id];
2030
2031 if (!fbr || !fbr->ring_virtaddr)
2032 continue;
2033
2034 /* First the packet memory */
2035 for (ii = 0; ii < fbr->num_entries / FBR_CHUNKS; ii++) {
2036 if (fbr->mem_virtaddrs[ii]) {
2037 bufsize = fbr->buffsize * FBR_CHUNKS;
2038
2039 dma_free_coherent(&adapter->pdev->dev,
2040 bufsize,
2041 fbr->mem_virtaddrs[ii],
2042 fbr->mem_physaddrs[ii]);
2043
2044 fbr->mem_virtaddrs[ii] = NULL;
2045 }
2046 }
2047
2048 bufsize = sizeof(struct fbr_desc) * fbr->num_entries;
2049
2050 dma_free_coherent(&adapter->pdev->dev,
2051 bufsize,
2052 fbr->ring_virtaddr,
2053 fbr->ring_physaddr);
2054
2055 fbr->ring_virtaddr = NULL;
2056 }
2057
2058 /* Free Packet Status Ring */
2059 if (rx_ring->ps_ring_virtaddr) {
2060 psr_size = sizeof(struct pkt_stat_desc) * rx_ring->psr_entries;
2061
2062 dma_free_coherent(&adapter->pdev->dev, psr_size,
2063 rx_ring->ps_ring_virtaddr,
2064 rx_ring->ps_ring_physaddr);
2065
2066 rx_ring->ps_ring_virtaddr = NULL;
2067 }
2068
2069 /* Free area of memory for the writeback of status information */
2070 if (rx_ring->rx_status_block) {
2071 dma_free_coherent(&adapter->pdev->dev,
2072 sizeof(struct rx_status_block),
2073 rx_ring->rx_status_block,
2074 rx_ring->rx_status_bus);
2075 rx_ring->rx_status_block = NULL;
2076 }
2077
2078 /* Free the FBR Lookup Table */
2079 kfree(rx_ring->fbr[0]);
2080 kfree(rx_ring->fbr[1]);
2081
2082 /* Reset Counters */
2083 rx_ring->num_ready_recv = 0;
2084}
2085
2086/* et131x_init_recv - Initialize receive data structures */
2087static int et131x_init_recv(struct et131x_adapter *adapter)
2088{
2089 struct rfd *rfd;
2090 u32 rfdct;
2091 struct rx_ring *rx_ring = &adapter->rx_ring;
2092
2093 /* Setup each RFD */
2094 for (rfdct = 0; rfdct < rx_ring->num_rfd; rfdct++) {
2095 rfd = kzalloc(sizeof(*rfd), GFP_ATOMIC | GFP_DMA);
2096 if (!rfd)
2097 return -ENOMEM;
2098
2099 rfd->skb = NULL;
2100
2101 /* Add this RFD to the recv_list */
2102 list_add_tail(&rfd->list_node, &rx_ring->recv_list);
2103
2104 /* Increment the count of available RFDs */
2105 rx_ring->num_ready_recv++;
2106 }
2107
2108 return 0;
2109}
2110
2111/* et131x_set_rx_dma_timer - Set the heartbeat timer according to line rate */
2112static void et131x_set_rx_dma_timer(struct et131x_adapter *adapter)
2113{
2114 struct phy_device *phydev = adapter->phydev;
2115
2116 /* For version B silicon, we do not use the RxDMA timer for 10 and 100
2117 * Mbits/s line rates. We do not enable RxDMA interrupt coalescing.
2118 */
2119 if ((phydev->speed == SPEED_100) || (phydev->speed == SPEED_10)) {
2120 writel(0, &adapter->regs->rxdma.max_pkt_time);
2121 writel(1, &adapter->regs->rxdma.num_pkt_done);
2122 }
2123}
2124
2125/* nic_return_rfd - Recycle a RFD and put it back onto the receive list */
2126static void nic_return_rfd(struct et131x_adapter *adapter, struct rfd *rfd)
2127{
2128 struct rx_ring *rx_local = &adapter->rx_ring;
2129 struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
2130 u16 buff_index = rfd->bufferindex;
2131 u8 ring_index = rfd->ringindex;
2132 unsigned long flags;
2133 struct fbr_lookup *fbr = rx_local->fbr[ring_index];
2134
2135 /* We don't use any of the OOB data besides status. Otherwise, we
2136 * need to clean up OOB data
2137 */
2138 if (buff_index < fbr->num_entries) {
2139 u32 free_buff_ring;
2140 u32 __iomem *offset;
2141 struct fbr_desc *next;
2142
2143 if (ring_index == 0)
2144 offset = &rx_dma->fbr0_full_offset;
2145 else
2146 offset = &rx_dma->fbr1_full_offset;
2147
2148 next = (struct fbr_desc *)(fbr->ring_virtaddr) +
2149 INDEX10(fbr->local_full);
2150
2151 /* Handle the Free Buffer Ring advancement here. Write
2152 * the PA / Buffer Index for the returned buffer into
2153 * the oldest (next to be freed) FBR entry.
2154 */
2155 next->addr_hi = fbr->bus_high[buff_index];
2156 next->addr_lo = fbr->bus_low[buff_index];
2157 next->word2 = buff_index;
2158
2159 free_buff_ring = bump_free_buff_ring(&fbr->local_full,
2160 fbr->num_entries - 1);
2161 writel(free_buff_ring, offset);
2162 } else {
2163 dev_err(&adapter->pdev->dev,
2164 "%s illegal Buffer Index returned\n", __func__);
2165 }
2166
2167 /* The processing on this RFD is done, so put it back on the tail of
2168 * our list
2169 */
2170 spin_lock_irqsave(&adapter->rcv_lock, flags);
2171 list_add_tail(&rfd->list_node, &rx_local->recv_list);
2172 rx_local->num_ready_recv++;
2173 spin_unlock_irqrestore(&adapter->rcv_lock, flags);
2174
2175 WARN_ON(rx_local->num_ready_recv > rx_local->num_rfd);
2176}
2177
2178/* nic_rx_pkts - Checks the hardware for available packets
2179 *
2180 * Checks the hardware for available packets, using completion ring
2181 * If packets are available, it gets an RFD from the recv_list, attaches
2182 * the packet to it, puts the RFD in the RecvPendList, and also returns
2183 * the pointer to the RFD.
2184 */
2185static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter)
2186{
2187 struct rx_ring *rx_local = &adapter->rx_ring;
2188 struct rx_status_block *status;
2189 struct pkt_stat_desc *psr;
2190 struct rfd *rfd;
2191 unsigned long flags;
2192 struct list_head *element;
2193 u8 ring_index;
2194 u16 buff_index;
2195 u32 len;
2196 u32 word0;
2197 u32 word1;
2198 struct sk_buff *skb;
2199 struct fbr_lookup *fbr;
2200
2201 /* RX Status block is written by the DMA engine prior to every
2202 * interrupt. It contains the next to be used entry in the Packet
2203 * Status Ring, and also the two Free Buffer rings.
2204 */
2205 status = rx_local->rx_status_block;
2206 word1 = status->word1 >> 16;
2207
2208 /* If the PSR offset and wrap bit match our copy, nothing is new */
2209 if ((word1 & 0x1FFF) == (rx_local->local_psr_full & 0x1FFF))
2210 return NULL; /* Looks like this ring is not updated yet */
2211
2212 /* The packet status ring indicates that data is available. */
2213 psr = (struct pkt_stat_desc *)(rx_local->ps_ring_virtaddr) +
2214 (rx_local->local_psr_full & 0xFFF);
2215
2216 /* Grab any information that is required once the PSR is advanced,
2217 * since we can no longer rely on the memory being accurate
2218 */
2219 len = psr->word1 & 0xFFFF;
2220 ring_index = (psr->word1 >> 26) & 0x03;
2221 fbr = rx_local->fbr[ring_index];
2222 buff_index = (psr->word1 >> 16) & 0x3FF;
2223 word0 = psr->word0;
2224
2225 /* Indicate that we have used this PSR entry. */
2226 /* FIXME wrap 12 */
2227 add_12bit(&rx_local->local_psr_full, 1);
2228 if ((rx_local->local_psr_full & 0xFFF) > rx_local->psr_entries - 1) {
2229 /* Clear psr full and toggle the wrap bit */
2230 rx_local->local_psr_full &= ~0xFFF;
2231 rx_local->local_psr_full ^= 0x1000;
2232 }
2233
2234 writel(rx_local->local_psr_full, &adapter->regs->rxdma.psr_full_offset);
2235
2236 if (ring_index > 1 || buff_index > fbr->num_entries - 1) {
2237 /* Illegal buffer or ring index cannot be used by S/W */
2238 dev_err(&adapter->pdev->dev,
2239 "NICRxPkts PSR Entry %d indicates length of %d and/or bad bi(%d)\n",
2240 rx_local->local_psr_full & 0xFFF, len, buff_index);
2241 return NULL;
2242 }
2243
2244 /* Get and fill the RFD. */
2245 spin_lock_irqsave(&adapter->rcv_lock, flags);
2246
2247 element = rx_local->recv_list.next;
2248 rfd = list_entry(element, struct rfd, list_node);
2249
2250 if (!rfd) {
2251 spin_unlock_irqrestore(&adapter->rcv_lock, flags);
2252 return NULL;
2253 }
2254
2255 list_del(&rfd->list_node);
2256 rx_local->num_ready_recv--;
2257
2258 spin_unlock_irqrestore(&adapter->rcv_lock, flags);
2259
2260 rfd->bufferindex = buff_index;
2261 rfd->ringindex = ring_index;
2262
2263 /* In V1 silicon, there is a bug which screws up filtering of runt
2264 * packets. Therefore runt packet filtering is disabled in the MAC and
2265 * the packets are dropped here. They are also counted here.
2266 */
2267 if (len < (NIC_MIN_PACKET_SIZE + 4)) {
2268 adapter->stats.rx_other_errs++;
2269 rfd->len = 0;
2270 goto out;
2271 }
2272
2273 if ((word0 & ALCATEL_MULTICAST_PKT) && !(word0 & ALCATEL_BROADCAST_PKT))
2274 adapter->stats.multicast_pkts_rcvd++;
2275
2276 rfd->len = len;
2277
2278 skb = dev_alloc_skb(rfd->len + 2);
2279 if (!skb) {
2280 /* recycle the RFD instead of leaking it: zero length makes
 * the caller skip this packet but still return the RFD and
 * advance the free buffer ring
 */
 rfd->len = 0;
 goto out;
 }
2281
2282 adapter->netdev->stats.rx_bytes += rfd->len;
2283
2284 memcpy(skb_put(skb, rfd->len), fbr->virt[buff_index], rfd->len);
2285
2286 skb->protocol = eth_type_trans(skb, adapter->netdev);
2287 skb->ip_summed = CHECKSUM_NONE;
2288 netif_receive_skb(skb);
2289
2290out:
2291 nic_return_rfd(adapter, rfd);
2292 return rfd;
2293}
2294
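/* The PSR index uses the same offset + wrap-bit trick as the free buffer
 * rings, just 12 bits wide (the 0xFFF/0x1000 masks above). An illustrative
 * step, assuming psr_entries == 1024:
 *
 *	local_psr_full = 0x3FF            offset 1023, wrap clear
 *	add_12bit() -> 0x400, which is > psr_entries - 1, so the offset
 *	is cleared and the wrap bit toggles: local_psr_full = 0x1000
 *
 * The "ring not updated yet" test at the top compares offset and wrap bit
 * together (0x1FFF), so a full revolution is never mistaken for "empty".
 */
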
2295static int et131x_handle_recv_pkts(struct et131x_adapter *adapter, int budget)
2296{
2297 struct rfd *rfd = NULL;
2298 int count = 0;
2299 int limit = budget;
2300 bool done = true;
2301 struct rx_ring *rx_ring = &adapter->rx_ring;
2302
2303 if (budget > MAX_PACKETS_HANDLED)
2304 limit = MAX_PACKETS_HANDLED;
2305
2306 /* Process up to 'limit' available RFDs */
2307 while (count < limit) {
2308 if (list_empty(&rx_ring->recv_list)) {
2309 WARN_ON(rx_ring->num_ready_recv != 0);
2310 done = false;
2311 break;
2312 }
2313
2314 rfd = nic_rx_pkts(adapter);
2315
2316 if (rfd == NULL)
2317 break;
2318
2319 /* Do not receive any packets until a filter has been set.
2320 * Do not receive any packets until we have link.
2321 * If length is zero, return the RFD in order to advance the
2322 * Free buffer ring.
2323 */
2324 if (!adapter->packet_filter ||
2325 !netif_carrier_ok(adapter->netdev) ||
2326 rfd->len == 0)
2327 continue;
2328
2329 adapter->netdev->stats.rx_packets++;
2330
2331 if (rx_ring->num_ready_recv < RFD_LOW_WATER_MARK)
2332 dev_warn(&adapter->pdev->dev, "RFDs are running out\n");
2333
2334 count++;
2335 }
2336
2337 if (count == limit || !done) {
2338 rx_ring->unfinished_receives = true;
2339 writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
2340 &adapter->regs->global.watchdog_timer);
2341 } else {
2342 /* Watchdog timer will disable itself if appropriate. */
2343 rx_ring->unfinished_receives = false;
2344 }
2345
2346 return count;
2347}
2348
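/* This routine is the receive half of the driver's NAPI poll: it consumes
 * at most 'budget' packets and reports how many it handled. A minimal
 * sketch of the assumed call site in the poll handler:
 *
 *	work_done = et131x_handle_recv_pkts(adapter, budget);
 *	if (work_done < budget) {
 *		napi_complete(napi);
 *		et131x_enable_interrupts(adapter);
 *	}
 *
 * i.e. the device interrupt stays masked for as long as polling still has
 * a full budget's worth of work.
 */
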
2349/* et131x_tx_dma_memory_alloc
2350 *
2351 * Allocates memory that will be visible both to the device and to the CPU.
2352 * The OS will pass us packets, pointers to which we will insert in the Tx
2353 * Descriptor queue. The device will read this queue to find the packets in
2354 * memory. The device will update the "status" in memory each time it xmits a
2355 * packet.
2356 */
2357static int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
2358{
2359 int desc_size = 0;
2360 struct tx_ring *tx_ring = &adapter->tx_ring;
2361
2362 /* Allocate memory for the TCB's (Transmit Control Block) */
2363 tx_ring->tcb_ring = kcalloc(NUM_TCB, sizeof(struct tcb),
2364 GFP_ATOMIC | GFP_DMA);
2365 if (!tx_ring->tcb_ring)
2366 return -ENOMEM;
2367
2368 desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX);
2369 tx_ring->tx_desc_ring = dma_alloc_coherent(&adapter->pdev->dev,
2370 desc_size,
2371 &tx_ring->tx_desc_ring_pa,
2372 GFP_KERNEL);
2373 if (!tx_ring->tx_desc_ring) {
2374 dev_err(&adapter->pdev->dev,
2375 "Cannot alloc memory for Tx Ring\n");
2376 return -ENOMEM;
2377 }
2378
2379 tx_ring->tx_status = dma_alloc_coherent(&adapter->pdev->dev,
2380 sizeof(u32),
2381 &tx_ring->tx_status_pa,
2382 GFP_KERNEL);
2383 if (!tx_ring->tx_status) {
2384 dev_err(&adapter->pdev->dev,
2385 "Cannot alloc memory for Tx status block\n");
2386 return -ENOMEM;
2387 }
2388 return 0;
2389}
2390
2391static void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
2392{
2393 int desc_size = 0;
2394 struct tx_ring *tx_ring = &adapter->tx_ring;
2395
2396 if (tx_ring->tx_desc_ring) {
2397 /* Free memory relating to Tx rings here */
2398 desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX);
2399 dma_free_coherent(&adapter->pdev->dev,
2400 desc_size,
2401 tx_ring->tx_desc_ring,
2402 tx_ring->tx_desc_ring_pa);
2403 tx_ring->tx_desc_ring = NULL;
2404 }
2405
2406 /* Free memory for the Tx status block */
2407 if (tx_ring->tx_status) {
2408 dma_free_coherent(&adapter->pdev->dev,
2409 sizeof(u32),
2410 tx_ring->tx_status,
2411 tx_ring->tx_status_pa);
2412
2413 tx_ring->tx_status = NULL;
2414 }
2415 /* Free the memory for the tcb structures */
2416 kfree(tx_ring->tcb_ring);
2417}
2418
2419/* nic_send_packet - NIC specific send handler for version B silicon. */
2420static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
2421{
2422 u32 i;
2423 struct tx_desc desc[24];
2424 u32 frag = 0;
2425 u32 thiscopy, remainder;
2426 struct sk_buff *skb = tcb->skb;
2427 u32 nr_frags = skb_shinfo(skb)->nr_frags + 1;
2428 struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0];
2429 struct phy_device *phydev = adapter->phydev;
2430 dma_addr_t dma_addr;
2431 struct tx_ring *tx_ring = &adapter->tx_ring;
2432
2433 /* Part of the optimizations of this send routine restrict us to
2434 * sending 24 fragments at a pass. In practice we should never see
2435 * more than 5 fragments.
2436 */
2437
2438 /* nr_frags plus a possibly split head must fit in the 24-entry desc[] */
2439 BUILD_BUG_ON(MAX_SKB_FRAGS + 1 > 23);
2440
2441 memset(desc, 0, sizeof(struct tx_desc) * (nr_frags + 1));
2442
2443 for (i = 0; i < nr_frags; i++) {
2444 /* If there is something in this element, lets get a
2445 * descriptor from the ring and get the necessary data
2446 */
2447 if (i == 0) {
2448 /* If the fragments are smaller than a standard MTU,
2449 * then map them to a single descriptor in the Tx
2450 * Desc ring. However, if they're larger, as is
2451 * possible with support for jumbo packets, then
2452 * split them each across 2 descriptors.
2453 *
2454 * This will work until we determine why the hardware
2455 * doesn't seem to like large fragments.
2456 */
2457 if (skb_headlen(skb) <= 1514) {
2458 /* Low 16bits are length, high is vlan and
2459 * unused currently so zero
2460 */
2461 desc[frag].len_vlan = skb_headlen(skb);
2462 dma_addr = dma_map_single(&adapter->pdev->dev,
2463 skb->data,
2464 skb_headlen(skb),
2465 DMA_TO_DEVICE);
2466 desc[frag].addr_lo = lower_32_bits(dma_addr);
2467 desc[frag].addr_hi = upper_32_bits(dma_addr);
2468 frag++;
2469 } else {
2470 desc[frag].len_vlan = skb_headlen(skb) / 2;
2471 dma_addr = dma_map_single(&adapter->pdev->dev,
2472 skb->data,
2473 skb_headlen(skb) / 2,
2474 DMA_TO_DEVICE);
2475 desc[frag].addr_lo = lower_32_bits(dma_addr);
2476 desc[frag].addr_hi = upper_32_bits(dma_addr);
2477 frag++;
2478
2479 desc[frag].len_vlan = skb_headlen(skb) / 2;
2480 dma_addr = dma_map_single(&adapter->pdev->dev,
2481 skb->data +
2482 skb_headlen(skb) / 2,
2483 skb_headlen(skb) / 2,
2484 DMA_TO_DEVICE);
2485 desc[frag].addr_lo = lower_32_bits(dma_addr);
2486 desc[frag].addr_hi = upper_32_bits(dma_addr);
2487 frag++;
2488 }
2489 } else {
2490 desc[frag].len_vlan = frags[i - 1].size;
2491 dma_addr = skb_frag_dma_map(&adapter->pdev->dev,
2492 &frags[i - 1],
2493 0,
2494 frags[i - 1].size,
2495 DMA_TO_DEVICE);
2496 desc[frag].addr_lo = lower_32_bits(dma_addr);
2497 desc[frag].addr_hi = upper_32_bits(dma_addr);
2498 frag++;
2499 }
2500 }
2501
2502 if (phydev && phydev->speed == SPEED_1000) {
2503 if (++tx_ring->since_irq == PARM_TX_NUM_BUFS_DEF) {
2504 /* Last element & Interrupt flag */
2505 desc[frag - 1].flags =
2506 TXDESC_FLAG_INTPROC | TXDESC_FLAG_LASTPKT;
2507 tx_ring->since_irq = 0;
2508 } else { /* Last element */
2509 desc[frag - 1].flags = TXDESC_FLAG_LASTPKT;
2510 }
2511 } else {
2512 desc[frag - 1].flags =
2513 TXDESC_FLAG_INTPROC | TXDESC_FLAG_LASTPKT;
2514 }
2515
2516 desc[0].flags |= TXDESC_FLAG_FIRSTPKT;
2517
2518 tcb->index_start = tx_ring->send_idx;
2519 tcb->stale = 0;
2520
2521 thiscopy = NUM_DESC_PER_RING_TX - INDEX10(tx_ring->send_idx);
2522
2523 if (thiscopy >= frag) {
2524 remainder = 0;
2525 thiscopy = frag;
2526 } else {
2527 remainder = frag - thiscopy;
2528 }
2529
2530 memcpy(tx_ring->tx_desc_ring + INDEX10(tx_ring->send_idx),
2531 desc,
2532 sizeof(struct tx_desc) * thiscopy);
2533
2534 add_10bit(&tx_ring->send_idx, thiscopy);
2535
2536 if (INDEX10(tx_ring->send_idx) == 0 ||
2537 INDEX10(tx_ring->send_idx) == NUM_DESC_PER_RING_TX) {
2538 tx_ring->send_idx &= ~ET_DMA10_MASK;
2539 tx_ring->send_idx ^= ET_DMA10_WRAP;
2540 }
2541
2542 if (remainder) {
2543 memcpy(tx_ring->tx_desc_ring,
2544 desc + thiscopy,
2545 sizeof(struct tx_desc) * remainder);
2546
2547 add_10bit(&tx_ring->send_idx, remainder);
2548 }
2549
2550 if (INDEX10(tx_ring->send_idx) == 0) {
2551 if (tx_ring->send_idx)
2552 tcb->index = NUM_DESC_PER_RING_TX - 1;
2553 else
2554 tcb->index = ET_DMA10_WRAP|(NUM_DESC_PER_RING_TX - 1);
2555 } else {
2556 tcb->index = tx_ring->send_idx - 1;
2557 }
2558
2559 spin_lock(&adapter->tcb_send_qlock);
2560
2561 if (tx_ring->send_tail)
2562 tx_ring->send_tail->next = tcb;
2563 else
2564 tx_ring->send_head = tcb;
2565
2566 tx_ring->send_tail = tcb;
2567
2568 WARN_ON(tcb->next != NULL);
2569
2570 tx_ring->used++;
2571
2572 spin_unlock(&adapter->tcb_send_qlock);
2573
2574 /* Write the new write pointer back to the device. */
2575 writel(tx_ring->send_idx, &adapter->regs->txdma.service_request);
2576
2577 /* For Gig only, we use Tx Interrupt coalescing. Enable the software
2578 * timer to wake us up if this packet isn't followed by N more.
2579 */
2580 if (phydev && phydev->speed == SPEED_1000) {
2581 writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
2582 &adapter->regs->global.watchdog_timer);
2583 }
2584 return 0;
2585}
2586
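/* Example of the split descriptor copy above, with illustrative numbers
 * (assuming NUM_DESC_PER_RING_TX == 512):
 *
 *	INDEX10(send_idx) = 509, frag = 5
 *	thiscopy  = 512 - 509 = 3   descriptors written at the ring's tail
 *	remainder = 5 - 3     = 2   descriptors written back at the head
 *
 * After the two memcpys, send_idx's offset is 2 and its wrap bit has
 * toggled, which is the value handed to the service_request register.
 */
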
2587static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter)
2588{
2589 int status;
2590 struct tcb *tcb;
2591 unsigned long flags;
2592 struct tx_ring *tx_ring = &adapter->tx_ring;
2593
2594 /* All packets must have at least a MAC address and a protocol type */
2595 if (skb->len < ETH_HLEN)
2596 return -EIO;
2597
2598 spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);
2599
2600 tcb = tx_ring->tcb_qhead;
2601
2602 if (tcb == NULL) {
2603 spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
2604 return -ENOMEM;
2605 }
2606
2607 tx_ring->tcb_qhead = tcb->next;
2608
2609 if (tx_ring->tcb_qhead == NULL)
2610 tx_ring->tcb_qtail = NULL;
2611
2612 spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
2613
2614 tcb->skb = skb;
2615 tcb->next = NULL;
2616
2617 status = nic_send_packet(adapter, tcb);
2618
2619 if (status != 0) {
2620 spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);
2621
2622 if (tx_ring->tcb_qtail)
2623 tx_ring->tcb_qtail->next = tcb;
2624 else
2625 /* Apparently ready Q is empty. */
2626 tx_ring->tcb_qhead = tcb;
2627
2628 tx_ring->tcb_qtail = tcb;
2629 spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
2630 return status;
2631 }
2632 WARN_ON(tx_ring->used > NUM_TCB);
2633 return 0;
2634}
2635
2636/* free_send_packet - Recycle a struct tcb */
2637static inline void free_send_packet(struct et131x_adapter *adapter,
2638 struct tcb *tcb)
2639{
2640 unsigned long flags;
2641 struct tx_desc *desc = NULL;
2642 struct net_device_stats *stats = &adapter->netdev->stats;
2643 struct tx_ring *tx_ring = &adapter->tx_ring;
2644 u64 dma_addr;
2645
2646 if (tcb->skb) {
2647 stats->tx_bytes += tcb->skb->len;
2648
2649 /* Iterate through the TX descriptors on the ring
2650 * corresponding to this packet and unmap the fragments
2651 * they point to
2652 */
2653 do {
2654 desc = tx_ring->tx_desc_ring +
2655 INDEX10(tcb->index_start);
2656
2657 dma_addr = desc->addr_lo;
2658 dma_addr |= (u64)desc->addr_hi << 32;
2659
2660 dma_unmap_single(&adapter->pdev->dev,
2661 dma_addr,
2662 desc->len_vlan, DMA_TO_DEVICE);
2663
2664 add_10bit(&tcb->index_start, 1);
2665 if (INDEX10(tcb->index_start) >=
2666 NUM_DESC_PER_RING_TX) {
2667 tcb->index_start &= ~ET_DMA10_MASK;
2668 tcb->index_start ^= ET_DMA10_WRAP;
2669 }
2670 } while (desc != tx_ring->tx_desc_ring + INDEX10(tcb->index));
2671
2672 dev_kfree_skb_any(tcb->skb);
2673 }
2674
2675 memset(tcb, 0, sizeof(struct tcb));
2676
2677 /* Add the TCB to the Ready Q */
2678 spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);
2679
2680 stats->tx_packets++;
2681
2682 if (tx_ring->tcb_qtail)
2683 tx_ring->tcb_qtail->next = tcb;
2684 else /* Apparently ready Q is empty. */
2685 tx_ring->tcb_qhead = tcb;
2686
2687 tx_ring->tcb_qtail = tcb;
2688
2689 spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
2690 WARN_ON(tx_ring->used < 0);
2691}
2692
2693/* et131x_free_busy_send_packets - Free and complete the stopped active sends */
2694static void et131x_free_busy_send_packets(struct et131x_adapter *adapter)
2695{
2696 struct tcb *tcb;
2697 unsigned long flags;
2698 u32 freed = 0;
2699 struct tx_ring *tx_ring = &adapter->tx_ring;
2700
2701 /* Any packets being sent? Check the first TCB on the send list */
2702 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
2703
2704 tcb = tx_ring->send_head;
2705
2706 while (tcb != NULL && freed < NUM_TCB) {
2707 struct tcb *next = tcb->next;
2708
2709 tx_ring->send_head = next;
2710
2711 if (next == NULL)
2712 tx_ring->send_tail = NULL;
2713
2714 tx_ring->used--;
2715
2716 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
2717
2718 freed++;
2719 free_send_packet(adapter, tcb);
2720
2721 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
2722
2723 tcb = tx_ring->send_head;
2724 }
2725
2726 WARN_ON(freed == NUM_TCB);
2727
2728 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
2729
2730 tx_ring->used = 0;
2731}
2732
2733/* et131x_handle_send_pkts
2734 *
2735 * Re-claim the send resources, complete sends and get more to send from
2736 * the send wait queue.
2737 */
2738static void et131x_handle_send_pkts(struct et131x_adapter *adapter)
2739{
2740 unsigned long flags;
2741 u32 serviced;
2742 struct tcb *tcb;
2743 u32 index;
2744 struct tx_ring *tx_ring = &adapter->tx_ring;
2745
2746 serviced = readl(&adapter->regs->txdma.new_service_complete);
2747 index = INDEX10(serviced);
2748
2749 /* Has the ring wrapped? Process any descriptors that do not have
2750 * the same "wrap" indicator as the current completion indicator
2751 */
2752 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
2753
2754 tcb = tx_ring->send_head;
2755
2756 while (tcb &&
2757 ((serviced ^ tcb->index) & ET_DMA10_WRAP) &&
2758 index < INDEX10(tcb->index)) {
2759 tx_ring->used--;
2760 tx_ring->send_head = tcb->next;
2761 if (tcb->next == NULL)
2762 tx_ring->send_tail = NULL;
2763
2764 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
2765 free_send_packet(adapter, tcb);
2766 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
2767
2768 /* Go to the next packet */
2769 tcb = tx_ring->send_head;
2770 }
2771 while (tcb &&
2772 !((serviced ^ tcb->index) & ET_DMA10_WRAP) &&
2773 index > (tcb->index & ET_DMA10_MASK)) {
2774 tx_ring->used--;
2775 tx_ring->send_head = tcb->next;
2776 if (tcb->next == NULL)
2777 tx_ring->send_tail = NULL;
2778
2779 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
2780 free_send_packet(adapter, tcb);
2781 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
2782
2783 /* Go to the next packet */
2784 tcb = tx_ring->send_head;
2785 }
2786
2787 /* Wake up the queue when we hit a low-water mark */
2788 if (tx_ring->used <= NUM_TCB / 3)
2789 netif_wake_queue(adapter->netdev);
2790
2791 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
2792}
2793
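/* How the two loops above decide that a TCB is complete (same ET_DMA10
 * offset + wrap-bit encoding as the rest of this file):
 *
 *	wrap bits differ: the hardware pointer is one revolution ahead, so
 *	any TCB with a higher offset was finished last revolution
 *	wrap bits equal:  same revolution, so any TCB with a lower offset
 *	is finished
 *
 * The first while loop handles the former case, the second the latter.
 */
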
2794static int et131x_get_settings(struct net_device *netdev,
2795 struct ethtool_cmd *cmd)
2796{
2797 struct et131x_adapter *adapter = netdev_priv(netdev);
2798
2799 return phy_ethtool_gset(adapter->phydev, cmd);
2800}
2801
2802static int et131x_set_settings(struct net_device *netdev,
2803 struct ethtool_cmd *cmd)
2804{
2805 struct et131x_adapter *adapter = netdev_priv(netdev);
2806
2807 return phy_ethtool_sset(adapter->phydev, cmd);
2808}
2809
2810static int et131x_get_regs_len(struct net_device *netdev)
2811{
2812#define ET131X_REGS_LEN 256
2813 return ET131X_REGS_LEN * sizeof(u32);
2814}
2815
2816static void et131x_get_regs(struct net_device *netdev,
2817 struct ethtool_regs *regs, void *regs_data)
2818{
2819 struct et131x_adapter *adapter = netdev_priv(netdev);
2820 struct address_map __iomem *aregs = adapter->regs;
2821 u32 *regs_buff = regs_data;
2822 u32 num = 0;
2823 u16 tmp;
2824
2825 memset(regs_data, 0, et131x_get_regs_len(netdev));
2826
2827 regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
2828 adapter->pdev->device;
2829
2830 /* PHY regs */
2831 et131x_mii_read(adapter, MII_BMCR, &tmp);
2832 regs_buff[num++] = tmp;
2833 et131x_mii_read(adapter, MII_BMSR, &tmp);
2834 regs_buff[num++] = tmp;
2835 et131x_mii_read(adapter, MII_PHYSID1, &tmp);
2836 regs_buff[num++] = tmp;
2837 et131x_mii_read(adapter, MII_PHYSID2, &tmp);
2838 regs_buff[num++] = tmp;
2839 et131x_mii_read(adapter, MII_ADVERTISE, &tmp);
2840 regs_buff[num++] = tmp;
2841 et131x_mii_read(adapter, MII_LPA, &tmp);
2842 regs_buff[num++] = tmp;
2843 et131x_mii_read(adapter, MII_EXPANSION, &tmp);
2844 regs_buff[num++] = tmp;
2845 /* Autoneg next page transmit reg */
2846 et131x_mii_read(adapter, 0x07, &tmp);
2847 regs_buff[num++] = tmp;
2848 /* Link partner next page reg */
2849 et131x_mii_read(adapter, 0x08, &tmp);
2850 regs_buff[num++] = tmp;
2851 et131x_mii_read(adapter, MII_CTRL1000, &tmp);
2852 regs_buff[num++] = tmp;
2853 et131x_mii_read(adapter, MII_STAT1000, &tmp);
2854 regs_buff[num++] = tmp;
2855 et131x_mii_read(adapter, 0x0b, &tmp);
2856 regs_buff[num++] = tmp;
2857 et131x_mii_read(adapter, 0x0c, &tmp);
2858 regs_buff[num++] = tmp;
2859 et131x_mii_read(adapter, MII_MMD_CTRL, &tmp);
2860 regs_buff[num++] = tmp;
2861 et131x_mii_read(adapter, MII_MMD_DATA, &tmp);
2862 regs_buff[num++] = tmp;
2863 et131x_mii_read(adapter, MII_ESTATUS, &tmp);
2864 regs_buff[num++] = tmp;
2865
2866 et131x_mii_read(adapter, PHY_INDEX_REG, &tmp);
2867 regs_buff[num++] = tmp;
2868 et131x_mii_read(adapter, PHY_DATA_REG, &tmp);
2869 regs_buff[num++] = tmp;
2870 et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG, &tmp);
2871 regs_buff[num++] = tmp;
2872 et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL, &tmp);
2873 regs_buff[num++] = tmp;
2874 et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL + 1, &tmp);
2875 regs_buff[num++] = tmp;
2876
2877 et131x_mii_read(adapter, PHY_REGISTER_MGMT_CONTROL, &tmp);
2878 regs_buff[num++] = tmp;
2879 et131x_mii_read(adapter, PHY_CONFIG, &tmp);
2880 regs_buff[num++] = tmp;
2881 et131x_mii_read(adapter, PHY_PHY_CONTROL, &tmp);
2882 regs_buff[num++] = tmp;
2883 et131x_mii_read(adapter, PHY_INTERRUPT_MASK, &tmp);
2884 regs_buff[num++] = tmp;
2885 et131x_mii_read(adapter, PHY_INTERRUPT_STATUS, &tmp);
2886 regs_buff[num++] = tmp;
2887 et131x_mii_read(adapter, PHY_PHY_STATUS, &tmp);
2888 regs_buff[num++] = tmp;
2889 et131x_mii_read(adapter, PHY_LED_1, &tmp);
2890 regs_buff[num++] = tmp;
2891 et131x_mii_read(adapter, PHY_LED_2, &tmp);
2892 regs_buff[num++] = tmp;
2893
2894 /* Global regs */
2895 regs_buff[num++] = readl(&aregs->global.txq_start_addr);
2896 regs_buff[num++] = readl(&aregs->global.txq_end_addr);
2897 regs_buff[num++] = readl(&aregs->global.rxq_start_addr);
2898 regs_buff[num++] = readl(&aregs->global.rxq_end_addr);
2899 regs_buff[num++] = readl(&aregs->global.pm_csr);
2900 regs_buff[num++] = adapter->stats.interrupt_status;
2901 regs_buff[num++] = readl(&aregs->global.int_mask);
2902 regs_buff[num++] = readl(&aregs->global.int_alias_clr_en);
2903 regs_buff[num++] = readl(&aregs->global.int_status_alias);
2904 regs_buff[num++] = readl(&aregs->global.sw_reset);
2905 regs_buff[num++] = readl(&aregs->global.slv_timer);
2906 regs_buff[num++] = readl(&aregs->global.msi_config);
2907 regs_buff[num++] = readl(&aregs->global.loopback);
2908 regs_buff[num++] = readl(&aregs->global.watchdog_timer);
2909
2910 /* TXDMA regs */
2911 regs_buff[num++] = readl(&aregs->txdma.csr);
2912 regs_buff[num++] = readl(&aregs->txdma.pr_base_hi);
2913 regs_buff[num++] = readl(&aregs->txdma.pr_base_lo);
2914 regs_buff[num++] = readl(&aregs->txdma.pr_num_des);
2915 regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr);
2916 regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr_ext);
2917 regs_buff[num++] = readl(&aregs->txdma.txq_rd_addr);
2918 regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_hi);
2919 regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_lo);
2920 regs_buff[num++] = readl(&aregs->txdma.service_request);
2921 regs_buff[num++] = readl(&aregs->txdma.service_complete);
2922 regs_buff[num++] = readl(&aregs->txdma.cache_rd_index);
2923 regs_buff[num++] = readl(&aregs->txdma.cache_wr_index);
2924 regs_buff[num++] = readl(&aregs->txdma.tx_dma_error);
2925 regs_buff[num++] = readl(&aregs->txdma.desc_abort_cnt);
2926 regs_buff[num++] = readl(&aregs->txdma.payload_abort_cnt);
2927 regs_buff[num++] = readl(&aregs->txdma.writeback_abort_cnt);
2928 regs_buff[num++] = readl(&aregs->txdma.desc_timeout_cnt);
2929 regs_buff[num++] = readl(&aregs->txdma.payload_timeout_cnt);
2930 regs_buff[num++] = readl(&aregs->txdma.writeback_timeout_cnt);
2931 regs_buff[num++] = readl(&aregs->txdma.desc_error_cnt);
2932 regs_buff[num++] = readl(&aregs->txdma.payload_error_cnt);
2933 regs_buff[num++] = readl(&aregs->txdma.writeback_error_cnt);
2934 regs_buff[num++] = readl(&aregs->txdma.dropped_tlp_cnt);
2935 regs_buff[num++] = readl(&aregs->txdma.new_service_complete);
2936 regs_buff[num++] = readl(&aregs->txdma.ethernet_packet_cnt);
2937
2938 /* RXDMA regs */
2939 regs_buff[num++] = readl(&aregs->rxdma.csr);
2940 regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_hi);
2941 regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_lo);
2942 regs_buff[num++] = readl(&aregs->rxdma.num_pkt_done);
2943 regs_buff[num++] = readl(&aregs->rxdma.max_pkt_time);
2944 regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr);
2945 regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr_ext);
2946 regs_buff[num++] = readl(&aregs->rxdma.rxq_wr_addr);
2947 regs_buff[num++] = readl(&aregs->rxdma.psr_base_hi);
2948 regs_buff[num++] = readl(&aregs->rxdma.psr_base_lo);
2949 regs_buff[num++] = readl(&aregs->rxdma.psr_num_des);
2950 regs_buff[num++] = readl(&aregs->rxdma.psr_avail_offset);
2951 regs_buff[num++] = readl(&aregs->rxdma.psr_full_offset);
2952 regs_buff[num++] = readl(&aregs->rxdma.psr_access_index);
2953 regs_buff[num++] = readl(&aregs->rxdma.psr_min_des);
2954 regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_lo);
2955 regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_hi);
2956 regs_buff[num++] = readl(&aregs->rxdma.fbr0_num_des);
2957 regs_buff[num++] = readl(&aregs->rxdma.fbr0_avail_offset);
2958 regs_buff[num++] = readl(&aregs->rxdma.fbr0_full_offset);
2959 regs_buff[num++] = readl(&aregs->rxdma.fbr0_rd_index);
2960 regs_buff[num++] = readl(&aregs->rxdma.fbr0_min_des);
2961 regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_lo);
2962 regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_hi);
2963 regs_buff[num++] = readl(&aregs->rxdma.fbr1_num_des);
2964 regs_buff[num++] = readl(&aregs->rxdma.fbr1_avail_offset);
2965 regs_buff[num++] = readl(&aregs->rxdma.fbr1_full_offset);
2966 regs_buff[num++] = readl(&aregs->rxdma.fbr1_rd_index);
2967 regs_buff[num++] = readl(&aregs->rxdma.fbr1_min_des);
2968}
2969
2970static void et131x_get_drvinfo(struct net_device *netdev,
2971 struct ethtool_drvinfo *info)
2972{
2973 struct et131x_adapter *adapter = netdev_priv(netdev);
2974
2975 strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
2976 strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
2977 strlcpy(info->bus_info, pci_name(adapter->pdev),
2978 sizeof(info->bus_info));
2979}
2980
2981static struct ethtool_ops et131x_ethtool_ops = {
2982 .get_settings = et131x_get_settings,
2983 .set_settings = et131x_set_settings,
2984 .get_drvinfo = et131x_get_drvinfo,
2985 .get_regs_len = et131x_get_regs_len,
2986 .get_regs = et131x_get_regs,
2987 .get_link = ethtool_op_get_link,
2988};
2989
2990/* et131x_hwaddr_init - set up the MAC Address */
2991static void et131x_hwaddr_init(struct et131x_adapter *adapter)
2992{
2993 /* If we have our default MAC from init and no MAC address from
2994 * EEPROM then we need to generate the last octet and set it on the
2995 * device
2996 */
2997 if (is_zero_ether_addr(adapter->rom_addr)) {
2998 /* We need to randomly generate the last octet so we
2999 * decrease our chances of setting the MAC address to the
3000 * same as another one of our cards in the system
3001 */
3002 get_random_bytes(&adapter->addr[5], 1);
3003 /* We have the default value in the register we are
3004 * working with so we need to copy the current
3005 * address into the permanent address
3006 */
3007 ether_addr_copy(adapter->rom_addr, adapter->addr);
3008 } else {
3009 /* We do not have an override address, so set the
3010 * current address to the permanent address and add
3011 * it to the device
3012 */
3013 ether_addr_copy(adapter->addr, adapter->rom_addr);
3014 }
3015}
3016
3017static int et131x_pci_init(struct et131x_adapter *adapter,
3018 struct pci_dev *pdev)
3019{
3020 u16 max_payload;
3021 int i, rc;
3022
3023 rc = et131x_init_eeprom(adapter);
3024 if (rc < 0)
3025 goto out;
3026
3027 if (!pci_is_pcie(pdev)) {
3028 dev_err(&pdev->dev, "Missing PCIe capabilities\n");
3029 goto err_out;
3030 }
3031
3032 /* Program the Ack/Nak latency and replay timers */
3033 max_payload = pdev->pcie_mpss;
3034
3035 if (max_payload < 2) {
3036 static const u16 acknak[2] = { 0x76, 0xD0 };
3037 static const u16 replay[2] = { 0x1E0, 0x2ED };
3038
3039 if (pci_write_config_word(pdev, ET1310_PCI_ACK_NACK,
3040 acknak[max_payload])) {
3041 dev_err(&pdev->dev,
3042 "Could not write PCI config space for ACK/NAK\n");
3043 goto err_out;
3044 }
3045 if (pci_write_config_word(pdev, ET1310_PCI_REPLAY,
3046 replay[max_payload])) {
3047 dev_err(&pdev->dev,
3048 "Could not write PCI config space for Replay Timer\n");
3049 goto err_out;
3050 }
3051 }
3052
3053 /* Program the L0s and L1 latency timers with the default values,
3054 * 001 for L0s and 010 for L1
3055 */
3056 if (pci_write_config_byte(pdev, ET1310_PCI_L0L1LATENCY, 0x11)) {
3057 dev_err(&pdev->dev,
3058 "Could not write PCI config space for Latency Timers\n");
3059 goto err_out;
3060 }
3061
3062 /* Change the max read size to 2k */
3063 if (pcie_set_readrq(pdev, 2048)) {
3064 dev_err(&pdev->dev,
3065 "Couldn't change PCI config space for Max read size\n");
3066 goto err_out;
3067 }
3068
3069 /* Get MAC address from config space if an eeprom exists, otherwise
3070 * the MAC address there will not be valid
3071 */
3072 if (!adapter->has_eeprom) {
3073 et131x_hwaddr_init(adapter);
3074 return 0;
3075 }
3076
3077 for (i = 0; i < ETH_ALEN; i++) {
3078 if (pci_read_config_byte(pdev, ET1310_PCI_MAC_ADDRESS + i,
3079 adapter->rom_addr + i)) {
3080 dev_err(&pdev->dev, "Could not read PCI config space for MAC address\n");
3081 goto err_out;
3082 }
3083 }
3084 ether_addr_copy(adapter->addr, adapter->rom_addr);
3085out:
3086 return rc;
3087err_out:
3088 rc = -EIO;
3089 goto out;
3090}
3091
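/* The two config writes above index the acknak[]/replay[] tables by the
 * device's maximum payload size setting (a sketch; the timer values come
 * from the tables in the code):
 *
 *	pcie_mpss == 0 (128-byte max payload) -> ACK/NAK 0x76,  replay 0x1E0
 *	pcie_mpss == 1 (256-byte max payload) -> ACK/NAK 0xD0,  replay 0x2ED
 *	pcie_mpss >= 2                        -> hardware defaults are kept
 *
 * Larger payloads take longer to transfer, so both timers scale up with
 * the payload size.
 */
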
3092/* et131x_error_timer_handler
3093 * @data: timer-specific variable; here a pointer to our adapter structure
3094 *
3095 * The routine called when the error timer expires, to track the number of
3096 * recurring errors.
3097 */
3098static void et131x_error_timer_handler(unsigned long data)
3099{
3100 struct et131x_adapter *adapter = (struct et131x_adapter *)data;
3101 struct phy_device *phydev = adapter->phydev;
3102
3103 if (et1310_in_phy_coma(adapter)) {
3104 /* Bring the device immediately out of coma, to
3105 * prevent it from sleeping indefinitely, this
3106 * mechanism could be improved!
3107 */
3108 et1310_disable_phy_coma(adapter);
3109 adapter->boot_coma = 20;
3110 } else {
3111 et1310_update_macstat_host_counters(adapter);
3112 }
3113
3114 if (!phydev->link && adapter->boot_coma < 11)
3115 adapter->boot_coma++;
3116
3117 if (adapter->boot_coma == 10) {
3118 if (!phydev->link) {
3119 if (!et1310_in_phy_coma(adapter)) {
3120 /* NOTE - This was originally a 'sync with
3121 * interrupt'. How to do that under Linux?
3122 */
3123 et131x_enable_interrupts(adapter);
3124 et1310_enable_phy_coma(adapter);
3125 }
3126 }
3127 }
3128
3129 /* This is a periodic timer, so reschedule */
3130 mod_timer(&adapter->error_timer, jiffies + TX_ERROR_PERIOD * HZ / 1000);
3131}
3132
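/* The reschedule above converts a period in milliseconds to jiffies by
 * hand: jiffies + TX_ERROR_PERIOD * HZ / 1000. Illustratively, if
 * TX_ERROR_PERIOD were 1000 ms and HZ were 250, the timer would refire
 * 250 jiffies (one second) from now; msecs_to_jiffies(TX_ERROR_PERIOD)
 * expresses the same conversion.
 */
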
3133static void et131x_adapter_memory_free(struct et131x_adapter *adapter)
3134{
3135 et131x_tx_dma_memory_free(adapter);
3136 et131x_rx_dma_memory_free(adapter);
3137}
3138
3139static int et131x_adapter_memory_alloc(struct et131x_adapter *adapter)
3140{
3141 int status;
3142
3143 status = et131x_tx_dma_memory_alloc(adapter);
3144 if (status) {
3145 dev_err(&adapter->pdev->dev,
3146 "et131x_tx_dma_memory_alloc FAILED\n");
3147 et131x_tx_dma_memory_free(adapter);
3148 return status;
3149 }
3150
3151 status = et131x_rx_dma_memory_alloc(adapter);
3152 if (status) {
3153 dev_err(&adapter->pdev->dev,
3154 "et131x_rx_dma_memory_alloc FAILED\n");
3155 et131x_adapter_memory_free(adapter);
3156 return status;
3157 }
3158
3159 status = et131x_init_recv(adapter);
3160 if (status) {
3161 dev_err(&adapter->pdev->dev, "et131x_init_recv FAILED\n");
3162 et131x_adapter_memory_free(adapter);
3163 }
3164 return status;
3165}
3166
3167static void et131x_adjust_link(struct net_device *netdev)
3168{
3169 struct et131x_adapter *adapter = netdev_priv(netdev);
3170 struct phy_device *phydev = adapter->phydev;
3171
3172 if (!phydev)
3173 return;
3174 if (phydev->link == adapter->link)
3175 return;
3176
3177 /* Check to see if we are in coma mode and if
3178 * so, disable it because we will not be able
3179 * to read PHY values until we are out.
3180 */
3181 if (et1310_in_phy_coma(adapter))
3182 et1310_disable_phy_coma(adapter);
3183
3184 adapter->link = phydev->link;
3185 phy_print_status(phydev);
3186
3187 if (phydev->link) {
3188 adapter->boot_coma = 20;
3189 if (phydev->speed == SPEED_10) {
3190 u16 register18;
3191
3192 et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
3193 &register18);
3194 et131x_mii_write(adapter, phydev->addr,
3195 PHY_MPHY_CONTROL_REG,
3196 register18 | 0x4);
3197 et131x_mii_write(adapter, phydev->addr, PHY_INDEX_REG,
3198 register18 | 0x8402);
3199 et131x_mii_write(adapter, phydev->addr, PHY_DATA_REG,
3200 register18 | 511);
3201 et131x_mii_write(adapter, phydev->addr,
3202 PHY_MPHY_CONTROL_REG, register18);
3203 }
3204
3205 et1310_config_flow_control(adapter);
3206
3207 if (phydev->speed == SPEED_1000 &&
3208 adapter->registry_jumbo_packet > 2048) {
3209 u16 reg;
3210
3211 et131x_mii_read(adapter, PHY_CONFIG, &reg);
3212 reg &= ~ET_PHY_CONFIG_TX_FIFO_DEPTH;
3213 reg |= ET_PHY_CONFIG_FIFO_DEPTH_32;
3214 et131x_mii_write(adapter, phydev->addr, PHY_CONFIG,
3215 reg);
3216 }
3217
3218 et131x_set_rx_dma_timer(adapter);
3219 et1310_config_mac_regs2(adapter);
3220 } else {
3221 adapter->boot_coma = 0;
3222
3223 if (phydev->speed == SPEED_10) {
3224 u16 register18;
3225
3226 et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
3227 &register18);
3228 et131x_mii_write(adapter, phydev->addr,
3229 PHY_MPHY_CONTROL_REG,
3230 register18 | 0x4);
3231 et131x_mii_write(adapter, phydev->addr,
3232 PHY_INDEX_REG, register18 | 0x8402);
3233 et131x_mii_write(adapter, phydev->addr,
3234 PHY_DATA_REG, register18 | 511);
3235 et131x_mii_write(adapter, phydev->addr,
3236 PHY_MPHY_CONTROL_REG, register18);
3237 }
3238
3239 et131x_free_busy_send_packets(adapter);
3240 et131x_init_send(adapter);
3241
3242 /* Bring the device back to the state it was in during
3243 * init, prior to autonegotiation completing. This
3244 * way, when we get the auto-neg complete interrupt,
3245 * we can complete init by calling config_mac_regs2.
3246 */
3247 et131x_soft_reset(adapter);
3248
3249 et131x_adapter_setup(adapter);
3250
3251 et131x_disable_txrx(netdev);
3252 et131x_enable_txrx(netdev);
3253 }
3254}
3255
3256static int et131x_mii_probe(struct net_device *netdev)
3257{
3258 struct et131x_adapter *adapter = netdev_priv(netdev);
3259 struct phy_device *phydev = NULL;
3260
3261 phydev = phy_find_first(adapter->mii_bus);
3262 if (!phydev) {
3263 dev_err(&adapter->pdev->dev, "no PHY found\n");
3264 return -ENODEV;
3265 }
3266
3267 phydev = phy_connect(netdev, dev_name(&phydev->dev),
3268 &et131x_adjust_link, PHY_INTERFACE_MODE_MII);
3269
3270 if (IS_ERR(phydev)) {
3271 dev_err(&adapter->pdev->dev, "Could not attach to PHY\n");
3272 return PTR_ERR(phydev);
3273 }
3274
3275 phydev->supported &= (SUPPORTED_10baseT_Half |
3276 SUPPORTED_10baseT_Full |
3277 SUPPORTED_100baseT_Half |
3278 SUPPORTED_100baseT_Full |
3279 SUPPORTED_Autoneg |
3280 SUPPORTED_MII |
3281 SUPPORTED_TP);
3282
3283 if (adapter->pdev->device != ET131X_PCI_DEVICE_ID_FAST)
3284 phydev->supported |= SUPPORTED_1000baseT_Half |
3285 SUPPORTED_1000baseT_Full;
3286
3287 phydev->advertising = phydev->supported;
3288 phydev->autoneg = AUTONEG_ENABLE;
3289 adapter->phydev = phydev;
3290
3291 dev_info(&adapter->pdev->dev,
3292 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
3293 phydev->drv->name, dev_name(&phydev->dev));
3294
3295 return 0;
3296}
3297
3298static struct et131x_adapter *et131x_adapter_init(struct net_device *netdev,
3299 struct pci_dev *pdev)
3300{
3301 static const u8 default_mac[] = { 0x00, 0x05, 0x3d, 0x00, 0x02, 0x00 };
3302
3303 struct et131x_adapter *adapter;
3304
3305 adapter = netdev_priv(netdev);
3306 adapter->pdev = pci_dev_get(pdev);
3307 adapter->netdev = netdev;
3308
3309 spin_lock_init(&adapter->tcb_send_qlock);
3310 spin_lock_init(&adapter->tcb_ready_qlock);
3311 spin_lock_init(&adapter->rcv_lock);
3312
3313 adapter->registry_jumbo_packet = 1514; /* 1514-9216 */
3314
3315 ether_addr_copy(adapter->addr, default_mac);
3316
3317 return adapter;
3318}
3319
3320static void et131x_pci_remove(struct pci_dev *pdev)
3321{
3322 struct net_device *netdev = pci_get_drvdata(pdev);
3323 struct et131x_adapter *adapter = netdev_priv(netdev);
3324
3325 unregister_netdev(netdev);
3326 netif_napi_del(&adapter->napi);
3327 phy_disconnect(adapter->phydev);
3328 mdiobus_unregister(adapter->mii_bus);
3329 kfree(adapter->mii_bus->irq);
3330 mdiobus_free(adapter->mii_bus);
3331
3332 et131x_adapter_memory_free(adapter);
3333 iounmap(adapter->regs);
3334 pci_dev_put(pdev);
3335
3336 free_netdev(netdev);
3337 pci_release_regions(pdev);
3338 pci_disable_device(pdev);
3339}
3340
3341static void et131x_up(struct net_device *netdev)
3342{
3343 struct et131x_adapter *adapter = netdev_priv(netdev);
3344
3345 et131x_enable_txrx(netdev);
3346 phy_start(adapter->phydev);
3347}
3348
3349static void et131x_down(struct net_device *netdev)
3350{
3351 struct et131x_adapter *adapter = netdev_priv(netdev);
3352
3353 /* Save the timestamp for the TX watchdog, to prevent a timeout */
3354 netdev->trans_start = jiffies;
3355
3356 phy_stop(adapter->phydev);
3357 et131x_disable_txrx(netdev);
3358}
3359
3360#ifdef CONFIG_PM_SLEEP
3361static int et131x_suspend(struct device *dev)
3362{
3363 struct pci_dev *pdev = to_pci_dev(dev);
3364 struct net_device *netdev = pci_get_drvdata(pdev);
3365
3366 if (netif_running(netdev)) {
3367 netif_device_detach(netdev);
3368 et131x_down(netdev);
3369 pci_save_state(pdev);
3370 }
3371
3372 return 0;
3373}
3374
3375static int et131x_resume(struct device *dev)
3376{
3377 struct pci_dev *pdev = to_pci_dev(dev);
3378 struct net_device *netdev = pci_get_drvdata(pdev);
3379
3380 if (netif_running(netdev)) {
3381 pci_restore_state(pdev);
3382 et131x_up(netdev);
3383 netif_device_attach(netdev);
3384 }
3385
3386 return 0;
3387}
3388#endif
3389
3390static SIMPLE_DEV_PM_OPS(et131x_pm_ops, et131x_suspend, et131x_resume);
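/* SIMPLE_DEV_PM_OPS wires the callbacks above into the system-sleep
 * hooks only; when CONFIG_PM_SLEEP is off the macro expands to an ops
 * table with no callbacks, matching the #ifdef guard around them.
 */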
3391
3392static irqreturn_t et131x_isr(int irq, void *dev_id)
3393{
3394 bool handled = true;
3395 bool enable_interrupts = true;
3396 struct net_device *netdev = dev_id;
3397 struct et131x_adapter *adapter = netdev_priv(netdev);
3398 struct address_map __iomem *iomem = adapter->regs;
3399 struct rx_ring *rx_ring = &adapter->rx_ring;
3400 struct tx_ring *tx_ring = &adapter->tx_ring;
3401 u32 status;
3402
3403 if (!netif_device_present(netdev)) {
3404 handled = false;
3405 enable_interrupts = false;
3406 goto out;
3407 }
3408
3409 et131x_disable_interrupts(adapter);
3410
3411 status = readl(&adapter->regs->global.int_status);
3412
3413 if (adapter->flow == FLOW_TXONLY || adapter->flow == FLOW_BOTH)
3414 status &= ~INT_MASK_ENABLE;
3415 else
3416 status &= ~INT_MASK_ENABLE_NO_FLOW;
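 /* Judging from the free-buffer handling further down, the narrower
 * INT_MASK_ENABLE presumably leaves the ring-low interrupts unmasked
 * when TX flow control is in use, so a pause frame can be requested
 * before the free buffer rings run dry.
 */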
3417
3418 /* Make sure this is our interrupt */
3419 if (!status) {
3420 handled = false;
3421 et131x_enable_interrupts(adapter);
3422 goto out;
3423 }
3424
3425 /* This is our interrupt, so process accordingly */
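 /* The watchdog path below reaps stalled work: a send-queue head TCB
 * seen stale on two consecutive expiries is treated as a pending TX
 * completion, outstanding receives as a pending RX transfer-done,
 * and with nothing in flight the watchdog timer is simply disarmed.
 */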
3426 if (status & ET_INTR_WATCHDOG) {
3427 struct tcb *tcb = tx_ring->send_head;
3428
3429 if (tcb)
3430 if (++tcb->stale > 1)
3431 status |= ET_INTR_TXDMA_ISR;
3432
3433 if (rx_ring->unfinished_receives)
3434 status |= ET_INTR_RXDMA_XFR_DONE;
3435 else if (tcb == NULL)
3436 writel(0, &adapter->regs->global.watchdog_timer);
3437
3438 status &= ~ET_INTR_WATCHDOG;
3439 }
3440
3441 if (status & (ET_INTR_RXDMA_XFR_DONE | ET_INTR_TXDMA_ISR)) {
3442 enable_interrupts = false;
3443 napi_schedule(&adapter->napi);
3444 }
3445
3446 status &= ~(ET_INTR_TXDMA_ISR | ET_INTR_RXDMA_XFR_DONE);
3447
3448 if (!status)
3449 goto out;
3450
3451 if (status & ET_INTR_TXDMA_ERR) {
3452 /* Following read also clears the register (COR) */
3453 u32 txdma_err = readl(&iomem->txdma.tx_dma_error);
3454
3455 dev_warn(&adapter->pdev->dev,
3456 "TXDMA_ERR interrupt, error = %d\n",
3457 txdma_err);
3458 }
3459
3460 if (status & (ET_INTR_RXDMA_FB_R0_LOW | ET_INTR_RXDMA_FB_R1_LOW)) {
3461 /* This indicates that the number of unused buffers in RXDMA free
3462 * buffer ring 0 or 1 has fallen to or below the programmed limit.
3463 * Free buffer resources need to be returned. Free buffers are consumed as
3464 * packets are passed from the network to the host. The host
3465 * becomes aware of the packets from the contents of the packet
3466 * status ring. This ring is queried when the packet done
3467 * interrupt occurs. Packets are then passed to the OS. When
3468 * the OS is done with the packets the resources can be
3469 * returned to the ET1310 for re-use. This interrupt is one
3470 * method of returning resources.
3471 */
3472
3473 /* If the user has flow control on, then we will
3474 * send a pause packet, otherwise just exit
3475 */
3476 if (adapter->flow == FLOW_TXONLY || adapter->flow == FLOW_BOTH) {
3477 u32 pm_csr;
3478
3479 /* Tell the device to send a pause packet via the back
3480 * pressure register (bp req and bp xon/xoff)
3481 */
3482 pm_csr = readl(&iomem->global.pm_csr);
3483 if (!et1310_in_phy_coma(adapter))
3484 writel(3, &iomem->txmac.bp_ctrl);
3485 }
3486 }
3487
3488 /* Handle Packet Status Ring Low Interrupt */
3489 if (status & ET_INTR_RXDMA_STAT_LOW) {
3490 /* Same idea as with the two Free Buffer Rings. Packets going
3491 * from the network to the host each consume a free buffer
3492 * resource and a packet status resource. These resources are
3493 * passed to the OS. When the OS is done with the resources,
3494 * they need to be returned to the ET1310. This is one method
3495 * of returning the resources.
3496 */
3497 }
3498
3499 if (status & ET_INTR_RXDMA_ERR) {
3500 /* The rxdma_error interrupt is sent when a time-out on a
3501 * request issued by the JAGCore has occurred or a completion is
3502 * returned with an unsuccessful status. In both cases the
3503 * request is considered complete. The JAGCore will
3504 * automatically re-try the request in question. Normally,
3505 * information on events like these is sent to the host using
3506 * the "Advanced Error Reporting" capability. This interrupt is
3507 * another way of getting similar information. The only thing
3508 * required is to clear the interrupt by reading the ISR in the
3509 * global resources. The JAGCore will do a re-try on the
3510 * request. Normally you should never see this interrupt. If
3511 * you start to see this interrupt occurring frequently then
3512 * something bad has occurred. A reset might be the thing to do.
3513 */
3514 /* TRAP();*/
3515
3516 dev_warn(&adapter->pdev->dev, "RxDMA_ERR interrupt, error %x\n",
3517 readl(&iomem->txmac.tx_test));
3518 }
3519
3520 /* Handle the Wake on LAN Event */
3521 if (status & ET_INTR_WOL) {
3522 /* This is a secondary interrupt for wake on LAN. The driver
3523 * should never see this, if it does, something serious is
3524 * wrong.
3525 */
3526 dev_err(&adapter->pdev->dev, "WAKE_ON_LAN interrupt\n");
3527 }
3528
3529 if (status & ET_INTR_TXMAC) {
3530 u32 err = readl(&iomem->txmac.err);
3531
3532 /* When TXMAC generates an interrupt to report one of these
3533 * errors, it usually means it has detected an error in the
3534 * data stream retrieved from the on-chip Tx Q. All of these
3535 * errors are catastrophic, and TXMAC will not be able to
3536 * recover the data when they occur. In a nutshell, the whole
3537 * Tx path will have to be reset and re-configured
3538 * afterwards.
3539 */
3540 dev_warn(&adapter->pdev->dev, "TXMAC interrupt, error 0x%08x\n",
3541 err);
3542
3543 /* If we are debugging, we want to see this error, otherwise we
3544 * just want the device to be reset and continue
3545 */
3546 }
3547
3548 if (status & ET_INTR_RXMAC) {
3549 /* These interrupts are catastrophic to the device, what we need
3550 * to do is disable the interrupts and set the flag to cause us
3551 * to reset so we can solve this issue.
3552 */
3553 dev_warn(&adapter->pdev->dev,
3554 "RXMAC interrupt, error 0x%08x. Requesting reset\n",
3555 readl(&iomem->rxmac.err_reg));
3556
3557 dev_warn(&adapter->pdev->dev,
3558 "Enable 0x%08x, Diag 0x%08x\n",
3559 readl(&iomem->rxmac.ctrl),
3560 readl(&iomem->rxmac.rxq_diag));
3561
3562 /* If we are debugging, we want to see this error, otherwise we
3563 * just want the device to be reset and continue
3564 */
3565 }
3566
3567 if (status & ET_INTR_MAC_STAT) {
3568 /* This means at least one of the un-masked counters in the
3569 * MAC_STAT block has rolled over. Use this to maintain the top,
3570 * software managed bits of the counter(s).
3571 */
3572 et1310_handle_macstat_interrupt(adapter);
3573 }
3574
3575 if (status & ET_INTR_SLV_TIMEOUT) {
3576 /* This means a timeout has occurred on a read or write request
3577 * to one of the JAGCore registers. The Global Resources block
3578 * has terminated the request and on a read request, returned a
3579 * "fake" value. The most likely reasons are: Bad Address or the
3580 * addressed module is in a power-down state and can't respond.
3581 */
3582 }
3583
3584out:
3585 if (enable_interrupts)
3586 et131x_enable_interrupts(adapter);
3587
3588 return IRQ_RETVAL(handled);
3589}
3590
3591static int et131x_poll(struct napi_struct *napi, int budget)
3592{
3593 struct et131x_adapter *adapter =
3594 container_of(napi, struct et131x_adapter, napi);
3595 int work_done = et131x_handle_recv_pkts(adapter, budget);
3596
3597 et131x_handle_send_pkts(adapter);
3598
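 /* Per the NAPI contract, consuming less than the budget means RX
 * work is complete: leave polling mode and re-enable the device
 * interrupt so the ISR can schedule us again.
 */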
3599 if (work_done < budget) {
3600 napi_complete(&adapter->napi);
3601 et131x_enable_interrupts(adapter);
3602 }
3603
3604 return work_done;
3605}
3606
3607/* et131x_stats - Return the current device statistics */
3608static struct net_device_stats *et131x_stats(struct net_device *netdev)
3609{
3610 struct et131x_adapter *adapter = netdev_priv(netdev);
3611 struct net_device_stats *stats = &adapter->netdev->stats;
3612 struct ce_stats *devstat = &adapter->stats;
3613
3614 stats->rx_errors = devstat->rx_length_errs +
3615 devstat->rx_align_errs +
3616 devstat->rx_crc_errs +
3617 devstat->rx_code_violations +
3618 devstat->rx_other_errs;
3619 stats->tx_errors = devstat->tx_max_pkt_errs;
3620 stats->multicast = devstat->multicast_pkts_rcvd;
3621 stats->collisions = devstat->tx_collisions;
3622
3623 stats->rx_length_errors = devstat->rx_length_errs;
3624 stats->rx_over_errors = devstat->rx_overflows;
3625 stats->rx_crc_errors = devstat->rx_crc_errs;
3626 stats->rx_dropped = devstat->rcvd_pkts_dropped;
3627
3628 /* NOTE: Not used, can't find analogous statistics */
3629 /* stats->rx_frame_errors = devstat->; */
3630 /* stats->rx_fifo_errors = devstat->; */
3631 /* stats->rx_missed_errors = devstat->; */
3632
3633 /* stats->tx_aborted_errors = devstat->; */
3634 /* stats->tx_carrier_errors = devstat->; */
3635 /* stats->tx_fifo_errors = devstat->; */
3636 /* stats->tx_heartbeat_errors = devstat->; */
3637 /* stats->tx_window_errors = devstat->; */
3638 return stats;
3639}
3640
3641static int et131x_open(struct net_device *netdev)
3642{
3643 struct et131x_adapter *adapter = netdev_priv(netdev);
3644 struct pci_dev *pdev = adapter->pdev;
3645 unsigned int irq = pdev->irq;
3646 int result;
3647
3648 /* Start the timer to track NIC errors */
3649 init_timer(&adapter->error_timer);
3650 adapter->error_timer.expires = jiffies + TX_ERROR_PERIOD * HZ / 1000;
3651 adapter->error_timer.function = et131x_error_timer_handler;
3652 adapter->error_timer.data = (unsigned long)adapter;
3653 add_timer(&adapter->error_timer);
3654
3655 result = request_irq(irq, et131x_isr,
3656 IRQF_SHARED, netdev->name, netdev);
3657 if (result) {
3658 dev_err(&pdev->dev, "could not register IRQ %d\n", irq);
3659 return result;
3660 }
3661
3662 adapter->flags |= FMP_ADAPTER_INTERRUPT_IN_USE;
3663
3664 napi_enable(&adapter->napi);
3665
3666 et131x_up(netdev);
3667
3668 return result;
3669}
3670
3671static int et131x_close(struct net_device *netdev)
3672{
3673 struct et131x_adapter *adapter = netdev_priv(netdev);
3674
3675 et131x_down(netdev);
3676 napi_disable(&adapter->napi);
3677
3678 adapter->flags &= ~FMP_ADAPTER_INTERRUPT_IN_USE;
3679 free_irq(adapter->pdev->irq, netdev);
3680
3681 /* Stop the error timer */
3682 return del_timer_sync(&adapter->error_timer);
3683}
3684
3685static int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf,
3686 int cmd)
3687{
3688 struct et131x_adapter *adapter = netdev_priv(netdev);
3689
3690 if (!adapter->phydev)
3691 return -EINVAL;
3692
3693 return phy_mii_ioctl(adapter->phydev, reqbuf, cmd);
3694}
3695
3696/* et131x_set_packet_filter - Configures the Rx Packet filtering */
3697static int et131x_set_packet_filter(struct et131x_adapter *adapter)
3698{
3699 int filter = adapter->packet_filter;
3700 u32 ctrl;
3701 u32 pf_ctrl;
3702
3703 ctrl = readl(&adapter->regs->rxmac.ctrl);
3704 pf_ctrl = readl(&adapter->regs->rxmac.pf_ctrl);
3705
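 /* The magic numbers below line up with the rxmac definitions in
 * et131x.h: bit 0x04 of rxmac.ctrl is pkt_filter_disable, and
 * pf_ctrl bit values 1/2/4 are the broadcast/multicast/unicast
 * filter enables (ET_RX_PFCTRL_*_FILTER_ENABLE), hence the ~7 used
 * to clear all three for promiscuous mode.
 */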
3706 /* Default to disabled packet filtering */
3707 ctrl |= 0x04;
3708
3709 /* Set us to be in promiscuous mode so we receive everything, this
3710 * is also true when we get a packet filter of 0
3711 */
3712 if ((filter & ET131X_PACKET_TYPE_PROMISCUOUS) || filter == 0)
3713 pf_ctrl &= ~7; /* Clear filter bits */
3714 else {
3715 /* Set us up with Multicast packet filtering. Three cases are
3716 * possible - (1) we have a multi-cast list, (2) we receive ALL
3717 * multicast entries or (3) we receive none.
3718 */
3719 if (filter & ET131X_PACKET_TYPE_ALL_MULTICAST)
3720 pf_ctrl &= ~2; /* Multicast filter bit */
3721 else {
3722 et1310_setup_device_for_multicast(adapter);
3723 pf_ctrl |= 2;
3724 ctrl &= ~0x04;
3725 }
3726
3727 /* Set us up with Unicast packet filtering */
3728 if (filter & ET131X_PACKET_TYPE_DIRECTED) {
3729 et1310_setup_device_for_unicast(adapter);
3730 pf_ctrl |= 4;
3731 ctrl &= ~0x04;
3732 }
3733
3734 /* Set us up with Broadcast packet filtering */
3735 if (filter & ET131X_PACKET_TYPE_BROADCAST) {
3736 pf_ctrl |= 1; /* Broadcast filter bit */
3737 ctrl &= ~0x04;
3738 } else {
3739 pf_ctrl &= ~1;
3740 }
3741
3742 /* Setup the receive mac configuration registers - Packet
3743 * Filter control + the enable / disable for packet filter
3744 * in the control reg.
3745 */
3746 writel(pf_ctrl, &adapter->regs->rxmac.pf_ctrl);
3747 writel(ctrl, &adapter->regs->rxmac.ctrl);
3748 }
3749 return 0;
3750}
3751
3752static void et131x_multicast(struct net_device *netdev)
3753{
3754 struct et131x_adapter *adapter = netdev_priv(netdev);
3755 int packet_filter;
3756 struct netdev_hw_addr *ha;
3757 int i;
3758
3759 /* Before we modify the platform-independent filter flags, store them
3760 * locally. This allows us to determine if anything's changed and if
3761 * we even need to bother the hardware.
3762 */
3763 packet_filter = adapter->packet_filter;
3764
3765 /* Clear the 'multicast' flag locally; since we only have a single
3766 * flag to represent multicast reception while several multicast
3767 * addresses may be configured, clearing it here and re-setting it
3768 * below is the easiest way to track whether any are in use.
3769 */
3770 packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST;
3771
3772 /* Check the net_device flags and set the device independent flags
3773 * accordingly
3774 */
3775 if (netdev->flags & IFF_PROMISC)
3776 adapter->packet_filter |= ET131X_PACKET_TYPE_PROMISCUOUS;
3777 else
3778 adapter->packet_filter &= ~ET131X_PACKET_TYPE_PROMISCUOUS;
3779
3780 if ((netdev->flags & IFF_ALLMULTI) ||
3781 (netdev_mc_count(netdev) > NIC_MAX_MCAST_LIST))
3782 adapter->packet_filter |= ET131X_PACKET_TYPE_ALL_MULTICAST;
3783
3784 if (netdev_mc_count(netdev) < 1) {
3785 adapter->packet_filter &= ~ET131X_PACKET_TYPE_ALL_MULTICAST;
3786 adapter->packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST;
3787 } else {
3788 adapter->packet_filter |= ET131X_PACKET_TYPE_MULTICAST;
3789 }
3790
3791 /* Set values in the private adapter struct */
3792 i = 0;
3793 netdev_for_each_mc_addr(ha, netdev) {
3794 if (i == NIC_MAX_MCAST_LIST)
3795 break;
3796 ether_addr_copy(adapter->multicast_list[i++], ha->addr);
3797 }
3798 adapter->multicast_addr_count = i;
3799
3800 /* Are the new flags different from the previous ones? If not, then no
3801 * action is required.
3802 *
3803 * NOTE - the local multicast_list is always refreshed above; the
3804 * hardware is only reprogrammed when the filter flags change.
3805 */
3806 if (packet_filter != adapter->packet_filter)
3807 et131x_set_packet_filter(adapter);
3808}
3809
3810static netdev_tx_t et131x_tx(struct sk_buff *skb, struct net_device *netdev)
3811{
3812 struct et131x_adapter *adapter = netdev_priv(netdev);
3813 struct tx_ring *tx_ring = &adapter->tx_ring;
3814
3815 /* stop the queue if it's getting full */
3816 if (tx_ring->used >= NUM_TCB - 1 && !netif_queue_stopped(netdev))
3817 netif_stop_queue(netdev);
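 /* Note that stopping one TCB early leaves headroom for this packet;
 * if the list is nevertheless exhausted below, the frame is dropped
 * rather than returned as NETDEV_TX_BUSY.
 */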
3818
3819 /* Save the timestamp for the TX timeout watchdog */
3820 netdev->trans_start = jiffies;
3821
3822 /* TCB is not available */
3823 if (tx_ring->used >= NUM_TCB)
3824 goto drop_err;
3825
3826 if ((adapter->flags & FMP_ADAPTER_FAIL_SEND_MASK) ||
3827 !netif_carrier_ok(netdev))
3828 goto drop_err;
3829
3830 if (send_packet(skb, adapter))
3831 goto drop_err;
3832
3833 return NETDEV_TX_OK;
3834
3835drop_err:
3836 dev_kfree_skb_any(skb);
3837 adapter->netdev->stats.tx_dropped++;
3838 return NETDEV_TX_OK;
3839}
3840
3841/* et131x_tx_timeout - Timeout handler
3842 *
3843 * The handler called when a Tx request times out. The timeout period is
3844 * specified by the 'watchdog_timeo' element in the net_device structure
3845 * (see et131x_pci_setup() for how this value is set).
3846 */
3847static void et131x_tx_timeout(struct net_device *netdev)
3848{
3849 struct et131x_adapter *adapter = netdev_priv(netdev);
3850 struct tx_ring *tx_ring = &adapter->tx_ring;
3851 struct tcb *tcb;
3852 unsigned long flags;
3853
3854 /* If the device is closed, ignore the timeout */
3855 if (!(adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE))
3856 return;
3857
3858 /* Any nonrecoverable hardware error?
3859 * Checks adapter->flags for any failure in phy reading
3860 */
3861 if (adapter->flags & FMP_ADAPTER_NON_RECOVER_ERROR)
3862 return;
3863
3864 /* Hardware failure? */
3865 if (adapter->flags & FMP_ADAPTER_HARDWARE_ERROR) {
3866 dev_err(&adapter->pdev->dev, "hardware error - reset\n");
3867 return;
3868 }
3869
3870 /* Is send stuck? */
3871 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3872 tcb = tx_ring->send_head;
3873 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3874
3875 if (tcb) {
3876 tcb->count++;
3877
3878 if (tcb->count > NIC_SEND_HANG_THRESHOLD) {
3879 dev_warn(&adapter->pdev->dev,
3880 "Send stuck - reset. tcb->WrIndex %x\n",
3881 tcb->index);
3882
3883 adapter->netdev->stats.tx_errors++;
3884
3885 /* perform reset of tx/rx */
3886 et131x_disable_txrx(netdev);
3887 et131x_enable_txrx(netdev);
3888 }
3889 }
3890}
3891
3892static int et131x_change_mtu(struct net_device *netdev, int new_mtu)
3893{
3894 int result = 0;
3895 struct et131x_adapter *adapter = netdev_priv(netdev);
3896
3897 if (new_mtu < 64 || new_mtu > 9216)
3898 return -EINVAL;
3899
3900 et131x_disable_txrx(netdev);
3901
3902 netdev->mtu = new_mtu;
3903
3904 et131x_adapter_memory_free(adapter);
3905
3906 /* Set the config parameter for Jumbo Packet support (MTU plus the 14-byte Ethernet header) */
3907 adapter->registry_jumbo_packet = new_mtu + 14;
3908 et131x_soft_reset(adapter);
3909
3910 result = et131x_adapter_memory_alloc(adapter);
3911 if (result != 0) {
3912 dev_warn(&adapter->pdev->dev,
3913 "Change MTU failed; couldn't re-alloc DMA memory\n");
3914 return result;
3915 }
3916
3917 et131x_init_send(adapter);
3918 et131x_hwaddr_init(adapter);
3919 ether_addr_copy(netdev->dev_addr, adapter->addr);
3920
3921 /* Init the device with the new settings */
3922 et131x_adapter_setup(adapter);
3923 et131x_enable_txrx(netdev);
3924
3925 return result;
3926}
3927
3928static const struct net_device_ops et131x_netdev_ops = {
3929 .ndo_open = et131x_open,
3930 .ndo_stop = et131x_close,
3931 .ndo_start_xmit = et131x_tx,
3932 .ndo_set_rx_mode = et131x_multicast,
3933 .ndo_tx_timeout = et131x_tx_timeout,
3934 .ndo_change_mtu = et131x_change_mtu,
3935 .ndo_set_mac_address = eth_mac_addr,
3936 .ndo_validate_addr = eth_validate_addr,
3937 .ndo_get_stats = et131x_stats,
3938 .ndo_do_ioctl = et131x_ioctl,
3939};
3940
3941static int et131x_pci_setup(struct pci_dev *pdev,
3942 const struct pci_device_id *ent)
3943{
3944 struct net_device *netdev;
3945 struct et131x_adapter *adapter;
3946 int rc;
3947 int ii;
3948
3949 rc = pci_enable_device(pdev);
3950 if (rc < 0) {
3951 dev_err(&pdev->dev, "pci_enable_device() failed\n");
3952 goto out;
3953 }
3954
3955 /* Perform some basic PCI checks */
3956 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
3957 dev_err(&pdev->dev, "Can't find PCI device's base address\n");
3958 rc = -ENODEV;
3959 goto err_disable;
3960 }
3961
3962 rc = pci_request_regions(pdev, DRIVER_NAME);
3963 if (rc < 0) {
3964 dev_err(&pdev->dev, "Can't get PCI resources\n");
3965 goto err_disable;
3966 }
3967
3968 pci_set_master(pdev);
3969
3970 /* Check the DMA addressing support of this device */
3971 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
3972 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
3973 dev_err(&pdev->dev, "No usable DMA addressing method\n");
3974 rc = -EIO;
3975 goto err_release_res;
3976 }
3977
3978 netdev = alloc_etherdev(sizeof(struct et131x_adapter));
3979 if (!netdev) {
3980 dev_err(&pdev->dev, "Couldn't alloc netdev struct\n");
3981 rc = -ENOMEM;
3982 goto err_release_res;
3983 }
3984
3985 netdev->watchdog_timeo = ET131X_TX_TIMEOUT;
3986 netdev->netdev_ops = &et131x_netdev_ops;
3987
3988 SET_NETDEV_DEV(netdev, &pdev->dev);
3989 netdev->ethtool_ops = &et131x_ethtool_ops;
3990
3991 adapter = et131x_adapter_init(netdev, pdev);
3992
3993 rc = et131x_pci_init(adapter, pdev);
3994 if (rc < 0)
3995 goto err_free_dev;
3996
3997 /* Map the bus-relative registers to system virtual memory */
3998 adapter->regs = pci_ioremap_bar(pdev, 0);
3999 if (!adapter->regs) {
4000 dev_err(&pdev->dev, "Cannot map device registers\n");
4001 rc = -ENOMEM;
4002 goto err_free_dev;
4003 }
4004
4005 /* If Phy COMA mode was enabled when we went down, disable it here. */
4006 writel(ET_PMCSR_INIT, &adapter->regs->global.pm_csr);
4007
4008 et131x_soft_reset(adapter);
4009 et131x_disable_interrupts(adapter);
4010
4011 rc = et131x_adapter_memory_alloc(adapter);
4012 if (rc < 0) {
4013 dev_err(&pdev->dev, "Could not alloc adapter memory (DMA)\n");
4014 goto err_iounmap;
4015 }
4016
4017 et131x_init_send(adapter);
4018
4019 netif_napi_add(netdev, &adapter->napi, et131x_poll, 64);
4020
4021 ether_addr_copy(netdev->dev_addr, adapter->addr);
4022
4023 rc = -ENOMEM;
4024
4025 adapter->mii_bus = mdiobus_alloc();
4026 if (!adapter->mii_bus) {
4027 dev_err(&pdev->dev, "Alloc of mii_bus struct failed\n");
4028 goto err_mem_free;
4029 }
4030
4031 adapter->mii_bus->name = "et131x_eth_mii";
4032 snprintf(adapter->mii_bus->id, MII_BUS_ID_SIZE, "%x",
4033 (adapter->pdev->bus->number << 8) | adapter->pdev->devfn);
4034 adapter->mii_bus->priv = netdev;
4035 adapter->mii_bus->read = et131x_mdio_read;
4036 adapter->mii_bus->write = et131x_mdio_write;
4037 adapter->mii_bus->irq = kmalloc_array(PHY_MAX_ADDR, sizeof(int),
4038 GFP_KERNEL);
4039 if (!adapter->mii_bus->irq)
4040 goto err_mdio_free;
4041
4042 for (ii = 0; ii < PHY_MAX_ADDR; ii++)
4043 adapter->mii_bus->irq[ii] = PHY_POLL;
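 /* Marking every address PHY_POLL tells phylib no interrupt line is
 * available, so link state is polled by the phy state machine.
 */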
4044
4045 rc = mdiobus_register(adapter->mii_bus);
4046 if (rc < 0) {
4047 dev_err(&pdev->dev, "failed to register MII bus\n");
4048 goto err_mdio_free_irq;
4049 }
4050
4051 rc = et131x_mii_probe(netdev);
4052 if (rc < 0) {
4053 dev_err(&pdev->dev, "failed to probe MII bus\n");
4054 goto err_mdio_unregister;
4055 }
4056
4057 et131x_adapter_setup(adapter);
4058
4059 /* Init variable for counting how long we do not have link status */
4060 adapter->boot_coma = 0;
4061 et1310_disable_phy_coma(adapter);
4062
4063 /* Interrupts could be enabled at this point
4064 *
4065 * NOTE - Because registration of the interrupt handler is done in
4066 * the device's open(), enabling device interrupts is deferred to
4067 * that point
4068 */
4069
4070 rc = register_netdev(netdev);
4071 if (rc < 0) {
4072 dev_err(&pdev->dev, "register_netdev() failed\n");
4073 goto err_phy_disconnect;
4074 }
4075
4076 /* Stash the net_device in the PCI device's driver data so that the
4077 * remove and suspend/resume paths can retrieve it later via
4078 * pci_get_drvdata().
4079 */
4080 pci_set_drvdata(pdev, netdev);
4081out:
4082 return rc;
4083
4084err_phy_disconnect:
4085 phy_disconnect(adapter->phydev);
4086err_mdio_unregister:
4087 mdiobus_unregister(adapter->mii_bus);
4088err_mdio_free_irq:
4089 kfree(adapter->mii_bus->irq);
4090err_mdio_free:
4091 mdiobus_free(adapter->mii_bus);
4092err_mem_free:
4093 et131x_adapter_memory_free(adapter);
4094err_iounmap:
4095 iounmap(adapter->regs);
4096err_free_dev:
4097 pci_dev_put(pdev);
4098 free_netdev(netdev);
4099err_release_res:
4100 pci_release_regions(pdev);
4101err_disable:
4102 pci_disable_device(pdev);
4103 goto out;
4104}
4105
4106static const struct pci_device_id et131x_pci_table[] = {
4107 { PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_GIG), 0UL},
4108 { PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_FAST), 0UL},
4109 { 0,}
4110};
4111MODULE_DEVICE_TABLE(pci, et131x_pci_table);
4112
4113static struct pci_driver et131x_driver = {
4114 .name = DRIVER_NAME,
4115 .id_table = et131x_pci_table,
4116 .probe = et131x_pci_setup,
4117 .remove = et131x_pci_remove,
4118 .driver.pm = &et131x_pm_ops,
4119};
4120
4121module_pci_driver(et131x_driver);
diff --git a/drivers/net/ethernet/agere/et131x.h b/drivers/net/ethernet/agere/et131x.h
new file mode 100644
index 000000000000..be9a11c02526
--- /dev/null
+++ b/drivers/net/ethernet/agere/et131x.h
@@ -0,0 +1,1433 @@
1/* Copyright © 2005 Agere Systems Inc.
2 * All rights reserved.
3 * http://www.agere.com
4 *
5 * SOFTWARE LICENSE
6 *
7 * This software is provided subject to the following terms and conditions,
8 * which you should read carefully before using the software. Using this
9 * software indicates your acceptance of these terms and conditions. If you do
10 * not agree with these terms and conditions, do not use the software.
11 *
12 * Copyright © 2005 Agere Systems Inc.
13 * All rights reserved.
14 *
15 * Redistribution and use in source or binary forms, with or without
16 * modifications, are permitted provided that the following conditions are met:
17 *
18 * . Redistributions of source code must retain the above copyright notice, this
19 * list of conditions and the following Disclaimer as comments in the code as
20 * well as in the documentation and/or other materials provided with the
21 * distribution.
22 *
23 * . Redistributions in binary form must reproduce the above copyright notice,
24 * this list of conditions and the following Disclaimer in the documentation
25 * and/or other materials provided with the distribution.
26 *
27 * . Neither the name of Agere Systems Inc. nor the names of the contributors
28 * may be used to endorse or promote products derived from this software
29 * without specific prior written permission.
30 *
31 * Disclaimer
32 *
33 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
34 * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
35 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
36 * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
37 * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
38 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
39 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
40 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
41 * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
42 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
43 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
44 * DAMAGE.
45 *
46 */
47
48#define DRIVER_NAME "et131x"
49#define DRIVER_VERSION "v2.0"
50
51/* EEPROM registers */
52
53/* LBCIF Register Groups (addressed via 32-bit offsets) */
54#define LBCIF_DWORD0_GROUP 0xAC
55#define LBCIF_DWORD1_GROUP 0xB0
56
57/* LBCIF Registers (addressed via 8-bit offsets) */
58#define LBCIF_ADDRESS_REGISTER 0xAC
59#define LBCIF_DATA_REGISTER 0xB0
60#define LBCIF_CONTROL_REGISTER 0xB1
61#define LBCIF_STATUS_REGISTER 0xB2
62
63/* LBCIF Control Register Bits */
64#define LBCIF_CONTROL_SEQUENTIAL_READ 0x01
65#define LBCIF_CONTROL_PAGE_WRITE 0x02
66#define LBCIF_CONTROL_EEPROM_RELOAD 0x08
67#define LBCIF_CONTROL_TWO_BYTE_ADDR 0x20
68#define LBCIF_CONTROL_I2C_WRITE 0x40
69#define LBCIF_CONTROL_LBCIF_ENABLE 0x80
70
71/* LBCIF Status Register Bits */
72#define LBCIF_STATUS_PHY_QUEUE_AVAIL 0x01
73#define LBCIF_STATUS_I2C_IDLE 0x02
74#define LBCIF_STATUS_ACK_ERROR 0x04
75#define LBCIF_STATUS_GENERAL_ERROR 0x08
76#define LBCIF_STATUS_CHECKSUM_ERROR 0x40
77#define LBCIF_STATUS_EEPROM_PRESENT 0x80
78
79/* START OF GLOBAL REGISTER ADDRESS MAP */
80/* 10bit registers
81 *
82 * Tx queue start address reg in global address map at address 0x0000
83 * tx queue end address reg in global address map at address 0x0004
84 * rx queue start address reg in global address map at address 0x0008
85 * rx queue end address reg in global address map at address 0x000C
86 */
87
88/* structure for power management control status reg in global address map
89 * located at address 0x0010
90 * jagcore_rx_rdy bit 9
91 * jagcore_tx_rdy bit 8
92 * phy_lped_en bit 7
93 * phy_sw_coma bit 6
94 * rxclk_gate bit 5
95 * txclk_gate bit 4
96 * sysclk_gate bit 3
97 * jagcore_rx_en bit 2
98 * jagcore_tx_en bit 1
99 * gigephy_en bit 0
100 */
101#define ET_PM_PHY_SW_COMA 0x40
102#define ET_PMCSR_INIT 0x38
103
104/* Interrupt status reg at address 0x0018
105 */
106#define ET_INTR_TXDMA_ISR 0x00000008
107#define ET_INTR_TXDMA_ERR 0x00000010
108#define ET_INTR_RXDMA_XFR_DONE 0x00000020
109#define ET_INTR_RXDMA_FB_R0_LOW 0x00000040
110#define ET_INTR_RXDMA_FB_R1_LOW 0x00000080
111#define ET_INTR_RXDMA_STAT_LOW 0x00000100
112#define ET_INTR_RXDMA_ERR 0x00000200
113#define ET_INTR_WATCHDOG 0x00004000
114#define ET_INTR_WOL 0x00008000
115#define ET_INTR_PHY 0x00010000
116#define ET_INTR_TXMAC 0x00020000
117#define ET_INTR_RXMAC 0x00040000
118#define ET_INTR_MAC_STAT 0x00080000
119#define ET_INTR_SLV_TIMEOUT 0x00100000
120
121/* Interrupt mask register at address 0x001C
122 * Interrupt alias clear mask reg at address 0x0020
123 * Interrupt status alias reg at address 0x0024
124 *
125 * Same masks as above
126 */
127
128/* Software reset reg at address 0x0028
129 * 0: txdma_sw_reset
130 * 1: rxdma_sw_reset
131 * 2: txmac_sw_reset
132 * 3: rxmac_sw_reset
133 * 4: mac_sw_reset
134 * 5: mac_stat_sw_reset
135 * 6: mmc_sw_reset
136 *31: selfclr_disable
137 */
138#define ET_RESET_ALL 0x007F
139
140/* SLV Timer reg at address 0x002C (low 24 bits)
141 */
142
143/* MSI Configuration reg at address 0x0030
144 */
145#define ET_MSI_VECTOR 0x0000001F
146#define ET_MSI_TC 0x00070000
147
148/* Loopback reg located at address 0x0034
149 */
150#define ET_LOOP_MAC 0x00000001
151#define ET_LOOP_DMA 0x00000002
152
153/* GLOBAL Module of JAGCore Address Mapping
154 * Located at address 0x0000
155 */
156struct global_regs { /* Location: */
157 u32 txq_start_addr; /* 0x0000 */
158 u32 txq_end_addr; /* 0x0004 */
159 u32 rxq_start_addr; /* 0x0008 */
160 u32 rxq_end_addr; /* 0x000C */
161 u32 pm_csr; /* 0x0010 */
162 u32 unused; /* 0x0014 */
163 u32 int_status; /* 0x0018 */
164 u32 int_mask; /* 0x001C */
165 u32 int_alias_clr_en; /* 0x0020 */
166 u32 int_status_alias; /* 0x0024 */
167 u32 sw_reset; /* 0x0028 */
168 u32 slv_timer; /* 0x002C */
169 u32 msi_config; /* 0x0030 */
170 u32 loopback; /* 0x0034 */
171 u32 watchdog_timer; /* 0x0038 */
172};
173
174/* START OF TXDMA REGISTER ADDRESS MAP */
175/* txdma control status reg at address 0x1000
176 */
177#define ET_TXDMA_CSR_HALT 0x00000001
178#define ET_TXDMA_DROP_TLP 0x00000002
179#define ET_TXDMA_CACHE_THRS 0x000000F0
180#define ET_TXDMA_CACHE_SHIFT 4
181#define ET_TXDMA_SNGL_EPKT 0x00000100
182#define ET_TXDMA_CLASS 0x00001E00
183
184/* structure for txdma packet ring base address hi reg in txdma address map
185 * located at address 0x1004
186 * Defined earlier (u32)
187 */
188
189/* structure for txdma packet ring base address low reg in txdma address map
190 * located at address 0x1008
191 * Defined earlier (u32)
192 */
193
194/* structure for txdma packet ring number of descriptor reg in txdma address
195 * map. Located at address 0x100C
196 *
197 * 31-10: unused
198 * 9-0: pr ndes
199 */
200#define ET_DMA12_MASK 0x0FFF /* 12 bit mask for DMA12W types */
201#define ET_DMA12_WRAP 0x1000
202#define ET_DMA10_MASK 0x03FF /* 10 bit mask for DMA10W types */
203#define ET_DMA10_WRAP 0x0400
204#define ET_DMA4_MASK 0x000F /* 4 bit mask for DMA4W types */
205#define ET_DMA4_WRAP 0x0010
206
207#define INDEX12(x) ((x) & ET_DMA12_MASK)
208#define INDEX10(x) ((x) & ET_DMA10_MASK)
209#define INDEX4(x) ((x) & ET_DMA4_MASK)
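/* The extra WRAP bit above each index mask lets ring logic tell a
 * full ring from an empty one: for example, advancing a 10-bit index
 * from 0x3FF yields 0x400, whose INDEX10() is 0 but whose
 * ET_DMA10_WRAP bit differs from that of a genuine index 0.
 */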
210
211/* 10bit DMA with wrap
212 * txdma tx queue write address reg in txdma address map at 0x1010
213 * txdma tx queue write address external reg in txdma address map at 0x1014
214 * txdma tx queue read address reg in txdma address map at 0x1018
215 *
216 * u32
217 * txdma status writeback address hi reg in txdma address map at 0x101C
218 * txdma status writeback address lo reg in txdma address map at 0x1020
219 *
220 * 10bit DMA with wrap
221 * txdma service request reg in txdma address map at 0x1024
222 * structure for txdma service complete reg in txdma address map at 0x1028
223 *
224 * 4bit DMA with wrap
225 * txdma tx descriptor cache read index reg in txdma address map at 0x102C
226 * txdma tx descriptor cache write index reg in txdma address map at 0x1030
227 *
228 * txdma error reg in txdma address map at address 0x1034
229 * 0: PyldResend
230 * 1: PyldRewind
231 * 4: DescrResend
232 * 5: DescrRewind
233 * 8: WrbkResend
234 * 9: WrbkRewind
235 */
236
237/* Tx DMA Module of JAGCore Address Mapping
238 * Located at address 0x1000
239 */
240struct txdma_regs { /* Location: */
241 u32 csr; /* 0x1000 */
242 u32 pr_base_hi; /* 0x1004 */
243 u32 pr_base_lo; /* 0x1008 */
244 u32 pr_num_des; /* 0x100C */
245 u32 txq_wr_addr; /* 0x1010 */
246 u32 txq_wr_addr_ext; /* 0x1014 */
247 u32 txq_rd_addr; /* 0x1018 */
248 u32 dma_wb_base_hi; /* 0x101C */
249 u32 dma_wb_base_lo; /* 0x1020 */
250 u32 service_request; /* 0x1024 */
251 u32 service_complete; /* 0x1028 */
252 u32 cache_rd_index; /* 0x102C */
253 u32 cache_wr_index; /* 0x1030 */
254 u32 tx_dma_error; /* 0x1034 */
255 u32 desc_abort_cnt; /* 0x1038 */
256 u32 payload_abort_cnt; /* 0x103c */
257 u32 writeback_abort_cnt; /* 0x1040 */
258 u32 desc_timeout_cnt; /* 0x1044 */
259 u32 payload_timeout_cnt; /* 0x1048 */
260 u32 writeback_timeout_cnt; /* 0x104c */
261 u32 desc_error_cnt; /* 0x1050 */
262 u32 payload_error_cnt; /* 0x1054 */
263 u32 writeback_error_cnt; /* 0x1058 */
264 u32 dropped_tlp_cnt; /* 0x105c */
265 u32 new_service_complete; /* 0x1060 */
266 u32 ethernet_packet_cnt; /* 0x1064 */
267};
268
269/* END OF TXDMA REGISTER ADDRESS MAP */
270
271/* START OF RXDMA REGISTER ADDRESS MAP */
272/* structure for control status reg in rxdma address map
273 * Located at address 0x2000
274 *
275 * CSR
276 * 0: halt
277 * 1-3: tc
278 * 4: fbr_big_endian
279 * 5: psr_big_endian
280 * 6: pkt_big_endian
281 * 7: dma_big_endian
282 * 8-9: fbr0_size
283 * 10: fbr0_enable
284 * 11-12: fbr1_size
285 * 13: fbr1_enable
286 * 14: unused
287 * 15: pkt_drop_disable
288 * 16: pkt_done_flush
289 * 17: halt_status
290 * 18-31: unused
291 */
292#define ET_RXDMA_CSR_HALT 0x0001
293#define ET_RXDMA_CSR_FBR0_SIZE_LO 0x0100
294#define ET_RXDMA_CSR_FBR0_SIZE_HI 0x0200
295#define ET_RXDMA_CSR_FBR0_ENABLE 0x0400
296#define ET_RXDMA_CSR_FBR1_SIZE_LO 0x0800
297#define ET_RXDMA_CSR_FBR1_SIZE_HI 0x1000
298#define ET_RXDMA_CSR_FBR1_ENABLE 0x2000
299#define ET_RXDMA_CSR_HALT_STATUS 0x00020000
300
301/* structure for dma writeback lo reg in rxdma address map
302 * located at address 0x2004
303 * Defined earlier (u32)
304 */
305
306/* structure for dma writeback hi reg in rxdma address map
307 * located at address 0x2008
308 * Defined earlier (u32)
309 */
310
311/* structure for number of packets done reg in rxdma address map
312 * located at address 0x200C
313 *
314 * 31-8: unused
315 * 7-0: num done
316 */
317
318/* structure for max packet time reg in rxdma address map
319 * located at address 0x2010
320 *
321 * 31-18: unused
322 * 17-0: time done
323 */
324
325/* structure for rx queue read address reg in rxdma address map
326 * located at address 0x2014
327 * Defined earlier (u32)
328 */
329
330/* structure for rx queue read address external reg in rxdma address map
331 * located at address 0x2018
332 * Defined earlier (u32)
333 */
334
335/* structure for rx queue write address reg in rxdma address map
336 * located at address 0x201C
337 * Defined earlier (u32)
338 */
339
340/* structure for packet status ring base address lo reg in rxdma address map
341 * located at address 0x2020
342 * Defined earlier (u32)
343 */
344
345/* structure for packet status ring base address hi reg in rxdma address map
346 * located at address 0x2024
347 * Defined earlier (u32)
348 */
349
350/* structure for packet status ring number of descriptors reg in rxdma address
351 * map. Located at address 0x2028
352 *
353 * 31-12: unused
354 * 11-0: psr ndes
355 */
356#define ET_RXDMA_PSR_NUM_DES_MASK 0xFFF
357
358/* structure for packet status ring available offset reg in rxdma address map
359 * located at address 0x202C
360 *
361 * 31-13: unused
362 * 12: psr avail wrap
363 * 11-0: psr avail
364 */
365
366/* structure for packet status ring full offset reg in rxdma address map
367 * located at address 0x2030
368 *
369 * 31-13: unused
370 * 12: psr full wrap
371 * 11-0: psr full
372 */
373
374/* structure for packet status ring access index reg in rxdma address map
375 * located at address 0x2034
376 *
377 * 31-5: unused
378 * 4-0: psr_ai
379 */
380
381/* structure for packet status ring minimum descriptors reg in rxdma address
382 * map. Located at address 0x2038
383 *
384 * 31-12: unused
385 * 11-0: psr_min
386 */
387
388/* structure for free buffer ring base lo address reg in rxdma address map
389 * located at address 0x203C
390 * Defined earlier (u32)
391 */
392
393/* structure for free buffer ring base hi address reg in rxdma address map
394 * located at address 0x2040
395 * Defined earlier (u32)
396 */
397
398/* structure for free buffer ring number of descriptors reg in rxdma address
399 * map. Located at address 0x2044
400 *
401 * 31-10: unused
402 * 9-0: fbr ndesc
403 */
404
405/* structure for free buffer ring 0 available offset reg in rxdma address map
406 * located at address 0x2048
407 * Defined earlier (u32)
408 */
409
410/* structure for free buffer ring 0 full offset reg in rxdma address map
411 * located at address 0x204C
412 * Defined earlier (u32)
413 */
414
415/* structure for free buffer cache 0 full offset reg in rxdma address map
416 * located at address 0x2050
417 *
418 * 31-5: unused
419 * 4-0: fbc rdi
420 */
421
422/* structure for free buffer ring 0 minimum descriptor reg in rxdma address map
423 * located at address 0x2054
424 *
425 * 31-10: unused
426 * 9-0: fbr min
427 */
428
429/* structure for free buffer ring 1 base address lo reg in rxdma address map
430 * located at address 0x2058 - 0x205C
431 * Defined earlier (RXDMA_FBR_BASE_LO_t and RXDMA_FBR_BASE_HI_t)
432 */
433
434/* structure for free buffer ring 1 number of descriptors reg in rxdma address
435 * map. Located at address 0x2060
436 * Defined earlier (RXDMA_FBR_NUM_DES_t)
437 */
438
439/* structure for free buffer ring 1 available offset reg in rxdma address map
440 * located at address 0x2064
441 * Defined Earlier (RXDMA_FBR_AVAIL_OFFSET_t)
442 */
443
444/* structure for free buffer ring 1 full offset reg in rxdma address map
445 * located at address 0x2068
446 * Defined Earlier (RXDMA_FBR_FULL_OFFSET_t)
447 */
448
449/* structure for free buffer cache 1 read index reg in rxdma address map
450 * located at address 0x206C
451 * Defined Earlier (RXDMA_FBC_RD_INDEX_t)
452 */
453
454/* structure for free buffer ring 1 minimum descriptor reg in rxdma address map
455 * located at address 0x2070
456 * Defined Earlier (RXDMA_FBR_MIN_DES_t)
457 */
458
459/* Rx DMA Module of JAGCore Address Mapping
460 * Located at address 0x2000
461 */
462struct rxdma_regs { /* Location: */
463 u32 csr; /* 0x2000 */
464 u32 dma_wb_base_lo; /* 0x2004 */
465 u32 dma_wb_base_hi; /* 0x2008 */
466 u32 num_pkt_done; /* 0x200C */
467 u32 max_pkt_time; /* 0x2010 */
468 u32 rxq_rd_addr; /* 0x2014 */
469 u32 rxq_rd_addr_ext; /* 0x2018 */
470 u32 rxq_wr_addr; /* 0x201C */
471 u32 psr_base_lo; /* 0x2020 */
472 u32 psr_base_hi; /* 0x2024 */
473 u32 psr_num_des; /* 0x2028 */
474 u32 psr_avail_offset; /* 0x202C */
475 u32 psr_full_offset; /* 0x2030 */
476 u32 psr_access_index; /* 0x2034 */
477 u32 psr_min_des; /* 0x2038 */
478 u32 fbr0_base_lo; /* 0x203C */
479 u32 fbr0_base_hi; /* 0x2040 */
480 u32 fbr0_num_des; /* 0x2044 */
481 u32 fbr0_avail_offset; /* 0x2048 */
482 u32 fbr0_full_offset; /* 0x204C */
483 u32 fbr0_rd_index; /* 0x2050 */
484 u32 fbr0_min_des; /* 0x2054 */
485 u32 fbr1_base_lo; /* 0x2058 */
486 u32 fbr1_base_hi; /* 0x205C */
487 u32 fbr1_num_des; /* 0x2060 */
488 u32 fbr1_avail_offset; /* 0x2064 */
489 u32 fbr1_full_offset; /* 0x2068 */
490 u32 fbr1_rd_index; /* 0x206C */
491 u32 fbr1_min_des; /* 0x2070 */
492};
493
494/* END OF RXDMA REGISTER ADDRESS MAP */
495
496/* START OF TXMAC REGISTER ADDRESS MAP */
497/* structure for control reg in txmac address map
498 * located at address 0x3000
499 *
500 * bits
501 * 31-8: unused
502 * 7: cklseg_disable
503 * 6: ckbcnt_disable
504 * 5: cksegnum
505 * 4: async_disable
506 * 3: fc_disable
507 * 2: mcif_disable
508 * 1: mif_disable
509 * 0: txmac_en
510 */
511#define ET_TX_CTRL_FC_DISABLE 0x0008
512#define ET_TX_CTRL_TXMAC_ENABLE 0x0001
513
514/* structure for shadow pointer reg in txmac address map
515 * located at address 0x3004
516 * 31-27: reserved
517 * 26-16: txq rd ptr
518 * 15-11: reserved
519 * 10-0: txq wr ptr
520 */
521
522/* structure for error count reg in txmac address map
523 * located at address 0x3008
524 *
525 * 31-12: unused
526 * 11-8: reserved
527 * 7-4: txq_underrun
528 * 3-0: fifo_underrun
529 */
530
531/* structure for max fill reg in txmac address map
532 * located at address 0x300C
533 * 31-12: unused
534 * 11-0: max fill
535 */
536
537/* structure for cf parameter reg in txmac address map
538 * located at address 0x3010
539 * 31-16: cfep
540 * 15-0: cfpt
541 */
542
543/* structure for tx test reg in txmac address map
544 * located at address 0x3014
545 * 31-17: unused
546 * 16: reserved
547 * 15: txtest_en
548 * 14-11: unused
549 * 10-0: txq test pointer
550 */
551
552/* structure for error reg in txmac address map
553 * located at address 0x3018
554 *
555 * 31-9: unused
556 * 8: fifo_underrun
557 * 7-6: unused
558 * 5: ctrl2_err
559 * 4: txq_underrun
560 * 3: bcnt_err
561 * 2: lseg_err
562 * 1: segnum_err
563 * 0: seg0_err
564 */
565
566/* structure for error interrupt reg in txmac address map
567 * located at address 0x301C
568 *
569 * 31-9: unused
570 * 8: fifo_underrun
571 * 7-6: unused
572 * 5: ctrl2_err
573 * 4: txq_underrun
574 * 3: bcnt_err
575 * 2: lseg_err
576 * 1: segnum_err
577 * 0: seg0_err
578 */
579
580/* structure for back pressure control reg in txmac address map
581 * located at address 0x3020
582 *
583 * 31-2: unused
584 * 1: bp_req
585 * 0: bp_xonxoff
586 */
587
588/* Tx MAC Module of JAGCore Address Mapping
589 */
590struct txmac_regs { /* Location: */
591 u32 ctl; /* 0x3000 */
592 u32 shadow_ptr; /* 0x3004 */
593 u32 err_cnt; /* 0x3008 */
594 u32 max_fill; /* 0x300C */
595 u32 cf_param; /* 0x3010 */
596 u32 tx_test; /* 0x3014 */
597 u32 err; /* 0x3018 */
598 u32 err_int; /* 0x301C */
599 u32 bp_ctrl; /* 0x3020 */
600};
601
602/* END OF TXMAC REGISTER ADDRESS MAP */
603
604/* START OF RXMAC REGISTER ADDRESS MAP */
605
606/* structure for rxmac control reg in rxmac address map
607 * located at address 0x4000
608 *
609 * 31-7: reserved
610 * 6: rxmac_int_disable
611 * 5: async_disable
612 * 4: mif_disable
613 * 3: wol_disable
614 * 2: pkt_filter_disable
615 * 1: mcif_disable
616 * 0: rxmac_en
617 */
618#define ET_RX_CTRL_WOL_DISABLE 0x0008
619#define ET_RX_CTRL_RXMAC_ENABLE 0x0001
620
621/* structure for Wake On Lan Control and CRC 0 reg in rxmac address map
622 * located at address 0x4004
623 * 31-16: crc
624 * 15-12: reserved
625 * 11: ignore_pp
626 * 10: ignore_mp
627 * 9: clr_intr
628 * 8: ignore_link_chg
629 * 7: ignore_uni
630 * 6: ignore_multi
631 * 5: ignore_broad
632 * 4-0: valid_crc 4-0
633 */
634
635/* structure for CRC 1 and CRC 2 reg in rxmac address map
636 * located at address 0x4008
637 *
638 * 31-16: crc2
639 * 15-0: crc1
640 */
641
642/* structure for CRC 3 and CRC 4 reg in rxmac address map
643 * located at address 0x400C
644 *
645 * 31-16: crc4
646 * 15-0: crc3
647 */
648
649/* structure for Wake On Lan Source Address Lo reg in rxmac address map
650 * located at address 0x4010
651 *
652 * 31-24: sa3
653 * 23-16: sa4
654 * 15-8: sa5
655 * 7-0: sa6
656 */
657#define ET_RX_WOL_LO_SA3_SHIFT 24
658#define ET_RX_WOL_LO_SA4_SHIFT 16
659#define ET_RX_WOL_LO_SA5_SHIFT 8
660
661/* structure for Wake On Lan Source Address Hi reg in rxmac address map
662 * located at address 0x4014
663 *
664 * 31-16: reserved
665 * 15-8: sa1
666 * 7-0: sa2
667 */
668#define ET_RX_WOL_HI_SA1_SHIFT 8
669
670/* structure for Wake On Lan mask reg in rxmac address map
671 * located at address 0x4018 - 0x4064
672 * Defined earlier (u32)
673 */
674
675/* structure for Unicast Packet Filter Address 1 reg in rxmac address map
676 * located at address 0x4068
677 *
678 * 31-24: addr1_3
679 * 23-16: addr1_4
680 * 15-8: addr1_5
681 * 7-0: addr1_6
682 */
683#define ET_RX_UNI_PF_ADDR1_3_SHIFT 24
684#define ET_RX_UNI_PF_ADDR1_4_SHIFT 16
685#define ET_RX_UNI_PF_ADDR1_5_SHIFT 8
686
687/* structure for Unicast Packet Filter Address 2 reg in rxmac address map
688 * located at address 0x406C
689 *
690 * 31-24: addr2_3
691 * 23-16: addr2_4
692 * 15-8: addr2_5
693 * 7-0: addr2_6
694 */
695#define ET_RX_UNI_PF_ADDR2_3_SHIFT 24
696#define ET_RX_UNI_PF_ADDR2_4_SHIFT 16
697#define ET_RX_UNI_PF_ADDR2_5_SHIFT 8
698
699/* structure for Unicast Packet Filter Address 1 & 2 reg in rxmac address map
700 * located at address 0x4070
701 *
702 * 31-24: addr2_1
703 * 23-16: addr2_2
704 * 15-8: addr1_1
705 * 7-0: addr1_2
706 */
707#define ET_RX_UNI_PF_ADDR2_1_SHIFT 24
708#define ET_RX_UNI_PF_ADDR2_2_SHIFT 16
709#define ET_RX_UNI_PF_ADDR1_1_SHIFT 8
710
711/* structure for Multicast Hash reg in rxmac address map
712 * located at address 0x4074 - 0x4080
713 * Defined earlier (u32)
714 */
715
716/* structure for Packet Filter Control reg in rxmac address map
717 * located at address 0x4084
718 *
719 * 31-23: unused
720 * 22-16: min_pkt_size
721 * 15-4: unused
722 * 3: filter_frag_en
723 * 2: filter_uni_en
724 * 1: filter_multi_en
725 * 0: filter_broad_en
726 */
727#define ET_RX_PFCTRL_MIN_PKT_SZ_SHIFT 16
728#define ET_RX_PFCTRL_FRAG_FILTER_ENABLE 0x0008
729#define ET_RX_PFCTRL_UNICST_FILTER_ENABLE 0x0004
730#define ET_RX_PFCTRL_MLTCST_FILTER_ENABLE 0x0002
731#define ET_RX_PFCTRL_BRDCST_FILTER_ENABLE 0x0001
732
733/* structure for Memory Controller Interface Control Max Segment reg in rxmac
734 * address map. Located at address 0x4088
735 *
736 * 31-10: reserved
737 * 9-2: max_size
738 * 1: fc_en
739 * 0: seg_en
740 */
741#define ET_RX_MCIF_CTRL_MAX_SEG_SIZE_SHIFT 2
742#define ET_RX_MCIF_CTRL_MAX_SEG_FC_ENABLE 0x0002
743#define ET_RX_MCIF_CTRL_MAX_SEG_ENABLE 0x0001
744
745/* structure for Memory Controller Interface Water Mark reg in rxmac address
746 * map. Located at address 0x408C
747 *
748 * 31-26: unused
749 * 25-16: mark_hi
750 * 15-10: unused
751 * 9-0: mark_lo
752 */
753
754/* structure for Rx Queue Diag reg in rxmac address map.
755 * located at address 0x4090
756 *
757 * 31-26: reserved
758 * 25-16: rd_ptr
759 * 15-10: reserved
760 * 9-0: wr_ptr
761 */
762
763/* structure for space available reg in rxmac address map.
764 * located at address 0x4094
765 *
766 * 31-17: reserved
767 * 16: space_avail_en
768 * 15-10: reserved
769 * 9-0: space_avail
770 */
771
772/* structure for management interface reg in rxmac address map.
773 * located at address 0x4098
774 *
775 * 31-18: reserved
776 * 17: drop_pkt_en
777 * 16-0: drop_pkt_mask
778 */
779
780/* structure for Error reg in rxmac address map.
781 * located at address 0x409C
782 *
783 * 31-4: unused
784 * 3: mif
785 * 2: async
786 * 1: pkt_filter
787 * 0: mcif
788 */
789
790/* Rx MAC Module of JAGCore Address Mapping
791 */
792struct rxmac_regs { /* Location: */
793 u32 ctrl; /* 0x4000 */
794 u32 crc0; /* 0x4004 */
795 u32 crc12; /* 0x4008 */
796 u32 crc34; /* 0x400C */
797 u32 sa_lo; /* 0x4010 */
798 u32 sa_hi; /* 0x4014 */
799 u32 mask0_word0; /* 0x4018 */
800 u32 mask0_word1; /* 0x401C */
801 u32 mask0_word2; /* 0x4020 */
802 u32 mask0_word3; /* 0x4024 */
803 u32 mask1_word0; /* 0x4028 */
804 u32 mask1_word1; /* 0x402C */
805 u32 mask1_word2; /* 0x4030 */
806 u32 mask1_word3; /* 0x4034 */
807 u32 mask2_word0; /* 0x4038 */
808 u32 mask2_word1; /* 0x403C */
809 u32 mask2_word2; /* 0x4040 */
810 u32 mask2_word3; /* 0x4044 */
811 u32 mask3_word0; /* 0x4048 */
812 u32 mask3_word1; /* 0x404C */
813 u32 mask3_word2; /* 0x4050 */
814 u32 mask3_word3; /* 0x4054 */
815 u32 mask4_word0; /* 0x4058 */
816 u32 mask4_word1; /* 0x405C */
817 u32 mask4_word2; /* 0x4060 */
818 u32 mask4_word3; /* 0x4064 */
819 u32 uni_pf_addr1; /* 0x4068 */
820 u32 uni_pf_addr2; /* 0x406C */
821 u32 uni_pf_addr3; /* 0x4070 */
822 u32 multi_hash1; /* 0x4074 */
823 u32 multi_hash2; /* 0x4078 */
824 u32 multi_hash3; /* 0x407C */
825 u32 multi_hash4; /* 0x4080 */
826 u32 pf_ctrl; /* 0x4084 */
827 u32 mcif_ctrl_max_seg; /* 0x4088 */
828 u32 mcif_water_mark; /* 0x408C */
829 u32 rxq_diag; /* 0x4090 */
830 u32 space_avail; /* 0x4094 */
831
832 u32 mif_ctrl; /* 0x4098 */
833 u32 err_reg; /* 0x409C */
834};
835
836/* END OF RXMAC REGISTER ADDRESS MAP */
837
838/* START OF MAC REGISTER ADDRESS MAP */
839/* structure for configuration #1 reg in mac address map.
840 * located at address 0x5000
841 *
842 * 31: soft reset
843 * 30: sim reset
844 * 29-20: reserved
845 * 19: reset rx mc
846 * 18: reset tx mc
847 * 17: reset rx func
848 * 16: reset tx fnc
849 * 15-9: reserved
850 * 8: loopback
851 * 7-6: reserved
852 * 5: rx flow
853 * 4: tx flow
854 * 3: syncd rx en
855 * 2: rx enable
856 * 1: syncd tx en
857 * 0: tx enable
858 */
859#define ET_MAC_CFG1_SOFT_RESET 0x80000000
860#define ET_MAC_CFG1_SIM_RESET 0x40000000
861#define ET_MAC_CFG1_RESET_RXMC 0x00080000
862#define ET_MAC_CFG1_RESET_TXMC 0x00040000
863#define ET_MAC_CFG1_RESET_RXFUNC 0x00020000
864#define ET_MAC_CFG1_RESET_TXFUNC 0x00010000
865#define ET_MAC_CFG1_LOOPBACK 0x00000100
866#define ET_MAC_CFG1_RX_FLOW 0x00000020
867#define ET_MAC_CFG1_TX_FLOW 0x00000010
868#define ET_MAC_CFG1_RX_ENABLE 0x00000004
869#define ET_MAC_CFG1_TX_ENABLE 0x00000001
870#define ET_MAC_CFG1_WAIT 0x0000000A /* RX & TX syncd */
871
872/* structure for configuration #2 reg in mac address map.
873 * located at address 0x5004
874 * 31-16: reserved
875 * 15-12: preamble
876 * 11-10: reserved
877 * 9-8: if mode
878 * 7-6: reserved
879 * 5: huge frame
880 * 4: length check
881 * 3: undefined
882 * 2: pad crc
883 * 1: crc enable
884 * 0: full duplex
885 */
886#define ET_MAC_CFG2_PREAMBLE_SHIFT 12
887#define ET_MAC_CFG2_IFMODE_MASK 0x0300
888#define ET_MAC_CFG2_IFMODE_1000 0x0200
889#define ET_MAC_CFG2_IFMODE_100 0x0100
890#define ET_MAC_CFG2_IFMODE_HUGE_FRAME 0x0020
891#define ET_MAC_CFG2_IFMODE_LEN_CHECK 0x0010
892#define ET_MAC_CFG2_IFMODE_PAD_CRC 0x0004
893#define ET_MAC_CFG2_IFMODE_CRC_ENABLE 0x0002
894#define ET_MAC_CFG2_IFMODE_FULL_DPLX 0x0001
895
896/* structure for Interpacket gap reg in mac address map.
897 * located at address 0x5008
898 *
899 * 31: reserved
900 * 30-24: non B2B ipg 1
901 * 23: undefined
902 * 22-16: non B2B ipg 2
903 * 15-8: Min ifg enforce
904 * 7-0: B2B ipg
905 *
906 * structure for half duplex reg in mac address map.
907 * located at address 0x500C
908 * 31-24: reserved
909 * 23-20: Alt BEB trunc
910 * 19: Alt BEB enable
911 * 18: BP no backoff
912 * 17: no backoff
913 * 16: excess defer
914 * 15-12: re-xmit max
915 * 11-10: reserved
916 * 9-0: collision window
917 */
918
919/* structure for Maximum Frame Length reg in mac address map.
920 * located at address 0x5010: bits 0-15 hold the length.
921 */
922
923/* structure for Reserve 1 reg in mac address map.
924 * located at address 0x5014 - 0x5018
925 * Defined earlier (u32)
926 */
927
928/* structure for Test reg in mac address map.
929 * located at address 0x501C
930 * test: bits 0-2, rest unused
931 */
932
933/* structure for MII Management Configuration reg in mac address map.
934 * located at address 0x5020
935 *
936 * 31: reset MII mgmt
937 * 30-6: unused
938 * 5: scan auto increment
939 * 4: preamble suppress
940 * 3: undefined
941 * 2-0: mgmt clock reset
942 */
943#define ET_MAC_MIIMGMT_CLK_RST 0x0007
944
945/* structure for MII Management Command reg in mac address map.
946 * located at address 0x5024
947 * bit 1: scan cycle
948 * bit 0: read cycle
949 */
950
951/* structure for MII Management Address reg in mac address map.
952 * located at address 0x5028
953 * 31-13: reserved
954 * 12-8: phy addr
955 * 7-5: reserved
956 * 4-0: register
957 */
958#define ET_MAC_MII_ADDR(phy, reg) ((phy) << 8 | (reg))
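/* For instance (illustrative values, not from the driver):
 * ET_MAC_MII_ADDR(0x01, 0x00) == 0x0100 selects PHY address 1,
 * register 0 for the next MII management cycle.
 */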
959
960/* structure for MII Management Control reg in mac address map.
961 * located at address 0x502C
962 * 31-16: reserved
963 * 15-0: phy control
964 */
965
/* structure for MII Management Status reg in mac address map.
 * located at address 0x5030
 * 31-16: reserved
 * 15-0: phy status
 */
#define ET_MAC_MIIMGMT_STAT_PHYCRTL_MASK	0xFFFF

/* structure for MII Management Indicators reg in mac address map.
 * located at address 0x5034
 * 31-3: reserved
 * 2: not valid
 * 1: scanning
 * 0: busy
 */
#define ET_MAC_MGMT_BUSY	0x00000001	/* busy */
#define ET_MAC_MGMT_WAIT	0x00000005	/* busy | not valid */
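
/* Illustrative sketch of an MII read through this block (hedged; the real
 * driver may sequence this differently): write the phy/register pair,
 * start a read cycle, then poll the indicator register until the busy and
 * not-valid bits clear:
 *
 *	writel(ET_MAC_MII_ADDR(phy_addr, reg), base + 0x5028);
 *	writel(0x1, base + 0x5024);
 *	while (readl(base + 0x5034) & ET_MAC_MGMT_WAIT)
 *		cpu_relax();
 *	val = readl(base + 0x5030) & ET_MAC_MIIMGMT_STAT_PHYCRTL_MASK;
 *
 * A production implementation would bound the poll loop with a timeout.
 */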

/* structure for Interface Control reg in mac address map.
 * located at address 0x5038
 *
 * 31: reset if module
 * 30-28: reserved
 * 27: tbi mode
 * 26: ghd mode
 * 25: lhd mode
 * 24: phy mode
 * 23: reset per mii
 * 22-17: reserved
 * 16: speed
 * 15: reset pe100x
 * 14-11: reserved
 * 10: force quiet
 * 9: no cipher
 * 8: disable link fail
 * 7: reset gpsi
 * 6-1: reserved
 * 0: enable jabber protection
 */
#define ET_MAC_IFCTRL_GHDMODE	(1 << 26)
#define ET_MAC_IFCTRL_PHYMODE	(1 << 24)

/* structure for Interface Status reg in mac address map.
 * located at address 0x503C
 *
 * 31-10: reserved
 * 9: excess_defer
 * 8: clash
 * 7: phy_jabber
 * 6: phy_link_ok
 * 5: phy_full_duplex
 * 4: phy_speed
 * 3: pe100x_link_fail
 * 2: pe10t_loss_carrier
 * 1: pe10t_sqe_error
 * 0: pe10t_jabber
 */

/* structure for Mac Station Address, Part 1 reg in mac address map.
 * located at address 0x5040
 *
 * 31-24: Octet6
 * 23-16: Octet5
 * 15-8: Octet4
 * 7-0: Octet3
 */
#define ET_MAC_STATION_ADDR1_OC6_SHIFT	24
#define ET_MAC_STATION_ADDR1_OC5_SHIFT	16
#define ET_MAC_STATION_ADDR1_OC4_SHIFT	8

/* structure for Mac Station Address, Part 2 reg in mac address map.
 * located at address 0x5044
 *
 * 31-24: Octet2
 * 23-16: Octet1
 * 15-0: reserved
 */
#define ET_MAC_STATION_ADDR2_OC2_SHIFT	24
#define ET_MAC_STATION_ADDR2_OC1_SHIFT	16
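
/* Illustrative sketch (assuming Octet1 is the first byte of the interface
 * address, a[0]): the six octets pack across the two registers as
 *
 *	addr1 = a[5] << ET_MAC_STATION_ADDR1_OC6_SHIFT |
 *		a[4] << ET_MAC_STATION_ADDR1_OC5_SHIFT |
 *		a[3] << ET_MAC_STATION_ADDR1_OC4_SHIFT | a[2];
 *	addr2 = a[1] << ET_MAC_STATION_ADDR2_OC2_SHIFT |
 *		a[0] << ET_MAC_STATION_ADDR2_OC1_SHIFT;
 */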

/* MAC Module of JAGCore Address Mapping
 */
struct mac_regs {			/* Location: */
	u32 cfg1;			/* 0x5000 */
	u32 cfg2;			/* 0x5004 */
	u32 ipg;			/* 0x5008 */
	u32 hfdp;			/* 0x500C */
	u32 max_fm_len;			/* 0x5010 */
	u32 rsv1;			/* 0x5014 */
	u32 rsv2;			/* 0x5018 */
	u32 mac_test;			/* 0x501C */
	u32 mii_mgmt_cfg;		/* 0x5020 */
	u32 mii_mgmt_cmd;		/* 0x5024 */
	u32 mii_mgmt_addr;		/* 0x5028 */
	u32 mii_mgmt_ctrl;		/* 0x502C */
	u32 mii_mgmt_stat;		/* 0x5030 */
	u32 mii_mgmt_indicator;		/* 0x5034 */
	u32 if_ctrl;			/* 0x5038 */
	u32 if_stat;			/* 0x503C */
	u32 station_addr_1;		/* 0x5040 */
	u32 station_addr_2;		/* 0x5044 */
};
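
/* Because the struct mirrors the MMIO layout one-to-one, registers can be
 * addressed symbolically instead of by raw offset, e.g. (illustrative):
 *
 *	struct mac_regs __iomem *mac = base + 0x5000;
 *	u32 cfg1 = readl(&mac->cfg1);
 */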

/* END OF MAC REGISTER ADDRESS MAP */

/* START OF MAC STAT REGISTER ADDRESS MAP */
/* structure for Carry Register One and its Mask Register, located in the
 * mac stat address map at addresses 0x6130 and 0x6138.
 *
 * 31: tr64
 * 30: tr127
 * 29: tr255
 * 28: tr511
 * 27: tr1k
 * 26: trmax
 * 25: trmgv
 * 24-17: unused
 * 16: rbyt
 * 15: rpkt
 * 14: rfcs
 * 13: rmca
 * 12: rbca
 * 11: rxcf
 * 10: rxpf
 * 9: rxuo
 * 8: raln
 * 7: rflr
 * 6: rcde
 * 5: rcse
 * 4: rund
 * 3: rovr
 * 2: rfrg
 * 1: rjbr
 * 0: rdrp
 */

/* structure for Carry Register Two Mask Register reg in mac stat address map.
 * located at address 0x613C
 *
 * 31-20: unused
 * 19: tjbr
 * 18: tfcs
 * 17: txcf
 * 16: tovr
 * 15: tund
 * 14: trfg
 * 13: tbyt
 * 12: tpkt
 * 11: tmca
 * 10: tbca
 * 9: txpf
 * 8: tdfr
 * 7: tedf
 * 6: tscl
 * 5: tmcl
 * 4: tlcl
 * 3: txcl
 * 2: tncl
 * 1: tpfh
 * 0: tdrp
 */

/* MAC STATS Module of JAGCore Address Mapping
 */
struct macstat_regs {			/* Location: */
	u32 pad[32];			/* 0x6000 - 0x607C */

	/* counters */
	u32 txrx_0_64_byte_frames;	/* 0x6080 */
	u32 txrx_65_127_byte_frames;	/* 0x6084 */
	u32 txrx_128_255_byte_frames;	/* 0x6088 */
	u32 txrx_256_511_byte_frames;	/* 0x608C */
	u32 txrx_512_1023_byte_frames;	/* 0x6090 */
	u32 txrx_1024_1518_byte_frames;	/* 0x6094 */
	u32 txrx_1519_1522_gvln_frames;	/* 0x6098 */
	u32 rx_bytes;			/* 0x609C */
	u32 rx_packets;			/* 0x60A0 */
	u32 rx_fcs_errs;		/* 0x60A4 */
	u32 rx_multicast_packets;	/* 0x60A8 */
	u32 rx_broadcast_packets;	/* 0x60AC */
	u32 rx_control_frames;		/* 0x60B0 */
	u32 rx_pause_frames;		/* 0x60B4 */
	u32 rx_unknown_opcodes;		/* 0x60B8 */
	u32 rx_align_errs;		/* 0x60BC */
	u32 rx_frame_len_errs;		/* 0x60C0 */
	u32 rx_code_errs;		/* 0x60C4 */
	u32 rx_carrier_sense_errs;	/* 0x60C8 */
	u32 rx_undersize_packets;	/* 0x60CC */
	u32 rx_oversize_packets;	/* 0x60D0 */
	u32 rx_fragment_packets;	/* 0x60D4 */
	u32 rx_jabbers;			/* 0x60D8 */
	u32 rx_drops;			/* 0x60DC */
	u32 tx_bytes;			/* 0x60E0 */
	u32 tx_packets;			/* 0x60E4 */
	u32 tx_multicast_packets;	/* 0x60E8 */
	u32 tx_broadcast_packets;	/* 0x60EC */
	u32 tx_pause_frames;		/* 0x60F0 */
	u32 tx_deferred;		/* 0x60F4 */
	u32 tx_excessive_deferred;	/* 0x60F8 */
	u32 tx_single_collisions;	/* 0x60FC */
	u32 tx_multiple_collisions;	/* 0x6100 */
	u32 tx_late_collisions;		/* 0x6104 */
	u32 tx_excessive_collisions;	/* 0x6108 */
	u32 tx_total_collisions;	/* 0x610C */
	u32 tx_pause_honored_frames;	/* 0x6110 */
	u32 tx_drops;			/* 0x6114 */
	u32 tx_jabbers;			/* 0x6118 */
	u32 tx_fcs_errs;		/* 0x611C */
	u32 tx_control_frames;		/* 0x6120 */
	u32 tx_oversize_frames;		/* 0x6124 */
	u32 tx_undersize_frames;	/* 0x6128 */
	u32 tx_fragments;		/* 0x612C */
	u32 carry_reg1;			/* 0x6130 */
	u32 carry_reg2;			/* 0x6134 */
	u32 carry_reg1_mask;		/* 0x6138 */
	u32 carry_reg2_mask;		/* 0x613C */
};

/* END OF MAC STAT REGISTER ADDRESS MAP */

/* START OF MMC REGISTER ADDRESS MAP */
/* Main Memory Controller Control reg in mmc address map.
 * located at address 0x7000
 */
#define ET_MMC_ENABLE		1
#define ET_MMC_ARB_DISABLE	2
#define ET_MMC_RXMAC_DISABLE	4
#define ET_MMC_TXMAC_DISABLE	8
#define ET_MMC_TXDMA_DISABLE	16
#define ET_MMC_RXDMA_DISABLE	32
#define ET_MMC_FORCE_CE		64
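
/* Illustrative sketch: the control word is a plain OR of the flags above,
 * so normal operation is just
 *
 *	writel(ET_MMC_ENABLE, base + 0x7000);
 *
 * while the individual disable bits let reset or diagnostic code gate
 * single clients (rxmac/txmac, rxdma/txdma) of the memory controller.
 */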

/* Main Memory Controller Host Memory Access Address reg in mmc
 * address map. Located at address 0x7004. Top 16 bits hold the address bits
 */
#define ET_SRAM_REQ_ACCESS	1
#define ET_SRAM_WR_ACCESS	2
#define ET_SRAM_IS_CTRL		4

/* structure for Main Memory Controller Host Memory Access Data reg in mmc
 * address map. Located at address 0x7008 - 0x7014
 * Defined earlier (u32)
 */

/* Memory Control Module of JAGCore Address Mapping
 */
struct mmc_regs {		/* Location: */
	u32 mmc_ctrl;		/* 0x7000 */
	u32 sram_access;	/* 0x7004 */
	u32 sram_word1;		/* 0x7008 */
	u32 sram_word2;		/* 0x700C */
	u32 sram_word3;		/* 0x7010 */
	u32 sram_word4;		/* 0x7014 */
};

/* END OF MMC REGISTER ADDRESS MAP */

/* JAGCore Address Mapping
 */
struct address_map {
	struct global_regs global;
	/* unused section of global address map */
	u8 unused_global[4096 - sizeof(struct global_regs)];
	struct txdma_regs txdma;
	/* unused section of txdma address map */
	u8 unused_txdma[4096 - sizeof(struct txdma_regs)];
	struct rxdma_regs rxdma;
	/* unused section of rxdma address map */
	u8 unused_rxdma[4096 - sizeof(struct rxdma_regs)];
	struct txmac_regs txmac;
	/* unused section of txmac address map */
	u8 unused_txmac[4096 - sizeof(struct txmac_regs)];
	struct rxmac_regs rxmac;
	/* unused section of rxmac address map */
	u8 unused_rxmac[4096 - sizeof(struct rxmac_regs)];
	struct mac_regs mac;
	/* unused section of mac address map */
	u8 unused_mac[4096 - sizeof(struct mac_regs)];
	struct macstat_regs macstat;
	/* unused section of mac stat address map */
	u8 unused_mac_stat[4096 - sizeof(struct macstat_regs)];
	struct mmc_regs mmc;
	/* unused section of mmc address map */
	u8 unused_mmc[4096 - sizeof(struct mmc_regs)];
	/* unused section of address map */
	u8 unused_[1015808];
	u8 unused_exp_rom[4096];	/* MGS-size TBD */
	u8 unused__[524288];		/* unused section of address map */
};
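
/* Sanity sketch (illustrative): each block above is padded out to a 4 KB
 * boundary, so the documented base addresses can be checked at build time:
 *
 *	BUILD_BUG_ON(offsetof(struct address_map, mac) != 0x5000);
 *	BUILD_BUG_ON(offsetof(struct address_map, macstat) != 0x6000);
 *	BUILD_BUG_ON(offsetof(struct address_map, mmc) != 0x7000);
 */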

/* Defines for generic MII registers 0x00 -> 0x0F can be found in
 * include/linux/mii.h
 */
/* some defines for modem registers that seem to be 'reserved' */
#define PHY_INDEX_REG		0x10
#define PHY_DATA_REG		0x11
#define PHY_MPHY_CONTROL_REG	0x12

/* defines for specified registers */
#define PHY_LOOPBACK_CONTROL	0x13	/* TRU_VMI_LOOPBACK_CONTROL_1_REG 19 */
					/* TRU_VMI_LOOPBACK_CONTROL_2_REG 20 */
#define PHY_REGISTER_MGMT_CONTROL 0x15	/* TRU_VMI_MI_SEQ_CONTROL_REG 21 */
#define PHY_CONFIG		0x16	/* TRU_VMI_CONFIGURATION_REG 22 */
#define PHY_PHY_CONTROL		0x17	/* TRU_VMI_PHY_CONTROL_REG 23 */
#define PHY_INTERRUPT_MASK	0x18	/* TRU_VMI_INTERRUPT_MASK_REG 24 */
#define PHY_INTERRUPT_STATUS	0x19	/* TRU_VMI_INTERRUPT_STATUS_REG 25 */
#define PHY_PHY_STATUS		0x1A	/* TRU_VMI_PHY_STATUS_REG 26 */
#define PHY_LED_1		0x1B	/* TRU_VMI_LED_CONTROL_1_REG 27 */
#define PHY_LED_2		0x1C	/* TRU_VMI_LED_CONTROL_2_REG 28 */
					/* TRU_VMI_LINK_CONTROL_REG 29 */
					/* TRU_VMI_TIMING_CONTROL_REG */

/* MI Register 10: Gigabit basic mode status reg(Reg 0x0A) */
#define ET_1000BT_MSTR_SLV 0x4000

/* MI Register 16 - 18: Reserved Reg(0x10-0x12) */

/* MI Register 19: Loopback Control Reg(0x13)
 * 15: mii_en
 * 14: pcs_en
 * 13: pmd_en
 * 12: all_digital_en
 * 11: replica_en
 * 10: line_driver_en
 * 9-0: reserved
 */

/* MI Register 20: Reserved Reg(0x14) */

/* MI Register 21: Management Interface Control Reg(0x15)
 * 15-11: reserved
 * 10-4: mi_error_count
 * 3: reserved
 * 2: ignore_10g_fr
 * 1: reserved
 * 0: preamble_suppress_en
 */

/* MI Register 22: PHY Configuration Reg(0x16)
 * 15: crs_tx_en
 * 14: reserved
 * 13-12: tx_fifo_depth
 * 11-10: speed_downshift
 * 9: pbi_detect
 * 8: tbi_rate
 * 7: alternate_np
 * 6: group_mdio_en
 * 5: tx_clock_en
 * 4: sys_clock_en
 * 3: reserved
 * 2-0: mac_if_mode
 */
#define ET_PHY_CONFIG_TX_FIFO_DEPTH	0x3000

#define ET_PHY_CONFIG_FIFO_DEPTH_8	0x0000
#define ET_PHY_CONFIG_FIFO_DEPTH_16	0x1000
#define ET_PHY_CONFIG_FIFO_DEPTH_32	0x2000
#define ET_PHY_CONFIG_FIFO_DEPTH_64	0x3000
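
/* Illustrative sketch (helper names are assumptions, not the driver's
 * verified API): changing the tx FIFO depth is a read-modify-write of
 * MI register 22:
 *
 *	et131x_mii_read(adapter, PHY_CONFIG, &cfg);
 *	cfg &= ~ET_PHY_CONFIG_TX_FIFO_DEPTH;
 *	cfg |= ET_PHY_CONFIG_FIFO_DEPTH_64;
 *	et131x_mii_write(adapter, PHY_CONFIG, cfg);
 */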

/* MI Register 23: PHY CONTROL Reg(0x17)
 * 15: reserved
 * 14: tdr_en
 * 13: reserved
 * 12-11: downshift_attempts
 * 10-6: reserved
 * 5: jabber_10baseT
 * 4: sqe_10baseT
 * 3: tp_loopback_10baseT
 * 2: preamble_gen_en
 * 1: reserved
 * 0: force_int
 */

/* MI Register 24: Interrupt Mask Reg(0x18)
 * 15-10: reserved
 * 9: mdio_sync_lost
 * 8: autoneg_status
 * 7: hi_bit_err
 * 6: np_rx
 * 5: err_counter_full
 * 4: fifo_over_underflow
 * 3: rx_status
 * 2: link_status
 * 1: automatic_speed
 * 0: int_en
 */

/* MI Register 25: Interrupt Status Reg(0x19)
 * 15-10: reserved
 * 9: mdio_sync_lost
 * 8: autoneg_status
 * 7: hi_bit_err
 * 6: np_rx
 * 5: err_counter_full
 * 4: fifo_over_underflow
 * 3: rx_status
 * 2: link_status
 * 1: automatic_speed
 * 0: int_en
 */

/* MI Register 26: PHY Status Reg(0x1A)
 * 15: reserved
 * 14-13: autoneg_fault
 * 12: autoneg_status
 * 11: mdi_x_status
 * 10: polarity_status
 * 9-8: speed_status
 * 7: duplex_status
 * 6: link_status
 * 5: tx_status
 * 4: rx_status
 * 3: collision_status
 * 2: autoneg_en
 * 1: pause_en
 * 0: asymmetric_dir
 */
#define ET_PHY_AUTONEG_STATUS	0x1000
#define ET_PHY_POLARITY_STATUS	0x0400
#define ET_PHY_SPEED_STATUS	0x0300
#define ET_PHY_DUPLEX_STATUS	0x0080
#define ET_PHY_LSTATUS		0x0040
#define ET_PHY_AUTONEG_ENABLE	0x0020
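
/* Illustrative decode of a PHY status read (the speed encoding is an
 * assumption consistent with the two-bit field in bits 9-8):
 *
 *	link   = status & ET_PHY_LSTATUS;
 *	duplex = status & ET_PHY_DUPLEX_STATUS;		full duplex if set
 *	speed  = (status & ET_PHY_SPEED_STATUS) >> 8;	0=10, 1=100, 2=1000
 */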

/* MI Register 27: LED Control Reg 1(0x1B)
 * 15-14: reserved
 * 13-12: led_dup_indicate
 * 11-10: led_10baseT
 * 9-8: led_collision
 * 7-4: reserved
 * 3-2: pulse_dur
 * 1: pulse_stretch1
 * 0: pulse_stretch0
 */

/* MI Register 28: LED Control Reg 2(0x1C)
 * 15-12: led_link
 * 11-8: led_tx_rx
 * 7-4: led_100BaseTX
 * 3-0: led_1000BaseT
 */
#define ET_LED2_LED_LINK	0xF000
#define ET_LED2_LED_TXRX	0x0F00
#define ET_LED2_LED_100TX	0x00F0
#define ET_LED2_LED_1000T	0x000F

/* defines for LED control reg 2 values */
#define LED_VAL_1000BT			0x0
#define LED_VAL_100BTX			0x1
#define LED_VAL_10BT			0x2
#define LED_VAL_1000BT_100BTX		0x3	/* 1000BT on, 100BTX blink */
#define LED_VAL_LINKON			0x4
#define LED_VAL_TX			0x5
#define LED_VAL_RX			0x6
#define LED_VAL_TXRX			0x7	/* TX or RX */
#define LED_VAL_DUPLEXFULL		0x8
#define LED_VAL_COLLISION		0x9
#define LED_VAL_LINKON_ACTIVE		0xA	/* Link on, activity blink */
#define LED_VAL_LINKON_RECV		0xB	/* Link on, receive blink */
#define LED_VAL_DUPLEXFULL_COLLISION	0xC	/* Duplex on, collision blink */
#define LED_VAL_BLINK			0xD
#define LED_VAL_ON			0xE
#define LED_VAL_OFF			0xF

#define LED_LINK_SHIFT			12
#define LED_TXRX_SHIFT			8
#define LED_100TX_SHIFT			4
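
/* Illustrative sketch: programming reg 2 so the link LED shows
 * link-plus-activity and the tx/rx LED blinks on traffic:
 *
 *	led2 &= ~(ET_LED2_LED_LINK | ET_LED2_LED_TXRX);
 *	led2 |= (LED_VAL_LINKON_ACTIVE << LED_LINK_SHIFT) |
 *		(LED_VAL_TXRX << LED_TXRX_SHIFT);
 */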

/* MI Register 29 - 31: Reserved Reg(0x1D - 0x1F) */
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 29b9f082475d..1fcd5568a352 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -878,8 +878,6 @@ static int emac_probe(struct platform_device *pdev)
 	emac_powerup(ndev);
 	emac_reset(db);
 
-	ether_setup(ndev);
-
 	ndev->netdev_ops = &emac_netdev_ops;
 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
 	ndev->ethtool_ops = &emac_ethtool_ops;
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 7330681574d2..fc2d5556b715 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -1433,7 +1433,6 @@ static int altera_tse_probe(struct platform_device *pdev)
 		goto err_free_netdev;
 
 	/* initialize netdev */
-	ether_setup(ndev);
 	ndev->mem_start = control_port->start;
 	ndev->mem_end = control_port->end;
 	ndev->netdev_ops = &altera_tse_netdev_ops;
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 2fee73b878c2..823d01c5684c 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -3236,8 +3236,9 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 
 	skb->protocol = eth_type_trans(skb, bp->dev);
 
-	if ((len > (bp->dev->mtu + ETH_HLEN)) &&
-	    (ntohs(skb->protocol) != 0x8100)) {
+	if (len > (bp->dev->mtu + ETH_HLEN) &&
+	    skb->protocol != htons(0x8100) &&
+	    skb->protocol != htons(ETH_P_8021AD)) {
 
 		dev_kfree_skb(skb);
 		goto next_rx;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 77cb7555e794..d51729c619c0 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1054,7 +1054,8 @@ static int bcmgenet_xmit_frag(struct net_device *dev,
 /* Reallocate the SKB to put enough headroom in front of it and insert
  * the transmit checksum offsets in the descriptors
  */
-static int bcmgenet_put_tx_csum(struct net_device *dev, struct sk_buff *skb)
+static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev,
+					    struct sk_buff *skb)
 {
 	struct status_64 *status = NULL;
 	struct sk_buff *new_skb;
@@ -1072,7 +1073,7 @@ static int bcmgenet_put_tx_csum(struct net_device *dev, struct sk_buff *skb)
 		if (!new_skb) {
 			dev->stats.tx_errors++;
 			dev->stats.tx_dropped++;
-			return -ENOMEM;
+			return NULL;
 		}
 		skb = new_skb;
 	}
@@ -1090,7 +1091,7 @@ static int bcmgenet_put_tx_csum(struct net_device *dev, struct sk_buff *skb)
 		ip_proto = ipv6_hdr(skb)->nexthdr;
 		break;
 	default:
-		return 0;
+		return skb;
 	}
 
 	offset = skb_checksum_start_offset(skb) - sizeof(*status);
@@ -1111,7 +1112,7 @@ static int bcmgenet_put_tx_csum(struct net_device *dev, struct sk_buff *skb)
 		status->tx_csum_info = tx_csum_info;
 	}
 
-	return 0;
+	return skb;
 }
 
 static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -1158,8 +1159,8 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	/* set the SKB transmit checksum */
 	if (priv->desc_64b_en) {
-		ret = bcmgenet_put_tx_csum(dev, skb);
-		if (ret) {
+		skb = bcmgenet_put_tx_csum(dev, skb);
+		if (!skb) {
 			ret = NETDEV_TX_OK;
 			goto out;
 		}
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index e7d3a620d96a..ba499489969a 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -6918,7 +6918,8 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
 	skb->protocol = eth_type_trans(skb, tp->dev);
 
 	if (len > (tp->dev->mtu + ETH_HLEN) &&
-	    skb->protocol != htons(ETH_P_8021Q)) {
+	    skb->protocol != htons(ETH_P_8021Q) &&
+	    skb->protocol != htons(ETH_P_8021AD)) {
 		dev_kfree_skb_any(skb);
 		goto drop_it_no_recycle;
 	}
diff --git a/drivers/net/ethernet/cadence/at91_ether.c b/drivers/net/ethernet/cadence/at91_ether.c
index 4a79edaf3885..4a24b9a6ad75 100644
--- a/drivers/net/ethernet/cadence/at91_ether.c
+++ b/drivers/net/ethernet/cadence/at91_ether.c
@@ -351,7 +351,6 @@ static int __init at91ether_probe(struct platform_device *pdev)
 	if (res)
 		goto err_disable_clock;
 
-	ether_setup(dev);
 	dev->netdev_ops = &at91ether_netdev_ops;
 	dev->ethtool_ops = &macb_ethtool_ops;
 	platform_set_drvdata(pdev, dev);
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index d9b8e94b805f..4d9fc0509af6 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -30,7 +30,6 @@
 #include <linux/of_device.h>
 #include <linux/of_mdio.h>
 #include <linux/of_net.h>
-#include <linux/pinctrl/consumer.h>
 
 #include "macb.h"
 
@@ -2071,7 +2070,6 @@ static int __init macb_probe(struct platform_device *pdev)
 	struct phy_device *phydev;
 	u32 config;
 	int err = -ENXIO;
-	struct pinctrl *pinctrl;
 	const char *mac;
 
 	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -2080,15 +2078,6 @@ static int __init macb_probe(struct platform_device *pdev)
 		goto err_out;
 	}
 
-	pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
-	if (IS_ERR(pinctrl)) {
-		err = PTR_ERR(pinctrl);
-		if (err == -EPROBE_DEFER)
-			goto err_out;
-
-		dev_warn(&pdev->dev, "No pinctrl provided\n");
-	}
-
 	err = -ENOMEM;
 	dev = alloc_etherdev(sizeof(*bp));
 	if (!dev)
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 25d6b2a10e4e..47bfea24b9e1 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -1735,7 +1735,6 @@ static int xgmac_probe(struct platform_device *pdev)
 	SET_NETDEV_DEV(ndev, &pdev->dev);
 	priv = netdev_priv(ndev);
 	platform_set_drvdata(pdev, ndev);
-	ether_setup(ndev);
 	ndev->netdev_ops = &xgmac_netdev_ops;
 	ndev->ethtool_ops = &xgmac_ethtool_ops;
 	spin_lock_init(&priv->stats_lock);
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 70089c29d307..f3ba840cbf7b 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1613,9 +1613,6 @@ dm9000_probe(struct platform_device *pdev)
 
 	/* from this point we assume that we have found a DM9000 */
 
-	/* driver system function */
-	ether_setup(ndev);
-
 	ndev->netdev_ops = &dm9000_netdev_ops;
 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
 	ndev->ethtool_ops = &dm9000_ethtool_ops;
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index f3658bdb64cc..0bc6c102f3ac 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -1222,8 +1222,6 @@ static int ethoc_probe(struct platform_device *pdev)
 		goto error;
 	}
 
-	ether_setup(netdev);
-
 	/* setup the net_device structure */
 	netdev->netdev_ops = &ethoc_netdev_ops;
 	netdev->watchdog_timeo = ETHOC_TIMEOUT;
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 354a30954fb1..1d5e1822bb2c 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -482,6 +482,8 @@ struct fec_enet_private {
 	unsigned int tx_pkts_itr;
 	unsigned int tx_time_itr;
 	unsigned int itr_clk_rate;
+
+	u32 rx_copybreak;
 };
 
 void fec_ptp_init(struct platform_device *pdev);
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 2c7343473e1c..1f07db891ee8 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -236,6 +236,8 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
 #define FEC_PAUSE_FLAG_AUTONEG	0x1
 #define FEC_PAUSE_FLAG_ENABLE	0x2
 
+#define COPYBREAK_DEFAULT	256
+
 #define TSO_HEADER_SIZE		128
 /* Max number of allowed TCP segments for software TSO */
 #define FEC_MAX_TSO_SEGS	100
@@ -1322,6 +1324,50 @@ fec_enet_tx(struct net_device *ndev)
 	return;
 }
 
+static int
+fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff *skb)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	int off;
+
+	off = ((unsigned long)skb->data) & fep->rx_align;
+	if (off)
+		skb_reserve(skb, fep->rx_align + 1 - off);
+
+	bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data,
+					  FEC_ENET_RX_FRSIZE - fep->rx_align,
+					  DMA_FROM_DEVICE);
+	if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
+		if (net_ratelimit())
+			netdev_err(ndev, "Rx DMA memory map failed\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
+			       struct bufdesc *bdp, u32 length)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	struct sk_buff *new_skb;
+
+	if (length > fep->rx_copybreak)
+		return false;
+
+	new_skb = netdev_alloc_skb(ndev, length);
+	if (!new_skb)
+		return false;
+
+	dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr,
+				FEC_ENET_RX_FRSIZE - fep->rx_align,
+				DMA_FROM_DEVICE);
+	memcpy(new_skb->data, (*skb)->data, length);
+	*skb = new_skb;
+
+	return true;
+}
+
 /* During a receive, the cur_rx points to the current incoming buffer.
  * When we update through the ring, if the next incoming buffer has
  * not been given to the system, we just set the empty indicator,
@@ -1336,7 +1382,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
 	struct fec_enet_priv_rx_q *rxq;
 	struct bufdesc *bdp;
 	unsigned short status;
-	struct sk_buff *skb;
+	struct sk_buff *skb_new = NULL;
+	struct sk_buff *skb;
 	ushort pkt_len;
 	__u8 *data;
 	int pkt_received = 0;
@@ -1344,6 +1391,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
 	bool vlan_packet_rcvd = false;
 	u16 vlan_tag;
 	int index = 0;
+	bool is_copybreak;
 
 #ifdef CONFIG_M532x
 	flush_cache_all();
@@ -1401,11 +1449,27 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
 		ndev->stats.rx_bytes += pkt_len;
 
 		index = fec_enet_get_bd_index(rxq->rx_bd_base, bdp, fep);
-		data = rxq->rx_skbuff[index]->data;
-		dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr,
-					FEC_ENET_RX_FRSIZE - fep->rx_align,
-					DMA_FROM_DEVICE);
+		skb = rxq->rx_skbuff[index];
 
+		/* The packet length includes FCS, but we don't want to
+		 * include that when passing upstream as it messes up
+		 * bridging applications.
+		 */
+		is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4);
+		if (!is_copybreak) {
+			skb_new = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
+			if (unlikely(!skb_new)) {
+				ndev->stats.rx_dropped++;
+				goto rx_processing_done;
+			}
+			dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
+					 FEC_ENET_RX_FRSIZE - fep->rx_align,
+					 DMA_FROM_DEVICE);
+		}
+
+		prefetch(skb->data - NET_IP_ALIGN);
+		skb_put(skb, pkt_len - 4);
+		data = skb->data;
 		if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
 			swap_buffer(data, pkt_len);
 
@@ -1422,62 +1486,48 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
 			struct vlan_hdr *vlan_header =
 					(struct vlan_hdr *) (data + ETH_HLEN);
 			vlan_tag = ntohs(vlan_header->h_vlan_TCI);
-			pkt_len -= VLAN_HLEN;
 
 			vlan_packet_rcvd = true;
+
+			skb_copy_to_linear_data_offset(skb, VLAN_HLEN,
+						       data, (2 * ETH_ALEN));
+			skb_pull(skb, VLAN_HLEN);
 		}
 
-		/* This does 16 byte alignment, exactly what we need.
-		 * The packet length includes FCS, but we don't want to
-		 * include that when passing upstream as it messes up
-		 * bridging applications.
-		 */
-		skb = netdev_alloc_skb(ndev, pkt_len - 4 + NET_IP_ALIGN);
+		skb->protocol = eth_type_trans(skb, ndev);
 
-		if (unlikely(!skb)) {
-			ndev->stats.rx_dropped++;
-		} else {
-			int payload_offset = (2 * ETH_ALEN);
-			skb_reserve(skb, NET_IP_ALIGN);
-			skb_put(skb, pkt_len - 4);	/* Make room */
-
-			/* Extract the frame data without the VLAN header. */
-			skb_copy_to_linear_data(skb, data, (2 * ETH_ALEN));
-			if (vlan_packet_rcvd)
-				payload_offset = (2 * ETH_ALEN) + VLAN_HLEN;
-			skb_copy_to_linear_data_offset(skb, (2 * ETH_ALEN),
-						       data + payload_offset,
-						       pkt_len - 4 - (2 * ETH_ALEN));
-
-			skb->protocol = eth_type_trans(skb, ndev);
-
-			/* Get receive timestamp from the skb */
-			if (fep->hwts_rx_en && fep->bufdesc_ex)
-				fec_enet_hwtstamp(fep, ebdp->ts,
-						  skb_hwtstamps(skb));
-
-			if (fep->bufdesc_ex &&
-			    (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) {
-				if (!(ebdp->cbd_esc & FLAG_RX_CSUM_ERROR)) {
-					/* don't check it */
-					skb->ip_summed = CHECKSUM_UNNECESSARY;
-				} else {
-					skb_checksum_none_assert(skb);
-				}
+		/* Get receive timestamp from the skb */
+		if (fep->hwts_rx_en && fep->bufdesc_ex)
+			fec_enet_hwtstamp(fep, ebdp->ts,
+					  skb_hwtstamps(skb));
+
+		if (fep->bufdesc_ex &&
+		    (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) {
+			if (!(ebdp->cbd_esc & FLAG_RX_CSUM_ERROR)) {
+				/* don't check it */
+				skb->ip_summed = CHECKSUM_UNNECESSARY;
+			} else {
+				skb_checksum_none_assert(skb);
 			}
+		}
 
 		/* Handle received VLAN packets */
 		if (vlan_packet_rcvd)
 			__vlan_hwaccel_put_tag(skb,
 					       htons(ETH_P_8021Q),
 					       vlan_tag);
 
 		napi_gro_receive(&fep->napi, skb);
+
+		if (is_copybreak) {
+			dma_sync_single_for_device(&fep->pdev->dev, bdp->cbd_bufaddr,
+						   FEC_ENET_RX_FRSIZE - fep->rx_align,
+						   DMA_FROM_DEVICE);
+		} else {
+			rxq->rx_skbuff[index] = skb_new;
+			fec_enet_new_rxbdp(ndev, bdp, skb_new);
 		}
 
-		dma_sync_single_for_device(&fep->pdev->dev, bdp->cbd_bufaddr,
-					FEC_ENET_RX_FRSIZE - fep->rx_align,
-					DMA_FROM_DEVICE);
 rx_processing_done:
 		/* Clear the status flags for this buffer */
 		status &= ~BD_ENET_RX_STATS;
@@ -2392,6 +2442,44 @@ static void fec_enet_itr_coal_init(struct net_device *ndev)
 	fec_enet_set_coalesce(ndev, &ec);
 }
 
+static int fec_enet_get_tunable(struct net_device *netdev,
+				const struct ethtool_tunable *tuna,
+				void *data)
+{
+	struct fec_enet_private *fep = netdev_priv(netdev);
+	int ret = 0;
+
+	switch (tuna->id) {
+	case ETHTOOL_RX_COPYBREAK:
+		*(u32 *)data = fep->rx_copybreak;
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static int fec_enet_set_tunable(struct net_device *netdev,
+				const struct ethtool_tunable *tuna,
+				const void *data)
+{
+	struct fec_enet_private *fep = netdev_priv(netdev);
+	int ret = 0;
+
+	switch (tuna->id) {
+	case ETHTOOL_RX_COPYBREAK:
+		fep->rx_copybreak = *(u32 *)data;
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
 static const struct ethtool_ops fec_enet_ethtool_ops = {
 	.get_settings		= fec_enet_get_settings,
 	.set_settings		= fec_enet_set_settings,
@@ -2408,6 +2496,8 @@ static const struct ethtool_ops fec_enet_ethtool_ops = {
 	.get_sset_count		= fec_enet_get_sset_count,
 #endif
 	.get_ts_info		= fec_enet_get_ts_info,
+	.get_tunable		= fec_enet_get_tunable,
+	.set_tunable		= fec_enet_set_tunable,
 };
 
 static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
@@ -2553,33 +2643,20 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
 	struct sk_buff *skb;
 	struct bufdesc *bdp;
 	struct fec_enet_priv_rx_q *rxq;
-	unsigned int off;
 
 	rxq = fep->rx_queue[queue];
 	bdp = rxq->rx_bd_base;
 	for (i = 0; i < rxq->rx_ring_size; i++) {
-		dma_addr_t addr;
-
 		skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
 		if (!skb)
 			goto err_alloc;
 
-		off = ((unsigned long)skb->data) & fep->rx_align;
-		if (off)
-			skb_reserve(skb, fep->rx_align + 1 - off);
-
-		addr = dma_map_single(&fep->pdev->dev, skb->data,
-				FEC_ENET_RX_FRSIZE - fep->rx_align, DMA_FROM_DEVICE);
-
-		if (dma_mapping_error(&fep->pdev->dev, addr)) {
+		if (fec_enet_new_rxbdp(ndev, bdp, skb)) {
 			dev_kfree_skb(skb);
-			if (net_ratelimit())
-				netdev_err(ndev, "Rx DMA memory map failed\n");
 			goto err_alloc;
 		}
 
 		rxq->rx_skbuff[i] = skb;
-		bdp->cbd_bufaddr = addr;
 		bdp->cbd_sc = BD_ENET_RX_EMPTY;
 
 		if (fep->bufdesc_ex) {
@@ -3240,6 +3317,7 @@ fec_probe(struct platform_device *pdev)
 	if (fep->bufdesc_ex && fep->ptp_clock)
 		netdev_info(ndev, "registered PHC device %d\n", fep->dev_id);
 
+	fep->rx_copybreak = COPYBREAK_DEFAULT;
 	INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);
 	return 0;
 
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h
index 05658275ba17..42eb4344a9dc 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k.h
@@ -47,13 +47,9 @@
 #define FM10K_DEFAULT_TX_WORK 256
 
 #define FM10K_RXBUFFER_256	  256
-#define FM10K_RXBUFFER_16384	16384
 #define FM10K_RX_HDR_LEN	FM10K_RXBUFFER_256
-#if PAGE_SIZE <= FM10K_RXBUFFER_16384
-#define FM10K_RX_BUFSZ		(PAGE_SIZE / 2)
-#else
-#define FM10K_RX_BUFSZ		FM10K_RXBUFFER_16384
-#endif
+#define FM10K_RXBUFFER_2048	 2048
+#define FM10K_RX_BUFSZ		FM10K_RXBUFFER_2048
 
 /* How many Rx Buffers do we bundle into one write to the hardware ? */
 #define FM10K_RX_BUFFER_WRITE	16	/* Must be power of 2 */
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index dcec000bdb68..bf44a8fe711f 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -546,6 +546,10 @@ int fm10k_open(struct net_device *netdev)
 	fm10k_request_glort_range(interface);
 
 	/* Notify the stack of the actual queue counts */
+	err = netif_set_real_num_tx_queues(netdev,
+					   interface->num_tx_queues);
+	if (err)
+		goto err_set_queues;
 
 	err = netif_set_real_num_rx_queues(netdev,
 					   interface->num_rx_queues);
@@ -601,7 +605,7 @@ int fm10k_close(struct net_device *netdev)
 static netdev_tx_t fm10k_xmit_frame(struct sk_buff *skb, struct net_device *dev)
 {
 	struct fm10k_intfc *interface = netdev_priv(dev);
-	unsigned int r_idx = 0;
+	unsigned int r_idx = skb->queue_mapping;
 	int err;
 
 	if ((skb->protocol == htons(ETH_P_8021Q)) &&
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index be039dd6114d..267992b3de8a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -702,7 +702,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
 		total_packets += tx_buf->gso_segs;
 
 		/* free the skb */
-		dev_kfree_skb_any(tx_buf->skb);
+		dev_consume_skb_any(tx_buf->skb);
 
 		/* unmap skb header data */
 		dma_unmap_single(tx_ring->dev,
diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
index ce55ea5d750c..2003b3756ba2 100644
--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
@@ -265,11 +265,6 @@ struct e1000_hw_stats {
265 u64 b2ogprc; 265 u64 b2ogprc;
266}; 266};
267 267
268struct e1000_phy_stats {
269 u32 idle_errors;
270 u32 receive_errors;
271};
272
273struct e1000_host_mng_dhcp_cookie { 268struct e1000_host_mng_dhcp_cookie {
274 u32 signature; 269 u32 signature;
275 u8 status; 270 u8 status;
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 06102d1f7c03..82d891e183b1 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -403,7 +403,6 @@ struct igb_adapter {
 	struct e1000_hw hw;
 	struct e1000_hw_stats stats;
 	struct e1000_phy_info phy_info;
-	struct e1000_phy_stats phy_stats;
 
 	u32 test_icr;
 	struct igb_ring test_tx_ring;
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 6cf0c17ad9c4..ae59c0b108c5 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -58,7 +58,7 @@
 
 #define MAJ 5
 #define MIN 2
-#define BUILD 13
+#define BUILD 15
 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
 __stringify(BUILD) "-k"
 char igb_driver_name[] = "igb";
@@ -5206,14 +5206,11 @@ void igb_update_stats(struct igb_adapter *adapter,
 	struct e1000_hw *hw = &adapter->hw;
 	struct pci_dev *pdev = adapter->pdev;
 	u32 reg, mpc;
-	u16 phy_tmp;
 	int i;
 	u64 bytes, packets;
 	unsigned int start;
 	u64 _bytes, _packets;
 
-#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
-
 	/* Prevent stats update while adapter is being reset, or if the pci
 	 * connection is down.
 	 */
@@ -5374,15 +5371,6 @@ void igb_update_stats(struct igb_adapter *adapter,
 
 	/* Tx Dropped needs to be maintained elsewhere */
 
-	/* Phy Stats */
-	if (hw->phy.media_type == e1000_media_type_copper) {
-		if ((adapter->link_speed == SPEED_1000) &&
-		    (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
-			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
-			adapter->phy_stats.idle_errors += phy_tmp;
-		}
-	}
-
 	/* Management Stats */
 	adapter->stats.mgptc += rd32(E1000_MGTPTC);
 	adapter->stats.mgprc += rd32(E1000_MGTPRC);
@@ -6386,7 +6374,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
 		total_packets += tx_buffer->gso_segs;
 
 		/* free the skb */
-		dev_kfree_skb_any(tx_buffer->skb);
+		dev_consume_skb_any(tx_buffer->skb);
 
 		/* unmap skb header data */
 		dma_unmap_single(tx_ring->dev,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 673d82095779..5032a602d5c9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -307,7 +307,6 @@ enum ixgbe_ring_f_enum {
 #define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
 #define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
 #define IXGBE_MAX_L2A_QUEUES 4
-#define IXGBE_MAX_L2A_QUEUES 4
 #define IXGBE_BAD_L2A_QUEUE 3
 #define IXGBE_MAX_MACVLANS 31
 #define IXGBE_MAX_DCBMACVLANS 8
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index cff383b1cbb0..3ce4a258f945 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -2267,7 +2267,6 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
 	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
 		adapter->tx_itr_setting = adapter->rx_itr_setting;
 
-#if IS_ENABLED(CONFIG_BQL)
 	/* detect ITR changes that require update of TXDCTL.WTHRESH */
 	if ((adapter->tx_itr_setting != 1) &&
 	    (adapter->tx_itr_setting < IXGBE_100K_ITR)) {
@@ -2279,7 +2278,7 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
 		    (tx_itr_prev < IXGBE_100K_ITR))
 			need_reset = true;
 	}
-#endif
+
 	/* check the old value and enable RSC if necessary */
 	need_reset |= ixgbe_update_rsc(adapter);
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 06ef5a32a893..d677b5a23b58 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1094,7 +1094,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
 		total_packets += tx_buffer->gso_segs;
 
 		/* free the skb */
-		dev_kfree_skb_any(tx_buffer->skb);
+		dev_consume_skb_any(tx_buffer->skb);
 
 		/* unmap skb header data */
 		dma_unmap_single(tx_ring->dev,
@@ -2982,11 +2982,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
 	 * to or less than the number of on chip descriptors, which is
 	 * currently 40.
 	 */
-#if IS_ENABLED(CONFIG_BQL)
 	if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR))
-#else
-	if (!ring->q_vector || (ring->q_vector->itr < 8))
-#endif
 		txdctl |= (1 << 16);	/* WTHRESH = 1 */
 	else
 		txdctl |= (8 << 16);	/* WTHRESH = 8 */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 11f02ea78c4a..d47b19f27c35 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -445,8 +445,6 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
 s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
 {
 	s32 status = 0;
-	u32 time_out;
-	u32 max_time_out = 10;
 	u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
 	bool autoneg = false;
 	ixgbe_link_speed speed;
@@ -514,25 +512,6 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
 	hw->phy.ops.write_reg(hw, MDIO_CTRL1,
 			      MDIO_MMD_AN, autoneg_reg);
 
-	/* Wait for autonegotiation to finish */
-	for (time_out = 0; time_out < max_time_out; time_out++) {
-		udelay(10);
-		/* Restart PHY autonegotiation and wait for completion */
-		status = hw->phy.ops.read_reg(hw, MDIO_STAT1,
-					      MDIO_MMD_AN,
-					      &autoneg_reg);
-
-		autoneg_reg &= MDIO_AN_STAT1_COMPLETE;
-		if (autoneg_reg == MDIO_AN_STAT1_COMPLETE) {
-			break;
-		}
-	}
-
-	if (time_out == max_time_out) {
-		hw_dbg(hw, "ixgbe_setup_phy_link_generic: time out\n");
-		return IXGBE_ERR_LINK_SETUP;
-	}
-
 	return status;
 }
 
@@ -657,8 +636,6 @@ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
 s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
 {
 	s32 status;
-	u32 time_out;
-	u32 max_time_out = 10;
 	u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
 	bool autoneg = false;
 	ixgbe_link_speed speed;
@@ -724,24 +701,6 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
 	hw->phy.ops.write_reg(hw, MDIO_CTRL1,
 			      MDIO_MMD_AN, autoneg_reg);
 
-	/* Wait for autonegotiation to finish */
-	for (time_out = 0; time_out < max_time_out; time_out++) {
-		udelay(10);
-		/* Restart PHY autonegotiation and wait for completion */
-		status = hw->phy.ops.read_reg(hw, MDIO_STAT1,
-					      MDIO_MMD_AN,
-					      &autoneg_reg);
-
-		autoneg_reg &= MDIO_AN_STAT1_COMPLETE;
-		if (autoneg_reg == MDIO_AN_STAT1_COMPLETE)
-			break;
-	}
-
-	if (time_out == max_time_out) {
-		hw_dbg(hw, "ixgbe_setup_phy_link_tnx: time out\n");
-		return IXGBE_ERR_LINK_SETUP;
-	}
-
 	return status;
 }
 
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index fd4b6aecf6ee..2dad4d5047ba 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -633,7 +633,6 @@ ltq_etop_init(struct net_device *dev)
 	int err;
 	bool random_mac = false;
 
-	ether_setup(dev);
 	dev->watchdog_timeo = 10 * HZ;
 	err = ltq_etop_hw_init(dev);
 	if (err)
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index bed8fbb3edc5..b3b72ad92d4a 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -64,7 +64,7 @@ config MVPP2
 
 config PXA168_ETH
 	tristate "Marvell pxa168 ethernet support"
-	depends on CPU_PXA168 || ARCH_BERLIN || COMPILE_TEST
+	depends on (CPU_PXA168 || ARCH_BERLIN || COMPILE_TEST) && HAS_IOMEM
 	select PHYLIB
 	---help---
 	  This driver supports the pxa168 Ethernet ports.
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 24de41231593..c3b209cd0660 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -634,12 +634,12 @@ static int pxa168_eth_set_mac_address(struct net_device *dev, void *addr)
 	memcpy(oldMac, dev->dev_addr, ETH_ALEN);
 	memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
 
-	mac_h = sa->sa_data[0] << 24;
-	mac_h |= sa->sa_data[1] << 16;
-	mac_h |= sa->sa_data[2] << 8;
-	mac_h |= sa->sa_data[3];
-	mac_l = sa->sa_data[4] << 8;
-	mac_l |= sa->sa_data[5];
+	mac_h = dev->dev_addr[0] << 24;
+	mac_h |= dev->dev_addr[1] << 16;
+	mac_h |= dev->dev_addr[2] << 8;
+	mac_h |= dev->dev_addr[3];
+	mac_l = dev->dev_addr[4] << 8;
+	mac_l |= dev->dev_addr[5];
 	wrl(pep, MAC_ADDR_HIGH, mac_h);
 	wrl(pep, MAC_ADDR_LOW, mac_l);
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 35ff2925110a..42c9f8b09a6e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -112,6 +112,7 @@ static const char main_strings[][ETH_GSTRING_LEN] = {
 
 	/* port statistics */
 	"tso_packets",
+	"xmit_more",
 	"queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_failed",
 	"rx_csum_good", "rx_csum_none", "tx_chksum_offload",
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index c2cfb05e7290..0a0261d128b9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -150,14 +150,19 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
 	priv->port_stats.tx_chksum_offload = 0;
 	priv->port_stats.queue_stopped = 0;
 	priv->port_stats.wake_queue = 0;
+	priv->port_stats.tso_packets = 0;
+	priv->port_stats.xmit_more = 0;
 
 	for (i = 0; i < priv->tx_ring_num; i++) {
-		stats->tx_packets += priv->tx_ring[i]->packets;
-		stats->tx_bytes += priv->tx_ring[i]->bytes;
-		priv->port_stats.tx_chksum_offload += priv->tx_ring[i]->tx_csum;
-		priv->port_stats.queue_stopped +=
-			priv->tx_ring[i]->queue_stopped;
-		priv->port_stats.wake_queue += priv->tx_ring[i]->wake_queue;
+		const struct mlx4_en_tx_ring *ring = priv->tx_ring[i];
+
+		stats->tx_packets += ring->packets;
+		stats->tx_bytes += ring->bytes;
+		priv->port_stats.tx_chksum_offload += ring->tx_csum;
+		priv->port_stats.queue_stopped += ring->queue_stopped;
+		priv->port_stats.wake_queue += ring->wake_queue;
+		priv->port_stats.tso_packets += ring->tso_packets;
+		priv->port_stats.xmit_more += ring->xmit_more;
 	}
 
 	stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) +
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index adedc47e947d..0c501253fdab 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -840,7 +840,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * note that we already verified that it is linear */
 	memcpy(tx_desc->lso.header, skb->data, lso_header_size);
 
-	priv->port_stats.tso_packets++;
+	ring->tso_packets++;
 	i = ((skb->len - lso_header_size) / skb_shinfo(skb)->gso_size) +
 		!!((skb->len - lso_header_size) % skb_shinfo(skb)->gso_size);
 	tx_info->nr_bytes = skb->len + (i - 1) * lso_header_size;
@@ -910,6 +910,8 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 			wmb();
 			iowrite32be(ring->doorbell_qpn,
 				    ring->bf.uar->map + MLX4_SEND_DOORBELL);
+		} else {
+			ring->xmit_more++;
 		}
 	}
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index f6c32a947185..90de6e1ad06e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -78,13 +78,13 @@ MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");
 #endif /* CONFIG_PCI_MSI */
 
 static uint8_t num_vfs[3] = {0, 0, 0};
-static int num_vfs_argc = 3;
+static int num_vfs_argc;
 module_param_array(num_vfs, byte , &num_vfs_argc, 0444);
 MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0\n"
 			  "num_vfs=port1,port2,port1+2");
 
 static uint8_t probe_vf[3] = {0, 0, 0};
-static int probe_vfs_argc = 3;
+static int probe_vfs_argc;
 module_param_array(probe_vf, byte, &probe_vfs_argc, 0444);
 MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)\n"
 			  "probe_vf=port1,port2,port1+2");
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 6a4fc2394cf2..84c9d5dbfa4f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -279,6 +279,8 @@ struct mlx4_en_tx_ring {
279 unsigned long tx_csum; 279 unsigned long tx_csum;
280 unsigned long queue_stopped; 280 unsigned long queue_stopped;
281 unsigned long wake_queue; 281 unsigned long wake_queue;
282 unsigned long tso_packets;
283 unsigned long xmit_more;
282 struct mlx4_bf bf; 284 struct mlx4_bf bf;
283 bool bf_enabled; 285 bool bf_enabled;
284 bool bf_alloced; 286 bool bf_alloced;
@@ -426,6 +428,7 @@ struct mlx4_en_pkt_stats {
426 428
427struct mlx4_en_port_stats { 429struct mlx4_en_port_stats {
428 unsigned long tso_packets; 430 unsigned long tso_packets;
431 unsigned long xmit_more;
429 unsigned long queue_stopped; 432 unsigned long queue_stopped;
430 unsigned long wake_queue; 433 unsigned long wake_queue;
431 unsigned long tx_timeout; 434 unsigned long tx_timeout;
@@ -433,7 +436,7 @@ struct mlx4_en_port_stats {
433 unsigned long rx_chksum_good; 436 unsigned long rx_chksum_good;
434 unsigned long rx_chksum_none; 437 unsigned long rx_chksum_none;
435 unsigned long tx_chksum_offload; 438 unsigned long tx_chksum_offload;
436#define NUM_PORT_STATS 8 439#define NUM_PORT_STATS 9
437}; 440};
438 441
439struct mlx4_en_perf_stats { 442struct mlx4_en_perf_stats {
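
NUM_PORT_STATS has to be bumped by hand whenever a counter is added, as done above. Since every member of mlx4_en_port_stats is an unsigned long counter, a compile-time guard could catch the next mismatch; a hypothetical sketch, not part of this patch:

	/* placed in any function, e.g. module init */
	BUILD_BUG_ON(NUM_PORT_STATS !=
		     sizeof(struct mlx4_en_port_stats) / sizeof(unsigned long));
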
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 65a7da69e2ac..368c6c5ea014 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -357,60 +357,24 @@ const char *mlx5_command_str(int command)
357 case MLX5_CMD_OP_2ERR_QP: 357 case MLX5_CMD_OP_2ERR_QP:
358 return "2ERR_QP"; 358 return "2ERR_QP";
359 359
360 case MLX5_CMD_OP_RTS2SQD_QP:
361 return "RTS2SQD_QP";
362
363 case MLX5_CMD_OP_SQD2RTS_QP:
364 return "SQD2RTS_QP";
365
366 case MLX5_CMD_OP_2RST_QP: 360 case MLX5_CMD_OP_2RST_QP:
367 return "2RST_QP"; 361 return "2RST_QP";
368 362
369 case MLX5_CMD_OP_QUERY_QP: 363 case MLX5_CMD_OP_QUERY_QP:
370 return "QUERY_QP"; 364 return "QUERY_QP";
371 365
372 case MLX5_CMD_OP_CONF_SQP:
373 return "CONF_SQP";
374
375 case MLX5_CMD_OP_MAD_IFC: 366 case MLX5_CMD_OP_MAD_IFC:
376 return "MAD_IFC"; 367 return "MAD_IFC";
377 368
378 case MLX5_CMD_OP_INIT2INIT_QP: 369 case MLX5_CMD_OP_INIT2INIT_QP:
379 return "INIT2INIT_QP"; 370 return "INIT2INIT_QP";
380 371
381 case MLX5_CMD_OP_SUSPEND_QP:
382 return "SUSPEND_QP";
383
384 case MLX5_CMD_OP_UNSUSPEND_QP:
385 return "UNSUSPEND_QP";
386
387 case MLX5_CMD_OP_SQD2SQD_QP:
388 return "SQD2SQD_QP";
389
390 case MLX5_CMD_OP_ALLOC_QP_COUNTER_SET:
391 return "ALLOC_QP_COUNTER_SET";
392
393 case MLX5_CMD_OP_DEALLOC_QP_COUNTER_SET:
394 return "DEALLOC_QP_COUNTER_SET";
395
396 case MLX5_CMD_OP_QUERY_QP_COUNTER_SET:
397 return "QUERY_QP_COUNTER_SET";
398
399 case MLX5_CMD_OP_CREATE_PSV: 372 case MLX5_CMD_OP_CREATE_PSV:
400 return "CREATE_PSV"; 373 return "CREATE_PSV";
401 374
402 case MLX5_CMD_OP_DESTROY_PSV: 375 case MLX5_CMD_OP_DESTROY_PSV:
403 return "DESTROY_PSV"; 376 return "DESTROY_PSV";
404 377
405 case MLX5_CMD_OP_QUERY_PSV:
406 return "QUERY_PSV";
407
408 case MLX5_CMD_OP_QUERY_SIG_RULE_TABLE:
409 return "QUERY_SIG_RULE_TABLE";
410
411 case MLX5_CMD_OP_QUERY_BLOCK_SIZE_TABLE:
412 return "QUERY_BLOCK_SIZE_TABLE";
413
414 case MLX5_CMD_OP_CREATE_SRQ: 378 case MLX5_CMD_OP_CREATE_SRQ:
415 return "CREATE_SRQ"; 379 return "CREATE_SRQ";
416 380
@@ -1538,16 +1502,9 @@ static const char *cmd_status_str(u8 status)
1538 } 1502 }
1539} 1503}
1540 1504
1541int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr) 1505static int cmd_status_to_err(u8 status)
1542{ 1506{
1543 if (!hdr->status) 1507 switch (status) {
1544 return 0;
1545
1546 pr_warn("command failed, status %s(0x%x), syndrome 0x%x\n",
1547 cmd_status_str(hdr->status), hdr->status,
1548 be32_to_cpu(hdr->syndrome));
1549
1550 switch (hdr->status) {
1551 case MLX5_CMD_STAT_OK: return 0; 1508 case MLX5_CMD_STAT_OK: return 0;
1552 case MLX5_CMD_STAT_INT_ERR: return -EIO; 1509 case MLX5_CMD_STAT_INT_ERR: return -EIO;
1553 case MLX5_CMD_STAT_BAD_OP_ERR: return -EINVAL; 1510 case MLX5_CMD_STAT_BAD_OP_ERR: return -EINVAL;
@@ -1567,3 +1524,33 @@ int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr)
1567 default: return -EIO; 1524 default: return -EIO;
1568 } 1525 }
1569} 1526}
1527
1528/* this will be available till all the commands use set/get macros */
1529int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr)
1530{
1531 if (!hdr->status)
1532 return 0;
1533
1534 pr_warn("command failed, status %s(0x%x), syndrome 0x%x\n",
1535 cmd_status_str(hdr->status), hdr->status,
1536 be32_to_cpu(hdr->syndrome));
1537
1538 return cmd_status_to_err(hdr->status);
1539}
1540
1541int mlx5_cmd_status_to_err_v2(void *ptr)
1542{
1543 u32 syndrome;
1544 u8 status;
1545
1546 status = be32_to_cpu(*(__be32 *)ptr) >> 24;
1547 if (!status)
1548 return 0;
1549
1550 syndrome = be32_to_cpu(*(__be32 *)(ptr + 4));
1551
1552 pr_warn("command failed, status %s(0x%x), syndrome 0x%x\n",
1553 cmd_status_str(status), status, syndrome);
1554
1555 return cmd_status_to_err(status);
1556}
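
mlx5_cmd_status_to_err_v2() leans on the layout that every macro-built outbox shares: the status byte sits in bits 31:24 of the first big-endian dword and the syndrome fills the second. The same parse, sketched as a struct view with hypothetical field names:

	struct cmd_outbox_common {
		__be32 dword0;		/* status in the top byte */
		__be32 syndrome;
	};

	static u8 outbox_status(void *out)
	{
		struct cmd_outbox_common *hdr = out;

		return be32_to_cpu(hdr->dword0) >> 24;
	}
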
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 4e8bd0b34bb0..ed53291468f3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -198,7 +198,7 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
198 int eqes_found = 0; 198 int eqes_found = 0;
199 int set_ci = 0; 199 int set_ci = 0;
200 u32 cqn; 200 u32 cqn;
201 u32 srqn; 201 u32 rsn;
202 u8 port; 202 u8 port;
203 203
204 while ((eqe = next_eqe_sw(eq))) { 204 while ((eqe = next_eqe_sw(eq))) {
@@ -224,18 +224,18 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
224 case MLX5_EVENT_TYPE_PATH_MIG_FAILED: 224 case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
225 case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR: 225 case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
226 case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR: 226 case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
227 rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
227 mlx5_core_dbg(dev, "event %s(%d) arrived\n", 228 mlx5_core_dbg(dev, "event %s(%d) arrived\n",
228 eqe_type_str(eqe->type), eqe->type); 229 eqe_type_str(eqe->type), eqe->type);
229 mlx5_qp_event(dev, be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff, 230 mlx5_rsc_event(dev, rsn, eqe->type);
230 eqe->type);
231 break; 231 break;
232 232
233 case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT: 233 case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
234 case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR: 234 case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
235 srqn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff; 235 rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
236 mlx5_core_dbg(dev, "SRQ event %s(%d): srqn 0x%x\n", 236 mlx5_core_dbg(dev, "SRQ event %s(%d): srqn 0x%x\n",
237 eqe_type_str(eqe->type), eqe->type, srqn); 237 eqe_type_str(eqe->type), eqe->type, rsn);
238 mlx5_srq_event(dev, srqn, eqe->type); 238 mlx5_srq_event(dev, rsn, eqe->type);
239 break; 239 break;
240 240
241 case MLX5_EVENT_TYPE_CMD: 241 case MLX5_EVENT_TYPE_CMD:
@@ -468,7 +468,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
468 468
469 err = mlx5_create_map_eq(dev, &table->pages_eq, 469 err = mlx5_create_map_eq(dev, &table->pages_eq,
470 MLX5_EQ_VEC_PAGES, 470 MLX5_EQ_VEC_PAGES,
471 dev->caps.max_vf + 1, 471 dev->caps.gen.max_vf + 1,
472 1 << MLX5_EVENT_TYPE_PAGE_REQUEST, "mlx5_pages_eq", 472 1 << MLX5_EVENT_TYPE_PAGE_REQUEST, "mlx5_pages_eq",
473 &dev->priv.uuari.uars[0]); 473 &dev->priv.uuari.uars[0]);
474 if (err) { 474 if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index f012658b6a92..087c4c797deb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -64,86 +64,9 @@ out_out:
64 return err; 64 return err;
65} 65}
66 66
67int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev, 67int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev, struct mlx5_caps *caps)
68 struct mlx5_caps *caps)
69{ 68{
70 struct mlx5_cmd_query_hca_cap_mbox_out *out; 69 return mlx5_core_get_caps(dev, caps, HCA_CAP_OPMOD_GET_CUR);
71 struct mlx5_cmd_query_hca_cap_mbox_in in;
72 struct mlx5_query_special_ctxs_mbox_out ctx_out;
73 struct mlx5_query_special_ctxs_mbox_in ctx_in;
74 int err;
75 u16 t16;
76
77 out = kzalloc(sizeof(*out), GFP_KERNEL);
78 if (!out)
79 return -ENOMEM;
80
81 memset(&in, 0, sizeof(in));
82 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_HCA_CAP);
83 in.hdr.opmod = cpu_to_be16(0x1);
84 err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out));
85 if (err)
86 goto out_out;
87
88 if (out->hdr.status) {
89 err = mlx5_cmd_status_to_err(&out->hdr);
90 goto out_out;
91 }
92
93
94 caps->log_max_eq = out->hca_cap.log_max_eq & 0xf;
95 caps->max_cqes = 1 << out->hca_cap.log_max_cq_sz;
96 caps->max_wqes = 1 << out->hca_cap.log_max_qp_sz;
97 caps->max_sq_desc_sz = be16_to_cpu(out->hca_cap.max_desc_sz_sq);
98 caps->max_rq_desc_sz = be16_to_cpu(out->hca_cap.max_desc_sz_rq);
99 caps->flags = be64_to_cpu(out->hca_cap.flags);
100 caps->stat_rate_support = be16_to_cpu(out->hca_cap.stat_rate_support);
101 caps->log_max_msg = out->hca_cap.log_max_msg & 0x1f;
102 caps->num_ports = out->hca_cap.num_ports & 0xf;
103 caps->log_max_cq = out->hca_cap.log_max_cq & 0x1f;
104 if (caps->num_ports > MLX5_MAX_PORTS) {
105 mlx5_core_err(dev, "device has %d ports while the driver supports max %d ports\n",
106 caps->num_ports, MLX5_MAX_PORTS);
107 err = -EINVAL;
108 goto out_out;
109 }
110 caps->log_max_qp = out->hca_cap.log_max_qp & 0x1f;
111 caps->log_max_mkey = out->hca_cap.log_max_mkey & 0x3f;
112 caps->log_max_pd = out->hca_cap.log_max_pd & 0x1f;
113 caps->log_max_srq = out->hca_cap.log_max_srqs & 0x1f;
114 caps->local_ca_ack_delay = out->hca_cap.local_ca_ack_delay & 0x1f;
115 caps->log_max_mcg = out->hca_cap.log_max_mcg;
116 caps->max_qp_mcg = be32_to_cpu(out->hca_cap.max_qp_mcg) & 0xffffff;
117 caps->max_ra_res_qp = 1 << (out->hca_cap.log_max_ra_res_qp & 0x3f);
118 caps->max_ra_req_qp = 1 << (out->hca_cap.log_max_ra_req_qp & 0x3f);
119 caps->max_srq_wqes = 1 << out->hca_cap.log_max_srq_sz;
120 t16 = be16_to_cpu(out->hca_cap.bf_log_bf_reg_size);
121 if (t16 & 0x8000) {
122 caps->bf_reg_size = 1 << (t16 & 0x1f);
123 caps->bf_regs_per_page = MLX5_BF_REGS_PER_PAGE;
124 } else {
125 caps->bf_reg_size = 0;
126 caps->bf_regs_per_page = 0;
127 }
128 caps->min_page_sz = ~(u32)((1 << out->hca_cap.log_pg_sz) - 1);
129
130 memset(&ctx_in, 0, sizeof(ctx_in));
131 memset(&ctx_out, 0, sizeof(ctx_out));
132 ctx_in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
133 err = mlx5_cmd_exec(dev, &ctx_in, sizeof(ctx_in),
134 &ctx_out, sizeof(ctx_out));
135 if (err)
136 goto out_out;
137
138 if (ctx_out.hdr.status)
139 err = mlx5_cmd_status_to_err(&ctx_out.hdr);
140
141 caps->reserved_lkey = be32_to_cpu(ctx_out.reserved_lkey);
142
143out_out:
144 kfree(out);
145
146 return err;
147} 70}
148 71
149int mlx5_cmd_init_hca(struct mlx5_core_dev *dev) 72int mlx5_cmd_init_hca(struct mlx5_core_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index f2716cc1f51d..3d8e8e489b2d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -43,6 +43,7 @@
43#include <linux/mlx5/qp.h> 43#include <linux/mlx5/qp.h>
44#include <linux/mlx5/srq.h> 44#include <linux/mlx5/srq.h>
45#include <linux/debugfs.h> 45#include <linux/debugfs.h>
46#include <linux/mlx5/mlx5_ifc.h>
46#include "mlx5_core.h" 47#include "mlx5_core.h"
47 48
48#define DRIVER_NAME "mlx5_core" 49#define DRIVER_NAME "mlx5_core"
@@ -207,11 +208,11 @@ static void release_bar(struct pci_dev *pdev)
207static int mlx5_enable_msix(struct mlx5_core_dev *dev) 208static int mlx5_enable_msix(struct mlx5_core_dev *dev)
208{ 209{
209 struct mlx5_eq_table *table = &dev->priv.eq_table; 210 struct mlx5_eq_table *table = &dev->priv.eq_table;
210 int num_eqs = 1 << dev->caps.log_max_eq; 211 int num_eqs = 1 << dev->caps.gen.log_max_eq;
211 int nvec; 212 int nvec;
212 int i; 213 int i;
213 214
214 nvec = dev->caps.num_ports * num_online_cpus() + MLX5_EQ_VEC_COMP_BASE; 215 nvec = dev->caps.gen.num_ports * num_online_cpus() + MLX5_EQ_VEC_COMP_BASE;
215 nvec = min_t(int, nvec, num_eqs); 216 nvec = min_t(int, nvec, num_eqs);
216 if (nvec <= MLX5_EQ_VEC_COMP_BASE) 217 if (nvec <= MLX5_EQ_VEC_COMP_BASE)
217 return -ENOMEM; 218 return -ENOMEM;
@@ -250,91 +251,205 @@ struct mlx5_reg_host_endianess {
250#define CAP_MASK(pos, size) ((u64)((1 << (size)) - 1) << (pos)) 251#define CAP_MASK(pos, size) ((u64)((1 << (size)) - 1) << (pos))
251 252
252enum { 253enum {
253 MLX5_CAP_BITS_RW_MASK = CAP_MASK(MLX5_CAP_OFF_CMDIF_CSUM, 2) | 254 MLX5_CAP_BITS_RW_MASK = CAP_MASK(MLX5_CAP_OFF_CMDIF_CSUM, 2) |
254 CAP_MASK(MLX5_CAP_OFF_DCT, 1), 255 MLX5_DEV_CAP_FLAG_DCT,
255}; 256};
256 257
258static u16 to_fw_pkey_sz(u32 size)
259{
260 switch (size) {
261 case 128:
262 return 0;
263 case 256:
264 return 1;
265 case 512:
266 return 2;
267 case 1024:
268 return 3;
269 case 2048:
270 return 4;
271 case 4096:
272 return 5;
273 default:
274 pr_warn("invalid pkey table size %d\n", size);
275 return 0;
276 }
277}
278
257/* selectively copy writable fields clearing any reserved area 279/* selectively copy writable fields clearing any reserved area
258 */ 280 */
259static void copy_rw_fields(struct mlx5_hca_cap *to, struct mlx5_hca_cap *from) 281static void copy_rw_fields(void *to, struct mlx5_caps *from)
260{ 282{
283 __be64 *flags_off = (__be64 *)MLX5_ADDR_OF(cmd_hca_cap, to, reserved_22);
261 u64 v64; 284 u64 v64;
262 285
263 to->log_max_qp = from->log_max_qp & 0x1f; 286 MLX5_SET(cmd_hca_cap, to, log_max_qp, from->gen.log_max_qp);
264 to->log_max_ra_req_dc = from->log_max_ra_req_dc & 0x3f; 287 MLX5_SET(cmd_hca_cap, to, log_max_ra_req_qp, from->gen.log_max_ra_req_qp);
265 to->log_max_ra_res_dc = from->log_max_ra_res_dc & 0x3f; 288 MLX5_SET(cmd_hca_cap, to, log_max_ra_res_qp, from->gen.log_max_ra_res_qp);
266 to->log_max_ra_req_qp = from->log_max_ra_req_qp & 0x3f; 289 MLX5_SET(cmd_hca_cap, to, pkey_table_size, from->gen.pkey_table_size);
267 to->log_max_ra_res_qp = from->log_max_ra_res_qp & 0x3f; 290 MLX5_SET(cmd_hca_cap, to, log_max_ra_req_dc, from->gen.log_max_ra_req_dc);
268 to->log_max_atomic_size_qp = from->log_max_atomic_size_qp; 291 MLX5_SET(cmd_hca_cap, to, log_max_ra_res_dc, from->gen.log_max_ra_res_dc);
269 to->log_max_atomic_size_dc = from->log_max_atomic_size_dc; 292 MLX5_SET(cmd_hca_cap, to, pkey_table_size, to_fw_pkey_sz(from->gen.pkey_table_size));
270 v64 = be64_to_cpu(from->flags) & MLX5_CAP_BITS_RW_MASK; 293 v64 = from->gen.flags & MLX5_CAP_BITS_RW_MASK;
271 to->flags = cpu_to_be64(v64); 294 *flags_off = cpu_to_be64(v64);
272} 295}
273 296
274enum { 297static u16 get_pkey_table_size(int pkey)
275 HCA_CAP_OPMOD_GET_MAX = 0, 298{
276 HCA_CAP_OPMOD_GET_CUR = 1, 299 if (pkey > MLX5_MAX_LOG_PKEY_TABLE)
277}; 300 return 0;
278 301
279static int handle_hca_cap(struct mlx5_core_dev *dev) 302 return MLX5_MIN_PKEY_TABLE_SIZE << pkey;
303}
304
305static void fw2drv_caps(struct mlx5_caps *caps, void *out)
306{
307 struct mlx5_general_caps *gen = &caps->gen;
308
309 gen->max_srq_wqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_srq_sz);
310 gen->max_wqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_qp_sz);
311 gen->log_max_qp = MLX5_GET_PR(cmd_hca_cap, out, log_max_qp);
312 gen->log_max_strq = MLX5_GET_PR(cmd_hca_cap, out, log_max_strq_sz);
313 gen->log_max_srq = MLX5_GET_PR(cmd_hca_cap, out, log_max_srqs);
314 gen->max_cqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_cq_sz);
315 gen->log_max_cq = MLX5_GET_PR(cmd_hca_cap, out, log_max_cq);
316 gen->max_eqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_eq_sz);
317 gen->log_max_mkey = MLX5_GET_PR(cmd_hca_cap, out, log_max_mkey);
318 gen->log_max_eq = MLX5_GET_PR(cmd_hca_cap, out, log_max_eq);
319 gen->max_indirection = MLX5_GET_PR(cmd_hca_cap, out, max_indirection);
320 gen->log_max_mrw_sz = MLX5_GET_PR(cmd_hca_cap, out, log_max_mrw_sz);
321 gen->log_max_bsf_list_size = MLX5_GET_PR(cmd_hca_cap, out, log_max_bsf_list_size);
322 gen->log_max_klm_list_size = MLX5_GET_PR(cmd_hca_cap, out, log_max_klm_list_size);
323 gen->log_max_ra_req_dc = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_req_dc);
324 gen->log_max_ra_res_dc = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_res_dc);
325 gen->log_max_ra_req_qp = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_req_qp);
326 gen->log_max_ra_res_qp = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_res_qp);
327 gen->max_qp_counters = MLX5_GET_PR(cmd_hca_cap, out, max_qp_cnt);
328 gen->pkey_table_size = get_pkey_table_size(MLX5_GET_PR(cmd_hca_cap, out, pkey_table_size));
329 gen->local_ca_ack_delay = MLX5_GET_PR(cmd_hca_cap, out, local_ca_ack_delay);
330 gen->num_ports = MLX5_GET_PR(cmd_hca_cap, out, num_ports);
331 gen->log_max_msg = MLX5_GET_PR(cmd_hca_cap, out, log_max_msg);
332 gen->stat_rate_support = MLX5_GET_PR(cmd_hca_cap, out, stat_rate_support);
333 gen->flags = be64_to_cpu(*(__be64 *)MLX5_ADDR_OF(cmd_hca_cap, out, reserved_22));
334 pr_debug("flags = 0x%llx\n", gen->flags);
335 gen->uar_sz = MLX5_GET_PR(cmd_hca_cap, out, uar_sz);
336 gen->min_log_pg_sz = MLX5_GET_PR(cmd_hca_cap, out, log_pg_sz);
337 gen->bf_reg_size = MLX5_GET_PR(cmd_hca_cap, out, bf);
338 gen->bf_reg_size = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_bf_reg_size);
339 gen->max_sq_desc_sz = MLX5_GET_PR(cmd_hca_cap, out, max_wqe_sz_sq);
340 gen->max_rq_desc_sz = MLX5_GET_PR(cmd_hca_cap, out, max_wqe_sz_rq);
341 gen->max_dc_sq_desc_sz = MLX5_GET_PR(cmd_hca_cap, out, max_wqe_sz_sq_dc);
342 gen->max_qp_mcg = MLX5_GET_PR(cmd_hca_cap, out, max_qp_mcg);
343 gen->log_max_pd = MLX5_GET_PR(cmd_hca_cap, out, log_max_pd);
344 gen->log_max_xrcd = MLX5_GET_PR(cmd_hca_cap, out, log_max_xrcd);
345 gen->log_uar_page_sz = MLX5_GET_PR(cmd_hca_cap, out, log_uar_page_sz);
346}
347
348static const char *caps_opmod_str(u16 opmod)
280{ 349{
281 struct mlx5_cmd_query_hca_cap_mbox_out *query_out = NULL; 350 switch (opmod) {
282 struct mlx5_cmd_set_hca_cap_mbox_in *set_ctx = NULL; 351 case HCA_CAP_OPMOD_GET_MAX:
283 struct mlx5_cmd_query_hca_cap_mbox_in query_ctx; 352 return "GET_MAX";
284 struct mlx5_cmd_set_hca_cap_mbox_out set_out; 353 case HCA_CAP_OPMOD_GET_CUR:
285 u64 flags; 354 return "GET_CUR";
355 default:
356 return "Invalid";
357 }
358}
359
360int mlx5_core_get_caps(struct mlx5_core_dev *dev, struct mlx5_caps *caps,
361 u16 opmod)
362{
363 u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
364 int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
365 void *out;
286 int err; 366 int err;
287 367
288 memset(&query_ctx, 0, sizeof(query_ctx)); 368 memset(in, 0, sizeof(in));
289 query_out = kzalloc(sizeof(*query_out), GFP_KERNEL); 369 out = kzalloc(out_sz, GFP_KERNEL);
290 if (!query_out) 370 if (!out)
291 return -ENOMEM; 371 return -ENOMEM;
372 MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
373 MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
374 err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
375 if (err)
376 goto query_ex;
292 377
293 set_ctx = kzalloc(sizeof(*set_ctx), GFP_KERNEL); 378 err = mlx5_cmd_status_to_err_v2(out);
294 if (!set_ctx) { 379 if (err) {
295 err = -ENOMEM; 380 mlx5_core_warn(dev, "query max hca cap failed, %d\n", err);
296 goto query_ex; 381 goto query_ex;
297 } 382 }
383 mlx5_core_dbg(dev, "%s\n", caps_opmod_str(opmod));
384 fw2drv_caps(caps, MLX5_ADDR_OF(query_hca_cap_out, out, capability_struct));
385
386query_ex:
387 kfree(out);
388 return err;
389}
298 390
299 query_ctx.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_HCA_CAP); 391static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz)
300 query_ctx.hdr.opmod = cpu_to_be16(HCA_CAP_OPMOD_GET_CUR); 392{
301 err = mlx5_cmd_exec(dev, &query_ctx, sizeof(query_ctx), 393 u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)];
302 query_out, sizeof(*query_out)); 394 int err;
395
396 memset(out, 0, sizeof(out));
397
398 MLX5_SET(set_hca_cap_in, in, opcode, MLX5_CMD_OP_SET_HCA_CAP);
399 err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
303 if (err) 400 if (err)
304 goto query_ex; 401 return err;
305 402
306 err = mlx5_cmd_status_to_err(&query_out->hdr); 403 err = mlx5_cmd_status_to_err_v2(out);
307 if (err) { 404
308 mlx5_core_warn(dev, "query hca cap failed, %d\n", err); 405 return err;
406}
407
408static int handle_hca_cap(struct mlx5_core_dev *dev)
409{
410 void *set_ctx = NULL;
411 struct mlx5_profile *prof = dev->profile;
412 struct mlx5_caps *cur_caps = NULL;
413 struct mlx5_caps *max_caps = NULL;
414 int err = -ENOMEM;
415 int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
416
417 set_ctx = kzalloc(set_sz, GFP_KERNEL);
418 if (!set_ctx)
309 goto query_ex; 419 goto query_ex;
310 }
311 420
312 copy_rw_fields(&set_ctx->hca_cap, &query_out->hca_cap); 421 max_caps = kzalloc(sizeof(*max_caps), GFP_KERNEL);
422 if (!max_caps)
423 goto query_ex;
313 424
314 if (dev->profile && dev->profile->mask & MLX5_PROF_MASK_QP_SIZE) 425 cur_caps = kzalloc(sizeof(*cur_caps), GFP_KERNEL);
315 set_ctx->hca_cap.log_max_qp = dev->profile->log_max_qp; 426 if (!cur_caps)
427 goto query_ex;
316 428
317 flags = be64_to_cpu(query_out->hca_cap.flags); 429 err = mlx5_core_get_caps(dev, max_caps, HCA_CAP_OPMOD_GET_MAX);
318 /* disable checksum */ 430 if (err)
319 flags &= ~MLX5_DEV_CAP_FLAG_CMDIF_CSUM;
320
321 set_ctx->hca_cap.flags = cpu_to_be64(flags);
322 memset(&set_out, 0, sizeof(set_out));
323 set_ctx->hca_cap.log_uar_page_sz = cpu_to_be16(PAGE_SHIFT - 12);
324 set_ctx->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_SET_HCA_CAP);
325 err = mlx5_cmd_exec(dev, set_ctx, sizeof(*set_ctx),
326 &set_out, sizeof(set_out));
327 if (err) {
328 mlx5_core_warn(dev, "set hca cap failed, %d\n", err);
329 goto query_ex; 431 goto query_ex;
330 }
331 432
332 err = mlx5_cmd_status_to_err(&set_out.hdr); 433 err = mlx5_core_get_caps(dev, cur_caps, HCA_CAP_OPMOD_GET_CUR);
333 if (err) 434 if (err)
334 goto query_ex; 435 goto query_ex;
335 436
437 /* we limit the size of the pkey table to 128 entries for now */
438 cur_caps->gen.pkey_table_size = 128;
439
440 if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
441 cur_caps->gen.log_max_qp = prof->log_max_qp;
442
443 /* disable checksum */
444 cur_caps->gen.flags &= ~MLX5_DEV_CAP_FLAG_CMDIF_CSUM;
445
446 copy_rw_fields(MLX5_ADDR_OF(set_hca_cap_in, set_ctx, hca_capability_struct),
447 cur_caps);
448 err = set_caps(dev, set_ctx, set_sz);
449
336query_ex: 450query_ex:
337 kfree(query_out); 451 kfree(cur_caps);
452 kfree(max_caps);
338 kfree(set_ctx); 453 kfree(set_ctx);
339 454
340 return err; 455 return err;
@@ -782,6 +897,7 @@ static void remove_one(struct pci_dev *pdev)
782 897
783static const struct pci_device_id mlx5_core_pci_table[] = { 898static const struct pci_device_id mlx5_core_pci_table[] = {
784 { PCI_VDEVICE(MELLANOX, 4113) }, /* MT4113 Connect-IB */ 899 { PCI_VDEVICE(MELLANOX, 4113) }, /* MT4113 Connect-IB */
900 { PCI_VDEVICE(MELLANOX, 4115) }, /* ConnectX-4 */
785 { 0, } 901 { 0, }
786}; 902};
787 903
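
The rework above replaces hand-packed mailbox structs with the mlx5_ifc macro layer: buffers are sized with MLX5_ST_SZ_BYTES()/MLX5_ST_SZ_DW(), and fields are read and written by name at the bit offsets the interface file defines, so no manual be16/be64 packing remains. Condensed from mlx5_core_get_caps() above, the shape of a macro-built query (declarations trimmed to a sketch):

	u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
	void *out, *caps;
	u8 log_max_qp;
	int err;

	memset(in, 0, sizeof(in));
	out = kzalloc(MLX5_ST_SZ_BYTES(query_hca_cap_out), GFP_KERNEL);

	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, op_mod, HCA_CAP_OPMOD_GET_CUR);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out,
			    MLX5_ST_SZ_BYTES(query_hca_cap_out));

	/* read fields back by name from the capability struct */
	caps = MLX5_ADDR_OF(query_hca_cap_out, out, capability_struct);
	log_max_qp = MLX5_GET_PR(cmd_hca_cap, caps, log_max_qp);
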
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index 8145b4668229..5261a2b0da43 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -39,28 +39,53 @@
39 39
40#include "mlx5_core.h" 40#include "mlx5_core.h"
41 41
42void mlx5_qp_event(struct mlx5_core_dev *dev, u32 qpn, int event_type) 42static struct mlx5_core_rsc_common *mlx5_get_rsc(struct mlx5_core_dev *dev,
43 u32 rsn)
43{ 44{
44 struct mlx5_qp_table *table = &dev->priv.qp_table; 45 struct mlx5_qp_table *table = &dev->priv.qp_table;
45 struct mlx5_core_qp *qp; 46 struct mlx5_core_rsc_common *common;
46 47
47 spin_lock(&table->lock); 48 spin_lock(&table->lock);
48 49
49 qp = radix_tree_lookup(&table->tree, qpn); 50 common = radix_tree_lookup(&table->tree, rsn);
50 if (qp) 51 if (common)
51 atomic_inc(&qp->refcount); 52 atomic_inc(&common->refcount);
52 53
53 spin_unlock(&table->lock); 54 spin_unlock(&table->lock);
54 55
55 if (!qp) { 56 if (!common) {
56 mlx5_core_warn(dev, "Async event for bogus QP 0x%x\n", qpn); 57 mlx5_core_warn(dev, "Async event for bogus resource 0x%x\n",
57 return; 58 rsn);
59 return NULL;
58 } 60 }
61 return common;
62}
59 63
60 qp->event(qp, event_type); 64void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common)
65{
66 if (atomic_dec_and_test(&common->refcount))
67 complete(&common->free);
68}
69
70void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type)
71{
72 struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, rsn);
73 struct mlx5_core_qp *qp;
74
75 if (!common)
76 return;
77
78 switch (common->res) {
79 case MLX5_RES_QP:
80 qp = (struct mlx5_core_qp *)common;
81 qp->event(qp, event_type);
82 break;
83
84 default:
85 mlx5_core_warn(dev, "invalid resource type for 0x%x\n", rsn);
86 }
61 87
62 if (atomic_dec_and_test(&qp->refcount)) 88 mlx5_core_put_rsc(common);
63 complete(&qp->free);
64} 89}
65 90
66int mlx5_core_create_qp(struct mlx5_core_dev *dev, 91int mlx5_core_create_qp(struct mlx5_core_dev *dev,
@@ -92,6 +117,7 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
92 qp->qpn = be32_to_cpu(out.qpn) & 0xffffff; 117 qp->qpn = be32_to_cpu(out.qpn) & 0xffffff;
93 mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn); 118 mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn);
94 119
120 qp->common.res = MLX5_RES_QP;
95 spin_lock_irq(&table->lock); 121 spin_lock_irq(&table->lock);
96 err = radix_tree_insert(&table->tree, qp->qpn, qp); 122 err = radix_tree_insert(&table->tree, qp->qpn, qp);
97 spin_unlock_irq(&table->lock); 123 spin_unlock_irq(&table->lock);
@@ -106,9 +132,9 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
106 qp->qpn); 132 qp->qpn);
107 133
108 qp->pid = current->pid; 134 qp->pid = current->pid;
109 atomic_set(&qp->refcount, 1); 135 atomic_set(&qp->common.refcount, 1);
110 atomic_inc(&dev->num_qps); 136 atomic_inc(&dev->num_qps);
111 init_completion(&qp->free); 137 init_completion(&qp->common.free);
112 138
113 return 0; 139 return 0;
114 140
@@ -138,9 +164,8 @@ int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
138 radix_tree_delete(&table->tree, qp->qpn); 164 radix_tree_delete(&table->tree, qp->qpn);
139 spin_unlock_irqrestore(&table->lock, flags); 165 spin_unlock_irqrestore(&table->lock, flags);
140 166
141 if (atomic_dec_and_test(&qp->refcount)) 167 mlx5_core_put_rsc((struct mlx5_core_rsc_common *)qp);
142 complete(&qp->free); 168 wait_for_completion(&qp->common.free);
143 wait_for_completion(&qp->free);
144 169
145 memset(&in, 0, sizeof(in)); 170 memset(&in, 0, sizeof(in));
146 memset(&out, 0, sizeof(out)); 171 memset(&out, 0, sizeof(out));
@@ -184,13 +209,10 @@ int mlx5_core_qp_modify(struct mlx5_core_dev *dev, enum mlx5_qp_state cur_state,
184 [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, 209 [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
185 [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, 210 [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
186 [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_RTS2RTS_QP, 211 [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_RTS2RTS_QP,
187 [MLX5_QP_STATE_SQD] = MLX5_CMD_OP_RTS2SQD_QP,
188 }, 212 },
189 [MLX5_QP_STATE_SQD] = { 213 [MLX5_QP_STATE_SQD] = {
190 [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, 214 [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
191 [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, 215 [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
192 [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_SQD2RTS_QP,
193 [MLX5_QP_STATE_SQD] = MLX5_CMD_OP_SQD2SQD_QP,
194 }, 216 },
195 [MLX5_QP_STATE_SQER] = { 217 [MLX5_QP_STATE_SQER] = {
196 [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, 218 [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
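
The qp.c rework above introduces a pattern worth spelling out: each resource embeds a common header as its first member, one radix tree stores the common pointers keyed by resource number, and a refcount plus completion gates teardown against in-flight event handlers. Stripped-down sketch (simplified types, not the full driver code):

	enum rsc_type { RSC_QP };

	struct rsc_common {
		enum rsc_type res;
		atomic_t refcount;
		struct completion free;
	};

	struct core_qp {
		struct rsc_common common;	/* must be the first member */
		u32 qpn;
	};

	static void put_rsc(struct rsc_common *c)
	{
		if (atomic_dec_and_test(&c->refcount))
			complete(&c->free);
	}

	/* teardown: drop the creation reference, then wait until every
	 * event handler that looked the resource up has dropped its own */
	static void destroy_qp(struct core_qp *qp)
	{
		put_rsc(&qp->common);
		wait_for_completion(&qp->common.free);
	}
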
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
index 68f5d9c77c7b..0a6348cefc01 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
@@ -174,11 +174,11 @@ int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari)
174 for (i = 0; i < tot_uuars; i++) { 174 for (i = 0; i < tot_uuars; i++) {
175 bf = &uuari->bfs[i]; 175 bf = &uuari->bfs[i];
176 176
177 bf->buf_size = dev->caps.bf_reg_size / 2; 177 bf->buf_size = dev->caps.gen.bf_reg_size / 2;
178 bf->uar = &uuari->uars[i / MLX5_BF_REGS_PER_PAGE]; 178 bf->uar = &uuari->uars[i / MLX5_BF_REGS_PER_PAGE];
179 bf->regreg = uuari->uars[i / MLX5_BF_REGS_PER_PAGE].map; 179 bf->regreg = uuari->uars[i / MLX5_BF_REGS_PER_PAGE].map;
180 bf->reg = NULL; /* Add WC support */ 180 bf->reg = NULL; /* Add WC support */
181 bf->offset = (i % MLX5_BF_REGS_PER_PAGE) * dev->caps.bf_reg_size + 181 bf->offset = (i % MLX5_BF_REGS_PER_PAGE) * dev->caps.gen.bf_reg_size +
182 MLX5_BF_OFFSET; 182 MLX5_BF_OFFSET;
183 bf->need_lock = need_uuar_lock(i); 183 bf->need_lock = need_uuar_lock(i);
184 spin_lock_init(&bf->lock); 184 spin_lock_init(&bf->lock);
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index 2f12c88c66ab..bde1b70f473b 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -511,7 +511,6 @@ static int moxart_mac_probe(struct platform_device *pdev)
511 goto init_fail; 511 goto init_fail;
512 } 512 }
513 513
514 ether_setup(ndev);
515 ndev->netdev_ops = &moxart_netdev_ops; 514 ndev->netdev_ops = &moxart_netdev_ops;
516 netif_napi_add(ndev, &priv->napi, moxart_rx_poll, RX_DESC_NUM); 515 netif_napi_add(ndev, &priv->napi, moxart_rx_poll, RX_DESC_NUM);
517 ndev->priv_flags |= IFF_UNICAST_FLT; 516 ndev->priv_flags |= IFF_UNICAST_FLT;
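
This ether_setup() removal, like the identical ones in netx-eth, w90p910_ether, lpc_eth and qca_spi below, relies on the allocation path having already done the work: alloc_etherdev() passes ether_setup as the setup callback, so calling it again afterwards only re-applies defaults. The call chain, sketched from this era's etherdevice.h/eth.c:

	#define alloc_etherdev(sizeof_priv) alloc_etherdev_mqs(sizeof_priv, 1, 1)

	struct net_device *alloc_etherdev_mqs(int sizeof_priv,
					      unsigned int txqs, unsigned int rxqs)
	{
		return alloc_netdev_mqs(sizeof_priv, "eth%d", NET_NAME_UNKNOWN,
					ether_setup, txqs, rxqs);
	}
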
diff --git a/drivers/net/ethernet/netx-eth.c b/drivers/net/ethernet/netx-eth.c
index 31eb911e4763..8176c8a1cc6a 100644
--- a/drivers/net/ethernet/netx-eth.c
+++ b/drivers/net/ethernet/netx-eth.c
@@ -315,8 +315,6 @@ static int netx_eth_enable(struct net_device *ndev)
315 unsigned int mac4321, mac65; 315 unsigned int mac4321, mac65;
316 int running, i; 316 int running, i;
317 317
318 ether_setup(ndev);
319
320 ndev->netdev_ops = &netx_eth_netdev_ops; 318 ndev->netdev_ops = &netx_eth_netdev_ops;
321 ndev->watchdog_timeo = msecs_to_jiffies(5000); 319 ndev->watchdog_timeo = msecs_to_jiffies(5000);
322 320
diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c
index 79645f74b3a8..379b7fbded78 100644
--- a/drivers/net/ethernet/nuvoton/w90p910_ether.c
+++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c
@@ -943,7 +943,6 @@ static int w90p910_ether_setup(struct net_device *dev)
943{ 943{
944 struct w90p910_ether *ether = netdev_priv(dev); 944 struct w90p910_ether *ether = netdev_priv(dev);
945 945
946 ether_setup(dev);
947 dev->netdev_ops = &w90p910_ether_netdev_ops; 946 dev->netdev_ops = &w90p910_ether_netdev_ops;
948 dev->ethtool_ops = &w90p910_ether_ethtool_ops; 947 dev->ethtool_ops = &w90p910_ether_ethtool_ops;
949 948
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index a44a03c45014..66fd868152e5 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -1377,9 +1377,6 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
1377 goto err_out_iounmap; 1377 goto err_out_iounmap;
1378 } 1378 }
1379 1379
1380 /* Fill in the fields of the device structure with ethernet values. */
1381 ether_setup(ndev);
1382
1383 /* Setup driver functions */ 1380 /* Setup driver functions */
1384 ndev->netdev_ops = &lpc_netdev_ops; 1381 ndev->netdev_ops = &lpc_netdev_ops;
1385 ndev->ethtool_ops = &lpc_eth_ethtool_ops; 1382 ndev->ethtool_ops = &lpc_eth_ethtool_ops;
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index 32058614151a..5c4068353f66 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -135,6 +135,7 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter)
135 int i, j; 135 int i, j;
136 struct nx_host_tx_ring *tx_ring = adapter->tx_ring; 136 struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
137 137
138 spin_lock(&adapter->tx_clean_lock);
138 cmd_buf = tx_ring->cmd_buf_arr; 139 cmd_buf = tx_ring->cmd_buf_arr;
139 for (i = 0; i < tx_ring->num_desc; i++) { 140 for (i = 0; i < tx_ring->num_desc; i++) {
140 buffrag = cmd_buf->frag_array; 141 buffrag = cmd_buf->frag_array;
@@ -158,6 +159,7 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter)
158 } 159 }
159 cmd_buf++; 160 cmd_buf++;
160 } 161 }
162 spin_unlock(&adapter->tx_clean_lock);
161} 163}
162 164
163void netxen_free_sw_resources(struct netxen_adapter *adapter) 165void netxen_free_sw_resources(struct netxen_adapter *adapter)
@@ -1792,9 +1794,9 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter)
1792 break; 1794 break;
1793 } 1795 }
1794 1796
1795 if (count && netif_running(netdev)) { 1797 tx_ring->sw_consumer = sw_consumer;
1796 tx_ring->sw_consumer = sw_consumer;
1797 1798
1799 if (count && netif_running(netdev)) {
1798 smp_mb(); 1800 smp_mb();
1799 1801
1800 if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) 1802 if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev))
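
Two fixes land in this file: netxen_release_tx_buffers() now takes tx_clean_lock around its own buffer walk (the caller drops it in the next diff), and the final hunk moves the sw_consumer store out of the conditional. Without that move, a completion pass on a stopped or non-running device would discard its progress and the xmit side could keep seeing a stale consumer index. Annotated, the restored ordering reads:

	tx_ring->sw_consumer = sw_consumer;	/* always publish progress */

	if (count && netif_running(netdev)) {
		smp_mb();	/* order the store above against the queue
				 * checks; pairs with the xmit-side barrier */
		if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev))
			netif_wake_queue(netdev);
	}
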
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 32456c79cc73..0b2a1ccd276d 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -1186,7 +1186,6 @@ __netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev)
1186 return; 1186 return;
1187 1187
1188 smp_mb(); 1188 smp_mb();
1189 spin_lock(&adapter->tx_clean_lock);
1190 netif_carrier_off(netdev); 1189 netif_carrier_off(netdev);
1191 netif_tx_disable(netdev); 1190 netif_tx_disable(netdev);
1192 1191
@@ -1204,7 +1203,6 @@ __netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev)
1204 netxen_napi_disable(adapter); 1203 netxen_napi_disable(adapter);
1205 1204
1206 netxen_release_tx_buffers(adapter); 1205 netxen_release_tx_buffers(adapter);
1207 spin_unlock(&adapter->tx_clean_lock);
1208} 1206}
1209 1207
1210/* Usage: During suspend and firmware recovery module */ 1208/* Usage: During suspend and firmware recovery module */
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index 9a2cfe4efac6..2bb48d57e7a5 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -1177,9 +1177,8 @@ static void qlcnic_83xx_setup_idc_parameters(struct qlcnic_adapter *adapter)
1177{ 1177{
1178 u32 idc_params, val; 1178 u32 idc_params, val;
1179 1179
1180 if (qlcnic_83xx_lockless_flash_read32(adapter, 1180 if (qlcnic_83xx_flash_read32(adapter, QLC_83XX_IDC_FLASH_PARAM_ADDR,
1181 QLC_83XX_IDC_FLASH_PARAM_ADDR, 1181 (u8 *)&idc_params, 1)) {
1182 (u8 *)&idc_params, 1)) {
1183 dev_info(&adapter->pdev->dev, 1182 dev_info(&adapter->pdev->dev,
1184 "%s:failed to get IDC params from flash\n", __func__); 1183 "%s:failed to get IDC params from flash\n", __func__);
1185 adapter->dev_init_timeo = QLC_83XX_IDC_INIT_TIMEOUT_SECS; 1184 adapter->dev_init_timeo = QLC_83XX_IDC_INIT_TIMEOUT_SECS;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 141f116eb868..494e8105adee 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -1333,21 +1333,21 @@ static void qlcnic_get_ethtool_stats(struct net_device *dev,
1333 struct qlcnic_host_tx_ring *tx_ring; 1333 struct qlcnic_host_tx_ring *tx_ring;
1334 struct qlcnic_esw_statistics port_stats; 1334 struct qlcnic_esw_statistics port_stats;
1335 struct qlcnic_mac_statistics mac_stats; 1335 struct qlcnic_mac_statistics mac_stats;
1336 int index, ret, length, size, tx_size, ring; 1336 int index, ret, length, size, ring;
1337 char *p; 1337 char *p;
1338 1338
1339 tx_size = adapter->drv_tx_rings * QLCNIC_TX_STATS_LEN; 1339 memset(data, 0, stats->n_stats * sizeof(u64));
1340 1340
1341 memset(data, 0, tx_size * sizeof(u64));
1342 for (ring = 0, index = 0; ring < adapter->drv_tx_rings; ring++) { 1341 for (ring = 0, index = 0; ring < adapter->drv_tx_rings; ring++) {
1343 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) { 1342 if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) {
1344 tx_ring = &adapter->tx_ring[ring]; 1343 tx_ring = &adapter->tx_ring[ring];
1345 data = qlcnic_fill_tx_queue_stats(data, tx_ring); 1344 data = qlcnic_fill_tx_queue_stats(data, tx_ring);
1346 qlcnic_update_stats(adapter); 1345 qlcnic_update_stats(adapter);
1346 } else {
1347 data += QLCNIC_TX_STATS_LEN;
1347 } 1348 }
1348 } 1349 }
1349 1350
1350 memset(data, 0, stats->n_stats * sizeof(u64));
1351 length = QLCNIC_STATS_LEN; 1351 length = QLCNIC_STATS_LEN;
1352 for (index = 0; index < length; index++) { 1352 for (index = 0; index < length; index++) {
1353 p = (char *)adapter + qlcnic_gstrings_stats[index].stat_offset; 1353 p = (char *)adapter + qlcnic_gstrings_stats[index].stat_offset;
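
The ethtool fix above is about slot accounting: the output buffer is zeroed for all n_stats slots up front, and a ring that cannot report still advances the cursor by QLCNIC_TX_STATS_LEN, so the global statistics that follow land at their expected offsets. The invariant, sketched with a hypothetical up-check predicate:

	memset(data, 0, stats->n_stats * sizeof(u64));

	for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
		if (adapter_is_up(adapter))	/* hypothetical check */
			data = qlcnic_fill_tx_queue_stats(data,
							  &adapter->tx_ring[ring]);
		else
			data += QLCNIC_TX_STATS_LEN;	/* keep offsets fixed */
	}
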
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 74eb520e2649..2c811f66d5ac 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -810,8 +810,6 @@ qcaspi_netdev_setup(struct net_device *dev)
810{ 810{
811 struct qcaspi *qca = NULL; 811 struct qcaspi *qca = NULL;
812 812
813 ether_setup(dev);
814
815 dev->netdev_ops = &qcaspi_netdev_ops; 813 dev->netdev_ops = &qcaspi_netdev_ops;
816 qcaspi_set_ethtool_ops(dev); 814 qcaspi_set_ethtool_ops(dev);
817 dev->watchdog_timeo = QCASPI_TX_TIMEOUT; 815 dev->watchdog_timeo = QCASPI_TX_TIMEOUT;
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 1d81238fcb93..54476ba42477 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -969,59 +969,6 @@ DECLARE_RTL_COND(rtl_eriar_cond)
969 return RTL_R32(ERIAR) & ERIAR_FLAG; 969 return RTL_R32(ERIAR) & ERIAR_FLAG;
970} 970}
971 971
972static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd)
973{
974 void __iomem *ioaddr = tp->mmio_addr;
975
976 RTL_W8(ERIDR, cmd);
977 RTL_W32(ERIAR, 0x800010e8);
978 msleep(2);
979
980 if (!rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 5))
981 return;
982
983 ocp_write(tp, 0x1, 0x30, 0x00000001);
984}
985
986#define OOB_CMD_RESET 0x00
987#define OOB_CMD_DRIVER_START 0x05
988#define OOB_CMD_DRIVER_STOP 0x06
989
990static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp)
991{
992 return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 0xb8 : 0x10;
993}
994
995DECLARE_RTL_COND(rtl_ocp_read_cond)
996{
997 u16 reg;
998
999 reg = rtl8168_get_ocp_reg(tp);
1000
1001 return ocp_read(tp, 0x0f, reg) & 0x00000800;
1002}
1003
1004static void rtl8168_driver_start(struct rtl8169_private *tp)
1005{
1006 rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START);
1007
1008 rtl_msleep_loop_wait_high(tp, &rtl_ocp_read_cond, 10, 10);
1009}
1010
1011static void rtl8168_driver_stop(struct rtl8169_private *tp)
1012{
1013 rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP);
1014
1015 rtl_msleep_loop_wait_low(tp, &rtl_ocp_read_cond, 10, 10);
1016}
1017
1018static int r8168dp_check_dash(struct rtl8169_private *tp)
1019{
1020 u16 reg = rtl8168_get_ocp_reg(tp);
1021
1022 return (ocp_read(tp, 0x0f, reg) & 0x00008000) ? 1 : 0;
1023}
1024
1025static bool rtl_ocp_reg_failure(struct rtl8169_private *tp, u32 reg) 972static bool rtl_ocp_reg_failure(struct rtl8169_private *tp, u32 reg)
1026{ 973{
1027 if (reg & 0xffff0001) { 974 if (reg & 0xffff0001) {
@@ -1246,12 +1193,12 @@ static void rtl_patchphy(struct rtl8169_private *tp, int reg_addr, int value)
1246 rtl_writephy(tp, reg_addr, rtl_readphy(tp, reg_addr) | value); 1193 rtl_writephy(tp, reg_addr, rtl_readphy(tp, reg_addr) | value);
1247} 1194}
1248 1195
1249static void rtl_w1w0_phy(struct rtl8169_private *tp, int reg_addr, int p, int m) 1196static void rtl_w0w1_phy(struct rtl8169_private *tp, int reg_addr, int p, int m)
1250{ 1197{
1251 int val; 1198 int val;
1252 1199
1253 val = rtl_readphy(tp, reg_addr); 1200 val = rtl_readphy(tp, reg_addr);
1254 rtl_writephy(tp, reg_addr, (val | p) & ~m); 1201 rtl_writephy(tp, reg_addr, (val & ~m) | p);
1255} 1202}
1256 1203
1257static void rtl_mdio_write(struct net_device *dev, int phy_id, int location, 1204static void rtl_mdio_write(struct net_device *dev, int phy_id, int location,
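
The rename from rtl_w1w0_phy to rtl_w0w1_phy tracks a real behavioural fix in the PHY variant, not just taste: the old body set the p bits and then applied the clear mask, so any bit present in both p and m ended up cleared; the new body clears first, letting the set bits win. A worked example:

	static int w1w0(int val, int p, int m)
	{
		return (val | p) & ~m;	/* old: a bit in both p and m is lost */
	}

	static int w0w1(int val, int p, int m)
	{
		return (val & ~m) | p;	/* new: set bits survive the mask */
	}

	/* val = 0x0000, p = 0x0010, m = 0x00ff:
	 *   w1w0() == 0x0000  (requested bit 4 cleared again)
	 *   w0w1() == 0x0010  (bit 4 set as intended)          */
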
@@ -1320,7 +1267,7 @@ static u32 rtl_eri_read(struct rtl8169_private *tp, int addr, int type)
1320 RTL_R32(ERIDR) : ~0; 1267 RTL_R32(ERIDR) : ~0;
1321} 1268}
1322 1269
1323static void rtl_w1w0_eri(struct rtl8169_private *tp, int addr, u32 mask, u32 p, 1270static void rtl_w0w1_eri(struct rtl8169_private *tp, int addr, u32 mask, u32 p,
1324 u32 m, int type) 1271 u32 m, int type)
1325{ 1272{
1326 u32 val; 1273 u32 val;
@@ -1329,6 +1276,52 @@ static void rtl_w1w0_eri(struct rtl8169_private *tp, int addr, u32 mask, u32 p,
1329 rtl_eri_write(tp, addr, mask, (val & ~m) | p, type); 1276 rtl_eri_write(tp, addr, mask, (val & ~m) | p, type);
1330} 1277}
1331 1278
1279static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd)
1280{
1281 rtl_eri_write(tp, 0xe8, ERIAR_MASK_0001, cmd, ERIAR_EXGMAC);
1282
1283 ocp_write(tp, 0x1, 0x30, 0x00000001);
1284}
1285
1286#define OOB_CMD_RESET 0x00
1287#define OOB_CMD_DRIVER_START 0x05
1288#define OOB_CMD_DRIVER_STOP 0x06
1289
1290static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp)
1291{
1292 return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 0xb8 : 0x10;
1293}
1294
1295DECLARE_RTL_COND(rtl_ocp_read_cond)
1296{
1297 u16 reg;
1298
1299 reg = rtl8168_get_ocp_reg(tp);
1300
1301 return ocp_read(tp, 0x0f, reg) & 0x00000800;
1302}
1303
1304static void rtl8168_driver_start(struct rtl8169_private *tp)
1305{
1306 rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START);
1307
1308 rtl_msleep_loop_wait_high(tp, &rtl_ocp_read_cond, 10, 10);
1309}
1310
1311static void rtl8168_driver_stop(struct rtl8169_private *tp)
1312{
1313 rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP);
1314
1315 rtl_msleep_loop_wait_low(tp, &rtl_ocp_read_cond, 10, 10);
1316}
1317
1318static int r8168_check_dash(struct rtl8169_private *tp)
1319{
1320 u16 reg = rtl8168_get_ocp_reg(tp);
1321
1322 return (ocp_read(tp, 0x0f, reg) & 0x00008000) ? 1 : 0;
1323}
1324
1332struct exgmac_reg { 1325struct exgmac_reg {
1333 u16 addr; 1326 u16 addr;
1334 u16 mask; 1327 u16 mask;
@@ -1473,9 +1466,9 @@ static void rtl_link_chg_patch(struct rtl8169_private *tp)
1473 ERIAR_EXGMAC); 1466 ERIAR_EXGMAC);
1474 } 1467 }
1475 /* Reset packet filter */ 1468 /* Reset packet filter */
1476 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, 1469 rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01,
1477 ERIAR_EXGMAC); 1470 ERIAR_EXGMAC);
1478 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, 1471 rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00,
1479 ERIAR_EXGMAC); 1472 ERIAR_EXGMAC);
1480 } else if (tp->mac_version == RTL_GIGA_MAC_VER_35 || 1473 } else if (tp->mac_version == RTL_GIGA_MAC_VER_35 ||
1481 tp->mac_version == RTL_GIGA_MAC_VER_36) { 1474 tp->mac_version == RTL_GIGA_MAC_VER_36) {
@@ -1546,8 +1539,20 @@ static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
1546 if (options & LinkUp) 1539 if (options & LinkUp)
1547 wolopts |= WAKE_PHY; 1540 wolopts |= WAKE_PHY;
1548 switch (tp->mac_version) { 1541 switch (tp->mac_version) {
1542 case RTL_GIGA_MAC_VER_34:
1543 case RTL_GIGA_MAC_VER_35:
1544 case RTL_GIGA_MAC_VER_36:
1545 case RTL_GIGA_MAC_VER_37:
1546 case RTL_GIGA_MAC_VER_38:
1547 case RTL_GIGA_MAC_VER_40:
1548 case RTL_GIGA_MAC_VER_41:
1549 case RTL_GIGA_MAC_VER_42:
1550 case RTL_GIGA_MAC_VER_43:
1551 case RTL_GIGA_MAC_VER_44:
1549 case RTL_GIGA_MAC_VER_45: 1552 case RTL_GIGA_MAC_VER_45:
1550 case RTL_GIGA_MAC_VER_46: 1553 case RTL_GIGA_MAC_VER_46:
1554 case RTL_GIGA_MAC_VER_47:
1555 case RTL_GIGA_MAC_VER_48:
1551 if (rtl_eri_read(tp, 0xdc, ERIAR_EXGMAC) & MagicPacket_v2) 1556 if (rtl_eri_read(tp, 0xdc, ERIAR_EXGMAC) & MagicPacket_v2)
1552 wolopts |= WAKE_MAGIC; 1557 wolopts |= WAKE_MAGIC;
1553 break; 1558 break;
@@ -1601,18 +1606,30 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
1601 RTL_W8(Cfg9346, Cfg9346_Unlock); 1606 RTL_W8(Cfg9346, Cfg9346_Unlock);
1602 1607
1603 switch (tp->mac_version) { 1608 switch (tp->mac_version) {
1609 case RTL_GIGA_MAC_VER_34:
1610 case RTL_GIGA_MAC_VER_35:
1611 case RTL_GIGA_MAC_VER_36:
1612 case RTL_GIGA_MAC_VER_37:
1613 case RTL_GIGA_MAC_VER_38:
1614 case RTL_GIGA_MAC_VER_40:
1615 case RTL_GIGA_MAC_VER_41:
1616 case RTL_GIGA_MAC_VER_42:
1617 case RTL_GIGA_MAC_VER_43:
1618 case RTL_GIGA_MAC_VER_44:
1604 case RTL_GIGA_MAC_VER_45: 1619 case RTL_GIGA_MAC_VER_45:
1605 case RTL_GIGA_MAC_VER_46: 1620 case RTL_GIGA_MAC_VER_46:
1621 case RTL_GIGA_MAC_VER_47:
1622 case RTL_GIGA_MAC_VER_48:
1606 tmp = ARRAY_SIZE(cfg) - 1; 1623 tmp = ARRAY_SIZE(cfg) - 1;
1607 if (wolopts & WAKE_MAGIC) 1624 if (wolopts & WAKE_MAGIC)
1608 rtl_w1w0_eri(tp, 1625 rtl_w0w1_eri(tp,
1609 0x0dc, 1626 0x0dc,
1610 ERIAR_MASK_0100, 1627 ERIAR_MASK_0100,
1611 MagicPacket_v2, 1628 MagicPacket_v2,
1612 0x0000, 1629 0x0000,
1613 ERIAR_EXGMAC); 1630 ERIAR_EXGMAC);
1614 else 1631 else
1615 rtl_w1w0_eri(tp, 1632 rtl_w0w1_eri(tp,
1616 0x0dc, 1633 0x0dc,
1617 ERIAR_MASK_0100, 1634 ERIAR_MASK_0100,
1618 0x0000, 1635 0x0000,
@@ -2877,8 +2894,8 @@ static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
2877 * Fine Tune Switching regulator parameter 2894 * Fine Tune Switching regulator parameter
2878 */ 2895 */
2879 rtl_writephy(tp, 0x1f, 0x0002); 2896 rtl_writephy(tp, 0x1f, 0x0002);
2880 rtl_w1w0_phy(tp, 0x0b, 0x0010, 0x00ef); 2897 rtl_w0w1_phy(tp, 0x0b, 0x0010, 0x00ef);
2881 rtl_w1w0_phy(tp, 0x0c, 0xa200, 0x5d00); 2898 rtl_w0w1_phy(tp, 0x0c, 0xa200, 0x5d00);
2882 2899
2883 if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) { 2900 if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
2884 static const struct phy_reg phy_reg_init[] = { 2901 static const struct phy_reg phy_reg_init[] = {
@@ -2927,8 +2944,8 @@ static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
2927 2944
2928 /* Fine tune PLL performance */ 2945 /* Fine tune PLL performance */
2929 rtl_writephy(tp, 0x1f, 0x0002); 2946 rtl_writephy(tp, 0x1f, 0x0002);
2930 rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600); 2947 rtl_w0w1_phy(tp, 0x02, 0x0100, 0x0600);
2931 rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000); 2948 rtl_w0w1_phy(tp, 0x03, 0x0000, 0xe000);
2932 2949
2933 rtl_writephy(tp, 0x1f, 0x0005); 2950 rtl_writephy(tp, 0x1f, 0x0005);
2934 rtl_writephy(tp, 0x05, 0x001b); 2951 rtl_writephy(tp, 0x05, 0x001b);
@@ -3025,8 +3042,8 @@ static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
3025 3042
3026 /* Fine tune PLL performance */ 3043 /* Fine tune PLL performance */
3027 rtl_writephy(tp, 0x1f, 0x0002); 3044 rtl_writephy(tp, 0x1f, 0x0002);
3028 rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600); 3045 rtl_w0w1_phy(tp, 0x02, 0x0100, 0x0600);
3029 rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000); 3046 rtl_w0w1_phy(tp, 0x03, 0x0000, 0xe000);
3030 3047
3031 /* Switching regulator Slew rate */ 3048 /* Switching regulator Slew rate */
3032 rtl_writephy(tp, 0x1f, 0x0002); 3049 rtl_writephy(tp, 0x1f, 0x0002);
@@ -3154,32 +3171,32 @@ static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp)
3154 /* DCO enable for 10M IDLE Power */ 3171 /* DCO enable for 10M IDLE Power */
3155 rtl_writephy(tp, 0x1f, 0x0007); 3172 rtl_writephy(tp, 0x1f, 0x0007);
3156 rtl_writephy(tp, 0x1e, 0x0023); 3173 rtl_writephy(tp, 0x1e, 0x0023);
3157 rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000); 3174 rtl_w0w1_phy(tp, 0x17, 0x0006, 0x0000);
3158 rtl_writephy(tp, 0x1f, 0x0000); 3175 rtl_writephy(tp, 0x1f, 0x0000);
3159 3176
3160 /* For impedance matching */ 3177 /* For impedance matching */
3161 rtl_writephy(tp, 0x1f, 0x0002); 3178 rtl_writephy(tp, 0x1f, 0x0002);
3162 rtl_w1w0_phy(tp, 0x08, 0x8000, 0x7f00); 3179 rtl_w0w1_phy(tp, 0x08, 0x8000, 0x7f00);
3163 rtl_writephy(tp, 0x1f, 0x0000); 3180 rtl_writephy(tp, 0x1f, 0x0000);
3164 3181
3165 /* PHY auto speed down */ 3182 /* PHY auto speed down */
3166 rtl_writephy(tp, 0x1f, 0x0007); 3183 rtl_writephy(tp, 0x1f, 0x0007);
3167 rtl_writephy(tp, 0x1e, 0x002d); 3184 rtl_writephy(tp, 0x1e, 0x002d);
3168 rtl_w1w0_phy(tp, 0x18, 0x0050, 0x0000); 3185 rtl_w0w1_phy(tp, 0x18, 0x0050, 0x0000);
3169 rtl_writephy(tp, 0x1f, 0x0000); 3186 rtl_writephy(tp, 0x1f, 0x0000);
3170 rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000); 3187 rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000);
3171 3188
3172 rtl_writephy(tp, 0x1f, 0x0005); 3189 rtl_writephy(tp, 0x1f, 0x0005);
3173 rtl_writephy(tp, 0x05, 0x8b86); 3190 rtl_writephy(tp, 0x05, 0x8b86);
3174 rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000); 3191 rtl_w0w1_phy(tp, 0x06, 0x0001, 0x0000);
3175 rtl_writephy(tp, 0x1f, 0x0000); 3192 rtl_writephy(tp, 0x1f, 0x0000);
3176 3193
3177 rtl_writephy(tp, 0x1f, 0x0005); 3194 rtl_writephy(tp, 0x1f, 0x0005);
3178 rtl_writephy(tp, 0x05, 0x8b85); 3195 rtl_writephy(tp, 0x05, 0x8b85);
3179 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000); 3196 rtl_w0w1_phy(tp, 0x06, 0x0000, 0x2000);
3180 rtl_writephy(tp, 0x1f, 0x0007); 3197 rtl_writephy(tp, 0x1f, 0x0007);
3181 rtl_writephy(tp, 0x1e, 0x0020); 3198 rtl_writephy(tp, 0x1e, 0x0020);
3182 rtl_w1w0_phy(tp, 0x15, 0x0000, 0x1100); 3199 rtl_w0w1_phy(tp, 0x15, 0x0000, 0x1100);
3183 rtl_writephy(tp, 0x1f, 0x0006); 3200 rtl_writephy(tp, 0x1f, 0x0006);
3184 rtl_writephy(tp, 0x00, 0x5a00); 3201 rtl_writephy(tp, 0x00, 0x5a00);
3185 rtl_writephy(tp, 0x1f, 0x0000); 3202 rtl_writephy(tp, 0x1f, 0x0000);
@@ -3243,39 +3260,39 @@ static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
3243 /* For 4-corner performance improve */ 3260 /* For 4-corner performance improve */
3244 rtl_writephy(tp, 0x1f, 0x0005); 3261 rtl_writephy(tp, 0x1f, 0x0005);
3245 rtl_writephy(tp, 0x05, 0x8b80); 3262 rtl_writephy(tp, 0x05, 0x8b80);
3246 rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000); 3263 rtl_w0w1_phy(tp, 0x17, 0x0006, 0x0000);
3247 rtl_writephy(tp, 0x1f, 0x0000); 3264 rtl_writephy(tp, 0x1f, 0x0000);
3248 3265
3249 /* PHY auto speed down */ 3266 /* PHY auto speed down */
3250 rtl_writephy(tp, 0x1f, 0x0004); 3267 rtl_writephy(tp, 0x1f, 0x0004);
3251 rtl_writephy(tp, 0x1f, 0x0007); 3268 rtl_writephy(tp, 0x1f, 0x0007);
3252 rtl_writephy(tp, 0x1e, 0x002d); 3269 rtl_writephy(tp, 0x1e, 0x002d);
3253 rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000); 3270 rtl_w0w1_phy(tp, 0x18, 0x0010, 0x0000);
3254 rtl_writephy(tp, 0x1f, 0x0002); 3271 rtl_writephy(tp, 0x1f, 0x0002);
3255 rtl_writephy(tp, 0x1f, 0x0000); 3272 rtl_writephy(tp, 0x1f, 0x0000);
3256 rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000); 3273 rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000);
3257 3274
3258 /* improve 10M EEE waveform */ 3275 /* improve 10M EEE waveform */
3259 rtl_writephy(tp, 0x1f, 0x0005); 3276 rtl_writephy(tp, 0x1f, 0x0005);
3260 rtl_writephy(tp, 0x05, 0x8b86); 3277 rtl_writephy(tp, 0x05, 0x8b86);
3261 rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000); 3278 rtl_w0w1_phy(tp, 0x06, 0x0001, 0x0000);
3262 rtl_writephy(tp, 0x1f, 0x0000); 3279 rtl_writephy(tp, 0x1f, 0x0000);
3263 3280
3264 /* Improve 2-pair detection performance */ 3281 /* Improve 2-pair detection performance */
3265 rtl_writephy(tp, 0x1f, 0x0005); 3282 rtl_writephy(tp, 0x1f, 0x0005);
3266 rtl_writephy(tp, 0x05, 0x8b85); 3283 rtl_writephy(tp, 0x05, 0x8b85);
3267 rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000); 3284 rtl_w0w1_phy(tp, 0x06, 0x4000, 0x0000);
3268 rtl_writephy(tp, 0x1f, 0x0000); 3285 rtl_writephy(tp, 0x1f, 0x0000);
3269 3286
3270 /* EEE setting */ 3287 /* EEE setting */
3271 rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_1111, 0x0000, 0x0003, ERIAR_EXGMAC); 3288 rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_1111, 0x0000, 0x0003, ERIAR_EXGMAC);
3272 rtl_writephy(tp, 0x1f, 0x0005); 3289 rtl_writephy(tp, 0x1f, 0x0005);
3273 rtl_writephy(tp, 0x05, 0x8b85); 3290 rtl_writephy(tp, 0x05, 0x8b85);
3274 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000); 3291 rtl_w0w1_phy(tp, 0x06, 0x0000, 0x2000);
3275 rtl_writephy(tp, 0x1f, 0x0004); 3292 rtl_writephy(tp, 0x1f, 0x0004);
3276 rtl_writephy(tp, 0x1f, 0x0007); 3293 rtl_writephy(tp, 0x1f, 0x0007);
3277 rtl_writephy(tp, 0x1e, 0x0020); 3294 rtl_writephy(tp, 0x1e, 0x0020);
3278 rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0100); 3295 rtl_w0w1_phy(tp, 0x15, 0x0000, 0x0100);
3279 rtl_writephy(tp, 0x1f, 0x0002); 3296 rtl_writephy(tp, 0x1f, 0x0002);
3280 rtl_writephy(tp, 0x1f, 0x0000); 3297 rtl_writephy(tp, 0x1f, 0x0000);
3281 rtl_writephy(tp, 0x0d, 0x0007); 3298 rtl_writephy(tp, 0x0d, 0x0007);
@@ -3286,8 +3303,8 @@ static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
3286 3303
3287 /* Green feature */ 3304 /* Green feature */
3288 rtl_writephy(tp, 0x1f, 0x0003); 3305 rtl_writephy(tp, 0x1f, 0x0003);
3289 rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001); 3306 rtl_w0w1_phy(tp, 0x19, 0x0000, 0x0001);
3290 rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400); 3307 rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0400);
3291 rtl_writephy(tp, 0x1f, 0x0000); 3308 rtl_writephy(tp, 0x1f, 0x0000);
3292 3309
3293 /* Broken BIOS workaround: feed GigaMAC registers with MAC address. */ 3310 /* Broken BIOS workaround: feed GigaMAC registers with MAC address. */
@@ -3299,20 +3316,20 @@ static void rtl8168f_hw_phy_config(struct rtl8169_private *tp)
3299 /* For 4-corner performance improve */ 3316 /* For 4-corner performance improve */
3300 rtl_writephy(tp, 0x1f, 0x0005); 3317 rtl_writephy(tp, 0x1f, 0x0005);
3301 rtl_writephy(tp, 0x05, 0x8b80); 3318 rtl_writephy(tp, 0x05, 0x8b80);
3302 rtl_w1w0_phy(tp, 0x06, 0x0006, 0x0000); 3319 rtl_w0w1_phy(tp, 0x06, 0x0006, 0x0000);
3303 rtl_writephy(tp, 0x1f, 0x0000); 3320 rtl_writephy(tp, 0x1f, 0x0000);
3304 3321
3305 /* PHY auto speed down */ 3322 /* PHY auto speed down */
3306 rtl_writephy(tp, 0x1f, 0x0007); 3323 rtl_writephy(tp, 0x1f, 0x0007);
3307 rtl_writephy(tp, 0x1e, 0x002d); 3324 rtl_writephy(tp, 0x1e, 0x002d);
3308 rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000); 3325 rtl_w0w1_phy(tp, 0x18, 0x0010, 0x0000);
3309 rtl_writephy(tp, 0x1f, 0x0000); 3326 rtl_writephy(tp, 0x1f, 0x0000);
3310 rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000); 3327 rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000);
3311 3328
3312 /* Improve 10M EEE waveform */ 3329 /* Improve 10M EEE waveform */
3313 rtl_writephy(tp, 0x1f, 0x0005); 3330 rtl_writephy(tp, 0x1f, 0x0005);
3314 rtl_writephy(tp, 0x05, 0x8b86); 3331 rtl_writephy(tp, 0x05, 0x8b86);
3315 rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000); 3332 rtl_w0w1_phy(tp, 0x06, 0x0001, 0x0000);
3316 rtl_writephy(tp, 0x1f, 0x0000); 3333 rtl_writephy(tp, 0x1f, 0x0000);
3317} 3334}
3318 3335
@@ -3362,7 +3379,7 @@ static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp)
3362 /* Improve 2-pair detection performance */ 3379 /* Improve 2-pair detection performance */
3363 rtl_writephy(tp, 0x1f, 0x0005); 3380 rtl_writephy(tp, 0x1f, 0x0005);
3364 rtl_writephy(tp, 0x05, 0x8b85); 3381 rtl_writephy(tp, 0x05, 0x8b85);
3365 rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000); 3382 rtl_w0w1_phy(tp, 0x06, 0x4000, 0x0000);
3366 rtl_writephy(tp, 0x1f, 0x0000); 3383 rtl_writephy(tp, 0x1f, 0x0000);
3367} 3384}
3368 3385
@@ -3418,7 +3435,7 @@ static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
3418 /* Improve 2-pair detection performance */ 3435 /* Improve 2-pair detection performance */
3419 rtl_writephy(tp, 0x1f, 0x0005); 3436 rtl_writephy(tp, 0x1f, 0x0005);
3420 rtl_writephy(tp, 0x05, 0x8b85); 3437 rtl_writephy(tp, 0x05, 0x8b85);
3421 rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000); 3438 rtl_w0w1_phy(tp, 0x06, 0x4000, 0x0000);
3422 rtl_writephy(tp, 0x1f, 0x0000); 3439 rtl_writephy(tp, 0x1f, 0x0000);
3423 3440
3424 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); 3441 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
@@ -3426,36 +3443,36 @@ static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
3426 /* Modify green table for giga */ 3443 /* Modify green table for giga */
3427 rtl_writephy(tp, 0x1f, 0x0005); 3444 rtl_writephy(tp, 0x1f, 0x0005);
3428 rtl_writephy(tp, 0x05, 0x8b54); 3445 rtl_writephy(tp, 0x05, 0x8b54);
3429 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0800); 3446 rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0800);
3430 rtl_writephy(tp, 0x05, 0x8b5d); 3447 rtl_writephy(tp, 0x05, 0x8b5d);
3431 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0800); 3448 rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0800);
3432 rtl_writephy(tp, 0x05, 0x8a7c); 3449 rtl_writephy(tp, 0x05, 0x8a7c);
3433 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100); 3450 rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0100);
3434 rtl_writephy(tp, 0x05, 0x8a7f); 3451 rtl_writephy(tp, 0x05, 0x8a7f);
3435 rtl_w1w0_phy(tp, 0x06, 0x0100, 0x0000); 3452 rtl_w0w1_phy(tp, 0x06, 0x0100, 0x0000);
3436 rtl_writephy(tp, 0x05, 0x8a82); 3453 rtl_writephy(tp, 0x05, 0x8a82);
3437 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100); 3454 rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0100);
3438 rtl_writephy(tp, 0x05, 0x8a85); 3455 rtl_writephy(tp, 0x05, 0x8a85);
3439 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100); 3456 rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0100);
3440 rtl_writephy(tp, 0x05, 0x8a88); 3457 rtl_writephy(tp, 0x05, 0x8a88);
3441 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100); 3458 rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0100);
3442 rtl_writephy(tp, 0x1f, 0x0000); 3459 rtl_writephy(tp, 0x1f, 0x0000);
3443 3460
3444 /* uc same-seed solution */ 3461 /* uc same-seed solution */
3445 rtl_writephy(tp, 0x1f, 0x0005); 3462 rtl_writephy(tp, 0x1f, 0x0005);
3446 rtl_writephy(tp, 0x05, 0x8b85); 3463 rtl_writephy(tp, 0x05, 0x8b85);
3447 rtl_w1w0_phy(tp, 0x06, 0x8000, 0x0000); 3464 rtl_w0w1_phy(tp, 0x06, 0x8000, 0x0000);
3448 rtl_writephy(tp, 0x1f, 0x0000); 3465 rtl_writephy(tp, 0x1f, 0x0000);
3449 3466
3450 /* eee setting */ 3467 /* eee setting */
3451 rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x00, 0x03, ERIAR_EXGMAC); 3468 rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x00, 0x03, ERIAR_EXGMAC);
3452 rtl_writephy(tp, 0x1f, 0x0005); 3469 rtl_writephy(tp, 0x1f, 0x0005);
3453 rtl_writephy(tp, 0x05, 0x8b85); 3470 rtl_writephy(tp, 0x05, 0x8b85);
3454 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000); 3471 rtl_w0w1_phy(tp, 0x06, 0x0000, 0x2000);
3455 rtl_writephy(tp, 0x1f, 0x0004); 3472 rtl_writephy(tp, 0x1f, 0x0004);
3456 rtl_writephy(tp, 0x1f, 0x0007); 3473 rtl_writephy(tp, 0x1f, 0x0007);
3457 rtl_writephy(tp, 0x1e, 0x0020); 3474 rtl_writephy(tp, 0x1e, 0x0020);
3458 rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0100); 3475 rtl_w0w1_phy(tp, 0x15, 0x0000, 0x0100);
3459 rtl_writephy(tp, 0x1f, 0x0000); 3476 rtl_writephy(tp, 0x1f, 0x0000);
3460 rtl_writephy(tp, 0x0d, 0x0007); 3477 rtl_writephy(tp, 0x0d, 0x0007);
3461 rtl_writephy(tp, 0x0e, 0x003c); 3478 rtl_writephy(tp, 0x0e, 0x003c);
@@ -3465,8 +3482,8 @@ static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
3465 3482
3466 /* Green feature */ 3483 /* Green feature */
3467 rtl_writephy(tp, 0x1f, 0x0003); 3484 rtl_writephy(tp, 0x1f, 0x0003);
3468 rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001); 3485 rtl_w0w1_phy(tp, 0x19, 0x0000, 0x0001);
3469 rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400); 3486 rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0400);
3470 rtl_writephy(tp, 0x1f, 0x0000); 3487 rtl_writephy(tp, 0x1f, 0x0000);
3471} 3488}
3472 3489
@@ -3477,45 +3494,45 @@ static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
3477 rtl_writephy(tp, 0x1f, 0x0a46); 3494 rtl_writephy(tp, 0x1f, 0x0a46);
3478 if (rtl_readphy(tp, 0x10) & 0x0100) { 3495 if (rtl_readphy(tp, 0x10) & 0x0100) {
3479 rtl_writephy(tp, 0x1f, 0x0bcc); 3496 rtl_writephy(tp, 0x1f, 0x0bcc);
3480 rtl_w1w0_phy(tp, 0x12, 0x0000, 0x8000); 3497 rtl_w0w1_phy(tp, 0x12, 0x0000, 0x8000);
3481 } else { 3498 } else {
3482 rtl_writephy(tp, 0x1f, 0x0bcc); 3499 rtl_writephy(tp, 0x1f, 0x0bcc);
3483 rtl_w1w0_phy(tp, 0x12, 0x8000, 0x0000); 3500 rtl_w0w1_phy(tp, 0x12, 0x8000, 0x0000);
3484 } 3501 }
3485 3502
3486 rtl_writephy(tp, 0x1f, 0x0a46); 3503 rtl_writephy(tp, 0x1f, 0x0a46);
3487 if (rtl_readphy(tp, 0x13) & 0x0100) { 3504 if (rtl_readphy(tp, 0x13) & 0x0100) {
3488 rtl_writephy(tp, 0x1f, 0x0c41); 3505 rtl_writephy(tp, 0x1f, 0x0c41);
3489 rtl_w1w0_phy(tp, 0x15, 0x0002, 0x0000); 3506 rtl_w0w1_phy(tp, 0x15, 0x0002, 0x0000);
3490 } else { 3507 } else {
3491 rtl_writephy(tp, 0x1f, 0x0c41); 3508 rtl_writephy(tp, 0x1f, 0x0c41);
3492 rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0002); 3509 rtl_w0w1_phy(tp, 0x15, 0x0000, 0x0002);
3493 } 3510 }
3494 3511
3495 /* Enable PHY auto speed down */ 3512 /* Enable PHY auto speed down */
3496 rtl_writephy(tp, 0x1f, 0x0a44); 3513 rtl_writephy(tp, 0x1f, 0x0a44);
3497 rtl_w1w0_phy(tp, 0x11, 0x000c, 0x0000); 3514 rtl_w0w1_phy(tp, 0x11, 0x000c, 0x0000);
3498 3515
3499 rtl_writephy(tp, 0x1f, 0x0bcc); 3516 rtl_writephy(tp, 0x1f, 0x0bcc);
3500 rtl_w1w0_phy(tp, 0x14, 0x0100, 0x0000); 3517 rtl_w0w1_phy(tp, 0x14, 0x0100, 0x0000);
3501 rtl_writephy(tp, 0x1f, 0x0a44); 3518 rtl_writephy(tp, 0x1f, 0x0a44);
3502 rtl_w1w0_phy(tp, 0x11, 0x00c0, 0x0000); 3519 rtl_w0w1_phy(tp, 0x11, 0x00c0, 0x0000);
3503 rtl_writephy(tp, 0x1f, 0x0a43); 3520 rtl_writephy(tp, 0x1f, 0x0a43);
3504 rtl_writephy(tp, 0x13, 0x8084); 3521 rtl_writephy(tp, 0x13, 0x8084);
3505 rtl_w1w0_phy(tp, 0x14, 0x0000, 0x6000); 3522 rtl_w0w1_phy(tp, 0x14, 0x0000, 0x6000);
3506 rtl_w1w0_phy(tp, 0x10, 0x1003, 0x0000); 3523 rtl_w0w1_phy(tp, 0x10, 0x1003, 0x0000);
3507 3524
3508 /* EEE auto-fallback function */ 3525 /* EEE auto-fallback function */
3509 rtl_writephy(tp, 0x1f, 0x0a4b); 3526 rtl_writephy(tp, 0x1f, 0x0a4b);
3510 rtl_w1w0_phy(tp, 0x11, 0x0004, 0x0000); 3527 rtl_w0w1_phy(tp, 0x11, 0x0004, 0x0000);
3511 3528
3512 /* Enable UC LPF tune function */ 3529 /* Enable UC LPF tune function */
3513 rtl_writephy(tp, 0x1f, 0x0a43); 3530 rtl_writephy(tp, 0x1f, 0x0a43);
3514 rtl_writephy(tp, 0x13, 0x8012); 3531 rtl_writephy(tp, 0x13, 0x8012);
3515 rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000); 3532 rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000);
3516 3533
3517 rtl_writephy(tp, 0x1f, 0x0c42); 3534 rtl_writephy(tp, 0x1f, 0x0c42);
3518 rtl_w1w0_phy(tp, 0x11, 0x4000, 0x2000); 3535 rtl_w0w1_phy(tp, 0x11, 0x4000, 0x2000);
3519 3536
3520 /* Improve SWR Efficiency */ 3537 /* Improve SWR Efficiency */
3521 rtl_writephy(tp, 0x1f, 0x0bcd); 3538 rtl_writephy(tp, 0x1f, 0x0bcd);
@@ -3531,7 +3548,7 @@ static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
3531 /* Check ALDPS bit, disable it if enabled */ 3548 /* Check ALDPS bit, disable it if enabled */
3532 rtl_writephy(tp, 0x1f, 0x0a43); 3549 rtl_writephy(tp, 0x1f, 0x0a43);
3533 if (rtl_readphy(tp, 0x10) & 0x0004) 3550 if (rtl_readphy(tp, 0x10) & 0x0004)
3534 rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0004); 3551 rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0004);
3535 3552
3536 rtl_writephy(tp, 0x1f, 0x0000); 3553 rtl_writephy(tp, 0x1f, 0x0000);
3537} 3554}
@@ -3551,33 +3568,33 @@ static void rtl8168h_1_hw_phy_config(struct rtl8169_private *tp)
3551 /* CHN EST parameters adjust - giga master */ 3568 /* CHN EST parameters adjust - giga master */
3552 rtl_writephy(tp, 0x1f, 0x0a43); 3569 rtl_writephy(tp, 0x1f, 0x0a43);
3553 rtl_writephy(tp, 0x13, 0x809b); 3570 rtl_writephy(tp, 0x13, 0x809b);
3554 rtl_w1w0_phy(tp, 0x14, 0x8000, 0xf800); 3571 rtl_w0w1_phy(tp, 0x14, 0x8000, 0xf800);
3555 rtl_writephy(tp, 0x13, 0x80a2); 3572 rtl_writephy(tp, 0x13, 0x80a2);
3556 rtl_w1w0_phy(tp, 0x14, 0x8000, 0xff00); 3573 rtl_w0w1_phy(tp, 0x14, 0x8000, 0xff00);
3557 rtl_writephy(tp, 0x13, 0x80a4); 3574 rtl_writephy(tp, 0x13, 0x80a4);
3558 rtl_w1w0_phy(tp, 0x14, 0x8500, 0xff00); 3575 rtl_w0w1_phy(tp, 0x14, 0x8500, 0xff00);
3559 rtl_writephy(tp, 0x13, 0x809c); 3576 rtl_writephy(tp, 0x13, 0x809c);
3560 rtl_w1w0_phy(tp, 0x14, 0xbd00, 0xff00); 3577 rtl_w0w1_phy(tp, 0x14, 0xbd00, 0xff00);
3561 rtl_writephy(tp, 0x1f, 0x0000); 3578 rtl_writephy(tp, 0x1f, 0x0000);
3562 3579
3563 /* CHN EST parameters adjust - giga slave */ 3580 /* CHN EST parameters adjust - giga slave */
3564 rtl_writephy(tp, 0x1f, 0x0a43); 3581 rtl_writephy(tp, 0x1f, 0x0a43);
3565 rtl_writephy(tp, 0x13, 0x80ad); 3582 rtl_writephy(tp, 0x13, 0x80ad);
3566 rtl_w1w0_phy(tp, 0x14, 0x7000, 0xf800); 3583 rtl_w0w1_phy(tp, 0x14, 0x7000, 0xf800);
3567 rtl_writephy(tp, 0x13, 0x80b4); 3584 rtl_writephy(tp, 0x13, 0x80b4);
3568 rtl_w1w0_phy(tp, 0x14, 0x5000, 0xff00); 3585 rtl_w0w1_phy(tp, 0x14, 0x5000, 0xff00);
3569 rtl_writephy(tp, 0x13, 0x80ac); 3586 rtl_writephy(tp, 0x13, 0x80ac);
3570 rtl_w1w0_phy(tp, 0x14, 0x4000, 0xff00); 3587 rtl_w0w1_phy(tp, 0x14, 0x4000, 0xff00);
3571 rtl_writephy(tp, 0x1f, 0x0000); 3588 rtl_writephy(tp, 0x1f, 0x0000);
3572 3589
3573 /* CHN EST parameters adjust - fnet */ 3590 /* CHN EST parameters adjust - fnet */
3574 rtl_writephy(tp, 0x1f, 0x0a43); 3591 rtl_writephy(tp, 0x1f, 0x0a43);
3575 rtl_writephy(tp, 0x13, 0x808e); 3592 rtl_writephy(tp, 0x13, 0x808e);
3576 rtl_w1w0_phy(tp, 0x14, 0x1200, 0xff00); 3593 rtl_w0w1_phy(tp, 0x14, 0x1200, 0xff00);
3577 rtl_writephy(tp, 0x13, 0x8090); 3594 rtl_writephy(tp, 0x13, 0x8090);
3578 rtl_w1w0_phy(tp, 0x14, 0xe500, 0xff00); 3595 rtl_w0w1_phy(tp, 0x14, 0xe500, 0xff00);
3579 rtl_writephy(tp, 0x13, 0x8092); 3596 rtl_writephy(tp, 0x13, 0x8092);
3580 rtl_w1w0_phy(tp, 0x14, 0x9f00, 0xff00); 3597 rtl_w0w1_phy(tp, 0x14, 0x9f00, 0xff00);
3581 rtl_writephy(tp, 0x1f, 0x0000); 3598 rtl_writephy(tp, 0x1f, 0x0000);
3582 3599
3583 /* enable R-tune & PGA-retune function */ 3600 /* enable R-tune & PGA-retune function */
@@ -3596,57 +3613,57 @@ static void rtl8168h_1_hw_phy_config(struct rtl8169_private *tp)
3596 dout_tapbin &= 0xf000; 3613 dout_tapbin &= 0xf000;
3597 rtl_writephy(tp, 0x1f, 0x0a43); 3614 rtl_writephy(tp, 0x1f, 0x0a43);
3598 rtl_writephy(tp, 0x13, 0x827a); 3615 rtl_writephy(tp, 0x13, 0x827a);
3599 rtl_w1w0_phy(tp, 0x14, dout_tapbin, 0xf000); 3616 rtl_w0w1_phy(tp, 0x14, dout_tapbin, 0xf000);
3600 rtl_writephy(tp, 0x13, 0x827b); 3617 rtl_writephy(tp, 0x13, 0x827b);
3601 rtl_w1w0_phy(tp, 0x14, dout_tapbin, 0xf000); 3618 rtl_w0w1_phy(tp, 0x14, dout_tapbin, 0xf000);
3602 rtl_writephy(tp, 0x13, 0x827c); 3619 rtl_writephy(tp, 0x13, 0x827c);
3603 rtl_w1w0_phy(tp, 0x14, dout_tapbin, 0xf000); 3620 rtl_w0w1_phy(tp, 0x14, dout_tapbin, 0xf000);
3604 rtl_writephy(tp, 0x13, 0x827d); 3621 rtl_writephy(tp, 0x13, 0x827d);
3605 rtl_w1w0_phy(tp, 0x14, dout_tapbin, 0xf000); 3622 rtl_w0w1_phy(tp, 0x14, dout_tapbin, 0xf000);
3606 3623
3607 rtl_writephy(tp, 0x1f, 0x0a43); 3624 rtl_writephy(tp, 0x1f, 0x0a43);
3608 rtl_writephy(tp, 0x13, 0x0811); 3625 rtl_writephy(tp, 0x13, 0x0811);
3609 rtl_w1w0_phy(tp, 0x14, 0x0800, 0x0000); 3626 rtl_w0w1_phy(tp, 0x14, 0x0800, 0x0000);
3610 rtl_writephy(tp, 0x1f, 0x0a42); 3627 rtl_writephy(tp, 0x1f, 0x0a42);
3611 rtl_w1w0_phy(tp, 0x16, 0x0002, 0x0000); 3628 rtl_w0w1_phy(tp, 0x16, 0x0002, 0x0000);
3612 rtl_writephy(tp, 0x1f, 0x0000); 3629 rtl_writephy(tp, 0x1f, 0x0000);
3613 3630
3614 /* enable GPHY 10M */ 3631 /* enable GPHY 10M */
3615 rtl_writephy(tp, 0x1f, 0x0a44); 3632 rtl_writephy(tp, 0x1f, 0x0a44);
3616 rtl_w1w0_phy(tp, 0x11, 0x0800, 0x0000); 3633 rtl_w0w1_phy(tp, 0x11, 0x0800, 0x0000);
3617 rtl_writephy(tp, 0x1f, 0x0000); 3634 rtl_writephy(tp, 0x1f, 0x0000);
3618 3635
3619 /* SAR ADC performance */ 3636 /* SAR ADC performance */
3620 rtl_writephy(tp, 0x1f, 0x0bca); 3637 rtl_writephy(tp, 0x1f, 0x0bca);
3621 rtl_w1w0_phy(tp, 0x17, 0x4000, 0x3000); 3638 rtl_w0w1_phy(tp, 0x17, 0x4000, 0x3000);
3622 rtl_writephy(tp, 0x1f, 0x0000); 3639 rtl_writephy(tp, 0x1f, 0x0000);
3623 3640
3624 rtl_writephy(tp, 0x1f, 0x0a43); 3641 rtl_writephy(tp, 0x1f, 0x0a43);
3625 rtl_writephy(tp, 0x13, 0x803f); 3642 rtl_writephy(tp, 0x13, 0x803f);
3626 rtl_w1w0_phy(tp, 0x14, 0x0000, 0x3000); 3643 rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000);
3627 rtl_writephy(tp, 0x13, 0x8047); 3644 rtl_writephy(tp, 0x13, 0x8047);
3628 rtl_w1w0_phy(tp, 0x14, 0x0000, 0x3000); 3645 rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000);
3629 rtl_writephy(tp, 0x13, 0x804f); 3646 rtl_writephy(tp, 0x13, 0x804f);
3630 rtl_w1w0_phy(tp, 0x14, 0x0000, 0x3000); 3647 rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000);
3631 rtl_writephy(tp, 0x13, 0x8057); 3648 rtl_writephy(tp, 0x13, 0x8057);
3632 rtl_w1w0_phy(tp, 0x14, 0x0000, 0x3000); 3649 rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000);
3633 rtl_writephy(tp, 0x13, 0x805f); 3650 rtl_writephy(tp, 0x13, 0x805f);
3634 rtl_w1w0_phy(tp, 0x14, 0x0000, 0x3000); 3651 rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000);
3635 rtl_writephy(tp, 0x13, 0x8067); 3652 rtl_writephy(tp, 0x13, 0x8067);
3636 rtl_w1w0_phy(tp, 0x14, 0x0000, 0x3000); 3653 rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000);
3637 rtl_writephy(tp, 0x13, 0x806f); 3654 rtl_writephy(tp, 0x13, 0x806f);
3638 rtl_w1w0_phy(tp, 0x14, 0x0000, 0x3000); 3655 rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000);
3639 rtl_writephy(tp, 0x1f, 0x0000); 3656 rtl_writephy(tp, 0x1f, 0x0000);
3640 3657
3641 /* disable phy pfm mode */ 3658 /* disable phy pfm mode */
3642 rtl_writephy(tp, 0x1f, 0x0a44); 3659 rtl_writephy(tp, 0x1f, 0x0a44);
3643 rtl_w1w0_phy(tp, 0x14, 0x0000, 0x0080); 3660 rtl_w0w1_phy(tp, 0x14, 0x0000, 0x0080);
3644 rtl_writephy(tp, 0x1f, 0x0000); 3661 rtl_writephy(tp, 0x1f, 0x0000);
3645 3662
3646 /* Check ALDPS bit, disable it if enabled */ 3663 /* Check ALDPS bit, disable it if enabled */
3647 rtl_writephy(tp, 0x1f, 0x0a43); 3664 rtl_writephy(tp, 0x1f, 0x0a43);
3648 if (rtl_readphy(tp, 0x10) & 0x0004) 3665 if (rtl_readphy(tp, 0x10) & 0x0004)
3649 rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0004); 3666 rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0004);
3650 3667
3651 rtl_writephy(tp, 0x1f, 0x0000); 3668 rtl_writephy(tp, 0x1f, 0x0000);
3652} 3669}
@@ -3662,20 +3679,20 @@ static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp)
3662 /* CHN EST parameter update */ 3679 /* CHN EST parameter update */
3663 rtl_writephy(tp, 0x1f, 0x0a43); 3680 rtl_writephy(tp, 0x1f, 0x0a43);
3664 rtl_writephy(tp, 0x13, 0x808a); 3681 rtl_writephy(tp, 0x13, 0x808a);
3665 rtl_w1w0_phy(tp, 0x14, 0x000a, 0x003f); 3682 rtl_w0w1_phy(tp, 0x14, 0x000a, 0x003f);
3666 rtl_writephy(tp, 0x1f, 0x0000); 3683 rtl_writephy(tp, 0x1f, 0x0000);
3667 3684
3668 /* enable R-tune & PGA-retune function */ 3685 /* enable R-tune & PGA-retune function */
3669 rtl_writephy(tp, 0x1f, 0x0a43); 3686 rtl_writephy(tp, 0x1f, 0x0a43);
3670 rtl_writephy(tp, 0x13, 0x0811); 3687 rtl_writephy(tp, 0x13, 0x0811);
3671 rtl_w1w0_phy(tp, 0x14, 0x0800, 0x0000); 3688 rtl_w0w1_phy(tp, 0x14, 0x0800, 0x0000);
3672 rtl_writephy(tp, 0x1f, 0x0a42); 3689 rtl_writephy(tp, 0x1f, 0x0a42);
3673 rtl_w1w0_phy(tp, 0x16, 0x0002, 0x0000); 3690 rtl_w0w1_phy(tp, 0x16, 0x0002, 0x0000);
3674 rtl_writephy(tp, 0x1f, 0x0000); 3691 rtl_writephy(tp, 0x1f, 0x0000);
3675 3692
3676 /* enable GPHY 10M */ 3693 /* enable GPHY 10M */
3677 rtl_writephy(tp, 0x1f, 0x0a44); 3694 rtl_writephy(tp, 0x1f, 0x0a44);
3678 rtl_w1w0_phy(tp, 0x11, 0x0800, 0x0000); 3695 rtl_w0w1_phy(tp, 0x11, 0x0800, 0x0000);
3679 rtl_writephy(tp, 0x1f, 0x0000); 3696 rtl_writephy(tp, 0x1f, 0x0000);
3680 3697
3681 r8168_mac_ocp_write(tp, 0xdd02, 0x807d); 3698 r8168_mac_ocp_write(tp, 0xdd02, 0x807d);
@@ -3692,8 +3709,8 @@ static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp)
3692 ioffset_p0 |= (data & (0x07)); 3709 ioffset_p0 |= (data & (0x07));
3693 data = (ioffset_p3<<12)|(ioffset_p2<<8)|(ioffset_p1<<4)|(ioffset_p0); 3710 data = (ioffset_p3<<12)|(ioffset_p2<<8)|(ioffset_p1<<4)|(ioffset_p0);
3694 3711
3695 if ((ioffset_p3 != 0x0F) || (ioffset_p2 != 0x0F) || 3712 if ((ioffset_p3 != 0x0f) || (ioffset_p2 != 0x0f) ||
3696 (ioffset_p1 != 0x0F) || (ioffset_p0 == 0x0F)) { 3713 (ioffset_p1 != 0x0f) || (ioffset_p0 == 0x0f)) {
3697 rtl_writephy(tp, 0x1f, 0x0bcf); 3714 rtl_writephy(tp, 0x1f, 0x0bcf);
3698 rtl_writephy(tp, 0x16, data); 3715 rtl_writephy(tp, 0x16, data);
3699 rtl_writephy(tp, 0x1f, 0x0000); 3716 rtl_writephy(tp, 0x1f, 0x0000);
@@ -3713,13 +3730,13 @@ static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp)
3713 3730
3714 /* disable phy pfm mode */ 3731 /* disable phy pfm mode */
3715 rtl_writephy(tp, 0x1f, 0x0a44); 3732 rtl_writephy(tp, 0x1f, 0x0a44);
3716 rtl_w1w0_phy(tp, 0x14, 0x0000, 0x0080); 3733 rtl_w0w1_phy(tp, 0x14, 0x0000, 0x0080);
3717 rtl_writephy(tp, 0x1f, 0x0000); 3734 rtl_writephy(tp, 0x1f, 0x0000);
3718 3735
3719 /* Check ALDPS bit, disable it if enabled */ 3736 /* Check ALDPS bit, disable it if enabled */
3720 rtl_writephy(tp, 0x1f, 0x0a43); 3737 rtl_writephy(tp, 0x1f, 0x0a43);
3721 if (rtl_readphy(tp, 0x10) & 0x0004) 3738 if (rtl_readphy(tp, 0x10) & 0x0004)
3722 rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0004); 3739 rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0004);
3723 3740
3724 rtl_writephy(tp, 0x1f, 0x0000); 3741 rtl_writephy(tp, 0x1f, 0x0000);
3725} 3742}
@@ -4265,7 +4282,7 @@ static void r810x_pll_power_up(struct rtl8169_private *tp)
4265 break; 4282 break;
4266 case RTL_GIGA_MAC_VER_47: 4283 case RTL_GIGA_MAC_VER_47:
4267 case RTL_GIGA_MAC_VER_48: 4284 case RTL_GIGA_MAC_VER_48:
4268 RTL_W8(PMCH, RTL_R8(PMCH) | 0xC0); 4285 RTL_W8(PMCH, RTL_R8(PMCH) | 0xc0);
4269 break; 4286 break;
4270 default: 4287 default:
4271 RTL_W8(PMCH, RTL_R8(PMCH) | 0x80); 4288 RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
@@ -4340,7 +4357,7 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
4340 if ((tp->mac_version == RTL_GIGA_MAC_VER_27 || 4357 if ((tp->mac_version == RTL_GIGA_MAC_VER_27 ||
4341 tp->mac_version == RTL_GIGA_MAC_VER_28 || 4358 tp->mac_version == RTL_GIGA_MAC_VER_28 ||
4342 tp->mac_version == RTL_GIGA_MAC_VER_31) && 4359 tp->mac_version == RTL_GIGA_MAC_VER_31) &&
4343 r8168dp_check_dash(tp)) { 4360 r8168_check_dash(tp)) {
4344 return; 4361 return;
4345 } 4362 }
4346 4363
@@ -4367,14 +4384,16 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
4367 case RTL_GIGA_MAC_VER_31: 4384 case RTL_GIGA_MAC_VER_31:
4368 case RTL_GIGA_MAC_VER_32: 4385 case RTL_GIGA_MAC_VER_32:
4369 case RTL_GIGA_MAC_VER_33: 4386 case RTL_GIGA_MAC_VER_33:
4387 case RTL_GIGA_MAC_VER_44:
4370 case RTL_GIGA_MAC_VER_45: 4388 case RTL_GIGA_MAC_VER_45:
4371 case RTL_GIGA_MAC_VER_46: 4389 case RTL_GIGA_MAC_VER_46:
4372 RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80); 4390 RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
4373 break; 4391 break;
4374 case RTL_GIGA_MAC_VER_40: 4392 case RTL_GIGA_MAC_VER_40:
4375 case RTL_GIGA_MAC_VER_41: 4393 case RTL_GIGA_MAC_VER_41:
4376 rtl_w1w0_eri(tp, 0x1a8, ERIAR_MASK_1111, 0x00000000, 4394 rtl_w0w1_eri(tp, 0x1a8, ERIAR_MASK_1111, 0x00000000,
4377 0xfc000000, ERIAR_EXGMAC); 4395 0xfc000000, ERIAR_EXGMAC);
4396 RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
4378 break; 4397 break;
4379 } 4398 }
4380} 4399}
@@ -4393,13 +4412,15 @@ static void r8168_pll_power_up(struct rtl8169_private *tp)
4393 case RTL_GIGA_MAC_VER_33: 4412 case RTL_GIGA_MAC_VER_33:
4394 RTL_W8(PMCH, RTL_R8(PMCH) | 0x80); 4413 RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
4395 break; 4414 break;
4415 case RTL_GIGA_MAC_VER_44:
4396 case RTL_GIGA_MAC_VER_45: 4416 case RTL_GIGA_MAC_VER_45:
4397 case RTL_GIGA_MAC_VER_46: 4417 case RTL_GIGA_MAC_VER_46:
4398 RTL_W8(PMCH, RTL_R8(PMCH) | 0xC0); 4418 RTL_W8(PMCH, RTL_R8(PMCH) | 0xc0);
4399 break; 4419 break;
4400 case RTL_GIGA_MAC_VER_40: 4420 case RTL_GIGA_MAC_VER_40:
4401 case RTL_GIGA_MAC_VER_41: 4421 case RTL_GIGA_MAC_VER_41:
4402 rtl_w1w0_eri(tp, 0x1a8, ERIAR_MASK_1111, 0xfc000000, 4422 RTL_W8(PMCH, RTL_R8(PMCH) | 0xc0);
4423 rtl_w0w1_eri(tp, 0x1a8, ERIAR_MASK_1111, 0xfc000000,
4403 0x00000000, ERIAR_EXGMAC); 4424 0x00000000, ERIAR_EXGMAC);
4404 break; 4425 break;
4405 } 4426 }
@@ -4712,6 +4733,8 @@ static void rtl_hw_reset(struct rtl8169_private *tp)
4712 RTL_W8(ChipCmd, CmdReset); 4733 RTL_W8(ChipCmd, CmdReset);
4713 4734
4714 rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100); 4735 rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100);
4736
4737 netdev_reset_queue(tp->dev);
4715} 4738}
4716 4739
4717static void rtl_request_uncached_firmware(struct rtl8169_private *tp) 4740static void rtl_request_uncached_firmware(struct rtl8169_private *tp)
@@ -4975,7 +4998,7 @@ static void rtl_hw_start_8169(struct net_device *dev)
4975 4998
4976 if (tp->mac_version == RTL_GIGA_MAC_VER_02 || 4999 if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
4977 tp->mac_version == RTL_GIGA_MAC_VER_03) { 5000 tp->mac_version == RTL_GIGA_MAC_VER_03) {
4978 dprintk("Set MAC Reg C+CR Offset 0xE0. " 5001 dprintk("Set MAC Reg C+CR Offset 0xe0. "
4979 "Bit-3 and bit-14 MUST be 1\n"); 5002 "Bit-3 and bit-14 MUST be 1\n");
4980 tp->cp_cmd |= (1 << 14); 5003 tp->cp_cmd |= (1 << 14);
4981 } 5004 }
@@ -5010,7 +5033,7 @@ static void rtl_hw_start_8169(struct net_device *dev)
5010 rtl_set_rx_mode(dev); 5033 rtl_set_rx_mode(dev);
5011 5034
5012 /* no early-rx interrupts */ 5035 /* no early-rx interrupts */
5013 RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000); 5036 RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);
5014} 5037}
5015 5038
5016static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value) 5039static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value)
@@ -5473,8 +5496,8 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
5473 rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC); 5496 rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
5474 rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC); 5497 rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
5475 rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x07ff0060, ERIAR_EXGMAC); 5498 rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x07ff0060, ERIAR_EXGMAC);
5476 rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC); 5499 rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
5477 rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC); 5500 rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC);
5478 5501
5479 RTL_W8(MaxTxPacketSize, EarlySize); 5502 RTL_W8(MaxTxPacketSize, EarlySize);
5480 5503
@@ -5504,10 +5527,10 @@ static void rtl_hw_start_8168f(struct rtl8169_private *tp)
5504 rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); 5527 rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5505 rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC); 5528 rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
5506 rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC); 5529 rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
5507 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); 5530 rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
5508 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); 5531 rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
5509 rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC); 5532 rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
5510 rtl_w1w0_eri(tp, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC); 5533 rtl_w0w1_eri(tp, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
5511 rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC); 5534 rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
5512 rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC); 5535 rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC);
5513 5536
@@ -5536,7 +5559,7 @@ static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
5536 5559
5537 rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1)); 5560 rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
5538 5561
5539 rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC); 5562 rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC);
5540 5563
5541 /* Adjust EEE LED frequency */ 5564 /* Adjust EEE LED frequency */
5542 RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07); 5565 RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
@@ -5556,7 +5579,7 @@ static void rtl_hw_start_8411(struct rtl8169_private *tp)
5556 5579
5557 rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1)); 5580 rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
5558 5581
5559 rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0x0000, ERIAR_EXGMAC); 5582 rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0x0000, ERIAR_EXGMAC);
5560} 5583}
5561 5584
5562static void rtl_hw_start_8168g_1(struct rtl8169_private *tp) 5585static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
@@ -5575,8 +5598,8 @@ static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
5575 5598
5576 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); 5599 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5577 5600
5578 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); 5601 rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
5579 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); 5602 rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
5580 rtl_eri_write(tp, 0x2f8, ERIAR_MASK_0011, 0x1d8f, ERIAR_EXGMAC); 5603 rtl_eri_write(tp, 0x2f8, ERIAR_MASK_0011, 0x1d8f, ERIAR_EXGMAC);
5581 5604
5582 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); 5605 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
@@ -5589,8 +5612,8 @@ static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
5589 /* Adjust EEE LED frequency */ 5612 /* Adjust EEE LED frequency */
5590 RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07); 5613 RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
5591 5614
5592 rtl_w1w0_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06, ERIAR_EXGMAC); 5615 rtl_w0w1_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06, ERIAR_EXGMAC);
5593 rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC); 5616 rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC);
5594 5617
5595 rtl_pcie_state_l2l3_enable(tp, false); 5618 rtl_pcie_state_l2l3_enable(tp, false);
5596} 5619}
@@ -5663,12 +5686,12 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
5663 5686
5664 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); 5687 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5665 5688
5666 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); 5689 rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
5667 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); 5690 rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
5668 5691
5669 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_1111, 0x0010, 0x00, ERIAR_EXGMAC); 5692 rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_1111, 0x0010, 0x00, ERIAR_EXGMAC);
5670 5693
5671 rtl_w1w0_eri(tp, 0xd4, ERIAR_MASK_1111, 0x1f00, 0x00, ERIAR_EXGMAC); 5694 rtl_w0w1_eri(tp, 0xd4, ERIAR_MASK_1111, 0x1f00, 0x00, ERIAR_EXGMAC);
5672 5695
5673 rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87, ERIAR_EXGMAC); 5696 rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87, ERIAR_EXGMAC);
5674 5697
@@ -5687,7 +5710,7 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
5687 5710
5688 RTL_W8(DLLPR, RTL_R8(DLLPR) & ~TX_10M_PS_EN); 5711 RTL_W8(DLLPR, RTL_R8(DLLPR) & ~TX_10M_PS_EN);
5689 5712
5690 rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC); 5713 rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC);
5691 5714
5692 rtl_pcie_state_l2l3_enable(tp, false); 5715 rtl_pcie_state_l2l3_enable(tp, false);
5693 5716
@@ -5858,7 +5881,7 @@ static void rtl_hw_start_8168(struct net_device *dev)
5858 5881
5859 rtl_set_rx_mode(dev); 5882 rtl_set_rx_mode(dev);
5860 5883
5861 RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000); 5884 RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);
5862} 5885}
5863 5886
5864#define R810X_CPCMD_QUIRK_MASK (\ 5887#define R810X_CPCMD_QUIRK_MASK (\
@@ -5981,11 +6004,11 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
5981 6004
5982 rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00000002, ERIAR_EXGMAC); 6005 rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00000002, ERIAR_EXGMAC);
5983 rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00000006, ERIAR_EXGMAC); 6006 rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00000006, ERIAR_EXGMAC);
5984 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); 6007 rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
5985 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); 6008 rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
5986 rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); 6009 rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5987 rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); 6010 rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5988 rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00, ERIAR_EXGMAC); 6011 rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00, ERIAR_EXGMAC);
5989 6012
5990 rtl_pcie_state_l2l3_enable(tp, false); 6013 rtl_pcie_state_l2l3_enable(tp, false);
5991} 6014}
@@ -6592,6 +6615,8 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
6592 6615
6593 txd->opts2 = cpu_to_le32(opts[1]); 6616 txd->opts2 = cpu_to_le32(opts[1]);
6594 6617
6618 netdev_sent_queue(dev, skb->len);
6619
6595 skb_tx_timestamp(skb); 6620 skb_tx_timestamp(skb);
6596 6621
6597 wmb(); 6622 wmb();
@@ -6691,6 +6716,7 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
6691static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp) 6716static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
6692{ 6717{
6693 unsigned int dirty_tx, tx_left; 6718 unsigned int dirty_tx, tx_left;
6719 unsigned int bytes_compl = 0, pkts_compl = 0;
6694 6720
6695 dirty_tx = tp->dirty_tx; 6721 dirty_tx = tp->dirty_tx;
6696 smp_rmb(); 6722 smp_rmb();
@@ -6709,10 +6735,8 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
6709 rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb, 6735 rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
6710 tp->TxDescArray + entry); 6736 tp->TxDescArray + entry);
6711 if (status & LastFrag) { 6737 if (status & LastFrag) {
6712 u64_stats_update_begin(&tp->tx_stats.syncp); 6738 pkts_compl++;
6713 tp->tx_stats.packets++; 6739 bytes_compl += tx_skb->skb->len;
6714 tp->tx_stats.bytes += tx_skb->skb->len;
6715 u64_stats_update_end(&tp->tx_stats.syncp);
6716 dev_kfree_skb_any(tx_skb->skb); 6740 dev_kfree_skb_any(tx_skb->skb);
6717 tx_skb->skb = NULL; 6741 tx_skb->skb = NULL;
6718 } 6742 }
@@ -6721,6 +6745,13 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
6721 } 6745 }
6722 6746
6723 if (tp->dirty_tx != dirty_tx) { 6747 if (tp->dirty_tx != dirty_tx) {
6748 netdev_completed_queue(tp->dev, pkts_compl, bytes_compl);
6749
6750 u64_stats_update_begin(&tp->tx_stats.syncp);
6751 tp->tx_stats.packets += pkts_compl;
6752 tp->tx_stats.bytes += bytes_compl;
6753 u64_stats_update_end(&tp->tx_stats.syncp);
6754
6724 tp->dirty_tx = dirty_tx; 6755 tp->dirty_tx = dirty_tx;
6725 /* Sync with rtl8169_start_xmit: 6756 /* Sync with rtl8169_start_xmit:
6726 * - publish dirty_tx ring index (write barrier) 6757 * - publish dirty_tx ring index (write barrier)
@@ -7366,9 +7397,10 @@ static void rtl_remove_one(struct pci_dev *pdev)
7366 struct net_device *dev = pci_get_drvdata(pdev); 7397 struct net_device *dev = pci_get_drvdata(pdev);
7367 struct rtl8169_private *tp = netdev_priv(dev); 7398 struct rtl8169_private *tp = netdev_priv(dev);
7368 7399
7369 if (tp->mac_version == RTL_GIGA_MAC_VER_27 || 7400 if ((tp->mac_version == RTL_GIGA_MAC_VER_27 ||
7370 tp->mac_version == RTL_GIGA_MAC_VER_28 || 7401 tp->mac_version == RTL_GIGA_MAC_VER_28 ||
7371 tp->mac_version == RTL_GIGA_MAC_VER_31) { 7402 tp->mac_version == RTL_GIGA_MAC_VER_31) &&
7403 r8168_check_dash(tp)) {
7372 rtl8168_driver_stop(tp); 7404 rtl8168_driver_stop(tp);
7373 } 7405 }
7374 7406
@@ -7662,8 +7694,20 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7662 RTL_W8(Config1, RTL_R8(Config1) | PMEnable); 7694 RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
7663 RTL_W8(Config5, RTL_R8(Config5) & (BWF | MWF | UWF | LanWake | PMEStatus)); 7695 RTL_W8(Config5, RTL_R8(Config5) & (BWF | MWF | UWF | LanWake | PMEStatus));
7664 switch (tp->mac_version) { 7696 switch (tp->mac_version) {
7697 case RTL_GIGA_MAC_VER_34:
7698 case RTL_GIGA_MAC_VER_35:
7699 case RTL_GIGA_MAC_VER_36:
7700 case RTL_GIGA_MAC_VER_37:
7701 case RTL_GIGA_MAC_VER_38:
7702 case RTL_GIGA_MAC_VER_40:
7703 case RTL_GIGA_MAC_VER_41:
7704 case RTL_GIGA_MAC_VER_42:
7705 case RTL_GIGA_MAC_VER_43:
7706 case RTL_GIGA_MAC_VER_44:
7665 case RTL_GIGA_MAC_VER_45: 7707 case RTL_GIGA_MAC_VER_45:
7666 case RTL_GIGA_MAC_VER_46: 7708 case RTL_GIGA_MAC_VER_46:
7709 case RTL_GIGA_MAC_VER_47:
7710 case RTL_GIGA_MAC_VER_48:
7667 if (rtl_eri_read(tp, 0xdc, ERIAR_EXGMAC) & MagicPacket_v2) 7711 if (rtl_eri_read(tp, 0xdc, ERIAR_EXGMAC) & MagicPacket_v2)
7668 tp->features |= RTL_FEATURE_WOL; 7712 tp->features |= RTL_FEATURE_WOL;
7669 if ((RTL_R8(Config3) & LinkUp) != 0) 7713 if ((RTL_R8(Config3) & LinkUp) != 0)
@@ -7700,14 +7744,23 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7700 u64_stats_init(&tp->tx_stats.syncp); 7744 u64_stats_init(&tp->tx_stats.syncp);
7701 7745
7702 /* Get MAC address */ 7746 /* Get MAC address */
7703 if (tp->mac_version == RTL_GIGA_MAC_VER_45 || 7747 if (tp->mac_version == RTL_GIGA_MAC_VER_35 ||
7748 tp->mac_version == RTL_GIGA_MAC_VER_36 ||
7749 tp->mac_version == RTL_GIGA_MAC_VER_37 ||
7750 tp->mac_version == RTL_GIGA_MAC_VER_38 ||
7751 tp->mac_version == RTL_GIGA_MAC_VER_40 ||
7752 tp->mac_version == RTL_GIGA_MAC_VER_41 ||
7753 tp->mac_version == RTL_GIGA_MAC_VER_42 ||
7754 tp->mac_version == RTL_GIGA_MAC_VER_43 ||
7755 tp->mac_version == RTL_GIGA_MAC_VER_44 ||
7756 tp->mac_version == RTL_GIGA_MAC_VER_45 ||
7704 tp->mac_version == RTL_GIGA_MAC_VER_46 || 7757 tp->mac_version == RTL_GIGA_MAC_VER_46 ||
7705 tp->mac_version == RTL_GIGA_MAC_VER_47 || 7758 tp->mac_version == RTL_GIGA_MAC_VER_47 ||
7706 tp->mac_version == RTL_GIGA_MAC_VER_48) { 7759 tp->mac_version == RTL_GIGA_MAC_VER_48) {
7707 u16 mac_addr[3]; 7760 u16 mac_addr[3];
7708 7761
7709 *(u32 *)&mac_addr[0] = rtl_eri_read(tp, 0xE0, ERIAR_EXGMAC); 7762 *(u32 *)&mac_addr[0] = rtl_eri_read(tp, 0xe0, ERIAR_EXGMAC);
7710 *(u16 *)&mac_addr[2] = rtl_eri_read(tp, 0xE4, ERIAR_EXGMAC); 7763 *(u16 *)&mac_addr[2] = rtl_eri_read(tp, 0xe4, ERIAR_EXGMAC);
7711 7764
7712 if (is_valid_ether_addr((u8 *)mac_addr)) 7765 if (is_valid_ether_addr((u8 *)mac_addr))
7713 rtl_rar_set(tp, (u8 *)mac_addr); 7766 rtl_rar_set(tp, (u8 *)mac_addr);
@@ -7780,9 +7833,10 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7780 rtl_chip_infos[chipset].jumbo_tx_csum ? "ok" : "ko"); 7833 rtl_chip_infos[chipset].jumbo_tx_csum ? "ok" : "ko");
7781 } 7834 }
7782 7835
7783 if (tp->mac_version == RTL_GIGA_MAC_VER_27 || 7836 if ((tp->mac_version == RTL_GIGA_MAC_VER_27 ||
7784 tp->mac_version == RTL_GIGA_MAC_VER_28 || 7837 tp->mac_version == RTL_GIGA_MAC_VER_28 ||
7785 tp->mac_version == RTL_GIGA_MAC_VER_31) { 7838 tp->mac_version == RTL_GIGA_MAC_VER_31) &&
7839 r8168_check_dash(tp)) {
7786 rtl8168_driver_start(tp); 7840 rtl8168_driver_start(tp);
7787 } 7841 }
7788 7842
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index 9778cba9fc74..e88df9c7f1c0 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -1927,9 +1927,6 @@ static int smc911x_probe(struct net_device *dev)
1927 } 1927 }
1928 dev->irq = irq_canonicalize(dev->irq); 1928 dev->irq = irq_canonicalize(dev->irq);
1929 1929
1930 /* Fill in the fields of the device structure with ethernet values. */
1931 ether_setup(dev);
1932
1933 dev->netdev_ops = &smc911x_netdev_ops; 1930 dev->netdev_ops = &smc911x_netdev_ops;
1934 dev->watchdog_timeo = msecs_to_jiffies(watchdog); 1931 dev->watchdog_timeo = msecs_to_jiffies(watchdog);
1935 dev->ethtool_ops = &smc911x_ethtool_ops; 1932 dev->ethtool_ops = &smc911x_ethtool_ops;
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index bcaa41af1e62..5e94d00b96b3 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -1967,9 +1967,6 @@ static int smc_probe(struct net_device *dev, void __iomem *ioaddr,
1967 } 1967 }
1968 dev->irq = irq_canonicalize(dev->irq); 1968 dev->irq = irq_canonicalize(dev->irq);
1969 1969
1970 /* Fill in the fields of the device structure with ethernet values. */
1971 ether_setup(dev);
1972
1973 dev->watchdog_timeo = msecs_to_jiffies(watchdog); 1970 dev->watchdog_timeo = msecs_to_jiffies(watchdog);
1974 dev->netdev_ops = &smc_netdev_ops; 1971 dev->netdev_ops = &smc_netdev_ops;
1975 dev->ethtool_ops = &smc_ethtool_ops; 1972 dev->ethtool_ops = &smc_ethtool_ops;
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 5e13fa5524ae..affb29da353e 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -2255,7 +2255,6 @@ static int smsc911x_init(struct net_device *dev)
2255 if (smsc911x_soft_reset(pdata)) 2255 if (smsc911x_soft_reset(pdata))
2256 return -ENODEV; 2256 return -ENODEV;
2257 2257
2258 ether_setup(dev);
2259 dev->flags |= IFF_MULTICAST; 2258 dev->flags |= IFF_MULTICAST;
2260 netif_napi_add(dev, &pdata->napi, smsc911x_poll, SMSC_NAPI_WEIGHT); 2259 netif_napi_add(dev, &pdata->napi, smsc911x_poll, SMSC_NAPI_WEIGHT);
2261 dev->netdev_ops = &smsc911x_netdev_ops; 2260 dev->netdev_ops = &smsc911x_netdev_ops;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 9dbb02d9d9c2..6f77a46c7e2c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2765,8 +2765,6 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
2765 priv->device = device; 2765 priv->device = device;
2766 priv->dev = ndev; 2766 priv->dev = ndev;
2767 2767
2768 ether_setup(ndev);
2769
2770 stmmac_set_ethtool_ops(ndev); 2768 stmmac_set_ethtool_ops(ndev);
2771 priv->pause = pause; 2769 priv->pause = pause;
2772 priv->plat = plat_dat; 2770 priv->plat = plat_dat;
@@ -2786,8 +2784,15 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
2786 if (IS_ERR(priv->stmmac_clk)) { 2784 if (IS_ERR(priv->stmmac_clk)) {
2787 dev_warn(priv->device, "%s: warning: cannot get CSR clock\n", 2785 dev_warn(priv->device, "%s: warning: cannot get CSR clock\n",
2788 __func__); 2786 __func__);
2789 ret = PTR_ERR(priv->stmmac_clk); 2787 /* If we failed to obtain stmmac_clk and no specific clk_csr
2790 goto error_clk_get; 2788 * value was passed from the platform, the probe fails.
2789 */
2790 if (!priv->plat->clk_csr) {
2791 ret = PTR_ERR(priv->stmmac_clk);
2792 goto error_clk_get;
2793 } else {
2794 priv->stmmac_clk = NULL;
2795 }
2791 } 2796 }
2792 clk_prepare_enable(priv->stmmac_clk); 2797 clk_prepare_enable(priv->stmmac_clk);
2793 2798
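This fallback leans on the fact that the common clk API treats a NULL clk as a no-op: once stmmac_clk is cleared, the unconditional clk_prepare_enable() that follows simply returns 0, so no extra branch is needed. In sketch form:

if (IS_ERR(priv->stmmac_clk)) {
        if (!priv->plat->clk_csr)
                return PTR_ERR(priv->stmmac_clk); /* no clock, no rate hint */
        priv->stmmac_clk = NULL;                  /* platform supplied clk_csr */
}
clk_prepare_enable(priv->stmmac_clk);             /* NULL clk: no-op, returns 0 */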
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 126269762ee7..15396720f489 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -1073,7 +1073,7 @@ out_dropped:
1073 if (pending) 1073 if (pending)
1074 (void)mod_timer(&port->clean_timer, 1074 (void)mod_timer(&port->clean_timer,
1075 jiffies + VNET_CLEAN_TIMEOUT); 1075 jiffies + VNET_CLEAN_TIMEOUT);
1076 else 1076 else if (port)
1077 del_timer(&port->clean_timer); 1077 del_timer(&port->clean_timer);
1078 dev->stats.tx_dropped++; 1078 dev->stats.tx_dropped++;
1079 return NETDEV_TX_OK; 1079 return NETDEV_TX_OK;
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index 104d46f37969..0f56b1c0e082 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -708,7 +708,6 @@ static int w5100_probe(struct platform_device *pdev)
708 priv = netdev_priv(ndev); 708 priv = netdev_priv(ndev);
709 priv->ndev = ndev; 709 priv->ndev = ndev;
710 710
711 ether_setup(ndev);
712 ndev->netdev_ops = &w5100_netdev_ops; 711 ndev->netdev_ops = &w5100_netdev_ops;
713 ndev->ethtool_ops = &w5100_ethtool_ops; 712 ndev->ethtool_ops = &w5100_ethtool_ops;
714 ndev->watchdog_timeo = HZ; 713 ndev->watchdog_timeo = HZ;
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index 1f33c4c86c20..f961f14a0473 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -620,7 +620,6 @@ static int w5300_probe(struct platform_device *pdev)
620 priv = netdev_priv(ndev); 620 priv = netdev_priv(ndev);
621 priv->ndev = ndev; 621 priv->ndev = ndev;
622 622
623 ether_setup(ndev);
624 ndev->netdev_ops = &w5300_netdev_ops; 623 ndev->netdev_ops = &w5300_netdev_ops;
625 ndev->ethtool_ops = &w5300_ethtool_ops; 624 ndev->ethtool_ops = &w5300_ethtool_ops;
626 ndev->watchdog_timeo = HZ; 625 ndev->watchdog_timeo = HZ;
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index fda5891835d4..629077050fce 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -1012,7 +1012,6 @@ static int temac_of_probe(struct platform_device *op)
1012 if (!ndev) 1012 if (!ndev)
1013 return -ENOMEM; 1013 return -ENOMEM;
1014 1014
1015 ether_setup(ndev);
1016 platform_set_drvdata(op, ndev); 1015 platform_set_drvdata(op, ndev);
1017 SET_NETDEV_DEV(ndev, &op->dev); 1016 SET_NETDEV_DEV(ndev, &op->dev);
1018 ndev->flags &= ~IFF_MULTICAST; /* clear multicast */ 1017 ndev->flags &= ~IFF_MULTICAST; /* clear multicast */
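The ether_setup() deletions in the smsc, stmmac, wiznet and ll_temac probes above all rest on the same observation: these devices are created with alloc_etherdev() (or a variant), which already applies ether_setup() to the new net_device, so the second call in probe was redundant. The resulting probe shape, sketched with a placeholder foo_priv:

struct net_device *ndev = alloc_etherdev(sizeof(struct foo_priv));

if (!ndev)
        return -ENOMEM;
/* ndev already carries Ethernet defaults (type, MTU, broadcast address)
 * courtesy of alloc_etherdev() -> ether_setup(); no extra call needed. */
ndev->netdev_ops = &foo_netdev_ops;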
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index a9c5eaadc426..0fcb5e7eb073 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -387,6 +387,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
387 int hdr_offset; 387 int hdr_offset;
388 u32 net_trans_info; 388 u32 net_trans_info;
389 u32 hash; 389 u32 hash;
390 u32 skb_length = skb->len;
390 391
391 392
392 /* We will need at most two pages to describe the rndis 393
@@ -562,7 +563,7 @@ do_send:
562 563
563drop: 564drop:
564 if (ret == 0) { 565 if (ret == 0) {
565 net->stats.tx_bytes += skb->len; 566 net->stats.tx_bytes += skb_length;
566 net->stats.tx_packets++; 567 net->stats.tx_packets++;
567 } else { 568 } else {
568 kfree(packet); 569 kfree(packet);
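The netvsc change caches skb->len up front because the send path may have consumed (freed) the skb by the time the statistics are updated, so reading skb->len afterwards would be a use-after-free. The guarded pattern, in sketch form (do_send() stands in for the actual send routine):

u32 skb_length = skb->len;      /* snapshot before the skb is handed off */

ret = do_send(skb);             /* on success the skb may already be freed */
if (ret == 0)
        net->stats.tx_bytes += skb_length;      /* never touch the skb here */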
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig
index 8d101d63abca..a2c227bfb687 100644
--- a/drivers/net/irda/Kconfig
+++ b/drivers/net/irda/Kconfig
@@ -397,7 +397,7 @@ config MCS_FIR
397config SH_IRDA 397config SH_IRDA
398 tristate "SuperH IrDA driver" 398 tristate "SuperH IrDA driver"
399 depends on IRDA 399 depends on IRDA
400 depends on ARCH_SHMOBILE || COMPILE_TEST 400 depends on (ARCH_SHMOBILE || COMPILE_TEST) && HAS_IOMEM
401 help 401 help
402 Say Y here if you want to enable SuperH IrDA devices. 402
403 403
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 3381c4f91a8c..0c6adaaf898c 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -112,17 +112,15 @@ out:
112 return err; 112 return err;
113} 113}
114 114
115/* Requires RTNL */
115static int macvtap_set_queue(struct net_device *dev, struct file *file, 116static int macvtap_set_queue(struct net_device *dev, struct file *file,
116 struct macvtap_queue *q) 117 struct macvtap_queue *q)
117{ 118{
118 struct macvlan_dev *vlan = netdev_priv(dev); 119 struct macvlan_dev *vlan = netdev_priv(dev);
119 int err = -EBUSY;
120 120
121 rtnl_lock();
122 if (vlan->numqueues == MAX_MACVTAP_QUEUES) 121 if (vlan->numqueues == MAX_MACVTAP_QUEUES)
123 goto out; 122 return -EBUSY;
124 123
125 err = 0;
126 rcu_assign_pointer(q->vlan, vlan); 124 rcu_assign_pointer(q->vlan, vlan);
127 rcu_assign_pointer(vlan->taps[vlan->numvtaps], q); 125 rcu_assign_pointer(vlan->taps[vlan->numvtaps], q);
128 sock_hold(&q->sk); 126 sock_hold(&q->sk);
@@ -136,9 +134,7 @@ static int macvtap_set_queue(struct net_device *dev, struct file *file,
136 vlan->numvtaps++; 134 vlan->numvtaps++;
137 vlan->numqueues++; 135 vlan->numqueues++;
138 136
139out: 137 return 0;
140 rtnl_unlock();
141 return err;
142} 138}
143 139
144static int macvtap_disable_queue(struct macvtap_queue *q) 140static int macvtap_disable_queue(struct macvtap_queue *q)
@@ -454,11 +450,12 @@ static void macvtap_sock_destruct(struct sock *sk)
454static int macvtap_open(struct inode *inode, struct file *file) 450static int macvtap_open(struct inode *inode, struct file *file)
455{ 451{
456 struct net *net = current->nsproxy->net_ns; 452 struct net *net = current->nsproxy->net_ns;
457 struct net_device *dev = dev_get_by_macvtap_minor(iminor(inode)); 453 struct net_device *dev;
458 struct macvtap_queue *q; 454 struct macvtap_queue *q;
459 int err; 455 int err = -ENODEV;
460 456
461 err = -ENODEV; 457 rtnl_lock();
458 dev = dev_get_by_macvtap_minor(iminor(inode));
462 if (!dev) 459 if (!dev)
463 goto out; 460 goto out;
464 461
@@ -498,6 +495,7 @@ out:
498 if (dev) 495 if (dev)
499 dev_put(dev); 496 dev_put(dev);
500 497
498 rtnl_unlock();
501 return err; 499 return err;
502} 500}
503 501
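The macvtap rework hoists RTNL acquisition out of macvtap_set_queue() and into macvtap_open(), so the device lookup and the queue attach happen under a single critical section (hence the new "Requires RTNL" comment). The caller-side shape, in sketch form:

rtnl_lock();
dev = dev_get_by_macvtap_minor(iminor(inode));
if (dev)
        err = macvtap_set_queue(dev, file, q);  /* now expects RTNL held */
/* ...remaining setup, dev_put() on error... */
rtnl_unlock();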
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index daae69950925..1d211d369039 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -350,6 +350,32 @@ static struct phy_driver bcm7xxx_driver[] = {
350 BCM7XXX_28NM_GPHY(PHY_ID_BCM7439, "Broadcom BCM7439"), 350 BCM7XXX_28NM_GPHY(PHY_ID_BCM7439, "Broadcom BCM7439"),
351 BCM7XXX_28NM_GPHY(PHY_ID_BCM7445, "Broadcom BCM7445"), 351 BCM7XXX_28NM_GPHY(PHY_ID_BCM7445, "Broadcom BCM7445"),
352{ 352{
353 .phy_id = PHY_ID_BCM7425,
354 .phy_id_mask = 0xfffffff0,
355 .name = "Broadcom BCM7425",
356 .features = PHY_GBIT_FEATURES |
357 SUPPORTED_Pause | SUPPORTED_Asym_Pause,
358 .flags = 0,
359 .config_init = bcm7xxx_config_init,
360 .config_aneg = genphy_config_aneg,
361 .read_status = genphy_read_status,
362 .suspend = bcm7xxx_suspend,
363 .resume = bcm7xxx_config_init,
364 .driver = { .owner = THIS_MODULE },
365}, {
366 .phy_id = PHY_ID_BCM7429,
367 .phy_id_mask = 0xfffffff0,
368 .name = "Broadcom BCM7429",
369 .features = PHY_GBIT_FEATURES |
370 SUPPORTED_Pause | SUPPORTED_Asym_Pause,
371 .flags = PHY_IS_INTERNAL,
372 .config_init = bcm7xxx_config_init,
373 .config_aneg = genphy_config_aneg,
374 .read_status = genphy_read_status,
375 .suspend = bcm7xxx_suspend,
376 .resume = bcm7xxx_config_init,
377 .driver = { .owner = THIS_MODULE },
378}, {
353 .phy_id = PHY_BCM_OUI_4, 379 .phy_id = PHY_BCM_OUI_4,
354 .phy_id_mask = 0xffff0000, 380 .phy_id_mask = 0xffff0000,
355 .name = "Broadcom BCM7XXX 40nm", 381 .name = "Broadcom BCM7XXX 40nm",
@@ -381,6 +407,8 @@ static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = {
381 { PHY_ID_BCM7250, 0xfffffff0, }, 407 { PHY_ID_BCM7250, 0xfffffff0, },
382 { PHY_ID_BCM7364, 0xfffffff0, }, 408 { PHY_ID_BCM7364, 0xfffffff0, },
383 { PHY_ID_BCM7366, 0xfffffff0, }, 409 { PHY_ID_BCM7366, 0xfffffff0, },
410 { PHY_ID_BCM7425, 0xfffffff0, },
411 { PHY_ID_BCM7429, 0xfffffff0, },
384 { PHY_ID_BCM7439, 0xfffffff0, }, 412 { PHY_ID_BCM7439, 0xfffffff0, },
385 { PHY_ID_BCM7445, 0xfffffff0, }, 413 { PHY_ID_BCM7445, 0xfffffff0, },
386 { PHY_BCM_OUI_4, 0xffff0000 }, 414 { PHY_BCM_OUI_4, 0xffff0000 },
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index a4d4c4a1354f..b9a98152815b 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -26,7 +26,7 @@
26#include <linux/mdio.h> 26#include <linux/mdio.h>
27 27
28/* Version Information */ 28/* Version Information */
29#define DRIVER_VERSION "v1.06.0 (2014/03/03)" 29#define DRIVER_VERSION "v1.06.1 (2014/10/01)"
30#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>" 30#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
31#define DRIVER_DESC "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters" 31#define DRIVER_DESC "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters"
32#define MODULENAME "r8152" 32#define MODULENAME "r8152"
@@ -1979,10 +1979,34 @@ static void rxdy_gated_en(struct r8152 *tp, bool enable)
1979 ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data); 1979 ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
1980} 1980}
1981 1981
1982static int rtl_start_rx(struct r8152 *tp)
1983{
1984 int i, ret = 0;
1985
1986 INIT_LIST_HEAD(&tp->rx_done);
1987 for (i = 0; i < RTL8152_MAX_RX; i++) {
1988 INIT_LIST_HEAD(&tp->rx_info[i].list);
1989 ret = r8152_submit_rx(tp, &tp->rx_info[i], GFP_KERNEL);
1990 if (ret)
1991 break;
1992 }
1993
1994 return ret;
1995}
1996
1997static int rtl_stop_rx(struct r8152 *tp)
1998{
1999 int i;
2000
2001 for (i = 0; i < RTL8152_MAX_RX; i++)
2002 usb_kill_urb(tp->rx_info[i].urb);
2003
2004 return 0;
2005}
2006
1982static int rtl_enable(struct r8152 *tp) 2007static int rtl_enable(struct r8152 *tp)
1983{ 2008{
1984 u32 ocp_data; 2009 u32 ocp_data;
1985 int i, ret;
1986 2010
1987 r8152b_reset_packet_filter(tp); 2011 r8152b_reset_packet_filter(tp);
1988 2012
@@ -1992,14 +2016,7 @@ static int rtl_enable(struct r8152 *tp)
1992 2016
1993 rxdy_gated_en(tp, false); 2017 rxdy_gated_en(tp, false);
1994 2018
1995 INIT_LIST_HEAD(&tp->rx_done); 2019 return rtl_start_rx(tp);
1996 ret = 0;
1997 for (i = 0; i < RTL8152_MAX_RX; i++) {
1998 INIT_LIST_HEAD(&tp->rx_info[i].list);
1999 ret |= r8152_submit_rx(tp, &tp->rx_info[i], GFP_KERNEL);
2000 }
2001
2002 return ret;
2003} 2020}
2004 2021
2005static int rtl8152_enable(struct r8152 *tp) 2022static int rtl8152_enable(struct r8152 *tp)
@@ -2083,8 +2100,7 @@ static void rtl_disable(struct r8152 *tp)
2083 usleep_range(1000, 2000); 2100 usleep_range(1000, 2000);
2084 } 2101 }
2085 2102
2086 for (i = 0; i < RTL8152_MAX_RX; i++) 2103 rtl_stop_rx(tp);
2087 usb_kill_urb(tp->rx_info[i].urb);
2088 2104
2089 rtl8152_nic_reset(tp); 2105 rtl8152_nic_reset(tp);
2090} 2106}
@@ -2243,28 +2259,6 @@ static void rtl_phy_reset(struct r8152 *tp)
2243 } 2259 }
2244} 2260}
2245 2261
2246static void rtl_clear_bp(struct r8152 *tp)
2247{
2248 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_0, 0);
2249 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_2, 0);
2250 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_4, 0);
2251 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_6, 0);
2252 ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_0, 0);
2253 ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_2, 0);
2254 ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_4, 0);
2255 ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_6, 0);
2256 usleep_range(3000, 6000);
2257 ocp_write_word(tp, MCU_TYPE_PLA, PLA_BP_BA, 0);
2258 ocp_write_word(tp, MCU_TYPE_USB, USB_BP_BA, 0);
2259}
2260
2261static void r8153_clear_bp(struct r8152 *tp)
2262{
2263 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_BP_EN, 0);
2264 ocp_write_byte(tp, MCU_TYPE_USB, USB_BP_EN, 0);
2265 rtl_clear_bp(tp);
2266}
2267
2268static void r8153_teredo_off(struct r8152 *tp) 2262static void r8153_teredo_off(struct r8152 *tp)
2269{ 2263{
2270 u32 ocp_data; 2264 u32 ocp_data;
@@ -2307,8 +2301,6 @@ static void r8152b_hw_phy_cfg(struct r8152 *tp)
2307 r8152_mdio_write(tp, MII_BMCR, data); 2301 r8152_mdio_write(tp, MII_BMCR, data);
2308 } 2302 }
2309 2303
2310 rtl_clear_bp(tp);
2311
2312 set_bit(PHY_RESET, &tp->flags); 2304 set_bit(PHY_RESET, &tp->flags);
2313} 2305}
2314 2306
@@ -2455,8 +2447,6 @@ static void r8153_hw_phy_cfg(struct r8152 *tp)
2455 r8152_mdio_write(tp, MII_BMCR, data); 2447 r8152_mdio_write(tp, MII_BMCR, data);
2456 } 2448 }
2457 2449
2458 r8153_clear_bp(tp);
2459
2460 if (tp->version == RTL_VER_03) { 2450 if (tp->version == RTL_VER_03) {
2461 data = ocp_reg_read(tp, OCP_EEE_CFG); 2451 data = ocp_reg_read(tp, OCP_EEE_CFG);
2462 data &= ~CTAP_SHORT_EN; 2452 data &= ~CTAP_SHORT_EN;
@@ -3181,13 +3171,14 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
3181 clear_bit(WORK_ENABLE, &tp->flags); 3171 clear_bit(WORK_ENABLE, &tp->flags);
3182 usb_kill_urb(tp->intr_urb); 3172 usb_kill_urb(tp->intr_urb);
3183 cancel_delayed_work_sync(&tp->schedule); 3173 cancel_delayed_work_sync(&tp->schedule);
3174 tasklet_disable(&tp->tl);
3184 if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { 3175 if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
3176 rtl_stop_rx(tp);
3185 rtl_runtime_suspend_enable(tp, true); 3177 rtl_runtime_suspend_enable(tp, true);
3186 } else { 3178 } else {
3187 tasklet_disable(&tp->tl);
3188 tp->rtl_ops.down(tp); 3179 tp->rtl_ops.down(tp);
3189 tasklet_enable(&tp->tl);
3190 } 3180 }
3181 tasklet_enable(&tp->tl);
3191 } 3182 }
3192 3183
3193 return 0; 3184 return 0;
@@ -3206,18 +3197,19 @@ static int rtl8152_resume(struct usb_interface *intf)
3206 if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { 3197 if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
3207 rtl_runtime_suspend_enable(tp, false); 3198 rtl_runtime_suspend_enable(tp, false);
3208 clear_bit(SELECTIVE_SUSPEND, &tp->flags); 3199 clear_bit(SELECTIVE_SUSPEND, &tp->flags);
3200 set_bit(WORK_ENABLE, &tp->flags);
3209 if (tp->speed & LINK_STATUS) 3201 if (tp->speed & LINK_STATUS)
3210 tp->rtl_ops.disable(tp); 3202 rtl_start_rx(tp);
3211 } else { 3203 } else {
3212 tp->rtl_ops.up(tp); 3204 tp->rtl_ops.up(tp);
3213 rtl8152_set_speed(tp, AUTONEG_ENABLE, 3205 rtl8152_set_speed(tp, AUTONEG_ENABLE,
3214 tp->mii.supports_gmii ? 3206 tp->mii.supports_gmii ?
3215 SPEED_1000 : SPEED_100, 3207 SPEED_1000 : SPEED_100,
3216 DUPLEX_FULL); 3208 DUPLEX_FULL);
3209 tp->speed = 0;
3210 netif_carrier_off(tp->netdev);
3211 set_bit(WORK_ENABLE, &tp->flags);
3217 } 3212 }
3218 tp->speed = 0;
3219 netif_carrier_off(tp->netdev);
3220 set_bit(WORK_ENABLE, &tp->flags);
3221 usb_submit_urb(tp->intr_urb, GFP_KERNEL); 3213 usb_submit_urb(tp->intr_urb, GFP_KERNEL);
3222 } 3214 }
3223 3215
@@ -3623,7 +3615,7 @@ static void rtl8153_unload(struct r8152 *tp)
3623 if (test_bit(RTL8152_UNPLUG, &tp->flags)) 3615 if (test_bit(RTL8152_UNPLUG, &tp->flags))
3624 return; 3616 return;
3625 3617
3626 r8153_power_cut_en(tp, true); 3618 r8153_power_cut_en(tp, false);
3627} 3619}
3628 3620
3629static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id) 3621static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id)
@@ -3788,7 +3780,11 @@ static void rtl8152_disconnect(struct usb_interface *intf)
3788 3780
3789 usb_set_intfdata(intf, NULL); 3781 usb_set_intfdata(intf, NULL);
3790 if (tp) { 3782 if (tp) {
3791 set_bit(RTL8152_UNPLUG, &tp->flags); 3783 struct usb_device *udev = tp->udev;
3784
3785 if (udev->state == USB_STATE_NOTATTACHED)
3786 set_bit(RTL8152_UNPLUG, &tp->flags);
3787
3792 tasklet_kill(&tp->tl); 3788 tasklet_kill(&tp->tl);
3793 unregister_netdev(tp->netdev); 3789 unregister_netdev(tp->netdev);
3794 tp->rtl_ops.unload(tp); 3790 tp->rtl_ops.unload(tp);
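Factoring rtl_start_rx()/rtl_stop_rx() out of rtl_enable()/rtl_disable() lets the selective-suspend paths stop and restart the RX URBs without a full adapter down/up, and it also tightens the error handling: the old loop OR-ed errno values together (ret |= ...), which can combine two different errors into a meaningless code, while the new helper stops at the first failure. The generic shape of that fix, with submit_one() and items[] as placeholders:

int i, ret = 0;

for (i = 0; i < n; i++) {
        ret = submit_one(&items[i]);
        if (ret)
                break;          /* propagate the first real errno as-is */
}
return ret;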
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 34e102ec95c2..2af795d6ba05 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1610,6 +1610,8 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
1610 vxh->vx_flags = htonl(VXLAN_FLAGS); 1610 vxh->vx_flags = htonl(VXLAN_FLAGS);
1611 vxh->vx_vni = vni; 1611 vxh->vx_vni = vni;
1612 1612
1613 skb_set_inner_protocol(skb, htons(ETH_P_TEB));
1614
1613 udp_tunnel6_xmit_skb(vs->sock, dst, skb, dev, saddr, daddr, prio, 1615 udp_tunnel6_xmit_skb(vs->sock, dst, skb, dev, saddr, daddr, prio,
1614 ttl, src_port, dst_port); 1616 ttl, src_port, dst_port);
1615 return 0; 1617 return 0;
@@ -1652,6 +1654,8 @@ int vxlan_xmit_skb(struct vxlan_sock *vs,
1652 vxh->vx_flags = htonl(VXLAN_FLAGS); 1654 vxh->vx_flags = htonl(VXLAN_FLAGS);
1653 vxh->vx_vni = vni; 1655 vxh->vx_vni = vni;
1654 1656
1657 skb_set_inner_protocol(skb, htons(ETH_P_TEB));
1658
1655 return udp_tunnel_xmit_skb(vs->sock, rt, skb, src, dst, tos, 1659 return udp_tunnel_xmit_skb(vs->sock, rt, skb, src, dst, tos,
1656 ttl, df, src_port, dst_port, xnet); 1660 ttl, df, src_port, dst_port, xnet);
1657} 1661}
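Both vxlan transmit paths now call skb_set_inner_protocol() before the outer headers are pushed, recording that the encapsulated payload is a transparent-Ethernet-bridging frame (ETH_P_TEB) so GSO and offload code can segment the inner packet correctly. In sketch form:

/* Mark the inner payload type before the outer UDP/VXLAN headers go on. */
skb_set_inner_protocol(skb, htons(ETH_P_TEB));
/* ...build outer headers and hand off to udp_tunnel_xmit_skb()... */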