Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/Kconfig | 13
-rw-r--r--  drivers/net/ethernet/Makefile | 1
-rw-r--r--  drivers/net/ethernet/altera/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/altera/Makefile | 1
-rw-r--r--  drivers/net/ethernet/altera/altera_msgdma.c | 118
-rw-r--r--  drivers/net/ethernet/altera/altera_msgdma.h | 3
-rw-r--r--  drivers/net/ethernet/altera/altera_msgdmahw.h | 13
-rw-r--r--  drivers/net/ethernet/altera/altera_sgdma.c | 338
-rw-r--r--  drivers/net/ethernet/altera/altera_sgdma.h | 3
-rw-r--r--  drivers/net/ethernet/altera/altera_sgdmahw.h | 26
-rw-r--r--  drivers/net/ethernet/altera/altera_tse.h | 53
-rw-r--r--  drivers/net/ethernet/altera/altera_tse_ethtool.c | 116
-rw-r--r--  drivers/net/ethernet/altera/altera_tse_main.c | 206
-rw-r--r--  drivers/net/ethernet/altera/altera_utils.c | 20
-rw-r--r--  drivers/net/ethernet/altera/altera_utils.h | 8
-rw-r--r--  drivers/net/ethernet/arc/emac.h | 2
-rw-r--r--  drivers/net/ethernet/arc/emac_main.c | 82
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 12
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 60
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c | 4
-rw-r--r--  drivers/net/ethernet/cadence/Kconfig | 4
-rw-r--r--  drivers/net/ethernet/cadence/macb.c | 35
-rw-r--r--  drivers/net/ethernet/chelsio/Kconfig | 13
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 2
-rw-r--r--  drivers/net/ethernet/ec_bhf.c | 706
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 6
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 223
-rw-r--r--  drivers/net/ethernet/freescale/gianfar_ethtool.c | 3
-rw-r--r--  drivers/net/ethernet/ibm/emac/mal.c | 5
-rw-r--r--  drivers/net/ethernet/ibm/emac/mal.h | 20
-rw-r--r--  drivers/net/ethernet/ibm/emac/rgmii.c | 3
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.c | 71
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.h | 3
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 6
-rw-r--r--  drivers/net/ethernet/intel/e1000e/phy.h | 1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 14
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_nvm.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ptp.c | 4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 22
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_i210.c | 2
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mac.c | 13
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 4
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ptp.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h | 21
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 3
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 6
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c | 40
-rw-r--r--  drivers/net/ethernet/jme.c | 53
-rw-r--r--  drivers/net/ethernet/marvell/mvmdio.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cmd.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4.h | 26
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/port.c | 152
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/qp.c | 35
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 80
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 16
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c | 1
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 66
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c | 9
-rw-r--r--  drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h | 2
-rw-r--r--  drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c | 22
-rw-r--r--  drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c | 11
-rw-r--r--  drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h | 42
-rw-r--r--  drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c | 13
-rw-r--r--  drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c | 31
-rw-r--r--  drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c | 14
-rw-r--r--  drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h | 4
-rw-r--r--  drivers/net/ethernet/sfc/nic.c | 14
-rw-r--r--  drivers/net/ethernet/smsc/smc91x.c | 25
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 4
-rw-r--r--  drivers/net/ethernet/sun/cassini.c | 2
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 17
74 files changed, 2165 insertions, 813 deletions
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 39b26fe28d10..051349458462 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -35,6 +35,19 @@ source "drivers/net/ethernet/calxeda/Kconfig"
 source "drivers/net/ethernet/chelsio/Kconfig"
 source "drivers/net/ethernet/cirrus/Kconfig"
 source "drivers/net/ethernet/cisco/Kconfig"
+
+config CX_ECAT
+	tristate "Beckhoff CX5020 EtherCAT master support"
+	depends on PCI
+	depends on X86 || COMPILE_TEST
+	---help---
+	  Driver for EtherCAT master module located on CCAT FPGA
+	  that can be found on Beckhoff CX5020, and possibly other of CX
+	  Beckhoff CX series industrial PCs.
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called ec_bhf.
+
 source "drivers/net/ethernet/davicom/Kconfig"
 
 config DNET
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 545d0b3b9cb4..35190e36c456 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_NET_CALXEDA_XGMAC) += calxeda/
 obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/
 obj-$(CONFIG_NET_VENDOR_CIRRUS) += cirrus/
 obj-$(CONFIG_NET_VENDOR_CISCO) += cisco/
+obj-$(CONFIG_CX_ECAT) += ec_bhf.o
 obj-$(CONFIG_DM9000) += davicom/
 obj-$(CONFIG_DNET) += dnet.o
 obj-$(CONFIG_NET_VENDOR_DEC) += dec/
diff --git a/drivers/net/ethernet/altera/Kconfig b/drivers/net/ethernet/altera/Kconfig
index 80c1ab74a4b8..fdddba51473e 100644
--- a/drivers/net/ethernet/altera/Kconfig
+++ b/drivers/net/ethernet/altera/Kconfig
@@ -1,5 +1,6 @@
 config ALTERA_TSE
 	tristate "Altera Triple-Speed Ethernet MAC support"
+	depends on HAS_DMA
 	select PHYLIB
 	---help---
 	  This driver supports the Altera Triple-Speed (TSE) Ethernet MAC.
diff --git a/drivers/net/ethernet/altera/Makefile b/drivers/net/ethernet/altera/Makefile
index d4a187e45369..3eff2fd3997e 100644
--- a/drivers/net/ethernet/altera/Makefile
+++ b/drivers/net/ethernet/altera/Makefile
@@ -5,3 +5,4 @@
 obj-$(CONFIG_ALTERA_TSE) += altera_tse.o
 altera_tse-objs := altera_tse_main.o altera_tse_ethtool.o \
 altera_msgdma.o altera_sgdma.o altera_utils.o
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/ethernet/altera/altera_msgdma.c b/drivers/net/ethernet/altera/altera_msgdma.c
index 3df18669ea30..0fb986ba3290 100644
--- a/drivers/net/ethernet/altera/altera_msgdma.c
+++ b/drivers/net/ethernet/altera/altera_msgdma.c
@@ -18,6 +18,7 @@
 #include "altera_utils.h"
 #include "altera_tse.h"
 #include "altera_msgdmahw.h"
+#include "altera_msgdma.h"
 
 /* No initialization work to do for MSGDMA */
 int msgdma_initialize(struct altera_tse_private *priv)
@@ -29,21 +30,23 @@ void msgdma_uninitialize(struct altera_tse_private *priv)
 {
 }
 
+void msgdma_start_rxdma(struct altera_tse_private *priv)
+{
+}
+
 void msgdma_reset(struct altera_tse_private *priv)
 {
 	int counter;
-	struct msgdma_csr *txcsr =
-		(struct msgdma_csr *)priv->tx_dma_csr;
-	struct msgdma_csr *rxcsr =
-		(struct msgdma_csr *)priv->rx_dma_csr;
 
 	/* Reset Rx mSGDMA */
-	iowrite32(MSGDMA_CSR_STAT_MASK, &rxcsr->status);
-	iowrite32(MSGDMA_CSR_CTL_RESET, &rxcsr->control);
+	csrwr32(MSGDMA_CSR_STAT_MASK, priv->rx_dma_csr,
+		msgdma_csroffs(status));
+	csrwr32(MSGDMA_CSR_CTL_RESET, priv->rx_dma_csr,
+		msgdma_csroffs(control));
 
 	counter = 0;
 	while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
-		if (tse_bit_is_clear(&rxcsr->status,
+		if (tse_bit_is_clear(priv->rx_dma_csr, msgdma_csroffs(status),
 				     MSGDMA_CSR_STAT_RESETTING))
 			break;
 		udelay(1);
@@ -54,15 +57,18 @@ void msgdma_reset(struct altera_tse_private *priv)
 			"TSE Rx mSGDMA resetting bit never cleared!\n");
 
 	/* clear all status bits */
-	iowrite32(MSGDMA_CSR_STAT_MASK, &rxcsr->status);
+	csrwr32(MSGDMA_CSR_STAT_MASK, priv->rx_dma_csr, msgdma_csroffs(status));
 
 	/* Reset Tx mSGDMA */
-	iowrite32(MSGDMA_CSR_STAT_MASK, &txcsr->status);
-	iowrite32(MSGDMA_CSR_CTL_RESET, &txcsr->control);
+	csrwr32(MSGDMA_CSR_STAT_MASK, priv->tx_dma_csr,
+		msgdma_csroffs(status));
+
+	csrwr32(MSGDMA_CSR_CTL_RESET, priv->tx_dma_csr,
+		msgdma_csroffs(control));
 
 	counter = 0;
 	while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
-		if (tse_bit_is_clear(&txcsr->status,
+		if (tse_bit_is_clear(priv->tx_dma_csr, msgdma_csroffs(status),
 				     MSGDMA_CSR_STAT_RESETTING))
 			break;
 		udelay(1);
@@ -73,58 +79,58 @@ void msgdma_reset(struct altera_tse_private *priv)
 		"TSE Tx mSGDMA resetting bit never cleared!\n");
 
 	/* clear all status bits */
-	iowrite32(MSGDMA_CSR_STAT_MASK, &txcsr->status);
+	csrwr32(MSGDMA_CSR_STAT_MASK, priv->tx_dma_csr, msgdma_csroffs(status));
 }
 
 void msgdma_disable_rxirq(struct altera_tse_private *priv)
 {
-	struct msgdma_csr *csr = priv->rx_dma_csr;
-	tse_clear_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR);
+	tse_clear_bit(priv->rx_dma_csr, msgdma_csroffs(control),
+		      MSGDMA_CSR_CTL_GLOBAL_INTR);
 }
 
 void msgdma_enable_rxirq(struct altera_tse_private *priv)
 {
-	struct msgdma_csr *csr = priv->rx_dma_csr;
-	tse_set_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR);
+	tse_set_bit(priv->rx_dma_csr, msgdma_csroffs(control),
+		    MSGDMA_CSR_CTL_GLOBAL_INTR);
 }
 
 void msgdma_disable_txirq(struct altera_tse_private *priv)
 {
-	struct msgdma_csr *csr = priv->tx_dma_csr;
-	tse_clear_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR);
+	tse_clear_bit(priv->tx_dma_csr, msgdma_csroffs(control),
+		      MSGDMA_CSR_CTL_GLOBAL_INTR);
 }
 
 void msgdma_enable_txirq(struct altera_tse_private *priv)
 {
-	struct msgdma_csr *csr = priv->tx_dma_csr;
-	tse_set_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR);
+	tse_set_bit(priv->tx_dma_csr, msgdma_csroffs(control),
+		    MSGDMA_CSR_CTL_GLOBAL_INTR);
 }
 
 void msgdma_clear_rxirq(struct altera_tse_private *priv)
 {
-	struct msgdma_csr *csr = priv->rx_dma_csr;
-	iowrite32(MSGDMA_CSR_STAT_IRQ, &csr->status);
+	csrwr32(MSGDMA_CSR_STAT_IRQ, priv->rx_dma_csr, msgdma_csroffs(status));
 }
 
 void msgdma_clear_txirq(struct altera_tse_private *priv)
 {
-	struct msgdma_csr *csr = priv->tx_dma_csr;
-	iowrite32(MSGDMA_CSR_STAT_IRQ, &csr->status);
+	csrwr32(MSGDMA_CSR_STAT_IRQ, priv->tx_dma_csr, msgdma_csroffs(status));
 }
 
 /* return 0 to indicate transmit is pending */
 int msgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
 {
-	struct msgdma_extended_desc *desc = priv->tx_dma_desc;
-
-	iowrite32(lower_32_bits(buffer->dma_addr), &desc->read_addr_lo);
-	iowrite32(upper_32_bits(buffer->dma_addr), &desc->read_addr_hi);
-	iowrite32(0, &desc->write_addr_lo);
-	iowrite32(0, &desc->write_addr_hi);
-	iowrite32(buffer->len, &desc->len);
-	iowrite32(0, &desc->burst_seq_num);
-	iowrite32(MSGDMA_DESC_TX_STRIDE, &desc->stride);
-	iowrite32(MSGDMA_DESC_CTL_TX_SINGLE, &desc->control);
+	csrwr32(lower_32_bits(buffer->dma_addr), priv->tx_dma_desc,
+		msgdma_descroffs(read_addr_lo));
+	csrwr32(upper_32_bits(buffer->dma_addr), priv->tx_dma_desc,
+		msgdma_descroffs(read_addr_hi));
+	csrwr32(0, priv->tx_dma_desc, msgdma_descroffs(write_addr_lo));
+	csrwr32(0, priv->tx_dma_desc, msgdma_descroffs(write_addr_hi));
+	csrwr32(buffer->len, priv->tx_dma_desc, msgdma_descroffs(len));
+	csrwr32(0, priv->tx_dma_desc, msgdma_descroffs(burst_seq_num));
+	csrwr32(MSGDMA_DESC_TX_STRIDE, priv->tx_dma_desc,
+		msgdma_descroffs(stride));
+	csrwr32(MSGDMA_DESC_CTL_TX_SINGLE, priv->tx_dma_desc,
+		msgdma_descroffs(control));
 	return 0;
 }
 
@@ -133,17 +139,16 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv)
 	u32 ready = 0;
 	u32 inuse;
 	u32 status;
-	struct msgdma_csr *txcsr =
-		(struct msgdma_csr *)priv->tx_dma_csr;
 
 	/* Get number of sent descriptors */
-	inuse = ioread32(&txcsr->rw_fill_level) & 0xffff;
+	inuse = csrrd32(priv->tx_dma_csr, msgdma_csroffs(rw_fill_level))
+		& 0xffff;
 
 	if (inuse) { /* Tx FIFO is not empty */
 		ready = priv->tx_prod - priv->tx_cons - inuse - 1;
 	} else {
 		/* Check for buffered last packet */
-		status = ioread32(&txcsr->status);
+		status = csrrd32(priv->tx_dma_csr, msgdma_csroffs(status));
 		if (status & MSGDMA_CSR_STAT_BUSY)
 			ready = priv->tx_prod - priv->tx_cons - 1;
 		else
@@ -154,10 +159,9 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv)
 
 /* Put buffer to the mSGDMA RX FIFO
  */
-int msgdma_add_rx_desc(struct altera_tse_private *priv,
-		       struct tse_buffer *rxbuffer)
+void msgdma_add_rx_desc(struct altera_tse_private *priv,
+			struct tse_buffer *rxbuffer)
 {
-	struct msgdma_extended_desc *desc = priv->rx_dma_desc;
 	u32 len = priv->rx_dma_buf_sz;
 	dma_addr_t dma_addr = rxbuffer->dma_addr;
 	u32 control = (MSGDMA_DESC_CTL_END_ON_EOP
@@ -167,15 +171,16 @@ int msgdma_add_rx_desc(struct altera_tse_private *priv,
 	       | MSGDMA_DESC_CTL_TR_ERR_IRQ
 	       | MSGDMA_DESC_CTL_GO);
 
-	iowrite32(0, &desc->read_addr_lo);
-	iowrite32(0, &desc->read_addr_hi);
-	iowrite32(lower_32_bits(dma_addr), &desc->write_addr_lo);
-	iowrite32(upper_32_bits(dma_addr), &desc->write_addr_hi);
-	iowrite32(len, &desc->len);
-	iowrite32(0, &desc->burst_seq_num);
-	iowrite32(0x00010001, &desc->stride);
-	iowrite32(control, &desc->control);
-	return 1;
+	csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(read_addr_lo));
+	csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(read_addr_hi));
+	csrwr32(lower_32_bits(dma_addr), priv->rx_dma_desc,
+		msgdma_descroffs(write_addr_lo));
+	csrwr32(upper_32_bits(dma_addr), priv->rx_dma_desc,
+		msgdma_descroffs(write_addr_hi));
+	csrwr32(len, priv->rx_dma_desc, msgdma_descroffs(len));
+	csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(burst_seq_num));
+	csrwr32(0x00010001, priv->rx_dma_desc, msgdma_descroffs(stride));
+	csrwr32(control, priv->rx_dma_desc, msgdma_descroffs(control));
 }
 
 /* status is returned on upper 16 bits,
@@ -186,14 +191,13 @@ u32 msgdma_rx_status(struct altera_tse_private *priv)
 	u32 rxstatus = 0;
 	u32 pktlength;
 	u32 pktstatus;
-	struct msgdma_csr *rxcsr =
-		(struct msgdma_csr *)priv->rx_dma_csr;
-	struct msgdma_response *rxresp =
-		(struct msgdma_response *)priv->rx_dma_resp;
-
-	if (ioread32(&rxcsr->resp_fill_level) & 0xffff) {
-		pktlength = ioread32(&rxresp->bytes_transferred);
-		pktstatus = ioread32(&rxresp->status);
+
+	if (csrrd32(priv->rx_dma_csr, msgdma_csroffs(resp_fill_level))
+	    & 0xffff) {
+		pktlength = csrrd32(priv->rx_dma_resp,
+				    msgdma_respoffs(bytes_transferred));
+		pktstatus = csrrd32(priv->rx_dma_resp,
+				    msgdma_respoffs(status));
 		rxstatus = pktstatus;
 		rxstatus = rxstatus << 16;
 		rxstatus |= (pktlength & 0xffff);
diff --git a/drivers/net/ethernet/altera/altera_msgdma.h b/drivers/net/ethernet/altera/altera_msgdma.h
index 7f0f5bf2bba2..42cf61c81057 100644
--- a/drivers/net/ethernet/altera/altera_msgdma.h
+++ b/drivers/net/ethernet/altera/altera_msgdma.h
@@ -25,10 +25,11 @@ void msgdma_disable_txirq(struct altera_tse_private *);
 void msgdma_clear_rxirq(struct altera_tse_private *);
 void msgdma_clear_txirq(struct altera_tse_private *);
 u32 msgdma_tx_completions(struct altera_tse_private *);
-int msgdma_add_rx_desc(struct altera_tse_private *, struct tse_buffer *);
+void msgdma_add_rx_desc(struct altera_tse_private *, struct tse_buffer *);
 int msgdma_tx_buffer(struct altera_tse_private *, struct tse_buffer *);
 u32 msgdma_rx_status(struct altera_tse_private *);
 int msgdma_initialize(struct altera_tse_private *);
 void msgdma_uninitialize(struct altera_tse_private *);
+void msgdma_start_rxdma(struct altera_tse_private *);
 
 #endif /* __ALTERA_MSGDMA_H__ */
diff --git a/drivers/net/ethernet/altera/altera_msgdmahw.h b/drivers/net/ethernet/altera/altera_msgdmahw.h
index d7b59ba4019c..e335626e1b6b 100644
--- a/drivers/net/ethernet/altera/altera_msgdmahw.h
+++ b/drivers/net/ethernet/altera/altera_msgdmahw.h
@@ -17,15 +17,6 @@
 #ifndef __ALTERA_MSGDMAHW_H__
 #define __ALTERA_MSGDMAHW_H__
 
-/* mSGDMA standard descriptor format
- */
-struct msgdma_desc {
-	u32 read_addr;	/* data buffer source address */
-	u32 write_addr;	/* data buffer destination address */
-	u32 len;	/* the number of bytes to transfer per descriptor */
-	u32 control;	/* characteristics of the transfer */
-};
-
 /* mSGDMA extended descriptor format
  */
 struct msgdma_extended_desc {
@@ -159,6 +150,10 @@ struct msgdma_response {
 	u32 status;
 };
 
+#define msgdma_respoffs(a) (offsetof(struct msgdma_response, a))
+#define msgdma_csroffs(a) (offsetof(struct msgdma_csr, a))
+#define msgdma_descroffs(a) (offsetof(struct msgdma_extended_desc, a))
+
 /* mSGDMA response register bit definitions
  */
 #define MSGDMA_RESP_EARLY_TERM	BIT(8)
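
The three *offs() macros above carry the whole conversion: instead of overlaying a struct on I/O memory and dereferencing fields (which loses the __iomem annotation as soon as the pointer is cast), every register access becomes an __iomem base pointer plus a compile-time byte offset. A minimal sketch of the pattern, using a hypothetical two-register block (demo_csr, demo_wr32 and demo_reset are illustrative names, not part of the patch):

	#include <linux/io.h>
	#include <linux/stddef.h>
	#include <linux/types.h>

	/* Hypothetical CSR block, mirrored as a plain struct that is
	 * only ever used for offsetof() -- never dereferenced.
	 */
	struct demo_csr {
		u32 status;
		u32 control;
	};

	#define demo_csroffs(a) (offsetof(struct demo_csr, a))

	/* The accessor takes the __iomem base plus a byte offset, so
	 * the __iomem annotation survives end to end and sparse can
	 * check every access.
	 */
	static inline void demo_wr32(u32 val, void __iomem *base, size_t offs)
	{
		writel(val, base + offs);
	}

	static void demo_reset(void __iomem *csr_base)
	{
		demo_wr32(1, csr_base, demo_csroffs(control));
	}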
diff --git a/drivers/net/ethernet/altera/altera_sgdma.c b/drivers/net/ethernet/altera/altera_sgdma.c
index 0ee96639ae44..99cc56f451cf 100644
--- a/drivers/net/ethernet/altera/altera_sgdma.c
+++ b/drivers/net/ethernet/altera/altera_sgdma.c
@@ -20,28 +20,28 @@
20#include "altera_sgdmahw.h" 20#include "altera_sgdmahw.h"
21#include "altera_sgdma.h" 21#include "altera_sgdma.h"
22 22
23static void sgdma_descrip(struct sgdma_descrip *desc, 23static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc,
24 struct sgdma_descrip *ndesc, 24 struct sgdma_descrip __iomem *ndesc,
25 dma_addr_t ndesc_phys, 25 dma_addr_t ndesc_phys,
26 dma_addr_t raddr, 26 dma_addr_t raddr,
27 dma_addr_t waddr, 27 dma_addr_t waddr,
28 u16 length, 28 u16 length,
29 int generate_eop, 29 int generate_eop,
30 int rfixed, 30 int rfixed,
31 int wfixed); 31 int wfixed);
32 32
33static int sgdma_async_write(struct altera_tse_private *priv, 33static int sgdma_async_write(struct altera_tse_private *priv,
34 struct sgdma_descrip *desc); 34 struct sgdma_descrip __iomem *desc);
35 35
36static int sgdma_async_read(struct altera_tse_private *priv); 36static int sgdma_async_read(struct altera_tse_private *priv);
37 37
38static dma_addr_t 38static dma_addr_t
39sgdma_txphysaddr(struct altera_tse_private *priv, 39sgdma_txphysaddr(struct altera_tse_private *priv,
40 struct sgdma_descrip *desc); 40 struct sgdma_descrip __iomem *desc);
41 41
42static dma_addr_t 42static dma_addr_t
43sgdma_rxphysaddr(struct altera_tse_private *priv, 43sgdma_rxphysaddr(struct altera_tse_private *priv,
44 struct sgdma_descrip *desc); 44 struct sgdma_descrip __iomem *desc);
45 45
46static int sgdma_txbusy(struct altera_tse_private *priv); 46static int sgdma_txbusy(struct altera_tse_private *priv);
47 47
@@ -64,18 +64,23 @@ queue_rx_peekhead(struct altera_tse_private *priv);
 
 int sgdma_initialize(struct altera_tse_private *priv)
 {
-	priv->txctrlreg = SGDMA_CTRLREG_ILASTD;
+	priv->txctrlreg = SGDMA_CTRLREG_ILASTD |
+		      SGDMA_CTRLREG_INTEN;
 
 	priv->rxctrlreg = SGDMA_CTRLREG_IDESCRIP |
+		      SGDMA_CTRLREG_INTEN |
 		      SGDMA_CTRLREG_ILASTD;
 
+	priv->sgdmadesclen = sizeof(struct sgdma_descrip);
+
 	INIT_LIST_HEAD(&priv->txlisthd);
 	INIT_LIST_HEAD(&priv->rxlisthd);
 
 	priv->rxdescphys = (dma_addr_t) 0;
 	priv->txdescphys = (dma_addr_t) 0;
 
-	priv->rxdescphys = dma_map_single(priv->device, priv->rx_dma_desc,
+	priv->rxdescphys = dma_map_single(priv->device,
+					  (void __force *)priv->rx_dma_desc,
 					  priv->rxdescmem, DMA_BIDIRECTIONAL);
 
 	if (dma_mapping_error(priv->device, priv->rxdescphys)) {
@@ -84,7 +89,8 @@ int sgdma_initialize(struct altera_tse_private *priv)
 		return -EINVAL;
 	}
 
-	priv->txdescphys = dma_map_single(priv->device, priv->tx_dma_desc,
+	priv->txdescphys = dma_map_single(priv->device,
+					  (void __force *)priv->tx_dma_desc,
 					  priv->txdescmem, DMA_TO_DEVICE);
 
 	if (dma_mapping_error(priv->device, priv->txdescphys)) {
@@ -93,6 +99,16 @@ int sgdma_initialize(struct altera_tse_private *priv)
 		return -EINVAL;
 	}
 
+	/* Initialize descriptor memory to all 0's, sync memory to cache */
+	memset_io(priv->tx_dma_desc, 0, priv->txdescmem);
+	memset_io(priv->rx_dma_desc, 0, priv->rxdescmem);
+
+	dma_sync_single_for_device(priv->device, priv->txdescphys,
+				   priv->txdescmem, DMA_TO_DEVICE);
+
+	dma_sync_single_for_device(priv->device, priv->rxdescphys,
+				   priv->rxdescmem, DMA_TO_DEVICE);
+
 	return 0;
 }
 
@@ -112,58 +128,48 @@ void sgdma_uninitialize(struct altera_tse_private *priv)
  */
 void sgdma_reset(struct altera_tse_private *priv)
 {
-	u32 *ptxdescripmem = (u32 *)priv->tx_dma_desc;
-	u32 txdescriplen = priv->txdescmem;
-	u32 *prxdescripmem = (u32 *)priv->rx_dma_desc;
-	u32 rxdescriplen = priv->rxdescmem;
-	struct sgdma_csr *ptxsgdma = (struct sgdma_csr *)priv->tx_dma_csr;
-	struct sgdma_csr *prxsgdma = (struct sgdma_csr *)priv->rx_dma_csr;
-
 	/* Initialize descriptor memory to 0 */
-	memset(ptxdescripmem, 0, txdescriplen);
-	memset(prxdescripmem, 0, rxdescriplen);
+	memset_io(priv->tx_dma_desc, 0, priv->txdescmem);
+	memset_io(priv->rx_dma_desc, 0, priv->rxdescmem);
 
-	iowrite32(SGDMA_CTRLREG_RESET, &ptxsgdma->control);
-	iowrite32(0, &ptxsgdma->control);
+	csrwr32(SGDMA_CTRLREG_RESET, priv->tx_dma_csr, sgdma_csroffs(control));
+	csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control));
 
-	iowrite32(SGDMA_CTRLREG_RESET, &prxsgdma->control);
-	iowrite32(0, &prxsgdma->control);
+	csrwr32(SGDMA_CTRLREG_RESET, priv->rx_dma_csr, sgdma_csroffs(control));
+	csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control));
 }
 
+/* For SGDMA, interrupts remain enabled after initially enabling,
+ * so no need to provide implementations for abstract enable
+ * and disable
+ */
+
 void sgdma_enable_rxirq(struct altera_tse_private *priv)
 {
-	struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
-	priv->rxctrlreg |= SGDMA_CTRLREG_INTEN;
-	tse_set_bit(&csr->control, SGDMA_CTRLREG_INTEN);
 }
 
 void sgdma_enable_txirq(struct altera_tse_private *priv)
 {
-	struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
-	priv->txctrlreg |= SGDMA_CTRLREG_INTEN;
-	tse_set_bit(&csr->control, SGDMA_CTRLREG_INTEN);
 }
 
-/* for SGDMA, RX interrupts remain enabled after enabling */
 void sgdma_disable_rxirq(struct altera_tse_private *priv)
 {
 }
 
-/* for SGDMA, TX interrupts remain enabled after enabling */
 void sgdma_disable_txirq(struct altera_tse_private *priv)
 {
 }
 
 void sgdma_clear_rxirq(struct altera_tse_private *priv)
 {
-	struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
-	tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT);
+	tse_set_bit(priv->rx_dma_csr, sgdma_csroffs(control),
+		    SGDMA_CTRLREG_CLRINT);
 }
 
 void sgdma_clear_txirq(struct altera_tse_private *priv)
 {
-	struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
-	tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT);
+	tse_set_bit(priv->tx_dma_csr, sgdma_csroffs(control),
+		    SGDMA_CTRLREG_CLRINT);
 }
 
 /* transmits buffer through SGDMA. Returns number of buffers
@@ -173,28 +179,27 @@ void sgdma_clear_txirq(struct altera_tse_private *priv)
  */
 int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
 {
-	int pktstx = 0;
-	struct sgdma_descrip *descbase =
-		(struct sgdma_descrip *)priv->tx_dma_desc;
+	struct sgdma_descrip __iomem *descbase =
+		(struct sgdma_descrip __iomem *)priv->tx_dma_desc;
 
-	struct sgdma_descrip *cdesc = &descbase[0];
-	struct sgdma_descrip *ndesc = &descbase[1];
+	struct sgdma_descrip __iomem *cdesc = &descbase[0];
+	struct sgdma_descrip __iomem *ndesc = &descbase[1];
 
 	/* wait 'til the tx sgdma is ready for the next transmit request */
 	if (sgdma_txbusy(priv))
 		return 0;
 
-	sgdma_descrip(cdesc,			/* current descriptor */
-		      ndesc,			/* next descriptor */
-		      sgdma_txphysaddr(priv, ndesc),
-		      buffer->dma_addr,		/* address of packet to xmit */
-		      0,			/* write addr 0 for tx dma */
-		      buffer->len,		/* length of packet */
-		      SGDMA_CONTROL_EOP,	/* Generate EOP */
-		      0,			/* read fixed */
-		      SGDMA_CONTROL_WR_FIXED);	/* Generate SOP */
+	sgdma_setup_descrip(cdesc,			/* current descriptor */
+			    ndesc,			/* next descriptor */
+			    sgdma_txphysaddr(priv, ndesc),
+			    buffer->dma_addr,		/* address of packet to xmit */
+			    0,				/* write addr 0 for tx dma */
+			    buffer->len,		/* length of packet */
+			    SGDMA_CONTROL_EOP,		/* Generate EOP */
+			    0,				/* read fixed */
+			    SGDMA_CONTROL_WR_FIXED);	/* Generate SOP */
 
-	pktstx = sgdma_async_write(priv, cdesc);
+	sgdma_async_write(priv, cdesc);
 
 	/* enqueue the request to the pending transmit queue */
 	queue_tx(priv, buffer);
@@ -208,10 +213,10 @@ int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
 u32 sgdma_tx_completions(struct altera_tse_private *priv)
 {
 	u32 ready = 0;
-	struct sgdma_descrip *desc = (struct sgdma_descrip *)priv->tx_dma_desc;
 
 	if (!sgdma_txbusy(priv) &&
-	    ((desc->control & SGDMA_CONTROL_HW_OWNED) == 0) &&
+	    ((csrrd8(priv->tx_dma_desc, sgdma_descroffs(control))
+	     & SGDMA_CONTROL_HW_OWNED) == 0) &&
 	    (dequeue_tx(priv))) {
 		ready = 1;
 	}
@@ -219,11 +224,15 @@ u32 sgdma_tx_completions(struct altera_tse_private *priv)
 	return ready;
 }
 
-int sgdma_add_rx_desc(struct altera_tse_private *priv,
-		      struct tse_buffer *rxbuffer)
+void sgdma_start_rxdma(struct altera_tse_private *priv)
+{
+	sgdma_async_read(priv);
+}
+
+void sgdma_add_rx_desc(struct altera_tse_private *priv,
+		       struct tse_buffer *rxbuffer)
 {
 	queue_rx(priv, rxbuffer);
-	return sgdma_async_read(priv);
 }
 
 /* status is returned on upper 16 bits,
@@ -231,38 +240,62 @@ int sgdma_add_rx_desc(struct altera_tse_private *priv,
  */
 u32 sgdma_rx_status(struct altera_tse_private *priv)
 {
-	struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
-	struct sgdma_descrip *base = (struct sgdma_descrip *)priv->rx_dma_desc;
-	struct sgdma_descrip *desc = NULL;
-	int pktsrx;
-	unsigned int rxstatus = 0;
-	unsigned int pktlength = 0;
-	unsigned int pktstatus = 0;
+	struct sgdma_descrip __iomem *base =
+		(struct sgdma_descrip __iomem *)priv->rx_dma_desc;
+	struct sgdma_descrip __iomem *desc = NULL;
 	struct tse_buffer *rxbuffer = NULL;
+	unsigned int rxstatus = 0;
 
-	dma_sync_single_for_cpu(priv->device,
-				priv->rxdescphys,
-				priv->rxdescmem,
-				DMA_BIDIRECTIONAL);
+	u32 sts = csrrd32(priv->rx_dma_csr, sgdma_csroffs(status));
 
 	desc = &base[0];
-	if ((ioread32(&csr->status) & SGDMA_STSREG_EOP) ||
-	    (desc->status & SGDMA_STATUS_EOP)) {
-		pktlength = desc->bytes_xferred;
-		pktstatus = desc->status & 0x3f;
-		rxstatus = pktstatus;
+	if (sts & SGDMA_STSREG_EOP) {
+		unsigned int pktlength = 0;
+		unsigned int pktstatus = 0;
+		dma_sync_single_for_cpu(priv->device,
+					priv->rxdescphys,
+					priv->sgdmadesclen,
+					DMA_FROM_DEVICE);
+
+		pktlength = csrrd16(desc, sgdma_descroffs(bytes_xferred));
+		pktstatus = csrrd8(desc, sgdma_descroffs(status));
+		rxstatus = pktstatus & ~SGDMA_STATUS_EOP;
 		rxstatus = rxstatus << 16;
 		rxstatus |= (pktlength & 0xffff);
 
-		desc->status = 0;
-
-		rxbuffer = dequeue_rx(priv);
-		if (rxbuffer == NULL)
+		if (rxstatus) {
+			csrwr8(0, desc, sgdma_descroffs(status));
+
+			rxbuffer = dequeue_rx(priv);
+			if (rxbuffer == NULL)
+				netdev_info(priv->dev,
+					    "sgdma rx and rx queue empty!\n");
+
+			/* Clear control */
+			csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control));
+			/* clear status */
+			csrwr32(0xf, priv->rx_dma_csr, sgdma_csroffs(status));
+
+			/* kick the rx sgdma after reaping this descriptor */
+			sgdma_async_read(priv);
+
+		} else {
+			/* If the SGDMA indicated an end of packet on recv,
+			 * then it's expected that the rxstatus from the
+			 * descriptor is non-zero - meaning a valid packet
+			 * with a nonzero length, or an error has been
+			 * indicated. if not, then all we can do is signal
+			 * an error and return no packet received. Most likely
+			 * there is a system design error, or an error in the
+			 * underlying kernel (cache or cache management problem)
+			 */
 			netdev_err(priv->dev,
-				   "sgdma rx and rx queue empty!\n");
-
-		/* kick the rx sgdma after reaping this descriptor */
-		pktsrx = sgdma_async_read(priv);
+				   "SGDMA RX Error Info: %x, %x, %x\n",
+				   sts, csrrd8(desc, sgdma_descroffs(status)),
+				   rxstatus);
+		}
+	} else if (sts == 0) {
+		sgdma_async_read(priv);
 	}
 
 	return rxstatus;
@@ -270,38 +303,41 @@ u32 sgdma_rx_status(struct altera_tse_private *priv)
 
 
 /* Private functions */
-static void sgdma_descrip(struct sgdma_descrip *desc,
-			  struct sgdma_descrip *ndesc,
-			  dma_addr_t ndesc_phys,
-			  dma_addr_t raddr,
-			  dma_addr_t waddr,
-			  u16 length,
-			  int generate_eop,
-			  int rfixed,
-			  int wfixed)
+static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc,
+				struct sgdma_descrip __iomem *ndesc,
+				dma_addr_t ndesc_phys,
+				dma_addr_t raddr,
+				dma_addr_t waddr,
+				u16 length,
+				int generate_eop,
+				int rfixed,
+				int wfixed)
 {
 	/* Clear the next descriptor as not owned by hardware */
-	u32 ctrl = ndesc->control;
+
+	u32 ctrl = csrrd8(ndesc, sgdma_descroffs(control));
 	ctrl &= ~SGDMA_CONTROL_HW_OWNED;
-	ndesc->control = ctrl;
+	csrwr8(ctrl, ndesc, sgdma_descroffs(control));
 
-	ctrl = 0;
 	ctrl = SGDMA_CONTROL_HW_OWNED;
 	ctrl |= generate_eop;
 	ctrl |= rfixed;
 	ctrl |= wfixed;
 
 	/* Channel is implicitly zero, initialized to 0 by default */
-
-	desc->raddr = raddr;
-	desc->waddr = waddr;
-	desc->next = lower_32_bits(ndesc_phys);
-	desc->control = ctrl;
-	desc->status = 0;
-	desc->rburst = 0;
-	desc->wburst = 0;
-	desc->bytes = length;
-	desc->bytes_xferred = 0;
+	csrwr32(lower_32_bits(raddr), desc, sgdma_descroffs(raddr));
+	csrwr32(lower_32_bits(waddr), desc, sgdma_descroffs(waddr));
+
+	csrwr32(0, desc, sgdma_descroffs(pad1));
+	csrwr32(0, desc, sgdma_descroffs(pad2));
+	csrwr32(lower_32_bits(ndesc_phys), desc, sgdma_descroffs(next));
+
+	csrwr8(ctrl, desc, sgdma_descroffs(control));
+	csrwr8(0, desc, sgdma_descroffs(status));
+	csrwr8(0, desc, sgdma_descroffs(wburst));
+	csrwr8(0, desc, sgdma_descroffs(rburst));
+	csrwr16(length, desc, sgdma_descroffs(bytes));
+	csrwr16(0, desc, sgdma_descroffs(bytes_xferred));
 }
 
 /* If hardware is busy, don't restart async read.
@@ -312,48 +348,43 @@ static void sgdma_descrip(struct sgdma_descrip *desc,
  */
 static int sgdma_async_read(struct altera_tse_private *priv)
 {
-	struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
-	struct sgdma_descrip *descbase =
-		(struct sgdma_descrip *)priv->rx_dma_desc;
+	struct sgdma_descrip __iomem *descbase =
+		(struct sgdma_descrip __iomem *)priv->rx_dma_desc;
 
-	struct sgdma_descrip *cdesc = &descbase[0];
-	struct sgdma_descrip *ndesc = &descbase[1];
+	struct sgdma_descrip __iomem *cdesc = &descbase[0];
+	struct sgdma_descrip __iomem *ndesc = &descbase[1];
 
-	unsigned int sts = ioread32(&csr->status);
 	struct tse_buffer *rxbuffer = NULL;
 
 	if (!sgdma_rxbusy(priv)) {
 		rxbuffer = queue_rx_peekhead(priv);
-		if (rxbuffer == NULL)
+		if (rxbuffer == NULL) {
+			netdev_err(priv->dev, "no rx buffers available\n");
 			return 0;
+		}
 
-		sgdma_descrip(cdesc,		/* current descriptor */
-			      ndesc,		/* next descriptor */
-			      sgdma_rxphysaddr(priv, ndesc),
-			      0,		/* read addr 0 for rx dma */
-			      rxbuffer->dma_addr, /* write addr for rx dma */
-			      0,		/* read 'til EOP */
-			      0,		/* EOP: NA for rx dma */
-			      0,		/* read fixed: NA for rx dma */
-			      0);		/* SOP: NA for rx DMA */
-
-		/* clear control and status */
-		iowrite32(0, &csr->control);
-
-		/* If status available, clear those bits */
-		if (sts & 0xf)
-			iowrite32(0xf, &csr->status);
+		sgdma_setup_descrip(cdesc,		/* current descriptor */
+				    ndesc,		/* next descriptor */
+				    sgdma_rxphysaddr(priv, ndesc),
+				    0,			/* read addr 0 for rx dma */
+				    rxbuffer->dma_addr,	/* write addr for rx dma */
+				    0,			/* read 'til EOP */
+				    0,			/* EOP: NA for rx dma */
+				    0,			/* read fixed: NA for rx dma */
+				    0);			/* SOP: NA for rx DMA */
 
 		dma_sync_single_for_device(priv->device,
 					   priv->rxdescphys,
-					   priv->rxdescmem,
-					   DMA_BIDIRECTIONAL);
+					   priv->sgdmadesclen,
+					   DMA_TO_DEVICE);
 
-		iowrite32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
-			  &csr->next_descrip);
+		csrwr32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
+			priv->rx_dma_csr,
+			sgdma_csroffs(next_descrip));
 
-		iowrite32((priv->rxctrlreg | SGDMA_CTRLREG_START),
-			  &csr->control);
+		csrwr32((priv->rxctrlreg | SGDMA_CTRLREG_START),
+			priv->rx_dma_csr,
+			sgdma_csroffs(control));
 
 		return 1;
 	}
@@ -362,32 +393,32 @@ static int sgdma_async_read(struct altera_tse_private *priv)
 }
 
 static int sgdma_async_write(struct altera_tse_private *priv,
-			     struct sgdma_descrip *desc)
+			     struct sgdma_descrip __iomem *desc)
 {
-	struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
-
 	if (sgdma_txbusy(priv))
 		return 0;
 
 	/* clear control and status */
-	iowrite32(0, &csr->control);
-	iowrite32(0x1f, &csr->status);
+	csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control));
+	csrwr32(0x1f, priv->tx_dma_csr, sgdma_csroffs(status));
 
 	dma_sync_single_for_device(priv->device, priv->txdescphys,
-				   priv->txdescmem, DMA_TO_DEVICE);
+				   priv->sgdmadesclen, DMA_TO_DEVICE);
 
-	iowrite32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
-		  &csr->next_descrip);
+	csrwr32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
+		priv->tx_dma_csr,
+		sgdma_csroffs(next_descrip));
 
-	iowrite32((priv->txctrlreg | SGDMA_CTRLREG_START),
-		  &csr->control);
+	csrwr32((priv->txctrlreg | SGDMA_CTRLREG_START),
+		priv->tx_dma_csr,
+		sgdma_csroffs(control));
 
 	return 1;
 }
 
 static dma_addr_t
 sgdma_txphysaddr(struct altera_tse_private *priv,
-		 struct sgdma_descrip *desc)
+		 struct sgdma_descrip __iomem *desc)
 {
 	dma_addr_t paddr = priv->txdescmem_busaddr;
 	uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->tx_dma_desc;
@@ -396,7 +427,7 @@ sgdma_txphysaddr(struct altera_tse_private *priv,
 
 static dma_addr_t
 sgdma_rxphysaddr(struct altera_tse_private *priv,
-		 struct sgdma_descrip *desc)
+		 struct sgdma_descrip __iomem *desc)
 {
 	dma_addr_t paddr = priv->rxdescmem_busaddr;
 	uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->rx_dma_desc;
@@ -485,8 +516,8 @@ queue_rx_peekhead(struct altera_tse_private *priv)
  */
 static int sgdma_rxbusy(struct altera_tse_private *priv)
 {
-	struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
-	return ioread32(&csr->status) & SGDMA_STSREG_BUSY;
+	return csrrd32(priv->rx_dma_csr, sgdma_csroffs(status))
+	       & SGDMA_STSREG_BUSY;
 }
 
 /* waits for the tx sgdma to finish it's current operation, returns 0
@@ -495,13 +526,14 @@ static int sgdma_rxbusy(struct altera_tse_private *priv)
 static int sgdma_txbusy(struct altera_tse_private *priv)
 {
 	int delay = 0;
-	struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
 
 	/* if DMA is busy, wait for current transactino to finish */
-	while ((ioread32(&csr->status) & SGDMA_STSREG_BUSY) && (delay++ < 100))
+	while ((csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
+		& SGDMA_STSREG_BUSY) && (delay++ < 100))
 		udelay(1);
 
-	if (ioread32(&csr->status) & SGDMA_STSREG_BUSY) {
+	if (csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
+	    & SGDMA_STSREG_BUSY) {
 		netdev_err(priv->dev, "timeout waiting for tx dma\n");
 		return 1;
 	}
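
Worth noting the descriptor-sync pattern the rework settles on: descriptors are filled through the __iomem accessors, then exactly one descriptor's worth of bytes (priv->sgdmadesclen) is synced to the device with the correct direction before the engine is kicked; the old code synced the whole descriptor memory bidirectionally on every pass. Condensed from the sgdma_async_read() hunk above:

	/* Condensed from sgdma_async_read() above: after filling cdesc,
	 * sync just that one descriptor to the device, then point the
	 * engine at it and start it.
	 */
	dma_sync_single_for_device(priv->device, priv->rxdescphys,
				   priv->sgdmadesclen, DMA_TO_DEVICE);

	csrwr32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
		priv->rx_dma_csr, sgdma_csroffs(next_descrip));
	csrwr32(priv->rxctrlreg | SGDMA_CTRLREG_START,
		priv->rx_dma_csr, sgdma_csroffs(control));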
diff --git a/drivers/net/ethernet/altera/altera_sgdma.h b/drivers/net/ethernet/altera/altera_sgdma.h
index 07d471729dc4..584977e29ef9 100644
--- a/drivers/net/ethernet/altera/altera_sgdma.h
+++ b/drivers/net/ethernet/altera/altera_sgdma.h
@@ -26,10 +26,11 @@ void sgdma_clear_rxirq(struct altera_tse_private *);
 void sgdma_clear_txirq(struct altera_tse_private *);
 int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *);
 u32 sgdma_tx_completions(struct altera_tse_private *);
-int sgdma_add_rx_desc(struct altera_tse_private *priv, struct tse_buffer *);
+void sgdma_add_rx_desc(struct altera_tse_private *priv, struct tse_buffer *);
 void sgdma_status(struct altera_tse_private *);
 u32 sgdma_rx_status(struct altera_tse_private *);
 int sgdma_initialize(struct altera_tse_private *);
 void sgdma_uninitialize(struct altera_tse_private *);
+void sgdma_start_rxdma(struct altera_tse_private *);
 
 #endif /* __ALTERA_SGDMA_H__ */
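
The new sgdma_start_rxdma() prototype pairs with the start_rxdma method added to struct altera_dmaops in altera_tse.h further down. The wiring lives in the per-variant ops tables in altera_tse_main.c (not shown in this diff); roughly, and abbreviated to the two hooks relevant here:

	/* Sketch of the per-variant ops table in altera_tse_main.c
	 * (abbreviated; other members such as .tx_buffer and
	 * .get_rx_status omitted).
	 */
	static const struct altera_dmaops altera_dtype_sgdma = {
		.add_rx_desc	= sgdma_add_rx_desc,	/* now only queues */
		.start_rxdma	= sgdma_start_rxdma,	/* kicks the engine */
	};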
diff --git a/drivers/net/ethernet/altera/altera_sgdmahw.h b/drivers/net/ethernet/altera/altera_sgdmahw.h
index ba3334f35383..85bc33b218d9 100644
--- a/drivers/net/ethernet/altera/altera_sgdmahw.h
+++ b/drivers/net/ethernet/altera/altera_sgdmahw.h
@@ -19,16 +19,16 @@
 
 /* SGDMA descriptor structure */
 struct sgdma_descrip {
-	unsigned int	raddr; /* address of data to be read */
-	unsigned int	pad1;
-	unsigned int	waddr;
-	unsigned int	pad2;
-	unsigned int	next;
-	unsigned int	pad3;
-	unsigned short	bytes;
-	unsigned char	rburst;
-	unsigned char	wburst;
-	unsigned short	bytes_xferred;	/* 16 bits, bytes xferred */
+	u32	raddr; /* address of data to be read */
+	u32	pad1;
+	u32	waddr;
+	u32	pad2;
+	u32	next;
+	u32	pad3;
+	u16	bytes;
+	u8	rburst;
+	u8	wburst;
+	u16	bytes_xferred;	/* 16 bits, bytes xferred */
 
 	/* bit 0: error
 	 * bit 1: length error
@@ -39,7 +39,7 @@ struct sgdma_descrip {
 	 * bit 6: reserved
 	 * bit 7: status eop for recv case
 	 */
-	unsigned char	status;
+	u8	status;
 
 	/* bit 0: eop
 	 * bit 1: read_fixed
@@ -47,7 +47,7 @@ struct sgdma_descrip {
 	 * bits 3,4,5,6: Channel (always 0)
 	 * bit 7: hardware owned
 	 */
-	unsigned char	control;
+	u8	control;
 } __packed;
 
 
@@ -101,6 +101,8 @@ struct sgdma_csr {
 	u32	pad3[3];
 };
 
+#define sgdma_csroffs(a) (offsetof(struct sgdma_csr, a))
+#define sgdma_descroffs(a) (offsetof(struct sgdma_descrip, a))
 
 #define SGDMA_STSREG_ERR	BIT(0) /* Error */
 #define SGDMA_STSREG_EOP	BIT(1) /* EOP */
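
Switching the descriptor fields to fixed-width u8/u16/u32 types (together with __packed) pins the 32-byte layout the hardware expects, which is what makes the byte-offset accessors above safe on any architecture. If desired, the layout can even be checked at compile time; these assertions are illustrative only, not part of the patch:

	/* Illustrative compile-time layout checks for the 32-byte
	 * SGDMA descriptor (not part of the patch).
	 */
	static inline void sgdma_descrip_layout_check(void)
	{
		BUILD_BUG_ON(offsetof(struct sgdma_descrip, bytes) != 24);
		BUILD_BUG_ON(offsetof(struct sgdma_descrip, status) != 30);
		BUILD_BUG_ON(offsetof(struct sgdma_descrip, control) != 31);
		BUILD_BUG_ON(sizeof(struct sgdma_descrip) != 32);
	}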
diff --git a/drivers/net/ethernet/altera/altera_tse.h b/drivers/net/ethernet/altera/altera_tse.h
index 8feeed05de0e..2adb24d4523c 100644
--- a/drivers/net/ethernet/altera/altera_tse.h
+++ b/drivers/net/ethernet/altera/altera_tse.h
@@ -58,6 +58,8 @@
 /* MAC function configuration default settings */
 #define ALTERA_TSE_TX_IPG_LENGTH	12
 
+#define ALTERA_TSE_PAUSE_QUANTA		0xffff
+
 #define GET_BIT_VALUE(v, bit) (((v) >> (bit)) & 0x1)
 
 /* MAC Command_Config Register Bit Definitions
@@ -355,6 +357,8 @@ struct altera_tse_mac {
 	u32 reserved5[42];
 };
 
+#define tse_csroffs(a) (offsetof(struct altera_tse_mac, a))
+
 /* Transmit and Receive Command Registers Bit Definitions
  */
 #define ALTERA_TSE_TX_CMD_STAT_OMIT_CRC		BIT(17)
@@ -390,10 +394,11 @@ struct altera_dmaops {
 	void (*clear_rxirq)(struct altera_tse_private *);
 	int (*tx_buffer)(struct altera_tse_private *, struct tse_buffer *);
 	u32 (*tx_completions)(struct altera_tse_private *);
-	int (*add_rx_desc)(struct altera_tse_private *, struct tse_buffer *);
+	void (*add_rx_desc)(struct altera_tse_private *, struct tse_buffer *);
 	u32 (*get_rx_status)(struct altera_tse_private *);
 	int (*init_dma)(struct altera_tse_private *);
 	void (*uninit_dma)(struct altera_tse_private *);
+	void (*start_rxdma)(struct altera_tse_private *);
 };
 
 /* This structure is private to each device.
@@ -453,6 +458,7 @@ struct altera_tse_private {
 	u32 rxctrlreg;
 	dma_addr_t rxdescphys;
 	dma_addr_t txdescphys;
+	size_t sgdmadesclen;
 
 	struct list_head txlisthd;
 	struct list_head rxlisthd;
@@ -483,4 +489,49 @@ struct altera_tse_private {
  */
 void altera_tse_set_ethtool_ops(struct net_device *);
 
+static inline
+u32 csrrd32(void __iomem *mac, size_t offs)
+{
+	void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+	return readl(paddr);
+}
+
+static inline
+u16 csrrd16(void __iomem *mac, size_t offs)
+{
+	void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+	return readw(paddr);
+}
+
+static inline
+u8 csrrd8(void __iomem *mac, size_t offs)
+{
+	void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+	return readb(paddr);
+}
+
+static inline
+void csrwr32(u32 val, void __iomem *mac, size_t offs)
+{
+	void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+
+	writel(val, paddr);
+}
+
+static inline
+void csrwr16(u16 val, void __iomem *mac, size_t offs)
+{
+	void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+
+	writew(val, paddr);
+}
+
+static inline
+void csrwr8(u8 val, void __iomem *mac, size_t offs)
+{
+	void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+
+	writeb(val, paddr);
+}
+
 #endif /* __ALTERA_TSE_H__ */
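
With these helpers in place (and -D__CHECK_ENDIAN__ now set in the altera Makefile), sparse can verify that device memory is only ever touched through the readl/writel family. The conversion itself is mechanical; taken from the mSGDMA hunks earlier in this patch:

	/* Before: struct-overlay style; __iomem is lost in the cast */
	struct msgdma_csr *rxcsr = (struct msgdma_csr *)priv->rx_dma_csr;
	iowrite32(MSGDMA_CSR_STAT_MASK, &rxcsr->status);

	/* After: __iomem base + compile-time offset, checkable by sparse */
	csrwr32(MSGDMA_CSR_STAT_MASK, priv->rx_dma_csr,
		msgdma_csroffs(status));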
diff --git a/drivers/net/ethernet/altera/altera_tse_ethtool.c b/drivers/net/ethernet/altera/altera_tse_ethtool.c
index 319ca74f5e74..54c25eff7952 100644
--- a/drivers/net/ethernet/altera/altera_tse_ethtool.c
+++ b/drivers/net/ethernet/altera/altera_tse_ethtool.c
@@ -77,7 +77,7 @@ static void tse_get_drvinfo(struct net_device *dev,
77 struct altera_tse_private *priv = netdev_priv(dev); 77 struct altera_tse_private *priv = netdev_priv(dev);
78 u32 rev = ioread32(&priv->mac_dev->megacore_revision); 78 u32 rev = ioread32(&priv->mac_dev->megacore_revision);
79 79
80 strcpy(info->driver, "Altera TSE MAC IP Driver"); 80 strcpy(info->driver, "altera_tse");
81 strcpy(info->version, "v8.0"); 81 strcpy(info->version, "v8.0");
82 snprintf(info->fw_version, ETHTOOL_FWVERS_LEN, "v%d.%d", 82 snprintf(info->fw_version, ETHTOOL_FWVERS_LEN, "v%d.%d",
83 rev & 0xFFFF, (rev & 0xFFFF0000) >> 16); 83 rev & 0xFFFF, (rev & 0xFFFF0000) >> 16);
@@ -96,54 +96,89 @@ static void tse_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
 			   u64 *buf)
 {
 	struct altera_tse_private *priv = netdev_priv(dev);
-	struct altera_tse_mac *mac = priv->mac_dev;
 	u64 ext;
 
-	buf[0] = ioread32(&mac->frames_transmitted_ok);
-	buf[1] = ioread32(&mac->frames_received_ok);
-	buf[2] = ioread32(&mac->frames_check_sequence_errors);
-	buf[3] = ioread32(&mac->alignment_errors);
+	buf[0] = csrrd32(priv->mac_dev,
+			 tse_csroffs(frames_transmitted_ok));
+	buf[1] = csrrd32(priv->mac_dev,
+			 tse_csroffs(frames_received_ok));
+	buf[2] = csrrd32(priv->mac_dev,
+			 tse_csroffs(frames_check_sequence_errors));
+	buf[3] = csrrd32(priv->mac_dev,
+			 tse_csroffs(alignment_errors));
 
 	/* Extended aOctetsTransmittedOK counter */
-	ext = (u64) ioread32(&mac->msb_octets_transmitted_ok) << 32;
-	ext |= ioread32(&mac->octets_transmitted_ok);
+	ext = (u64) csrrd32(priv->mac_dev,
+			    tse_csroffs(msb_octets_transmitted_ok)) << 32;
+
+	ext |= csrrd32(priv->mac_dev,
+		       tse_csroffs(octets_transmitted_ok));
 	buf[4] = ext;
 
 	/* Extended aOctetsReceivedOK counter */
-	ext = (u64) ioread32(&mac->msb_octets_received_ok) << 32;
-	ext |= ioread32(&mac->octets_received_ok);
+	ext = (u64) csrrd32(priv->mac_dev,
+			    tse_csroffs(msb_octets_received_ok)) << 32;
+
+	ext |= csrrd32(priv->mac_dev,
+		       tse_csroffs(octets_received_ok));
 	buf[5] = ext;
 
-	buf[6] = ioread32(&mac->tx_pause_mac_ctrl_frames);
-	buf[7] = ioread32(&mac->rx_pause_mac_ctrl_frames);
-	buf[8] = ioread32(&mac->if_in_errors);
-	buf[9] = ioread32(&mac->if_out_errors);
-	buf[10] = ioread32(&mac->if_in_ucast_pkts);
-	buf[11] = ioread32(&mac->if_in_multicast_pkts);
-	buf[12] = ioread32(&mac->if_in_broadcast_pkts);
-	buf[13] = ioread32(&mac->if_out_discards);
-	buf[14] = ioread32(&mac->if_out_ucast_pkts);
-	buf[15] = ioread32(&mac->if_out_multicast_pkts);
-	buf[16] = ioread32(&mac->if_out_broadcast_pkts);
-	buf[17] = ioread32(&mac->ether_stats_drop_events);
+	buf[6] = csrrd32(priv->mac_dev,
+			 tse_csroffs(tx_pause_mac_ctrl_frames));
+	buf[7] = csrrd32(priv->mac_dev,
+			 tse_csroffs(rx_pause_mac_ctrl_frames));
+	buf[8] = csrrd32(priv->mac_dev,
+			 tse_csroffs(if_in_errors));
+	buf[9] = csrrd32(priv->mac_dev,
+			 tse_csroffs(if_out_errors));
+	buf[10] = csrrd32(priv->mac_dev,
+			  tse_csroffs(if_in_ucast_pkts));
+	buf[11] = csrrd32(priv->mac_dev,
+			  tse_csroffs(if_in_multicast_pkts));
+	buf[12] = csrrd32(priv->mac_dev,
+			  tse_csroffs(if_in_broadcast_pkts));
+	buf[13] = csrrd32(priv->mac_dev,
+			  tse_csroffs(if_out_discards));
+	buf[14] = csrrd32(priv->mac_dev,
+			  tse_csroffs(if_out_ucast_pkts));
+	buf[15] = csrrd32(priv->mac_dev,
+			  tse_csroffs(if_out_multicast_pkts));
+	buf[16] = csrrd32(priv->mac_dev,
+			  tse_csroffs(if_out_broadcast_pkts));
+	buf[17] = csrrd32(priv->mac_dev,
+			  tse_csroffs(ether_stats_drop_events));
 
 	/* Extended etherStatsOctets counter */
-	ext = (u64) ioread32(&mac->msb_ether_stats_octets) << 32;
-	ext |= ioread32(&mac->ether_stats_octets);
+	ext = (u64) csrrd32(priv->mac_dev,
+			    tse_csroffs(msb_ether_stats_octets)) << 32;
+	ext |= csrrd32(priv->mac_dev,
+		       tse_csroffs(ether_stats_octets));
 	buf[18] = ext;
 
-	buf[19] = ioread32(&mac->ether_stats_pkts);
-	buf[20] = ioread32(&mac->ether_stats_undersize_pkts);
-	buf[21] = ioread32(&mac->ether_stats_oversize_pkts);
-	buf[22] = ioread32(&mac->ether_stats_pkts_64_octets);
-	buf[23] = ioread32(&mac->ether_stats_pkts_65to127_octets);
-	buf[24] = ioread32(&mac->ether_stats_pkts_128to255_octets);
-	buf[25] = ioread32(&mac->ether_stats_pkts_256to511_octets);
-	buf[26] = ioread32(&mac->ether_stats_pkts_512to1023_octets);
-	buf[27] = ioread32(&mac->ether_stats_pkts_1024to1518_octets);
-	buf[28] = ioread32(&mac->ether_stats_pkts_1519tox_octets);
-	buf[29] = ioread32(&mac->ether_stats_jabbers);
-	buf[30] = ioread32(&mac->ether_stats_fragments);
+	buf[19] = csrrd32(priv->mac_dev,
+			  tse_csroffs(ether_stats_pkts));
+	buf[20] = csrrd32(priv->mac_dev,
+			  tse_csroffs(ether_stats_undersize_pkts));
+	buf[21] = csrrd32(priv->mac_dev,
+			  tse_csroffs(ether_stats_oversize_pkts));
+	buf[22] = csrrd32(priv->mac_dev,
+			  tse_csroffs(ether_stats_pkts_64_octets));
+	buf[23] = csrrd32(priv->mac_dev,
+			  tse_csroffs(ether_stats_pkts_65to127_octets));
+	buf[24] = csrrd32(priv->mac_dev,
+			  tse_csroffs(ether_stats_pkts_128to255_octets));
+	buf[25] = csrrd32(priv->mac_dev,
+			  tse_csroffs(ether_stats_pkts_256to511_octets));
+	buf[26] = csrrd32(priv->mac_dev,
+			  tse_csroffs(ether_stats_pkts_512to1023_octets));
+	buf[27] = csrrd32(priv->mac_dev,
+			  tse_csroffs(ether_stats_pkts_1024to1518_octets));
+	buf[28] = csrrd32(priv->mac_dev,
+			  tse_csroffs(ether_stats_pkts_1519tox_octets));
+	buf[29] = csrrd32(priv->mac_dev,
+			  tse_csroffs(ether_stats_jabbers));
+	buf[30] = csrrd32(priv->mac_dev,
+			  tse_csroffs(ether_stats_fragments));
 }
 
 static int tse_sset_count(struct net_device *dev, int sset)
 {
 	int i;
 	struct altera_tse_private *priv = netdev_priv(dev);
-	u32 *tse_mac_regs = (u32 *)priv->mac_dev;
 	u32 *buf = regbuf;
 
 	/* Set version to a known value, so ethtool knows
 	 * how to do any special formatting of this data.
 	 * This version number will need to change if and
 	 * when this register table is changed.
+	 *
+	 * version[31:0] = 1: Dump the first 128 TSE Registers
+	 *	Upper bits are all 0 by default
+	 *
+	 * Upper 16-bits will indicate feature presence for
+	 * Ethtool register decoding in future version.
 	 */
 
 	regs->version = 1;
 
 	for (i = 0; i < TSE_NUM_REGS; i++)
-		buf[i] = ioread32(&tse_mac_regs[i]);
+		buf[i] = csrrd32(priv->mac_dev, i * 4);
 }
 
 static int tse_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
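The ioread32()-to-csrrd32() conversion above (and the csrwr32() writes in the hunks that follow) relies on the offset-based CSR helpers this series adds to altera_tse.h (see the diffstat at the top). As a rough illustration of what such accessors look like, assuming struct altera_tse_mac is the register map from that header, and not quoting the authoritative code:

	/* Byte offset of a named register within the TSE register map */
	#define tse_csroffs(a)	(offsetof(struct altera_tse_mac, a))

	static inline u32 csrrd32(void __iomem *mac, size_t offs)
	{
		void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);

		return readl(paddr);
	}

	static inline void csrwr32(u32 val, void __iomem *mac, size_t offs)
	{
		void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);

		writel(val, paddr);
	}

Routing every register access through a base pointer plus byte offset keeps the callers independent of how the CSR block is reached, which is one plausible motivation for the churn in the hunks below.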
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index c70a29e0b9f7..7330681574d2 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -100,29 +100,30 @@ static inline u32 tse_tx_avail(struct altera_tse_private *priv)
  */
 static int altera_tse_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
 {
-	struct altera_tse_mac *mac = (struct altera_tse_mac *)bus->priv;
-	unsigned int *mdio_regs = (unsigned int *)&mac->mdio_phy0;
-	u32 data;
+	struct net_device *ndev = bus->priv;
+	struct altera_tse_private *priv = netdev_priv(ndev);
 
 	/* set MDIO address */
-	iowrite32((mii_id & 0x1f), &mac->mdio_phy0_addr);
+	csrwr32((mii_id & 0x1f), priv->mac_dev,
+		tse_csroffs(mdio_phy0_addr));
 
 	/* get the data */
-	data = ioread32(&mdio_regs[regnum]) & 0xffff;
-	return data;
+	return csrrd32(priv->mac_dev,
+		       tse_csroffs(mdio_phy0) + regnum * 4) & 0xffff;
 }
 
 static int altera_tse_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
 				 u16 value)
 {
-	struct altera_tse_mac *mac = (struct altera_tse_mac *)bus->priv;
-	unsigned int *mdio_regs = (unsigned int *)&mac->mdio_phy0;
+	struct net_device *ndev = bus->priv;
+	struct altera_tse_private *priv = netdev_priv(ndev);
 
 	/* set MDIO address */
-	iowrite32((mii_id & 0x1f), &mac->mdio_phy0_addr);
+	csrwr32((mii_id & 0x1f), priv->mac_dev,
+		tse_csroffs(mdio_phy0_addr));
 
 	/* write the data */
-	iowrite32((u32) value, &mdio_regs[regnum]);
+	csrwr32(value, priv->mac_dev, tse_csroffs(mdio_phy0) + regnum * 4);
 	return 0;
 }
 
@@ -168,7 +169,7 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
 	for (i = 0; i < PHY_MAX_ADDR; i++)
 		mdio->irq[i] = PHY_POLL;
 
-	mdio->priv = priv->mac_dev;
+	mdio->priv = dev;
 	mdio->parent = priv->device;
 
 	ret = of_mdiobus_register(mdio, mdio_node);
@@ -224,6 +225,7 @@ static int tse_init_rx_buffer(struct altera_tse_private *priv,
 		dev_kfree_skb_any(rxbuffer->skb);
 		return -EINVAL;
 	}
+	rxbuffer->dma_addr &= (dma_addr_t)~3;
 	rxbuffer->len = len;
 	return 0;
 }
@@ -425,9 +427,10 @@ static int tse_rx(struct altera_tse_private *priv, int limit)
 		priv->dev->stats.rx_bytes += pktlength;
 
 		entry = next_entry;
+
+		tse_rx_refill(priv);
 	}
 
-	tse_rx_refill(priv);
 	return count;
 }
 
@@ -520,7 +523,6 @@ static irqreturn_t altera_isr(int irq, void *dev_id)
 	struct altera_tse_private *priv;
 	unsigned long int flags;
 
-
 	if (unlikely(!dev)) {
 		pr_err("%s: invalid dev pointer\n", __func__);
 		return IRQ_NONE;
@@ -562,7 +564,6 @@ static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	unsigned int nopaged_len = skb_headlen(skb);
 	enum netdev_tx ret = NETDEV_TX_OK;
 	dma_addr_t dma_addr;
-	int txcomplete = 0;
 
 	spin_lock_bh(&priv->tx_lock);
 
@@ -598,7 +599,7 @@ static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	dma_sync_single_for_device(priv->device, buffer->dma_addr,
 				   buffer->len, DMA_TO_DEVICE);
 
-	txcomplete = priv->dmaops->tx_buffer(priv, buffer);
+	priv->dmaops->tx_buffer(priv, buffer);
 
 	skb_tx_timestamp(skb);
 
@@ -697,7 +698,6 @@ static struct phy_device *connect_local_phy(struct net_device *dev)
 	struct altera_tse_private *priv = netdev_priv(dev);
 	struct phy_device *phydev = NULL;
 	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
-	int ret;
 
 	if (priv->phy_addr != POLL_PHY) {
 		snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
@@ -711,6 +711,7 @@ static struct phy_device *connect_local_phy(struct net_device *dev)
 			netdev_err(dev, "Could not attach to PHY\n");
 
 	} else {
+		int ret;
 		phydev = phy_find_first(priv->mdio);
 		if (phydev == NULL) {
 			netdev_err(dev, "No PHY found\n");
@@ -790,7 +791,6 @@ static int init_phy(struct net_device *dev)
 
 static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr)
 {
-	struct altera_tse_mac *mac = priv->mac_dev;
 	u32 msb;
 	u32 lsb;
 
@@ -798,8 +798,8 @@ static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr)
 	lsb = ((addr[5] << 8) | addr[4]) & 0xffff;
 
 	/* Set primary MAC address */
-	iowrite32(msb, &mac->mac_addr_0);
-	iowrite32(lsb, &mac->mac_addr_1);
+	csrwr32(msb, priv->mac_dev, tse_csroffs(mac_addr_0));
+	csrwr32(lsb, priv->mac_dev, tse_csroffs(mac_addr_1));
 }
 
 /* MAC software reset.
@@ -810,26 +810,26 @@ static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr)
  */
 static int reset_mac(struct altera_tse_private *priv)
 {
-	void __iomem *cmd_cfg_reg = &priv->mac_dev->command_config;
 	int counter;
 	u32 dat;
 
-	dat = ioread32(cmd_cfg_reg);
+	dat = csrrd32(priv->mac_dev, tse_csroffs(command_config));
 	dat &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);
 	dat |= MAC_CMDCFG_SW_RESET | MAC_CMDCFG_CNT_RESET;
-	iowrite32(dat, cmd_cfg_reg);
+	csrwr32(dat, priv->mac_dev, tse_csroffs(command_config));
 
 	counter = 0;
 	while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
-		if (tse_bit_is_clear(cmd_cfg_reg, MAC_CMDCFG_SW_RESET))
+		if (tse_bit_is_clear(priv->mac_dev, tse_csroffs(command_config),
+				     MAC_CMDCFG_SW_RESET))
 			break;
 		udelay(1);
 	}
 
 	if (counter >= ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
-		dat = ioread32(cmd_cfg_reg);
+		dat = csrrd32(priv->mac_dev, tse_csroffs(command_config));
 		dat &= ~MAC_CMDCFG_SW_RESET;
-		iowrite32(dat, cmd_cfg_reg);
+		csrwr32(dat, priv->mac_dev, tse_csroffs(command_config));
 		return -1;
 	}
 	return 0;
@@ -839,42 +839,58 @@ static int reset_mac(struct altera_tse_private *priv)
 */
 static int init_mac(struct altera_tse_private *priv)
 {
-	struct altera_tse_mac *mac = priv->mac_dev;
 	unsigned int cmd = 0;
 	u32 frm_length;
 
 	/* Setup Rx FIFO */
-	iowrite32(priv->rx_fifo_depth - ALTERA_TSE_RX_SECTION_EMPTY,
-		  &mac->rx_section_empty);
-	iowrite32(ALTERA_TSE_RX_SECTION_FULL, &mac->rx_section_full);
-	iowrite32(ALTERA_TSE_RX_ALMOST_EMPTY, &mac->rx_almost_empty);
-	iowrite32(ALTERA_TSE_RX_ALMOST_FULL, &mac->rx_almost_full);
+	csrwr32(priv->rx_fifo_depth - ALTERA_TSE_RX_SECTION_EMPTY,
+		priv->mac_dev, tse_csroffs(rx_section_empty));
+
+	csrwr32(ALTERA_TSE_RX_SECTION_FULL, priv->mac_dev,
+		tse_csroffs(rx_section_full));
+
+	csrwr32(ALTERA_TSE_RX_ALMOST_EMPTY, priv->mac_dev,
+		tse_csroffs(rx_almost_empty));
+
+	csrwr32(ALTERA_TSE_RX_ALMOST_FULL, priv->mac_dev,
+		tse_csroffs(rx_almost_full));
 
 	/* Setup Tx FIFO */
-	iowrite32(priv->tx_fifo_depth - ALTERA_TSE_TX_SECTION_EMPTY,
-		  &mac->tx_section_empty);
-	iowrite32(ALTERA_TSE_TX_SECTION_FULL, &mac->tx_section_full);
-	iowrite32(ALTERA_TSE_TX_ALMOST_EMPTY, &mac->tx_almost_empty);
-	iowrite32(ALTERA_TSE_TX_ALMOST_FULL, &mac->tx_almost_full);
+	csrwr32(priv->tx_fifo_depth - ALTERA_TSE_TX_SECTION_EMPTY,
+		priv->mac_dev, tse_csroffs(tx_section_empty));
+
+	csrwr32(ALTERA_TSE_TX_SECTION_FULL, priv->mac_dev,
+		tse_csroffs(tx_section_full));
+
+	csrwr32(ALTERA_TSE_TX_ALMOST_EMPTY, priv->mac_dev,
+		tse_csroffs(tx_almost_empty));
+
+	csrwr32(ALTERA_TSE_TX_ALMOST_FULL, priv->mac_dev,
+		tse_csroffs(tx_almost_full));
 
 	/* MAC Address Configuration */
 	tse_update_mac_addr(priv, priv->dev->dev_addr);
 
 	/* MAC Function Configuration */
 	frm_length = ETH_HLEN + priv->dev->mtu + ETH_FCS_LEN;
-	iowrite32(frm_length, &mac->frm_length);
-	iowrite32(ALTERA_TSE_TX_IPG_LENGTH, &mac->tx_ipg_length);
+	csrwr32(frm_length, priv->mac_dev, tse_csroffs(frm_length));
+
+	csrwr32(ALTERA_TSE_TX_IPG_LENGTH, priv->mac_dev,
+		tse_csroffs(tx_ipg_length));
 
 	/* Disable RX/TX shift 16 for alignment of all received frames on 16-bit
 	 * start address
 	 */
-	tse_clear_bit(&mac->rx_cmd_stat, ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16);
-	tse_clear_bit(&mac->tx_cmd_stat, ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 |
-					 ALTERA_TSE_TX_CMD_STAT_OMIT_CRC);
+	tse_set_bit(priv->mac_dev, tse_csroffs(rx_cmd_stat),
+		    ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16);
+
+	tse_clear_bit(priv->mac_dev, tse_csroffs(tx_cmd_stat),
+		      ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 |
+		      ALTERA_TSE_TX_CMD_STAT_OMIT_CRC);
 
 	/* Set the MAC options */
-	cmd = ioread32(&mac->command_config);
-	cmd |= MAC_CMDCFG_PAD_EN;	/* Padding Removal on Receive */
+	cmd = csrrd32(priv->mac_dev, tse_csroffs(command_config));
+	cmd &= ~MAC_CMDCFG_PAD_EN;	/* No padding Removal on Receive */
 	cmd &= ~MAC_CMDCFG_CRC_FWD;	/* CRC Removal */
 	cmd |= MAC_CMDCFG_RX_ERR_DISC;	/* Automatically discard frames
 					 * with CRC errors
@@ -882,7 +898,16 @@ static int init_mac(struct altera_tse_private *priv)
 	cmd |= MAC_CMDCFG_CNTL_FRM_ENA;
 	cmd &= ~MAC_CMDCFG_TX_ENA;
 	cmd &= ~MAC_CMDCFG_RX_ENA;
-	iowrite32(cmd, &mac->command_config);
+
+	/* Default speed and duplex setting, full/100 */
+	cmd &= ~MAC_CMDCFG_HD_ENA;
+	cmd &= ~MAC_CMDCFG_ETH_SPEED;
+	cmd &= ~MAC_CMDCFG_ENA_10;
+
+	csrwr32(cmd, priv->mac_dev, tse_csroffs(command_config));
+
+	csrwr32(ALTERA_TSE_PAUSE_QUANTA, priv->mac_dev,
+		tse_csroffs(pause_quanta));
 
 	if (netif_msg_hw(priv))
 		dev_dbg(priv->device,
@@ -895,15 +920,14 @@ static int init_mac(struct altera_tse_private *priv)
  */
 static void tse_set_mac(struct altera_tse_private *priv, bool enable)
 {
-	struct altera_tse_mac *mac = priv->mac_dev;
-	u32 value = ioread32(&mac->command_config);
+	u32 value = csrrd32(priv->mac_dev, tse_csroffs(command_config));
 
 	if (enable)
 		value |= MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA;
 	else
 		value &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);
 
-	iowrite32(value, &mac->command_config);
+	csrwr32(value, priv->mac_dev, tse_csroffs(command_config));
 }
 
 /* Change the MTU
@@ -933,13 +957,12 @@ static int tse_change_mtu(struct net_device *dev, int new_mtu)
 static void altera_tse_set_mcfilter(struct net_device *dev)
 {
 	struct altera_tse_private *priv = netdev_priv(dev);
-	struct altera_tse_mac *mac = priv->mac_dev;
 	int i;
 	struct netdev_hw_addr *ha;
 
 	/* clear the hash filter */
 	for (i = 0; i < 64; i++)
-		iowrite32(0, &(mac->hash_table[i]));
+		csrwr32(0, priv->mac_dev, tse_csroffs(hash_table) + i * 4);
 
 	netdev_for_each_mc_addr(ha, dev) {
 		unsigned int hash = 0;
@@ -955,7 +978,7 @@ static void altera_tse_set_mcfilter(struct net_device *dev)
 
 			hash = (hash << 1) | xor_bit;
 		}
-		iowrite32(1, &(mac->hash_table[hash]));
+		csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + hash * 4);
 	}
 }
 
@@ -963,12 +986,11 @@ static void altera_tse_set_mcfilter(struct net_device *dev)
 static void altera_tse_set_mcfilterall(struct net_device *dev)
 {
 	struct altera_tse_private *priv = netdev_priv(dev);
-	struct altera_tse_mac *mac = priv->mac_dev;
 	int i;
 
 	/* set the hash filter */
 	for (i = 0; i < 64; i++)
-		iowrite32(1, &(mac->hash_table[i]));
+		csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + i * 4);
 }
 
 /* Set or clear the multicast filter for this adaptor
@@ -976,12 +998,12 @@ static void altera_tse_set_mcfilterall(struct net_device *dev)
 static void tse_set_rx_mode_hashfilter(struct net_device *dev)
 {
 	struct altera_tse_private *priv = netdev_priv(dev);
-	struct altera_tse_mac *mac = priv->mac_dev;
 
 	spin_lock(&priv->mac_cfg_lock);
 
 	if (dev->flags & IFF_PROMISC)
-		tse_set_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN);
+		tse_set_bit(priv->mac_dev, tse_csroffs(command_config),
+			    MAC_CMDCFG_PROMIS_EN);
 
 	if (dev->flags & IFF_ALLMULTI)
 		altera_tse_set_mcfilterall(dev);
@@ -996,15 +1018,16 @@ static void tse_set_rx_mode_hashfilter(struct net_device *dev)
 static void tse_set_rx_mode(struct net_device *dev)
 {
 	struct altera_tse_private *priv = netdev_priv(dev);
-	struct altera_tse_mac *mac = priv->mac_dev;
 
 	spin_lock(&priv->mac_cfg_lock);
 
 	if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) ||
 	    !netdev_mc_empty(dev) || !netdev_uc_empty(dev))
-		tse_set_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN);
+		tse_set_bit(priv->mac_dev, tse_csroffs(command_config),
+			    MAC_CMDCFG_PROMIS_EN);
 	else
-		tse_clear_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN);
+		tse_clear_bit(priv->mac_dev, tse_csroffs(command_config),
+			      MAC_CMDCFG_PROMIS_EN);
 
 	spin_unlock(&priv->mac_cfg_lock);
 }
@@ -1085,17 +1108,19 @@ static int tse_open(struct net_device *dev)
 
 	spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
 
-	/* Start MAC Rx/Tx */
-	spin_lock(&priv->mac_cfg_lock);
-	tse_set_mac(priv, true);
-	spin_unlock(&priv->mac_cfg_lock);
-
 	if (priv->phydev)
 		phy_start(priv->phydev);
 
 	napi_enable(&priv->napi);
 	netif_start_queue(dev);
 
+	priv->dmaops->start_rxdma(priv);
+
+	/* Start MAC Rx/Tx */
+	spin_lock(&priv->mac_cfg_lock);
+	tse_set_mac(priv, true);
+	spin_unlock(&priv->mac_cfg_lock);
+
 	return 0;
 
 tx_request_irq_error:
@@ -1167,7 +1192,6 @@ static struct net_device_ops altera_tse_netdev_ops = {
 	.ndo_validate_addr	= eth_validate_addr,
 };
 
-
 static int request_and_map(struct platform_device *pdev, const char *name,
 			   struct resource **res, void __iomem **ptr)
 {
@@ -1235,7 +1259,7 @@ static int altera_tse_probe(struct platform_device *pdev)
 	/* Get the mapped address to the SGDMA descriptor memory */
 	ret = request_and_map(pdev, "s1", &dma_res, &descmap);
 	if (ret)
-		goto out_free;
+		goto err_free_netdev;
 
 	/* Start of that memory is for transmit descriptors */
 	priv->tx_dma_desc = descmap;
@@ -1254,24 +1278,24 @@ static int altera_tse_probe(struct platform_device *pdev)
 		if (upper_32_bits(priv->rxdescmem_busaddr)) {
 			dev_dbg(priv->device,
 				"SGDMA bus addresses greater than 32-bits\n");
-			goto out_free;
+			goto err_free_netdev;
 		}
 		if (upper_32_bits(priv->txdescmem_busaddr)) {
 			dev_dbg(priv->device,
 				"SGDMA bus addresses greater than 32-bits\n");
-			goto out_free;
+			goto err_free_netdev;
 		}
 	} else if (priv->dmaops &&
 		   priv->dmaops->altera_dtype == ALTERA_DTYPE_MSGDMA) {
 		ret = request_and_map(pdev, "rx_resp", &dma_res,
 				      &priv->rx_dma_resp);
 		if (ret)
-			goto out_free;
+			goto err_free_netdev;
 
 		ret = request_and_map(pdev, "tx_desc", &dma_res,
 				      &priv->tx_dma_desc);
 		if (ret)
-			goto out_free;
+			goto err_free_netdev;
 
 		priv->txdescmem = resource_size(dma_res);
 		priv->txdescmem_busaddr = dma_res->start;
@@ -1279,13 +1303,13 @@ static int altera_tse_probe(struct platform_device *pdev)
 		ret = request_and_map(pdev, "rx_desc", &dma_res,
 				      &priv->rx_dma_desc);
 		if (ret)
-			goto out_free;
+			goto err_free_netdev;
 
 		priv->rxdescmem = resource_size(dma_res);
 		priv->rxdescmem_busaddr = dma_res->start;
 
 	} else {
-		goto out_free;
+		goto err_free_netdev;
 	}
 
 	if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask)))
@@ -1294,26 +1318,26 @@ static int altera_tse_probe(struct platform_device *pdev)
 	else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32)))
 		dma_set_coherent_mask(priv->device, DMA_BIT_MASK(32));
 	else
-		goto out_free;
+		goto err_free_netdev;
 
 	/* MAC address space */
 	ret = request_and_map(pdev, "control_port", &control_port,
 			      (void __iomem **)&priv->mac_dev);
 	if (ret)
-		goto out_free;
+		goto err_free_netdev;
 
 	/* xSGDMA Rx Dispatcher address space */
 	ret = request_and_map(pdev, "rx_csr", &dma_res,
 			      &priv->rx_dma_csr);
 	if (ret)
-		goto out_free;
+		goto err_free_netdev;
 
 
 	/* xSGDMA Tx Dispatcher address space */
 	ret = request_and_map(pdev, "tx_csr", &dma_res,
 			      &priv->tx_dma_csr);
 	if (ret)
-		goto out_free;
+		goto err_free_netdev;
 
 
 	/* Rx IRQ */
@@ -1321,7 +1345,7 @@ static int altera_tse_probe(struct platform_device *pdev)
 	if (priv->rx_irq == -ENXIO) {
 		dev_err(&pdev->dev, "cannot obtain Rx IRQ\n");
 		ret = -ENXIO;
-		goto out_free;
+		goto err_free_netdev;
 	}
 
 	/* Tx IRQ */
@@ -1329,7 +1353,7 @@ static int altera_tse_probe(struct platform_device *pdev)
 	if (priv->tx_irq == -ENXIO) {
 		dev_err(&pdev->dev, "cannot obtain Tx IRQ\n");
 		ret = -ENXIO;
-		goto out_free;
+		goto err_free_netdev;
 	}
 
 	/* get FIFO depths from device tree */
@@ -1337,14 +1361,14 @@ static int altera_tse_probe(struct platform_device *pdev)
 				 &priv->rx_fifo_depth)) {
 		dev_err(&pdev->dev, "cannot obtain rx-fifo-depth\n");
 		ret = -ENXIO;
-		goto out_free;
+		goto err_free_netdev;
 	}
 
 	if (of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth",
 				 &priv->rx_fifo_depth)) {
 		dev_err(&pdev->dev, "cannot obtain tx-fifo-depth\n");
 		ret = -ENXIO;
-		goto out_free;
+		goto err_free_netdev;
 	}
 
 	/* get hash filter settings for this instance */
@@ -1352,6 +1376,11 @@ static int altera_tse_probe(struct platform_device *pdev)
 		of_property_read_bool(pdev->dev.of_node,
 				      "altr,has-hash-multicast-filter");
 
+	/* Set hash filter to not set for now until the
+	 * multicast filter receive issue is debugged
+	 */
+	priv->hash_filter = 0;
+
 	/* get supplemental address settings for this instance */
 	priv->added_unicast =
 		of_property_read_bool(pdev->dev.of_node,
@@ -1393,7 +1422,7 @@ static int altera_tse_probe(struct platform_device *pdev)
 	      ((priv->phy_addr >= 0) && (priv->phy_addr < PHY_MAX_ADDR)))) {
 		dev_err(&pdev->dev, "invalid phy-addr specified %d\n",
 			priv->phy_addr);
-		goto out_free;
+		goto err_free_netdev;
 	}
 
 	/* Create/attach to MDIO bus */
@@ -1401,7 +1430,7 @@ static int altera_tse_probe(struct platform_device *pdev)
 				     atomic_add_return(1, &instance_count));
 
 	if (ret)
-		goto out_free;
+		goto err_free_netdev;
 
 	/* initialize netdev */
 	ether_setup(ndev);
@@ -1438,7 +1467,7 @@ static int altera_tse_probe(struct platform_device *pdev)
 	ret = register_netdev(ndev);
 	if (ret) {
 		dev_err(&pdev->dev, "failed to register TSE net device\n");
-		goto out_free_mdio;
+		goto err_register_netdev;
 	}
 
 	platform_set_drvdata(pdev, ndev);
@@ -1455,13 +1484,16 @@ static int altera_tse_probe(struct platform_device *pdev)
 	ret = init_phy(ndev);
 	if (ret != 0) {
 		netdev_err(ndev, "Cannot attach to PHY (error: %d)\n", ret);
-		goto out_free_mdio;
+		goto err_init_phy;
 	}
 	return 0;
 
-out_free_mdio:
+err_init_phy:
+	unregister_netdev(ndev);
+err_register_netdev:
+	netif_napi_del(&priv->napi);
 	altera_tse_mdio_destroy(ndev);
-out_free:
+err_free_netdev:
 	free_netdev(ndev);
 	return ret;
 }
@@ -1480,7 +1512,7 @@ static int altera_tse_remove(struct platform_device *pdev)
 	return 0;
 }
 
-struct altera_dmaops altera_dtype_sgdma = {
+static const struct altera_dmaops altera_dtype_sgdma = {
 	.altera_dtype = ALTERA_DTYPE_SGDMA,
 	.dmamask = 32,
 	.reset_dma = sgdma_reset,
@@ -1496,9 +1528,10 @@ struct altera_dmaops altera_dtype_sgdma = {
 	.get_rx_status = sgdma_rx_status,
 	.init_dma = sgdma_initialize,
 	.uninit_dma = sgdma_uninitialize,
+	.start_rxdma = sgdma_start_rxdma,
 };
 
-struct altera_dmaops altera_dtype_msgdma = {
+static const struct altera_dmaops altera_dtype_msgdma = {
 	.altera_dtype = ALTERA_DTYPE_MSGDMA,
 	.dmamask = 64,
 	.reset_dma = msgdma_reset,
@@ -1514,6 +1547,7 @@ struct altera_dmaops altera_dtype_msgdma = {
 	.get_rx_status = msgdma_rx_status,
 	.init_dma = msgdma_initialize,
 	.uninit_dma = msgdma_uninitialize,
+	.start_rxdma = msgdma_start_rxdma,
 };
 
 static struct of_device_id altera_tse_ids[] = {
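The hash-filter hunks in altera_tse_set_mcfilter() above select one of the 64 hash_table registers per multicast address; the visible hash = (hash << 1) | xor_bit step builds a 6-bit bin index from one parity bit per address octet. A standalone sketch of that computation, with a hypothetical helper name, assuming the per-octet parity scheme the hunk implies:

	/* Parity-per-octet hash used to pick one of the 64 hash_table bins */
	static unsigned int tse_mcast_hash(const unsigned char *addr)
	{
		unsigned int hash = 0;
		int octet, bit;

		for (octet = 0; octet < 6; octet++) {
			unsigned char xor_bit = 0;

			for (bit = 0; bit < 8; bit++)
				xor_bit ^= (addr[octet] >> bit) & 0x01;

			hash = (hash << 1) | xor_bit;
		}
		return hash;	/* 0..63 */
	}

Note that the probe hunk above also forces priv->hash_filter = 0 for now, so this path stays disabled until the multicast receive issue mentioned there is debugged.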
diff --git a/drivers/net/ethernet/altera/altera_utils.c b/drivers/net/ethernet/altera/altera_utils.c
index 70fa13f486b2..d7eeb1713ad2 100644
--- a/drivers/net/ethernet/altera/altera_utils.c
+++ b/drivers/net/ethernet/altera/altera_utils.c
@@ -17,28 +17,28 @@
 #include "altera_tse.h"
 #include "altera_utils.h"
 
-void tse_set_bit(void __iomem *ioaddr, u32 bit_mask)
+void tse_set_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask)
 {
-	u32 value = ioread32(ioaddr);
+	u32 value = csrrd32(ioaddr, offs);
 	value |= bit_mask;
-	iowrite32(value, ioaddr);
+	csrwr32(value, ioaddr, offs);
 }
 
-void tse_clear_bit(void __iomem *ioaddr, u32 bit_mask)
+void tse_clear_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask)
 {
-	u32 value = ioread32(ioaddr);
+	u32 value = csrrd32(ioaddr, offs);
 	value &= ~bit_mask;
-	iowrite32(value, ioaddr);
+	csrwr32(value, ioaddr, offs);
 }
 
-int tse_bit_is_set(void __iomem *ioaddr, u32 bit_mask)
+int tse_bit_is_set(void __iomem *ioaddr, size_t offs, u32 bit_mask)
 {
-	u32 value = ioread32(ioaddr);
+	u32 value = csrrd32(ioaddr, offs);
 	return (value & bit_mask) ? 1 : 0;
 }
 
-int tse_bit_is_clear(void __iomem *ioaddr, u32 bit_mask)
+int tse_bit_is_clear(void __iomem *ioaddr, size_t offs, u32 bit_mask)
 {
-	u32 value = ioread32(ioaddr);
+	u32 value = csrrd32(ioaddr, offs);
 	return (value & bit_mask) ? 0 : 1;
 }
diff --git a/drivers/net/ethernet/altera/altera_utils.h b/drivers/net/ethernet/altera/altera_utils.h
index ce1db36d3583..baf100ccf587 100644
--- a/drivers/net/ethernet/altera/altera_utils.h
+++ b/drivers/net/ethernet/altera/altera_utils.h
@@ -19,9 +19,9 @@
 #ifndef __ALTERA_UTILS_H__
 #define __ALTERA_UTILS_H__
 
-void tse_set_bit(void __iomem *ioaddr, u32 bit_mask);
-void tse_clear_bit(void __iomem *ioaddr, u32 bit_mask);
-int tse_bit_is_set(void __iomem *ioaddr, u32 bit_mask);
-int tse_bit_is_clear(void __iomem *ioaddr, u32 bit_mask);
+void tse_set_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask);
+void tse_clear_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask);
+int tse_bit_is_set(void __iomem *ioaddr, size_t offs, u32 bit_mask);
+int tse_bit_is_clear(void __iomem *ioaddr, size_t offs, u32 bit_mask);
 
 #endif /* __ALTERA_UTILS_H__*/
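With the added offs parameter, callers pass the CSR block base plus a structure offset instead of a pre-computed register pointer. For example, the promiscuous-mode update in altera_tse_main.c above changes from

	tse_set_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN);

to

	tse_set_bit(priv->mac_dev, tse_csroffs(command_config),
		    MAC_CMDCFG_PROMIS_EN);

so the helper itself resolves the final address through csrrd32()/csrwr32().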
diff --git a/drivers/net/ethernet/arc/emac.h b/drivers/net/ethernet/arc/emac.h
index 928fac6dd10a..53f85bf71526 100644
--- a/drivers/net/ethernet/arc/emac.h
+++ b/drivers/net/ethernet/arc/emac.h
@@ -11,6 +11,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/netdevice.h>
 #include <linux/phy.h>
+#include <linux/clk.h>
 
 /* STATUS and ENABLE Register bit masks */
 #define TXINT_MASK	(1<<0)	/* Transmit interrupt */
@@ -131,6 +132,7 @@ struct arc_emac_priv {
 	struct mii_bus *bus;
 
 	void __iomem *regs;
+	struct clk *clk;
 
 	struct napi_struct napi;
 	struct net_device_stats stats;
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index eeecc29cf5b7..d647a7d115ac 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -574,6 +574,18 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
 	return NETDEV_TX_OK;
 }
 
+static void arc_emac_set_address_internal(struct net_device *ndev)
+{
+	struct arc_emac_priv *priv = netdev_priv(ndev);
+	unsigned int addr_low, addr_hi;
+
+	addr_low = le32_to_cpu(*(__le32 *) &ndev->dev_addr[0]);
+	addr_hi = le16_to_cpu(*(__le16 *) &ndev->dev_addr[4]);
+
+	arc_reg_set(priv, R_ADDRL, addr_low);
+	arc_reg_set(priv, R_ADDRH, addr_hi);
+}
+
 /**
  * arc_emac_set_address - Set the MAC address for this device.
  * @ndev:	Pointer to net_device structure.
@@ -587,9 +599,7 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
  */
 static int arc_emac_set_address(struct net_device *ndev, void *p)
 {
-	struct arc_emac_priv *priv = netdev_priv(ndev);
 	struct sockaddr *addr = p;
-	unsigned int addr_low, addr_hi;
 
 	if (netif_running(ndev))
 		return -EBUSY;
@@ -599,11 +609,7 @@ static int arc_emac_set_address(struct net_device *ndev, void *p)
 
 	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
 
-	addr_low = le32_to_cpu(*(__le32 *) &ndev->dev_addr[0]);
-	addr_hi = le16_to_cpu(*(__le16 *) &ndev->dev_addr[4]);
-
-	arc_reg_set(priv, R_ADDRL, addr_low);
-	arc_reg_set(priv, R_ADDRH, addr_hi);
+	arc_emac_set_address_internal(ndev);
 
 	return 0;
 }
@@ -643,13 +649,6 @@ static int arc_emac_probe(struct platform_device *pdev)
 		return -ENODEV;
 	}
 
-	/* Get CPU clock frequency from device tree */
-	if (of_property_read_u32(pdev->dev.of_node, "clock-frequency",
-				 &clock_frequency)) {
-		dev_err(&pdev->dev, "failed to retrieve <clock-frequency> from device tree\n");
-		return -EINVAL;
-	}
-
 	/* Get IRQ from device tree */
 	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
 	if (!irq) {
@@ -677,17 +676,36 @@ static int arc_emac_probe(struct platform_device *pdev)
 	priv->regs = devm_ioremap_resource(&pdev->dev, &res_regs);
 	if (IS_ERR(priv->regs)) {
 		err = PTR_ERR(priv->regs);
-		goto out;
+		goto out_netdev;
 	}
 	dev_dbg(&pdev->dev, "Registers base address is 0x%p\n", priv->regs);
 
+	priv->clk = of_clk_get(pdev->dev.of_node, 0);
+	if (IS_ERR(priv->clk)) {
+		/* Get CPU clock frequency from device tree */
+		if (of_property_read_u32(pdev->dev.of_node, "clock-frequency",
+					 &clock_frequency)) {
+			dev_err(&pdev->dev, "failed to retrieve <clock-frequency> from device tree\n");
+			err = -EINVAL;
+			goto out_netdev;
+		}
+	} else {
+		err = clk_prepare_enable(priv->clk);
+		if (err) {
+			dev_err(&pdev->dev, "failed to enable clock\n");
+			goto out_clkget;
+		}
+
+		clock_frequency = clk_get_rate(priv->clk);
+	}
+
 	id = arc_reg_get(priv, R_ID);
 
 	/* Check for EMAC revision 5 or 7, magic number */
 	if (!(id == 0x0005fd02 || id == 0x0007fd02)) {
 		dev_err(&pdev->dev, "ARC EMAC not detected, id=0x%x\n", id);
 		err = -ENODEV;
-		goto out;
+		goto out_clken;
 	}
 	dev_info(&pdev->dev, "ARC EMAC detected with id: 0x%x\n", id);
 
@@ -702,7 +720,7 @@ static int arc_emac_probe(struct platform_device *pdev)
 			  ndev->name, ndev);
 	if (err) {
 		dev_err(&pdev->dev, "could not allocate IRQ\n");
-		goto out;
+		goto out_clken;
 	}
 
 	/* Get MAC address from device tree */
@@ -713,6 +731,7 @@ static int arc_emac_probe(struct platform_device *pdev)
 	else
 		eth_hw_addr_random(ndev);
 
+	arc_emac_set_address_internal(ndev);
 	dev_info(&pdev->dev, "MAC address is now %pM\n", ndev->dev_addr);
 
 	/* Do 1 allocation instead of 2 separate ones for Rx and Tx BD rings */
@@ -722,7 +741,7 @@ static int arc_emac_probe(struct platform_device *pdev)
 	if (!priv->rxbd) {
 		dev_err(&pdev->dev, "failed to allocate data buffers\n");
 		err = -ENOMEM;
-		goto out;
+		goto out_clken;
 	}
 
 	priv->txbd = priv->rxbd + RX_BD_NUM;
@@ -734,7 +753,7 @@ static int arc_emac_probe(struct platform_device *pdev)
 	err = arc_mdio_probe(pdev, priv);
 	if (err) {
 		dev_err(&pdev->dev, "failed to probe MII bus\n");
-		goto out;
+		goto out_clken;
 	}
 
 	priv->phy_dev = of_phy_connect(ndev, phy_node, arc_emac_adjust_link, 0,
@@ -742,7 +761,7 @@ static int arc_emac_probe(struct platform_device *pdev)
 	if (!priv->phy_dev) {
 		dev_err(&pdev->dev, "of_phy_connect() failed\n");
 		err = -ENODEV;
-		goto out;
+		goto out_mdio;
 	}
 
 	dev_info(&pdev->dev, "connected to %s phy with id 0x%x\n",
@@ -752,14 +771,25 @@ static int arc_emac_probe(struct platform_device *pdev)
 
 	err = register_netdev(ndev);
 	if (err) {
-		netif_napi_del(&priv->napi);
 		dev_err(&pdev->dev, "failed to register network device\n");
-		goto out;
+		goto out_netif_api;
 	}
 
 	return 0;
 
-out:
+out_netif_api:
+	netif_napi_del(&priv->napi);
+	phy_disconnect(priv->phy_dev);
+	priv->phy_dev = NULL;
+out_mdio:
+	arc_mdio_remove(priv);
+out_clken:
+	if (!IS_ERR(priv->clk))
+		clk_disable_unprepare(priv->clk);
+out_clkget:
+	if (!IS_ERR(priv->clk))
+		clk_put(priv->clk);
+out_netdev:
 	free_netdev(ndev);
 	return err;
 }
@@ -774,6 +804,12 @@ static int arc_emac_remove(struct platform_device *pdev)
 	arc_mdio_remove(priv);
 	unregister_netdev(ndev);
 	netif_napi_del(&priv->napi);
+
+	if (!IS_ERR(priv->clk)) {
+		clk_disable_unprepare(priv->clk);
+		clk_put(priv->clk);
+	}
+
 	free_netdev(ndev);
 
 	return 0;
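The probe rework above prefers a clock reference from the device tree and only falls back to the legacy clock-frequency property when of_clk_get() fails. Condensed into a hypothetical helper for illustration, with the error unwinding elided (see the out_clkget/out_clken labels in the probe diff for the full path):

	static int arc_emac_get_clock(struct device_node *np, struct clk **clk,
				      u32 *rate)
	{
		int err;

		*clk = of_clk_get(np, 0);
		if (IS_ERR(*clk))
			/* no clock node: fall back to the fixed property */
			return of_property_read_u32(np, "clock-frequency", rate);

		err = clk_prepare_enable(*clk);
		if (err)
			return err;

		*rate = clk_get_rate(*clk);
		return 0;
	}

Callers can then treat IS_ERR(priv->clk) as the marker for the fallback case, which is how the remove path above decides whether to disable and put the clock.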
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index a78edaccceee..3b0d43154e67 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -10051,8 +10051,8 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
 #define BCM_5710_UNDI_FW_MF_MAJOR	(0x07)
 #define BCM_5710_UNDI_FW_MF_MINOR	(0x08)
 #define BCM_5710_UNDI_FW_MF_VERS	(0x05)
-#define BNX2X_PREV_UNDI_MF_PORT(p) (0x1a150c + ((p) << 4))
-#define BNX2X_PREV_UNDI_MF_FUNC(f) (0x1a184c + ((f) << 4))
+#define BNX2X_PREV_UNDI_MF_PORT(p) (BAR_TSTRORM_INTMEM + 0x150c + ((p) << 4))
+#define BNX2X_PREV_UNDI_MF_FUNC(f) (BAR_TSTRORM_INTMEM + 0x184c + ((f) << 4))
 static bool bnx2x_prev_unload_undi_fw_supports_mf(struct bnx2x *bp)
 {
 	u8 major, minor, version;
@@ -10352,6 +10352,7 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
 	/* Reset should be performed after BRB is emptied */
 	if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
 		u32 timer_count = 1000;
+		bool need_write = true;
 
 		/* Close the MAC Rx to prevent BRB from filling up */
 		bnx2x_prev_unload_close_mac(bp, &mac_vals);
@@ -10398,7 +10399,10 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
 		 * cleaning methods - might be redundant but harmless.
 		 */
 		if (bnx2x_prev_unload_undi_fw_supports_mf(bp)) {
-			bnx2x_prev_unload_undi_mf(bp);
+			if (need_write) {
+				bnx2x_prev_unload_undi_mf(bp);
+				need_write = false;
+			}
 		} else if (prev_undi) {
 			/* If UNDI resides in memory,
 			 * manually increment it
@@ -13233,6 +13237,8 @@ static void __bnx2x_remove(struct pci_dev *pdev,
 		iounmap(bp->doorbells);
 
 		bnx2x_release_firmware(bp);
+	} else {
+		bnx2x_vf_pci_dealloc(bp);
 	}
 	bnx2x_free_mem_bp(bp);
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 5c523b32db70..b8078d50261b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -427,7 +427,9 @@ static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
 	if (filter->add && filter->type == BNX2X_VF_FILTER_VLAN &&
 	    (atomic_read(&bnx2x_vfq(vf, qid, vlan_count)) >=
 	     vf_vlan_rules_cnt(vf))) {
-		BNX2X_ERR("No credits for vlan\n");
+		BNX2X_ERR("No credits for vlan [%d >= %d]\n",
+			  atomic_read(&bnx2x_vfq(vf, qid, vlan_count)),
+			  vf_vlan_rules_cnt(vf));
 		return -ENOMEM;
 	}
 
@@ -610,6 +612,7 @@ int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf,
 	}
 
 	/* add new mcasts */
+	mcast.mcast_list_len = mc_num;
 	rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_ADD);
 	if (rc)
 		BNX2X_ERR("Failed to add multicasts\n");
@@ -837,6 +840,29 @@ int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
 	return 0;
 }
 
+static void bnx2x_iov_re_set_vlan_filters(struct bnx2x *bp,
+					  struct bnx2x_virtf *vf,
+					  int new)
+{
+	int num = vf_vlan_rules_cnt(vf);
+	int diff = new - num;
+	bool rc = true;
+
+	DP(BNX2X_MSG_IOV, "vf[%d] - %d vlan filter credits [previously %d]\n",
+	   vf->abs_vfid, new, num);
+
+	if (diff > 0)
+		rc = bp->vlans_pool.get(&bp->vlans_pool, diff);
+	else if (diff < 0)
+		rc = bp->vlans_pool.put(&bp->vlans_pool, -diff);
+
+	if (rc)
+		vf_vlan_rules_cnt(vf) = new;
+	else
+		DP(BNX2X_MSG_IOV, "vf[%d] - Failed to configure vlan filter credits change\n",
+		   vf->abs_vfid);
+}
+
 /* must be called after the number of PF queues and the number of VFs are
  * both known
  */
@@ -854,9 +880,11 @@ bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
 	resc->num_mac_filters = 1;
 
 	/* divvy up vlan rules */
+	bnx2x_iov_re_set_vlan_filters(bp, vf, 0);
 	vlan_count = bp->vlans_pool.check(&bp->vlans_pool);
 	vlan_count = 1 << ilog2(vlan_count);
-	resc->num_vlan_filters = vlan_count / BNX2X_NR_VIRTFN(bp);
+	bnx2x_iov_re_set_vlan_filters(bp, vf,
+				      vlan_count / BNX2X_NR_VIRTFN(bp));
 
 	/* no real limitation */
 	resc->num_mc_filters = 0;
@@ -1478,10 +1506,6 @@ int bnx2x_iov_nic_init(struct bnx2x *bp)
 	bnx2x_iov_static_resc(bp, vf);
 
 	/* queues are initialized during VF-ACQUIRE */
-
-	/* reserve the vf vlan credit */
-	bp->vlans_pool.get(&bp->vlans_pool, vf_vlan_rules_cnt(vf));
-
 	vf->filter_state = 0;
 	vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);
 
@@ -1912,11 +1936,12 @@ int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf,
 	u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
 	u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
 
+	/* Save a vlan filter for the Hypervisor */
 	return ((req_resc->num_rxqs <= rxq_cnt) &&
 		(req_resc->num_txqs <= txq_cnt) &&
 		(req_resc->num_sbs <= vf_sb_count(vf)) &&
 		(req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
-		(req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf)));
+		(req_resc->num_vlan_filters <= vf_vlan_rules_visible_cnt(vf)));
 }
 
 /* CORE VF API */
@@ -1972,14 +1997,14 @@ int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
 	vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
 	if (resc->num_mac_filters)
 		vf_mac_rules_cnt(vf) = resc->num_mac_filters;
-	if (resc->num_vlan_filters)
-		vf_vlan_rules_cnt(vf) = resc->num_vlan_filters;
+	/* Add an additional vlan filter credit for the hypervisor */
+	bnx2x_iov_re_set_vlan_filters(bp, vf, resc->num_vlan_filters + 1);
 
 	DP(BNX2X_MSG_IOV,
 	   "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n",
 	   vf_sb_count(vf), vf_rxq_count(vf),
 	   vf_txq_count(vf), vf_mac_rules_cnt(vf),
-	   vf_vlan_rules_cnt(vf));
+	   vf_vlan_rules_visible_cnt(vf));
 
 	/* Initialize the queues */
 	if (!vf->vfqs) {
@@ -2670,7 +2695,7 @@ out:
 		bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
 	}
 
-	return 0;
+	return rc;
 }
 
 int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
@@ -2896,6 +2921,14 @@ void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
 	return bp->regview + PXP_VF_ADDR_DB_START;
 }
 
+void bnx2x_vf_pci_dealloc(struct bnx2x *bp)
+{
+	BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
+		       sizeof(struct bnx2x_vf_mbx_msg));
+	BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->pf2vf_bulletin_mapping,
+		       sizeof(union pf_vf_bulletin));
+}
+
 int bnx2x_vf_pci_alloc(struct bnx2x *bp)
 {
 	mutex_init(&bp->vf2pf_mutex);
@@ -2915,10 +2948,7 @@ int bnx2x_vf_pci_alloc(struct bnx2x *bp)
 	return 0;
 
 alloc_mem_err:
-	BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
-		       sizeof(struct bnx2x_vf_mbx_msg));
-	BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->pf2vf_bulletin_mapping,
-		       sizeof(union pf_vf_bulletin));
+	bnx2x_vf_pci_dealloc(bp);
 	return -ENOMEM;
 }
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 8bf764570eef..6929adba52f9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -159,6 +159,8 @@ struct bnx2x_virtf {
 #define vf_mac_rules_cnt(vf)		((vf)->alloc_resc.num_mac_filters)
 #define vf_vlan_rules_cnt(vf)		((vf)->alloc_resc.num_vlan_filters)
 #define vf_mc_rules_cnt(vf)		((vf)->alloc_resc.num_mc_filters)
+	/* Hide a single vlan filter credit for the hypervisor */
+#define vf_vlan_rules_visible_cnt(vf)	(vf_vlan_rules_cnt(vf) - 1)
 
 	u8 sb_count;	/* actual number of SBs */
 	u8 igu_base_id;	/* base igu status block id */
@@ -502,6 +504,7 @@ static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp,
 enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp);
 void bnx2x_timer_sriov(struct bnx2x *bp);
 void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp);
+void bnx2x_vf_pci_dealloc(struct bnx2x *bp);
 int bnx2x_vf_pci_alloc(struct bnx2x *bp);
 int bnx2x_enable_sriov(struct bnx2x *bp);
 void bnx2x_disable_sriov(struct bnx2x *bp);
@@ -568,6 +571,7 @@ static inline void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
 	return NULL;
 }
 
+static inline void bnx2x_vf_pci_dealloc(struct bnx2x *bp) {}
 static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; }
 static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {}
 static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; }
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 0622884596b2..784c7155b98a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -747,7 +747,7 @@ int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set)
 out:
 	bnx2x_vfpf_finalize(bp, &req->first_tlv);
 
-	return 0;
+	return rc;
 }
 
 /* request pf to config rss table for vf queues*/
@@ -1163,7 +1163,7 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
 					bnx2x_vf_max_queue_cnt(bp, vf);
 	resc->num_sbs = vf_sb_count(vf);
 	resc->num_mac_filters = vf_mac_rules_cnt(vf);
-	resc->num_vlan_filters = vf_vlan_rules_cnt(vf);
+	resc->num_vlan_filters = vf_vlan_rules_visible_cnt(vf);
 	resc->num_mc_filters = 0;
 
 	if (status == PFVF_STATUS_SUCCESS) {
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
index 7e49c43b7af3..9e089d24466e 100644
--- a/drivers/net/ethernet/cadence/Kconfig
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -4,7 +4,7 @@
 
 config NET_CADENCE
 	bool "Cadence devices"
-	depends on HAS_IOMEM && (ARM || AVR32 || COMPILE_TEST)
+	depends on HAS_IOMEM && (ARM || AVR32 || MICROBLAZE || COMPILE_TEST)
 	default y
 	---help---
 	  If you have a network (Ethernet) card belonging to this class, say Y.
@@ -30,7 +30,7 @@ config ARM_AT91_ETHER
 
 config MACB
 	tristate "Cadence MACB/GEM support"
-	depends on HAS_DMA && (PLATFORM_AT32AP || ARCH_AT91 || ARCH_PICOXCELL || ARCH_ZYNQ || COMPILE_TEST)
+	depends on HAS_DMA && (PLATFORM_AT32AP || ARCH_AT91 || ARCH_PICOXCELL || ARCH_ZYNQ || MICROBLAZE || COMPILE_TEST)
 	select PHYLIB
 	---help---
 	  The Cadence MACB ethernet interface is found on many Atmel AT32 and
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index ca97005e24b4..e9daa072ebb4 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -599,25 +599,16 @@ static void gem_rx_refill(struct macb *bp)
599{ 599{
600 unsigned int entry; 600 unsigned int entry;
601 struct sk_buff *skb; 601 struct sk_buff *skb;
602 struct macb_dma_desc *desc;
603 dma_addr_t paddr; 602 dma_addr_t paddr;
604 603
605 while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, RX_RING_SIZE) > 0) { 604 while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, RX_RING_SIZE) > 0) {
606 u32 addr, ctrl;
607
608 entry = macb_rx_ring_wrap(bp->rx_prepared_head); 605 entry = macb_rx_ring_wrap(bp->rx_prepared_head);
609 desc = &bp->rx_ring[entry];
610 606
611 /* Make hw descriptor updates visible to CPU */ 607 /* Make hw descriptor updates visible to CPU */
612 rmb(); 608 rmb();
613 609
614 addr = desc->addr;
615 ctrl = desc->ctrl;
616 bp->rx_prepared_head++; 610 bp->rx_prepared_head++;
617 611
618 if ((addr & MACB_BIT(RX_USED)))
619 continue;
620
621 if (bp->rx_skbuff[entry] == NULL) { 612 if (bp->rx_skbuff[entry] == NULL) {
622 /* allocate sk_buff for this free entry in ring */ 613 /* allocate sk_buff for this free entry in ring */
623 skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size); 614 skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
@@ -698,7 +689,6 @@ static int gem_rx(struct macb *bp, int budget)
698 if (!(addr & MACB_BIT(RX_USED))) 689 if (!(addr & MACB_BIT(RX_USED)))
699 break; 690 break;
700 691
701 desc->addr &= ~MACB_BIT(RX_USED);
702 bp->rx_tail++; 692 bp->rx_tail++;
703 count++; 693 count++;
704 694
@@ -891,16 +881,15 @@ static int macb_poll(struct napi_struct *napi, int budget)
891 if (work_done < budget) { 881 if (work_done < budget) {
892 napi_complete(napi); 882 napi_complete(napi);
893 883
894 /*
895 * We've done what we can to clean the buffers. Make sure we
896 * get notified when new packets arrive.
897 */
898 macb_writel(bp, IER, MACB_RX_INT_FLAGS);
899
900 /* Packets received while interrupts were disabled */ 884 /* Packets received while interrupts were disabled */
901 status = macb_readl(bp, RSR); 885 status = macb_readl(bp, RSR);
902 if (unlikely(status)) 886 if (status) {
887 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
888 macb_writel(bp, ISR, MACB_BIT(RCOMP));
903 napi_reschedule(napi); 889 napi_reschedule(napi);
890 } else {
891 macb_writel(bp, IER, MACB_RX_INT_FLAGS);
892 }
904 } 893 }
905 894
906 /* TODO: Handle errors */ 895 /* TODO: Handle errors */
@@ -951,6 +940,10 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
951 if (unlikely(status & (MACB_TX_ERR_FLAGS))) { 940 if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
952 macb_writel(bp, IDR, MACB_TX_INT_FLAGS); 941 macb_writel(bp, IDR, MACB_TX_INT_FLAGS);
953 schedule_work(&bp->tx_error_task); 942 schedule_work(&bp->tx_error_task);
943
944 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
945 macb_writel(bp, ISR, MACB_TX_ERR_FLAGS);
946
954 break; 947 break;
955 } 948 }
956 949
@@ -968,6 +961,9 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
968 bp->hw_stats.gem.rx_overruns++; 961 bp->hw_stats.gem.rx_overruns++;
969 else 962 else
970 bp->hw_stats.macb.rx_overruns++; 963 bp->hw_stats.macb.rx_overruns++;
964
965 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
966 macb_writel(bp, ISR, MACB_BIT(ISR_ROVR));
971 } 967 }
972 968
973 if (status & MACB_BIT(HRESP)) { 969 if (status & MACB_BIT(HRESP)) {
@@ -977,6 +973,9 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
977 * (work queue?) 973 * (work queue?)
978 */ 974 */
979 netdev_err(dev, "DMA bus error: HRESP not OK\n"); 975 netdev_err(dev, "DMA bus error: HRESP not OK\n");
976
977 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
978 macb_writel(bp, ISR, MACB_BIT(HRESP));
980 } 979 }
981 980
982 status = macb_readl(bp, ISR); 981 status = macb_readl(bp, ISR);
@@ -1113,7 +1112,7 @@ static void gem_free_rx_buffers(struct macb *bp)
1113 1112
1114 desc = &bp->rx_ring[i]; 1113 desc = &bp->rx_ring[i];
1115 addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr)); 1114 addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
1116 dma_unmap_single(&bp->pdev->dev, addr, skb->len, 1115 dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
1117 DMA_FROM_DEVICE); 1116 DMA_FROM_DEVICE);
1118 dev_kfree_skb_any(skb); 1117 dev_kfree_skb_any(skb);
1119 skb = NULL; 1118 skb = NULL;
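
The macb_poll() change above closes a completion race: RX interrupts must stay masked until the driver has confirmed, after napi_complete(), that nothing arrived during the final receive pass. A minimal sketch of that pattern, with hypothetical rx_pending() and rx_irq_enable() helpers standing in for the RSR read and IER write:

	static int example_poll(struct napi_struct *napi, int budget)
	{
		struct example_priv *priv = container_of(napi, struct example_priv, napi);
		int work_done = example_rx(priv, budget);	/* hypothetical rx pass */

		if (work_done < budget) {
			napi_complete(napi);
			if (rx_pending(priv))		/* frames slipped in meanwhile */
				napi_reschedule(napi);	/* poll again, IRQs stay masked */
			else
				rx_irq_enable(priv);	/* safe: nothing pending */
		}
		return work_done;
	}
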
diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig
index d40c994a4f6a..570222c33410 100644
--- a/drivers/net/ethernet/chelsio/Kconfig
+++ b/drivers/net/ethernet/chelsio/Kconfig
@@ -67,13 +67,13 @@ config CHELSIO_T3
67 will be called cxgb3. 67 will be called cxgb3.
68 68
69config CHELSIO_T4 69config CHELSIO_T4
70 tristate "Chelsio Communications T4 Ethernet support" 70 tristate "Chelsio Communications T4/T5 Ethernet support"
71 depends on PCI 71 depends on PCI
72 select FW_LOADER 72 select FW_LOADER
73 select MDIO 73 select MDIO
74 ---help--- 74 ---help---
75 This driver supports Chelsio T4-based gigabit and 10Gb Ethernet 75 This driver supports Chelsio T4 and T5 based gigabit, 10Gb Ethernet
 76 adapters. 76 adapters and T5 based 40Gb Ethernet adapters.
77 77
78 For general information about Chelsio and our products, visit 78 For general information about Chelsio and our products, visit
79 our website at <http://www.chelsio.com>. 79 our website at <http://www.chelsio.com>.
@@ -87,11 +87,12 @@ config CHELSIO_T4
87 will be called cxgb4. 87 will be called cxgb4.
88 88
89config CHELSIO_T4VF 89config CHELSIO_T4VF
90 tristate "Chelsio Communications T4 Virtual Function Ethernet support" 90 tristate "Chelsio Communications T4/T5 Virtual Function Ethernet support"
91 depends on PCI 91 depends on PCI
92 ---help--- 92 ---help---
93 This driver supports Chelsio T4-based gigabit and 10Gb Ethernet 93 This driver supports Chelsio T4 and T5 based gigabit, 10Gb Ethernet
94 adapters with PCI-E SR-IOV Virtual Functions. 94 adapters and T5 based 40Gb Ethernet adapters with PCI-E SR-IOV Virtual
95 Functions.
95 96
96 For general information about Chelsio and our products, visit 97 For general information about Chelsio and our products, visit
97 our website at <http://www.chelsio.com>. 98 our website at <http://www.chelsio.com>.
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 6fe58913403a..24e16e3301e0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -5870,6 +5870,8 @@ static void print_port_info(const struct net_device *dev)
5870 spd = " 2.5 GT/s"; 5870 spd = " 2.5 GT/s";
5871 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB) 5871 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
5872 spd = " 5 GT/s"; 5872 spd = " 5 GT/s";
5873 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB)
5874 spd = " 8 GT/s";
5873 5875
5874 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M) 5876 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
5875 bufp += sprintf(bufp, "100/"); 5877 bufp += sprintf(bufp, "100/");
diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
new file mode 100644
index 000000000000..4884205e56ee
--- /dev/null
+++ b/drivers/net/ethernet/ec_bhf.c
@@ -0,0 +1,706 @@
1 /*
2 * drivers/net/ethernet/beckhoff/ec_bhf.c
3 *
4 * Copyright (C) 2014 Darek Marcinkiewicz <reksio@newterm.pl>
5 *
6 * This software is licensed under the terms of the GNU General Public
7 * License version 2, as published by the Free Software Foundation, and
8 * may be copied, distributed, and modified under those terms.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 */
16
 17/* This is a driver for the EtherCAT master module present on the CCAT FPGA.
 18 * These can be found on Beckhoff CX50xx industrial PCs.
19 */
20
21#if 0
22#define DEBUG
23#endif
24#include <linux/kernel.h>
25#include <linux/module.h>
26#include <linux/moduleparam.h>
27#include <linux/pci.h>
28#include <linux/init.h>
29
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/ip.h>
33#include <linux/skbuff.h>
34#include <linux/hrtimer.h>
35#include <linux/interrupt.h>
36#include <linux/stat.h>
37
38#define TIMER_INTERVAL_NSEC 20000
39
40#define INFO_BLOCK_SIZE 0x10
41#define INFO_BLOCK_TYPE 0x0
42#define INFO_BLOCK_REV 0x2
43#define INFO_BLOCK_BLK_CNT 0x4
44#define INFO_BLOCK_TX_CHAN 0x4
45#define INFO_BLOCK_RX_CHAN 0x5
46#define INFO_BLOCK_OFFSET 0x8
47
48#define EC_MII_OFFSET 0x4
49#define EC_FIFO_OFFSET 0x8
50#define EC_MAC_OFFSET 0xc
51
52#define MAC_FRAME_ERR_CNT 0x0
53#define MAC_RX_ERR_CNT 0x1
54#define MAC_CRC_ERR_CNT 0x2
55#define MAC_LNK_LST_ERR_CNT 0x3
56#define MAC_TX_FRAME_CNT 0x10
57#define MAC_RX_FRAME_CNT 0x14
58#define MAC_TX_FIFO_LVL 0x20
59#define MAC_DROPPED_FRMS 0x28
60#define MAC_CONNECTED_CCAT_FLAG 0x78
61
62#define MII_MAC_ADDR 0x8
63#define MII_MAC_FILT_FLAG 0xe
64#define MII_LINK_STATUS 0xf
65
66#define FIFO_TX_REG 0x0
67#define FIFO_TX_RESET 0x8
68#define FIFO_RX_REG 0x10
69#define FIFO_RX_ADDR_VALID (1u << 31)
70#define FIFO_RX_RESET 0x18
71
72#define DMA_CHAN_OFFSET 0x1000
73#define DMA_CHAN_SIZE 0x8
74
75#define DMA_WINDOW_SIZE_MASK 0xfffffffc
76
77static struct pci_device_id ids[] = {
78 { PCI_DEVICE(0x15ec, 0x5000), },
79 { 0, }
80};
81MODULE_DEVICE_TABLE(pci, ids);
82
83struct rx_header {
84#define RXHDR_NEXT_ADDR_MASK 0xffffffu
85#define RXHDR_NEXT_VALID (1u << 31)
86 __le32 next;
87#define RXHDR_NEXT_RECV_FLAG 0x1
88 __le32 recv;
89#define RXHDR_LEN_MASK 0xfffu
90 __le16 len;
91 __le16 port;
92 __le32 reserved;
93 u8 timestamp[8];
94} __packed;
95
96#define PKT_PAYLOAD_SIZE 0x7e8
97struct rx_desc {
98 struct rx_header header;
99 u8 data[PKT_PAYLOAD_SIZE];
100} __packed;
101
102struct tx_header {
103 __le16 len;
104#define TX_HDR_PORT_0 0x1
105#define TX_HDR_PORT_1 0x2
106 u8 port;
107 u8 ts_enable;
108#define TX_HDR_SENT 0x1
109 __le32 sent;
110 u8 timestamp[8];
111} __packed;
112
113struct tx_desc {
114 struct tx_header header;
115 u8 data[PKT_PAYLOAD_SIZE];
116} __packed;
117
118#define FIFO_SIZE 64
119
120static long polling_frequency = TIMER_INTERVAL_NSEC;
121
122struct bhf_dma {
123 u8 *buf;
124 size_t len;
125 dma_addr_t buf_phys;
126
127 u8 *alloc;
128 size_t alloc_len;
129 dma_addr_t alloc_phys;
130};
131
132struct ec_bhf_priv {
133 struct net_device *net_dev;
134
135 struct pci_dev *dev;
136
 137 void __iomem *io;
 138 void __iomem *dma_io;
139
140 struct hrtimer hrtimer;
141
142 int tx_dma_chan;
143 int rx_dma_chan;
 144 void __iomem *ec_io;
 145 void __iomem *fifo_io;
 146 void __iomem *mii_io;
 147 void __iomem *mac_io;
148
149 struct bhf_dma rx_buf;
150 struct rx_desc *rx_descs;
151 int rx_dnext;
152 int rx_dcount;
153
154 struct bhf_dma tx_buf;
155 struct tx_desc *tx_descs;
156 int tx_dcount;
157 int tx_dnext;
158
159 u64 stat_rx_bytes;
160 u64 stat_tx_bytes;
161};
162
163#define PRIV_TO_DEV(priv) (&(priv)->dev->dev)
164
165#define ETHERCAT_MASTER_ID 0x14
166
167static void ec_bhf_print_status(struct ec_bhf_priv *priv)
168{
169 struct device *dev = PRIV_TO_DEV(priv);
170
171 dev_dbg(dev, "Frame error counter: %d\n",
172 ioread8(priv->mac_io + MAC_FRAME_ERR_CNT));
173 dev_dbg(dev, "RX error counter: %d\n",
174 ioread8(priv->mac_io + MAC_RX_ERR_CNT));
175 dev_dbg(dev, "CRC error counter: %d\n",
176 ioread8(priv->mac_io + MAC_CRC_ERR_CNT));
177 dev_dbg(dev, "TX frame counter: %d\n",
178 ioread32(priv->mac_io + MAC_TX_FRAME_CNT));
179 dev_dbg(dev, "RX frame counter: %d\n",
180 ioread32(priv->mac_io + MAC_RX_FRAME_CNT));
181 dev_dbg(dev, "TX fifo level: %d\n",
182 ioread8(priv->mac_io + MAC_TX_FIFO_LVL));
183 dev_dbg(dev, "Dropped frames: %d\n",
184 ioread8(priv->mac_io + MAC_DROPPED_FRMS));
185 dev_dbg(dev, "Connected with CCAT slot: %d\n",
186 ioread8(priv->mac_io + MAC_CONNECTED_CCAT_FLAG));
187 dev_dbg(dev, "Link status: %d\n",
188 ioread8(priv->mii_io + MII_LINK_STATUS));
189}
190
191static void ec_bhf_reset(struct ec_bhf_priv *priv)
192{
193 iowrite8(0, priv->mac_io + MAC_FRAME_ERR_CNT);
194 iowrite8(0, priv->mac_io + MAC_RX_ERR_CNT);
195 iowrite8(0, priv->mac_io + MAC_CRC_ERR_CNT);
196 iowrite8(0, priv->mac_io + MAC_LNK_LST_ERR_CNT);
197 iowrite32(0, priv->mac_io + MAC_TX_FRAME_CNT);
198 iowrite32(0, priv->mac_io + MAC_RX_FRAME_CNT);
199 iowrite8(0, priv->mac_io + MAC_DROPPED_FRMS);
200
201 iowrite8(0, priv->fifo_io + FIFO_TX_RESET);
202 iowrite8(0, priv->fifo_io + FIFO_RX_RESET);
203
204 iowrite8(0, priv->mac_io + MAC_TX_FIFO_LVL);
205}
206
207static void ec_bhf_send_packet(struct ec_bhf_priv *priv, struct tx_desc *desc)
208{
209 u32 len = le16_to_cpu(desc->header.len) + sizeof(desc->header);
210 u32 addr = (u8 *)desc - priv->tx_buf.buf;
211
212 iowrite32((ALIGN(len, 8) << 24) | addr, priv->fifo_io + FIFO_TX_REG);
213
214 dev_dbg(PRIV_TO_DEV(priv), "Done sending packet\n");
215}
216
217static int ec_bhf_desc_sent(struct tx_desc *desc)
218{
219 return le32_to_cpu(desc->header.sent) & TX_HDR_SENT;
220}
221
222static void ec_bhf_process_tx(struct ec_bhf_priv *priv)
223{
224 if (unlikely(netif_queue_stopped(priv->net_dev))) {
225 /* Make sure that we perceive changes to tx_dnext. */
226 smp_rmb();
227
228 if (ec_bhf_desc_sent(&priv->tx_descs[priv->tx_dnext]))
229 netif_wake_queue(priv->net_dev);
230 }
231}
232
233static int ec_bhf_pkt_received(struct rx_desc *desc)
234{
235 return le32_to_cpu(desc->header.recv) & RXHDR_NEXT_RECV_FLAG;
236}
237
238static void ec_bhf_add_rx_desc(struct ec_bhf_priv *priv, struct rx_desc *desc)
239{
240 iowrite32(FIFO_RX_ADDR_VALID | ((u8 *)(desc) - priv->rx_buf.buf),
241 priv->fifo_io + FIFO_RX_REG);
242}
243
244static void ec_bhf_process_rx(struct ec_bhf_priv *priv)
245{
246 struct rx_desc *desc = &priv->rx_descs[priv->rx_dnext];
247 struct device *dev = PRIV_TO_DEV(priv);
248
249 while (ec_bhf_pkt_received(desc)) {
250 int pkt_size = (le16_to_cpu(desc->header.len) &
251 RXHDR_LEN_MASK) - sizeof(struct rx_header) - 4;
252 u8 *data = desc->data;
253 struct sk_buff *skb;
254
255 skb = netdev_alloc_skb_ip_align(priv->net_dev, pkt_size);
256 dev_dbg(dev, "Received packet, size: %d\n", pkt_size);
257
258 if (skb) {
259 memcpy(skb_put(skb, pkt_size), data, pkt_size);
260 skb->protocol = eth_type_trans(skb, priv->net_dev);
261 dev_dbg(dev, "Protocol type: %x\n", skb->protocol);
262
263 priv->stat_rx_bytes += pkt_size;
264
265 netif_rx(skb);
266 } else {
267 dev_err_ratelimited(dev,
268 "Couldn't allocate a skb_buff for a packet of size %u\n",
269 pkt_size);
270 }
271
272 desc->header.recv = 0;
273
274 ec_bhf_add_rx_desc(priv, desc);
275
276 priv->rx_dnext = (priv->rx_dnext + 1) % priv->rx_dcount;
277 desc = &priv->rx_descs[priv->rx_dnext];
278 }
279
280}
281
282static enum hrtimer_restart ec_bhf_timer_fun(struct hrtimer *timer)
283{
284 struct ec_bhf_priv *priv = container_of(timer, struct ec_bhf_priv,
285 hrtimer);
286 ec_bhf_process_rx(priv);
287 ec_bhf_process_tx(priv);
288
289 if (!netif_running(priv->net_dev))
290 return HRTIMER_NORESTART;
291
292 hrtimer_forward_now(timer, ktime_set(0, polling_frequency));
293 return HRTIMER_RESTART;
294}
295
296static int ec_bhf_setup_offsets(struct ec_bhf_priv *priv)
297{
298 struct device *dev = PRIV_TO_DEV(priv);
299 unsigned block_count, i;
 300 void __iomem *ec_info;
301
302 dev_dbg(dev, "Info block:\n");
303 dev_dbg(dev, "Type of function: %x\n", (unsigned)ioread16(priv->io));
304 dev_dbg(dev, "Revision of function: %x\n",
305 (unsigned)ioread16(priv->io + INFO_BLOCK_REV));
306
307 block_count = ioread8(priv->io + INFO_BLOCK_BLK_CNT);
308 dev_dbg(dev, "Number of function blocks: %x\n", block_count);
309
310 for (i = 0; i < block_count; i++) {
311 u16 type = ioread16(priv->io + i * INFO_BLOCK_SIZE +
312 INFO_BLOCK_TYPE);
313 if (type == ETHERCAT_MASTER_ID)
314 break;
315 }
316 if (i == block_count) {
317 dev_err(dev, "EtherCAT master with DMA block not found\n");
318 return -ENODEV;
319 }
320 dev_dbg(dev, "EtherCAT master with DMA block found at pos: %d\n", i);
321
322 ec_info = priv->io + i * INFO_BLOCK_SIZE;
323 dev_dbg(dev, "EtherCAT master revision: %d\n",
324 ioread16(ec_info + INFO_BLOCK_REV));
325
326 priv->tx_dma_chan = ioread8(ec_info + INFO_BLOCK_TX_CHAN);
327 dev_dbg(dev, "EtherCAT master tx dma channel: %d\n",
328 priv->tx_dma_chan);
329
330 priv->rx_dma_chan = ioread8(ec_info + INFO_BLOCK_RX_CHAN);
331 dev_dbg(dev, "EtherCAT master rx dma channel: %d\n",
332 priv->rx_dma_chan);
333
334 priv->ec_io = priv->io + ioread32(ec_info + INFO_BLOCK_OFFSET);
335 priv->mii_io = priv->ec_io + ioread32(priv->ec_io + EC_MII_OFFSET);
336 priv->fifo_io = priv->ec_io + ioread32(priv->ec_io + EC_FIFO_OFFSET);
337 priv->mac_io = priv->ec_io + ioread32(priv->ec_io + EC_MAC_OFFSET);
338
339 dev_dbg(dev,
340 "EtherCAT block addres: %p, fifo address: %p, mii address: %p, mac address: %p\n",
341 priv->ec_io, priv->fifo_io, priv->mii_io, priv->mac_io);
342
343 return 0;
344}
345
346static netdev_tx_t ec_bhf_start_xmit(struct sk_buff *skb,
347 struct net_device *net_dev)
348{
349 struct ec_bhf_priv *priv = netdev_priv(net_dev);
350 struct tx_desc *desc;
351 unsigned len;
352
353 dev_dbg(PRIV_TO_DEV(priv), "Starting xmit\n");
354
355 desc = &priv->tx_descs[priv->tx_dnext];
356
357 skb_copy_and_csum_dev(skb, desc->data);
358 len = skb->len;
359
360 memset(&desc->header, 0, sizeof(desc->header));
361 desc->header.len = cpu_to_le16(len);
362 desc->header.port = TX_HDR_PORT_0;
363
364 ec_bhf_send_packet(priv, desc);
365
366 priv->tx_dnext = (priv->tx_dnext + 1) % priv->tx_dcount;
367
368 if (!ec_bhf_desc_sent(&priv->tx_descs[priv->tx_dnext])) {
 369 /* Make sure that updates to tx_dnext are perceived
370 * by timer routine.
371 */
372 smp_wmb();
373
374 netif_stop_queue(net_dev);
375
376 dev_dbg(PRIV_TO_DEV(priv), "Stopping netif queue\n");
377 ec_bhf_print_status(priv);
378 }
379
380 priv->stat_tx_bytes += len;
381
382 dev_kfree_skb(skb);
383
384 return NETDEV_TX_OK;
385}
386
387static int ec_bhf_alloc_dma_mem(struct ec_bhf_priv *priv,
388 struct bhf_dma *buf,
389 int channel,
390 int size)
391{
392 int offset = channel * DMA_CHAN_SIZE + DMA_CHAN_OFFSET;
393 struct device *dev = PRIV_TO_DEV(priv);
394 u32 mask;
395
396 iowrite32(0xffffffff, priv->dma_io + offset);
397
398 mask = ioread32(priv->dma_io + offset);
399 mask &= DMA_WINDOW_SIZE_MASK;
400 dev_dbg(dev, "Read mask %x for channel %d\n", mask, channel);
401
 402 /* We want a chunk of memory that is:
 403 * - aligned to the mask we just read
 404 * - no larger than the DMA window (~mask + 1 bytes)
 405 * To guarantee both, we allocate twice the window size
 406 * and align within the allocation (worked example below).
 407 */
408 buf->len = min_t(int, ~mask + 1, size);
409 buf->alloc_len = 2 * buf->len;
410
411 dev_dbg(dev, "Allocating %d bytes for channel %d",
412 (int)buf->alloc_len, channel);
413 buf->alloc = dma_alloc_coherent(dev, buf->alloc_len, &buf->alloc_phys,
414 GFP_KERNEL);
415 if (buf->alloc == NULL) {
416 dev_info(dev, "Failed to allocate buffer\n");
417 return -ENOMEM;
418 }
419
420 buf->buf_phys = (buf->alloc_phys + buf->len) & mask;
421 buf->buf = buf->alloc + (buf->buf_phys - buf->alloc_phys);
422
423 iowrite32(0, priv->dma_io + offset + 4);
424 iowrite32(buf->buf_phys, priv->dma_io + offset);
425 dev_dbg(dev, "Buffer: %x and read from dev: %x",
426 (unsigned)buf->buf_phys, ioread32(priv->dma_io + offset));
427
428 return 0;
429}
430
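
A worked example of the window arithmetic above, with assumed (not measured) values: say the device reports mask = 0xfffff000, i.e. a 4 KiB window, and the doubled allocation lands at alloc_phys = 0x12345678:

	buf->len       = min_t(int, ~mask + 1, size);			/* ~0xfffff000 + 1 = 0x1000 */
	buf->alloc_len = 2 * buf->len;					/* 0x2000 */
	buf->buf_phys  = (buf->alloc_phys + buf->len) & mask;		/* 0x12346000, window-aligned */
	buf->buf       = buf->alloc + (buf->buf_phys - buf->alloc_phys);	/* alloc + 0x988 */

The aligned window 0x12346000..0x12346fff sits entirely inside the 0x12345678..0x12347677 allocation, which is exactly what doubling the allocation guarantees.
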
431static void ec_bhf_setup_tx_descs(struct ec_bhf_priv *priv)
432{
433 int i = 0;
434
435 priv->tx_dcount = priv->tx_buf.len / sizeof(struct tx_desc);
436 priv->tx_descs = (struct tx_desc *) priv->tx_buf.buf;
437 priv->tx_dnext = 0;
438
439 for (i = 0; i < priv->tx_dcount; i++)
440 priv->tx_descs[i].header.sent = cpu_to_le32(TX_HDR_SENT);
441}
442
443static void ec_bhf_setup_rx_descs(struct ec_bhf_priv *priv)
444{
445 int i;
446
447 priv->rx_dcount = priv->rx_buf.len / sizeof(struct rx_desc);
448 priv->rx_descs = (struct rx_desc *) priv->rx_buf.buf;
449 priv->rx_dnext = 0;
450
451 for (i = 0; i < priv->rx_dcount; i++) {
452 struct rx_desc *desc = &priv->rx_descs[i];
453 u32 next;
454
455 if (i != priv->rx_dcount - 1)
456 next = (u8 *)(desc + 1) - priv->rx_buf.buf;
457 else
458 next = 0;
459 next |= RXHDR_NEXT_VALID;
460 desc->header.next = cpu_to_le32(next);
461 desc->header.recv = 0;
462 ec_bhf_add_rx_desc(priv, desc);
463 }
464}
465
466static int ec_bhf_open(struct net_device *net_dev)
467{
468 struct ec_bhf_priv *priv = netdev_priv(net_dev);
469 struct device *dev = PRIV_TO_DEV(priv);
470 int err = 0;
471
472 dev_info(dev, "Opening device\n");
473
474 ec_bhf_reset(priv);
475
476 err = ec_bhf_alloc_dma_mem(priv, &priv->rx_buf, priv->rx_dma_chan,
477 FIFO_SIZE * sizeof(struct rx_desc));
478 if (err) {
479 dev_err(dev, "Failed to allocate rx buffer\n");
480 goto out;
481 }
482 ec_bhf_setup_rx_descs(priv);
483
484 dev_info(dev, "RX buffer allocated, address: %x\n",
485 (unsigned)priv->rx_buf.buf_phys);
486
487 err = ec_bhf_alloc_dma_mem(priv, &priv->tx_buf, priv->tx_dma_chan,
488 FIFO_SIZE * sizeof(struct tx_desc));
489 if (err) {
490 dev_err(dev, "Failed to allocate tx buffer\n");
491 goto error_rx_free;
492 }
493 dev_dbg(dev, "TX buffer allocated, addres: %x\n",
494 (unsigned)priv->tx_buf.buf_phys);
495
496 iowrite8(0, priv->mii_io + MII_MAC_FILT_FLAG);
497
498 ec_bhf_setup_tx_descs(priv);
499
500 netif_start_queue(net_dev);
501
502 hrtimer_init(&priv->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
503 priv->hrtimer.function = ec_bhf_timer_fun;
504 hrtimer_start(&priv->hrtimer, ktime_set(0, polling_frequency),
505 HRTIMER_MODE_REL);
506
507 dev_info(PRIV_TO_DEV(priv), "Device open\n");
508
509 ec_bhf_print_status(priv);
510
511 return 0;
512
513error_rx_free:
514 dma_free_coherent(dev, priv->rx_buf.alloc_len, priv->rx_buf.alloc,
 515 priv->rx_buf.alloc_phys);
516out:
517 return err;
518}
519
520static int ec_bhf_stop(struct net_device *net_dev)
521{
522 struct ec_bhf_priv *priv = netdev_priv(net_dev);
523 struct device *dev = PRIV_TO_DEV(priv);
524
525 hrtimer_cancel(&priv->hrtimer);
526
527 ec_bhf_reset(priv);
528
529 netif_tx_disable(net_dev);
530
531 dma_free_coherent(dev, priv->tx_buf.alloc_len,
532 priv->tx_buf.alloc, priv->tx_buf.alloc_phys);
533 dma_free_coherent(dev, priv->rx_buf.alloc_len,
534 priv->rx_buf.alloc, priv->rx_buf.alloc_phys);
535
536 return 0;
537}
538
539static struct rtnl_link_stats64 *
540ec_bhf_get_stats(struct net_device *net_dev,
541 struct rtnl_link_stats64 *stats)
542{
543 struct ec_bhf_priv *priv = netdev_priv(net_dev);
544
545 stats->rx_errors = ioread8(priv->mac_io + MAC_RX_ERR_CNT) +
546 ioread8(priv->mac_io + MAC_CRC_ERR_CNT) +
547 ioread8(priv->mac_io + MAC_FRAME_ERR_CNT);
548 stats->rx_packets = ioread32(priv->mac_io + MAC_RX_FRAME_CNT);
549 stats->tx_packets = ioread32(priv->mac_io + MAC_TX_FRAME_CNT);
550 stats->rx_dropped = ioread8(priv->mac_io + MAC_DROPPED_FRMS);
551
552 stats->tx_bytes = priv->stat_tx_bytes;
553 stats->rx_bytes = priv->stat_rx_bytes;
554
555 return stats;
556}
557
558static const struct net_device_ops ec_bhf_netdev_ops = {
559 .ndo_start_xmit = ec_bhf_start_xmit,
560 .ndo_open = ec_bhf_open,
561 .ndo_stop = ec_bhf_stop,
562 .ndo_get_stats64 = ec_bhf_get_stats,
563 .ndo_change_mtu = eth_change_mtu,
564 .ndo_validate_addr = eth_validate_addr,
565 .ndo_set_mac_address = eth_mac_addr
566};
567
568static int ec_bhf_probe(struct pci_dev *dev, const struct pci_device_id *id)
569{
570 struct net_device *net_dev;
571 struct ec_bhf_priv *priv;
 572 void __iomem *dma_io;
 573 void __iomem *io;
574 int err = 0;
575
576 err = pci_enable_device(dev);
577 if (err)
578 return err;
579
580 pci_set_master(dev);
581
582 err = pci_set_dma_mask(dev, DMA_BIT_MASK(32));
583 if (err) {
584 dev_err(&dev->dev,
585 "Required dma mask not supported, failed to initialize device\n");
586 err = -EIO;
587 goto err_disable_dev;
588 }
589
590 err = pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(32));
591 if (err) {
592 dev_err(&dev->dev,
593 "Required dma mask not supported, failed to initialize device\n");
594 goto err_disable_dev;
595 }
596
597 err = pci_request_regions(dev, "ec_bhf");
598 if (err) {
599 dev_err(&dev->dev, "Failed to request pci memory regions\n");
600 goto err_disable_dev;
601 }
602
603 io = pci_iomap(dev, 0, 0);
604 if (!io) {
605 dev_err(&dev->dev, "Failed to map pci card memory bar 0");
606 err = -EIO;
607 goto err_release_regions;
608 }
609
610 dma_io = pci_iomap(dev, 2, 0);
611 if (!dma_io) {
612 dev_err(&dev->dev, "Failed to map pci card memory bar 2");
613 err = -EIO;
614 goto err_unmap;
615 }
616
617 net_dev = alloc_etherdev(sizeof(struct ec_bhf_priv));
 618 if (!net_dev) {
619 err = -ENOMEM;
620 goto err_unmap_dma_io;
621 }
622
623 pci_set_drvdata(dev, net_dev);
624 SET_NETDEV_DEV(net_dev, &dev->dev);
625
626 net_dev->features = 0;
627 net_dev->flags |= IFF_NOARP;
628
629 net_dev->netdev_ops = &ec_bhf_netdev_ops;
630
631 priv = netdev_priv(net_dev);
632 priv->net_dev = net_dev;
633 priv->io = io;
634 priv->dma_io = dma_io;
635 priv->dev = dev;
636
637 err = ec_bhf_setup_offsets(priv);
638 if (err < 0)
639 goto err_free_net_dev;
640
641 memcpy_fromio(net_dev->dev_addr, priv->mii_io + MII_MAC_ADDR, 6);
642
643 dev_dbg(&dev->dev, "CX5020 Ethercat master address: %pM\n",
644 net_dev->dev_addr);
645
646 err = register_netdev(net_dev);
647 if (err < 0)
648 goto err_free_net_dev;
649
650 return 0;
651
652err_free_net_dev:
653 free_netdev(net_dev);
654err_unmap_dma_io:
655 pci_iounmap(dev, dma_io);
656err_unmap:
657 pci_iounmap(dev, io);
658err_release_regions:
659 pci_release_regions(dev);
660err_disable_dev:
661 pci_clear_master(dev);
662 pci_disable_device(dev);
663
664 return err;
665}
666
667static void ec_bhf_remove(struct pci_dev *dev)
668{
669 struct net_device *net_dev = pci_get_drvdata(dev);
670 struct ec_bhf_priv *priv = netdev_priv(net_dev);
671
672 unregister_netdev(net_dev);
673 free_netdev(net_dev);
674
675 pci_iounmap(dev, priv->dma_io);
676 pci_iounmap(dev, priv->io);
677 pci_release_regions(dev);
678 pci_clear_master(dev);
679 pci_disable_device(dev);
680}
681
682static struct pci_driver pci_driver = {
683 .name = "ec_bhf",
684 .id_table = ids,
685 .probe = ec_bhf_probe,
686 .remove = ec_bhf_remove,
687};
688
689static int __init ec_bhf_init(void)
690{
691 return pci_register_driver(&pci_driver);
692}
693
694static void __exit ec_bhf_exit(void)
695{
696 pci_unregister_driver(&pci_driver);
697}
698
699module_init(ec_bhf_init);
700module_exit(ec_bhf_exit);
701
702module_param(polling_frequency, long, S_IRUGO);
 703MODULE_PARM_DESC(polling_frequency, "Polling timer interval in ns");
704
705MODULE_LICENSE("GPL");
706MODULE_AUTHOR("Dariusz Marcinkiewicz <reksio@newterm.pl>");
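
One detail of the driver above worth noting: the smp_wmb() in ec_bhf_start_xmit() pairs with the smp_rmb() in ec_bhf_process_tx(), so the polling timer only acts on a tx_dnext value that the xmit path published before stopping the queue. The generic shape of that pairing, as a sketch rather than the driver's exact code:

	/* producer (xmit path) */
	shared_index = next_desc;	/* payload the consumer must observe */
	smp_wmb();			/* order the payload store before the flag */
	queue_stopped = 1;		/* netif_stop_queue() in the driver */

	/* consumer (timer path) */
	if (queue_stopped) {
		smp_rmb();		/* pairs with the smp_wmb() above */
		inspect(shared_index);	/* guaranteed to see next_desc */
	}
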
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index a18645407d21..dc19bc5dec77 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -4949,6 +4949,12 @@ static void be_eeh_resume(struct pci_dev *pdev)
4949 if (status) 4949 if (status)
4950 goto err; 4950 goto err;
4951 4951
4952 /* On some BE3 FW versions, after a HW reset,
4953 * interrupts will remain disabled for each function.
4954 * So, explicitly enable interrupts
4955 */
4956 be_intr_set(adapter, true);
4957
4952 /* tell fw we're ready to fire cmds */ 4958 /* tell fw we're ready to fire cmds */
4953 status = be_cmd_fw_init(adapter); 4959 status = be_cmd_fw_init(adapter);
4954 if (status) 4960 if (status)
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 9125d9abf099..e2d42475b006 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -121,6 +121,7 @@ static irqreturn_t gfar_error(int irq, void *dev_id);
121static irqreturn_t gfar_transmit(int irq, void *dev_id); 121static irqreturn_t gfar_transmit(int irq, void *dev_id);
122static irqreturn_t gfar_interrupt(int irq, void *dev_id); 122static irqreturn_t gfar_interrupt(int irq, void *dev_id);
123static void adjust_link(struct net_device *dev); 123static void adjust_link(struct net_device *dev);
124static noinline void gfar_update_link_state(struct gfar_private *priv);
124static int init_phy(struct net_device *dev); 125static int init_phy(struct net_device *dev);
125static int gfar_probe(struct platform_device *ofdev); 126static int gfar_probe(struct platform_device *ofdev);
126static int gfar_remove(struct platform_device *ofdev); 127static int gfar_remove(struct platform_device *ofdev);
@@ -3076,41 +3077,6 @@ static irqreturn_t gfar_interrupt(int irq, void *grp_id)
3076 return IRQ_HANDLED; 3077 return IRQ_HANDLED;
3077} 3078}
3078 3079
3079static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
3080{
3081 struct phy_device *phydev = priv->phydev;
3082 u32 val = 0;
3083
3084 if (!phydev->duplex)
3085 return val;
3086
3087 if (!priv->pause_aneg_en) {
3088 if (priv->tx_pause_en)
3089 val |= MACCFG1_TX_FLOW;
3090 if (priv->rx_pause_en)
3091 val |= MACCFG1_RX_FLOW;
3092 } else {
3093 u16 lcl_adv, rmt_adv;
3094 u8 flowctrl;
3095 /* get link partner capabilities */
3096 rmt_adv = 0;
3097 if (phydev->pause)
3098 rmt_adv = LPA_PAUSE_CAP;
3099 if (phydev->asym_pause)
3100 rmt_adv |= LPA_PAUSE_ASYM;
3101
3102 lcl_adv = mii_advertise_flowctrl(phydev->advertising);
3103
3104 flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
3105 if (flowctrl & FLOW_CTRL_TX)
3106 val |= MACCFG1_TX_FLOW;
3107 if (flowctrl & FLOW_CTRL_RX)
3108 val |= MACCFG1_RX_FLOW;
3109 }
3110
3111 return val;
3112}
3113
3114/* Called every time the controller might need to be made 3080/* Called every time the controller might need to be made
3115 * aware of new link state. The PHY code conveys this 3081 * aware of new link state. The PHY code conveys this
3116 * information through variables in the phydev structure, and this 3082 * information through variables in the phydev structure, and this
@@ -3120,83 +3086,12 @@ static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
3120static void adjust_link(struct net_device *dev) 3086static void adjust_link(struct net_device *dev)
3121{ 3087{
3122 struct gfar_private *priv = netdev_priv(dev); 3088 struct gfar_private *priv = netdev_priv(dev);
3123 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3124 struct phy_device *phydev = priv->phydev; 3089 struct phy_device *phydev = priv->phydev;
3125 int new_state = 0;
3126 3090
3127 if (test_bit(GFAR_RESETTING, &priv->state)) 3091 if (unlikely(phydev->link != priv->oldlink ||
3128 return; 3092 phydev->duplex != priv->oldduplex ||
3129 3093 phydev->speed != priv->oldspeed))
3130 if (phydev->link) { 3094 gfar_update_link_state(priv);
3131 u32 tempval1 = gfar_read(&regs->maccfg1);
3132 u32 tempval = gfar_read(&regs->maccfg2);
3133 u32 ecntrl = gfar_read(&regs->ecntrl);
3134
3135 /* Now we make sure that we can be in full duplex mode.
3136 * If not, we operate in half-duplex mode.
3137 */
3138 if (phydev->duplex != priv->oldduplex) {
3139 new_state = 1;
3140 if (!(phydev->duplex))
3141 tempval &= ~(MACCFG2_FULL_DUPLEX);
3142 else
3143 tempval |= MACCFG2_FULL_DUPLEX;
3144
3145 priv->oldduplex = phydev->duplex;
3146 }
3147
3148 if (phydev->speed != priv->oldspeed) {
3149 new_state = 1;
3150 switch (phydev->speed) {
3151 case 1000:
3152 tempval =
3153 ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
3154
3155 ecntrl &= ~(ECNTRL_R100);
3156 break;
3157 case 100:
3158 case 10:
3159 tempval =
3160 ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
3161
3162 /* Reduced mode distinguishes
3163 * between 10 and 100
3164 */
3165 if (phydev->speed == SPEED_100)
3166 ecntrl |= ECNTRL_R100;
3167 else
3168 ecntrl &= ~(ECNTRL_R100);
3169 break;
3170 default:
3171 netif_warn(priv, link, dev,
3172 "Ack! Speed (%d) is not 10/100/1000!\n",
3173 phydev->speed);
3174 break;
3175 }
3176
3177 priv->oldspeed = phydev->speed;
3178 }
3179
3180 tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
3181 tempval1 |= gfar_get_flowctrl_cfg(priv);
3182
3183 gfar_write(&regs->maccfg1, tempval1);
3184 gfar_write(&regs->maccfg2, tempval);
3185 gfar_write(&regs->ecntrl, ecntrl);
3186
3187 if (!priv->oldlink) {
3188 new_state = 1;
3189 priv->oldlink = 1;
3190 }
3191 } else if (priv->oldlink) {
3192 new_state = 1;
3193 priv->oldlink = 0;
3194 priv->oldspeed = 0;
3195 priv->oldduplex = -1;
3196 }
3197
3198 if (new_state && netif_msg_link(priv))
3199 phy_print_status(phydev);
3200} 3095}
3201 3096
3202/* Update the hash table based on the current list of multicast 3097/* Update the hash table based on the current list of multicast
@@ -3442,6 +3337,114 @@ static irqreturn_t gfar_error(int irq, void *grp_id)
3442 return IRQ_HANDLED; 3337 return IRQ_HANDLED;
3443} 3338}
3444 3339
3340static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
3341{
3342 struct phy_device *phydev = priv->phydev;
3343 u32 val = 0;
3344
3345 if (!phydev->duplex)
3346 return val;
3347
3348 if (!priv->pause_aneg_en) {
3349 if (priv->tx_pause_en)
3350 val |= MACCFG1_TX_FLOW;
3351 if (priv->rx_pause_en)
3352 val |= MACCFG1_RX_FLOW;
3353 } else {
3354 u16 lcl_adv, rmt_adv;
3355 u8 flowctrl;
3356 /* get link partner capabilities */
3357 rmt_adv = 0;
3358 if (phydev->pause)
3359 rmt_adv = LPA_PAUSE_CAP;
3360 if (phydev->asym_pause)
3361 rmt_adv |= LPA_PAUSE_ASYM;
3362
3363 lcl_adv = mii_advertise_flowctrl(phydev->advertising);
3364
3365 flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
3366 if (flowctrl & FLOW_CTRL_TX)
3367 val |= MACCFG1_TX_FLOW;
3368 if (flowctrl & FLOW_CTRL_RX)
3369 val |= MACCFG1_RX_FLOW;
3370 }
3371
3372 return val;
3373}
3374
3375static noinline void gfar_update_link_state(struct gfar_private *priv)
3376{
3377 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3378 struct phy_device *phydev = priv->phydev;
3379
3380 if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
3381 return;
3382
3383 if (phydev->link) {
3384 u32 tempval1 = gfar_read(&regs->maccfg1);
3385 u32 tempval = gfar_read(&regs->maccfg2);
3386 u32 ecntrl = gfar_read(&regs->ecntrl);
3387
3388 if (phydev->duplex != priv->oldduplex) {
3389 if (!(phydev->duplex))
3390 tempval &= ~(MACCFG2_FULL_DUPLEX);
3391 else
3392 tempval |= MACCFG2_FULL_DUPLEX;
3393
3394 priv->oldduplex = phydev->duplex;
3395 }
3396
3397 if (phydev->speed != priv->oldspeed) {
3398 switch (phydev->speed) {
3399 case 1000:
3400 tempval =
3401 ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
3402
3403 ecntrl &= ~(ECNTRL_R100);
3404 break;
3405 case 100:
3406 case 10:
3407 tempval =
3408 ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
3409
3410 /* Reduced mode distinguishes
3411 * between 10 and 100
3412 */
3413 if (phydev->speed == SPEED_100)
3414 ecntrl |= ECNTRL_R100;
3415 else
3416 ecntrl &= ~(ECNTRL_R100);
3417 break;
3418 default:
3419 netif_warn(priv, link, priv->ndev,
3420 "Ack! Speed (%d) is not 10/100/1000!\n",
3421 phydev->speed);
3422 break;
3423 }
3424
3425 priv->oldspeed = phydev->speed;
3426 }
3427
3428 tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
3429 tempval1 |= gfar_get_flowctrl_cfg(priv);
3430
3431 gfar_write(&regs->maccfg1, tempval1);
3432 gfar_write(&regs->maccfg2, tempval);
3433 gfar_write(&regs->ecntrl, ecntrl);
3434
3435 if (!priv->oldlink)
3436 priv->oldlink = 1;
3437
3438 } else if (priv->oldlink) {
3439 priv->oldlink = 0;
3440 priv->oldspeed = 0;
3441 priv->oldduplex = -1;
3442 }
3443
3444 if (netif_msg_link(priv))
3445 phy_print_status(phydev);
3446}
3447
 3445static struct of_device_id gfar_match[] = 3448static struct of_device_id gfar_match[] =
3446{ 3449{
3447 { 3450 {
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 891dbee6e6c1..76d70708f864 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -533,6 +533,9 @@ static int gfar_spauseparam(struct net_device *dev,
533 struct gfar __iomem *regs = priv->gfargrp[0].regs; 533 struct gfar __iomem *regs = priv->gfargrp[0].regs;
534 u32 oldadv, newadv; 534 u32 oldadv, newadv;
535 535
536 if (!phydev)
537 return -ENODEV;
538
536 if (!(phydev->supported & SUPPORTED_Pause) || 539 if (!(phydev->supported & SUPPORTED_Pause) ||
537 (!(phydev->supported & SUPPORTED_Asym_Pause) && 540 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
538 (epause->rx_pause != epause->tx_pause))) 541 (epause->rx_pause != epause->tx_pause)))
diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
index 9d75fef6396f..63eb959a28aa 100644
--- a/drivers/net/ethernet/ibm/emac/mal.c
+++ b/drivers/net/ethernet/ibm/emac/mal.c
@@ -682,10 +682,7 @@ static int mal_probe(struct platform_device *ofdev)
682 goto fail6; 682 goto fail6;
683 683
684 /* Enable all MAL SERR interrupt sources */ 684 /* Enable all MAL SERR interrupt sources */
685 if (mal->version == 2) 685 set_mal_dcrn(mal, MAL_IER, MAL_IER_EVENTS);
686 set_mal_dcrn(mal, MAL_IER, MAL2_IER_EVENTS);
687 else
688 set_mal_dcrn(mal, MAL_IER, MAL1_IER_EVENTS);
689 686
690 /* Enable EOB interrupt */ 687 /* Enable EOB interrupt */
691 mal_enable_eob_irq(mal); 688 mal_enable_eob_irq(mal);
diff --git a/drivers/net/ethernet/ibm/emac/mal.h b/drivers/net/ethernet/ibm/emac/mal.h
index e431a32e3d69..eeade2ea8334 100644
--- a/drivers/net/ethernet/ibm/emac/mal.h
+++ b/drivers/net/ethernet/ibm/emac/mal.h
@@ -95,24 +95,20 @@
95 95
96 96
97#define MAL_IER 0x02 97#define MAL_IER 0x02
98/* MAL IER bits */
98#define MAL_IER_DE 0x00000010 99#define MAL_IER_DE 0x00000010
99#define MAL_IER_OTE 0x00000004 100#define MAL_IER_OTE 0x00000004
100#define MAL_IER_OE 0x00000002 101#define MAL_IER_OE 0x00000002
101#define MAL_IER_PE 0x00000001 102#define MAL_IER_PE 0x00000001
102/* MAL V1 IER bits */
103#define MAL1_IER_NWE 0x00000008
104#define MAL1_IER_SOC_EVENTS MAL1_IER_NWE
105#define MAL1_IER_EVENTS (MAL1_IER_SOC_EVENTS | MAL_IER_DE | \
106 MAL_IER_OTE | MAL_IER_OE | MAL_IER_PE)
107 103
108/* MAL V2 IER bits */ 104/* PLB read/write/timeout errors */
109#define MAL2_IER_PT 0x00000080 105#define MAL_IER_PTE 0x00000080
110#define MAL2_IER_PRE 0x00000040 106#define MAL_IER_PRE 0x00000040
111#define MAL2_IER_PWE 0x00000020 107#define MAL_IER_PWE 0x00000020
112#define MAL2_IER_SOC_EVENTS (MAL2_IER_PT | MAL2_IER_PRE | MAL2_IER_PWE)
113#define MAL2_IER_EVENTS (MAL2_IER_SOC_EVENTS | MAL_IER_DE | \
114 MAL_IER_OTE | MAL_IER_OE | MAL_IER_PE)
115 108
109#define MAL_IER_SOC_EVENTS (MAL_IER_PTE | MAL_IER_PRE | MAL_IER_PWE)
110#define MAL_IER_EVENTS (MAL_IER_SOC_EVENTS | MAL_IER_DE | \
111 MAL_IER_OTE | MAL_IER_OE | MAL_IER_PE)
116 112
117#define MAL_TXCASR 0x04 113#define MAL_TXCASR 0x04
118#define MAL_TXCARR 0x05 114#define MAL_TXCARR 0x05
diff --git a/drivers/net/ethernet/ibm/emac/rgmii.c b/drivers/net/ethernet/ibm/emac/rgmii.c
index 4fb2f96da23b..a01182cce965 100644
--- a/drivers/net/ethernet/ibm/emac/rgmii.c
+++ b/drivers/net/ethernet/ibm/emac/rgmii.c
@@ -45,6 +45,7 @@
45 45
46/* RGMIIx_SSR */ 46/* RGMIIx_SSR */
47#define RGMII_SSR_MASK(idx) (0x7 << ((idx) * 8)) 47#define RGMII_SSR_MASK(idx) (0x7 << ((idx) * 8))
48#define RGMII_SSR_10(idx) (0x1 << ((idx) * 8))
48#define RGMII_SSR_100(idx) (0x2 << ((idx) * 8)) 49#define RGMII_SSR_100(idx) (0x2 << ((idx) * 8))
49#define RGMII_SSR_1000(idx) (0x4 << ((idx) * 8)) 50#define RGMII_SSR_1000(idx) (0x4 << ((idx) * 8))
50 51
@@ -139,6 +140,8 @@ void rgmii_set_speed(struct platform_device *ofdev, int input, int speed)
139 ssr |= RGMII_SSR_1000(input); 140 ssr |= RGMII_SSR_1000(input);
140 else if (speed == SPEED_100) 141 else if (speed == SPEED_100)
141 ssr |= RGMII_SSR_100(input); 142 ssr |= RGMII_SSR_100(input);
143 else if (speed == SPEED_10)
144 ssr |= RGMII_SSR_10(input);
142 145
143 out_be32(&p->ssr, ssr); 146 out_be32(&p->ssr, ssr);
144 147
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 9866f264f55e..f0bbd4246d71 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -186,7 +186,7 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
186{ 186{
187 u16 phy_reg = 0; 187 u16 phy_reg = 0;
188 u32 phy_id = 0; 188 u32 phy_id = 0;
189 s32 ret_val; 189 s32 ret_val = 0;
190 u16 retry_count; 190 u16 retry_count;
191 u32 mac_reg = 0; 191 u32 mac_reg = 0;
192 192
@@ -217,11 +217,13 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
217 /* In case the PHY needs to be in mdio slow mode, 217 /* In case the PHY needs to be in mdio slow mode,
218 * set slow mode and try to get the PHY id again. 218 * set slow mode and try to get the PHY id again.
219 */ 219 */
220 hw->phy.ops.release(hw); 220 if (hw->mac.type < e1000_pch_lpt) {
221 ret_val = e1000_set_mdio_slow_mode_hv(hw); 221 hw->phy.ops.release(hw);
222 if (!ret_val) 222 ret_val = e1000_set_mdio_slow_mode_hv(hw);
223 ret_val = e1000e_get_phy_id(hw); 223 if (!ret_val)
224 hw->phy.ops.acquire(hw); 224 ret_val = e1000e_get_phy_id(hw);
225 hw->phy.ops.acquire(hw);
226 }
225 227
226 if (ret_val) 228 if (ret_val)
227 return false; 229 return false;
@@ -842,6 +844,17 @@ s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
842 } 844 }
843 } 845 }
844 846
847 if (hw->phy.type == e1000_phy_82579) {
848 ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
849 &data);
850 if (ret_val)
851 goto release;
852
853 data &= ~I82579_LPI_100_PLL_SHUT;
854 ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
855 data);
856 }
857
845 /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */ 858 /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
846 ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data); 859 ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
847 if (ret_val) 860 if (ret_val)
@@ -1314,14 +1327,17 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1314 return ret_val; 1327 return ret_val;
1315 } 1328 }
1316 1329
1317 /* When connected at 10Mbps half-duplex, 82579 parts are excessively 1330 /* When connected at 10Mbps half-duplex, some parts are excessively
1318 * aggressive resulting in many collisions. To avoid this, increase 1331 * aggressive resulting in many collisions. To avoid this, increase
1319 * the IPG and reduce Rx latency in the PHY. 1332 * the IPG and reduce Rx latency in the PHY.
1320 */ 1333 */
1321 if ((hw->mac.type == e1000_pch2lan) && link) { 1334 if (((hw->mac.type == e1000_pch2lan) ||
1335 (hw->mac.type == e1000_pch_lpt)) && link) {
1322 u32 reg; 1336 u32 reg;
1323 reg = er32(STATUS); 1337 reg = er32(STATUS);
1324 if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) { 1338 if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) {
1339 u16 emi_addr;
1340
1325 reg = er32(TIPG); 1341 reg = er32(TIPG);
1326 reg &= ~E1000_TIPG_IPGT_MASK; 1342 reg &= ~E1000_TIPG_IPGT_MASK;
1327 reg |= 0xFF; 1343 reg |= 0xFF;
@@ -1332,8 +1348,12 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1332 if (ret_val) 1348 if (ret_val)
1333 return ret_val; 1349 return ret_val;
1334 1350
1335 ret_val = 1351 if (hw->mac.type == e1000_pch2lan)
1336 e1000_write_emi_reg_locked(hw, I82579_RX_CONFIG, 0); 1352 emi_addr = I82579_RX_CONFIG;
1353 else
1354 emi_addr = I217_RX_CONFIG;
1355
1356 ret_val = e1000_write_emi_reg_locked(hw, emi_addr, 0);
1337 1357
1338 hw->phy.ops.release(hw); 1358 hw->phy.ops.release(hw);
1339 1359
@@ -2493,51 +2513,44 @@ release:
2493 * e1000_k1_gig_workaround_lv - K1 Si workaround 2513 * e1000_k1_gig_workaround_lv - K1 Si workaround
2494 * @hw: pointer to the HW structure 2514 * @hw: pointer to the HW structure
2495 * 2515 *
 2496 * Workaround to set the K1 beacon duration for 82579 parts 2516 * Workaround to set the K1 beacon duration for 82579 parts in 10Mbps
2517 * Disable K1 in 1000Mbps and 100Mbps
2497 **/ 2518 **/
2498static s32 e1000_k1_workaround_lv(struct e1000_hw *hw) 2519static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
2499{ 2520{
2500 s32 ret_val = 0; 2521 s32 ret_val = 0;
2501 u16 status_reg = 0; 2522 u16 status_reg = 0;
2502 u32 mac_reg;
2503 u16 phy_reg;
2504 2523
2505 if (hw->mac.type != e1000_pch2lan) 2524 if (hw->mac.type != e1000_pch2lan)
2506 return 0; 2525 return 0;
2507 2526
 2508 /* Set K1 beacon duration based on 1Gbps speed or otherwise */ 2527 /* Set K1 beacon duration based on 10Mbps speed */
2509 ret_val = e1e_rphy(hw, HV_M_STATUS, &status_reg); 2528 ret_val = e1e_rphy(hw, HV_M_STATUS, &status_reg);
2510 if (ret_val) 2529 if (ret_val)
2511 return ret_val; 2530 return ret_val;
2512 2531
2513 if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) 2532 if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
2514 == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) { 2533 == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
2515 mac_reg = er32(FEXTNVM4); 2534 if (status_reg &
2516 mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK; 2535 (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
2517
2518 ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg);
2519 if (ret_val)
2520 return ret_val;
2521
2522 if (status_reg & HV_M_STATUS_SPEED_1000) {
2523 u16 pm_phy_reg; 2536 u16 pm_phy_reg;
2524 2537
2525 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC; 2538 /* LV 1G/100 Packet drop issue wa */
2526 phy_reg &= ~I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
2527 /* LV 1G Packet drop issue wa */
2528 ret_val = e1e_rphy(hw, HV_PM_CTRL, &pm_phy_reg); 2539 ret_val = e1e_rphy(hw, HV_PM_CTRL, &pm_phy_reg);
2529 if (ret_val) 2540 if (ret_val)
2530 return ret_val; 2541 return ret_val;
2531 pm_phy_reg &= ~HV_PM_CTRL_PLL_STOP_IN_K1_GIGA; 2542 pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE;
2532 ret_val = e1e_wphy(hw, HV_PM_CTRL, pm_phy_reg); 2543 ret_val = e1e_wphy(hw, HV_PM_CTRL, pm_phy_reg);
2533 if (ret_val) 2544 if (ret_val)
2534 return ret_val; 2545 return ret_val;
2535 } else { 2546 } else {
2547 u32 mac_reg;
2548
2549 mac_reg = er32(FEXTNVM4);
2550 mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
2536 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC; 2551 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
2537 phy_reg |= I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT; 2552 ew32(FEXTNVM4, mac_reg);
2538 } 2553 }
2539 ew32(FEXTNVM4, mac_reg);
2540 ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg);
2541 } 2554 }
2542 2555
2543 return ret_val; 2556 return ret_val;
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index bead50f9187b..5515126c81c1 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -232,16 +232,19 @@
232#define I82577_MSE_THRESHOLD 0x0887 /* 82577 Mean Square Error Threshold */ 232#define I82577_MSE_THRESHOLD 0x0887 /* 82577 Mean Square Error Threshold */
233#define I82579_MSE_LINK_DOWN 0x2411 /* MSE count before dropping link */ 233#define I82579_MSE_LINK_DOWN 0x2411 /* MSE count before dropping link */
234#define I82579_RX_CONFIG 0x3412 /* Receive configuration */ 234#define I82579_RX_CONFIG 0x3412 /* Receive configuration */
235#define I82579_LPI_PLL_SHUT 0x4412 /* LPI PLL Shut Enable */
235#define I82579_EEE_PCS_STATUS 0x182E /* IEEE MMD Register 3.1 >> 8 */ 236#define I82579_EEE_PCS_STATUS 0x182E /* IEEE MMD Register 3.1 >> 8 */
236#define I82579_EEE_CAPABILITY 0x0410 /* IEEE MMD Register 3.20 */ 237#define I82579_EEE_CAPABILITY 0x0410 /* IEEE MMD Register 3.20 */
237#define I82579_EEE_ADVERTISEMENT 0x040E /* IEEE MMD Register 7.60 */ 238#define I82579_EEE_ADVERTISEMENT 0x040E /* IEEE MMD Register 7.60 */
238#define I82579_EEE_LP_ABILITY 0x040F /* IEEE MMD Register 7.61 */ 239#define I82579_EEE_LP_ABILITY 0x040F /* IEEE MMD Register 7.61 */
239#define I82579_EEE_100_SUPPORTED (1 << 1) /* 100BaseTx EEE */ 240#define I82579_EEE_100_SUPPORTED (1 << 1) /* 100BaseTx EEE */
240#define I82579_EEE_1000_SUPPORTED (1 << 2) /* 1000BaseTx EEE */ 241#define I82579_EEE_1000_SUPPORTED (1 << 2) /* 1000BaseTx EEE */
242#define I82579_LPI_100_PLL_SHUT (1 << 2) /* 100M LPI PLL Shut Enabled */
241#define I217_EEE_PCS_STATUS 0x9401 /* IEEE MMD Register 3.1 */ 243#define I217_EEE_PCS_STATUS 0x9401 /* IEEE MMD Register 3.1 */
242#define I217_EEE_CAPABILITY 0x8000 /* IEEE MMD Register 3.20 */ 244#define I217_EEE_CAPABILITY 0x8000 /* IEEE MMD Register 3.20 */
243#define I217_EEE_ADVERTISEMENT 0x8001 /* IEEE MMD Register 7.60 */ 245#define I217_EEE_ADVERTISEMENT 0x8001 /* IEEE MMD Register 7.60 */
244#define I217_EEE_LP_ABILITY 0x8002 /* IEEE MMD Register 7.61 */ 246#define I217_EEE_LP_ABILITY 0x8002 /* IEEE MMD Register 7.61 */
247#define I217_RX_CONFIG 0xB20C /* Receive configuration */
245 248
246#define E1000_EEE_RX_LPI_RCVD 0x0400 /* Tx LP idle received */ 249#define E1000_EEE_RX_LPI_RCVD 0x0400 /* Tx LP idle received */
247#define E1000_EEE_TX_LPI_RCVD 0x0800 /* Rx LP idle received */ 250#define E1000_EEE_TX_LPI_RCVD 0x0800 /* Rx LP idle received */
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index d50c91e50528..3e69386add04 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1165,7 +1165,7 @@ static void e1000e_tx_hwtstamp_work(struct work_struct *work)
1165 dev_kfree_skb_any(adapter->tx_hwtstamp_skb); 1165 dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
1166 adapter->tx_hwtstamp_skb = NULL; 1166 adapter->tx_hwtstamp_skb = NULL;
1167 adapter->tx_hwtstamp_timeouts++; 1167 adapter->tx_hwtstamp_timeouts++;
1168 e_warn("clearing Tx timestamp hang"); 1168 e_warn("clearing Tx timestamp hang\n");
1169 } else { 1169 } else {
1170 /* reschedule to check later */ 1170 /* reschedule to check later */
1171 schedule_work(&adapter->tx_hwtstamp_work); 1171 schedule_work(&adapter->tx_hwtstamp_work);
@@ -5687,7 +5687,7 @@ struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
5687static int e1000_change_mtu(struct net_device *netdev, int new_mtu) 5687static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
5688{ 5688{
5689 struct e1000_adapter *adapter = netdev_priv(netdev); 5689 struct e1000_adapter *adapter = netdev_priv(netdev);
5690 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; 5690 int max_frame = new_mtu + VLAN_HLEN + ETH_HLEN + ETH_FCS_LEN;
5691 5691
5692 /* Jumbo frame support */ 5692 /* Jumbo frame support */
5693 if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) && 5693 if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) &&
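
For reference, with the default 1500-byte MTU the new expression gives max_frame = 1500 + 4 (VLAN_HLEN) + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) = 1522 bytes, so a single VLAN tag is accounted for without pushing a standard frame into the jumbo-frame path.
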
@@ -6235,6 +6235,7 @@ static int __e1000_resume(struct pci_dev *pdev)
6235 return 0; 6235 return 0;
6236} 6236}
6237 6237
6238#ifdef CONFIG_PM_SLEEP
6238static int e1000e_pm_thaw(struct device *dev) 6239static int e1000e_pm_thaw(struct device *dev)
6239{ 6240{
6240 struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev)); 6241 struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
@@ -6255,7 +6256,6 @@ static int e1000e_pm_thaw(struct device *dev)
6255 return 0; 6256 return 0;
6256} 6257}
6257 6258
6258#ifdef CONFIG_PM_SLEEP
6259static int e1000e_pm_suspend(struct device *dev) 6259static int e1000e_pm_suspend(struct device *dev)
6260{ 6260{
6261 struct pci_dev *pdev = to_pci_dev(dev); 6261 struct pci_dev *pdev = to_pci_dev(dev);
diff --git a/drivers/net/ethernet/intel/e1000e/phy.h b/drivers/net/ethernet/intel/e1000e/phy.h
index 3841bccf058c..537d2780b408 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.h
+++ b/drivers/net/ethernet/intel/e1000e/phy.h
@@ -164,6 +164,7 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw);
164#define HV_M_STATUS_AUTONEG_COMPLETE 0x1000 164#define HV_M_STATUS_AUTONEG_COMPLETE 0x1000
165#define HV_M_STATUS_SPEED_MASK 0x0300 165#define HV_M_STATUS_SPEED_MASK 0x0300
166#define HV_M_STATUS_SPEED_1000 0x0200 166#define HV_M_STATUS_SPEED_1000 0x0200
167#define HV_M_STATUS_SPEED_100 0x0100
167#define HV_M_STATUS_LINK_UP 0x0040 168#define HV_M_STATUS_LINK_UP 0x0040
168 169
169#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4 170#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 861b722c2672..cf0761f08911 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -2897,12 +2897,9 @@ static irqreturn_t i40e_intr(int irq, void *data)
2897 u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0); 2897 u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
2898 2898
2899 if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) { 2899 if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
2900 ena_mask &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK; 2900 icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
2901 i40e_ptp_tx_hwtstamp(pf); 2901 i40e_ptp_tx_hwtstamp(pf);
2902 prttsyn_stat &= ~I40E_PRTTSYN_STAT_0_TXTIME_MASK;
2903 } 2902 }
2904
2905 wr32(hw, I40E_PRTTSYN_STAT_0, prttsyn_stat);
2906 } 2903 }
2907 2904
2908 /* If a critical error is pending we have no choice but to reset the 2905 /* If a critical error is pending we have no choice but to reset the
@@ -4271,6 +4268,14 @@ static int i40e_open(struct net_device *netdev)
4271 if (err) 4268 if (err)
4272 return err; 4269 return err;
4273 4270
4271 /* configure global TSO hardware offload settings */
4272 wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
4273 TCP_FLAG_FIN) >> 16);
4274 wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
4275 TCP_FLAG_FIN |
4276 TCP_FLAG_CWR) >> 16);
4277 wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
4278
4274#ifdef CONFIG_I40E_VXLAN 4279#ifdef CONFIG_I40E_VXLAN
4275 vxlan_get_rx_port(netdev); 4280 vxlan_get_rx_port(netdev);
4276#endif 4281#endif
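
Assuming the standard TCP_FLAG_* constants from <net/tcp.h> (big-endian: FIN = 0x00010000, PSH = 0x00080000, CWR = 0x00800000), the be32_to_cpu()/>> 16 conversion reduces the three writes above to TSOMSK_F = 0x0009 (PSH|FIN), TSOMSK_M = 0x0089 (PSH|FIN|CWR) and TSOMSK_L = 0x0080 (CWR).
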
@@ -6712,6 +6717,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
6712 NETIF_F_HW_VLAN_CTAG_FILTER | 6717 NETIF_F_HW_VLAN_CTAG_FILTER |
6713 NETIF_F_IPV6_CSUM | 6718 NETIF_F_IPV6_CSUM |
6714 NETIF_F_TSO | 6719 NETIF_F_TSO |
6720 NETIF_F_TSO_ECN |
6715 NETIF_F_TSO6 | 6721 NETIF_F_TSO6 |
6716 NETIF_F_RXCSUM | 6722 NETIF_F_RXCSUM |
6717 NETIF_F_NTUPLE | 6723 NETIF_F_NTUPLE |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 262bdf11d221..81299189a47d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -160,7 +160,7 @@ static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
160 udelay(5); 160 udelay(5);
161 } 161 }
162 if (ret_code == I40E_ERR_TIMEOUT) 162 if (ret_code == I40E_ERR_TIMEOUT)
163 hw_dbg(hw, "Done bit in GLNVM_SRCTL not set"); 163 hw_dbg(hw, "Done bit in GLNVM_SRCTL not set\n");
164 return ret_code; 164 return ret_code;
165} 165}
166 166
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index e33ec6c842b7..e61e63720800 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -239,7 +239,7 @@ static void i40e_ptp_tx_work(struct work_struct *work)
239 dev_kfree_skb_any(pf->ptp_tx_skb); 239 dev_kfree_skb_any(pf->ptp_tx_skb);
240 pf->ptp_tx_skb = NULL; 240 pf->ptp_tx_skb = NULL;
241 pf->tx_hwtstamp_timeouts++; 241 pf->tx_hwtstamp_timeouts++;
242 dev_warn(&pf->pdev->dev, "clearing Tx timestamp hang"); 242 dev_warn(&pf->pdev->dev, "clearing Tx timestamp hang\n");
243 return; 243 return;
244 } 244 }
245 245
@@ -321,7 +321,7 @@ void i40e_ptp_rx_hang(struct i40e_vsi *vsi)
321 pf->last_rx_ptp_check = jiffies; 321 pf->last_rx_ptp_check = jiffies;
322 pf->rx_hwtstamp_cleared++; 322 pf->rx_hwtstamp_cleared++;
323 dev_warn(&vsi->back->pdev->dev, 323 dev_warn(&vsi->back->pdev->dev,
324 "%s: clearing Rx timestamp hang", 324 "%s: clearing Rx timestamp hang\n",
325 __func__); 325 __func__);
326 } 326 }
327} 327}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 0f5d96ad281d..9478ddc66caf 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -418,7 +418,7 @@ int i40e_add_del_fdir(struct i40e_vsi *vsi,
418 } 418 }
419 break; 419 break;
420 default: 420 default:
421 dev_info(&pf->pdev->dev, "Could not specify spec type %d", 421 dev_info(&pf->pdev->dev, "Could not specify spec type %d\n",
422 input->flow_type); 422 input->flow_type);
423 ret = -EINVAL; 423 ret = -EINVAL;
424 } 424 }
@@ -478,7 +478,7 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
478 pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT; 478 pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT;
479 } 479 }
480 } else { 480 } else {
481 dev_info(&pdev->dev, "FD filter programming error"); 481 dev_info(&pdev->dev, "FD filter programming error\n");
482 } 482 }
483 } else if (error == 483 } else if (error ==
484 (0x1 << I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) { 484 (0x1 << I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
@@ -1713,9 +1713,11 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
 			    I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
 	if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
 		struct vlan_ethhdr *vhdr;
-		if (skb_header_cloned(skb) &&
-		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
-			return -ENOMEM;
+		int rc;
+
+		rc = skb_cow_head(skb, 0);
+		if (rc < 0)
+			return rc;
 		vhdr = (struct vlan_ethhdr *)skb->data;
 		vhdr->h_vlan_TCI = htons(tx_flags >>
 					 I40E_TX_FLAGS_VLAN_SHIFT);
@@ -1743,20 +1745,18 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
 		    u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling)
 {
 	u32 cd_cmd, cd_tso_len, cd_mss;
+	struct ipv6hdr *ipv6h;
 	struct tcphdr *tcph;
 	struct iphdr *iph;
 	u32 l4len;
 	int err;
-	struct ipv6hdr *ipv6h;
 
 	if (!skb_is_gso(skb))
 		return 0;
 
-	if (skb_header_cloned(skb)) {
-		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
-		if (err)
-			return err;
-	}
+	err = skb_cow_head(skb, 0);
+	if (err < 0)
+		return err;
 
 	if (protocol == htons(ETH_P_IP)) {
 		iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
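Both i40e hunks above collapse the open-coded skb_header_cloned()/pskb_expand_head() pair into skb_cow_head(), which privatizes the header area before it is rewritten in place. A minimal sketch of the idiom (the function below is illustrative, not part of the patch):

	#include <linux/skbuff.h>
	#include <linux/if_vlan.h>

	/* Before editing headers in place (here, the VLAN TCI), make sure
	 * the header area is ours: skb_cow_head() reallocates it if the
	 * skb is cloned or lacks headroom, returning a negative errno on
	 * failure (typically -ENOMEM).
	 */
	static int example_rewrite_vlan(struct sk_buff *skb, u16 tci)
	{
		struct vlan_ethhdr *vhdr;
		int err;

		err = skb_cow_head(skb, 0);	/* 0 = no extra headroom needed */
		if (err < 0)
			return err;

		vhdr = (struct vlan_ethhdr *)skb->data;
		vhdr->h_vlan_TCI = htons(tci);
		return 0;
	}
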
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index db963397cc27..f67f8a170b90 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -365,7 +365,7 @@ static s32 igb_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data)
 		word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
 		if (word_address == address) {
 			*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
-			hw_dbg("Read INVM Word 0x%02x = %x",
+			hw_dbg("Read INVM Word 0x%02x = %x\n",
 			       address, *data);
 			status = E1000_SUCCESS;
 			break;
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index 5910a932ea7c..1e0c404db81a 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -929,11 +929,10 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
 		 */
 		if (hw->fc.requested_mode == e1000_fc_full) {
 			hw->fc.current_mode = e1000_fc_full;
-			hw_dbg("Flow Control = FULL.\r\n");
+			hw_dbg("Flow Control = FULL.\n");
 		} else {
 			hw->fc.current_mode = e1000_fc_rx_pause;
-			hw_dbg("Flow Control = "
-			       "RX PAUSE frames only.\r\n");
+			hw_dbg("Flow Control = RX PAUSE frames only.\n");
 		}
 	}
 	/* For receiving PAUSE frames ONLY.
@@ -948,7 +947,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
 		 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
 		 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
 		hw->fc.current_mode = e1000_fc_tx_pause;
-		hw_dbg("Flow Control = TX PAUSE frames only.\r\n");
+		hw_dbg("Flow Control = TX PAUSE frames only.\n");
 	}
 	/* For transmitting PAUSE frames ONLY.
 	 *
@@ -962,7 +961,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
 		 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
 		 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
 		hw->fc.current_mode = e1000_fc_rx_pause;
-		hw_dbg("Flow Control = RX PAUSE frames only.\r\n");
+		hw_dbg("Flow Control = RX PAUSE frames only.\n");
 	}
 	/* Per the IEEE spec, at this point flow control should be
 	 * disabled. However, we want to consider that we could
@@ -988,10 +987,10 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
 		    (hw->fc.requested_mode == e1000_fc_tx_pause) ||
 		    (hw->fc.strict_ieee)) {
 			hw->fc.current_mode = e1000_fc_none;
-			hw_dbg("Flow Control = NONE.\r\n");
+			hw_dbg("Flow Control = NONE.\n");
 		} else {
 			hw->fc.current_mode = e1000_fc_rx_pause;
-			hw_dbg("Flow Control = RX PAUSE frames only.\r\n");
+			hw_dbg("Flow Control = RX PAUSE frames only.\n");
 		}
 
 		/* Now we need to do one last check... If we auto-
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index fb98d4602f9d..16430a8440fa 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -5193,8 +5193,10 @@ void igb_update_stats(struct igb_adapter *adapter,
 
 	rcu_read_lock();
 	for (i = 0; i < adapter->num_rx_queues; i++) {
-		u32 rqdpc = rd32(E1000_RQDPC(i));
 		struct igb_ring *ring = adapter->rx_ring[i];
+		u32 rqdpc = rd32(E1000_RQDPC(i));
+		if (hw->mac.type >= e1000_i210)
+			wr32(E1000_RQDPC(i), 0);
 
 		if (rqdpc) {
 			ring->rx_stats.drops += rqdpc;
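Besides the declaration reorder, this hunk starts clearing RQDPC explicitly: on i210-class parts the per-queue drop counter is not cleared by the read itself, so without the wr32() the same drops would be re-counted on every stats refresh. The pattern in isolation (register accessors as in igb):

	u32 rqdpc = rd32(E1000_RQDPC(i));	/* read the drop count */

	/* i210 and newer: not clear-on-read, reset it by hand */
	if (hw->mac.type >= e1000_i210)
		wr32(E1000_RQDPC(i), 0);

	if (rqdpc)
		ring->rx_stats.drops += rqdpc;	/* accumulate the delta only */
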
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 9209d652e1c9..ab25e49365f7 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -389,7 +389,7 @@ static void igb_ptp_tx_work(struct work_struct *work)
 		adapter->ptp_tx_skb = NULL;
 		clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
 		adapter->tx_hwtstamp_timeouts++;
-		dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang");
+		dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang\n");
 		return;
 	}
 
@@ -451,7 +451,7 @@ void igb_ptp_rx_hang(struct igb_adapter *adapter)
 		rd32(E1000_RXSTMPH);
 		adapter->last_rx_ptp_check = jiffies;
 		adapter->rx_hwtstamp_cleared++;
-		dev_warn(&adapter->pdev->dev, "clearing Rx timestamp hang");
+		dev_warn(&adapter->pdev->dev, "clearing Rx timestamp hang\n");
 	}
 }
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 1a12c1dd7a27..c6c4ca7d68e6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -256,7 +256,6 @@ struct ixgbe_ring {
 		struct ixgbe_tx_buffer *tx_buffer_info;
 		struct ixgbe_rx_buffer *rx_buffer_info;
 	};
-	unsigned long last_rx_timestamp;
 	unsigned long state;
 	u8 __iomem *tail;
 	dma_addr_t dma;			/* phys. address of descriptor ring */
@@ -770,6 +769,7 @@ struct ixgbe_adapter {
 	unsigned long ptp_tx_start;
 	unsigned long last_overflow_check;
 	unsigned long last_rx_ptp_check;
+	unsigned long last_rx_timestamp;
 	spinlock_t tmreg_lock;
 	struct cyclecounter cc;
 	struct timecounter tc;
@@ -944,24 +944,7 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter);
 void ixgbe_ptp_stop(struct ixgbe_adapter *adapter);
 void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
 void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter);
-void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
-			     struct sk_buff *skb);
-static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring,
-					 union ixgbe_adv_rx_desc *rx_desc,
-					 struct sk_buff *skb)
-{
-	if (unlikely(!ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS)))
-		return;
-
-	__ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, skb);
-
-	/*
-	 * Update the last_rx_timestamp timer in order to enable watchdog check
-	 * for error case of latched timestamp on a dropped packet.
-	 */
-	rx_ring->last_rx_timestamp = jiffies;
-}
-
+void ixgbe_ptp_rx_hwtstamp(struct ixgbe_adapter *adapter, struct sk_buff *skb);
 int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr);
 int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr);
 void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 24fba39e194e..981b8a7b100d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -1195,7 +1195,7 @@ static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
 	 */
 	hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];
 
-	hw_dbg(hw, "Detected EEPROM page size = %d words.",
+	hw_dbg(hw, "Detected EEPROM page size = %d words.\n",
 	       hw->eeprom.word_page_size);
 out:
 	return status;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index c4c526b7f99f..d62e7a25cf97 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1664,7 +1664,8 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
 
 	ixgbe_rx_checksum(rx_ring, rx_desc, skb);
 
-	ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);
+	if (unlikely(ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS)))
+		ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector->adapter, skb);
 
 	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
 	    ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 23f765263f12..a76af8e28a04 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -536,7 +536,7 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
 
 	if (time_out == max_time_out) {
 		status = IXGBE_ERR_LINK_SETUP;
-		hw_dbg(hw, "ixgbe_setup_phy_link_generic: time out");
+		hw_dbg(hw, "ixgbe_setup_phy_link_generic: time out\n");
 	}
 
 	return status;
@@ -745,7 +745,7 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
 
 	if (time_out == max_time_out) {
 		status = IXGBE_ERR_LINK_SETUP;
-		hw_dbg(hw, "ixgbe_setup_phy_link_tnx: time out");
+		hw_dbg(hw, "ixgbe_setup_phy_link_tnx: time out\n");
 	}
 
 	return status;
@@ -1175,7 +1175,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
 		status = 0;
 	} else {
 		if (hw->allow_unsupported_sfp) {
-			e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules.");
+			e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules.\n");
 			status = 0;
 		} else {
 			hw_dbg(hw,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 63515a6f67fa..8902ae683457 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -435,10 +435,8 @@ void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter)
 void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
-	struct ixgbe_ring *rx_ring;
 	u32 tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
 	unsigned long rx_event;
-	int n;
 
 	/* if we don't have a valid timestamp in the registers, just update the
 	 * timeout counter and exit
@@ -450,18 +448,15 @@ void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter)
 
 	/* determine the most recent watchdog or rx_timestamp event */
 	rx_event = adapter->last_rx_ptp_check;
-	for (n = 0; n < adapter->num_rx_queues; n++) {
-		rx_ring = adapter->rx_ring[n];
-		if (time_after(rx_ring->last_rx_timestamp, rx_event))
-			rx_event = rx_ring->last_rx_timestamp;
-	}
+	if (time_after(adapter->last_rx_timestamp, rx_event))
+		rx_event = adapter->last_rx_timestamp;
 
 	/* only need to read the high RXSTMP register to clear the lock */
 	if (time_is_before_jiffies(rx_event + 5*HZ)) {
 		IXGBE_READ_REG(hw, IXGBE_RXSTMPH);
 		adapter->last_rx_ptp_check = jiffies;
 
-		e_warn(drv, "clearing RX Timestamp hang");
+		e_warn(drv, "clearing RX Timestamp hang\n");
 	}
 }
 
@@ -517,7 +512,7 @@ static void ixgbe_ptp_tx_hwtstamp_work(struct work_struct *work)
 		dev_kfree_skb_any(adapter->ptp_tx_skb);
 		adapter->ptp_tx_skb = NULL;
 		clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state);
-		e_warn(drv, "clearing Tx Timestamp hang");
+		e_warn(drv, "clearing Tx Timestamp hang\n");
 		return;
 	}
 
@@ -530,35 +525,22 @@ static void ixgbe_ptp_tx_hwtstamp_work(struct work_struct *work)
 }
 
 /**
- * __ixgbe_ptp_rx_hwtstamp - utility function which checks for RX time stamp
- * @q_vector: structure containing interrupt and ring information
+ * ixgbe_ptp_rx_hwtstamp - utility function which checks for RX time stamp
+ * @adapter: pointer to adapter struct
  * @skb: particular skb to send timestamp with
  *
  * if the timestamp is valid, we convert it into the timecounter ns
  * value, then store that result into the shhwtstamps structure which
  * is passed up the network stack
  */
-void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
-			     struct sk_buff *skb)
+void ixgbe_ptp_rx_hwtstamp(struct ixgbe_adapter *adapter, struct sk_buff *skb)
 {
-	struct ixgbe_adapter *adapter;
-	struct ixgbe_hw *hw;
+	struct ixgbe_hw *hw = &adapter->hw;
 	struct skb_shared_hwtstamps *shhwtstamps;
 	u64 regval = 0, ns;
 	u32 tsyncrxctl;
 	unsigned long flags;
 
-	/* we cannot process timestamps on a ring without a q_vector */
-	if (!q_vector || !q_vector->adapter)
-		return;
-
-	adapter = q_vector->adapter;
-	hw = &adapter->hw;
-
-	/*
-	 * Read the tsyncrxctl register afterwards in order to prevent taking an
-	 * I/O hit on every packet.
-	 */
 	tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
 	if (!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID))
 		return;
@@ -566,13 +548,17 @@ void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
 	regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
 	regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) << 32;
 
-
 	spin_lock_irqsave(&adapter->tmreg_lock, flags);
 	ns = timecounter_cyc2time(&adapter->tc, regval);
 	spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
 
 	shhwtstamps = skb_hwtstamps(skb);
 	shhwtstamps->hwtstamp = ns_to_ktime(ns);
+
+	/* Update the last_rx_timestamp timer in order to enable watchdog check
+	 * for error case of latched timestamp on a dropped packet.
+	 */
+	adapter->last_rx_timestamp = jiffies;
 }
 
 int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
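The ixgbe changes fold the per-ring last_rx_timestamp into a single adapter-wide field: the device latches only one RX timestamp at a time in RXSTMPL/RXSTMPH, so one jiffies mark per adapter is enough for the hang watchdog. The resulting staleness check, in isolation:

	/* If no RX timestamp has been consumed for ~5s, assume the
	 * latched value belongs to a dropped frame; reading the high
	 * half unlocks the register for the next timestamp.
	 */
	rx_event = adapter->last_rx_ptp_check;
	if (time_after(adapter->last_rx_timestamp, rx_event))
		rx_event = adapter->last_rx_timestamp;

	if (time_is_before_jiffies(rx_event + 5 * HZ)) {
		IXGBE_READ_REG(hw, IXGBE_RXSTMPH);
		adapter->last_rx_ptp_check = jiffies;
	}
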
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index b0c6050479eb..b78378cea5e3 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -1988,7 +1988,7 @@ jme_alloc_txdesc(struct jme_adapter *jme,
 	return idx;
 }
 
-static void
+static int
 jme_fill_tx_map(struct pci_dev *pdev,
 		struct txdesc *txdesc,
 		struct jme_buffer_info *txbi,
@@ -2005,6 +2005,9 @@ jme_fill_tx_map(struct pci_dev *pdev,
 				len,
 				PCI_DMA_TODEVICE);
 
+	if (unlikely(pci_dma_mapping_error(pdev, dmaaddr)))
+		return -EINVAL;
+
 	pci_dma_sync_single_for_device(pdev,
 				       dmaaddr,
 				       len,
@@ -2021,9 +2024,30 @@ jme_fill_tx_map(struct pci_dev *pdev,
 
 	txbi->mapping = dmaaddr;
 	txbi->len = len;
+	return 0;
 }
 
-static void
+static void jme_drop_tx_map(struct jme_adapter *jme, int startidx, int count)
+{
+	struct jme_ring *txring = &(jme->txring[0]);
+	struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
+	int mask = jme->tx_ring_mask;
+	int j;
+
+	for (j = 0 ; j < count ; j++) {
+		ctxbi = txbi + ((startidx + j + 2) & (mask));
+		pci_unmap_page(jme->pdev,
+			       ctxbi->mapping,
+			       ctxbi->len,
+			       PCI_DMA_TODEVICE);
+
+		ctxbi->mapping = 0;
+		ctxbi->len = 0;
+	}
+
+}
+
+static int
 jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
 {
 	struct jme_ring *txring = &(jme->txring[0]);
@@ -2034,25 +2058,37 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
 	int mask = jme->tx_ring_mask;
 	const struct skb_frag_struct *frag;
 	u32 len;
+	int ret = 0;
 
 	for (i = 0 ; i < nr_frags ; ++i) {
 		frag = &skb_shinfo(skb)->frags[i];
 		ctxdesc = txdesc + ((idx + i + 2) & (mask));
 		ctxbi = txbi + ((idx + i + 2) & (mask));
 
-		jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi,
+		ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi,
 				skb_frag_page(frag),
 				frag->page_offset, skb_frag_size(frag), hidma);
+		if (ret) {
+			jme_drop_tx_map(jme, idx, i);
+			goto out;
+		}
+
 	}
 
 	len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
 	ctxdesc = txdesc + ((idx + 1) & (mask));
 	ctxbi = txbi + ((idx + 1) & (mask));
-	jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data),
+	ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data),
 			offset_in_page(skb->data), len, hidma);
+	if (ret)
+		jme_drop_tx_map(jme, idx, i);
+
+out:
+	return ret;
 
 }
 
+
 static int
 jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags)
 {
@@ -2131,6 +2167,7 @@ jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
 	struct txdesc *txdesc;
 	struct jme_buffer_info *txbi;
 	u8 flags;
+	int ret = 0;
 
 	txdesc = (struct txdesc *)txring->desc + idx;
 	txbi = txring->bufinf + idx;
@@ -2155,7 +2192,10 @@ jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
 	if (jme_tx_tso(skb, &txdesc->desc1.mss, &flags))
 		jme_tx_csum(jme, skb, &flags);
 	jme_tx_vlan(skb, &txdesc->desc1.vlan, &flags);
-	jme_map_tx_skb(jme, skb, idx);
+	ret = jme_map_tx_skb(jme, skb, idx);
+	if (ret)
+		return ret;
+
 	txdesc->desc1.flags = flags;
 	/*
 	 * Set tx buffer info after telling NIC to send
@@ -2228,7 +2268,8 @@ jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 		return NETDEV_TX_BUSY;
 	}
 
-	jme_fill_tx_desc(jme, skb, idx);
+	if (jme_fill_tx_desc(jme, skb, idx))
+		return NETDEV_TX_OK;
 
 	jwrite32(jme, JME_TXCS, jme->reg_txcs |
 				TXCS_SELECT_QUEUE0 |
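The jme changes add the canonical map-then-unwind pattern for TX fragments: every DMA mapping is checked with pci_dma_mapping_error(), and on failure everything mapped so far is unmapped before the packet is dropped. A self-contained sketch; the page/off/len/mapped arrays are illustrative stand-ins for the driver's per-descriptor buffer info (modern drivers would use the dma_map_page()/dma_mapping_error() API, the pci_* wrappers match the era of this code):

	#include <linux/pci.h>

	static int map_frags_or_unwind(struct pci_dev *pdev, struct page **page,
				       unsigned long *off, size_t *len,
				       dma_addr_t *mapped, int nr_frags)
	{
		int i;

		for (i = 0; i < nr_frags; i++) {
			dma_addr_t addr = pci_map_page(pdev, page[i], off[i],
						       len[i], PCI_DMA_TODEVICE);

			if (unlikely(pci_dma_mapping_error(pdev, addr))) {
				/* unmap everything mapped so far, newest first */
				while (--i >= 0)
					pci_unmap_page(pdev, mapped[i], len[i],
						       PCI_DMA_TODEVICE);
				return -EINVAL;
			}
			mapped[i] = addr;	/* remember for later unmap */
		}
		return 0;
	}
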
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index b161a525fc5b..9d5ced263a5e 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -232,7 +232,7 @@ static int orion_mdio_probe(struct platform_device *pdev)
 	clk_prepare_enable(dev->clk);
 
 	dev->err_interrupt = platform_get_irq(pdev, 0);
-	if (dev->err_interrupt != -ENXIO) {
+	if (dev->err_interrupt > 0) {
 		ret = devm_request_irq(&pdev->dev, dev->err_interrupt,
 				       orion_mdio_err_irq,
 				       IRQF_SHARED, pdev->name, dev);
@@ -241,6 +241,9 @@ static int orion_mdio_probe(struct platform_device *pdev)
 
 		writel(MVMDIO_ERR_INT_SMI_DONE,
 		       dev->regs + MVMDIO_ERR_INT_MASK);
+
+	} else if (dev->err_interrupt == -EPROBE_DEFER) {
+		return -EPROBE_DEFER;
 	}
 
 	mutex_init(&dev->lock);
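The mvmdio fix is the standard way to consume platform_get_irq(): a positive value is a usable IRQ number, -EPROBE_DEFER must be propagated so the probe is retried once the interrupt controller appears, and any other error just means the (optional) IRQ is absent. Sketched below; my_isr and priv are placeholders:

	irq = platform_get_irq(pdev, 0);
	if (irq > 0) {
		/* optional IRQ exists: wire it up */
		ret = devm_request_irq(&pdev->dev, irq, my_isr,
				       IRQF_SHARED, pdev->name, priv);
		if (ret)
			return ret;
	} else if (irq == -EPROBE_DEFER) {
		return -EPROBE_DEFER;	/* retry probe later */
	}
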
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 78099eab7673..92d3249f63f1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1253,12 +1253,12 @@ static struct mlx4_cmd_info cmd_info[] = {
 	},
 	{
 		.opcode = MLX4_CMD_UPDATE_QP,
-		.has_inbox = false,
+		.has_inbox = true,
 		.has_outbox = false,
 		.out_is_imm = false,
 		.encode_slave_id = false,
 		.verify = NULL,
-		.wrapper = mlx4_CMD_EPERM_wrapper
+		.wrapper = mlx4_UPDATE_QP_wrapper
 	},
 	{
 		.opcode = MLX4_CMD_GET_OP_REQ,
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index cef267e24f9c..c187d748115f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -754,10 +754,10 @@ static void mlx4_request_modules(struct mlx4_dev *dev)
 			has_eth_port = true;
 	}
 
-	if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
-		request_module_nowait(IB_DRV_NAME);
 	if (has_eth_port)
 		request_module_nowait(EN_DRV_NAME);
+	if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
+		request_module_nowait(IB_DRV_NAME);
 }
 
 /*
@@ -2044,6 +2044,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
 	if (!mlx4_is_slave(dev)) {
 		mlx4_init_mac_table(dev, &info->mac_table);
 		mlx4_init_vlan_table(dev, &info->vlan_table);
+		mlx4_init_roce_gid_table(dev, &info->gid_table);
 		info->base_qpn = mlx4_get_base_qpn(dev, port);
 	}
 
@@ -2440,7 +2441,8 @@ slave_start:
 	 * No return code for this call, just warn the user in case of PCI
 	 * express device capabilities are under-satisfied by the bus.
 	 */
-	mlx4_check_pcie_caps(dev);
+	if (!mlx4_is_slave(dev))
+		mlx4_check_pcie_caps(dev);
 
 	/* In master functions, the communication channel must be initialized
 	 * after obtaining its address from fw */
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index f9c465101963..8e9eb02e09cb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -695,6 +695,17 @@ struct mlx4_mac_table {
 	int max;
 };
 
+#define MLX4_ROCE_GID_ENTRY_SIZE	16
+
+struct mlx4_roce_gid_entry {
+	u8 raw[MLX4_ROCE_GID_ENTRY_SIZE];
+};
+
+struct mlx4_roce_gid_table {
+	struct mlx4_roce_gid_entry	roce_gids[MLX4_ROCE_MAX_GIDS];
+	struct mutex			mutex;
+};
+
 #define MLX4_MAX_VLAN_NUM	128
 #define MLX4_VLAN_TABLE_SIZE	(MLX4_MAX_VLAN_NUM << 2)
 
@@ -758,6 +769,7 @@ struct mlx4_port_info {
 	struct device_attribute port_mtu_attr;
 	struct mlx4_mac_table	mac_table;
 	struct mlx4_vlan_table	vlan_table;
+	struct mlx4_roce_gid_table gid_table;
 	int			base_qpn;
 };
 
@@ -788,10 +800,6 @@ enum {
 	MLX4_USE_RR	= 1,
 };
 
-struct mlx4_roce_gid_entry {
-	u8 raw[16];
-};
-
 struct mlx4_priv {
 	struct mlx4_dev		dev;
 
@@ -839,7 +847,6 @@ struct mlx4_priv {
 	int			fs_hash_mode;
 	u8 virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS];
 	__be64			slave_node_guids[MLX4_MFUNC_MAX];
-	struct mlx4_roce_gid_entry roce_gids[MLX4_MAX_PORTS][MLX4_ROCE_MAX_GIDS];
 
 	atomic_t		opreq_count;
 	struct work_struct	opreq_task;
@@ -1140,6 +1147,8 @@ int mlx4_change_port_types(struct mlx4_dev *dev,
 
 void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table);
 void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table);
+void mlx4_init_roce_gid_table(struct mlx4_dev *dev,
+			      struct mlx4_roce_gid_table *table);
 void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan);
 int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
 
@@ -1149,6 +1158,7 @@ int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
 			enum mlx4_resource resource_type,
 			u64 resource_id, int *slave);
 void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave_id);
+void mlx4_reset_roce_gids(struct mlx4_dev *dev, int slave);
 int mlx4_init_resource_tracker(struct mlx4_dev *dev);
 
 void mlx4_free_resource_tracker(struct mlx4_dev *dev,
@@ -1195,6 +1205,12 @@ int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
 			   struct mlx4_cmd_mailbox *outbox,
 			   struct mlx4_cmd_info *cmd);
 
+int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
+			   struct mlx4_vhcr *vhcr,
+			   struct mlx4_cmd_mailbox *inbox,
+			   struct mlx4_cmd_mailbox *outbox,
+			   struct mlx4_cmd_info *cmd);
+
 int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
 			 struct mlx4_vhcr *vhcr,
 			 struct mlx4_cmd_mailbox *inbox,
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index cfcad26ed40f..5ec6f203c6e6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -75,6 +75,16 @@ void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
 	table->total = 0;
 }
 
+void mlx4_init_roce_gid_table(struct mlx4_dev *dev,
+			      struct mlx4_roce_gid_table *table)
+{
+	int i;
+
+	mutex_init(&table->mutex);
+	for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++)
+		memset(table->roce_gids[i].raw, 0, MLX4_ROCE_GID_ENTRY_SIZE);
+}
+
 static int validate_index(struct mlx4_dev *dev,
 			  struct mlx4_mac_table *table, int index)
 {
@@ -584,6 +594,84 @@ int mlx4_get_base_gid_ix(struct mlx4_dev *dev, int slave, int port)
 }
 EXPORT_SYMBOL_GPL(mlx4_get_base_gid_ix);
 
+static int mlx4_reset_roce_port_gids(struct mlx4_dev *dev, int slave,
+				     int port, struct mlx4_cmd_mailbox *mailbox)
+{
+	struct mlx4_roce_gid_entry *gid_entry_mbox;
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	int num_gids, base, offset;
+	int i, err;
+
+	num_gids = mlx4_get_slave_num_gids(dev, slave, port);
+	base = mlx4_get_base_gid_ix(dev, slave, port);
+
+	memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);
+
+	mutex_lock(&(priv->port[port].gid_table.mutex));
+	/* Zero-out gids belonging to that slave in the port GID table */
+	for (i = 0, offset = base; i < num_gids; offset++, i++)
+		memcpy(priv->port[port].gid_table.roce_gids[offset].raw,
+		       zgid_entry.raw, MLX4_ROCE_GID_ENTRY_SIZE);
+
+	/* Now, copy roce port gids table to mailbox for passing to FW */
+	gid_entry_mbox = (struct mlx4_roce_gid_entry *)mailbox->buf;
+	for (i = 0; i < MLX4_ROCE_MAX_GIDS; gid_entry_mbox++, i++)
+		memcpy(gid_entry_mbox->raw,
+		       priv->port[port].gid_table.roce_gids[i].raw,
+		       MLX4_ROCE_GID_ENTRY_SIZE);
+
+	err = mlx4_cmd(dev, mailbox->dma,
+		       ((u32)port) | (MLX4_SET_PORT_GID_TABLE << 8), 1,
+		       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+		       MLX4_CMD_NATIVE);
+	mutex_unlock(&(priv->port[port].gid_table.mutex));
+	return err;
+}
+
+
+void mlx4_reset_roce_gids(struct mlx4_dev *dev, int slave)
+{
+	struct mlx4_active_ports actv_ports;
+	struct mlx4_cmd_mailbox *mailbox;
+	int num_eth_ports, err;
+	int i;
+
+	if (slave < 0 || slave > dev->num_vfs)
+		return;
+
+	actv_ports = mlx4_get_active_ports(dev, slave);
+
+	for (i = 0, num_eth_ports = 0; i < dev->caps.num_ports; i++) {
+		if (test_bit(i, actv_ports.ports)) {
+			if (dev->caps.port_type[i + 1] != MLX4_PORT_TYPE_ETH)
+				continue;
+			num_eth_ports++;
+		}
+	}
+
+	if (!num_eth_ports)
+		return;
+
+	/* have ETH ports.  Alloc mailbox for SET_PORT command */
+	mailbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(mailbox))
+		return;
+
+	for (i = 0; i < dev->caps.num_ports; i++) {
+		if (test_bit(i, actv_ports.ports)) {
+			if (dev->caps.port_type[i + 1] != MLX4_PORT_TYPE_ETH)
+				continue;
+			err = mlx4_reset_roce_port_gids(dev, slave, i + 1, mailbox);
+			if (err)
+				mlx4_warn(dev, "Could not reset ETH port GID table for slave %d, port %d (%d)\n",
+					  slave, i + 1, err);
+		}
+	}
+
+	mlx4_free_cmd_mailbox(dev, mailbox);
+	return;
+}
+
 static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
 				u8 op_mod, struct mlx4_cmd_mailbox *inbox)
 {
@@ -692,10 +780,12 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
 		/* 2. Check that do not have duplicates in OTHER
 		 *    entries in the port GID table
 		 */
+
+		mutex_lock(&(priv->port[port].gid_table.mutex));
 		for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) {
 			if (i >= base && i < base + num_gids)
 				continue; /* don't compare to slave's current gids */
-			gid_entry_tbl = &priv->roce_gids[port - 1][i];
+			gid_entry_tbl = &priv->port[port].gid_table.roce_gids[i];
 			if (!memcmp(gid_entry_tbl->raw, zgid_entry.raw, sizeof(zgid_entry)))
 				continue;
 			gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
@@ -709,6 +799,7 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
 				mlx4_warn(dev, "requested gid entry for slave:%d "
 					  "is a duplicate of gid at index %d\n",
 					  slave, i);
+				mutex_unlock(&(priv->port[port].gid_table.mutex));
 				return -EINVAL;
 			}
 		}
@@ -717,16 +808,24 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
 		/* insert slave GIDs with memcpy, starting at slave's base index */
 		gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
 		for (i = 0, offset = base; i < num_gids; gid_entry_mbox++, offset++, i++)
-			memcpy(priv->roce_gids[port - 1][offset].raw, gid_entry_mbox->raw, 16);
+			memcpy(priv->port[port].gid_table.roce_gids[offset].raw,
+			       gid_entry_mbox->raw, MLX4_ROCE_GID_ENTRY_SIZE);
 
 		/* Now, copy roce port gids table to current mailbox for passing to FW */
 		gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
 		for (i = 0; i < MLX4_ROCE_MAX_GIDS; gid_entry_mbox++, i++)
-			memcpy(gid_entry_mbox->raw, priv->roce_gids[port - 1][i].raw, 16);
-
-		break;
+			memcpy(gid_entry_mbox->raw,
+			       priv->port[port].gid_table.roce_gids[i].raw,
+			       MLX4_ROCE_GID_ENTRY_SIZE);
+
+		err = mlx4_cmd(dev, inbox->dma, in_mod & 0xffff, op_mod,
+			       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+			       MLX4_CMD_NATIVE);
+		mutex_unlock(&(priv->port[port].gid_table.mutex));
+		return err;
 	}
-	return mlx4_cmd(dev, inbox->dma, in_mod, op_mod,
+
+	return mlx4_cmd(dev, inbox->dma, in_mod & 0xffff, op_mod,
 			MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
 			MLX4_CMD_NATIVE);
 }
@@ -1099,13 +1198,17 @@ int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
 	num_vfs = bitmap_weight(slaves_pport.slaves, dev->num_vfs + 1) - 1;
 
 	for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) {
-		if (!memcmp(priv->roce_gids[port - 1][i].raw, gid, 16)) {
+		if (!memcmp(priv->port[port].gid_table.roce_gids[i].raw, gid,
+			    MLX4_ROCE_GID_ENTRY_SIZE)) {
 			found_ix = i;
 			break;
 		}
 	}
 
 	if (found_ix >= 0) {
+		/* Calculate a slave_gid which is the slave number in the gid
+		 * table and not a globally unique slave number.
+		 */
 		if (found_ix < MLX4_ROCE_PF_GIDS)
 			slave_gid = 0;
 		else if (found_ix < MLX4_ROCE_PF_GIDS + (vf_gids % num_vfs) *
@@ -1118,41 +1221,43 @@ int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
 			((vf_gids % num_vfs) * ((vf_gids / num_vfs + 1)))) /
 			(vf_gids / num_vfs)) + vf_gids % num_vfs + 1;
 
+		/* Calculate the globally unique slave id */
 		if (slave_gid) {
 			struct mlx4_active_ports exclusive_ports;
 			struct mlx4_active_ports actv_ports;
 			struct mlx4_slaves_pport slaves_pport_actv;
 			unsigned max_port_p_one;
-			int num_slaves_before = 1;
+			int num_vfs_before = 0;
+			int candidate_slave_gid;
 
+			/* Calculate how many VFs are on the previous port, if exists */
 			for (i = 1; i < port; i++) {
 				bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
-				set_bit(i, exclusive_ports.ports);
+				set_bit(i - 1, exclusive_ports.ports);
 				slaves_pport_actv =
 					mlx4_phys_to_slaves_pport_actv(
 							dev, &exclusive_ports);
-				num_slaves_before += bitmap_weight(
+				num_vfs_before += bitmap_weight(
 						slaves_pport_actv.slaves,
 						dev->num_vfs + 1);
 			}
 
-			if (slave_gid < num_slaves_before) {
-				bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
-				set_bit(port - 1, exclusive_ports.ports);
-				slaves_pport_actv =
-					mlx4_phys_to_slaves_pport_actv(
-						dev, &exclusive_ports);
-				slave_gid += bitmap_weight(
-						slaves_pport_actv.slaves,
-						dev->num_vfs + 1) -
-						num_slaves_before;
-			}
-			actv_ports = mlx4_get_active_ports(dev, slave_gid);
+			/* candidate_slave_gid isn't necessarily the correct slave, but
+			 * it has the same number of ports and is assigned to the same
+			 * ports as the real slave we're looking for. On dual port VF,
+			 * slave_gid = [single port VFs on port <port>] +
+			 * [offset of the current slave from the first dual port VF] +
+			 * 1 (for the PF).
+			 */
+			candidate_slave_gid = slave_gid + num_vfs_before;
+
+			actv_ports = mlx4_get_active_ports(dev, candidate_slave_gid);
 			max_port_p_one = find_first_bit(
 				actv_ports.ports, dev->caps.num_ports) +
 				bitmap_weight(actv_ports.ports,
 					      dev->caps.num_ports) + 1;
 
+			/* Calculate the real slave number */
 			for (i = 1; i < max_port_p_one; i++) {
 				if (i == port)
 					continue;
@@ -1182,7 +1287,8 @@ int mlx4_get_roce_gid_from_slave(struct mlx4_dev *dev, int port, int slave_id,
 	if (!mlx4_is_master(dev))
 		return -EINVAL;
 
-	memcpy(gid, priv->roce_gids[port - 1][slave_id].raw, 16);
+	memcpy(gid, priv->port[port].gid_table.roce_gids[slave_id].raw,
+	       MLX4_ROCE_GID_ENTRY_SIZE);
 	return 0;
 }
 EXPORT_SYMBOL(mlx4_get_roce_gid_from_slave);
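Throughout port.c the driver now keeps a mutex-protected in-memory shadow of each port's RoCE GID table and pushes the whole table to firmware in a single SET_PORT mailbox, so concurrent updaters cannot interleave stale copies. The core of that pattern, condensed (mbox_entries, new_gid and index are illustrative stand-ins):

	/* Update one entry in the shadow, then push the full table to FW
	 * under the table mutex so updates are serialized end to end.
	 */
	mutex_lock(&table->mutex);
	memcpy(table->roce_gids[index].raw, new_gid, MLX4_ROCE_GID_ENTRY_SIZE);

	for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++)
		memcpy(&mbox_entries[i], table->roce_gids[i].raw,
		       MLX4_ROCE_GID_ENTRY_SIZE);

	err = mlx4_cmd(dev, mailbox->dma,
		       port | (MLX4_SET_PORT_GID_TABLE << 8), 1,
		       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_NATIVE);
	mutex_unlock(&table->mutex);
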
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 61d64ebffd56..fbd32af89c7c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -389,6 +389,41 @@ err_icm:
 
 EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
 
+#define MLX4_UPDATE_QP_SUPPORTED_ATTRS MLX4_UPDATE_QP_SMAC
+int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp,
+		   enum mlx4_update_qp_attr attr,
+		   struct mlx4_update_qp_params *params)
+{
+	struct mlx4_cmd_mailbox *mailbox;
+	struct mlx4_update_qp_context *cmd;
+	u64 pri_addr_path_mask = 0;
+	int err = 0;
+
+	mailbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(mailbox))
+		return PTR_ERR(mailbox);
+
+	cmd = (struct mlx4_update_qp_context *)mailbox->buf;
+
+	if (!attr || (attr & ~MLX4_UPDATE_QP_SUPPORTED_ATTRS))
+		return -EINVAL;
+
+	if (attr & MLX4_UPDATE_QP_SMAC) {
+		pri_addr_path_mask |= 1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX;
+		cmd->qp_context.pri_path.grh_mylmc = params->smac_index;
+	}
+
+	cmd->primary_addr_path_mask = cpu_to_be64(pri_addr_path_mask);
+
+	err = mlx4_cmd(dev, mailbox->dma, qp->qpn & 0xffffff, 0,
+		       MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
+		       MLX4_CMD_NATIVE);
+
+	mlx4_free_cmd_mailbox(dev, mailbox);
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_update_qp);
+
 void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp)
 {
 	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
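One detail worth flagging in the new mlx4_update_qp(): the attribute check sits after mlx4_alloc_cmd_mailbox(), so the -EINVAL return path leaks the mailbox. A leak-free ordering validates before allocating, for example:

	/* Validate inputs before taking resources, so early returns
	 * cannot leak the command mailbox.
	 */
	if (!attr || (attr & ~MLX4_UPDATE_QP_SUPPORTED_ATTRS))
		return -EINVAL;		/* nothing allocated yet */

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
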
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 3b5f53ef29b2..f16e539749c4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -586,6 +586,7 @@ void mlx4_free_resource_tracker(struct mlx4_dev *dev,
 	}
 	/* free master's vlans */
 	i = dev->caps.function;
+	mlx4_reset_roce_gids(dev, i);
 	mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
 	rem_slave_vlans(dev, i);
 	mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
@@ -3733,6 +3734,25 @@ static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
 	}
 }
 
+static int mlx4_adjust_port(struct mlx4_dev *dev, int slave,
+			    u8 *gid, enum mlx4_protocol prot)
+{
+	int real_port;
+
+	if (prot != MLX4_PROT_ETH)
+		return 0;
+
+	if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 ||
+	    dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
+		real_port = mlx4_slave_convert_port(dev, slave, gid[5]);
+		if (real_port < 0)
+			return -EINVAL;
+		gid[5] = real_port;
+	}
+
+	return 0;
+}
+
 int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
 			   struct mlx4_vhcr *vhcr,
 			   struct mlx4_cmd_mailbox *inbox,
@@ -3768,6 +3788,10 @@ int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
 		if (err)
 			goto ex_detach;
 	} else {
+		err = mlx4_adjust_port(dev, slave, gid, prot);
+		if (err)
+			goto ex_put;
+
 		err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
 		if (err)
 			goto ex_put;
@@ -3872,6 +3896,60 @@ static int add_eth_header(struct mlx4_dev *dev, int slave,
 
 }
 
+#define MLX4_UPD_QP_PATH_MASK_SUPPORTED (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)
+int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
+			   struct mlx4_vhcr *vhcr,
+			   struct mlx4_cmd_mailbox *inbox,
+			   struct mlx4_cmd_mailbox *outbox,
+			   struct mlx4_cmd_info *cmd_info)
+{
+	int err;
+	u32 qpn = vhcr->in_modifier & 0xffffff;
+	struct res_qp *rqp;
+	u64 mac;
+	unsigned port;
+	u64 pri_addr_path_mask;
+	struct mlx4_update_qp_context *cmd;
+	int smac_index;
+
+	cmd = (struct mlx4_update_qp_context *)inbox->buf;
+
+	pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
+	if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
+	    (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
+		return -EPERM;
+
+	/* Just change the smac for the QP */
+	err = get_res(dev, slave, qpn, RES_QP, &rqp);
+	if (err) {
+		mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
+		return err;
+	}
+
+	port = (rqp->sched_queue >> 6 & 1) + 1;
+	smac_index = cmd->qp_context.pri_path.grh_mylmc;
+	err = mac_find_smac_ix_in_slave(dev, slave, port,
+					smac_index, &mac);
+	if (err) {
+		mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
+			 qpn, smac_index);
+		goto err_mac;
+	}
+
+	err = mlx4_cmd(dev, inbox->dma,
+		       vhcr->in_modifier, 0,
+		       MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
+		       MLX4_CMD_NATIVE);
+	if (err) {
+		mlx4_err(dev, "Failed to update qpn on qpn 0x%x, command failed\n", qpn);
+		goto err_mac;
+	}
+
+err_mac:
+	put_res(dev, slave, qpn, RES_QP);
+	return err;
+}
+
 int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
 					 struct mlx4_vhcr *vhcr,
 					 struct mlx4_cmd_mailbox *inbox,
@@ -4604,7 +4682,7 @@ static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
 void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
-
+	mlx4_reset_roce_gids(dev, slave);
 	mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
 	rem_slave_vlans(dev, slave);
 	rem_slave_macs(dev, slave);
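mlx4_UPDATE_QP_wrapper() above shows the usual shape of an SR-IOV command wrapper: whitelist exactly the fields the VF may modify, verify ownership through the resource tracker, then forward the guest's mailbox to firmware. Condensed, using the helpers from this file:

	/* Reject anything beyond the whitelisted attribute bits, then
	 * confirm the VF owns the QP before forwarding the command.
	 */
	if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
	    (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
		return -EPERM;

	err = get_res(dev, slave, qpn, RES_QP, &rqp);
	if (err)
		return err;

	err = mlx4_cmd(dev, inbox->dma, vhcr->in_modifier, 0,
		       MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_NATIVE);
	put_res(dev, slave, qpn, RES_QP);
	return err;
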
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 7b52a88923ef..f785d01c7d12 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -1719,22 +1719,6 @@ static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
 	       tx_ring->producer;
 }
 
-static inline int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter,
-					     struct net_device *netdev)
-{
-	int err;
-
-	netdev->num_tx_queues = adapter->drv_tx_rings;
-	netdev->real_num_tx_queues = adapter->drv_tx_rings;
-
-	err = netif_set_real_num_tx_queues(netdev, adapter->drv_tx_rings);
-	if (err)
-		netdev_err(netdev, "failed to set %d Tx queues\n",
-			   adapter->drv_tx_rings);
-
-	return err;
-}
-
 struct qlcnic_nic_template {
 	int (*config_bridged_mode) (struct qlcnic_adapter *, u32);
 	int (*config_led) (struct qlcnic_adapter *, u32, u32);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
index a51fe18f09a8..561cb11ca58c 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
@@ -1020,6 +1020,7 @@ static int qlcnic_dcb_peer_app_info(struct net_device *netdev,
 	struct qlcnic_dcb_cee *peer;
 	int i;
 
+	memset(info, 0, sizeof(*info));
 	*app_count = 0;
 
 	if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index dbf75393f758..7e55e88a81bf 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -2206,6 +2206,31 @@ static void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *adapter)
 	ahw->max_uc_count = count;
 }
 
+static int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter,
+				      u8 tx_queues, u8 rx_queues)
+{
+	struct net_device *netdev = adapter->netdev;
+	int err = 0;
+
+	if (tx_queues) {
+		err = netif_set_real_num_tx_queues(netdev, tx_queues);
+		if (err) {
+			netdev_err(netdev, "failed to set %d Tx queues\n",
+				   tx_queues);
+			return err;
+		}
+	}
+
+	if (rx_queues) {
+		err = netif_set_real_num_rx_queues(netdev, rx_queues);
+		if (err)
+			netdev_err(netdev, "failed to set %d Rx queues\n",
+				   rx_queues);
+	}
+
+	return err;
+}
+
 int
 qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
 		    int pci_using_dac)
@@ -2269,7 +2294,8 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
 	netdev->priv_flags |= IFF_UNICAST_FLT;
 	netdev->irq = adapter->msix_entries[0].vector;
 
-	err = qlcnic_set_real_num_queues(adapter, netdev);
+	err = qlcnic_set_real_num_queues(adapter, adapter->drv_tx_rings,
+					 adapter->drv_sds_rings);
 	if (err)
 		return err;
 
@@ -2374,6 +2400,14 @@ void qlcnic_set_drv_version(struct qlcnic_adapter *adapter)
 		qlcnic_fw_cmd_set_drv_version(adapter, fw_cmd);
 }
 
+/* Reset firmware API lock */
+static void qlcnic_reset_api_lock(struct qlcnic_adapter *adapter)
+{
+	qlcnic_api_lock(adapter);
+	qlcnic_api_unlock(adapter);
+}
+
+
 static int
 qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
@@ -2476,6 +2510,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (qlcnic_82xx_check(adapter)) {
 		qlcnic_check_vf(adapter, ent);
 		adapter->portnum = adapter->ahw->pci_func;
+		qlcnic_reset_api_lock(adapter);
 		err = qlcnic_start_firmware(adapter);
 		if (err) {
 			dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n"
@@ -2934,9 +2969,13 @@ static void qlcnic_dump_tx_rings(struct qlcnic_adapter *adapter)
 			    tx_ring->tx_stats.xmit_called,
 			    tx_ring->tx_stats.xmit_on,
 			    tx_ring->tx_stats.xmit_off);
+
+		if (tx_ring->crb_intr_mask)
+			netdev_info(netdev, "crb_intr_mask=%d\n",
+				    readl(tx_ring->crb_intr_mask));
+
 		netdev_info(netdev,
-			    "crb_intr_mask=%d, hw_producer=%d, sw_producer=%d sw_consumer=%d, hw_consumer=%d\n",
-			    readl(tx_ring->crb_intr_mask),
+			    "hw_producer=%d, sw_producer=%d sw_consumer=%d, hw_consumer=%d\n",
 			    readl(tx_ring->crb_cmd_producer),
 			    tx_ring->producer, tx_ring->sw_consumer,
 			    le32_to_cpu(*(tx_ring->hw_consumer)));
@@ -3969,12 +4008,21 @@ int qlcnic_validate_rings(struct qlcnic_adapter *adapter, __u32 ring_cnt,
 int qlcnic_setup_rings(struct qlcnic_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
+	u8 tx_rings, rx_rings;
 	int err;
 
 	if (test_bit(__QLCNIC_RESETTING, &adapter->state))
 		return -EBUSY;
 
+	tx_rings = adapter->drv_tss_rings;
+	rx_rings = adapter->drv_rss_rings;
+
 	netif_device_detach(netdev);
+
+	err = qlcnic_set_real_num_queues(adapter, tx_rings, rx_rings);
+	if (err)
+		goto done;
+
 	if (netif_running(netdev))
 		__qlcnic_down(adapter, netdev);
 
@@ -3994,7 +4042,17 @@ int qlcnic_setup_rings(struct qlcnic_adapter *adapter)
 		return err;
 	}
 
-	netif_set_real_num_tx_queues(netdev, adapter->drv_tx_rings);
+	/* Check if we need to update real_num_{tx|rx}_queues because
+	 * qlcnic_setup_intr() may change Tx/Rx rings size
+	 */
+	if ((tx_rings != adapter->drv_tx_rings) ||
+	    (rx_rings != adapter->drv_sds_rings)) {
+		err = qlcnic_set_real_num_queues(adapter,
+						 adapter->drv_tx_rings,
+						 adapter->drv_sds_rings);
+		if (err)
+			goto done;
+	}
 
 	if (qlcnic_83xx_check(adapter)) {
 		qlcnic_83xx_initialize_nic(adapter, 1);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 0638c1810d54..6afe9c1f5ab9 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -1370,7 +1370,7 @@ static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter,
 
 	rsp = qlcnic_sriov_alloc_bc_trans(&trans);
 	if (rsp)
-		return rsp;
+		goto free_cmd;
 
 	rsp = qlcnic_sriov_prepare_bc_hdr(trans, cmd, seq, QLC_BC_COMMAND);
 	if (rsp)
@@ -1425,6 +1425,13 @@ err_out:
 
 cleanup_transaction:
 	qlcnic_sriov_cleanup_transaction(trans);
+
+free_cmd:
+	if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT) {
+		qlcnic_free_mbx_args(cmd);
+		kfree(cmd);
+	}
+
 	return rsp;
 }
 
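
The hunk above fixes a leak for fire-and-forget mailbox commands: when the caller does not wait (QLC_83XX_MBX_CMD_NO_WAIT), the issuing function owns the command and must release it on every exit path, including the early transaction-allocation failure that previously returned directly. A generic sketch of that ownership rule; all names here are illustrative, not from the driver:

	#include <linux/slab.h>

	struct example_cmd {
		int type;
	};

	int example_alloc_transaction(struct example_cmd *cmd);	/* hypothetical */
	int example_send(struct example_cmd *cmd);		/* hypothetical */
	void example_free_args(struct example_cmd *cmd);	/* hypothetical */

	#define EXAMPLE_CMD_NO_WAIT	1

	static int example_issue_cmd(struct example_cmd *cmd)
	{
		int rc;

		rc = example_alloc_transaction(cmd);
		if (rc)
			goto free_cmd;	/* was "return rc;" -> cmd leaked */

		rc = example_send(cmd);

	free_cmd:
		if (cmd->type == EXAMPLE_CMD_NO_WAIT) {
			example_free_args(cmd);
			kfree(cmd);
		}
		return rc;
	}
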
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
index 6203c7d8550f..45019649bbbd 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
@@ -358,6 +358,8 @@ struct sxgbe_core_ops {
 	/* Enable disable checksum offload operations */
 	void (*enable_rx_csum)(void __iomem *ioaddr);
 	void (*disable_rx_csum)(void __iomem *ioaddr);
+	void (*enable_rxqueue)(void __iomem *ioaddr, int queue_num);
+	void (*disable_rxqueue)(void __iomem *ioaddr, int queue_num);
 };
 
 const struct sxgbe_core_ops *sxgbe_get_core_ops(void);
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c
index c4da7a2b002a..58c35692560e 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c
@@ -165,6 +165,26 @@ static void sxgbe_core_set_speed(void __iomem *ioaddr, unsigned char speed)
 	writel(tx_cfg, ioaddr + SXGBE_CORE_TX_CONFIG_REG);
 }
 
+static void sxgbe_core_enable_rxqueue(void __iomem *ioaddr, int queue_num)
+{
+	u32 reg_val;
+
+	reg_val = readl(ioaddr + SXGBE_CORE_RX_CTL0_REG);
+	reg_val &= ~(SXGBE_CORE_RXQ_ENABLE_MASK << queue_num);
+	reg_val |= SXGBE_CORE_RXQ_ENABLE;
+	writel(reg_val, ioaddr + SXGBE_CORE_RX_CTL0_REG);
+}
+
+static void sxgbe_core_disable_rxqueue(void __iomem *ioaddr, int queue_num)
+{
+	u32 reg_val;
+
+	reg_val = readl(ioaddr + SXGBE_CORE_RX_CTL0_REG);
+	reg_val &= ~(SXGBE_CORE_RXQ_ENABLE_MASK << queue_num);
+	reg_val |= SXGBE_CORE_RXQ_DISABLE;
+	writel(reg_val, ioaddr + SXGBE_CORE_RX_CTL0_REG);
+}
+
 static void sxgbe_set_eee_mode(void __iomem *ioaddr)
 {
 	u32 ctrl;
@@ -254,6 +274,8 @@ static const struct sxgbe_core_ops core_ops = {
 	.set_eee_pls = sxgbe_set_eee_pls,
 	.enable_rx_csum = sxgbe_enable_rx_csum,
 	.disable_rx_csum = sxgbe_disable_rx_csum,
+	.enable_rxqueue = sxgbe_core_enable_rxqueue,
+	.disable_rxqueue = sxgbe_core_disable_rxqueue,
 };
 
 const struct sxgbe_core_ops *sxgbe_get_core_ops(void)
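
Both new helpers follow the usual read-modify-write shape for a per-queue register field: clear the field with the mask shifted to the queue's position, then OR in the desired state. A generic sketch of the idiom, assuming the state value also needs shifting (note that the helpers above apply it unshifted); names are illustrative:

	static void example_set_queue_field(void __iomem *reg_addr,
					    u32 field_mask, u32 field_val,
					    unsigned int shift)
	{
		u32 reg_val = readl(reg_addr);

		reg_val &= ~(field_mask << shift);	/* clear the old state */
		reg_val |= field_val << shift;		/* program the new one */
		writel(reg_val, reg_addr);
	}
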
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c
index e896dbbd2e15..2686bb5b6765 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c
@@ -45,10 +45,10 @@ static void sxgbe_prepare_tx_desc(struct sxgbe_tx_norm_desc *p, u8 is_fd,
 	p->tdes23.tx_rd_des23.first_desc = is_fd;
 	p->tdes23.tx_rd_des23.buf1_size = buf1_len;
 
-	p->tdes23.tx_rd_des23.tx_pkt_len.cksum_pktlen.total_pkt_len = pkt_len;
+	p->tdes23.tx_rd_des23.tx_pkt_len.pkt_len.total_pkt_len = pkt_len;
 
 	if (cksum)
-		p->tdes23.tx_rd_des23.tx_pkt_len.cksum_pktlen.cksum_ctl = cic_full;
+		p->tdes23.tx_rd_des23.cksum_ctl = cic_full;
 }
 
 /* Set VLAN control information */
@@ -233,6 +233,12 @@ static void sxgbe_set_rx_owner(struct sxgbe_rx_norm_desc *p)
 	p->rdes23.rx_rd_des23.own_bit = 1;
 }
 
+/* Set Interrupt on completion bit */
+static void sxgbe_set_rx_int_on_com(struct sxgbe_rx_norm_desc *p)
+{
+	p->rdes23.rx_rd_des23.int_on_com = 1;
+}
+
 /* Get the receive frame size */
 static int sxgbe_get_rx_frame_len(struct sxgbe_rx_norm_desc *p)
 {
@@ -498,6 +504,7 @@ static const struct sxgbe_desc_ops desc_ops = {
 	.init_rx_desc = sxgbe_init_rx_desc,
 	.get_rx_owner = sxgbe_get_rx_owner,
 	.set_rx_owner = sxgbe_set_rx_owner,
+	.set_rx_int_on_com = sxgbe_set_rx_int_on_com,
 	.get_rx_frame_len = sxgbe_get_rx_frame_len,
 	.get_rx_fd_status = sxgbe_get_rx_fd_status,
 	.get_rx_ld_status = sxgbe_get_rx_ld_status,
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h
index 838cb9fb0ea9..18609324db72 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h
@@ -39,22 +39,22 @@ struct sxgbe_tx_norm_desc {
 		u32 int_on_com:1;
 		/* TDES3 */
 		union {
-			u32 tcp_payload_len:18;
+			u16 tcp_payload_len;
 			struct {
 				u32 total_pkt_len:15;
 				u32 reserved1:1;
-				u32 cksum_ctl:2;
-			} cksum_pktlen;
+			} pkt_len;
 		} tx_pkt_len;
 
-		u32 tse_bit:1;
-		u32 tcp_hdr_len:4;
-		u32 sa_insert_ctl:3;
-		u32 crc_pad_ctl:2;
-		u32 last_desc:1;
-		u32 first_desc:1;
-		u32 ctxt_bit:1;
-		u32 own_bit:1;
+		u16 cksum_ctl:2;
+		u16 tse_bit:1;
+		u16 tcp_hdr_len:4;
+		u16 sa_insert_ctl:3;
+		u16 crc_pad_ctl:2;
+		u16 last_desc:1;
+		u16 first_desc:1;
+		u16 ctxt_bit:1;
+		u16 own_bit:1;
 	} tx_rd_des23;
 
 	/* tx write back Desc 2,3 */
@@ -70,25 +70,20 @@ struct sxgbe_tx_norm_desc {
 
 struct sxgbe_rx_norm_desc {
 	union {
-		u32 rdes0; /* buf1 address */
-		struct {
+		u64 rdes01; /* buf1 address */
+		union {
 			u32 out_vlan_tag:16;
 			u32 in_vlan_tag:16;
-		} wb_rx_des0;
-	} rd_wb_des0;
-
-	union {
-		u32 rdes1; /* buf2 address or buf1[63:32] */
-		u32 rss_hash; /* Write-back RX */
-	} rd_wb_des1;
+			u32 rss_hash;
+		} rx_wb_des01;
+	} rdes01;
 
 	union {
 		/* RX Read format Desc 2,3 */
 		struct{
 			/* RDES2 */
-			u32 buf2_addr;
+			u64 buf2_addr:62;
 			/* RDES3 */
-			u32 buf2_hi_addr:30;
 			u32 int_on_com:1;
 			u32 own_bit:1;
 		} rx_rd_des23;
@@ -263,6 +258,9 @@ struct sxgbe_desc_ops {
 	/* Set own bit */
 	void (*set_rx_owner)(struct sxgbe_rx_norm_desc *p);
 
+	/* Set Interrupt on completion bit */
+	void (*set_rx_int_on_com)(struct sxgbe_rx_norm_desc *p);
+
 	/* Get the receive frame size */
 	int (*get_rx_frame_len)(struct sxgbe_rx_norm_desc *p);
 
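
Since the descriptor layout is now expressed through u64/u16 bit-fields, a build-time size check is a cheap guard against the compiler packing them unexpectedly. A hedged sketch, assuming the hardware expects a 16-byte normal descriptor (four 32-bit words); this check is not part of the patch:

	#include <linux/bug.h>

	static inline void sxgbe_rx_desc_size_check_example(void)
	{
		/* assumption: RDES0..RDES3 span exactly 16 bytes */
		BUILD_BUG_ON(sizeof(struct sxgbe_rx_norm_desc) != 16);
	}
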
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c
index 4d989ff6c978..bb9b5b8afc5f 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c
@@ -23,21 +23,8 @@
 /* DMA core initialization */
 static int sxgbe_dma_init(void __iomem *ioaddr, int fix_burst, int burst_map)
 {
-	int retry_count = 10;
 	u32 reg_val;
 
-	/* reset the DMA */
-	writel(SXGBE_DMA_SOFT_RESET, ioaddr + SXGBE_DMA_MODE_REG);
-	while (retry_count--) {
-		if (!(readl(ioaddr + SXGBE_DMA_MODE_REG) &
-		      SXGBE_DMA_SOFT_RESET))
-			break;
-		mdelay(10);
-	}
-
-	if (retry_count < 0)
-		return -EBUSY;
-
 	reg_val = readl(ioaddr + SXGBE_DMA_SYSBUS_MODE_REG);
 
 	/* if fix_burst = 0, Set UNDEF = 1 of DMA_Sys_Mode Register.
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index 27e8c824b204..82a9a983869f 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -1076,6 +1076,9 @@ static int sxgbe_open(struct net_device *dev)
 
 	/* Initialize the MAC Core */
 	priv->hw->mac->core_init(priv->ioaddr);
+	SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
+		priv->hw->mac->enable_rxqueue(priv->ioaddr, queue_num);
+	}
 
 	/* Request the IRQ lines */
 	ret = devm_request_irq(priv->device, priv->irq, sxgbe_common_interrupt,
@@ -1453,6 +1456,7 @@ static void sxgbe_rx_refill(struct sxgbe_priv_data *priv)
 		/* Added memory barrier for RX descriptor modification */
 		wmb();
 		priv->hw->desc->set_rx_owner(p);
+		priv->hw->desc->set_rx_int_on_com(p);
 		/* Added memory barrier for RX descriptor modification */
 		wmb();
 	}
@@ -2070,6 +2074,24 @@ static int sxgbe_hw_init(struct sxgbe_priv_data * const priv)
 	return 0;
 }
 
+static int sxgbe_sw_reset(void __iomem *addr)
+{
+	int retry_count = 10;
+
+	writel(SXGBE_DMA_SOFT_RESET, addr + SXGBE_DMA_MODE_REG);
+	while (retry_count--) {
+		if (!(readl(addr + SXGBE_DMA_MODE_REG) &
+		      SXGBE_DMA_SOFT_RESET))
+			break;
+		mdelay(10);
+	}
+
+	if (retry_count < 0)
+		return -EBUSY;
+
+	return 0;
+}
+
 /**
  * sxgbe_drv_probe
  * @device: device pointer
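
This is the retry loop that was removed from sxgbe_dma_init() in sxgbe_dma.c above, relocated so the soft reset runs once at probe time. On kernels that provide <linux/iopoll.h>, the same wait can be expressed with readl_poll_timeout(); a hedged alternative sketch, not what the patch does:

	#include <linux/iopoll.h>

	static int example_sw_reset(void __iomem *ioaddr)
	{
		u32 val;

		writel(SXGBE_DMA_SOFT_RESET, ioaddr + SXGBE_DMA_MODE_REG);
		/* poll every 10 ms, give up after 100 ms */
		return readl_poll_timeout(ioaddr + SXGBE_DMA_MODE_REG, val,
					  !(val & SXGBE_DMA_SOFT_RESET),
					  10000, 100000);
	}
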
@@ -2102,6 +2124,10 @@ struct sxgbe_priv_data *sxgbe_drv_probe(struct device *device,
 	priv->plat = plat_dat;
 	priv->ioaddr = addr;
 
+	ret = sxgbe_sw_reset(priv->ioaddr);
+	if (ret)
+		goto error_free_netdev;
+
 	/* Verify driver arguments */
 	sxgbe_verify_args();
 
@@ -2218,9 +2244,14 @@ error_free_netdev:
 int sxgbe_drv_remove(struct net_device *ndev)
 {
 	struct sxgbe_priv_data *priv = netdev_priv(ndev);
+	u8 queue_num;
 
 	netdev_info(ndev, "%s: removing driver\n", __func__);
 
+	SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
+		priv->hw->mac->disable_rxqueue(priv->ioaddr, queue_num);
+	}
+
 	priv->hw->dma->stop_rx(priv->ioaddr, SXGBE_RX_QUEUES);
 	priv->hw->dma->stop_tx(priv->ioaddr, SXGBE_TX_QUEUES);
 
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c
index 01af2cbb479d..43ccb4a6de15 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c
@@ -27,7 +27,7 @@
 #define SXGBE_SMA_PREAD_CMD	0x02 /* post read increament address */
 #define SXGBE_SMA_READ_CMD	0x03 /* read command */
 #define SXGBE_SMA_SKIP_ADDRFRM	0x00040000 /* skip the address frame */
-#define SXGBE_MII_BUSY		0x00800000 /* mii busy */
+#define SXGBE_MII_BUSY		0x00400000 /* mii busy */
 
 static int sxgbe_mdio_busy_wait(void __iomem *ioaddr, unsigned int mii_data)
 {
@@ -147,6 +147,7 @@ int sxgbe_mdio_register(struct net_device *ndev)
 	struct sxgbe_mdio_bus_data *mdio_data = priv->plat->mdio_bus_data;
 	int err, phy_addr;
 	int *irqlist;
+	bool phy_found = false;
 	bool act;
 
 	/* allocate the new mdio bus */
@@ -162,7 +163,7 @@ int sxgbe_mdio_register(struct net_device *ndev)
 	irqlist = priv->mii_irq;
 
 	/* assign mii bus fields */
-	mdio_bus->name = "samsxgbe";
+	mdio_bus->name = "sxgbe";
 	mdio_bus->read = &sxgbe_mdio_read;
 	mdio_bus->write = &sxgbe_mdio_write;
 	snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-%x",
@@ -216,13 +217,22 @@ int sxgbe_mdio_register(struct net_device *ndev)
 			netdev_info(ndev, "PHY ID %08x at %d IRQ %s (%s)%s\n",
 				    phy->phy_id, phy_addr, irq_str,
 				    dev_name(&phy->dev), act ? " active" : "");
+			phy_found = true;
 		}
 	}
 
+	if (!phy_found) {
+		netdev_err(ndev, "PHY not found\n");
+		goto phyfound_err;
+	}
+
 	priv->mii = mdio_bus;
 
 	return 0;
 
+phyfound_err:
+	err = -ENODEV;
+	mdiobus_unregister(mdio_bus);
 mdiobus_err:
 	mdiobus_free(mdio_bus);
 	return err;
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h
index 5a89acb4c505..56f8bf5a3f1b 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h
@@ -52,6 +52,10 @@
 #define SXGBE_CORE_RX_CTL2_REG	0x00A8
 #define SXGBE_CORE_RX_CTL3_REG	0x00AC
 
+#define SXGBE_CORE_RXQ_ENABLE_MASK	0x0003
+#define SXGBE_CORE_RXQ_ENABLE		0x0002
+#define SXGBE_CORE_RXQ_DISABLE		0x0000
+
 /* Interrupt Registers */
 #define SXGBE_CORE_INT_STATUS_REG	0x00B0
 #define SXGBE_CORE_INT_ENABLE_REG	0x00B4
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index 32d969e857f7..89b83e59e1dc 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -156,13 +156,15 @@ void efx_nic_fini_interrupt(struct efx_nic *efx)
 	efx->net_dev->rx_cpu_rmap = NULL;
 #endif
 
-	/* Disable MSI/MSI-X interrupts */
-	efx_for_each_channel(channel, efx)
-		free_irq(channel->irq, &efx->msi_context[channel->channel]);
-
-	/* Disable legacy interrupt */
-	if (efx->legacy_irq)
+	if (EFX_INT_MODE_USE_MSI(efx)) {
+		/* Disable MSI/MSI-X interrupts */
+		efx_for_each_channel(channel, efx)
+			free_irq(channel->irq,
+				 &efx->msi_context[channel->channel]);
+	} else {
+		/* Disable legacy interrupt */
 		free_irq(efx->legacy_irq, efx);
+	}
 }
 
 /* Register dump */
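
The rule this fix restores: free_irq() must mirror request_irq() one-to-one, with the same dev_id cookie, and in legacy mode the per-channel MSI vectors were never requested in the first place. A generic sketch of mode-aware teardown; the names are illustrative, not sfc code:

	#include <linux/interrupt.h>

	static void example_teardown_irqs(bool use_msi, unsigned int nr_msi,
					  const int *msi_irqs, void **msi_cookies,
					  int legacy_irq, void *legacy_cookie)
	{
		unsigned int i;

		if (use_msi) {
			/* release only the vectors that were requested */
			for (i = 0; i < nr_msi; i++)
				free_irq(msi_irqs[i], msi_cookies[i]);
		} else {
			free_irq(legacy_irq, legacy_cookie);
		}
	}
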
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index d1b4dca53a9d..bcaa41af1e62 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -147,18 +147,19 @@ MODULE_ALIAS("platform:smc91x");
  */
 #define MII_DELAY		1
 
-#if SMC_DEBUG > 0
-#define DBG(n, dev, args...)				\
-	do {						\
-		if (SMC_DEBUG >= (n))			\
-			netdev_dbg(dev, args);		\
+#define DBG(n, dev, fmt, ...)					\
+	do {							\
+		if (SMC_DEBUG >= (n))				\
+			netdev_dbg(dev, fmt, ##__VA_ARGS__);	\
 	} while (0)
 
-#define PRINTK(dev, args...)	netdev_info(dev, args)
-#else
-#define DBG(n, dev, args...)	do { } while (0)
-#define PRINTK(dev, args...)	netdev_dbg(dev, args)
-#endif
+#define PRINTK(dev, fmt, ...)					\
+	do {							\
+		if (SMC_DEBUG > 0)				\
+			netdev_info(dev, fmt, ##__VA_ARGS__);	\
+		else						\
+			netdev_dbg(dev, fmt, ##__VA_ARGS__);	\
+	} while (0)
 
 #if SMC_DEBUG > 3
 static void PRINT_PKT(u_char *buf, int length)
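
Both rewritten macros rely on the ##__VA_ARGS__ idiom: the "##" extension swallows the preceding comma when the variadic list is empty, so a call with a bare format string still expands cleanly. A minimal illustration with a hypothetical macro name:

	#define EXAMPLE_DBG(dev, fmt, ...) \
		netdev_dbg(dev, fmt, ##__VA_ARGS__)

	/* Both of these expand without a stray trailing comma:
	 *   EXAMPLE_DBG(dev, "link up\n");
	 *   EXAMPLE_DBG(dev, "speed %d\n", speed);
	 */
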
@@ -191,7 +192,7 @@ static void PRINT_PKT(u_char *buf, int length)
 	pr_cont("\n");
 }
 #else
-#define PRINT_PKT(x...) do { } while (0)
+static inline void PRINT_PKT(u_char *buf, int length) { }
 #endif
 
 
@@ -1781,7 +1782,7 @@ static int smc_findirq(struct smc_local *lp)
 	int timeout = 20;
 	unsigned long cookie;
 
-	DBG(2, dev, "%s: %s\n", CARDNAME, __func__);
+	DBG(2, lp->dev, "%s: %s\n", CARDNAME, __func__);
 
 	cookie = probe_irq_on();
 
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index d940034acdd4..0f4841d2e8dc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1704,7 +1704,7 @@ static int stmmac_open(struct net_device *dev)
 		if (ret) {
 			pr_err("%s: Cannot attach to PHY (error: %d)\n",
 			       __func__, ret);
-			goto phy_error;
+			return ret;
 		}
 	}
 
@@ -1779,8 +1779,6 @@ init_error:
 dma_desc_error:
 	if (priv->phydev)
 		phy_disconnect(priv->phydev);
-phy_error:
-	clk_disable_unprepare(priv->stmmac_clk);
 
 	return ret;
 }
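
The likely reasoning behind dropping phy_error: an error path should undo only what the function itself set up, and stmmac_open() has not enabled stmmac_clk at the point where PHY attach can fail, so the clk_disable_unprepare() there was unbalanced. A generic sketch of symmetric unwinding; every name below is hypothetical:

	struct example_priv;
	int example_attach_phy(struct example_priv *p);		/* hypothetical */
	int example_init_dma(struct example_priv *p);		/* hypothetical */
	void example_detach_phy(struct example_priv *p);	/* hypothetical */

	static int example_open(struct example_priv *p)
	{
		int ret;

		ret = example_attach_phy(p);
		if (ret)
			return ret;	/* nothing acquired yet: just return */

		ret = example_init_dma(p);
		if (ret)
			goto err_detach_phy;	/* undo exactly one step */

		return 0;

	err_detach_phy:
		example_detach_phy(p);
		return ret;
	}
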
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index df8d383acf48..b9ac20f42651 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -246,7 +246,7 @@ static inline void cas_lock_tx(struct cas *cp)
 	int i;
 
 	for (i = 0; i < N_TX_RINGS; i++)
-		spin_lock(&cp->tx_lock[i]);
+		spin_lock_nested(&cp->tx_lock[i], i);
 }
 
 static inline void cas_lock_all(struct cas *cp)
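
spin_lock_nested() exists for exactly this pattern: all cp->tx_lock[i] instances share one lockdep class, so acquiring several of them in a loop looks like recursive locking to lockdep and triggers a false deadlock report. Passing the ring index as the subclass declares the fixed 0..N-1 acquisition order as intentional. A minimal sketch with generic names:

	#include <linux/spinlock.h>

	/* Illustrative: take a small, fixed-order array of same-class locks. */
	static void example_lock_all(spinlock_t *locks, unsigned int n)
	{
		unsigned int i;

		for (i = 0; i < n; i++)
			spin_lock_nested(&locks[i], i);	/* subclass = index */
	}
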
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 36aa109416c4..c331b7ebc812 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1871,18 +1871,13 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
 	mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
 	phyid = be32_to_cpup(parp+1);
 	mdio = of_find_device_by_node(mdio_node);
-
-	if (strncmp(mdio->name, "gpio", 4) == 0) {
-		/* GPIO bitbang MDIO driver attached */
-		struct mii_bus *bus = dev_get_drvdata(&mdio->dev);
-
-		snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
-			 PHY_ID_FMT, bus->id, phyid);
-	} else {
-		/* davinci MDIO driver attached */
-		snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
-			 PHY_ID_FMT, mdio->name, phyid);
+	of_node_put(mdio_node);
+	if (!mdio) {
+		pr_err("Missing mdio platform device\n");
+		return -EINVAL;
 	}
+	snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
+		 PHY_ID_FMT, mdio->name, phyid);
 
 	mac_addr = of_get_mac_address(slave_node);
 	if (mac_addr)
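
Besides removing the GPIO-bitbang special case, the hunk adds the previously missing of_node_put(): of_find_node_by_phandle() returns the node with its refcount raised, and the caller must drop that reference once the lookup is done. A hedged sketch of the lookup-then-release pattern; the helper name is hypothetical:

	#include <linux/of.h>
	#include <linux/of_platform.h>

	static struct platform_device *example_find_mdio(phandle ph)
	{
		struct device_node *np = of_find_node_by_phandle(ph);
		struct platform_device *pdev;

		if (!np)
			return NULL;

		pdev = of_find_device_by_node(np);
		of_node_put(np);	/* drop the reference from the lookup */
		return pdev;
	}
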