author	Giuseppe CAVALLARO <peppe.cavallaro@st.com>	2013-03-26 00:43:06 -0400
committer	David S. Miller <davem@davemloft.net>	2013-03-26 12:53:36 -0400
commit	c24602ef86649376e9d71ea808cd877e414d340b (patch)
tree	3bf58e08f64c7a01b191eab5927886ccce77045c /drivers/net/ethernet/stmicro
parent	4a7d666a7202744af32d4da31fb52857b7d86850 (diff)
stmmac: support extend descriptors
This patch adds support for the extended descriptors available in the
chips newer than the 3.50. If the extended descriptors cannot be
supported, the driver detects this at runtime and continues to work
using the old style.

In detail, this support extends the main descriptor structure by adding
new descriptors: 4, 5, 6, 7. The desc4 gives us extra information about
the received ethernet payload when it is carrying PTP packets or
TCP/UDP/ICMP over IP packets. The descriptors 6 and 7 are used for
saving the HW L/H timestamps (PTP).

V2: this new version removes the Kconfig option added in the first
implementation, because all the checks to verify whether the extended
descriptors are actually supported now happen at probe time.

Signed-off-by: Giuseppe Cavallaro <peppe.cavallaro@st.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
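[Editor's note: the new layout places the hardware timestamp in des6/des7 of
the extended structure (Timestamp Low/High, see descs.h below). A minimal
sketch of how a caller could assemble the 64-bit PTP timestamp from an
extended descriptor; this helper is illustrative only and is not part of
this patch:

	/* Illustrative sketch (not in this patch): build the 64-bit HW
	 * timestamp from the fields this patch adds to dma_extended_desc:
	 * des6 = Tx/Rx Timestamp Low, des7 = Tx/Rx Timestamp High. */
	static u64 stmmac_rx_hwtstamp(struct dma_extended_desc *ep)
	{
		u64 ns = ep->des6;		/* Timestamp Low */

		ns += ((u64) ep->des7) << 32;	/* Timestamp High */
		return ns;
	}
]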
Diffstat (limited to 'drivers/net/ethernet/stmicro')
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/chain_mode.c      |  29
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/common.h          |  38
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/descs.h           |  51
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac1000.h       |   1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c   |   8
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c    |   4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/enh_desc.c        |  97
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/norm_desc.c       |  45
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac.h          |   7
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c  |  23
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c     | 443
11 files changed, 528 insertions(+), 218 deletions(-)
diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
index 08ff51e9c791..688c3f4f1781 100644
--- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
@@ -89,27 +89,38 @@ static unsigned int stmmac_is_jumbo_frm(int len, int enh_desc)
 	return ret;
 }
 
-static void stmmac_init_dma_chain(struct dma_desc *des, dma_addr_t phy_addr,
-				  unsigned int size)
+static void stmmac_init_dma_chain(void *des, dma_addr_t phy_addr,
+				  unsigned int size, unsigned int extend_desc)
 {
 	/*
 	 * In chained mode the des3 points to the next element in the ring.
 	 * The latest element has to point to the head.
 	 */
 	int i;
-	struct dma_desc *p = des;
 	dma_addr_t dma_phy = phy_addr;
 
-	for (i = 0; i < (size - 1); i++) {
-		dma_phy += sizeof(struct dma_desc);
-		p->des3 = (unsigned int)dma_phy;
-		p++;
+	if (extend_desc) {
+		struct dma_extended_desc *p = (struct dma_extended_desc *) des;
+		for (i = 0; i < (size - 1); i++) {
+			dma_phy += sizeof(struct dma_extended_desc);
+			p->basic.des3 = (unsigned int)dma_phy;
+			p++;
+		}
+		p->basic.des3 = (unsigned int)phy_addr;
+
+	} else {
+		struct dma_desc *p = (struct dma_desc *) des;
+		for (i = 0; i < (size - 1); i++) {
+			dma_phy += sizeof(struct dma_desc);
+			p->des3 = (unsigned int)dma_phy;
+			p++;
+		}
+		p->des3 = (unsigned int)phy_addr;
 	}
-	p->des3 = (unsigned int)phy_addr;
 }
 
 const struct stmmac_chain_mode_ops chain_mode_ops = {
+	.init = stmmac_init_dma_chain,
 	.is_jumbo_frm = stmmac_is_jumbo_frm,
 	.jumbo_frm = stmmac_jumbo_frm,
-	.init_dma_chain = stmmac_init_dma_chain,
 };
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index a29553211dee..8a04b7f23389 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -117,6 +117,29 @@ struct stmmac_extra_stats {
 	unsigned long irq_rx_path_in_lpi_mode_n;
 	unsigned long irq_rx_path_exit_lpi_mode_n;
 	unsigned long phy_eee_wakeup_error_n;
+	/* Extended RDES status */
+	unsigned long ip_hdr_err;
+	unsigned long ip_payload_err;
+	unsigned long ip_csum_bypassed;
+	unsigned long ipv4_pkt_rcvd;
+	unsigned long ipv6_pkt_rcvd;
+	unsigned long rx_msg_type_ext_no_ptp;
+	unsigned long rx_msg_type_sync;
+	unsigned long rx_msg_type_follow_up;
+	unsigned long rx_msg_type_delay_req;
+	unsigned long rx_msg_type_delay_resp;
+	unsigned long rx_msg_type_pdelay_req;
+	unsigned long rx_msg_type_pdelay_resp;
+	unsigned long rx_msg_type_pdelay_follow_up;
+	unsigned long ptp_frame_type;
+	unsigned long ptp_ver;
+	unsigned long timestamp_dropped;
+	unsigned long av_pkt_rcvd;
+	unsigned long av_tagged_pkt_rcvd;
+	unsigned long vlan_tag_priority_val;
+	unsigned long l3_filter_match;
+	unsigned long l4_filter_match;
+	unsigned long l3_l4_filter_no_match;
 };
 
 /* CSR Frequency Access Defines*/
@@ -260,11 +283,10 @@ struct dma_features {
 
 struct stmmac_desc_ops {
 	/* DMA RX descriptor ring initialization */
-	void (*init_rx_desc) (struct dma_desc *p, unsigned int ring_size,
-			      int disable_rx_ic, int mode);
+	void (*init_rx_desc) (struct dma_desc *p, int disable_rx_ic, int mode,
+			      int end);
 	/* DMA TX descriptor ring initialization */
-	void (*init_tx_desc) (struct dma_desc *p, unsigned int ring_size,
-			      int mode);
+	void (*init_tx_desc) (struct dma_desc *p, int mode, int end);
 
 	/* Invoked by the xmit function to prepare the tx descriptor */
 	void (*prepare_tx_desc) (struct dma_desc *p, int is_fs, int len,
@@ -294,12 +316,14 @@ struct stmmac_desc_ops {
 	/* Return the reception status looking at the RDES1 */
 	int (*rx_status) (void *data, struct stmmac_extra_stats *x,
 			  struct dma_desc *p);
+	void (*rx_extended_status) (void *data, struct stmmac_extra_stats *x,
+				    struct dma_extended_desc *p);
 };
 
 struct stmmac_dma_ops {
 	/* DMA core initialization */
 	int (*init) (void __iomem *ioaddr, int pbl, int fb, int mb,
-		     int burst_len, u32 dma_tx, u32 dma_rx);
+		     int burst_len, u32 dma_tx, u32 dma_rx, int atds);
 	/* Dump DMA registers */
 	void (*dump_regs) (void __iomem *ioaddr);
 	/* Set tx/rx threshold in the csr6 register
@@ -371,10 +395,10 @@ struct stmmac_ring_mode_ops {
 };
 
 struct stmmac_chain_mode_ops {
+	void (*init) (void *des, dma_addr_t phy_addr, unsigned int size,
+		      unsigned int extend_desc);
 	unsigned int (*is_jumbo_frm) (int len, int ehn_desc);
 	unsigned int (*jumbo_frm) (void *priv, struct sk_buff *skb, int csum);
-	void (*init_dma_chain) (struct dma_desc *des, dma_addr_t phy_addr,
-				unsigned int size);
 };
 
 struct mac_device_info {
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs.h b/drivers/net/ethernet/stmicro/stmmac/descs.h
index 223adf95fd03..2eca0c033038 100644
--- a/drivers/net/ethernet/stmicro/stmmac/descs.h
+++ b/drivers/net/ethernet/stmicro/stmmac/descs.h
@@ -24,6 +24,7 @@
 #ifndef __DESCS_H__
 #define __DESCS_H__
 
+/* Basic descriptor structure for normal and alternate descriptors */
 struct dma_desc {
 	/* Receive descriptor */
 	union {
@@ -60,7 +61,7 @@ struct dma_desc {
 		} rx;
 		struct {
 			/* RDES0 */
-			u32 payload_csum_error:1;
+			u32 rx_mac_addr:1;
 			u32 crc_error:1;
 			u32 dribbling:1;
 			u32 error_gmii:1;
@@ -162,13 +163,57 @@ struct dma_desc {
 	unsigned int des3;
 };
 
+/* Extended descriptor structure (supported by new SYNP GMAC generations) */
+struct dma_extended_desc {
+	struct dma_desc basic;
+	union {
+		struct {
+			u32 ip_payload_type:3;
+			u32 ip_hdr_err:1;
+			u32 ip_payload_err:1;
+			u32 ip_csum_bypassed:1;
+			u32 ipv4_pkt_rcvd:1;
+			u32 ipv6_pkt_rcvd:1;
+			u32 msg_type:4;
+			u32 ptp_frame_type:1;
+			u32 ptp_ver:1;
+			u32 timestamp_dropped:1;
+			u32 reserved:1;
+			u32 av_pkt_rcvd:1;
+			u32 av_tagged_pkt_rcvd:1;
+			u32 vlan_tag_priority_val:3;
+			u32 reserved3:3;
+			u32 l3_filter_match:1;
+			u32 l4_filter_match:1;
+			u32 l3_l4_filter_no_match:2;
+			u32 reserved4:4;
+		} erx;
+		struct {
+			u32 reserved;
+		} etx;
+	} des4;
+	unsigned int des5;	/* Reserved */
+	unsigned int des6;	/* Tx/Rx Timestamp Low */
+	unsigned int des7;	/* Tx/Rx Timestamp High */
+};
+
 /* Transmit checksum insertion control */
 enum tdes_csum_insertion {
 	cic_disabled = 0,	/* Checksum Insertion Control */
 	cic_only_ip = 1,	/* Only IP header */
-	cic_no_pseudoheader = 2,	/* IP header but pseudoheader
-					 * is not calculated */
+	/* IP header but pseudoheader is not calculated */
+	cic_no_pseudoheader = 2,
 	cic_full = 3,		/* IP header and pseudoheader */
 };
 
+/* Extended RDES4 definitions */
+#define RDES_EXT_NO_PTP			0
+#define RDES_EXT_SYNC			0x1
+#define RDES_EXT_FOLLOW_UP		0x2
+#define RDES_EXT_DELAY_REQ		0x3
+#define RDES_EXT_DELAY_RESP		0x4
+#define RDES_EXT_PDELAY_REQ		0x5
+#define RDES_EXT_PDELAY_RESP		0x6
+#define RDES_EXT_PDELAY_FOLLOW_UP	0x7
+
 #endif /* __DESCS_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index 7ad56afd6324..85466e5a70b7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -155,6 +155,7 @@ enum inter_frame_gap {
 /* Programmable burst length (passed thorugh platform)*/
 #define DMA_BUS_MODE_PBL_MASK	0x00003f00	/* Programmable Burst Len */
 #define DMA_BUS_MODE_PBL_SHIFT	8
+#define DMA_BUS_MODE_ATDS	0x00000080	/* Alternate Descriptor Size */
 
 enum rx_tx_priority_ratio {
 	double_ratio = 0x00004000,	/*2:1 */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
index bf83c03bfd06..f1c4b2c00aa5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -30,8 +30,8 @@
 #include "dwmac1000.h"
 #include "dwmac_dma.h"
 
-static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, int fb,
-			      int mb, int burst_len, u32 dma_tx, u32 dma_rx)
+static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb,
+			      int burst_len, u32 dma_tx, u32 dma_rx, int atds)
 {
 	u32 value = readl(ioaddr + DMA_BUS_MODE);
 	int limit;
@@ -73,6 +73,10 @@ static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, int fb,
 #ifdef CONFIG_STMMAC_DA
 	value |= DMA_BUS_MODE_DA;	/* Rx has priority over tx */
 #endif
+
+	if (atds)
+		value |= DMA_BUS_MODE_ATDS;
+
 	writel(value, ioaddr + DMA_BUS_MODE);
 
 	/* In case of GMAC AXI configuration, program the DMA_AXI_BUS_MODE
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
index c2b4d55a79b6..e979a8b2ae42 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
@@ -32,8 +32,8 @@
 #include "dwmac100.h"
 #include "dwmac_dma.h"
 
-static int dwmac100_dma_init(void __iomem *ioaddr, int pbl, int fb,
-			     int mb, int burst_len, u32 dma_tx, u32 dma_rx)
+static int dwmac100_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb,
+			     int burst_len, u32 dma_tx, u32 dma_rx, int atds)
 {
 	u32 value = readl(ioaddr + DMA_BUS_MODE);
 	int limit;
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index 62f9f4e100fd..c1b9ab23b3c5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -150,6 +150,57 @@ static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err)
 	return ret;
 }
 
+static void enh_desc_get_ext_status(void *data, struct stmmac_extra_stats *x,
+				    struct dma_extended_desc *p)
+{
+	if (unlikely(p->basic.des01.erx.rx_mac_addr)) {
+		if (p->des4.erx.ip_hdr_err)
+			x->ip_hdr_err++;
+		if (p->des4.erx.ip_payload_err)
+			x->ip_payload_err++;
+		if (p->des4.erx.ip_csum_bypassed)
+			x->ip_csum_bypassed++;
+		if (p->des4.erx.ipv4_pkt_rcvd)
+			x->ipv4_pkt_rcvd++;
+		if (p->des4.erx.ipv6_pkt_rcvd)
+			x->ipv6_pkt_rcvd++;
+		if (p->des4.erx.msg_type == RDES_EXT_SYNC)
+			x->rx_msg_type_sync++;
+		else if (p->des4.erx.msg_type == RDES_EXT_FOLLOW_UP)
+			x->rx_msg_type_follow_up++;
+		else if (p->des4.erx.msg_type == RDES_EXT_DELAY_REQ)
+			x->rx_msg_type_delay_req++;
+		else if (p->des4.erx.msg_type == RDES_EXT_DELAY_RESP)
+			x->rx_msg_type_delay_resp++;
+		else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_REQ)
+			x->rx_msg_type_pdelay_req++;
+		else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_RESP)
+			x->rx_msg_type_pdelay_resp++;
+		else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_FOLLOW_UP)
+			x->rx_msg_type_pdelay_follow_up++;
+		else
+			x->rx_msg_type_ext_no_ptp++;
+		if (p->des4.erx.ptp_frame_type)
+			x->ptp_frame_type++;
+		if (p->des4.erx.ptp_ver)
+			x->ptp_ver++;
+		if (p->des4.erx.timestamp_dropped)
+			x->timestamp_dropped++;
+		if (p->des4.erx.av_pkt_rcvd)
+			x->av_pkt_rcvd++;
+		if (p->des4.erx.av_tagged_pkt_rcvd)
+			x->av_tagged_pkt_rcvd++;
+		if (p->des4.erx.vlan_tag_priority_val)
+			x->vlan_tag_priority_val++;
+		if (p->des4.erx.l3_filter_match)
+			x->l3_filter_match++;
+		if (p->des4.erx.l4_filter_match)
+			x->l4_filter_match++;
+		if (p->des4.erx.l3_l4_filter_no_match)
+			x->l3_l4_filter_no_match++;
+	}
+}
+
 static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
 				  struct dma_desc *p)
 {
@@ -198,7 +249,7 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
 	 * At any rate, we need to understand if the CSUM hw computation is ok
 	 * and report this info to the upper layers. */
 	ret = enh_desc_coe_rdes0(p->des01.erx.ipc_csum_error,
-		p->des01.erx.frame_type, p->des01.erx.payload_csum_error);
+		p->des01.erx.frame_type, p->des01.erx.rx_mac_addr);
 
 	if (unlikely(p->des01.erx.dribbling)) {
 		CHIP_DBG(KERN_ERR "GMAC RX: dribbling error\n");
@@ -225,41 +276,32 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
 		x->rx_vlan++;
 	}
 #endif
+
 	return ret;
 }
 
-static void enh_desc_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
-				  int disable_rx_ic, int mode)
+static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
+				  int mode, int end)
 {
-	int i;
-	for (i = 0; i < ring_size; i++) {
-		p->des01.erx.own = 1;
-		p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1;
+	p->des01.erx.own = 1;
+	p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1;
 
-		if (mode == STMMAC_CHAIN_MODE)
-			ehn_desc_rx_set_on_chain(p, (i == ring_size - 1));
-		else
-			ehn_desc_rx_set_on_ring(p, (i == ring_size - 1));
+	if (mode == STMMAC_CHAIN_MODE)
+		ehn_desc_rx_set_on_chain(p, end);
+	else
+		ehn_desc_rx_set_on_ring(p, end);
 
-		if (disable_rx_ic)
-			p->des01.erx.disable_ic = 1;
-		p++;
-	}
+	if (disable_rx_ic)
+		p->des01.erx.disable_ic = 1;
 }
 
-static void enh_desc_init_tx_desc(struct dma_desc *p, unsigned int ring_size,
-				  int mode)
+static void enh_desc_init_tx_desc(struct dma_desc *p, int mode, int end)
 {
-	int i;
-
-	for (i = 0; i < ring_size; i++) {
-		p->des01.etx.own = 0;
-		if (mode == STMMAC_CHAIN_MODE)
-			ehn_desc_tx_set_on_chain(p, (i == ring_size - 1));
-		else
-			ehn_desc_tx_set_on_ring(p, (i == ring_size - 1));
-		p++;
-	}
+	p->des01.etx.own = 0;
+	if (mode == STMMAC_CHAIN_MODE)
+		ehn_desc_tx_set_on_chain(p, end);
+	else
+		ehn_desc_tx_set_on_ring(p, end);
 }
 
 static int enh_desc_get_tx_owner(struct dma_desc *p)
@@ -352,4 +394,5 @@ const struct stmmac_desc_ops enh_desc_ops = {
 	.set_tx_owner = enh_desc_set_tx_owner,
 	.set_rx_owner = enh_desc_set_rx_owner,
 	.get_rx_frame_len = enh_desc_get_rx_frame_len,
+	.rx_extended_status = enh_desc_get_ext_status,
 };
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index 88df0b48e35b..47d509435ebb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -122,37 +122,28 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
 	return ret;
 }
 
-static void ndesc_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
-			       int disable_rx_ic, int mode)
+static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,
+			       int end)
 {
-	int i;
-	for (i = 0; i < ring_size; i++) {
-		p->des01.rx.own = 1;
-		p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1;
+	p->des01.rx.own = 1;
+	p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1;
 
-		if (mode == STMMAC_CHAIN_MODE)
-			ndesc_rx_set_on_chain(p, (i == ring_size - 1));
-		else
-			ndesc_rx_set_on_ring(p, (i == ring_size - 1));
+	if (mode == STMMAC_CHAIN_MODE)
+		ndesc_rx_set_on_chain(p, end);
+	else
+		ndesc_rx_set_on_ring(p, end);
 
-		if (disable_rx_ic)
-			p->des01.rx.disable_ic = 1;
-		p++;
-	}
+	if (disable_rx_ic)
+		p->des01.rx.disable_ic = 1;
 }
 
-static void ndesc_init_tx_desc(struct dma_desc *p, unsigned int ring_size,
-			       int mode)
+static void ndesc_init_tx_desc(struct dma_desc *p, int mode, int end)
 {
-	int i;
-	for (i = 0; i < ring_size; i++) {
-		p->des01.tx.own = 0;
-		if (mode == STMMAC_CHAIN_MODE)
-			ndesc_tx_set_on_chain(p, (i == (ring_size - 1)));
-		else
-			ndesc_tx_set_on_ring(p, (i == (ring_size - 1)));
-		p++;
-	}
+	p->des01.tx.own = 0;
+	if (mode == STMMAC_CHAIN_MODE)
+		ndesc_tx_set_on_chain(p, end);
+	else
+		ndesc_tx_set_on_ring(p, end);
 }
 
 static int ndesc_get_tx_owner(struct dma_desc *p)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index e5f2f333616b..9637d3e86af9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -34,7 +34,8 @@
 
 struct stmmac_priv {
 	/* Frequently used values are kept adjacent for cache effect */
-	struct dma_desc *dma_tx ____cacheline_aligned;
+	struct dma_desc *dma_tx ____cacheline_aligned;	/* Basic TX desc */
+	struct dma_extended_desc *dma_etx;	/* Extended TX descriptor */
 	dma_addr_t dma_tx_phy;
 	struct sk_buff **tx_skbuff;
 	unsigned int cur_tx;
@@ -42,7 +43,8 @@ struct stmmac_priv {
 	unsigned int dma_tx_size;
 	int tx_coalesce;
 
-	struct dma_desc *dma_rx ;
+	struct dma_desc *dma_rx;	/* Basic RX descriptor */
+	struct dma_extended_desc *dma_erx;	/* Extended RX descriptor */
 	unsigned int cur_rx;
 	unsigned int dirty_rx;
 	struct sk_buff **rx_skbuff;
@@ -94,6 +96,7 @@ struct stmmac_priv {
 	int use_riwt;
 	u32 rx_riwt;
 	unsigned int mode;
+	int extend_desc;
 };
 
 extern int phyaddr;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index d1ac39c1b05d..f6ad751925e8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -108,6 +108,29 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = {
 	STMMAC_STAT(irq_rx_path_in_lpi_mode_n),
 	STMMAC_STAT(irq_rx_path_exit_lpi_mode_n),
 	STMMAC_STAT(phy_eee_wakeup_error_n),
+	/* Extended RDES status */
+	STMMAC_STAT(ip_hdr_err),
+	STMMAC_STAT(ip_payload_err),
+	STMMAC_STAT(ip_csum_bypassed),
+	STMMAC_STAT(ipv4_pkt_rcvd),
+	STMMAC_STAT(ipv6_pkt_rcvd),
+	STMMAC_STAT(rx_msg_type_ext_no_ptp),
+	STMMAC_STAT(rx_msg_type_sync),
+	STMMAC_STAT(rx_msg_type_follow_up),
+	STMMAC_STAT(rx_msg_type_delay_req),
+	STMMAC_STAT(rx_msg_type_delay_resp),
+	STMMAC_STAT(rx_msg_type_pdelay_req),
+	STMMAC_STAT(rx_msg_type_pdelay_resp),
+	STMMAC_STAT(rx_msg_type_pdelay_follow_up),
+	STMMAC_STAT(ptp_frame_type),
+	STMMAC_STAT(ptp_ver),
+	STMMAC_STAT(timestamp_dropped),
+	STMMAC_STAT(av_pkt_rcvd),
+	STMMAC_STAT(av_tagged_pkt_rcvd),
+	STMMAC_STAT(vlan_tag_priority_val),
+	STMMAC_STAT(l3_filter_match),
+	STMMAC_STAT(l4_filter_match),
+	STMMAC_STAT(l3_l4_filter_no_match),
 };
 #define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats)
 
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index bbee6b32ed63..96fbf86366a9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -468,29 +468,56 @@ static int stmmac_init_phy(struct net_device *dev)
 }
 
 /**
- * display_ring
+ * stmmac_display_ring
  * @p: pointer to the ring.
  * @size: size of the ring.
- * Description: display all the descriptors within the ring.
+ * Description: display the control/status and buffer descriptors.
  */
-static void display_ring(struct dma_desc *p, int size)
+static void stmmac_display_ring(void *head, int size, int extend_desc)
 {
-	struct tmp_s {
-		u64 a;
-		unsigned int b;
-		unsigned int c;
-	};
 	int i;
+	struct dma_extended_desc *ep = (struct dma_extended_desc *) head;
+	struct dma_desc *p = (struct dma_desc *) head;
+
 	for (i = 0; i < size; i++) {
-		struct tmp_s *x = (struct tmp_s *)(p + i);
-		pr_info("\t%d [0x%x]: DES0=0x%x DES1=0x%x BUF1=0x%x BUF2=0x%x",
-			i, (unsigned int)virt_to_phys(&p[i]),
-			(unsigned int)(x->a), (unsigned int)((x->a) >> 32),
-			x->b, x->c);
+		u64 x;
+		if (extend_desc) {
+			x = *(u64 *) ep;
+			pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
+				i, (unsigned int) virt_to_phys(ep),
+				(unsigned int) x, (unsigned int) (x >> 32),
+				ep->basic.des2, ep->basic.des3);
+			ep++;
+		} else {
+			x = *(u64 *) p;
+			pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x",
+				i, (unsigned int) virt_to_phys(p),
+				(unsigned int) x, (unsigned int) (x >> 32),
+				p->des2, p->des3);
+			p++;
+		}
 		pr_info("\n");
 	}
 }
 
+static void stmmac_display_rings(struct stmmac_priv *priv)
+{
+	unsigned int txsize = priv->dma_tx_size;
+	unsigned int rxsize = priv->dma_rx_size;
+
+	if (priv->extend_desc) {
+		pr_info("Extended RX descriptor ring:\n");
+		stmmac_display_ring((void *) priv->dma_erx, rxsize, 1);
+		pr_info("Extended TX descriptor ring:\n");
+		stmmac_display_ring((void *) priv->dma_etx, txsize, 1);
+	} else {
+		pr_info("RX descriptor ring:\n");
+		stmmac_display_ring((void *)priv->dma_rx, rxsize, 0);
+		pr_info("TX descriptor ring:\n");
+		stmmac_display_ring((void *)priv->dma_tx, txsize, 0);
+	}
+}
+
 static int stmmac_set_bfsize(int mtu, int bufsize)
 {
 	int ret = bufsize;
@@ -507,6 +534,59 @@ static int stmmac_set_bfsize(int mtu, int bufsize)
 	return ret;
 }
 
+static void stmmac_clear_descriptors(struct stmmac_priv *priv)
+{
+	int i;
+	unsigned int txsize = priv->dma_tx_size;
+	unsigned int rxsize = priv->dma_rx_size;
+
+	/* Clear the Rx/Tx descriptors */
+	for (i = 0; i < rxsize; i++)
+		if (priv->extend_desc)
+			priv->hw->desc->init_rx_desc(&priv->dma_erx[i].basic,
+						     priv->use_riwt, priv->mode,
+						     (i == rxsize - 1));
+		else
+			priv->hw->desc->init_rx_desc(&priv->dma_rx[i],
+						     priv->use_riwt, priv->mode,
+						     (i == rxsize - 1));
+	for (i = 0; i < txsize; i++)
+		if (priv->extend_desc)
+			priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
+						     priv->mode,
+						     (i == txsize - 1));
+		else
+			priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
+						     priv->mode,
+						     (i == txsize - 1));
+}
+
+static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
+				  int i)
+{
+	struct sk_buff *skb;
+
+	skb = __netdev_alloc_skb(priv->dev, priv->dma_buf_sz + NET_IP_ALIGN,
+				 GFP_KERNEL);
+	if (unlikely(skb == NULL)) {
+		pr_err("%s: Rx init fails; skb is NULL\n", __func__);
+		return 1;
+	}
+	skb_reserve(skb, NET_IP_ALIGN);
+	priv->rx_skbuff[i] = skb;
+	priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
+						priv->dma_buf_sz,
+						DMA_FROM_DEVICE);
+
+	p->des2 = priv->rx_skbuff_dma[i];
+
+	if ((priv->mode == STMMAC_RING_MODE) &&
+	    (priv->dma_buf_sz == BUF_SIZE_16KiB))
+		priv->hw->ring->init_desc3(p);
+
+	return 0;
+}
+
 /**
  * init_dma_desc_rings - init the RX/TX descriptor rings
  * @dev: net device structure
@@ -518,11 +598,9 @@ static void init_dma_desc_rings(struct net_device *dev)
 {
 	int i;
 	struct stmmac_priv *priv = netdev_priv(dev);
-	struct sk_buff *skb;
 	unsigned int txsize = priv->dma_tx_size;
 	unsigned int rxsize = priv->dma_rx_size;
 	unsigned int bfsize = 0;
-	int dis_ic = 0;
 
 	/* Set the max buffer size according to the DESC mode
 	 * and the MTU. Note that RING mode allows 16KiB bsize. */
@@ -535,50 +613,53 @@ static void init_dma_desc_rings(struct net_device *dev)
 	DBG(probe, INFO, "stmmac: txsize %d, rxsize %d, bfsize %d\n",
 	    txsize, rxsize, bfsize);
 
+	if (priv->extend_desc) {
+		priv->dma_erx = dma_alloc_coherent(priv->device, rxsize *
+						   sizeof(struct
+							  dma_extended_desc),
+						   &priv->dma_rx_phy,
+						   GFP_KERNEL);
+		priv->dma_etx = dma_alloc_coherent(priv->device, txsize *
+						   sizeof(struct
+							  dma_extended_desc),
+						   &priv->dma_tx_phy,
+						   GFP_KERNEL);
+		if ((!priv->dma_erx) || (!priv->dma_etx))
+			return;
+	} else {
+		priv->dma_rx = dma_alloc_coherent(priv->device, rxsize *
+						  sizeof(struct dma_desc),
+						  &priv->dma_rx_phy,
+						  GFP_KERNEL);
+		priv->dma_tx = dma_alloc_coherent(priv->device, txsize *
+						  sizeof(struct dma_desc),
+						  &priv->dma_tx_phy,
+						  GFP_KERNEL);
+		if ((!priv->dma_rx) || (!priv->dma_tx))
+			return;
+	}
+
 	priv->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t),
 					    GFP_KERNEL);
 	priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *),
 					GFP_KERNEL);
-	priv->dma_rx = dma_alloc_coherent(priv->device,
-					  rxsize * sizeof(struct dma_desc),
-					  &priv->dma_rx_phy, GFP_KERNEL);
 	priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *),
 					GFP_KERNEL);
-	priv->dma_tx = dma_alloc_coherent(priv->device,
-					  txsize * sizeof(struct dma_desc),
-					  &priv->dma_tx_phy, GFP_KERNEL);
-
-	if ((priv->dma_rx == NULL) || (priv->dma_tx == NULL))
-		return;
-
-	DBG(probe, INFO, "stmmac (%s) DMA desc: virt addr (Rx %p, "
-	    "Tx %p)\n\tDMA phy addr (Rx 0x%08x, Tx 0x%08x)\n",
-	    dev->name, priv->dma_rx, priv->dma_tx,
-	    (unsigned int)priv->dma_rx_phy, (unsigned int)priv->dma_tx_phy);
+	if (netif_msg_drv(priv))
+		pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__,
+			 (u32) priv->dma_rx_phy, (u32) priv->dma_tx_phy);
 
 	/* RX INITIALIZATION */
-	DBG(probe, INFO, "stmmac: SKB addresses:\n"
-			 "skb\t\tskb data\tdma data\n");
-
+	DBG(probe, INFO, "stmmac: SKB addresses:\nskb\t\tskb data\tdma data\n");
 	for (i = 0; i < rxsize; i++) {
-		struct dma_desc *p = priv->dma_rx + i;
+		struct dma_desc *p;
+		if (priv->extend_desc)
+			p = &((priv->dma_erx + i)->basic);
+		else
+			p = priv->dma_rx + i;
 
-		skb = __netdev_alloc_skb(dev, bfsize + NET_IP_ALIGN,
-					 GFP_KERNEL);
-		if (unlikely(skb == NULL)) {
-			pr_err("%s: Rx init fails; skb is NULL\n", __func__);
+		if (stmmac_init_rx_buffers(priv, p, i))
 			break;
-		}
-		skb_reserve(skb, NET_IP_ALIGN);
-		priv->rx_skbuff[i] = skb;
-		priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
-							bfsize, DMA_FROM_DEVICE);
-
-		p->des2 = priv->rx_skbuff_dma[i];
-
-		if ((priv->mode == STMMAC_RING_MODE) &&
-		    (bfsize == BUF_SIZE_16KiB))
-			priv->hw->ring->init_desc3(p);
 
 		DBG(probe, INFO, "[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i],
 		    priv->rx_skbuff[i]->data, priv->rx_skbuff_dma[i]);
@@ -588,35 +669,39 @@ static void init_dma_desc_rings(struct net_device *dev)
 	priv->dma_buf_sz = bfsize;
 	buf_sz = bfsize;
 
+	/* Setup the chained descriptor addresses */
+	if (priv->mode == STMMAC_CHAIN_MODE) {
+		if (priv->extend_desc) {
+			priv->hw->chain->init(priv->dma_erx, priv->dma_rx_phy,
+					      rxsize, 1);
+			priv->hw->chain->init(priv->dma_etx, priv->dma_tx_phy,
+					      txsize, 1);
+		} else {
+			priv->hw->chain->init(priv->dma_rx, priv->dma_rx_phy,
+					      rxsize, 0);
+			priv->hw->chain->init(priv->dma_tx, priv->dma_tx_phy,
+					      txsize, 0);
+		}
+	}
+
 	/* TX INITIALIZATION */
 	for (i = 0; i < txsize; i++) {
+		struct dma_desc *p;
+		if (priv->extend_desc)
+			p = &((priv->dma_etx + i)->basic);
+		else
+			p = priv->dma_tx + i;
+		p->des2 = 0;
 		priv->tx_skbuff[i] = NULL;
-		priv->dma_tx[i].des2 = 0;
 	}
 
-	/* In case of Chained mode this sets the des3 to the next
-	 * element in the chain */
-	if (priv->mode == STMMAC_CHAIN_MODE) {
-		priv->hw->chain->init_dma_chain(priv->dma_rx, priv->dma_rx_phy,
-						rxsize);
-		priv->hw->chain->init_dma_chain(priv->dma_tx, priv->dma_tx_phy,
-						txsize);
-	}
 	priv->dirty_tx = 0;
 	priv->cur_tx = 0;
 
-	if (priv->use_riwt)
-		dis_ic = 1;
-	/* Clear the Rx/Tx descriptors */
-	priv->hw->desc->init_rx_desc(priv->dma_rx, rxsize, dis_ic, priv->mode);
-	priv->hw->desc->init_tx_desc(priv->dma_tx, txsize, priv->mode);
+	stmmac_clear_descriptors(priv);
 
-	if (netif_msg_hw(priv)) {
-		pr_info("RX descriptor ring:\n");
-		display_ring(priv->dma_rx, rxsize);
-		pr_info("TX descriptor ring:\n");
-		display_ring(priv->dma_tx, txsize);
-	}
+	if (netif_msg_hw(priv))
+		stmmac_display_rings(priv);
 }
 
 static void dma_free_rx_skbufs(struct stmmac_priv *priv)
@@ -639,7 +724,12 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv)
 
 	for (i = 0; i < priv->dma_tx_size; i++) {
 		if (priv->tx_skbuff[i] != NULL) {
-			struct dma_desc *p = priv->dma_tx + i;
+			struct dma_desc *p;
+			if (priv->extend_desc)
+				p = &((priv->dma_etx + i)->basic);
+			else
+				p = priv->dma_tx + i;
+
 			if (p->des2)
 				dma_unmap_single(priv->device, p->des2,
 						 priv->hw->desc->get_tx_len(p),
@@ -658,12 +748,21 @@ static void free_dma_desc_resources(struct stmmac_priv *priv)
 
 	/* Free the region of consistent memory previously allocated for
 	 * the DMA */
-	dma_free_coherent(priv->device,
-			  priv->dma_tx_size * sizeof(struct dma_desc),
-			  priv->dma_tx, priv->dma_tx_phy);
-	dma_free_coherent(priv->device,
-			  priv->dma_rx_size * sizeof(struct dma_desc),
-			  priv->dma_rx, priv->dma_rx_phy);
+	if (!priv->extend_desc) {
+		dma_free_coherent(priv->device,
+				  priv->dma_tx_size * sizeof(struct dma_desc),
+				  priv->dma_tx, priv->dma_tx_phy);
+		dma_free_coherent(priv->device,
+				  priv->dma_rx_size * sizeof(struct dma_desc),
+				  priv->dma_rx, priv->dma_rx_phy);
+	} else {
+		dma_free_coherent(priv->device, priv->dma_tx_size *
+				  sizeof(struct dma_extended_desc),
+				  priv->dma_etx, priv->dma_tx_phy);
+		dma_free_coherent(priv->device, priv->dma_rx_size *
+				  sizeof(struct dma_extended_desc),
+				  priv->dma_erx, priv->dma_rx_phy);
+	}
 	kfree(priv->rx_skbuff_dma);
 	kfree(priv->rx_skbuff);
 	kfree(priv->tx_skbuff);
@@ -710,13 +809,18 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
 		int last;
 		unsigned int entry = priv->dirty_tx % txsize;
 		struct sk_buff *skb = priv->tx_skbuff[entry];
-		struct dma_desc *p = priv->dma_tx + entry;
+		struct dma_desc *p;
+
+		if (priv->extend_desc)
+			p = (struct dma_desc *) (priv->dma_etx + entry);
+		else
+			p = priv->dma_tx + entry;
 
 		/* Check if the descriptor is owned by the DMA. */
 		if (priv->hw->desc->get_tx_owner(p))
 			break;
 
-		/* Verify tx error by looking at the last segment */
+		/* Verify tx error by looking at the last segment. */
 		last = priv->hw->desc->get_tx_ls(p);
 		if (likely(last)) {
 			int tx_error =
@@ -785,12 +889,21 @@ static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv)
  */
 static void stmmac_tx_err(struct stmmac_priv *priv)
 {
+	int i;
+	int txsize = priv->dma_tx_size;
 	netif_stop_queue(priv->dev);
 
 	priv->hw->dma->stop_tx(priv->ioaddr);
 	dma_free_tx_skbufs(priv);
-	priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size,
-				     priv->mode);
+	for (i = 0; i < txsize; i++)
+		if (priv->extend_desc)
+			priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
+						     priv->mode,
+						     (i == txsize - 1));
+		else
+			priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
+						     priv->mode,
+						     (i == txsize - 1));
 	priv->dirty_tx = 0;
 	priv->cur_tx = 0;
 	priv->hw->dma->start_tx(priv->ioaddr);
@@ -864,6 +977,14 @@ static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
 {
 	if (priv->plat->enh_desc) {
 		pr_info(" Enhanced/Alternate descriptors\n");
+
+		/* GMAC older than 3.50 has no extended descriptors */
+		if (priv->synopsys_id >= DWMAC_CORE_3_50) {
+			pr_info("\tEnabled extended descriptors\n");
+			priv->extend_desc = 1;
+		} else
+			pr_warn("Extended descriptors not supported\n");
+
 		priv->hw->desc = &enh_desc_ops;
 	} else {
 		pr_info(" Normal descriptors\n");
@@ -950,6 +1071,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
 {
 	int pbl = DEFAULT_DMA_PBL, fixed_burst = 0, burst_len = 0;
 	int mixed_burst = 0;
+	int atds = 0;
 
 	/* Some DMA parameters can be passed from the platform;
 	 * in case of these are not passed we keep a default
@@ -961,9 +1083,12 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
 		burst_len = priv->plat->dma_cfg->burst_len;
 	}
 
+	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
+		atds = 1;
+
 	return priv->hw->dma->init(priv->ioaddr, pbl, fixed_burst, mixed_burst,
 				   burst_len, priv->dma_tx_phy,
-				   priv->dma_rx_phy);
+				   priv->dma_rx_phy, atds);
 }
 
 /**
@@ -1237,7 +1362,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
 
-	desc = priv->dma_tx + entry;
+	if (priv->extend_desc)
+		desc = (struct dma_desc *) (priv->dma_etx + entry);
+	else
+		desc = priv->dma_tx + entry;
+
 	first = desc;
 
 #ifdef STMMAC_XMIT_DEBUG
@@ -1268,14 +1397,17 @@
 		priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
 						csum_insertion, priv->mode);
 	} else
-		desc = priv->dma_tx + entry;
+		desc = first;
 
 	for (i = 0; i < nfrags; i++) {
 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 		int len = skb_frag_size(frag);
 
 		entry = (++priv->cur_tx) % txsize;
-		desc = priv->dma_tx + entry;
+		if (priv->extend_desc)
+			desc = (struct dma_desc *) (priv->dma_etx + entry);
+		else
+			desc = priv->dma_tx + entry;
 
 		TX_DBG("\t[entry %d] segment len: %d\n", entry, len);
 		desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len,
@@ -1319,7 +1451,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 			"first=%p, nfrags=%d\n",
 			(priv->cur_tx % txsize), (priv->dirty_tx % txsize),
 			entry, first, nfrags);
-		display_ring(priv->dma_tx, txsize);
+		if (priv->extend_desc)
+			stmmac_display_ring((void *)priv->dma_etx, txsize, 1);
+		else
+			stmmac_display_ring((void *)priv->dma_tx, txsize, 0);
+
 		pr_info(">>> frame to be transmitted: ");
 		print_pkt(skb->data, skb->len);
 	}
@@ -1344,10 +1480,16 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
 {
 	unsigned int rxsize = priv->dma_rx_size;
 	int bfsize = priv->dma_buf_sz;
-	struct dma_desc *p = priv->dma_rx;
 
 	for (; priv->cur_rx - priv->dirty_rx > 0; priv->dirty_rx++) {
 		unsigned int entry = priv->dirty_rx % rxsize;
+		struct dma_desc *p;
+
+		if (priv->extend_desc)
+			p = (struct dma_desc *) (priv->dma_erx + entry);
+		else
+			p = priv->dma_rx + entry;
+
 		if (likely(priv->rx_skbuff[entry] == NULL)) {
 			struct sk_buff *skb;
 
@@ -1361,16 +1503,16 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
 				dma_map_single(priv->device, skb->data, bfsize,
 					       DMA_FROM_DEVICE);
 
-			(p + entry)->des2 = priv->rx_skbuff_dma[entry];
+			p->des2 = priv->rx_skbuff_dma[entry];
 
 			if (unlikely((priv->mode == STMMAC_RING_MODE) &&
 				     (priv->plat->has_gmac)))
-				priv->hw->ring->refill_desc3(bfsize, p + entry);
+				priv->hw->ring->refill_desc3(bfsize, p);
 
 			RX_DBG(KERN_INFO "\trefill entry #%d\n", entry);
 		}
 		wmb();
-		priv->hw->desc->set_rx_owner(p + entry);
+		priv->hw->desc->set_rx_owner(p);
 		wmb();
 	}
 }
@@ -1381,30 +1523,47 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
 	unsigned int entry = priv->cur_rx % rxsize;
 	unsigned int next_entry;
 	unsigned int count = 0;
-	struct dma_desc *p = priv->dma_rx + entry;
-	struct dma_desc *p_next;
 
 #ifdef STMMAC_RX_DEBUG
 	if (netif_msg_hw(priv)) {
 		pr_debug(">>> stmmac_rx: descriptor ring:\n");
-		display_ring(priv->dma_rx, rxsize);
+		if (priv->extend_desc)
+			stmmac_display_ring((void *) priv->dma_erx, rxsize, 1);
+		else
+			stmmac_display_ring((void *)priv->dma_rx, rxsize, 0);
 	}
 #endif
-	while (!priv->hw->desc->get_rx_owner(p)) {
+	while (count < limit) {
 		int status;
+		struct dma_desc *p, *p_next;
 
-		if (count >= limit)
+		if (priv->extend_desc)
+			p = (struct dma_desc *) (priv->dma_erx + entry);
+		else
+			p = priv->dma_rx + entry;
+
+		if (priv->hw->desc->get_rx_owner(p))
 			break;
 
 		count++;
 
 		next_entry = (++priv->cur_rx) % rxsize;
-		p_next = priv->dma_rx + next_entry;
+		if (priv->extend_desc)
+			p_next = (struct dma_desc *) (priv->dma_erx +
+						      next_entry);
+		else
+			p_next = priv->dma_rx + next_entry;
+
 		prefetch(p_next);
 
 		/* read the status of the incoming frame */
-		status = (priv->hw->desc->rx_status(&priv->dev->stats,
-						    &priv->xstats, p));
+		status = priv->hw->desc->rx_status(&priv->dev->stats,
+						   &priv->xstats, p);
+		if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
+			priv->hw->desc->rx_extended_status(&priv->dev->stats,
+							   &priv->xstats,
+							   priv->dma_erx +
+							   entry);
 		if (unlikely(status == discard_frame))
 			priv->dev->stats.rx_errors++;
 		else {
@@ -1459,7 +1618,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
 			priv->dev->stats.rx_bytes += frame_len;
 		}
 		entry = next_entry;
-		p = p_next;	/* use prefetched values */
 	}
 
 	stmmac_rx_refill(priv);
@@ -1697,40 +1855,51 @@ static struct dentry *stmmac_fs_dir;
 static struct dentry *stmmac_rings_status;
 static struct dentry *stmmac_dma_cap;
 
-static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
+static void sysfs_display_ring(void *head, int size, int extend_desc,
+			       struct seq_file *seq)
 {
-	struct tmp_s {
-		u64 a;
-		unsigned int b;
-		unsigned int c;
-	};
 	int i;
-	struct net_device *dev = seq->private;
-	struct stmmac_priv *priv = netdev_priv(dev);
-
-	seq_printf(seq, "=======================\n");
-	seq_printf(seq, " RX descriptor ring\n");
-	seq_printf(seq, "=======================\n");
+	struct dma_extended_desc *ep = (struct dma_extended_desc *) head;
+	struct dma_desc *p = (struct dma_desc *) head;
 
-	for (i = 0; i < priv->dma_rx_size; i++) {
-		struct tmp_s *x = (struct tmp_s *)(priv->dma_rx + i);
-		seq_printf(seq, "[%d] DES0=0x%x DES1=0x%x BUF1=0x%x BUF2=0x%x",
-			   i, (unsigned int)(x->a),
-			   (unsigned int)((x->a) >> 32), x->b, x->c);
+	for (i = 0; i < size; i++) {
+		u64 x;
+		if (extend_desc) {
+			x = *(u64 *) ep;
+			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
+				   i, (unsigned int) virt_to_phys(ep),
+				   (unsigned int) x, (unsigned int) (x >> 32),
+				   ep->basic.des2, ep->basic.des3);
+			ep++;
+		} else {
+			x = *(u64 *) p;
+			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
+				   i, (unsigned int) virt_to_phys(p),
+				   (unsigned int) x, (unsigned int) (x >> 32),
+				   p->des2, p->des3);
+			p++;
+		}
 		seq_printf(seq, "\n");
 	}
+}
 
-	seq_printf(seq, "\n");
-	seq_printf(seq, "=======================\n");
-	seq_printf(seq, " TX descriptor ring\n");
-	seq_printf(seq, "=======================\n");
+static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
+{
+	struct net_device *dev = seq->private;
+	struct stmmac_priv *priv = netdev_priv(dev);
+	unsigned int txsize = priv->dma_tx_size;
+	unsigned int rxsize = priv->dma_rx_size;
 
-	for (i = 0; i < priv->dma_tx_size; i++) {
-		struct tmp_s *x = (struct tmp_s *)(priv->dma_tx + i);
-		seq_printf(seq, "[%d] DES0=0x%x DES1=0x%x BUF1=0x%x BUF2=0x%x",
-			   i, (unsigned int)(x->a),
-			   (unsigned int)((x->a) >> 32), x->b, x->c);
-		seq_printf(seq, "\n");
+	if (priv->extend_desc) {
+		seq_printf(seq, "Extended RX descriptor ring:\n");
+		sysfs_display_ring((void *) priv->dma_erx, rxsize, 1, seq);
+		seq_printf(seq, "Extended TX descriptor ring:\n");
+		sysfs_display_ring((void *) priv->dma_etx, txsize, 1, seq);
+	} else {
+		seq_printf(seq, "RX descriptor ring:\n");
+		sysfs_display_ring((void *)priv->dma_rx, rxsize, 0, seq);
+		seq_printf(seq, "TX descriptor ring:\n");
+		sysfs_display_ring((void *)priv->dma_tx, txsize, 0, seq);
 	}
 
 	return 0;
@@ -1895,7 +2064,7 @@ static const struct net_device_ops stmmac_netdev_ops = {
  */
static int stmmac_hw_init(struct stmmac_priv *priv)
 {
-	int ret = 0;
+	int ret;
 	struct mac_device_info *mac;
 
 	/* Identify the MAC HW device */
@@ -1913,6 +2082,9 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
 	/* Get and dump the chip ID */
 	priv->synopsys_id = stmmac_get_synopsys_id(priv);
 
+	/* To use alternate (extended) or normal descriptor structures */
+	stmmac_selec_desc_mode(priv);
+
 	/* To use the chained or ring mode */
 	if (chain_mode) {
 		priv->hw->chain = &chain_mode_ops;
@@ -1947,9 +2119,6 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
 	} else
 		pr_info(" No HW DMA feature register supported");
 
-	/* Select the enhnaced/normal descriptor structures */
-	stmmac_selec_desc_mode(priv);
-
 	/* Enable the IPC (Checksum Offload) and check if the feature has been
 	 * enabled during the core configuration. */
 	ret = priv->hw->mac->rx_ipc(priv->ioaddr);
@@ -1969,7 +2138,7 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
 		device_set_wakeup_capable(priv->device, 1);
 	}
 
-	return ret;
+	return 0;
 }
 
 /**
@@ -2015,7 +2184,9 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
 	priv->plat->phy_addr = phyaddr;
 
 	/* Init MAC and get the capabilities */
-	stmmac_hw_init(priv);
+	ret = stmmac_hw_init(priv);
+	if (ret)
+		goto error_free_netdev;
 
 	ndev->netdev_ops = &stmmac_netdev_ops;
 
@@ -2086,6 +2257,7 @@ error_clk_get:
 	unregister_netdev(ndev);
 error_netdev_register:
 	netif_napi_del(&priv->napi);
+error_free_netdev:
 	free_netdev(ndev);
 
 	return NULL;
@@ -2119,7 +2291,6 @@ int stmmac_dvr_remove(struct net_device *ndev)
 int stmmac_suspend(struct net_device *ndev)
 {
 	struct stmmac_priv *priv = netdev_priv(ndev);
-	int dis_ic = 0;
 	unsigned long flags;
 
 	if (!ndev || !netif_running(ndev))
@@ -2133,19 +2304,13 @@ int stmmac_suspend(struct net_device *ndev)
 	netif_device_detach(ndev);
 	netif_stop_queue(ndev);
 
-	if (priv->use_riwt)
-		dis_ic = 1;
-
 	napi_disable(&priv->napi);
 
 	/* Stop TX/RX DMA */
 	priv->hw->dma->stop_tx(priv->ioaddr);
 	priv->hw->dma->stop_rx(priv->ioaddr);
-	/* Clear the Rx/Tx descriptors */
-	priv->hw->desc->init_rx_desc(priv->dma_rx, priv->dma_rx_size,
-				     dis_ic, priv->mode);
-	priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size,
-				     priv->mode);
+
+	stmmac_clear_descriptors(priv);
 
 	/* Enable Power down mode by programming the PMT regs */
 	if (device_may_wakeup(priv->device))