author    Giuseppe CAVALLARO <peppe.cavallaro@st.com>    2010-04-13 16:21:12 -0400
committer David S. Miller <davem@davemloft.net>          2010-04-14 07:49:50 -0400
commit    56b106ae7b1f6b7cef4ef7e79a03b59cfc940923 (patch)
tree      3bf4908dd7a31231480f7f7289e6660b49bd41e3 /drivers/net/stmmac/dwmac100_dma.c
parent    3c32be635c18ead00d460b7bdad1da52622ff40f (diff)
stmmac: rework normal and enhanced descriptors
Currently the driver assumes that the mac10/100 can only use the normal
descriptor structure and the gmac can only use the enhanced one. This patch
removes the descriptor code from the dma files and adds two new files that
handle the normal and enhanced descriptors respectively.

Signed-off-by: Giuseppe Cavallaro <peppe.cavallaro@st.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
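For readers unfamiliar with the split, here is a minimal sketch (not part of this patch) of the resulting structure: descriptor handling is reached through a per-core ops table, so callers no longer care whether the hardware uses normal (mac10/100) or enhanced (gmac) descriptors. The enhanced-side symbol name and the example_reclaim helper below are assumptions made only for illustration.

/* Illustrative sketch only -- not code from this patch. */
struct dma_desc;

struct stmmac_desc_ops {
        int (*get_tx_owner)(struct dma_desc *p);
        void (*set_tx_owner)(struct dma_desc *p);
        void (*release_tx_desc)(struct dma_desc *p);
        /* ... remaining callbacks as listed in dwmac100_desc_ops below ... */
};

extern struct stmmac_desc_ops dwmac100_desc_ops;   /* normal descriptors */
extern struct stmmac_desc_ops dwmac1000_desc_ops;  /* enhanced descriptors (name assumed) */

/* A caller binds one table and then only uses the callbacks, e.g.: */
static inline void example_reclaim(struct stmmac_desc_ops *desc, struct dma_desc *p)
{
        if (!desc->get_tx_owner(p))     /* DMA has given the descriptor back */
                desc->release_tx_desc(p);
}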
Diffstat (limited to 'drivers/net/stmmac/dwmac100_dma.c')
-rw-r--r--    drivers/net/stmmac/dwmac100_dma.c | 223
1 file changed, 4 insertions(+), 219 deletions(-)
diff --git a/drivers/net/stmmac/dwmac100_dma.c b/drivers/net/stmmac/dwmac100_dma.c
index 7fcc52650505..96d098d68ad6 100644
--- a/drivers/net/stmmac/dwmac100_dma.c
+++ b/drivers/net/stmmac/dwmac100_dma.c
@@ -5,7 +5,7 @@
   DWC Ether MAC 10/100 Universal version 4.0 has been used for developing
   this code.
 
-  This contains the functions to handle the dma and descriptors.
+  This contains the functions to handle the dma.
 
   Copyright (C) 2007-2009 STMicroelectronics Ltd
 
@@ -79,14 +79,14 @@ static void dwmac100_dump_dma_regs(unsigned long ioaddr)
 {
        int i;
 
-       DBG(KERN_DEBUG "DWMAC 100 DMA CSR\n");
+       CHIP_DBG(KERN_DEBUG "DWMAC 100 DMA CSR\n");
        for (i = 0; i < 9; i++)
                pr_debug("\t CSR%d (offset 0x%x): 0x%08x\n", i,
                         (DMA_BUS_MODE + i * 4),
                         readl(ioaddr + DMA_BUS_MODE + i * 4));
-       DBG(KERN_DEBUG "\t CSR20 (offset 0x%x): 0x%08x\n",
+       CHIP_DBG(KERN_DEBUG "\t CSR20 (offset 0x%x): 0x%08x\n",
                DMA_CUR_TX_BUF_ADDR, readl(ioaddr + DMA_CUR_TX_BUF_ADDR));
-       DBG(KERN_DEBUG "\t CSR21 (offset 0x%x): 0x%08x\n",
+       CHIP_DBG(KERN_DEBUG "\t CSR21 (offset 0x%x): 0x%08x\n",
                DMA_CUR_RX_BUF_ADDR, readl(ioaddr + DMA_CUR_RX_BUF_ADDR));
        return;
 }
@@ -122,203 +122,6 @@ static void dwmac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
        return;
 }
 
-static int dwmac100_get_tx_status(void *data, struct stmmac_extra_stats *x,
-                                 struct dma_desc *p, unsigned long ioaddr)
-{
-       int ret = 0;
-       struct net_device_stats *stats = (struct net_device_stats *)data;
-
-       if (unlikely(p->des01.tx.error_summary)) {
-               if (unlikely(p->des01.tx.underflow_error)) {
-                       x->tx_underflow++;
-                       stats->tx_fifo_errors++;
-               }
-               if (unlikely(p->des01.tx.no_carrier)) {
-                       x->tx_carrier++;
-                       stats->tx_carrier_errors++;
-               }
-               if (unlikely(p->des01.tx.loss_carrier)) {
-                       x->tx_losscarrier++;
-                       stats->tx_carrier_errors++;
-               }
-               if (unlikely((p->des01.tx.excessive_deferral) ||
-                            (p->des01.tx.excessive_collisions) ||
-                            (p->des01.tx.late_collision)))
-                       stats->collisions += p->des01.tx.collision_count;
-               ret = -1;
-       }
-       if (unlikely(p->des01.tx.heartbeat_fail)) {
-               x->tx_heartbeat++;
-               stats->tx_heartbeat_errors++;
-               ret = -1;
-       }
-       if (unlikely(p->des01.tx.deferred))
-               x->tx_deferred++;
-
-       return ret;
-}
-
-static int dwmac100_get_tx_len(struct dma_desc *p)
-{
-       return p->des01.tx.buffer1_size;
-}
-
-/* This function verifies if each incoming frame has some errors
- * and, if required, updates the multicast statistics.
- * In case of success, it returns csum_none becasue the device
- * is not able to compute the csum in HW. */
-static int dwmac100_get_rx_status(void *data, struct stmmac_extra_stats *x,
-                                 struct dma_desc *p)
-{
-       int ret = csum_none;
-       struct net_device_stats *stats = (struct net_device_stats *)data;
-
-       if (unlikely(p->des01.rx.last_descriptor == 0)) {
-               pr_warning("dwmac100 Error: Oversized Ethernet "
-                          "frame spanned multiple buffers\n");
-               stats->rx_length_errors++;
-               return discard_frame;
-       }
-
-       if (unlikely(p->des01.rx.error_summary)) {
-               if (unlikely(p->des01.rx.descriptor_error))
-                       x->rx_desc++;
-               if (unlikely(p->des01.rx.partial_frame_error))
-                       x->rx_partial++;
-               if (unlikely(p->des01.rx.run_frame))
-                       x->rx_runt++;
-               if (unlikely(p->des01.rx.frame_too_long))
-                       x->rx_toolong++;
-               if (unlikely(p->des01.rx.collision)) {
-                       x->rx_collision++;
-                       stats->collisions++;
-               }
-               if (unlikely(p->des01.rx.crc_error)) {
-                       x->rx_crc++;
-                       stats->rx_crc_errors++;
-               }
-               ret = discard_frame;
-       }
-       if (unlikely(p->des01.rx.dribbling))
-               ret = discard_frame;
-
-       if (unlikely(p->des01.rx.length_error)) {
-               x->rx_length++;
-               ret = discard_frame;
-       }
-       if (unlikely(p->des01.rx.mii_error)) {
-               x->rx_mii++;
-               ret = discard_frame;
-       }
-       if (p->des01.rx.multicast_frame) {
-               x->rx_multicast++;
-               stats->multicast++;
-       }
-       return ret;
-}
-
-static void dwmac100_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
-                                 int disable_rx_ic)
-{
-       int i;
-       for (i = 0; i < ring_size; i++) {
-               p->des01.rx.own = 1;
-               p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1;
-               if (i == ring_size - 1)
-                       p->des01.rx.end_ring = 1;
-               if (disable_rx_ic)
-                       p->des01.rx.disable_ic = 1;
-               p++;
-       }
-       return;
-}
-
-static void dwmac100_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
-{
-       int i;
-       for (i = 0; i < ring_size; i++) {
-               p->des01.tx.own = 0;
-               if (i == ring_size - 1)
-                       p->des01.tx.end_ring = 1;
-               p++;
-       }
-       return;
-}
-
-static int dwmac100_get_tx_owner(struct dma_desc *p)
-{
-       return p->des01.tx.own;
-}
-
-static int dwmac100_get_rx_owner(struct dma_desc *p)
-{
-       return p->des01.rx.own;
-}
-
-static void dwmac100_set_tx_owner(struct dma_desc *p)
-{
-       p->des01.tx.own = 1;
-}
-
-static void dwmac100_set_rx_owner(struct dma_desc *p)
-{
-       p->des01.rx.own = 1;
-}
-
-static int dwmac100_get_tx_ls(struct dma_desc *p)
-{
-       return p->des01.tx.last_segment;
-}
-
-static void dwmac100_release_tx_desc(struct dma_desc *p)
-{
-       int ter = p->des01.tx.end_ring;
-
-       /* clean field used within the xmit */
-       p->des01.tx.first_segment = 0;
-       p->des01.tx.last_segment = 0;
-       p->des01.tx.buffer1_size = 0;
-
-       /* clean status reported */
-       p->des01.tx.error_summary = 0;
-       p->des01.tx.underflow_error = 0;
-       p->des01.tx.no_carrier = 0;
-       p->des01.tx.loss_carrier = 0;
-       p->des01.tx.excessive_deferral = 0;
-       p->des01.tx.excessive_collisions = 0;
-       p->des01.tx.late_collision = 0;
-       p->des01.tx.heartbeat_fail = 0;
-       p->des01.tx.deferred = 0;
-
-       /* set termination field */
-       p->des01.tx.end_ring = ter;
-
-       return;
-}
-
-static void dwmac100_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
-                                    int csum_flag)
-{
-       p->des01.tx.first_segment = is_fs;
-       p->des01.tx.buffer1_size = len;
-}
-
-static void dwmac100_clear_tx_ic(struct dma_desc *p)
-{
-       p->des01.tx.interrupt = 0;
-}
-
-static void dwmac100_close_tx_desc(struct dma_desc *p)
-{
-       p->des01.tx.last_segment = 1;
-       p->des01.tx.interrupt = 1;
-}
-
-static int dwmac100_get_rx_frame_len(struct dma_desc *p)
-{
-       return p->des01.rx.frame_length;
-}
-
 struct stmmac_dma_ops dwmac100_dma_ops = {
        .init = dwmac100_dma_init,
        .dump_regs = dwmac100_dump_dma_regs,
@@ -333,21 +136,3 @@ struct stmmac_dma_ops dwmac100_dma_ops = {
        .stop_rx = dwmac_dma_stop_rx,
        .dma_interrupt = dwmac_dma_interrupt,
 };
-
-struct stmmac_desc_ops dwmac100_desc_ops = {
-       .tx_status = dwmac100_get_tx_status,
-       .rx_status = dwmac100_get_rx_status,
-       .get_tx_len = dwmac100_get_tx_len,
-       .init_rx_desc = dwmac100_init_rx_desc,
-       .init_tx_desc = dwmac100_init_tx_desc,
-       .get_tx_owner = dwmac100_get_tx_owner,
-       .get_rx_owner = dwmac100_get_rx_owner,
-       .release_tx_desc = dwmac100_release_tx_desc,
-       .prepare_tx_desc = dwmac100_prepare_tx_desc,
-       .clear_tx_ic = dwmac100_clear_tx_ic,
-       .close_tx_desc = dwmac100_close_tx_desc,
-       .get_tx_ls = dwmac100_get_tx_ls,
-       .set_tx_owner = dwmac100_set_tx_owner,
-       .set_rx_owner = dwmac100_set_rx_owner,
-       .get_rx_frame_len = dwmac100_get_rx_frame_len,
-};