author    Giuseppe Cavallaro <peppe.cavallaro@st.com>  2009-10-14 18:13:45 -0400
committer David S. Miller <davem@davemloft.net>        2009-10-14 18:13:45 -0400
commit    47dd7a540b8a0cdc028914b7351fca0cf0a1d305 (patch)
tree      dea632b2d4175a2bf4296069047a1bc63cda5ba6 /drivers/net/stmmac/gmac.c
parent    47a01a0c94a3ff1716adb5f37b83975550e1ebbb (diff)
net: add support for STMicroelectronics Ethernet controllers.
This is the driver for the ST MAC 10/100/1000 on-chip Ethernet
controllers (Synopsys IP blocks).

Driver documentation:
 o http://stlinux.com/drupal/kernel/network/stmmac
Revisions:
 o http://stlinux.com/drupal/kernel/network/stmmac-driver-revisions
Performance:
 o http://stlinux.com/drupal/benchmarks/networking/stmmac

Signed-off-by: Giuseppe Cavallaro <peppe.cavallaro@st.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/stmmac/gmac.c')
-rw-r--r--  drivers/net/stmmac/gmac.c  693
1 file changed, 693 insertions(+), 0 deletions(-)
diff --git a/drivers/net/stmmac/gmac.c b/drivers/net/stmmac/gmac.c
new file mode 100644
index 000000000000..b624bb5bae0a
--- /dev/null
+++ b/drivers/net/stmmac/gmac.c
@@ -0,0 +1,693 @@
/*******************************************************************************
  This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
  DWC Ether MAC 10/100/1000 Universal version 3.41a has been used for
  developing this code.

  Copyright (C) 2007-2009 STMicroelectronics Ltd

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/

#include <linux/netdevice.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>

#include "stmmac.h"
#include "gmac.h"

#undef GMAC_DEBUG
/*#define GMAC_DEBUG*/
#undef FRAME_FILTER_DEBUG
/*#define FRAME_FILTER_DEBUG*/
#ifdef GMAC_DEBUG
#define DBG(fmt, args...)  printk(fmt, ## args)
#else
#define DBG(fmt, args...)  do { } while (0)
#endif

static void gmac_dump_regs(unsigned long ioaddr)
{
        int i;
        pr_info("\t----------------------------------------------\n"
                "\t  GMAC registers (base addr = 0x%8x)\n"
                "\t----------------------------------------------\n",
                (unsigned int)ioaddr);

        for (i = 0; i < 55; i++) {
                int offset = i * 4;
                pr_info("\tReg No. %d (offset 0x%x): 0x%08x\n", i,
                        offset, readl(ioaddr + offset));
        }
        return;
}

static int gmac_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx)
{
        u32 value = readl(ioaddr + DMA_BUS_MODE);
        /* DMA SW reset */
        value |= DMA_BUS_MODE_SFT_RESET;
        writel(value, ioaddr + DMA_BUS_MODE);
        do {} while ((readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET));

        value = /* DMA_BUS_MODE_FB | */ DMA_BUS_MODE_4PBL |
            ((pbl << DMA_BUS_MODE_PBL_SHIFT) |
             (pbl << DMA_BUS_MODE_RPBL_SHIFT));

#ifdef CONFIG_STMMAC_DA
        value |= DMA_BUS_MODE_DA;       /* Rx has priority over tx */
#endif
        writel(value, ioaddr + DMA_BUS_MODE);

        /* Mask interrupts by writing to CSR7 */
        writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);

        /* The base address of the RX/TX descriptor lists must be written into
         * DMA CSR3 and CSR4, respectively. */
        writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR);
        writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR);

        return 0;
}
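
/* Illustrative sketch, not part of the original patch: a typical open()
 * path maps both descriptor rings for DMA and hands the bus addresses to
 * gmac_dma_init(). The names pdev, TX_RING_SIZE and RX_RING_SIZE are
 * hypothetical and error unwinding is elided; assumes
 * <linux/platform_device.h> and <linux/dma-mapping.h> are available. */
static int example_dma_setup(struct platform_device *pdev, unsigned long ioaddr)
{
        struct dma_desc *tx_ring, *rx_ring;
        dma_addr_t tx_phys, rx_phys;

        /* Coherent mappings: the descriptors are shared with the DMA engine */
        tx_ring = dma_alloc_coherent(&pdev->dev,
                        TX_RING_SIZE * sizeof(*tx_ring), &tx_phys, GFP_KERNEL);
        rx_ring = dma_alloc_coherent(&pdev->dev,
                        RX_RING_SIZE * sizeof(*rx_ring), &rx_phys, GFP_KERNEL);
        if (!tx_ring || !rx_ring)
                return -ENOMEM;

        /* Burst length 8 for both PBL and RPBL; ring bases go in CSR3/CSR4 */
        return gmac_dma_init(ioaddr, 8, tx_phys, rx_phys);
}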

/* Transmit FIFO flush operation */
static void gmac_flush_tx_fifo(unsigned long ioaddr)
{
        u32 csr6 = readl(ioaddr + DMA_CONTROL);
        writel((csr6 | DMA_CONTROL_FTF), ioaddr + DMA_CONTROL);

        do {} while ((readl(ioaddr + DMA_CONTROL) & DMA_CONTROL_FTF));
}

static void gmac_dma_operation_mode(unsigned long ioaddr, int txmode,
                                    int rxmode)
{
        u32 csr6 = readl(ioaddr + DMA_CONTROL);

        if (txmode == SF_DMA_MODE) {
                DBG(KERN_DEBUG "GMAC: enabling TX store and forward mode\n");
                /* Transmit COE type 2 cannot be done in cut-through mode. */
                csr6 |= DMA_CONTROL_TSF;
                /* Operating on the second frame increases performance,
                 * especially when transmit store-and-forward is used. */
                csr6 |= DMA_CONTROL_OSF;
        } else {
                DBG(KERN_DEBUG "GMAC: disabling TX store and forward mode"
                    " (threshold = %d)\n", txmode);
                csr6 &= ~DMA_CONTROL_TSF;
                csr6 &= DMA_CONTROL_TC_TX_MASK;
                /* Set the transmit threshold */
                if (txmode <= 32)
                        csr6 |= DMA_CONTROL_TTC_32;
                else if (txmode <= 64)
                        csr6 |= DMA_CONTROL_TTC_64;
                else if (txmode <= 128)
                        csr6 |= DMA_CONTROL_TTC_128;
                else if (txmode <= 192)
                        csr6 |= DMA_CONTROL_TTC_192;
                else
                        csr6 |= DMA_CONTROL_TTC_256;
        }

        if (rxmode == SF_DMA_MODE) {
                DBG(KERN_DEBUG "GMAC: enabling RX store and forward mode\n");
                csr6 |= DMA_CONTROL_RSF;
        } else {
                DBG(KERN_DEBUG "GMAC: disabling RX store and forward mode"
                    " (threshold = %d)\n", rxmode);
                csr6 &= ~DMA_CONTROL_RSF;
                csr6 &= DMA_CONTROL_TC_RX_MASK;
                if (rxmode <= 32)
                        csr6 |= DMA_CONTROL_RTC_32;
                else if (rxmode <= 64)
                        csr6 |= DMA_CONTROL_RTC_64;
                else if (rxmode <= 96)
                        csr6 |= DMA_CONTROL_RTC_96;
                else
                        csr6 |= DMA_CONTROL_RTC_128;
        }

        writel(csr6, ioaddr + DMA_CONTROL);
        return;
}
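
/* Illustrative example, not part of the original patch: with
 *
 *      gmac_dma_operation_mode(ioaddr, 64, SF_DMA_MODE);
 *
 * the TX path runs in threshold mode with a 64-byte transmit threshold
 * (DMA_CONTROL_TTC_64) while the RX path uses store-and-forward (RSF set).
 */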

/* Not yet implemented --- no RMON module */
static void gmac_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
                                   unsigned long ioaddr)
{
        return;
}

static void gmac_dump_dma_regs(unsigned long ioaddr)
{
        int i;
        pr_info(" DMA registers\n");
        for (i = 0; i < 22; i++) {
                if ((i < 9) || (i > 17)) {
                        int offset = i * 4;
                        pr_err("\t Reg No. %d (offset 0x%x): 0x%08x\n", i,
                               (DMA_BUS_MODE + offset),
                               readl(ioaddr + DMA_BUS_MODE + offset));
                }
        }
        return;
}

static int gmac_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
                                    struct dma_desc *p, unsigned long ioaddr)
{
        int ret = 0;
        struct net_device_stats *stats = (struct net_device_stats *)data;

        if (unlikely(p->des01.etx.error_summary)) {
                DBG(KERN_ERR "GMAC TX error... 0x%08x\n", p->des01.etx);
                if (unlikely(p->des01.etx.jabber_timeout)) {
                        DBG(KERN_ERR "\tjabber_timeout error\n");
                        x->tx_jabber++;
                }

                if (unlikely(p->des01.etx.frame_flushed)) {
                        DBG(KERN_ERR "\tframe_flushed error\n");
                        x->tx_frame_flushed++;
                        gmac_flush_tx_fifo(ioaddr);
                }

                if (unlikely(p->des01.etx.loss_carrier)) {
                        DBG(KERN_ERR "\tloss_carrier error\n");
                        x->tx_losscarrier++;
                        stats->tx_carrier_errors++;
                }
                if (unlikely(p->des01.etx.no_carrier)) {
                        DBG(KERN_ERR "\tno_carrier error\n");
                        x->tx_carrier++;
                        stats->tx_carrier_errors++;
                }
                if (unlikely(p->des01.etx.late_collision)) {
                        DBG(KERN_ERR "\tlate_collision error\n");
                        stats->collisions += p->des01.etx.collision_count;
                }
                if (unlikely(p->des01.etx.excessive_collisions)) {
                        DBG(KERN_ERR "\texcessive_collisions\n");
                        stats->collisions += p->des01.etx.collision_count;
                }
                if (unlikely(p->des01.etx.excessive_deferral)) {
                        DBG(KERN_INFO "\texcessive tx_deferral\n");
                        x->tx_deferred++;
                }

                if (unlikely(p->des01.etx.underflow_error)) {
                        DBG(KERN_ERR "\tunderflow error\n");
                        gmac_flush_tx_fifo(ioaddr);
                        x->tx_underflow++;
                }

                if (unlikely(p->des01.etx.ip_header_error)) {
                        DBG(KERN_ERR "\tTX IP header csum error\n");
                        x->tx_ip_header_error++;
                }

                if (unlikely(p->des01.etx.payload_error)) {
                        DBG(KERN_ERR "\tAddr/Payload csum error\n");
                        x->tx_payload_error++;
                        gmac_flush_tx_fifo(ioaddr);
                }

                ret = -1;
        }

        if (unlikely(p->des01.etx.deferred)) {
                DBG(KERN_INFO "GMAC TX status: tx deferred\n");
                x->tx_deferred++;
        }
#ifdef STMMAC_VLAN_TAG_USED
        if (p->des01.etx.vlan_frame) {
                DBG(KERN_INFO "GMAC TX status: VLAN frame\n");
                x->tx_vlan++;
        }
#endif

        return ret;
}

static int gmac_get_tx_len(struct dma_desc *p)
{
        return p->des01.etx.buffer1_size;
}

static int gmac_coe_rdes0(int ipc_err, int type, int payload_err)
{
        int ret = good_frame;
        u32 status = (type << 2 | ipc_err << 1 | payload_err) & 0x7;

        /* bits 5 7 0 | Frame status
         * ----------------------------------------------------------
         * 0 0 0 | IEEE 802.3 Type frame (length < 1536 octets)
         * 1 0 0 | IPv4/6 No CSUM errors.
         * 1 0 1 | IPv4/6 CSUM PAYLOAD error
         * 1 1 0 | IPv4/6 CSUM IP HR error
         * 1 1 1 | IPv4/6 IP PAYLOAD AND HEADER errors
         * 0 0 1 | IPv4/6 unsupported IP PAYLOAD
         * 0 1 1 | COE bypassed.. no IPv4/6 frame
         * 0 1 0 | Reserved.
         */
        if (status == 0x0) {
                DBG(KERN_INFO "RX Des0 status: IEEE 802.3 Type frame.\n");
                ret = good_frame;
        } else if (status == 0x4) {
                DBG(KERN_INFO "RX Des0 status: IPv4/6 No CSUM errors.\n");
                ret = good_frame;
        } else if (status == 0x5) {
                DBG(KERN_ERR "RX Des0 status: IPv4/6 Payload Error.\n");
                ret = csum_none;
        } else if (status == 0x6) {
                DBG(KERN_ERR "RX Des0 status: IPv4/6 Header Error.\n");
                ret = csum_none;
        } else if (status == 0x7) {
                DBG(KERN_ERR
                    "RX Des0 status: IPv4/6 Header and Payload Error.\n");
                ret = csum_none;
        } else if (status == 0x1) {
                DBG(KERN_ERR
                    "RX Des0 status: IPv4/6 unsupported IP PAYLOAD.\n");
                ret = discard_frame;
        } else if (status == 0x3) {
                DBG(KERN_ERR "RX Des0 status: No IPv4, IPv6 frame.\n");
                ret = discard_frame;
        }
        return ret;
}
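
/* Worked example, not part of the original patch: for a received IPv4/6
 * frame with a correct checksum the descriptor reports frame_type = 1,
 * ipc_csum_error = 0 and payload_csum_error = 0, so
 *
 *      status = (1 << 2 | 0 << 1 | 0) & 0x7 = 0x4
 *
 * and gmac_coe_rdes0() returns good_frame. The same frame with a bad
 * payload checksum yields status 0x5 and csum_none, so the stack
 * re-verifies the checksum in software instead of dropping the frame.
 */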

static int gmac_get_rx_frame_status(void *data, struct stmmac_extra_stats *x,
                                    struct dma_desc *p)
{
        int ret = good_frame;
        struct net_device_stats *stats = (struct net_device_stats *)data;

        if (unlikely(p->des01.erx.error_summary)) {
                DBG(KERN_ERR "GMAC RX Error Summary... 0x%08x\n", p->des01.erx);
                if (unlikely(p->des01.erx.descriptor_error)) {
                        DBG(KERN_ERR "\tdescriptor error\n");
                        x->rx_desc++;
                        stats->rx_length_errors++;
                }
                if (unlikely(p->des01.erx.overflow_error)) {
                        DBG(KERN_ERR "\toverflow error\n");
                        x->rx_gmac_overflow++;
                }

                if (unlikely(p->des01.erx.ipc_csum_error))
                        DBG(KERN_ERR "\tIPC Csum Error/Giant frame\n");

                if (unlikely(p->des01.erx.late_collision)) {
                        DBG(KERN_ERR "\tlate_collision error\n");
                        stats->collisions++;
                }
                if (unlikely(p->des01.erx.receive_watchdog)) {
                        DBG(KERN_ERR "\treceive_watchdog error\n");
                        x->rx_watchdog++;
                }
                if (unlikely(p->des01.erx.error_gmii)) {
                        DBG(KERN_ERR "\tReceive Error\n");
                        x->rx_mii++;
                }
                if (unlikely(p->des01.erx.crc_error)) {
                        DBG(KERN_ERR "\tCRC error\n");
                        x->rx_crc++;
                        stats->rx_crc_errors++;
                }
                ret = discard_frame;
        }

        /* After a payload csum error, the ES bit is set.
         * This does not match the information reported in the databook.
         * At any rate, we need to understand if the hardware CSUM
         * computation is ok and report this info to the upper layers. */
        ret = gmac_coe_rdes0(p->des01.erx.ipc_csum_error,
                p->des01.erx.frame_type, p->des01.erx.payload_csum_error);

        if (unlikely(p->des01.erx.dribbling)) {
                DBG(KERN_ERR "GMAC RX: dribbling error\n");
                ret = discard_frame;
        }
        if (unlikely(p->des01.erx.sa_filter_fail)) {
                DBG(KERN_ERR "GMAC RX: Source Address filter fail\n");
                x->sa_rx_filter_fail++;
                ret = discard_frame;
        }
        if (unlikely(p->des01.erx.da_filter_fail)) {
                DBG(KERN_ERR "GMAC RX: Destination Address filter fail\n");
                x->da_rx_filter_fail++;
                ret = discard_frame;
        }
        if (unlikely(p->des01.erx.length_error)) {
                DBG(KERN_ERR "GMAC RX: length error\n");
                x->rx_lenght++;
                ret = discard_frame;
        }
#ifdef STMMAC_VLAN_TAG_USED
        if (p->des01.erx.vlan_tag) {
                DBG(KERN_INFO "GMAC RX: VLAN frame tagged\n");
                x->rx_vlan++;
        }
#endif
        return ret;
}

static void gmac_irq_status(unsigned long ioaddr)
{
        u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);

        /* Unused events (e.g. MMC interrupts) are not handled. */
        if (unlikely(intr_status & mmc_tx_irq))
                DBG(KERN_DEBUG "GMAC: MMC tx interrupt: 0x%08x\n",
                    readl(ioaddr + GMAC_MMC_TX_INTR));
        if (unlikely(intr_status & mmc_rx_irq))
                DBG(KERN_DEBUG "GMAC: MMC rx interrupt: 0x%08x\n",
                    readl(ioaddr + GMAC_MMC_RX_INTR));
        if (unlikely(intr_status & mmc_rx_csum_offload_irq))
                DBG(KERN_DEBUG "GMAC: MMC rx csum offload: 0x%08x\n",
                    readl(ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD));
        if (unlikely(intr_status & pmt_irq)) {
                DBG(KERN_DEBUG "GMAC: received Magic frame\n");
                /* Clear the PMT bits 5 and 6 by reading the PMT
                 * status register. */
                readl(ioaddr + GMAC_PMT);
        }

        return;
}

static void gmac_core_init(unsigned long ioaddr)
{
        u32 value = readl(ioaddr + GMAC_CONTROL);
        value |= GMAC_CORE_INIT;
        writel(value, ioaddr + GMAC_CONTROL);

        /* STBus Bridge Configuration */
        /*writel(0xc5608, ioaddr + 0x00007000);*/

        /* Freeze MMC counters */
        writel(0x8, ioaddr + GMAC_MMC_CTRL);
        /* Mask GMAC interrupts */
        writel(0x207, ioaddr + GMAC_INT_MASK);

#ifdef STMMAC_VLAN_TAG_USED
        /* Tag detection without filtering */
        writel(0x0, ioaddr + GMAC_VLAN_TAG);
#endif
        return;
}

static void gmac_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
                               unsigned int reg_n)
{
        stmmac_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
                            GMAC_ADDR_LOW(reg_n));
}

static void gmac_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
                               unsigned int reg_n)
{
        stmmac_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
                            GMAC_ADDR_LOW(reg_n));
}

static void gmac_set_filter(struct net_device *dev)
{
        unsigned long ioaddr = dev->base_addr;
        unsigned int value = 0;

        DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n",
            __func__, dev->mc_count, dev->uc_count);

        if (dev->flags & IFF_PROMISC)
                value = GMAC_FRAME_FILTER_PR;
        else if ((dev->mc_count > HASH_TABLE_SIZE)
                 || (dev->flags & IFF_ALLMULTI)) {
                value = GMAC_FRAME_FILTER_PM;   /* pass all multi */
                writel(0xffffffff, ioaddr + GMAC_HASH_HIGH);
                writel(0xffffffff, ioaddr + GMAC_HASH_LOW);
        } else if (dev->mc_count > 0) {
                int i;
                u32 mc_filter[2];
                struct dev_mc_list *mclist;

                /* Hash filter for multicast */
                value = GMAC_FRAME_FILTER_HMC;

                memset(mc_filter, 0, sizeof(mc_filter));
                for (i = 0, mclist = dev->mc_list;
                     mclist && i < dev->mc_count; i++, mclist = mclist->next) {
                        /* The upper 6 bits of the calculated CRC are used to
                         * index the contents of the hash table */
                        int bit_nr =
                            bitrev32(~crc32_le(~0, mclist->dmi_addr, 6)) >> 26;
                        /* The most significant bit determines the register to
                         * use (H/L) while the other 5 bits determine the bit
                         * within the register. */
                        mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
                }
                writel(mc_filter[0], ioaddr + GMAC_HASH_LOW);
                writel(mc_filter[1], ioaddr + GMAC_HASH_HIGH);
        }

        /* Handle multiple unicast addresses (perfect filtering) */
        if (dev->uc_count > GMAC_MAX_UNICAST_ADDRESSES)
                /* Switch to promiscuous mode if more than 16 addrs
                 * are required */
                value |= GMAC_FRAME_FILTER_PR;
        else {
                int i;
                struct dev_addr_list *uc_ptr = dev->uc_list;

                for (i = 0; i < dev->uc_count; i++) {
                        gmac_set_umac_addr(ioaddr, uc_ptr->da_addr,
                                           i + 1);

                        DBG(KERN_INFO "\t%d "
                            "- Unicast addr %02x:%02x:%02x:%02x:%02x:"
                            "%02x\n", i + 1,
                            uc_ptr->da_addr[0], uc_ptr->da_addr[1],
                            uc_ptr->da_addr[2], uc_ptr->da_addr[3],
                            uc_ptr->da_addr[4], uc_ptr->da_addr[5]);
                        uc_ptr = uc_ptr->next;
                }
        }

#ifdef FRAME_FILTER_DEBUG
        /* Enable Receive all mode (to debug filtering_fail errors) */
        value |= GMAC_FRAME_FILTER_RA;
#endif
        writel(value, ioaddr + GMAC_FRAME_FILTER);

        DBG(KERN_INFO "\tFrame Filter reg: 0x%08x\n\tHash regs: "
            "HI 0x%08x, LO 0x%08x\n", readl(ioaddr + GMAC_FRAME_FILTER),
            readl(ioaddr + GMAC_HASH_HIGH), readl(ioaddr + GMAC_HASH_LOW));

        return;
}
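
/* Illustrative helper, not part of the original patch: it only restates the
 * hash computation done inline above. The 6-bit index is the top of the
 * bit-reversed CRC-32 of the 6-byte address; its most significant bit selects
 * the HIGH or LOW hash register and the remaining 5 bits pick the bit. */
static inline void example_hash_index(const unsigned char *addr,
                                      int *reg, int *bit)
{
        int bit_nr = bitrev32(~crc32_le(~0, addr, 6)) >> 26;

        *reg = bit_nr >> 5;     /* 0: GMAC_HASH_LOW, 1: GMAC_HASH_HIGH */
        *bit = bit_nr & 31;     /* bit position within that register */
}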

static void gmac_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
                           unsigned int fc, unsigned int pause_time)
{
        unsigned int flow = 0;

        DBG(KERN_DEBUG "GMAC Flow-Control:\n");
        if (fc & FLOW_RX) {
                DBG(KERN_DEBUG "\tReceive Flow-Control ON\n");
                flow |= GMAC_FLOW_CTRL_RFE;
        }
        if (fc & FLOW_TX) {
                DBG(KERN_DEBUG "\tTransmit Flow-Control ON\n");
                flow |= GMAC_FLOW_CTRL_TFE;
        }

        if (duplex) {
                DBG(KERN_DEBUG "\tduplex mode: pause time: %d\n", pause_time);
                flow |= (pause_time << GMAC_FLOW_CTRL_PT_SHIFT);
        }

        writel(flow, ioaddr + GMAC_FLOW_CTRL);
        return;
}
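
/* Illustrative example, not part of the original patch: enabling symmetric
 * flow control on a full-duplex link with the maximum pause time would be
 *
 *      gmac_flow_ctrl(ioaddr, 1, FLOW_RX | FLOW_TX, 0xffff);
 *
 * which sets both RFE and TFE and, because duplex is set, programs 0xffff
 * into the pause-time field of the flow control register.
 */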

static void gmac_pmt(unsigned long ioaddr, unsigned long mode)
{
        unsigned int pmt = 0;

        if (mode == WAKE_MAGIC) {
                DBG(KERN_DEBUG "GMAC: WOL Magic frame\n");
                pmt |= power_down | magic_pkt_en;
        } else if (mode == WAKE_UCAST) {
                DBG(KERN_DEBUG "GMAC: WOL on global unicast\n");
                pmt |= global_unicast;
        }

        writel(pmt, ioaddr + GMAC_PMT);
        return;
}

static void gmac_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
                              int disable_rx_ic)
{
        int i;
        for (i = 0; i < ring_size; i++) {
                p->des01.erx.own = 1;
                p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1;
                /* To support jumbo frames */
                p->des01.erx.buffer2_size = BUF_SIZE_8KiB - 1;
                if (i == ring_size - 1)
                        p->des01.erx.end_ring = 1;
                if (disable_rx_ic)
                        p->des01.erx.disable_ic = 1;
                p++;
        }
        return;
}
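
/* Illustrative example, not part of the original patch: after initialization
 * every RX descriptor is owned by the DMA (own = 1) and only the last one has
 * end_ring set, so the engine wraps back to the ring base address. A ring of
 * 4 descriptors would look like:
 *
 *      idx   own   end_ring
 *       0     1       0
 *       1     1       0
 *       2     1       0
 *       3     1       1
 */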

static void gmac_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
{
        int i;

        for (i = 0; i < ring_size; i++) {
                p->des01.etx.own = 0;
                if (i == ring_size - 1)
                        p->des01.etx.end_ring = 1;
                p++;
        }

        return;
}

static int gmac_get_tx_owner(struct dma_desc *p)
{
        return p->des01.etx.own;
}

static int gmac_get_rx_owner(struct dma_desc *p)
{
        return p->des01.erx.own;
}

static void gmac_set_tx_owner(struct dma_desc *p)
{
        p->des01.etx.own = 1;
}

static void gmac_set_rx_owner(struct dma_desc *p)
{
        p->des01.erx.own = 1;
}

static int gmac_get_tx_ls(struct dma_desc *p)
{
        return p->des01.etx.last_segment;
}

static void gmac_release_tx_desc(struct dma_desc *p)
{
        int ter = p->des01.etx.end_ring;

        memset(p, 0, sizeof(struct dma_desc));
        p->des01.etx.end_ring = ter;

        return;
}

static void gmac_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
                                 int csum_flag)
{
        p->des01.etx.first_segment = is_fs;
        if (unlikely(len > BUF_SIZE_4KiB)) {
                p->des01.etx.buffer1_size = BUF_SIZE_4KiB;
                p->des01.etx.buffer2_size = len - BUF_SIZE_4KiB;
        } else {
                p->des01.etx.buffer1_size = len;
        }
        if (likely(csum_flag))
                p->des01.etx.checksum_insertion = cic_full;
}
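
/* Illustrative example, not part of the original patch: a segment larger
 * than BUF_SIZE_4KiB is split across the two descriptor buffers. Assuming
 * BUF_SIZE_4KiB == 4096, the call
 *
 *      gmac_prepare_tx_desc(p, 1, 6000, 1);
 *
 * yields buffer1_size == 4096 and buffer2_size == 1904, with full hardware
 * checksum insertion (cic_full) requested.
 */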

static void gmac_clear_tx_ic(struct dma_desc *p)
{
        p->des01.etx.interrupt = 0;
}

static void gmac_close_tx_desc(struct dma_desc *p)
{
        p->des01.etx.last_segment = 1;
        p->des01.etx.interrupt = 1;
}

static int gmac_get_rx_frame_len(struct dma_desc *p)
{
        return p->des01.erx.frame_length;
}

struct stmmac_ops gmac_driver = {
        .core_init = gmac_core_init,
        .dump_mac_regs = gmac_dump_regs,
        .dma_init = gmac_dma_init,
        .dump_dma_regs = gmac_dump_dma_regs,
        .dma_mode = gmac_dma_operation_mode,
        .dma_diagnostic_fr = gmac_dma_diagnostic_fr,
        .tx_status = gmac_get_tx_frame_status,
        .rx_status = gmac_get_rx_frame_status,
        .get_tx_len = gmac_get_tx_len,
        .set_filter = gmac_set_filter,
        .flow_ctrl = gmac_flow_ctrl,
        .pmt = gmac_pmt,
        .init_rx_desc = gmac_init_rx_desc,
        .init_tx_desc = gmac_init_tx_desc,
        .get_tx_owner = gmac_get_tx_owner,
        .get_rx_owner = gmac_get_rx_owner,
        .release_tx_desc = gmac_release_tx_desc,
        .prepare_tx_desc = gmac_prepare_tx_desc,
        .clear_tx_ic = gmac_clear_tx_ic,
        .close_tx_desc = gmac_close_tx_desc,
        .get_tx_ls = gmac_get_tx_ls,
        .set_tx_owner = gmac_set_tx_owner,
        .set_rx_owner = gmac_set_rx_owner,
        .get_rx_frame_len = gmac_get_rx_frame_len,
        .host_irq_status = gmac_irq_status,
        .set_umac_addr = gmac_set_umac_addr,
        .get_umac_addr = gmac_get_umac_addr,
};

struct mac_device_info *gmac_setup(unsigned long ioaddr)
{
        struct mac_device_info *mac;
        u32 uid = readl(ioaddr + GMAC_VERSION);

        pr_info("\tGMAC - user ID: 0x%x, Synopsys ID: 0x%x\n",
                ((uid & 0x0000ff00) >> 8), (uid & 0x000000ff));

        mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
        if (!mac)
                return NULL;

        mac->ops = &gmac_driver;
        mac->hw.pmt = PMT_SUPPORTED;
        mac->hw.link.port = GMAC_CONTROL_PS;
        mac->hw.link.duplex = GMAC_CONTROL_DM;
        mac->hw.link.speed = GMAC_CONTROL_FES;
        mac->hw.mii.addr = GMAC_MII_ADDR;
        mac->hw.mii.data = GMAC_MII_DATA;

        return mac;
}
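
/* Illustrative usage sketch, not part of the original patch: the stmmac core
 * is expected to attach the returned mac_device_info to its private state and
 * drive the hardware through the ops table. The priv pointer is hypothetical.
 *
 *      struct mac_device_info *mac = gmac_setup(dev->base_addr);
 *      if (!mac)
 *              return -ENOMEM;
 *      priv->hw = mac;
 *      mac->ops->core_init(dev->base_addr);
 *      mac->ops->set_umac_addr(dev->base_addr, dev->dev_addr, 0);
 */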