author    Florian Fainelli <florian.fainelli@telecomint.eu>    2008-03-19 12:14:51 -0400
committer Jeff Garzik <jeff@garzik.org>    2008-03-26 00:19:34 -0400
commit    ef11291bcd5f963c72e7ba5952be3e3c97463d2c (patch)
tree      46f7320e9406f8bf35b73065898d96ceb7e38dc4 /drivers/net/korina.c
parent    bfebbb88eca12a01fff1fbee2b8e1a4e932b799b (diff)
Add support for the Korina (IDT RC32434) Ethernet MAC
This patch adds support for the IDT RC32434 Ethernet MAC found on IDT boards and on the Mikrotik RB500. The driver references some code from the linux-mips RB500 support.

Signed-off-by: Florian Fainelli <florian.fainelli@telecomint.eu>
Signed-off-by: Felix Fietkau <nbd@openwrt.org>
Signed-off-by: Philip Rischel <rischelp@idt.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
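The driver binds to a platform device registered by the board support code and looks up its register windows and interrupts by name (see korina_probe() in the file below). Purely as an illustrative sketch of that contract (the base addresses, IRQ numbers and MAC address here are placeholders, not values taken from this patch), a board registration could look roughly like this:

	/* Board setup sketch; assumes <linux/platform_device.h> and
	 * <asm/mach-rc32434/rb.h> (for struct korina_device). */
	static struct korina_device korina0_data = {
		.mac = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },	/* placeholder MAC */
	};

	static struct resource korina0_res[] = {
		{ .name = "korina_regs",   .start = 0x18060000, .end = 0x180601ff,
		  .flags = IORESOURCE_MEM },
		{ .name = "korina_dma_rx", .start = 0x18040000, .end = 0x18040013,
		  .flags = IORESOURCE_MEM },
		{ .name = "korina_dma_tx", .start = 0x18040040, .end = 0x18040053,
		  .flags = IORESOURCE_MEM },
		{ .name = "korina_rx",  .start = 40, .end = 40, .flags = IORESOURCE_IRQ },
		{ .name = "korina_tx",  .start = 41, .end = 41, .flags = IORESOURCE_IRQ },
		{ .name = "korina_ovr", .start = 42, .end = 42, .flags = IORESOURCE_IRQ },
		{ .name = "korina_und", .start = 43, .end = 43, .flags = IORESOURCE_IRQ },
	};

	static struct platform_device korina0_device = {
		.name		= "korina",
		.id		= -1,
		.resource	= korina0_res,
		.num_resources	= ARRAY_SIZE(korina0_res),
		.dev.driver_data = &korina0_data,	/* probe fetches this via platform_get_drvdata() */
	};

	/* ... and in the board init code: platform_device_register(&korina0_device); */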
Diffstat (limited to 'drivers/net/korina.c')
-rw-r--r--  drivers/net/korina.c  |  1233
1 file changed, 1233 insertions, 0 deletions
diff --git a/drivers/net/korina.c b/drivers/net/korina.c
new file mode 100644
index 000000000000..1d24a73a0e1a
--- /dev/null
+++ b/drivers/net/korina.c
@@ -0,0 +1,1233 @@
1/*
2 * Driver for the IDT RC32434 (Korina) on-chip ethernet controller.
3 *
4 * Copyright 2004 IDT Inc. (rischelp@idt.com)
5 * Copyright 2006 Felix Fietkau <nbd@openwrt.org>
6 * Copyright 2008 Florian Fainelli <florian@openwrt.org>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
14 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
15 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
16 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
17 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
18 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
19 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
20 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
21 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
22 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
23 *
24 * You should have received a copy of the GNU General Public License along
25 * with this program; if not, write to the Free Software Foundation, Inc.,
26 * 675 Mass Ave, Cambridge, MA 02139, USA.
27 *
28 * Writing to a DMA status register:
29 *
30 * When clearing an event, write back the complement of the bit you have
31 * just been testing so that only that bit is cleared and the other status
32 * bits are left untouched; both the Tx and Rx DMA paths follow this rule.
33 */
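/*
 * For example (this is the pattern the Rx path below uses), a serviced
 * "done" event is acknowledged by writing back the complement of that bit,
 * leaving the remaining status bits untouched:
 *
 *	writel(~DMA_STAT_DONE, &lp->rx_dma_regs->dmas);
 */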
34
35#include <linux/module.h>
36#include <linux/kernel.h>
37#include <linux/moduleparam.h>
38#include <linux/sched.h>
39#include <linux/ctype.h>
40#include <linux/types.h>
41#include <linux/interrupt.h>
42#include <linux/init.h>
43#include <linux/ioport.h>
44#include <linux/in.h>
45#include <linux/slab.h>
46#include <linux/string.h>
47#include <linux/delay.h>
48#include <linux/netdevice.h>
49#include <linux/etherdevice.h>
50#include <linux/skbuff.h>
51#include <linux/errno.h>
52#include <linux/platform_device.h>
53#include <linux/mii.h>
54#include <linux/ethtool.h>
55#include <linux/crc32.h>
56
57#include <asm/bootinfo.h>
58#include <asm/system.h>
59#include <asm/bitops.h>
60#include <asm/pgtable.h>
61#include <asm/segment.h>
62#include <asm/io.h>
63#include <asm/dma.h>
64
65#include <asm/mach-rc32434/rb.h>
66#include <asm/mach-rc32434/rc32434.h>
67#include <asm/mach-rc32434/eth.h>
68#include <asm/mach-rc32434/dma_v.h>
69
70#define DRV_NAME "korina"
71#define DRV_VERSION "0.10"
72#define DRV_RELDATE "04Mar2008"
73
74#define STATION_ADDRESS_HIGH(dev) (((dev)->dev_addr[0] << 8) | \
75 ((dev)->dev_addr[1]))
76#define STATION_ADDRESS_LOW(dev) (((dev)->dev_addr[2] << 24) | \
77 ((dev)->dev_addr[3] << 16) | \
78 ((dev)->dev_addr[4] << 8) | \
79 ((dev)->dev_addr[5]))
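/*
 * Worked example (illustrative only): for a MAC address of
 * 00:11:22:33:44:55, STATION_ADDRESS_HIGH() evaluates to 0x0011 and
 * STATION_ADDRESS_LOW() to 0x22334455; korina_init() writes these values
 * into the ethsahN/ethsalN station address registers.
 */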
80
81#define MII_CLOCK 1250000 /* no more than 2.5MHz */
82
83/* the following must be powers of two */
84#define KORINA_NUM_RDS 64 /* number of receive descriptors */
85#define KORINA_NUM_TDS 64 /* number of transmit descriptors */
86
87#define KORINA_RBSIZE 1536 /* size of one resource buffer = Ether MTU + header and CRC */
88#define KORINA_RDS_MASK (KORINA_NUM_RDS - 1)
89#define KORINA_TDS_MASK (KORINA_NUM_TDS - 1)
90#define RD_RING_SIZE (KORINA_NUM_RDS * sizeof(struct dma_desc))
91#define TD_RING_SIZE (KORINA_NUM_TDS * sizeof(struct dma_desc))
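/*
 * Because the ring sizes are powers of two, ring indices wrap with a
 * simple AND against the masks above: e.g. (63 + 1) & KORINA_TDS_MASK
 * is 0 when walking forward, and (0 - 1) & KORINA_TDS_MASK is 63 when
 * stepping back, as korina_send_packet() and korina_rx() do.
 */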
92
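/* Tx watchdog timeout: 6000 ms worth of jiffies, i.e. 6 seconds */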
93#define TX_TIMEOUT (6000 * HZ / 1000)
94
95enum chain_status { desc_filled, desc_empty };
96#define IS_DMA_FINISHED(X) (((X) & (DMA_DESC_FINI)) != 0)
97#define IS_DMA_DONE(X) (((X) & (DMA_DESC_DONE)) != 0)
98#define RCVPKT_LENGTH(X) (((X) & ETH_RX_LEN) >> ETH_RX_LEN_BIT)
99
100/* Information that needs to be kept for each board. */
101struct korina_private {
102 struct eth_regs *eth_regs;
103 struct dma_reg *rx_dma_regs;
104 struct dma_reg *tx_dma_regs;
105 struct dma_desc *td_ring; /* transmit descriptor ring */
106 struct dma_desc *rd_ring; /* receive descriptor ring */
107
108 struct sk_buff *tx_skb[KORINA_NUM_TDS];
109 struct sk_buff *rx_skb[KORINA_NUM_RDS];
110
111 int rx_next_done;
112 int rx_chain_head;
113 int rx_chain_tail;
114 enum chain_status rx_chain_status;
115
116 int tx_next_done;
117 int tx_chain_head;
118 int tx_chain_tail;
119 enum chain_status tx_chain_status;
120 int tx_count;
121 int tx_full;
122
123 int rx_irq;
124 int tx_irq;
125 int ovr_irq;
126 int und_irq;
127
128 spinlock_t lock; /* NIC xmit lock */
129
130 int dma_halt_cnt;
131 int dma_run_cnt;
132 struct napi_struct napi;
133 struct mii_if_info mii_if;
134 struct net_device *dev;
135 int phy_addr;
136};
137
138extern unsigned int idt_cpu_freq;
139
140static inline void korina_start_dma(struct dma_reg *ch, u32 dma_addr)
141{
142 writel(0, &ch->dmandptr);
143 writel(dma_addr, &ch->dmadptr);
144}
145
146static inline void korina_abort_dma(struct net_device *dev,
147 struct dma_reg *ch)
148{
149 if (readl(&ch->dmac) & DMA_CHAN_RUN_BIT) {
150 writel(0x10, &ch->dmac);
151
152 while (!(readl(&ch->dmas) & DMA_STAT_HALT))
153 dev->trans_start = jiffies;
154
155 writel(0, &ch->dmas);
156 }
157
158 writel(0, &ch->dmadptr);
159 writel(0, &ch->dmandptr);
160}
161
162static inline void korina_chain_dma(struct dma_reg *ch, u32 dma_addr)
163{
164 writel(dma_addr, &ch->dmandptr);
165}
166
167static void korina_abort_tx(struct net_device *dev)
168{
169 struct korina_private *lp = netdev_priv(dev);
170
171 korina_abort_dma(dev, lp->tx_dma_regs);
172}
173
174static void korina_abort_rx(struct net_device *dev)
175{
176 struct korina_private *lp = netdev_priv(dev);
177
178 korina_abort_dma(dev, lp->rx_dma_regs);
179}
180
181static void korina_start_rx(struct korina_private *lp,
182 struct dma_desc *rd)
183{
184 korina_start_dma(lp->rx_dma_regs, CPHYSADDR(rd));
185}
186
187static void korina_chain_rx(struct korina_private *lp,
188 struct dma_desc *rd)
189{
190 korina_chain_dma(lp->rx_dma_regs, CPHYSADDR(rd));
191}
192
193/* transmit packet */
194static int korina_send_packet(struct sk_buff *skb, struct net_device *dev)
195{
196 struct korina_private *lp = netdev_priv(dev);
197 unsigned long flags;
198 u32 length;
199 u32 chain_index;
200 struct dma_desc *td;
201
202 spin_lock_irqsave(&lp->lock, flags);
203
204 td = &lp->td_ring[lp->tx_chain_tail];
205
206 /* Stop the queue when the ring fills up; drop the packet if it is already full */
207 if (lp->tx_count >= (KORINA_NUM_TDS - 2)) {
208 lp->tx_full = 1;
209
210 if (lp->tx_count == (KORINA_NUM_TDS - 2))
211 netif_stop_queue(dev);
212 else {
213 dev->stats.tx_dropped++;
214 dev_kfree_skb_any(skb);
215 spin_unlock_irqrestore(&lp->lock, flags);
216
217 return NETDEV_TX_BUSY;
218 }
219 }
220
221 lp->tx_count++;
222
223 lp->tx_skb[lp->tx_chain_tail] = skb;
224
225 length = skb->len;
226 dma_cache_wback((u32)skb->data, skb->len);
227
228 /* Setup the transmit descriptor. */
229 dma_cache_inv((u32) td, sizeof(*td));
230 td->ca = CPHYSADDR(skb->data);
231 chain_index = (lp->tx_chain_tail - 1) &
232 KORINA_TDS_MASK;
233
234 if (readl(&(lp->tx_dma_regs->dmandptr)) == 0) {
235 if (lp->tx_chain_status == desc_empty) {
236 /* Update tail */
237 td->control = DMA_COUNT(length) |
238 DMA_DESC_COF | DMA_DESC_IOF;
239 /* Move tail */
240 lp->tx_chain_tail = chain_index;
241 /* Write to NDPTR */
242 writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
243 &lp->tx_dma_regs->dmandptr);
244 /* Move head to tail */
245 lp->tx_chain_head = lp->tx_chain_tail;
246 } else {
247 /* Update tail */
248 td->control = DMA_COUNT(length) |
249 DMA_DESC_COF | DMA_DESC_IOF;
250 /* Link to prev */
251 lp->td_ring[chain_index].control &=
252 ~DMA_DESC_COF;
253 /* Link to prev */
254 lp->td_ring[chain_index].link = CPHYSADDR(td);
255 /* Move tail */
256 lp->tx_chain_tail = chain_index;
257 /* Write to NDPTR */
258 writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
259 &(lp->tx_dma_regs->dmandptr));
260 /* Move head to tail */
261 lp->tx_chain_head = lp->tx_chain_tail;
262 lp->tx_chain_status = desc_empty;
263 }
264 } else {
265 if (lp->tx_chain_status == desc_empty) {
266 /* Update tail */
267 td->control = DMA_COUNT(length) |
268 DMA_DESC_COF | DMA_DESC_IOF;
269 /* Move tail */
270 lp->tx_chain_tail = chain_index;
271 lp->tx_chain_status = desc_filled;
272 netif_stop_queue(dev);
273 } else {
274 /* Update tail */
275 td->control = DMA_COUNT(length) |
276 DMA_DESC_COF | DMA_DESC_IOF;
277 lp->td_ring[chain_index].control &=
278 ~DMA_DESC_COF;
279 lp->td_ring[chain_index].link = CPHYSADDR(td);
280 lp->tx_chain_tail = chain_index;
281 }
282 }
283 dma_cache_wback((u32) td, sizeof(*td));
284
285 dev->trans_start = jiffies;
286 spin_unlock_irqrestore(&lp->lock, flags);
287
288 return NETDEV_TX_OK;
289}
290
291static int mdio_read(struct net_device *dev, int mii_id, int reg)
292{
293 struct korina_private *lp = netdev_priv(dev);
294 int ret;
295
296 mii_id = ((lp->rx_irq == 0x2c ? 1 : 0) << 8);
297
298 writel(0, &lp->eth_regs->miimcfg);
299 writel(0, &lp->eth_regs->miimcmd);
300 writel(mii_id | reg, &lp->eth_regs->miimaddr);
301 writel(ETH_MII_CMD_SCN, &lp->eth_regs->miimcmd);
302
303 ret = (int)(readl(&lp->eth_regs->miimrdd));
304 return ret;
305}
306
307static void mdio_write(struct net_device *dev, int mii_id, int reg, int val)
308{
309 struct korina_private *lp = netdev_priv(dev);
310
311 mii_id = ((lp->rx_irq == 0x2c ? 1 : 0) << 8);
312
313 writel(0, &lp->eth_regs->miimcfg);
314 writel(1, &lp->eth_regs->miimcmd);
315 writel(mii_id | reg, &lp->eth_regs->miimaddr);
316 writel(ETH_MII_CMD_SCN, &lp->eth_regs->miimcmd);
317 writel(val, &lp->eth_regs->miimwtd);
318}
319
320/* Ethernet Rx DMA interrupt */
321static irqreturn_t korina_rx_dma_interrupt(int irq, void *dev_id)
322{
323 struct net_device *dev = dev_id;
324 struct korina_private *lp = netdev_priv(dev);
325 u32 dmas, dmasm;
326 irqreturn_t retval;
327
328 dmas = readl(&lp->rx_dma_regs->dmas);
329 if (dmas & (DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR)) {
330 netif_rx_schedule(dev, &lp->napi);
331
332 dmasm = readl(&lp->rx_dma_regs->dmasm);
333 writel(dmasm | (DMA_STAT_DONE |
334 DMA_STAT_HALT | DMA_STAT_ERR),
335 &lp->rx_dma_regs->dmasm);
336
337 if (dmas & DMA_STAT_ERR)
338 printk(KERN_ERR DRV_NAME "%s: DMA error\n", dev->name);
339
340 retval = IRQ_HANDLED;
341 } else
342 retval = IRQ_NONE;
343
344 return retval;
345}
346
347static int korina_rx(struct net_device *dev, int limit)
348{
349 struct korina_private *lp = netdev_priv(dev);
350 struct dma_desc *rd = &lp->rd_ring[lp->rx_next_done];
351 struct sk_buff *skb, *skb_new;
352 u8 *pkt_buf;
353 u32 devcs, pkt_len, dmas, rx_free_desc;
354 int count;
355
356 dma_cache_inv((u32)rd, sizeof(*rd));
357
358 for (count = 0; count < limit; count++) {
359
360 devcs = rd->devcs;
361
362 /* Update statistics counters */
363 if (devcs & ETH_RX_CRC)
364 dev->stats.rx_crc_errors++;
365 if (devcs & ETH_RX_LOR)
366 dev->stats.rx_length_errors++;
367 if (devcs & ETH_RX_LE)
368 dev->stats.rx_length_errors++;
369 if (devcs & ETH_RX_OVR)
370 dev->stats.rx_over_errors++;
371 if (devcs & ETH_RX_CV)
372 dev->stats.rx_frame_errors++;
373 if (devcs & ETH_RX_CES)
374 dev->stats.rx_length_errors++;
375 if (devcs & ETH_RX_MP)
376 dev->stats.multicast++;
377
378 if ((devcs & ETH_RX_LD) != ETH_RX_LD) {
379 /* check that this is a whole packet
380 * WARNING: DMA_FD bit incorrectly set
381 * in Rc32434 (errata ref #077) */
382 dev->stats.rx_errors++;
383 dev->stats.rx_dropped++;
384 }
385
386 while ((rx_free_desc = KORINA_RBSIZE - (u32)DMA_COUNT(rd->control)) != 0) {
387 /* Initialize the variables used
388 * later in this loop iteration */
389 skb_new = NULL;
390 pkt_len = RCVPKT_LENGTH(devcs);
391 skb = lp->rx_skb[lp->rx_next_done];
392
393 if ((devcs & ETH_RX_ROK)) {
394 /* must be the (first and) last
395 * descriptor then */
396 pkt_buf = (u8 *)lp->rx_skb[lp->rx_next_done]->data;
397
398 /* invalidate the cache */
399 dma_cache_inv((unsigned long)pkt_buf, pkt_len - 4);
400
401 /* Malloc up new buffer. */
402 skb_new = netdev_alloc_skb(dev, KORINA_RBSIZE + 2);
403
404 if (!skb_new)
405 break;
406 /* Do not count the CRC */
407 skb_put(skb, pkt_len - 4);
408 skb->protocol = eth_type_trans(skb, dev);
409
410 /* Pass the packet to upper layers */
411 netif_receive_skb(skb);
412 dev->last_rx = jiffies;
413 dev->stats.rx_packets++;
414 dev->stats.rx_bytes += pkt_len;
415
416 /* Update the mcast stats */
417 if (devcs & ETH_RX_MP)
418 dev->stats.multicast++;
419
420 lp->rx_skb[lp->rx_next_done] = skb_new;
421 }
422
423 rd->devcs = 0;
424
425 /* Restore descriptor's curr_addr */
426 if (skb_new)
427 rd->ca = CPHYSADDR(skb_new->data);
428 else
429 rd->ca = CPHYSADDR(skb->data);
430
431 rd->control = DMA_COUNT(KORINA_RBSIZE) |
432 DMA_DESC_COD | DMA_DESC_IOD;
433 lp->rd_ring[(lp->rx_next_done - 1) &
434 KORINA_RDS_MASK].control &=
435 ~DMA_DESC_COD;
436
437 lp->rx_next_done = (lp->rx_next_done + 1) & KORINA_RDS_MASK;
438 dma_cache_wback((u32)rd, sizeof(*rd));
439 rd = &lp->rd_ring[lp->rx_next_done];
440 writel(~DMA_STAT_DONE, &lp->rx_dma_regs->dmas);
441 }
442 }
443
444 dmas = readl(&lp->rx_dma_regs->dmas);
445
446 if (dmas & DMA_STAT_HALT) {
447 writel(~(DMA_STAT_HALT | DMA_STAT_ERR),
448 &lp->rx_dma_regs->dmas);
449
450 lp->dma_halt_cnt++;
451 rd->devcs = 0;
452 skb = lp->rx_skb[lp->rx_next_done];
453 rd->ca = CPHYSADDR(skb->data);
454 dma_cache_wback((u32)rd, sizeof(*rd));
455 korina_chain_rx(lp, rd);
456 }
457
458 return count;
459}
460
461static int korina_poll(struct napi_struct *napi, int budget)
462{
463 struct korina_private *lp =
464 container_of(napi, struct korina_private, napi);
465 struct net_device *dev = lp->dev;
466 int work_done;
467
468 work_done = korina_rx(dev, budget);
469 if (work_done < budget) {
470 netif_rx_complete(dev, napi);
471
472 writel(readl(&lp->rx_dma_regs->dmasm) &
473 ~(DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR),
474 &lp->rx_dma_regs->dmasm);
475 }
476 return work_done;
477}
478
479/*
480 * Set or clear the multicast filter for this adaptor.
481 */
482static void korina_multicast_list(struct net_device *dev)
483{
484 struct korina_private *lp = netdev_priv(dev);
485 unsigned long flags;
486 struct dev_mc_list *dmi = dev->mc_list;
487 u32 recognise = ETH_ARC_AB; /* always accept broadcasts */
488 int i;
489
490 /* Set promiscuous mode */
491 if (dev->flags & IFF_PROMISC)
492 recognise |= ETH_ARC_PRO;
493
494 else if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 4))
495 /* All multicast and broadcast */
496 recognise |= ETH_ARC_AM;
497
498 /* Build the hash table */
499 if (dev->mc_count > 4) {
500 u16 hash_table[4];
501 u32 crc;
502
503 for (i = 0; i < 4; i++)
504 hash_table[i] = 0;
505
506 for (i = 0; i < dev->mc_count; i++) {
507 char *addrs = dmi->dmi_addr;
508
509 dmi = dmi->next;
510
511 if (!(*addrs & 1))
512 continue;
513
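	/* ether_crc_le() gives the little-endian CRC-32 of the address;
	 * keeping the top six bits (crc >>= 26) yields a value 0..63,
	 * where bits 5..4 select one of the four 16-bit hash registers
	 * and the low four bits pick the bit within it, counted down
	 * from bit 15. */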
514 crc = ether_crc_le(6, addrs);
515 crc >>= 26;
516 hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
517 }
518 /* Accept filtered multicast */
519 recognise |= ETH_ARC_AFM;
520
521 /* Fill the MAC hash tables with their values */
522 writel((u32)(hash_table[1] << 16 | hash_table[0]),
523 &lp->eth_regs->ethhash0);
524 writel((u32)(hash_table[3] << 16 | hash_table[2]),
525 &lp->eth_regs->ethhash1);
526 }
527
528 spin_lock_irqsave(&lp->lock, flags);
529 writel(recognise, &lp->eth_regs->etharc);
530 spin_unlock_irqrestore(&lp->lock, flags);
531}
532
533static void korina_tx(struct net_device *dev)
534{
535 struct korina_private *lp = netdev_priv(dev);
536 struct dma_desc *td = &lp->td_ring[lp->tx_next_done];
537 u32 devcs;
538 u32 dmas;
539
540 spin_lock(&lp->lock);
541
542 /* Process all desc that are done */
543 while (IS_DMA_FINISHED(td->control)) {
544 if (lp->tx_full == 1) {
545 netif_wake_queue(dev);
546 lp->tx_full = 0;
547 }
548
549 devcs = lp->td_ring[lp->tx_next_done].devcs;
550 if ((devcs & (ETH_TX_FD | ETH_TX_LD)) !=
551 (ETH_TX_FD | ETH_TX_LD)) {
552 dev->stats.tx_errors++;
553 dev->stats.tx_dropped++;
554
555 /* Should never happen */
556 printk(KERN_ERR DRV_NAME "%s: split tx ignored\n",
557 dev->name);
558 } else if (devcs & ETH_TX_TOK) {
559 dev->stats.tx_packets++;
560 dev->stats.tx_bytes +=
561 lp->tx_skb[lp->tx_next_done]->len;
562 } else {
563 dev->stats.tx_errors++;
564 dev->stats.tx_dropped++;
565
566 /* Underflow */
567 if (devcs & ETH_TX_UND)
568 dev->stats.tx_fifo_errors++;
569
570 /* Oversized frame */
571 if (devcs & ETH_TX_OF)
572 dev->stats.tx_aborted_errors++;
573
574 /* Excessive deferrals */
575 if (devcs & ETH_TX_ED)
576 dev->stats.tx_carrier_errors++;
577
578 /* Collisions: medium busy */
579 if (devcs & ETH_TX_EC)
580 dev->stats.collisions++;
581
582 /* Late collision */
583 if (devcs & ETH_TX_LC)
584 dev->stats.tx_window_errors++;
585 }
586
587 /* We must always free the original skb */
588 if (lp->tx_skb[lp->tx_next_done]) {
589 dev_kfree_skb_any(lp->tx_skb[lp->tx_next_done]);
590 lp->tx_skb[lp->tx_next_done] = NULL;
591 }
592
593 lp->td_ring[lp->tx_next_done].control = DMA_DESC_IOF;
594 lp->td_ring[lp->tx_next_done].devcs = ETH_TX_FD | ETH_TX_LD;
595 lp->td_ring[lp->tx_next_done].link = 0;
596 lp->td_ring[lp->tx_next_done].ca = 0;
597 lp->tx_count--;
598
599 /* Go on to next transmission */
600 lp->tx_next_done = (lp->tx_next_done + 1) & KORINA_TDS_MASK;
601 td = &lp->td_ring[lp->tx_next_done];
602
603 }
604
605 /* Clear the DMA status register */
606 dmas = readl(&lp->tx_dma_regs->dmas);
607 writel(~dmas, &lp->tx_dma_regs->dmas);
608
609 writel(readl(&lp->tx_dma_regs->dmasm) &
610 ~(DMA_STAT_FINI | DMA_STAT_ERR),
611 &lp->tx_dma_regs->dmasm);
612
613 spin_unlock(&lp->lock);
614}
615
616static irqreturn_t
617korina_tx_dma_interrupt(int irq, void *dev_id)
618{
619 struct net_device *dev = dev_id;
620 struct korina_private *lp = netdev_priv(dev);
621 u32 dmas, dmasm;
622 irqreturn_t retval;
623
624 dmas = readl(&lp->tx_dma_regs->dmas);
625
626 if (dmas & (DMA_STAT_FINI | DMA_STAT_ERR)) {
627 korina_tx(dev);
628
629 dmasm = readl(&lp->tx_dma_regs->dmasm);
630 writel(dmasm | (DMA_STAT_FINI | DMA_STAT_ERR),
631 &lp->tx_dma_regs->dmasm);
632
633 if (lp->tx_chain_status == desc_filled &&
634 (readl(&(lp->tx_dma_regs->dmandptr)) == 0)) {
635 writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
636 &(lp->tx_dma_regs->dmandptr));
637 lp->tx_chain_status = desc_empty;
638 lp->tx_chain_head = lp->tx_chain_tail;
639 dev->trans_start = jiffies;
640 }
641 if (dmas & DMA_STAT_ERR)
642 printk(KERN_ERR DRV_NAME "%s: DMA error\n", dev->name);
643
644 retval = IRQ_HANDLED;
645 } else
646 retval = IRQ_NONE;
647
648 return retval;
649}
650
651
652static void korina_check_media(struct net_device *dev, unsigned int init_media)
653{
654 struct korina_private *lp = netdev_priv(dev);
655
656 mii_check_media(&lp->mii_if, 0, init_media);
657
658 if (lp->mii_if.full_duplex)
659 writel(readl(&lp->eth_regs->ethmac2) | ETH_MAC2_FD,
660 &lp->eth_regs->ethmac2);
661 else
662 writel(readl(&lp->eth_regs->ethmac2) & ~ETH_MAC2_FD,
663 &lp->eth_regs->ethmac2);
664}
665
666static void korina_set_carrier(struct mii_if_info *mii)
667{
668 if (mii->force_media) {
669 /* autoneg is off: Link is always assumed to be up */
670 if (!netif_carrier_ok(mii->dev))
671 netif_carrier_on(mii->dev);
672 } else /* Let MII library update carrier status */
673 korina_check_media(mii->dev, 0);
674}
675
676static int korina_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
677{
678 struct korina_private *lp = netdev_priv(dev);
679 struct mii_ioctl_data *data = if_mii(rq);
680 int rc;
681
682 if (!netif_running(dev))
683 return -EINVAL;
684 spin_lock_irq(&lp->lock);
685 rc = generic_mii_ioctl(&lp->mii_if, data, cmd, NULL);
686 spin_unlock_irq(&lp->lock);
687 korina_set_carrier(&lp->mii_if);
688
689 return rc;
690}
691
692/* ethtool helpers */
693static void netdev_get_drvinfo(struct net_device *dev,
694 struct ethtool_drvinfo *info)
695{
696 struct korina_private *lp = netdev_priv(dev);
697
698 strcpy(info->driver, DRV_NAME);
699 strcpy(info->version, DRV_VERSION);
700 strcpy(info->bus_info, lp->dev->name);
701}
702
703static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
704{
705 struct korina_private *lp = netdev_priv(dev);
706 int rc;
707
708 spin_lock_irq(&lp->lock);
709 rc = mii_ethtool_gset(&lp->mii_if, cmd);
710 spin_unlock_irq(&lp->lock);
711
712 return rc;
713}
714
715static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
716{
717 struct korina_private *lp = netdev_priv(dev);
718 int rc;
719
720 spin_lock_irq(&lp->lock);
721 rc = mii_ethtool_sset(&lp->mii_if, cmd);
722 spin_unlock_irq(&lp->lock);
723 korina_set_carrier(&lp->mii_if);
724
725 return rc;
726}
727
728static u32 netdev_get_link(struct net_device *dev)
729{
730 struct korina_private *lp = netdev_priv(dev);
731
732 return mii_link_ok(&lp->mii_if);
733}
734
735static struct ethtool_ops netdev_ethtool_ops = {
736 .get_drvinfo = netdev_get_drvinfo,
737 .get_settings = netdev_get_settings,
738 .set_settings = netdev_set_settings,
739 .get_link = netdev_get_link,
740};
741
742static void korina_alloc_ring(struct net_device *dev)
743{
744 struct korina_private *lp = netdev_priv(dev);
745 int i;
746
747 /* Initialize the transmit descriptors */
748 for (i = 0; i < KORINA_NUM_TDS; i++) {
749 lp->td_ring[i].control = DMA_DESC_IOF;
750 lp->td_ring[i].devcs = ETH_TX_FD | ETH_TX_LD;
751 lp->td_ring[i].ca = 0;
752 lp->td_ring[i].link = 0;
753 }
754 lp->tx_next_done = lp->tx_chain_head = lp->tx_chain_tail =
755 lp->tx_full = lp->tx_count = 0;
756 lp->tx_chain_status = desc_empty;
757
758 /* Initialize the receive descriptors */
759 for (i = 0; i < KORINA_NUM_RDS; i++) {
760 struct sk_buff *skb;
761
762 skb = dev_alloc_skb(KORINA_RBSIZE + 2);
763 if (!skb)
764 break;
765 skb_reserve(skb, 2);
766 lp->rx_skb[i] = skb;
767 lp->rd_ring[i].control = DMA_DESC_IOD |
768 DMA_COUNT(KORINA_RBSIZE);
769 lp->rd_ring[i].devcs = 0;
770 lp->rd_ring[i].ca = CPHYSADDR(skb->data);
771 lp->rd_ring[i].link = CPHYSADDR(&lp->rd_ring[i+1]);
772 }
773
774 /* Loop the last descriptor back to the first one */
775 lp->rd_ring[i - 1].link = CPHYSADDR(&lp->rd_ring[0]);
776 lp->rx_next_done = 0;
777
778 lp->rd_ring[i - 1].control |= DMA_DESC_COD;
779 lp->rx_chain_head = 0;
780 lp->rx_chain_tail = 0;
781 lp->rx_chain_status = desc_empty;
782}
783
784static void korina_free_ring(struct net_device *dev)
785{
786 struct korina_private *lp = netdev_priv(dev);
787 int i;
788
789 for (i = 0; i < KORINA_NUM_RDS; i++) {
790 lp->rd_ring[i].control = 0;
791 if (lp->rx_skb[i])
792 dev_kfree_skb_any(lp->rx_skb[i]);
793 lp->rx_skb[i] = NULL;
794 }
795
796 for (i = 0; i < KORINA_NUM_TDS; i++) {
797 lp->td_ring[i].control = 0;
798 if (lp->tx_skb[i])
799 dev_kfree_skb_any(lp->tx_skb[i]);
800 lp->tx_skb[i] = NULL;
801 }
802}
803
804/*
805 * Initialize the RC32434 ethernet controller.
806 */
807static int korina_init(struct net_device *dev)
808{
809 struct korina_private *lp = netdev_priv(dev);
810
811 /* Disable DMA */
812 korina_abort_tx(dev);
813 korina_abort_rx(dev);
814
815 /* reset ethernet logic */
816 writel(0, &lp->eth_regs->ethintfc);
817 while ((readl(&lp->eth_regs->ethintfc) & ETH_INT_FC_RIP))
818 dev->trans_start = jiffies;
819
820 /* Enable Ethernet Interface */
821 writel(ETH_INT_FC_EN, &lp->eth_regs->ethintfc);
822
823 /* Allocate rings */
824 korina_alloc_ring(dev);
825
826 writel(0, &lp->rx_dma_regs->dmas);
827 /* Start Rx DMA */
828 korina_start_rx(lp, &lp->rd_ring[0]);
829
830 writel(readl(&lp->tx_dma_regs->dmasm) &
831 ~(DMA_STAT_FINI | DMA_STAT_ERR),
832 &lp->tx_dma_regs->dmasm);
833 writel(readl(&lp->rx_dma_regs->dmasm) &
834 ~(DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR),
835 &lp->rx_dma_regs->dmasm);
836
837 /* Accept only packets destined for this Ethernet device address */
838 writel(ETH_ARC_AB, &lp->eth_regs->etharc);
839
840 /* Set all Ether station address registers to their initial values */
841 writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal0);
842 writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah0);
843
844 writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal1);
845 writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah1);
846
847 writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal2);
848 writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah2);
849
850 writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal3);
851 writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah3);
852
853
854 /* Pad Enable, CRC Enable, Full Duplex set */
855 writel(ETH_MAC2_PE | ETH_MAC2_CEN | ETH_MAC2_FD,
856 &lp->eth_regs->ethmac2);
857
858 /* Back to back inter-packet-gap */
859 writel(0x15, &lp->eth_regs->ethipgt);
860 /* Non - Back to back inter-packet-gap */
861 writel(0x12, &lp->eth_regs->ethipgr);
862
863 /* Management Clock Prescaler Divisor
864 * Clock independent setting */
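	/* Worked example (illustrative only, assuming idt_cpu_freq is
	 * 200000000): 200000000 / 1250000 = 160; the "+ 1" followed by
	 * "& ~1" rounds an odd result up to the next even value, so 160 is
	 * written to ethmcp, keeping the management clock at or below the
	 * 2.5 MHz limit noted at MII_CLOCK. */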
865 writel(((idt_cpu_freq) / MII_CLOCK + 1) & ~1,
866 &lp->eth_regs->ethmcp);
867
868 /* Don't start transmitting until the FIFO holds at least 48 bytes */
869 writel(48, &lp->eth_regs->ethfifott);
870
871 writel(ETH_MAC1_RE, &lp->eth_regs->ethmac1);
872
873 napi_enable(&lp->napi);
874 netif_start_queue(dev);
875
876 return 0;
877}
878
879/*
880 * Restart the RC32434 ethernet controller.
881 * FIXME: check the return status where we call it
882 */
883static int korina_restart(struct net_device *dev)
884{
885 struct korina_private *lp = netdev_priv(dev);
886 int ret = 0;
887
888 /*
889 * Disable interrupts
890 */
891 disable_irq(lp->rx_irq);
892 disable_irq(lp->tx_irq);
893 disable_irq(lp->ovr_irq);
894 disable_irq(lp->und_irq);
895
896 writel(readl(&lp->tx_dma_regs->dmasm) |
897 DMA_STAT_FINI | DMA_STAT_ERR,
898 &lp->tx_dma_regs->dmasm);
899 writel(readl(&lp->rx_dma_regs->dmasm) |
900 DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR,
901 &lp->rx_dma_regs->dmasm);
902
903 korina_free_ring(dev);
904
905 ret = korina_init(dev);
906 if (ret < 0) {
907 printk(KERN_ERR DRV_NAME "%s: cannot restart device\n",
908 dev->name);
909 return ret;
910 }
911 korina_multicast_list(dev);
912
913 enable_irq(lp->und_irq);
914 enable_irq(lp->ovr_irq);
915 enable_irq(lp->tx_irq);
916 enable_irq(lp->rx_irq);
917
918 return ret;
919}
920
921static void korina_clear_and_restart(struct net_device *dev, u32 value)
922{
923 struct korina_private *lp = netdev_priv(dev);
924
925 netif_stop_queue(dev);
926 writel(value, &lp->eth_regs->ethintfc);
927 korina_restart(dev);
928}
929
930/* Ethernet Tx Underflow interrupt */
931static irqreturn_t korina_und_interrupt(int irq, void *dev_id)
932{
933 struct net_device *dev = dev_id;
934 struct korina_private *lp = netdev_priv(dev);
935 unsigned int und;
936
937 spin_lock(&lp->lock);
938
939 und = readl(&lp->eth_regs->ethintfc);
940
941 if (und & ETH_INT_FC_UND)
942 korina_clear_and_restart(dev, und & ~ETH_INT_FC_UND);
943
944 spin_unlock(&lp->lock);
945
946 return IRQ_HANDLED;
947}
948
949static void korina_tx_timeout(struct net_device *dev)
950{
951 struct korina_private *lp = netdev_priv(dev);
952 unsigned long flags;
953
954 spin_lock_irqsave(&lp->lock, flags);
955 korina_restart(dev);
956 spin_unlock_irqrestore(&lp->lock, flags);
957}
958
959/* Ethernet Rx Overflow interrupt */
960static irqreturn_t
961korina_ovr_interrupt(int irq, void *dev_id)
962{
963 struct net_device *dev = dev_id;
964 struct korina_private *lp = netdev_priv(dev);
965 unsigned int ovr;
966
967 spin_lock(&lp->lock);
968 ovr = readl(&lp->eth_regs->ethintfc);
969
970 if (ovr & ETH_INT_FC_OVR)
971 korina_clear_and_restart(dev, ovr & ~ETH_INT_FC_OVR);
972
973 spin_unlock(&lp->lock);
974
975 return IRQ_HANDLED;
976}
977
978#ifdef CONFIG_NET_POLL_CONTROLLER
979static void korina_poll_controller(struct net_device *dev)
980{
981 disable_irq(dev->irq);
982 korina_tx_dma_interrupt(dev->irq, dev);
983 enable_irq(dev->irq);
984}
985#endif
986
987static int korina_open(struct net_device *dev)
988{
989 struct korina_private *lp = netdev_priv(dev);
990 int ret = 0;
991
992 /* Initialize */
993 ret = korina_init(dev);
994 if (ret < 0) {
995 printk(KERN_ERR DRV_NAME "%s: cannot open device\n", dev->name);
996 goto out;
997 }
998
999 /* Install the interrupt handlers
1000 * that handle the Done, Finished,
1001 * Ovr and Und events */
1002 ret = request_irq(lp->rx_irq, &korina_rx_dma_interrupt,
1003 IRQF_SHARED | IRQF_DISABLED, "Korina ethernet Rx", dev);
1004 if (ret < 0) {
1005 printk(KERN_ERR DRV_NAME "%s: unable to get Rx DMA IRQ %d\n",
1006 dev->name, lp->rx_irq);
1007 goto err_release;
1008 }
1009 ret = request_irq(lp->tx_irq, &korina_tx_dma_interrupt,
1010 IRQF_SHARED | IRQF_DISABLED, "Korina ethernet Tx", dev);
1011 if (ret < 0) {
1012 printk(KERN_ERR DRV_NAME "%s: unable to get Tx DMA IRQ %d\n",
1013 dev->name, lp->tx_irq);
1014 goto err_free_rx_irq;
1015 }
1016
1017 /* Install handler for overrun error. */
1018 ret = request_irq(lp->ovr_irq, &korina_ovr_interrupt,
1019 IRQF_SHARED | IRQF_DISABLED, "Ethernet Overflow", dev);
1020 if (ret < 0) {
1021 printk(KERN_ERR DRV_NAME"%s: unable to get OVR IRQ %d\n",
1022 dev->name, lp->ovr_irq);
1023 goto err_free_tx_irq;
1024 }
1025
1026 /* Install handler for underflow error. */
1027 ret = request_irq(lp->und_irq, &korina_und_interrupt,
1028 IRQF_SHARED | IRQF_DISABLED, "Ethernet Underflow", dev);
1029 if (ret < 0) {
1030 printk(KERN_ERR DRV_NAME "%s: unable to get UND IRQ %d\n",
1031 dev->name, lp->und_irq);
1032 goto err_free_ovr_irq;
1033 }
1034
1035err_free_ovr_irq:
1036 free_irq(lp->ovr_irq, dev);
1037err_free_tx_irq:
1038 free_irq(lp->tx_irq, dev);
1039err_free_rx_irq:
1040 free_irq(lp->rx_irq, dev);
1041err_release:
1042 korina_free_ring(dev);
1043 goto out;
1044out:
1045 return ret;
1046}
1047
1048static int korina_close(struct net_device *dev)
1049{
1050 struct korina_private *lp = netdev_priv(dev);
1051 u32 tmp;
1052
1053 /* Disable interrupts */
1054 disable_irq(lp->rx_irq);
1055 disable_irq(lp->tx_irq);
1056 disable_irq(lp->ovr_irq);
1057 disable_irq(lp->und_irq);
1058
1059 korina_abort_tx(dev);
1060 tmp = readl(&lp->tx_dma_regs->dmasm);
1061 tmp = tmp | DMA_STAT_FINI | DMA_STAT_ERR;
1062 writel(tmp, &lp->tx_dma_regs->dmasm);
1063
1064 korina_abort_rx(dev);
1065 tmp = readl(&lp->rx_dma_regs->dmasm);
1066 tmp = tmp | DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR;
1067 writel(tmp, &lp->rx_dma_regs->dmasm);
1068
1069 korina_free_ring(dev);
1070
1071 free_irq(lp->rx_irq, dev);
1072 free_irq(lp->tx_irq, dev);
1073 free_irq(lp->ovr_irq, dev);
1074 free_irq(lp->und_irq, dev);
1075
1076 return 0;
1077}
1078
1079static int korina_probe(struct platform_device *pdev)
1080{
1081 struct korina_device *bif = platform_get_drvdata(pdev);
1082 struct korina_private *lp;
1083 struct net_device *dev;
1084 struct resource *r;
1085 int retval, err;
1086
1087 dev = alloc_etherdev(sizeof(struct korina_private));
1088 if (!dev) {
1089 printk(KERN_ERR DRV_NAME ": alloc_etherdev failed\n");
1090 return -ENOMEM;
1091 }
1092 SET_NETDEV_DEV(dev, &pdev->dev);
1093 platform_set_drvdata(pdev, dev);
1094 lp = netdev_priv(dev);
1095
1096 bif->dev = dev;
1097 memcpy(dev->dev_addr, bif->mac, 6);
1098
1099 lp->rx_irq = platform_get_irq_byname(pdev, "korina_rx");
1100 lp->tx_irq = platform_get_irq_byname(pdev, "korina_tx");
1101 lp->ovr_irq = platform_get_irq_byname(pdev, "korina_ovr");
1102 lp->und_irq = platform_get_irq_byname(pdev, "korina_und");
1103
1104 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_regs");
1105 dev->base_addr = r->start;
1106 lp->eth_regs = ioremap_nocache(r->start, r->end - r->start);
1107 if (!lp->eth_regs) {
1108 printk(KERN_ERR DRV_NAME "cannot remap registers\n");
1109 retval = -ENXIO;
1110 goto probe_err_out;
1111 }
1112
1113 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_dma_rx");
1114 lp->rx_dma_regs = ioremap_nocache(r->start, r->end - r->start);
1115 if (!lp->rx_dma_regs) {
1116 printk(KERN_ERR DRV_NAME "cannot remap Rx DMA registers\n");
1117 retval = -ENXIO;
1118 goto probe_err_dma_rx;
1119 }
1120
1121 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_dma_tx");
1122 lp->tx_dma_regs = ioremap_nocache(r->start, r->end - r->start);
1123 if (!lp->tx_dma_regs) {
1124 printk(KERN_ERR DRV_NAME "cannot remap Tx DMA registers\n");
1125 retval = -ENXIO;
1126 goto probe_err_dma_tx;
1127 }
1128
1129 lp->td_ring = kmalloc(TD_RING_SIZE + RD_RING_SIZE, GFP_KERNEL);
1130 if (!lp->td_ring) {
1131 printk(KERN_ERR DRV_NAME "cannot allocate descriptors\n");
1132 retval = -ENOMEM;
1133 goto probe_err_td_ring;
1134 }
1135
1136 dma_cache_inv((unsigned long)(lp->td_ring),
1137 TD_RING_SIZE + RD_RING_SIZE);
1138
1139 /* now convert TD_RING pointer to KSEG1 */
1140 lp->td_ring = (struct dma_desc *)KSEG1ADDR(lp->td_ring);
1141 lp->rd_ring = &lp->td_ring[KORINA_NUM_TDS];
1142
1143 spin_lock_init(&lp->lock);
1144 /* just use the rx dma irq */
1145 dev->irq = lp->rx_irq;
1146 lp->dev = dev;
1147
1148 dev->open = korina_open;
1149 dev->stop = korina_close;
1150 dev->hard_start_xmit = korina_send_packet;
1151 dev->set_multicast_list = &korina_multicast_list;
1152 dev->ethtool_ops = &netdev_ethtool_ops;
1153 dev->tx_timeout = korina_tx_timeout;
1154 dev->watchdog_timeo = TX_TIMEOUT;
1155 dev->do_ioctl = &korina_ioctl;
1156#ifdef CONFIG_NET_POLL_CONTROLLER
1157 dev->poll_controller = korina_poll_controller;
1158#endif
1159 netif_napi_add(dev, &lp->napi, korina_poll, 64);
1160
1161 lp->phy_addr = (((lp->rx_irq == 0x2c? 1:0) << 8) | 0x05);
1162 lp->mii_if.dev = dev;
1163 lp->mii_if.mdio_read = mdio_read;
1164 lp->mii_if.mdio_write = mdio_write;
1165 lp->mii_if.phy_id = lp->phy_addr;
1166 lp->mii_if.phy_id_mask = 0x1f;
1167 lp->mii_if.reg_num_mask = 0x1f;
1168
1169 err = register_netdev(dev);
1170 if (err) {
1171 printk(KERN_ERR DRV_NAME
1172 ": cannot register net device %d\n", err);
1173 retval = -EINVAL;
1174 goto probe_err_register;
1175 }
1176 return 0;
1177
1178probe_err_register:
1179 kfree(lp->td_ring);
1180probe_err_td_ring:
1181 iounmap(lp->tx_dma_regs);
1182probe_err_dma_tx:
1183 iounmap(lp->rx_dma_regs);
1184probe_err_dma_rx:
1185 iounmap(lp->eth_regs);
1186probe_err_out:
1187 free_netdev(dev);
1188 return retval;
1189}
1190
1191static int korina_remove(struct platform_device *pdev)
1192{
1193 struct net_device *dev = platform_get_drvdata(pdev);
1194 struct korina_private *lp = netdev_priv(dev);
1195
1196 if (lp->eth_regs)
1197 iounmap(lp->eth_regs);
1198 if (lp->rx_dma_regs)
1199 iounmap(lp->rx_dma_regs);
1200 if (lp->tx_dma_regs)
1201 iounmap(lp->tx_dma_regs);
1202
1203 platform_set_drvdata(pdev, NULL);
1204 unregister_netdev(dev);
1205 free_netdev(dev);
1206
1207 return 0;
1208}
1209
1210static struct platform_driver korina_driver = {
1211 .driver.name = "korina",
1212 .probe = korina_probe,
1213 .remove = korina_remove,
1214};
1215
1216static int __init korina_init_module(void)
1217{
1218 return platform_driver_register(&korina_driver);
1219}
1220
1221static void __exit korina_cleanup_module(void)
1222{
1223 platform_driver_unregister(&korina_driver);
1224}
1225
1226module_init(korina_init_module);
1227module_exit(korina_cleanup_module);
1228
1229MODULE_AUTHOR("Philip Rischel <rischelp@idt.com>");
1230MODULE_AUTHOR("Felix Fietkau <nbd@openwrt.org>");
1231MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
1232MODULE_DESCRIPTION("IDT RC32434 (Korina) Ethernet driver");
1233MODULE_LICENSE("GPL");