author    Ilya Yanok <yanok@emcraft.com>	2009-03-12 02:26:02 -0400
committer David S. Miller <davem@davemloft.net>	2009-03-12 02:26:02 -0400
commit    4796417417a62e2ae83d92cb92e1ecf9ec67b5f5 (patch)
tree      b2ec9404d56cde13742a7011442ecff9f80b4406 /drivers/net/dnet.c
parent    ff8cf9a93800e8118ea097c1aba7203d59a0f3f1 (diff)
dnet: Dave DNET ethernet controller driver (updated)

Driver for the Dave DNET ethernet controller found on the Dave/DENX
QongEVB-LITE FPGA. Heavily based on the Dave sources; I have just adapted
it to the current kernel version and done some code cleanup.

Signed-off-by: Ilya Yanok <yanok@emcraft.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/dnet.c')
-rw-r--r--	drivers/net/dnet.c	994
1 file changed, 994 insertions, 0 deletions
diff --git a/drivers/net/dnet.c b/drivers/net/dnet.c
new file mode 100644
index 000000000000..92c3bd3a4721
--- /dev/null
+++ b/drivers/net/dnet.c
@@ -0,0 +1,994 @@
1/*
2 * Dave DNET Ethernet Controller driver
3 *
4 * Copyright (C) 2008 Dave S.r.l. <www.dave.eu>
5 * Copyright (C) 2009 Ilya Yanok, Emcraft Systems Ltd, <yanok@emcraft.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11#include <linux/version.h>
12#include <linux/module.h>
13#include <linux/moduleparam.h>
14#include <linux/kernel.h>
15#include <linux/types.h>
16#include <linux/slab.h>
17#include <linux/delay.h>
18#include <linux/init.h>
19#include <linux/netdevice.h>
20#include <linux/etherdevice.h>
21#include <linux/dma-mapping.h>
22#include <linux/platform_device.h>
23#include <linux/phy.h>
24#include <linux/platform_device.h>
25
26#include "dnet.h"
27
28#undef DEBUG
29
30/* function for reading internal MAC register */
31u16 dnet_readw_mac(struct dnet *bp, u16 reg)
32{
33 u16 data_read;
34
35 /* issue a read */
36 dnet_writel(bp, reg, MACREG_ADDR);
37
38 /* since a read/write op to the MAC is very slow,
39 * we must wait before reading the data */
40 ndelay(500);
41
42 /* read data read from the MAC register */
43 data_read = dnet_readl(bp, MACREG_DATA);
44
45 /* all done */
46 return data_read;
47}
48
49/* function for writing internal MAC register */
50void dnet_writew_mac(struct dnet *bp, u16 reg, u16 val)
51{
52 /* load data to write */
53 dnet_writel(bp, val, MACREG_DATA);
54
55 /* issue a write */
56 dnet_writel(bp, reg | DNET_INTERNAL_WRITE, MACREG_ADDR);
57
58 /* since a read/write op to the MAC is very slow,
59 * we must wait before exiting */
60 ndelay(500);
61}
62
63static void __dnet_set_hwaddr(struct dnet *bp)
64{
65 u16 tmp;
66
67 tmp = cpu_to_be16(*((u16 *) bp->dev->dev_addr));
68 dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_0_REG, tmp);
69 tmp = cpu_to_be16(*((u16 *) (bp->dev->dev_addr + 2)));
70 dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_1_REG, tmp);
71 tmp = cpu_to_be16(*((u16 *) (bp->dev->dev_addr + 4)));
72 dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_2_REG, tmp);
73}
74
75static void __devinit dnet_get_hwaddr(struct dnet *bp)
76{
77 u16 tmp;
78 u8 addr[6];
79
80 /*
81 * from MAC docs:
82 * "Note that the MAC address is stored in the registers in Hexadecimal
83 * form. For example, to set the MAC Address to: AC-DE-48-00-00-80
84 * would require writing 0xAC (octet 0) to address 0x0B (high byte of
85 * Mac_addr[15:0]), 0xDE (octet 1) to address 0x0A (Low byte of
86 * Mac_addr[15:0]), 0x48 (octet 2) to address 0x0D (high byte of
87 * Mac_addr[15:0]), 0x00 (octet 3) to address 0x0C (Low byte of
88 * Mac_addr[15:0]), 0x00 (octet 4) to address 0x0F (high byte of
89 * Mac_addr[15:0]), and 0x80 (octet 5) to address * 0x0E (Low byte of
90 * Mac_addr[15:0]).
91 */
92 tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_0_REG);
93 *((u16 *) addr) = be16_to_cpu(tmp);
94 tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_1_REG);
95 *((u16 *) (addr + 2)) = be16_to_cpu(tmp);
96 tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_2_REG);
97 *((u16 *) (addr + 4)) = be16_to_cpu(tmp);
98
99 if (is_valid_ether_addr(addr))
100 memcpy(bp->dev->dev_addr, addr, sizeof(addr));
101}
102
103static int dnet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
104{
105 struct dnet *bp = bus->priv;
106 u16 value;
107
108 while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG)
109 & DNET_INTERNAL_GMII_MNG_CMD_FIN))
110 cpu_relax();
111
112 /* only 5 bits allowed for phy-addr and reg_offset */
113 mii_id &= 0x1f;
114 regnum &= 0x1f;
115
116 /* prepare reg_value for a read */
117 value = (mii_id << 8);
118 value |= regnum;
119
120 /* write control word */
121 dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG, value);
122
123 /* wait for end of transfer */
124 while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG)
125 & DNET_INTERNAL_GMII_MNG_CMD_FIN))
126 cpu_relax();
127
128 value = dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_DAT_REG);
129
130 pr_debug("mdio_read %02x:%02x <- %04x\n", mii_id, regnum, value);
131
132 return value;
133}
134
135static int dnet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
136 u16 value)
137{
138 struct dnet *bp = bus->priv;
139 u16 tmp;
140
141 pr_debug("mdio_write %02x:%02x <- %04x\n", mii_id, regnum, value);
142
143 while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG)
144 & DNET_INTERNAL_GMII_MNG_CMD_FIN))
145 cpu_relax();
146
147 /* prepare for a write operation */
148 tmp = (1 << 13);
149
150 /* only 5 bits allowed for phy-addr and reg_offset */
151 mii_id &= 0x1f;
152 regnum &= 0x1f;
153
154 /* only 16 bits on data */
155 value &= 0xffff;
156
157 /* prepare reg_value for a write */
158 tmp |= (mii_id << 8);
159 tmp |= regnum;
160
161 /* write data to write first */
162 dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_DAT_REG, value);
163
164 /* write control word */
165 dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG, tmp);
166
167 while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG)
168 & DNET_INTERNAL_GMII_MNG_CMD_FIN))
169 cpu_relax();
170
171 return 0;
172}
173
174static int dnet_mdio_reset(struct mii_bus *bus)
175{
176 return 0;
177}
178
179static void dnet_handle_link_change(struct net_device *dev)
180{
181 struct dnet *bp = netdev_priv(dev);
182 struct phy_device *phydev = bp->phy_dev;
183 unsigned long flags;
184 u32 mode_reg, ctl_reg;
185
186 int status_change = 0;
187
188 spin_lock_irqsave(&bp->lock, flags);
189
190 mode_reg = dnet_readw_mac(bp, DNET_INTERNAL_MODE_REG);
191 ctl_reg = dnet_readw_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG);
192
193 if (phydev->link) {
194 if (bp->duplex != phydev->duplex) {
195 if (phydev->duplex)
196 ctl_reg &=
197 ~(DNET_INTERNAL_RXTX_CONTROL_ENABLEHALFDUP);
198 else
199 ctl_reg |=
200 DNET_INTERNAL_RXTX_CONTROL_ENABLEHALFDUP;
201
202 bp->duplex = phydev->duplex;
203 status_change = 1;
204 }
205
206 if (bp->speed != phydev->speed) {
207 status_change = 1;
208 switch (phydev->speed) {
209 case 1000:
210 mode_reg |= DNET_INTERNAL_MODE_GBITEN;
211 break;
212 case 100:
213 case 10:
214 mode_reg &= ~DNET_INTERNAL_MODE_GBITEN;
215 break;
216 default:
217 printk(KERN_WARNING
218 "%s: Ack! Speed (%d) is not "
219 "10/100/1000!\n", dev->name,
220 phydev->speed);
221 break;
222 }
223 bp->speed = phydev->speed;
224 }
225 }
226
227 if (phydev->link != bp->link) {
228 if (phydev->link) {
229 mode_reg |=
230 (DNET_INTERNAL_MODE_RXEN | DNET_INTERNAL_MODE_TXEN);
231 } else {
232 mode_reg &=
233 ~(DNET_INTERNAL_MODE_RXEN |
234 DNET_INTERNAL_MODE_TXEN);
235 bp->speed = 0;
236 bp->duplex = -1;
237 }
238 bp->link = phydev->link;
239
240 status_change = 1;
241 }
242
243 if (status_change) {
244 dnet_writew_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG, ctl_reg);
245 dnet_writew_mac(bp, DNET_INTERNAL_MODE_REG, mode_reg);
246 }
247
248 spin_unlock_irqrestore(&bp->lock, flags);
249
250 if (status_change) {
251 if (phydev->link)
252 printk(KERN_INFO "%s: link up (%d/%s)\n",
253 dev->name, phydev->speed,
254 DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
255 else
256 printk(KERN_INFO "%s: link down\n", dev->name);
257 }
258}
259
260static int dnet_mii_probe(struct net_device *dev)
261{
262 struct dnet *bp = netdev_priv(dev);
263 struct phy_device *phydev = NULL;
264 int phy_addr;
265
266 /* find the first phy */
267 for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
268 if (bp->mii_bus->phy_map[phy_addr]) {
269 phydev = bp->mii_bus->phy_map[phy_addr];
270 break;
271 }
272 }
273
274 if (!phydev) {
275 printk(KERN_ERR "%s: no PHY found\n", dev->name);
276 return -ENODEV;
277 }
278
279 /* TODO : add pin_irq */
280
281 /* attach the mac to the phy */
282 if (bp->capabilities & DNET_HAS_RMII) {
283 phydev = phy_connect(dev, phydev->dev.bus_id,
284 &dnet_handle_link_change, 0,
285 PHY_INTERFACE_MODE_RMII);
286 } else {
287 phydev = phy_connect(dev, phydev->dev.bus_id,
288 &dnet_handle_link_change, 0,
289 PHY_INTERFACE_MODE_MII);
290 }
291
292 if (IS_ERR(phydev)) {
293 printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
294 return PTR_ERR(phydev);
295 }
296
297 /* mask with MAC supported features */
298 if (bp->capabilities & DNET_HAS_GIGABIT)
299 phydev->supported &= PHY_GBIT_FEATURES;
300 else
301 phydev->supported &= PHY_BASIC_FEATURES;
302
303 phydev->supported |= SUPPORTED_Asym_Pause | SUPPORTED_Pause;
304
305 phydev->advertising = phydev->supported;
306
307 bp->link = 0;
308 bp->speed = 0;
309 bp->duplex = -1;
310 bp->phy_dev = phydev;
311
312 return 0;
313}
314
315static int dnet_mii_init(struct dnet *bp)
316{
317 int err, i;
318
319 bp->mii_bus = mdiobus_alloc();
320 if (bp->mii_bus == NULL)
321 return -ENOMEM;
322
323 bp->mii_bus->name = "dnet_mii_bus";
324 bp->mii_bus->read = &dnet_mdio_read;
325 bp->mii_bus->write = &dnet_mdio_write;
326 bp->mii_bus->reset = &dnet_mdio_reset;
327
328 snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%x", 0);
329
330 bp->mii_bus->priv = bp;
331
332 bp->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
333 if (!bp->mii_bus->irq) {
334 err = -ENOMEM;
335 goto err_out;
336 }
337
338 for (i = 0; i < PHY_MAX_ADDR; i++)
339 bp->mii_bus->irq[i] = PHY_POLL;
340
341 platform_set_drvdata(bp->dev, bp->mii_bus);
342
343 if (mdiobus_register(bp->mii_bus)) {
344 err = -ENXIO;
345 goto err_out_free_mdio_irq;
346 }
347
348 if (dnet_mii_probe(bp->dev) != 0) {
349 err = -ENXIO;
350 goto err_out_unregister_bus;
351 }
352
353 return 0;
354
355err_out_unregister_bus:
356 mdiobus_unregister(bp->mii_bus);
357err_out_free_mdio_irq:
358 kfree(bp->mii_bus->irq);
359err_out:
360 mdiobus_free(bp->mii_bus);
361 return err;
362}
363
364/* For Neptune board: LINK1000 as Link LED and TX as activity LED */
365int dnet_phy_marvell_fixup(struct phy_device *phydev)
366{
367 return phy_write(phydev, 0x18, 0x4148);
368}
369
370static void dnet_update_stats(struct dnet *bp)
371{
372 u32 __iomem *reg = bp->regs + DNET_RX_PKT_IGNR_CNT;
373 u32 *p = &bp->hw_stats.rx_pkt_ignr;
374 u32 *end = &bp->hw_stats.rx_byte + 1;
375
376 WARN_ON((unsigned long)(end - p - 1) !=
377 (DNET_RX_BYTE_CNT - DNET_RX_PKT_IGNR_CNT) / 4);
378
379 for (; p < end; p++, reg++)
380 *p += readl(reg);
381
382 reg = bp->regs + DNET_TX_UNICAST_CNT;
383 p = &bp->hw_stats.tx_unicast;
384 end = &bp->hw_stats.tx_byte + 1;
385
386 WARN_ON((unsigned long)(end - p - 1) !=
387 (DNET_TX_BYTE_CNT - DNET_TX_UNICAST_CNT) / 4);
388
389 for (; p < end; p++, reg++)
390 *p += readl(reg);
391}
392
393static int dnet_poll(struct napi_struct *napi, int budget)
394{
395 struct dnet *bp = container_of(napi, struct dnet, napi);
396 struct net_device *dev = bp->dev;
397 int npackets = 0;
398 unsigned int pkt_len;
399 struct sk_buff *skb;
400 unsigned int *data_ptr;
401 u32 int_enable;
402 u32 cmd_word;
403 int i;
404
405 while (npackets < budget) {
406 /*
407 * break out of while loop if there are no more
408 * packets waiting
409 */
410 if (!(dnet_readl(bp, RX_FIFO_WCNT) >> 16)) {
411 netif_rx_complete(napi);
412 int_enable = dnet_readl(bp, INTR_ENB);
413 int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF;
414 dnet_writel(bp, int_enable, INTR_ENB);
415 return 0;
416 }
417
418 cmd_word = dnet_readl(bp, RX_LEN_FIFO);
419 pkt_len = cmd_word & 0xFFFF;
420
421 if (cmd_word & 0xDF180000)
422 printk(KERN_ERR "%s packet receive error %x\n",
423 __func__, cmd_word);
424
425 skb = dev_alloc_skb(pkt_len + 5);
426 if (skb != NULL) {
427 /* Align IP on 16 byte boundaries */
428 skb_reserve(skb, 2);
429 /*
430 * 'skb_put()' points to the start of sk_buff
431 * data area.
432 */
433 data_ptr = (unsigned int *)skb_put(skb, pkt_len);
434 for (i = 0; i < (pkt_len + 3) >> 2; i++)
435 *data_ptr++ = dnet_readl(bp, RX_DATA_FIFO);
436 skb->protocol = eth_type_trans(skb, dev);
437 netif_receive_skb(skb);
438 npackets++;
439 } else
440 printk(KERN_NOTICE
441 "%s: No memory to allocate a sk_buff of "
442 "size %u.\n", dev->name, pkt_len);
443 }
444
445 budget -= npackets;
446
447 if (npackets < budget) {
448 /* We processed all packets available. Tell NAPI it can
449 * stop polling then re-enable rx interrupts */
450 netif_rx_complete(napi);
451 int_enable = dnet_readl(bp, INTR_ENB);
452 int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF;
453 dnet_writel(bp, int_enable, INTR_ENB);
454 return 0;
455 }
456
457 /* There are still packets waiting */
458 return 1;
459}
460
461static irqreturn_t dnet_interrupt(int irq, void *dev_id)
462{
463 struct net_device *dev = dev_id;
464 struct dnet *bp = netdev_priv(dev);
465 u32 int_src, int_enable, int_current;
466 unsigned long flags;
467 unsigned int handled = 0;
468
469 spin_lock_irqsave(&bp->lock, flags);
470
471 /* read and clear the DNET irq (clear on read) */
472 int_src = dnet_readl(bp, INTR_SRC);
473 int_enable = dnet_readl(bp, INTR_ENB);
474 int_current = int_src & int_enable;
475
476 /* restart the queue if we had stopped it for TX fifo almost full */
477 if (int_current & DNET_INTR_SRC_TX_FIFOAE) {
478 int_enable = dnet_readl(bp, INTR_ENB);
479 int_enable &= ~DNET_INTR_ENB_TX_FIFOAE;
480 dnet_writel(bp, int_enable, INTR_ENB);
481 netif_wake_queue(dev);
482 handled = 1;
483 }
484
485 /* RX FIFO error checking */
486 if (int_current &
487 (DNET_INTR_SRC_RX_CMDFIFOFF | DNET_INTR_SRC_RX_DATAFIFOFF)) {
488 printk(KERN_ERR "%s: RX fifo error %x, irq %x\n", __func__,
489 dnet_readl(bp, RX_STATUS), int_current);
490 /* we can only flush the RX FIFOs */
491 dnet_writel(bp, DNET_SYS_CTL_RXFIFOFLUSH, SYS_CTL);
492 ndelay(500);
493 dnet_writel(bp, 0, SYS_CTL);
494 handled = 1;
495 }
496
497 /* TX FIFO error checking */
498 if (int_current &
499 (DNET_INTR_SRC_TX_FIFOFULL | DNET_INTR_SRC_TX_DISCFRM)) {
500 printk(KERN_ERR "%s: TX fifo error %x, irq %x\n", __func__,
501 dnet_readl(bp, TX_STATUS), int_current);
502 /* we can only flush the TX FIFOs */
503 dnet_writel(bp, DNET_SYS_CTL_TXFIFOFLUSH, SYS_CTL);
504 ndelay(500);
505 dnet_writel(bp, 0, SYS_CTL);
506 handled = 1;
507 }
508
509 if (int_current & DNET_INTR_SRC_RX_CMDFIFOAF) {
510 if (netif_rx_schedule_prep(&bp->napi)) {
511 /*
512 * There's no point taking any more interrupts
513 * until we have processed the buffers
514 */
515 /* Disable Rx interrupts and schedule NAPI poll */
516 int_enable = dnet_readl(bp, INTR_ENB);
517 int_enable &= ~DNET_INTR_SRC_RX_CMDFIFOAF;
518 dnet_writel(bp, int_enable, INTR_ENB);
519 __netif_rx_schedule(&bp->napi);
520 }
521 handled = 1;
522 }
523
524 if (!handled)
525 pr_debug("%s: irq %x remains\n", __func__, int_current);
526
527 spin_unlock_irqrestore(&bp->lock, flags);
528
529 return IRQ_RETVAL(handled);
530}
531
532#ifdef DEBUG
533static inline void dnet_print_skb(struct sk_buff *skb)
534{
535 int k;
536 printk(KERN_DEBUG PFX "data:");
537 for (k = 0; k < skb->len; k++)
538 printk(" %02x", (unsigned int)skb->data[k]);
539 printk("\n");
540}
541#else
542#define dnet_print_skb(skb) do {} while (0)
543#endif
544
545static int dnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
546{
547
548 struct dnet *bp = netdev_priv(dev);
549 u32 tx_status, irq_enable;
550 unsigned int len, i, tx_cmd, wrsz;
551 unsigned long flags;
552 unsigned int *bufp;
553
554 tx_status = dnet_readl(bp, TX_STATUS);
555
556 pr_debug("start_xmit: len %u head %p data %p tail %p end %p\n",
557 skb->len, skb->head, skb->data, skb->tail, skb->end);
558 dnet_print_skb(skb);
559
560 /* frame size (words) */
561 len = (skb->len + 3) >> 2;
562
563 spin_lock_irqsave(&bp->lock, flags);
564
565 tx_status = dnet_readl(bp, TX_STATUS);
566
567 bufp = (unsigned int *)(((u32) skb->data) & 0xFFFFFFFC);
568 wrsz = (u32) skb->len + 3;
569 wrsz += ((u32) skb->data) & 0x3;
570 wrsz >>= 2;
571 tx_cmd = ((((unsigned int)(skb->data)) & 0x03) << 16) | (u32) skb->len;
572
573 /* check if there is enough room for the current frame */
574 if (wrsz < (DNET_FIFO_SIZE - dnet_readl(bp, TX_FIFO_WCNT))) {
575 for (i = 0; i < wrsz; i++)
576 dnet_writel(bp, *bufp++, TX_DATA_FIFO);
577
578 /*
579 * inform MAC that a packet's written and ready to be
580 * shipped out
581 */
582 dnet_writel(bp, tx_cmd, TX_LEN_FIFO);
583 }
584
585 if (dnet_readl(bp, TX_FIFO_WCNT) > DNET_FIFO_TX_DATA_AF_TH) {
586 netif_stop_queue(dev);
587 tx_status = dnet_readl(bp, INTR_SRC);
588 irq_enable = dnet_readl(bp, INTR_ENB);
589 irq_enable |= DNET_INTR_ENB_TX_FIFOAE;
590 dnet_writel(bp, irq_enable, INTR_ENB);
591 }
592
593 /* free the buffer */
594 dev_kfree_skb(skb);
595
596 spin_unlock_irqrestore(&bp->lock, flags);
597
598 dev->trans_start = jiffies;
599
600 return 0;
601}
602
603static void dnet_reset_hw(struct dnet *bp)
604{
605 /* put ts_mac in IDLE state i.e. disable rx/tx */
606 dnet_writew_mac(bp, DNET_INTERNAL_MODE_REG, DNET_INTERNAL_MODE_FCEN);
607
608 /*
609 * RX FIFO almost full threshold: only cmd FIFO almost full is
610 * implemented for RX side
611 */
612 dnet_writel(bp, DNET_FIFO_RX_CMD_AF_TH, RX_FIFO_TH);
613 /*
614 * TX FIFO almost empty threshold: only data FIFO almost empty
615 * is implemented for TX side
616 */
617 dnet_writel(bp, DNET_FIFO_TX_DATA_AE_TH, TX_FIFO_TH);
618
619 /* flush rx/tx fifos */
620 dnet_writel(bp, DNET_SYS_CTL_RXFIFOFLUSH | DNET_SYS_CTL_TXFIFOFLUSH,
621 SYS_CTL);
622 msleep(1);
623 dnet_writel(bp, 0, SYS_CTL);
624}
625
626static void dnet_init_hw(struct dnet *bp)
627{
628 u32 config;
629
630 dnet_reset_hw(bp);
631 __dnet_set_hwaddr(bp);
632
633 config = dnet_readw_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG);
634
635 if (bp->dev->flags & IFF_PROMISC)
636 /* Copy All Frames */
637 config |= DNET_INTERNAL_RXTX_CONTROL_ENPROMISC;
638 if (!(bp->dev->flags & IFF_BROADCAST))
639 /* No BroadCast */
640 config |= DNET_INTERNAL_RXTX_CONTROL_RXMULTICAST;
641
642 config |= DNET_INTERNAL_RXTX_CONTROL_RXPAUSE |
643 DNET_INTERNAL_RXTX_CONTROL_RXBROADCAST |
644 DNET_INTERNAL_RXTX_CONTROL_DROPCONTROL |
645 DNET_INTERNAL_RXTX_CONTROL_DISCFXFCS;
646
647 dnet_writew_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG, config);
648
649 /* clear irq before enabling them */
650 config = dnet_readl(bp, INTR_SRC);
651
652 /* enable RX/TX interrupt, recv packet ready interrupt */
653 dnet_writel(bp, DNET_INTR_ENB_GLOBAL_ENABLE | DNET_INTR_ENB_RX_SUMMARY |
654 DNET_INTR_ENB_TX_SUMMARY | DNET_INTR_ENB_RX_FIFOERR |
655 DNET_INTR_ENB_RX_ERROR | DNET_INTR_ENB_RX_FIFOFULL |
656 DNET_INTR_ENB_TX_FIFOFULL | DNET_INTR_ENB_TX_DISCFRM |
657 DNET_INTR_ENB_RX_PKTRDY, INTR_ENB);
658}
659
660static int dnet_open(struct net_device *dev)
661{
662 struct dnet *bp = netdev_priv(dev);
663
664 /* if the phy is not yet register, retry later */
665 if (!bp->phy_dev)
666 return -EAGAIN;
667
668 if (!is_valid_ether_addr(dev->dev_addr))
669 return -EADDRNOTAVAIL;
670
671 napi_enable(&bp->napi);
672 dnet_init_hw(bp);
673
674 phy_start_aneg(bp->phy_dev);
675
676 /* schedule a link state check */
677 phy_start(bp->phy_dev);
678
679 netif_start_queue(dev);
680
681 return 0;
682}
683
684static int dnet_close(struct net_device *dev)
685{
686 struct dnet *bp = netdev_priv(dev);
687
688 netif_stop_queue(dev);
689 napi_disable(&bp->napi);
690
691 if (bp->phy_dev)
692 phy_stop(bp->phy_dev);
693
694 dnet_reset_hw(bp);
695 netif_carrier_off(dev);
696
697 return 0;
698}
699
700static inline void dnet_print_pretty_hwstats(struct dnet_stats *hwstat)
701{
702 pr_debug("%s\n", __func__);
703 pr_debug("----------------------------- RX statistics "
704 "-------------------------------\n");
705 pr_debug("RX_PKT_IGNR_CNT %-8x\n", hwstat->rx_pkt_ignr);
706 pr_debug("RX_LEN_CHK_ERR_CNT %-8x\n", hwstat->rx_len_chk_err);
707 pr_debug("RX_LNG_FRM_CNT %-8x\n", hwstat->rx_lng_frm);
708 pr_debug("RX_SHRT_FRM_CNT %-8x\n", hwstat->rx_shrt_frm);
709 pr_debug("RX_IPG_VIOL_CNT %-8x\n", hwstat->rx_ipg_viol);
710 pr_debug("RX_CRC_ERR_CNT %-8x\n", hwstat->rx_crc_err);
711 pr_debug("RX_OK_PKT_CNT %-8x\n", hwstat->rx_ok_pkt);
712 pr_debug("RX_CTL_FRM_CNT %-8x\n", hwstat->rx_ctl_frm);
713 pr_debug("RX_PAUSE_FRM_CNT %-8x\n", hwstat->rx_pause_frm);
714 pr_debug("RX_MULTICAST_CNT %-8x\n", hwstat->rx_multicast);
715 pr_debug("RX_BROADCAST_CNT %-8x\n", hwstat->rx_broadcast);
716 pr_debug("RX_VLAN_TAG_CNT %-8x\n", hwstat->rx_vlan_tag);
717 pr_debug("RX_PRE_SHRINK_CNT %-8x\n", hwstat->rx_pre_shrink);
718 pr_debug("RX_DRIB_NIB_CNT %-8x\n", hwstat->rx_drib_nib);
719 pr_debug("RX_UNSUP_OPCD_CNT %-8x\n", hwstat->rx_unsup_opcd);
720 pr_debug("RX_BYTE_CNT %-8x\n", hwstat->rx_byte);
721 pr_debug("----------------------------- TX statistics "
722 "-------------------------------\n");
723 pr_debug("TX_UNICAST_CNT %-8x\n", hwstat->tx_unicast);
724 pr_debug("TX_PAUSE_FRM_CNT %-8x\n", hwstat->tx_pause_frm);
725 pr_debug("TX_MULTICAST_CNT %-8x\n", hwstat->tx_multicast);
726 pr_debug("TX_BRDCAST_CNT %-8x\n", hwstat->tx_brdcast);
727 pr_debug("TX_VLAN_TAG_CNT %-8x\n", hwstat->tx_vlan_tag);
728 pr_debug("TX_BAD_FCS_CNT %-8x\n", hwstat->tx_bad_fcs);
729 pr_debug("TX_JUMBO_CNT %-8x\n", hwstat->tx_jumbo);
730 pr_debug("TX_BYTE_CNT %-8x\n", hwstat->tx_byte);
731}
732
733static struct net_device_stats *dnet_get_stats(struct net_device *dev)
734{
735
736 struct dnet *bp = netdev_priv(dev);
737 struct net_device_stats *nstat = &dev->stats;
738 struct dnet_stats *hwstat = &bp->hw_stats;
739
740 /* read stats from hardware */
741 dnet_update_stats(bp);
742
743 /* Convert HW stats into netdevice stats */
744 nstat->rx_errors = (hwstat->rx_len_chk_err +
745 hwstat->rx_lng_frm + hwstat->rx_shrt_frm +
746 /* ignore IGP violation error
747 hwstat->rx_ipg_viol + */
748 hwstat->rx_crc_err +
749 hwstat->rx_pre_shrink +
750 hwstat->rx_drib_nib + hwstat->rx_unsup_opcd);
751 nstat->tx_errors = hwstat->tx_bad_fcs;
752 nstat->rx_length_errors = (hwstat->rx_len_chk_err +
753 hwstat->rx_lng_frm +
754 hwstat->rx_shrt_frm + hwstat->rx_pre_shrink);
755 nstat->rx_crc_errors = hwstat->rx_crc_err;
756 nstat->rx_frame_errors = hwstat->rx_pre_shrink + hwstat->rx_drib_nib;
757 nstat->rx_packets = hwstat->rx_ok_pkt;
758 nstat->tx_packets = (hwstat->tx_unicast +
759 hwstat->tx_multicast + hwstat->tx_brdcast);
760 nstat->rx_bytes = hwstat->rx_byte;
761 nstat->tx_bytes = hwstat->tx_byte;
762 nstat->multicast = hwstat->rx_multicast;
763 nstat->rx_missed_errors = hwstat->rx_pkt_ignr;
764
765 dnet_print_pretty_hwstats(hwstat);
766
767 return nstat;
768}
769
770static int dnet_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
771{
772 struct dnet *bp = netdev_priv(dev);
773 struct phy_device *phydev = bp->phy_dev;
774
775 if (!phydev)
776 return -ENODEV;
777
778 return phy_ethtool_gset(phydev, cmd);
779}
780
781static int dnet_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
782{
783 struct dnet *bp = netdev_priv(dev);
784 struct phy_device *phydev = bp->phy_dev;
785
786 if (!phydev)
787 return -ENODEV;
788
789 return phy_ethtool_sset(phydev, cmd);
790}
791
792static int dnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
793{
794 struct dnet *bp = netdev_priv(dev);
795 struct phy_device *phydev = bp->phy_dev;
796
797 if (!netif_running(dev))
798 return -EINVAL;
799
800 if (!phydev)
801 return -ENODEV;
802
803 return phy_mii_ioctl(phydev, if_mii(rq), cmd);
804}
805
806static void dnet_get_drvinfo(struct net_device *dev,
807 struct ethtool_drvinfo *info)
808{
809 strcpy(info->driver, DRV_NAME);
810 strcpy(info->version, DRV_VERSION);
811 strcpy(info->bus_info, "0");
812}
813
814static const struct ethtool_ops dnet_ethtool_ops = {
815 .get_settings = dnet_get_settings,
816 .set_settings = dnet_set_settings,
817 .get_drvinfo = dnet_get_drvinfo,
818 .get_link = ethtool_op_get_link,
819};
820
821static const struct net_device_ops dnet_netdev_ops = {
822 .ndo_open = dnet_open,
823 .ndo_stop = dnet_close,
824 .ndo_get_stats = dnet_get_stats,
825 .ndo_start_xmit = dnet_start_xmit,
826 .ndo_do_ioctl = dnet_ioctl,
827 .ndo_set_mac_address = eth_mac_addr,
828 .ndo_validate_addr = eth_validate_addr,
829 .ndo_change_mtu = eth_change_mtu,
830};
831
832static int __devinit dnet_probe(struct platform_device *pdev)
833{
834 struct resource *res;
835 struct net_device *dev;
836 struct dnet *bp;
837 struct phy_device *phydev;
838 int err = -ENXIO;
839 unsigned int mem_base, mem_size, irq;
840
841 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
842 if (!res) {
843 dev_err(&pdev->dev, "no mmio resource defined\n");
844 goto err_out;
845 }
846 mem_base = res->start;
847 mem_size = resource_size(res);
848 irq = platform_get_irq(pdev, 0);
849
850 if (!request_mem_region(mem_base, mem_size, DRV_NAME)) {
851 dev_err(&pdev->dev, "no memory region available\n");
852 err = -EBUSY;
853 goto err_out;
854 }
855
856 err = -ENOMEM;
857 dev = alloc_etherdev(sizeof(*bp));
858 if (!dev) {
859 dev_err(&pdev->dev, "etherdev alloc failed, aborting.\n");
860 goto err_out;
861 }
862
863 /* TODO: Actually, we have some interesting features... */
864 dev->features |= 0;
865
866 bp = netdev_priv(dev);
867 bp->dev = dev;
868
869 SET_NETDEV_DEV(dev, &pdev->dev);
870
871 spin_lock_init(&bp->lock);
872
873 bp->regs = ioremap(mem_base, mem_size);
874 if (!bp->regs) {
875 dev_err(&pdev->dev, "failed to map registers, aborting.\n");
876 err = -ENOMEM;
877 goto err_out_free_dev;
878 }
879
880 dev->irq = irq;
881 err = request_irq(dev->irq, dnet_interrupt, 0, DRV_NAME, dev);
882 if (err) {
883 dev_err(&pdev->dev, "Unable to request IRQ %d (error %d)\n",
884 irq, err);
885 goto err_out_iounmap;
886 }
887
888 dev->netdev_ops = &dnet_netdev_ops;
889 netif_napi_add(dev, &bp->napi, dnet_poll, 64);
890 dev->ethtool_ops = &dnet_ethtool_ops;
891
892 dev->base_addr = (unsigned long)bp->regs;
893
894 bp->capabilities = dnet_readl(bp, VERCAPS) & DNET_CAPS_MASK;
895
896 dnet_get_hwaddr(bp);
897
898 if (!is_valid_ether_addr(dev->dev_addr)) {
899 /* choose a random ethernet address */
900 random_ether_addr(dev->dev_addr);
901 __dnet_set_hwaddr(bp);
902 }
903
904 err = register_netdev(dev);
905 if (err) {
906 dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
907 goto err_out_free_irq;
908 }
909
910 /* register the PHY board fixup (for Marvell 88E1111) */
911 err = phy_register_fixup_for_uid(0x01410cc0, 0xfffffff0,
912 dnet_phy_marvell_fixup);
913 /* we can live without it, so just issue a warning */
914 if (err)
915 dev_warn(&pdev->dev, "Cannot register PHY board fixup.\n");
916
917 if (dnet_mii_init(bp) != 0)
918 goto err_out_unregister_netdev;
919
920 dev_info(&pdev->dev, "Dave DNET at 0x%p (0x%08x) irq %d %pM\n",
921 bp->regs, mem_base, dev->irq, dev->dev_addr);
922 dev_info(&pdev->dev, "has %smdio, %sirq, %sgigabit, %sdma \n",
923 (bp->capabilities & DNET_HAS_MDIO) ? "" : "no ",
924 (bp->capabilities & DNET_HAS_IRQ) ? "" : "no ",
925 (bp->capabilities & DNET_HAS_GIGABIT) ? "" : "no ",
926 (bp->capabilities & DNET_HAS_DMA) ? "" : "no ");
927 phydev = bp->phy_dev;
928 dev_info(&pdev->dev, "attached PHY driver [%s] "
929 "(mii_bus:phy_addr=%s, irq=%d)\n",
930 phydev->drv->name, phydev->dev.bus_id, phydev->irq);
931
932 return 0;
933
934err_out_unregister_netdev:
935 unregister_netdev(dev);
936err_out_free_irq:
937 free_irq(dev->irq, dev);
938err_out_iounmap:
939 iounmap(bp->regs);
940err_out_free_dev:
941 free_netdev(dev);
942err_out:
943 return err;
944}
945
946static int __devexit dnet_remove(struct platform_device *pdev)
947{
948
949 struct net_device *dev;
950 struct dnet *bp;
951
952 dev = platform_get_drvdata(pdev);
953
954 if (dev) {
955 bp = netdev_priv(dev);
956 if (bp->phy_dev)
957 phy_disconnect(bp->phy_dev);
958 mdiobus_unregister(bp->mii_bus);
959 kfree(bp->mii_bus->irq);
960 mdiobus_free(bp->mii_bus);
961 unregister_netdev(dev);
962 free_irq(dev->irq, dev);
963 iounmap(bp->regs);
964 free_netdev(dev);
965 }
966
967 return 0;
968}
969
970static struct platform_driver dnet_driver = {
971 .probe = dnet_probe,
972 .remove = __devexit_p(dnet_remove),
973 .driver = {
974 .name = "dnet",
975 },
976};
977
978static int __init dnet_init(void)
979{
980 return platform_driver_register(&dnet_driver);
981}
982
983static void __exit dnet_exit(void)
984{
985 platform_driver_unregister(&dnet_driver);
986}
987
988module_init(dnet_init);
989module_exit(dnet_exit);
990
991MODULE_LICENSE("GPL");
992MODULE_DESCRIPTION("Dave DNET Ethernet driver");
993MODULE_AUTHOR("Ilya Yanok <yanok@emcraft.com>, "
994 "Matteo Vit <matteo.vit@dave.eu>");