author     Thierry Reding <thierry.reding@avionic-design.de>	2009-03-27 03:12:24 -0400
committer  David S. Miller <davem@davemloft.net>	2009-03-27 03:16:21 -0400
commit     a1702857724fb39cb68ce581490010df99168fd0
tree       2e62d3ab16220118949fb0671500f7cfef8dd545 /drivers
parent     01e6de64d9c8d0e75dca3bb4cf898db73abe00d4
net: Add support for the OpenCores 10/100 Mbps Ethernet MAC.
This patch adds a platform device driver that supports the OpenCores 10/100
Mbps Ethernet MAC. The driver expects three resources: one IORESOURCE_MEM
resource defines the memory region for the core's memory-mapped registers, a
second IORESOURCE_MEM resource defines the network packet buffer space, and a
third resource, of type IORESOURCE_IRQ, associates an interrupt with the
driver.

Signed-off-by: Thierry Reding <thierry.reding@avionic-design.de>
Acked-by: Florian Fainelli <florian@openwrt.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
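For reference, a minimal board-support sketch (not part of this patch) of how
platform setup code might register the "ethoc" device with the three resources
described above. All base addresses, sizes, the IRQ number and the MAC address
are placeholder assumptions; the ethoc_platform_data fields mirror the ones the
probe routine below consumes (hwaddr, phy_id).

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <net/ethoc.h>

static struct resource ethoc_resources[] = {
	{	/* memory-mapped core registers (buffer descriptors live at offset 0x400) */
		.start	= 0x92000000,		/* placeholder register base */
		.end	= 0x92000fff,
		.flags	= IORESOURCE_MEM,
	},
	{	/* network packet buffer space */
		.start	= 0x92100000,		/* placeholder buffer base */
		.end	= 0x92107fff,
		.flags	= IORESOURCE_MEM,
	},
	{	/* MAC interrupt line */
		.start	= 10,			/* placeholder IRQ number */
		.end	= 10,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct ethoc_platform_data ethoc_pdata = {
	.hwaddr	= { 0x00, 0x50, 0xc2, 0x13, 0x6f, 0x00 },	/* placeholder MAC address */
	.phy_id	= -1,						/* -1: autoselect attached PHY */
};

static struct platform_device ethoc_device = {
	.name		= "ethoc",
	.id		= 0,
	.num_resources	= ARRAY_SIZE(ethoc_resources),
	.resource	= ethoc_resources,
	.dev		= {
		.platform_data	= &ethoc_pdata,
	},
};

Board init code would then call platform_device_register(&ethoc_device), which
matches the device against the "ethoc" platform driver registered below.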
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/Kconfig  |    8
-rw-r--r--  drivers/net/Makefile |    1
-rw-r--r--  drivers/net/ethoc.c  | 1112
3 files changed, 1121 insertions, 0 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index e5ffc1c606c..f062b424704 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -972,6 +972,14 @@ config ENC28J60_WRITEVERIFY
 	  Enable the verify after the buffer write useful for debugging purpose.
 	  If unsure, say N.
 
+config ETHOC
+	tristate "OpenCores 10/100 Mbps Ethernet MAC support"
+	depends on NET_ETHERNET
+	select MII
+	select PHYLIB
+	help
+	  Say Y here if you want to use the OpenCores 10/100 Mbps Ethernet MAC.
+
 config SMC911X
 	tristate "SMSC LAN911[5678] support"
 	select CRC32
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 758ecdf4c82..98409c9dd44 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -230,6 +230,7 @@ obj-$(CONFIG_PASEMI_MAC) += pasemi_mac_driver.o
 pasemi_mac_driver-objs := pasemi_mac.o pasemi_mac_ethtool.o
 obj-$(CONFIG_MLX4_CORE) += mlx4/
 obj-$(CONFIG_ENC28J60) += enc28j60.o
+obj-$(CONFIG_ETHOC) += ethoc.o
 
 obj-$(CONFIG_XTENSA_XT2000_SONIC) += xtsonic.o
 
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
new file mode 100644
index 00000000000..91a9b1a3376
--- /dev/null
+++ b/drivers/net/ethoc.c
@@ -0,0 +1,1112 @@
1/*
2 * linux/drivers/net/ethoc.c
3 *
4 * Copyright (C) 2007-2008 Avionic Design Development GmbH
5 * Copyright (C) 2008-2009 Avionic Design GmbH
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * Written by Thierry Reding <thierry.reding@avionic-design.de>
12 */
13
14#include <linux/etherdevice.h>
15#include <linux/crc32.h>
16#include <linux/io.h>
17#include <linux/mii.h>
18#include <linux/phy.h>
19#include <linux/platform_device.h>
20#include <net/ethoc.h>
21
22/* register offsets */
23#define MODER 0x00
24#define INT_SOURCE 0x04
25#define INT_MASK 0x08
26#define IPGT 0x0c
27#define IPGR1 0x10
28#define IPGR2 0x14
29#define PACKETLEN 0x18
30#define COLLCONF 0x1c
31#define TX_BD_NUM 0x20
32#define CTRLMODER 0x24
33#define MIIMODER 0x28
34#define MIICOMMAND 0x2c
35#define MIIADDRESS 0x30
36#define MIITX_DATA 0x34
37#define MIIRX_DATA 0x38
38#define MIISTATUS 0x3c
39#define MAC_ADDR0 0x40
40#define MAC_ADDR1 0x44
41#define ETH_HASH0 0x48
42#define ETH_HASH1 0x4c
43#define ETH_TXCTRL 0x50
44
45/* mode register */
46#define MODER_RXEN (1 << 0) /* receive enable */
47#define MODER_TXEN (1 << 1) /* transmit enable */
48#define MODER_NOPRE (1 << 2) /* no preamble */
49#define MODER_BRO (1 << 3) /* broadcast address */
50#define MODER_IAM (1 << 4) /* individual address mode */
51#define MODER_PRO (1 << 5) /* promiscuous mode */
52#define MODER_IFG (1 << 6) /* interframe gap for incoming frames */
53#define MODER_LOOP (1 << 7) /* loopback */
54#define MODER_NBO (1 << 8) /* no back-off */
55#define MODER_EDE (1 << 9) /* excess defer enable */
56#define MODER_FULLD (1 << 10) /* full duplex */
57#define MODER_RESET (1 << 11) /* FIXME: reset (undocumented) */
58#define MODER_DCRC (1 << 12) /* delayed CRC enable */
59#define MODER_CRC (1 << 13) /* CRC enable */
60#define MODER_HUGE (1 << 14) /* huge packets enable */
61#define MODER_PAD (1 << 15) /* padding enabled */
62#define MODER_RSM (1 << 16) /* receive small packets */
63
64/* interrupt source and mask registers */
65#define INT_MASK_TXF (1 << 0) /* transmit frame */
66#define INT_MASK_TXE (1 << 1) /* transmit error */
67#define INT_MASK_RXF (1 << 2) /* receive frame */
68#define INT_MASK_RXE (1 << 3) /* receive error */
69#define INT_MASK_BUSY (1 << 4)
70#define INT_MASK_TXC (1 << 5) /* transmit control frame */
71#define INT_MASK_RXC (1 << 6) /* receive control frame */
72
73#define INT_MASK_TX (INT_MASK_TXF | INT_MASK_TXE)
74#define INT_MASK_RX (INT_MASK_RXF | INT_MASK_RXE)
75
76#define INT_MASK_ALL ( \
77 INT_MASK_TXF | INT_MASK_TXE | \
78 INT_MASK_RXF | INT_MASK_RXE | \
79 INT_MASK_TXC | INT_MASK_RXC | \
80 INT_MASK_BUSY \
81 )
82
83/* packet length register */
84#define PACKETLEN_MIN(min) (((min) & 0xffff) << 16)
85#define PACKETLEN_MAX(max) (((max) & 0xffff) << 0)
86#define PACKETLEN_MIN_MAX(min, max) (PACKETLEN_MIN(min) | \
87 PACKETLEN_MAX(max))
88
89/* transmit buffer number register */
90#define TX_BD_NUM_VAL(x) (((x) <= 0x80) ? (x) : 0x80)
91
92/* control module mode register */
93#define CTRLMODER_PASSALL (1 << 0) /* pass all receive frames */
94#define CTRLMODER_RXFLOW (1 << 1) /* receive control flow */
95#define CTRLMODER_TXFLOW (1 << 2) /* transmit control flow */
96
97/* MII mode register */
98#define MIIMODER_CLKDIV(x) ((x) & 0xfe) /* needs to be an even number */
99#define MIIMODER_NOPRE (1 << 8) /* no preamble */
100
101/* MII command register */
102#define MIICOMMAND_SCAN (1 << 0) /* scan status */
103#define MIICOMMAND_READ (1 << 1) /* read status */
104#define MIICOMMAND_WRITE (1 << 2) /* write control data */
105
106/* MII address register */
107#define MIIADDRESS_FIAD(x) (((x) & 0x1f) << 0)
108#define MIIADDRESS_RGAD(x) (((x) & 0x1f) << 8)
109#define MIIADDRESS_ADDR(phy, reg) (MIIADDRESS_FIAD(phy) | \
110 MIIADDRESS_RGAD(reg))
111
112/* MII transmit data register */
113#define MIITX_DATA_VAL(x) ((x) & 0xffff)
114
115/* MII receive data register */
116#define MIIRX_DATA_VAL(x) ((x) & 0xffff)
117
118/* MII status register */
119#define MIISTATUS_LINKFAIL (1 << 0)
120#define MIISTATUS_BUSY (1 << 1)
121#define MIISTATUS_INVALID (1 << 2)
122
123/* TX buffer descriptor */
124#define TX_BD_CS (1 << 0) /* carrier sense lost */
125#define TX_BD_DF (1 << 1) /* defer indication */
126#define TX_BD_LC (1 << 2) /* late collision */
127#define TX_BD_RL (1 << 3) /* retransmission limit */
128#define TX_BD_RETRY_MASK (0x00f0)
129#define TX_BD_RETRY(x) (((x) & 0x00f0) >> 4)
130#define TX_BD_UR (1 << 8) /* transmitter underrun */
131#define TX_BD_CRC (1 << 11) /* TX CRC enable */
132#define TX_BD_PAD (1 << 12) /* pad enable for short packets */
133#define TX_BD_WRAP (1 << 13)
134#define TX_BD_IRQ (1 << 14) /* interrupt request enable */
135#define TX_BD_READY (1 << 15) /* TX buffer ready */
136#define TX_BD_LEN(x) (((x) & 0xffff) << 16)
137#define TX_BD_LEN_MASK (0xffff << 16)
138
139#define TX_BD_STATS (TX_BD_CS | TX_BD_DF | TX_BD_LC | \
140 TX_BD_RL | TX_BD_RETRY_MASK | TX_BD_UR)
141
142/* RX buffer descriptor */
143#define RX_BD_LC (1 << 0) /* late collision */
144#define RX_BD_CRC (1 << 1) /* RX CRC error */
145#define RX_BD_SF (1 << 2) /* short frame */
146#define RX_BD_TL (1 << 3) /* too long */
147#define RX_BD_DN (1 << 4) /* dribble nibble */
148#define RX_BD_IS (1 << 5) /* invalid symbol */
149#define RX_BD_OR (1 << 6) /* receiver overrun */
150#define RX_BD_MISS (1 << 7)
151#define RX_BD_CF (1 << 8) /* control frame */
152#define RX_BD_WRAP (1 << 13)
153#define RX_BD_IRQ (1 << 14) /* interrupt request enable */
154#define RX_BD_EMPTY (1 << 15)
155#define RX_BD_LEN(x) (((x) & 0xffff) << 16)
156
157#define RX_BD_STATS (RX_BD_LC | RX_BD_CRC | RX_BD_SF | RX_BD_TL | \
158 RX_BD_DN | RX_BD_IS | RX_BD_OR | RX_BD_MISS)
159
160#define ETHOC_BUFSIZ 1536
161#define ETHOC_ZLEN 64
162#define ETHOC_BD_BASE 0x400
163#define ETHOC_TIMEOUT (HZ / 2)
164#define ETHOC_MII_TIMEOUT (1 + (HZ / 5))
165
166/**
167 * struct ethoc - driver-private device structure
168 * @iobase: pointer to I/O memory region
169 * @membase: pointer to buffer memory region
170 * @num_tx: number of send buffers
171 * @cur_tx: last send buffer written
172 * @dty_tx: last buffer actually sent
173 * @num_rx: number of receive buffers
174 * @cur_rx: current receive buffer
175 * @netdev: pointer to network device structure
176 * @napi: NAPI structure
177 * @stats: network device statistics
178 * @msg_enable: device state flags
179 * @rx_lock: receive lock
180 * @lock: device lock
181 * @phy: attached PHY
182 * @mdio: MDIO bus for PHY access
183 * @phy_id: address of attached PHY
184 */
185struct ethoc {
186 void __iomem *iobase;
187 void __iomem *membase;
188
189 unsigned int num_tx;
190 unsigned int cur_tx;
191 unsigned int dty_tx;
192
193 unsigned int num_rx;
194 unsigned int cur_rx;
195
196 struct net_device *netdev;
197 struct napi_struct napi;
198 struct net_device_stats stats;
199 u32 msg_enable;
200
201 spinlock_t rx_lock;
202 spinlock_t lock;
203
204 struct phy_device *phy;
205 struct mii_bus *mdio;
206 s8 phy_id;
207};
208
209/**
210 * struct ethoc_bd - buffer descriptor
211 * @stat: buffer statistics
212 * @addr: physical memory address
213 */
214struct ethoc_bd {
215 u32 stat;
216 u32 addr;
217};
218
219static u32 ethoc_read(struct ethoc *dev, loff_t offset)
220{
221 return ioread32(dev->iobase + offset);
222}
223
224static void ethoc_write(struct ethoc *dev, loff_t offset, u32 data)
225{
226 iowrite32(data, dev->iobase + offset);
227}
228
229static void ethoc_read_bd(struct ethoc *dev, int index, struct ethoc_bd *bd)
230{
231 loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd));
232 bd->stat = ethoc_read(dev, offset + 0);
233 bd->addr = ethoc_read(dev, offset + 4);
234}
235
236static void ethoc_write_bd(struct ethoc *dev, int index,
237 const struct ethoc_bd *bd)
238{
239 loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd));
240 ethoc_write(dev, offset + 0, bd->stat);
241 ethoc_write(dev, offset + 4, bd->addr);
242}
243
244static void ethoc_enable_irq(struct ethoc *dev, u32 mask)
245{
246 u32 imask = ethoc_read(dev, INT_MASK);
247 imask |= mask;
248 ethoc_write(dev, INT_MASK, imask);
249}
250
251static void ethoc_disable_irq(struct ethoc *dev, u32 mask)
252{
253 u32 imask = ethoc_read(dev, INT_MASK);
254 imask &= ~mask;
255 ethoc_write(dev, INT_MASK, imask);
256}
257
258static void ethoc_ack_irq(struct ethoc *dev, u32 mask)
259{
260 ethoc_write(dev, INT_SOURCE, mask);
261}
262
263static void ethoc_enable_rx_and_tx(struct ethoc *dev)
264{
265 u32 mode = ethoc_read(dev, MODER);
266 mode |= MODER_RXEN | MODER_TXEN;
267 ethoc_write(dev, MODER, mode);
268}
269
270static void ethoc_disable_rx_and_tx(struct ethoc *dev)
271{
272 u32 mode = ethoc_read(dev, MODER);
273 mode &= ~(MODER_RXEN | MODER_TXEN);
274 ethoc_write(dev, MODER, mode);
275}
276
277static int ethoc_init_ring(struct ethoc *dev)
278{
279 struct ethoc_bd bd;
280 int i;
281
282 dev->cur_tx = 0;
283 dev->dty_tx = 0;
284 dev->cur_rx = 0;
285
286 /* setup transmission buffers */
287 bd.addr = 0;
288 bd.stat = TX_BD_IRQ | TX_BD_CRC;
289
290 for (i = 0; i < dev->num_tx; i++) {
291 if (i == dev->num_tx - 1)
292 bd.stat |= TX_BD_WRAP;
293
294 ethoc_write_bd(dev, i, &bd);
295 bd.addr += ETHOC_BUFSIZ;
296 }
297
298 bd.addr = dev->num_tx * ETHOC_BUFSIZ;
299 bd.stat = RX_BD_EMPTY | RX_BD_IRQ;
300
301 for (i = 0; i < dev->num_rx; i++) {
302 if (i == dev->num_rx - 1)
303 bd.stat |= RX_BD_WRAP;
304
305 ethoc_write_bd(dev, dev->num_tx + i, &bd);
306 bd.addr += ETHOC_BUFSIZ;
307 }
308
309 return 0;
310}
311
312static int ethoc_reset(struct ethoc *dev)
313{
314 u32 mode;
315
316 /* TODO: reset controller? */
317
318 ethoc_disable_rx_and_tx(dev);
319
320 /* TODO: setup registers */
321
322 /* enable FCS generation and automatic padding */
323 mode = ethoc_read(dev, MODER);
324 mode |= MODER_CRC | MODER_PAD;
325 ethoc_write(dev, MODER, mode);
326
327 /* set full-duplex mode */
328 mode = ethoc_read(dev, MODER);
329 mode |= MODER_FULLD;
330 ethoc_write(dev, MODER, mode);
331 ethoc_write(dev, IPGT, 0x15);
332
333 ethoc_ack_irq(dev, INT_MASK_ALL);
334 ethoc_enable_irq(dev, INT_MASK_ALL);
335 ethoc_enable_rx_and_tx(dev);
336 return 0;
337}
338
339static unsigned int ethoc_update_rx_stats(struct ethoc *dev,
340 struct ethoc_bd *bd)
341{
342 struct net_device *netdev = dev->netdev;
343 unsigned int ret = 0;
344
345 if (bd->stat & RX_BD_TL) {
346 dev_err(&netdev->dev, "RX: frame too long\n");
347 dev->stats.rx_length_errors++;
348 ret++;
349 }
350
351 if (bd->stat & RX_BD_SF) {
352 dev_err(&netdev->dev, "RX: frame too short\n");
353 dev->stats.rx_length_errors++;
354 ret++;
355 }
356
357 if (bd->stat & RX_BD_DN) {
358 dev_err(&netdev->dev, "RX: dribble nibble\n");
359 dev->stats.rx_frame_errors++;
360 }
361
362 if (bd->stat & RX_BD_CRC) {
363 dev_err(&netdev->dev, "RX: wrong CRC\n");
364 dev->stats.rx_crc_errors++;
365 ret++;
366 }
367
368 if (bd->stat & RX_BD_OR) {
369 dev_err(&netdev->dev, "RX: overrun\n");
370 dev->stats.rx_over_errors++;
371 ret++;
372 }
373
374 if (bd->stat & RX_BD_MISS)
375 dev->stats.rx_missed_errors++;
376
377 if (bd->stat & RX_BD_LC) {
378 dev_err(&netdev->dev, "RX: late collision\n");
379 dev->stats.collisions++;
380 ret++;
381 }
382
383 return ret;
384}
385
386static int ethoc_rx(struct net_device *dev, int limit)
387{
388 struct ethoc *priv = netdev_priv(dev);
389 int count;
390
391 for (count = 0; count < limit; ++count) {
392 unsigned int entry;
393 struct ethoc_bd bd;
394
395 entry = priv->num_tx + (priv->cur_rx % priv->num_rx);
396 ethoc_read_bd(priv, entry, &bd);
397 if (bd.stat & RX_BD_EMPTY)
398 break;
399
400 if (ethoc_update_rx_stats(priv, &bd) == 0) {
401 int size = bd.stat >> 16;
402 struct sk_buff *skb = netdev_alloc_skb(dev, size);
403 if (likely(skb)) {
404 void *src = priv->membase + bd.addr;
405 memcpy_fromio(skb_put(skb, size), src, size);
406 skb->protocol = eth_type_trans(skb, dev);
407 dev->last_rx = jiffies;
408 priv->stats.rx_packets++;
409 priv->stats.rx_bytes += size;
410 netif_receive_skb(skb);
411 } else {
412 if (net_ratelimit())
413 dev_warn(&dev->dev, "low on memory - "
414 "packet dropped\n");
415
416 priv->stats.rx_dropped++;
417 break;
418 }
419 }
420
421 /* clear the buffer descriptor so it can be reused */
422 bd.stat &= ~RX_BD_STATS;
423 bd.stat |= RX_BD_EMPTY;
424 ethoc_write_bd(priv, entry, &bd);
425 priv->cur_rx++;
426 }
427
428 return count;
429}
430
431static int ethoc_update_tx_stats(struct ethoc *dev, struct ethoc_bd *bd)
432{
433 struct net_device *netdev = dev->netdev;
434
435 if (bd->stat & TX_BD_LC) {
436 dev_err(&netdev->dev, "TX: late collision\n");
437 dev->stats.tx_window_errors++;
438 }
439
440 if (bd->stat & TX_BD_RL) {
441 dev_err(&netdev->dev, "TX: retransmit limit\n");
442 dev->stats.tx_aborted_errors++;
443 }
444
445 if (bd->stat & TX_BD_UR) {
446 dev_err(&netdev->dev, "TX: underrun\n");
447 dev->stats.tx_fifo_errors++;
448 }
449
450 if (bd->stat & TX_BD_CS) {
451 dev_err(&netdev->dev, "TX: carrier sense lost\n");
452 dev->stats.tx_carrier_errors++;
453 }
454
455 if (bd->stat & TX_BD_STATS)
456 dev->stats.tx_errors++;
457
458 dev->stats.collisions += (bd->stat >> 4) & 0xf;
459 dev->stats.tx_bytes += bd->stat >> 16;
460 dev->stats.tx_packets++;
461 return 0;
462}
463
464static void ethoc_tx(struct net_device *dev)
465{
466 struct ethoc *priv = netdev_priv(dev);
467
468 spin_lock(&priv->lock);
469
470 while (priv->dty_tx != priv->cur_tx) {
471 unsigned int entry = priv->dty_tx % priv->num_tx;
472 struct ethoc_bd bd;
473
474 ethoc_read_bd(priv, entry, &bd);
475 if (bd.stat & TX_BD_READY)
476 break;
477
478 entry = (++priv->dty_tx) % priv->num_tx;
479 (void)ethoc_update_tx_stats(priv, &bd);
480 }
481
482 if ((priv->cur_tx - priv->dty_tx) <= (priv->num_tx / 2))
483 netif_wake_queue(dev);
484
485 ethoc_ack_irq(priv, INT_MASK_TX);
486 spin_unlock(&priv->lock);
487}
488
489static irqreturn_t ethoc_interrupt(int irq, void *dev_id)
490{
491 struct net_device *dev = (struct net_device *)dev_id;
492 struct ethoc *priv = netdev_priv(dev);
493 u32 pending;
494
495 ethoc_disable_irq(priv, INT_MASK_ALL);
496 pending = ethoc_read(priv, INT_SOURCE);
497 if (unlikely(pending == 0)) {
498 ethoc_enable_irq(priv, INT_MASK_ALL);
499 return IRQ_NONE;
500 }
501
502 ethoc_ack_irq(priv, INT_MASK_ALL);
503
504 if (pending & INT_MASK_BUSY) {
505 dev_err(&dev->dev, "packet dropped\n");
506 priv->stats.rx_dropped++;
507 }
508
509 if (pending & INT_MASK_RX) {
510 if (napi_schedule_prep(&priv->napi))
511 __napi_schedule(&priv->napi);
512 } else {
513 ethoc_enable_irq(priv, INT_MASK_RX);
514 }
515
516 if (pending & INT_MASK_TX)
517 ethoc_tx(dev);
518
519 ethoc_enable_irq(priv, INT_MASK_ALL & ~INT_MASK_RX);
520 return IRQ_HANDLED;
521}
522
523static int ethoc_get_mac_address(struct net_device *dev, void *addr)
524{
525 struct ethoc *priv = netdev_priv(dev);
526 u8 *mac = (u8 *)addr;
527 u32 reg;
528
529 reg = ethoc_read(priv, MAC_ADDR0);
530 mac[2] = (reg >> 24) & 0xff;
531 mac[3] = (reg >> 16) & 0xff;
532 mac[4] = (reg >> 8) & 0xff;
533 mac[5] = (reg >> 0) & 0xff;
534
535 reg = ethoc_read(priv, MAC_ADDR1);
536 mac[0] = (reg >> 8) & 0xff;
537 mac[1] = (reg >> 0) & 0xff;
538
539 return 0;
540}
541
542static int ethoc_poll(struct napi_struct *napi, int budget)
543{
544 struct ethoc *priv = container_of(napi, struct ethoc, napi);
545 int work_done = 0;
546
547 work_done = ethoc_rx(priv->netdev, budget);
548 if (work_done < budget) {
549 ethoc_enable_irq(priv, INT_MASK_RX);
550 napi_complete(napi);
551 }
552
553 return work_done;
554}
555
556static int ethoc_mdio_read(struct mii_bus *bus, int phy, int reg)
557{
558 unsigned long timeout = jiffies + ETHOC_MII_TIMEOUT;
559 struct ethoc *priv = bus->priv;
560
561 ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg));
562 ethoc_write(priv, MIICOMMAND, MIICOMMAND_READ);
563
564 while (time_before(jiffies, timeout)) {
565 u32 status = ethoc_read(priv, MIISTATUS);
566 if (!(status & MIISTATUS_BUSY)) {
567 u32 data = ethoc_read(priv, MIIRX_DATA);
568 /* reset MII command register */
569 ethoc_write(priv, MIICOMMAND, 0);
570 return data;
571 }
572
573 schedule();
574 }
575
576 return -EBUSY;
577}
578
579static int ethoc_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
580{
581 unsigned long timeout = jiffies + ETHOC_MII_TIMEOUT;
582 struct ethoc *priv = bus->priv;
583
584 ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg));
585 ethoc_write(priv, MIITX_DATA, val);
586 ethoc_write(priv, MIICOMMAND, MIICOMMAND_WRITE);
587
588 while (time_before(jiffies, timeout)) {
589 u32 stat = ethoc_read(priv, MIISTATUS);
590 if (!(stat & MIISTATUS_BUSY))
591 return 0;
592
593 schedule();
594 }
595
596 return -EBUSY;
597}
598
599static int ethoc_mdio_reset(struct mii_bus *bus)
600{
601 return 0;
602}
603
604static void ethoc_mdio_poll(struct net_device *dev)
605{
606}
607
608static int ethoc_mdio_probe(struct net_device *dev)
609{
610 struct ethoc *priv = netdev_priv(dev);
611 struct phy_device *phy;
612 int i;
613
614 for (i = 0; i < PHY_MAX_ADDR; i++) {
615 phy = priv->mdio->phy_map[i];
616 if (phy) {
617 if (priv->phy_id != -1) {
618 /* attach to specified PHY */
619 if (priv->phy_id == phy->addr)
620 break;
621 } else {
622 /* autoselect PHY if none was specified */
623 if (phy->addr != 0)
624 break;
625 }
626 }
627 }
628
629 if (!phy) {
630 dev_err(&dev->dev, "no PHY found\n");
631 return -ENXIO;
632 }
633
634 phy = phy_connect(dev, dev_name(&phy->dev), &ethoc_mdio_poll, 0,
635 PHY_INTERFACE_MODE_GMII);
636 if (IS_ERR(phy)) {
637 dev_err(&dev->dev, "could not attach to PHY\n");
638 return PTR_ERR(phy);
639 }
640
641 priv->phy = phy;
642 return 0;
643}
644
645static int ethoc_open(struct net_device *dev)
646{
647 struct ethoc *priv = netdev_priv(dev);
648 unsigned int min_tx = 2;
649 unsigned int num_bd;
650 int ret;
651
652 ret = request_irq(dev->irq, ethoc_interrupt, IRQF_SHARED,
653 dev->name, dev);
654 if (ret)
655 return ret;
656
657 /* calculate the number of TX/RX buffers */
658 num_bd = (dev->mem_end - dev->mem_start + 1) / ETHOC_BUFSIZ;
659 priv->num_tx = min(min_tx, num_bd / 4);
660 priv->num_rx = num_bd - priv->num_tx;
661 ethoc_write(priv, TX_BD_NUM, priv->num_tx);
662
663 ethoc_init_ring(priv);
664 ethoc_reset(priv);
665
666 if (netif_queue_stopped(dev)) {
667 dev_dbg(&dev->dev, " resuming queue\n");
668 netif_wake_queue(dev);
669 } else {
670 dev_dbg(&dev->dev, " starting queue\n");
671 netif_start_queue(dev);
672 }
673
674 phy_start(priv->phy);
675 napi_enable(&priv->napi);
676
677 if (netif_msg_ifup(priv)) {
678 dev_info(&dev->dev, "I/O: %08lx Memory: %08lx-%08lx\n",
679 dev->base_addr, dev->mem_start, dev->mem_end);
680 }
681
682 return 0;
683}
684
685static int ethoc_stop(struct net_device *dev)
686{
687 struct ethoc *priv = netdev_priv(dev);
688
689 napi_disable(&priv->napi);
690
691 if (priv->phy)
692 phy_stop(priv->phy);
693
694 ethoc_disable_rx_and_tx(priv);
695 free_irq(dev->irq, dev);
696
697 if (!netif_queue_stopped(dev))
698 netif_stop_queue(dev);
699
700 return 0;
701}
702
703static int ethoc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
704{
705 struct ethoc *priv = netdev_priv(dev);
706 struct mii_ioctl_data *mdio = if_mii(ifr);
707 struct phy_device *phy = NULL;
708
709 if (!netif_running(dev))
710 return -EINVAL;
711
712 if (cmd != SIOCGMIIPHY) {
713 if (mdio->phy_id >= PHY_MAX_ADDR)
714 return -ERANGE;
715
716 phy = priv->mdio->phy_map[mdio->phy_id];
717 if (!phy)
718 return -ENODEV;
719 } else {
720 phy = priv->phy;
721 }
722
723 return phy_mii_ioctl(phy, mdio, cmd);
724}
725
726static int ethoc_config(struct net_device *dev, struct ifmap *map)
727{
728 return -ENOSYS;
729}
730
731static int ethoc_set_mac_address(struct net_device *dev, void *addr)
732{
733 struct ethoc *priv = netdev_priv(dev);
734 u8 *mac = (u8 *)addr;
735
736 ethoc_write(priv, MAC_ADDR0, (mac[2] << 24) | (mac[3] << 16) |
737 (mac[4] << 8) | (mac[5] << 0));
738 ethoc_write(priv, MAC_ADDR1, (mac[0] << 8) | (mac[1] << 0));
739
740 return 0;
741}
742
743static void ethoc_set_multicast_list(struct net_device *dev)
744{
745 struct ethoc *priv = netdev_priv(dev);
746 u32 mode = ethoc_read(priv, MODER);
747 struct dev_mc_list *mc = NULL;
748 u32 hash[2] = { 0, 0 };
749
750 /* set loopback mode if requested */
751 if (dev->flags & IFF_LOOPBACK)
752 mode |= MODER_LOOP;
753 else
754 mode &= ~MODER_LOOP;
755
756 /* receive broadcast frames if requested */
757 if (dev->flags & IFF_BROADCAST)
758 mode &= ~MODER_BRO;
759 else
760 mode |= MODER_BRO;
761
762 /* enable promiscuous mode if requested */
763 if (dev->flags & IFF_PROMISC)
764 mode |= MODER_PRO;
765 else
766 mode &= ~MODER_PRO;
767
768 ethoc_write(priv, MODER, mode);
769
770 /* receive multicast frames */
771 if (dev->flags & IFF_ALLMULTI) {
772 hash[0] = 0xffffffff;
773 hash[1] = 0xffffffff;
774 } else {
775 for (mc = dev->mc_list; mc; mc = mc->next) {
776 u32 crc = ether_crc(mc->dmi_addrlen, mc->dmi_addr);
777 int bit = (crc >> 26) & 0x3f;
778 hash[bit >> 5] |= 1 << (bit & 0x1f);
779 }
780 }
781
782 ethoc_write(priv, ETH_HASH0, hash[0]);
783 ethoc_write(priv, ETH_HASH1, hash[1]);
784}
785
786static int ethoc_change_mtu(struct net_device *dev, int new_mtu)
787{
788 return -ENOSYS;
789}
790
791static void ethoc_tx_timeout(struct net_device *dev)
792{
793 struct ethoc *priv = netdev_priv(dev);
794 u32 pending = ethoc_read(priv, INT_SOURCE);
795 if (likely(pending))
796 ethoc_interrupt(dev->irq, dev);
797}
798
799static struct net_device_stats *ethoc_stats(struct net_device *dev)
800{
801 struct ethoc *priv = netdev_priv(dev);
802 return &priv->stats;
803}
804
805static int ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev)
806{
807 struct ethoc *priv = netdev_priv(dev);
808 struct ethoc_bd bd;
809 unsigned int entry;
810 void *dest;
811
812 if (unlikely(skb->len > ETHOC_BUFSIZ)) {
813 priv->stats.tx_errors++;
814 return -EMSGSIZE;
815 }
816
817 entry = priv->cur_tx % priv->num_tx;
818 spin_lock_irq(&priv->lock);
819 priv->cur_tx++;
820
821 ethoc_read_bd(priv, entry, &bd);
822 if (unlikely(skb->len < ETHOC_ZLEN))
823 bd.stat |= TX_BD_PAD;
824 else
825 bd.stat &= ~TX_BD_PAD;
826
827 dest = priv->membase + bd.addr;
828 memcpy_toio(dest, skb->data, skb->len);
829
830 bd.stat &= ~(TX_BD_STATS | TX_BD_LEN_MASK);
831 bd.stat |= TX_BD_LEN(skb->len);
832 ethoc_write_bd(priv, entry, &bd);
833
834 bd.stat |= TX_BD_READY;
835 ethoc_write_bd(priv, entry, &bd);
836
837 if (priv->cur_tx == (priv->dty_tx + priv->num_tx)) {
838 dev_dbg(&dev->dev, "stopping queue\n");
839 netif_stop_queue(dev);
840 }
841
842 dev->trans_start = jiffies;
843 dev_kfree_skb(skb);
844
845 spin_unlock_irq(&priv->lock);
846 return NETDEV_TX_OK;
847}
848
849static const struct net_device_ops ethoc_netdev_ops = {
850 .ndo_open = ethoc_open,
851 .ndo_stop = ethoc_stop,
852 .ndo_do_ioctl = ethoc_ioctl,
853 .ndo_set_config = ethoc_config,
854 .ndo_set_mac_address = ethoc_set_mac_address,
855 .ndo_set_multicast_list = ethoc_set_multicast_list,
856 .ndo_change_mtu = ethoc_change_mtu,
857 .ndo_tx_timeout = ethoc_tx_timeout,
858 .ndo_get_stats = ethoc_stats,
859 .ndo_start_xmit = ethoc_start_xmit,
860};
861
862/**
863 * ethoc_probe() - initialize OpenCores ethernet MAC
864 * @pdev: platform device
865 */
866static int ethoc_probe(struct platform_device *pdev)
867{
868 struct net_device *netdev = NULL;
869 struct resource *res = NULL;
870 struct resource *mmio = NULL;
871 struct resource *mem = NULL;
872 struct ethoc *priv = NULL;
873 unsigned int phy;
874 int ret = 0;
875
876 /* allocate networking device */
877 netdev = alloc_etherdev(sizeof(struct ethoc));
878 if (!netdev) {
879 dev_err(&pdev->dev, "cannot allocate network device\n");
880 ret = -ENOMEM;
881 goto out;
882 }
883
884 SET_NETDEV_DEV(netdev, &pdev->dev);
885 platform_set_drvdata(pdev, netdev);
886
887 /* obtain I/O memory space */
888 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
889 if (!res) {
890 dev_err(&pdev->dev, "cannot obtain I/O memory space\n");
891 ret = -ENXIO;
892 goto free;
893 }
894
895 mmio = devm_request_mem_region(&pdev->dev, res->start,
896 res->end - res->start + 1, res->name);
897 if (!mmio) {
898 dev_err(&pdev->dev, "cannot request I/O memory space\n");
899 ret = -ENXIO;
900 goto free;
901 }
902
903 netdev->base_addr = mmio->start;
904
905 /* obtain buffer memory space */
906 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
907 if (!res) {
908 dev_err(&pdev->dev, "cannot obtain memory space\n");
909 ret = -ENXIO;
910 goto free;
911 }
912
913 mem = devm_request_mem_region(&pdev->dev, res->start,
914 res->end - res->start + 1, res->name);
915 if (!mem) {
916 dev_err(&pdev->dev, "cannot request memory space\n");
917 ret = -ENXIO;
918 goto free;
919 }
920
921 netdev->mem_start = mem->start;
922 netdev->mem_end = mem->end;
923
924 /* obtain device IRQ number */
925 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
926 if (!res) {
927 dev_err(&pdev->dev, "cannot obtain IRQ\n");
928 ret = -ENXIO;
929 goto free;
930 }
931
932 netdev->irq = res->start;
933
934 /* setup driver-private data */
935 priv = netdev_priv(netdev);
936 priv->netdev = netdev;
937
938 priv->iobase = devm_ioremap_nocache(&pdev->dev, netdev->base_addr,
939 mmio->end - mmio->start + 1);
940 if (!priv->iobase) {
941 dev_err(&pdev->dev, "cannot remap I/O memory space\n");
942 ret = -ENXIO;
943 goto error;
944 }
945
946 priv->membase = devm_ioremap_nocache(&pdev->dev, netdev->mem_start,
947 mem->end - mem->start + 1);
948 if (!priv->membase) {
949 dev_err(&pdev->dev, "cannot remap memory space\n");
950 ret = -ENXIO;
951 goto error;
952 }
953
954 /* Allow the platform setup code to pass in a MAC address. */
955 if (pdev->dev.platform_data) {
956 struct ethoc_platform_data *pdata =
957 (struct ethoc_platform_data *)pdev->dev.platform_data;
958 memcpy(netdev->dev_addr, pdata->hwaddr, IFHWADDRLEN);
959 priv->phy_id = pdata->phy_id;
960 }
961
962 /* Check that the given MAC address is valid. If it isn't, read the
963 * current MAC from the controller. */
964 if (!is_valid_ether_addr(netdev->dev_addr))
965 ethoc_get_mac_address(netdev, netdev->dev_addr);
966
967 /* Check the MAC again for validity, if it still isn't choose and
968 * program a random one. */
969 if (!is_valid_ether_addr(netdev->dev_addr))
970 random_ether_addr(netdev->dev_addr);
971
972 ethoc_set_mac_address(netdev, netdev->dev_addr);
973
974 /* register MII bus */
975 priv->mdio = mdiobus_alloc();
976 if (!priv->mdio) {
977 ret = -ENOMEM;
978 goto free;
979 }
980
981 priv->mdio->name = "ethoc-mdio";
982 snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "%s-%d",
983 priv->mdio->name, pdev->id);
984 priv->mdio->read = ethoc_mdio_read;
985 priv->mdio->write = ethoc_mdio_write;
986 priv->mdio->reset = ethoc_mdio_reset;
987 priv->mdio->priv = priv;
988
989 priv->mdio->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
990 if (!priv->mdio->irq) {
991 ret = -ENOMEM;
992 goto free_mdio;
993 }
994
995 for (phy = 0; phy < PHY_MAX_ADDR; phy++)
996 priv->mdio->irq[phy] = PHY_POLL;
997
998 ret = mdiobus_register(priv->mdio);
999 if (ret) {
1000 dev_err(&netdev->dev, "failed to register MDIO bus\n");
1001 goto free_mdio;
1002 }
1003
1004 ret = ethoc_mdio_probe(netdev);
1005 if (ret) {
1006 dev_err(&netdev->dev, "failed to probe MDIO bus\n");
1007 goto error;
1008 }
1009
1010 ether_setup(netdev);
1011
1012 /* setup the net_device structure */
1013 netdev->netdev_ops = &ethoc_netdev_ops;
1014 netdev->watchdog_timeo = ETHOC_TIMEOUT;
1015 netdev->features |= 0;
1016
1017 /* setup NAPI */
1018 memset(&priv->napi, 0, sizeof(priv->napi));
1019 netif_napi_add(netdev, &priv->napi, ethoc_poll, 64);
1020
1021 spin_lock_init(&priv->rx_lock);
1022 spin_lock_init(&priv->lock);
1023
1024 ret = register_netdev(netdev);
1025 if (ret < 0) {
1026 dev_err(&netdev->dev, "failed to register interface\n");
1027 goto error;
1028 }
1029
1030 goto out;
1031
1032error:
1033 mdiobus_unregister(priv->mdio);
1034free_mdio:
1035 kfree(priv->mdio->irq);
1036 mdiobus_free(priv->mdio);
1037free:
1038 free_netdev(netdev);
1039out:
1040 return ret;
1041}
1042
1043/**
1044 * ethoc_remove() - shutdown OpenCores ethernet MAC
1045 * @pdev: platform device
1046 */
1047static int ethoc_remove(struct platform_device *pdev)
1048{
1049 struct net_device *netdev = platform_get_drvdata(pdev);
1050 struct ethoc *priv = netdev_priv(netdev);
1051
1052 platform_set_drvdata(pdev, NULL);
1053
1054 if (netdev) {
1055 phy_disconnect(priv->phy);
1056 priv->phy = NULL;
1057
1058 if (priv->mdio) {
1059 mdiobus_unregister(priv->mdio);
1060 kfree(priv->mdio->irq);
1061 mdiobus_free(priv->mdio);
1062 }
1063
1064 unregister_netdev(netdev);
1065 free_netdev(netdev);
1066 }
1067
1068 return 0;
1069}
1070
1071#ifdef CONFIG_PM
1072static int ethoc_suspend(struct platform_device *pdev, pm_message_t state)
1073{
1074 return -ENOSYS;
1075}
1076
1077static int ethoc_resume(struct platform_device *pdev)
1078{
1079 return -ENOSYS;
1080}
1081#else
1082# define ethoc_suspend NULL
1083# define ethoc_resume NULL
1084#endif
1085
1086static struct platform_driver ethoc_driver = {
1087 .probe = ethoc_probe,
1088 .remove = ethoc_remove,
1089 .suspend = ethoc_suspend,
1090 .resume = ethoc_resume,
1091 .driver = {
1092 .name = "ethoc",
1093 },
1094};
1095
1096static int __init ethoc_init(void)
1097{
1098 return platform_driver_register(&ethoc_driver);
1099}
1100
1101static void __exit ethoc_exit(void)
1102{
1103 platform_driver_unregister(&ethoc_driver);
1104}
1105
1106module_init(ethoc_init);
1107module_exit(ethoc_exit);
1108
1109MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
1110MODULE_DESCRIPTION("OpenCores Ethernet MAC driver");
1111MODULE_LICENSE("GPL v2");
1112