author	Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>	2008-06-09 19:33:56 -0400
committer	Jeff Garzik <jgarzik@redhat.com>	2008-06-11 21:58:25 -0400
commit	86a74ff21a7ac4bc06b18076ddb0347712b46cfd (patch)
tree	c15df52245e3d289e85f8a036f2250d11f615ba6 /drivers
parent	1ae9d2f4d776bd7e5f64d957216051cd36eb6802 (diff)
net: sh_eth: add support for Renesas SuperH Ethernet
Add support for Renesas SuperH Ethernet controller. This driver supports SH7710 and SH7712.

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Yoshihiro Shimoda <shimoda.yoshihiro@renesas.com>
Signed-off-by: Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Jeff Garzik <jeff@garzik.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/net/Kconfig	12
-rw-r--r--	drivers/net/Makefile	1
-rw-r--r--	drivers/net/sh_eth.c	1174
-rw-r--r--	drivers/net/sh_eth.h	464
4 files changed, 1651 insertions, 0 deletions
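
The driver does not locate the hardware itself: sh_eth_drv_probe() below expects each board to register a platform device named "sh-eth" carrying the register base as an IORESOURCE_MEM resource, the interrupt as an IRQ resource, and the PHY address on the MDIO bus cast into platform_data. A minimal board-side sketch follows; the array and initcall names, the IRQ number 57 and the PHY address 0x01 are placeholders, not part of this patch:

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static struct resource sh_eth0_resources[] = {
	[0] = {
		.start	= 0xA7000000,	/* SH_ETH0_BASE from sh_eth.h */
		.end	= 0xA70001FF,	/* informational; the probe only uses .start */
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= 57,		/* placeholder EtherC/E-DMAC interrupt */
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device sh_eth0_device = {
	.name		= "sh-eth",	/* must match CARDNAME */
	.id		= 0,
	.dev		= {
		.platform_data	= (void *)0x01,	/* PHY address, read as phy_id */
	},
	.num_resources	= ARRAY_SIZE(sh_eth0_resources),
	.resource	= sh_eth0_resources,
};

static int __init sh_eth0_board_register(void)
{
	return platform_device_register(&sh_eth0_device);
}
device_initcall(sh_eth0_board_register);
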
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 8c5e8405fe09..40eb24d6d755 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -524,6 +524,18 @@ config STNIC
524
525	  If unsure, say N.
526
527config SH_ETH
528 tristate "Renesas SuperH Ethernet support"
529 depends on SUPERH && \
530 (CPU_SUBTYPE_SH7710 || CPU_SUBTYPE_SH7712)
531 select CRC32
532 select MII
533 select MDIO_BITBANG
534 select PHYLIB
535 help
536 Renesas SuperH Ethernet device driver.
537	  This driver supports SH7710 and SH7712.
538
539config SUNLANCE
540	tristate "Sun LANCE support"
541	depends on SBUS
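
With this entry in place, a board configuration only has to enable CONFIG_SH_ETH (built in or as a module); the CRC32, MII, MDIO_BITBANG and PHYLIB dependencies are pulled in automatically by the select lines above.
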
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index dcbfe8421154..c52738a3aaab 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -80,6 +80,7 @@ obj-$(CONFIG_VIA_RHINE) += via-rhine.o
80obj-$(CONFIG_VIA_VELOCITY) += via-velocity.o
81obj-$(CONFIG_ADAPTEC_STARFIRE) += starfire.o
82obj-$(CONFIG_RIONET) += rionet.o
83obj-$(CONFIG_SH_ETH) += sh_eth.o
84
85#
86# end link order section
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
new file mode 100644
index 000000000000..f64d987140a9
--- /dev/null
+++ b/drivers/net/sh_eth.c
@@ -0,0 +1,1174 @@
1/*
2 * SuperH Ethernet device driver
3 *
4 * Copyright (C) 2006,2007 Nobuhiro Iwamatsu
5 * Copyright (C) 2008 Renesas Solutions Corp.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 */
22
23#include <linux/version.h>
24#include <linux/init.h>
25#include <linux/dma-mapping.h>
26#include <linux/etherdevice.h>
27#include <linux/delay.h>
28#include <linux/platform_device.h>
29#include <linux/mdio-bitbang.h>
30#include <linux/netdevice.h>
31#include <linux/phy.h>
32#include <linux/cache.h>
33#include <linux/io.h>
34
35#include "sh_eth.h"
36
37/*
38 * Program the hardware MAC address from dev->dev_addr.
39 */
40static void update_mac_address(struct net_device *ndev)
41{
42 u32 ioaddr = ndev->base_addr;
43
44 ctrl_outl((ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
45 (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]),
46 ioaddr + MAHR);
47 ctrl_outl((ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]),
48 ioaddr + MALR);
49}
50
51/*
52 * Get MAC address from SuperH MAC address register
53 *
54 * The SuperH Ethernet controller has no ROM for the MAC address.
55 * This driver reads the address that the bootloader (U-Boot or sh-ipl+g)
56 * programmed, so the MAC address must be set in the bootloader first.
57 *
58 */
59static void read_mac_address(struct net_device *ndev)
60{
61 u32 ioaddr = ndev->base_addr;
62
63 ndev->dev_addr[0] = (ctrl_inl(ioaddr + MAHR) >> 24);
64 ndev->dev_addr[1] = (ctrl_inl(ioaddr + MAHR) >> 16) & 0xFF;
65 ndev->dev_addr[2] = (ctrl_inl(ioaddr + MAHR) >> 8) & 0xFF;
66 ndev->dev_addr[3] = (ctrl_inl(ioaddr + MAHR) & 0xFF);
67 ndev->dev_addr[4] = (ctrl_inl(ioaddr + MALR) >> 8) & 0xFF;
68 ndev->dev_addr[5] = (ctrl_inl(ioaddr + MALR) & 0xFF);
69}
70
71struct bb_info {
72 struct mdiobb_ctrl ctrl;
73 u32 addr;
74 u32 mmd_msk;/* MMD */
75 u32 mdo_msk;
76 u32 mdi_msk;
77 u32 mdc_msk;
78};
79
80/* PHY bit set */
81static void bb_set(u32 addr, u32 msk)
82{
83 ctrl_outl(ctrl_inl(addr) | msk, addr);
84}
85
86/* PHY bit clear */
87static void bb_clr(u32 addr, u32 msk)
88{
89 ctrl_outl((ctrl_inl(addr) & ~msk), addr);
90}
91
92/* PHY bit read */
93static int bb_read(u32 addr, u32 msk)
94{
95 return (ctrl_inl(addr) & msk) != 0;
96}
97
98/* Data I/O pin control */
99static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
100{
101 struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
102 if (bit)
103 bb_set(bitbang->addr, bitbang->mmd_msk);
104 else
105 bb_clr(bitbang->addr, bitbang->mmd_msk);
106}
107
108/* Set bit data*/
109static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
110{
111 struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
112
113 if (bit)
114 bb_set(bitbang->addr, bitbang->mdo_msk);
115 else
116 bb_clr(bitbang->addr, bitbang->mdo_msk);
117}
118
119/* Get bit data*/
120static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
121{
122 struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
123 return bb_read(bitbang->addr, bitbang->mdi_msk);
124}
125
126/* MDC pin control */
127static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
128{
129 struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
130
131 if (bit)
132 bb_set(bitbang->addr, bitbang->mdc_msk);
133 else
134 bb_clr(bitbang->addr, bitbang->mdc_msk);
135}
136
137/* mdio bus control struct */
138static struct mdiobb_ops bb_ops = {
139 .owner = THIS_MODULE,
140 .set_mdc = sh_mdc_ctrl,
141 .set_mdio_dir = sh_mmd_ctrl,
142 .set_mdio_data = sh_set_mdio,
143 .get_mdio_data = sh_get_mdio,
144};
145
146static void sh_eth_reset(struct net_device *ndev)
147{
148 u32 ioaddr = ndev->base_addr;
149
150 ctrl_outl(ctrl_inl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR);
151 mdelay(3);
152 ctrl_outl(ctrl_inl(ioaddr + EDMR) & ~EDMR_SRST, ioaddr + EDMR);
153}
154
155/* free skb and descriptor buffer */
156static void sh_eth_ring_free(struct net_device *ndev)
157{
158 struct sh_eth_private *mdp = netdev_priv(ndev);
159 int i;
160
161 /* Free Rx skb ringbuffer */
162 if (mdp->rx_skbuff) {
163 for (i = 0; i < RX_RING_SIZE; i++) {
164 if (mdp->rx_skbuff[i])
165 dev_kfree_skb(mdp->rx_skbuff[i]);
166 }
167 }
168 kfree(mdp->rx_skbuff);
169
170 /* Free Tx skb ringbuffer */
171 if (mdp->tx_skbuff) {
172 for (i = 0; i < TX_RING_SIZE; i++) {
173 if (mdp->tx_skbuff[i])
174 dev_kfree_skb(mdp->tx_skbuff[i]);
175 }
176 }
177 kfree(mdp->tx_skbuff);
178}
179
180/* format skb and descriptor buffer */
181static void sh_eth_ring_format(struct net_device *ndev)
182{
183 struct sh_eth_private *mdp = netdev_priv(ndev);
184 int i;
185 struct sk_buff *skb;
186 struct sh_eth_rxdesc *rxdesc = NULL;
187 struct sh_eth_txdesc *txdesc = NULL;
188 int rx_ringsize = sizeof(*rxdesc) * RX_RING_SIZE;
189 int tx_ringsize = sizeof(*txdesc) * TX_RING_SIZE;
190
191 mdp->cur_rx = mdp->cur_tx = 0;
192 mdp->dirty_rx = mdp->dirty_tx = 0;
193
194 memset(mdp->rx_ring, 0, rx_ringsize);
195
196 /* build Rx ring buffer */
197 for (i = 0; i < RX_RING_SIZE; i++) {
198 /* skb */
199 mdp->rx_skbuff[i] = NULL;
200 skb = dev_alloc_skb(mdp->rx_buf_sz);
201 mdp->rx_skbuff[i] = skb;
202 if (skb == NULL)
203 break;
204 skb->dev = ndev; /* Mark as being used by this device. */
205 skb_reserve(skb, RX_OFFSET);
206
207 /* RX descriptor */
208 rxdesc = &mdp->rx_ring[i];
209 rxdesc->addr = (u32)skb->data & ~0x3UL;
210 rxdesc->status = cpu_to_le32(RD_RACT | RD_RFP);
211
212 /* The buffer length must be a multiple of 16 bytes. */
213 rxdesc->buffer_length = (mdp->rx_buf_sz + 16) & ~0x0F;
214 }
215
216 mdp->dirty_rx = (u32) (i - RX_RING_SIZE);
217
218 /* Mark the last entry as wrapping the ring. */
219 rxdesc->status |= cpu_to_le32(RC_RDEL);
220
221 memset(mdp->tx_ring, 0, tx_ringsize);
222
223 /* build Tx ring buffer */
224 for (i = 0; i < TX_RING_SIZE; i++) {
225 mdp->tx_skbuff[i] = NULL;
226 txdesc = &mdp->tx_ring[i];
227 txdesc->status = cpu_to_le32(TD_TFP);
228 txdesc->buffer_length = 0;
229 }
230
231 txdesc->status |= cpu_to_le32(TD_TDLE);
232}
233
234/* Get skb and descriptor buffer */
235static int sh_eth_ring_init(struct net_device *ndev)
236{
237 struct sh_eth_private *mdp = netdev_priv(ndev);
238 int rx_ringsize, tx_ringsize, ret = 0;
239
240 /*
241 * +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
242 * card needs room to do 8 byte alignment, +2 so we can reserve
243 * the first 2 bytes, and +16 gets room for the status word from the
244 * card.
245 */
246 mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
247 (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
248
249 /* Allocate RX and TX skb rings */
250 mdp->rx_skbuff = kmalloc(sizeof(*mdp->rx_skbuff) * RX_RING_SIZE,
251 GFP_KERNEL);
252 if (!mdp->rx_skbuff) {
253 printk(KERN_ERR "%s: Cannot allocate Rx skb\n", ndev->name);
254 ret = -ENOMEM;
255 return ret;
256 }
257
258 mdp->tx_skbuff = kmalloc(sizeof(*mdp->tx_skbuff) * TX_RING_SIZE,
259 GFP_KERNEL);
260 if (!mdp->tx_skbuff) {
261 printk(KERN_ERR "%s: Cannot allocate Tx skb\n", ndev->name);
262 ret = -ENOMEM;
263 goto skb_ring_free;
264 }
265
266 /* Allocate all Rx descriptors. */
267 rx_ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
268 mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
269 GFP_KERNEL);
270
271 if (!mdp->rx_ring) {
272 printk(KERN_ERR "%s: Cannot allocate Rx Ring (size %d bytes)\n",
273 ndev->name, rx_ringsize);
274 ret = -ENOMEM;
275 goto desc_ring_free;
276 }
277
278 mdp->dirty_rx = 0;
279
280 /* Allocate all Tx descriptors. */
281 tx_ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
282 mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
283 GFP_KERNEL);
284 if (!mdp->tx_ring) {
285 printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n",
286 ndev->name, tx_ringsize);
287 ret = -ENOMEM;
288 goto desc_ring_free;
289 }
290 return ret;
291
292desc_ring_free:
293 /* free DMA buffer */
294 dma_free_coherent(NULL, rx_ringsize, mdp->rx_ring, mdp->rx_desc_dma);
295
296skb_ring_free:
297 /* Free Rx and Tx skb ring buffer */
298 sh_eth_ring_free(ndev);
299
300 return ret;
301}
302
303static int sh_eth_dev_init(struct net_device *ndev)
304{
305 int ret = 0;
306 struct sh_eth_private *mdp = netdev_priv(ndev);
307 u32 ioaddr = ndev->base_addr;
308 u_int32_t rx_int_var, tx_int_var;
309 u32 val;
310
311 /* Soft Reset */
312 sh_eth_reset(ndev);
313
314 ctrl_outl(RPADIR_PADS1, ioaddr + RPADIR); /* SH7712-DMA-RX-PAD2 */
315
316 /* all sh_eth int mask */
317 ctrl_outl(0, ioaddr + EESIPR);
318
319 /* FIFO size set */
320 ctrl_outl(0, ioaddr + EDMR); /* Endian change */
321
322 ctrl_outl((FIFO_SIZE_T | FIFO_SIZE_R), ioaddr + FDR);
323 ctrl_outl(0, ioaddr + TFTR);
324
325 ctrl_outl(RMCR_RST, ioaddr + RMCR);
326
327 rx_int_var = mdp->rx_int_var = DESC_I_RINT8 | DESC_I_RINT5;
328 tx_int_var = mdp->tx_int_var = DESC_I_TINT2;
329 ctrl_outl(rx_int_var | tx_int_var, ioaddr + TRSCER);
330
331 ctrl_outl((FIFO_F_D_RFF | FIFO_F_D_RFD), ioaddr + FCFTR);
332 ctrl_outl(0, ioaddr + TRIMD);
333
334 /* Descriptor format */
335 sh_eth_ring_format(ndev);
336
337 ctrl_outl((u32)mdp->rx_ring, ioaddr + RDLAR);
338 ctrl_outl((u32)mdp->tx_ring, ioaddr + TDLAR);
339
340 ctrl_outl(ctrl_inl(ioaddr + EESR), ioaddr + EESR);
341 ctrl_outl((DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff), ioaddr + EESIPR);
342
343 /* PAUSE Prohibition */
344 val = (ctrl_inl(ioaddr + ECMR) & ECMR_DM) |
345 ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;
346
347 ctrl_outl(val, ioaddr + ECMR);
348 ctrl_outl(ECSR_BRCRX | ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD |
349 ECSIPR_MPDIP, ioaddr + ECSR);
350 ctrl_outl(ECSIPR_BRCRXIP | ECSIPR_PSRTOIP | ECSIPR_LCHNGIP |
351 ECSIPR_ICDIP | ECSIPR_MPDIP, ioaddr + ECSIPR);
352
353 /* Set MAC address */
354 update_mac_address(ndev);
355
356 /* mask reset */
357#if defined(CONFIG_CPU_SUBTYPE_SH7710)
358 ctrl_outl(APR_AP, ioaddr + APR);
359 ctrl_outl(MPR_MP, ioaddr + MPR);
360 ctrl_outl(TPAUSER_UNLIMITED, ioaddr + TPAUSER);
361 ctrl_outl(BCFR_UNLIMITED, ioaddr + BCFR);
362#endif
363 /* Setting the Rx mode will start the Rx process. */
364 ctrl_outl(EDRRR_R, ioaddr + EDRRR);
365
366 netif_start_queue(ndev);
367
368 return ret;
369}
370
371/* free Tx skb function */
372static int sh_eth_txfree(struct net_device *ndev)
373{
374 struct sh_eth_private *mdp = netdev_priv(ndev);
375 struct sh_eth_txdesc *txdesc;
376 int freeNum = 0;
377 int entry = 0;
378
379 for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
380 entry = mdp->dirty_tx % TX_RING_SIZE;
381 txdesc = &mdp->tx_ring[entry];
382 if (txdesc->status & cpu_to_le32(TD_TACT))
383 break;
384 /* Free the original skb. */
385 if (mdp->tx_skbuff[entry]) {
386 dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
387 mdp->tx_skbuff[entry] = NULL;
388 freeNum++;
389 }
390 txdesc->status = cpu_to_le32(TD_TFP);
391 if (entry >= TX_RING_SIZE - 1)
392 txdesc->status |= cpu_to_le32(TD_TDLE);
393
394 mdp->stats.tx_packets++;
395 mdp->stats.tx_bytes += txdesc->buffer_length;
396 }
397 return freeNum;
398}
399
400/* Packet receive function */
401static int sh_eth_rx(struct net_device *ndev)
402{
403 struct sh_eth_private *mdp = netdev_priv(ndev);
404 struct sh_eth_rxdesc *rxdesc;
405
406 int entry = mdp->cur_rx % RX_RING_SIZE;
407 int boguscnt = (mdp->dirty_rx + RX_RING_SIZE) - mdp->cur_rx;
408 struct sk_buff *skb;
409 u16 pkt_len = 0;
410 u32 desc_status;
411
412 rxdesc = &mdp->rx_ring[entry];
413 while (!(rxdesc->status & cpu_to_le32(RD_RACT))) {
414 desc_status = le32_to_cpu(rxdesc->status);
415 pkt_len = rxdesc->frame_length;
416
417 if (--boguscnt < 0)
418 break;
419
420 if (!(desc_status & RDFEND))
421 mdp->stats.rx_length_errors++;
422
423 if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
424 RD_RFS5 | RD_RFS6 | RD_RFS10)) {
425 mdp->stats.rx_errors++;
426 if (desc_status & RD_RFS1)
427 mdp->stats.rx_crc_errors++;
428 if (desc_status & RD_RFS2)
429 mdp->stats.rx_frame_errors++;
430 if (desc_status & RD_RFS3)
431 mdp->stats.rx_length_errors++;
432 if (desc_status & RD_RFS4)
433 mdp->stats.rx_length_errors++;
434 if (desc_status & RD_RFS6)
435 mdp->stats.rx_missed_errors++;
436 if (desc_status & RD_RFS10)
437 mdp->stats.rx_over_errors++;
438 } else {
439 swaps((char *)(rxdesc->addr & ~0x3), pkt_len + 2);
440 skb = mdp->rx_skbuff[entry];
441 mdp->rx_skbuff[entry] = NULL;
442 skb_put(skb, pkt_len);
443 skb->protocol = eth_type_trans(skb, ndev);
444 netif_rx(skb);
445 ndev->last_rx = jiffies;
446 mdp->stats.rx_packets++;
447 mdp->stats.rx_bytes += pkt_len;
448 }
449 rxdesc->status |= cpu_to_le32(RD_RACT);
450 entry = (++mdp->cur_rx) % RX_RING_SIZE;
451 }
452
453 /* Refill the Rx ring buffers. */
454 for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
455 entry = mdp->dirty_rx % RX_RING_SIZE;
456 rxdesc = &mdp->rx_ring[entry];
457 if (mdp->rx_skbuff[entry] == NULL) {
458 skb = dev_alloc_skb(mdp->rx_buf_sz);
459 mdp->rx_skbuff[entry] = skb;
460 if (skb == NULL)
461 break; /* Better luck next round. */
462 skb->dev = ndev;
463 skb_reserve(skb, RX_OFFSET);
464 rxdesc->addr = (u32)skb->data & ~0x3UL;
465 }
466 /* The buffer length must be a multiple of 16 bytes. */
467 rxdesc->buffer_length = (mdp->rx_buf_sz + 16) & ~0x0F;
468 if (entry >= RX_RING_SIZE - 1)
469 rxdesc->status |=
470 cpu_to_le32(RD_RACT | RD_RFP | RC_RDEL);
471 else
472 rxdesc->status |=
473 cpu_to_le32(RD_RACT | RD_RFP);
474 }
475
476 /* Restart Rx engine if stopped. */
477 /* If we don't need to check status, don't. -KDU */
478 ctrl_outl(EDRRR_R, ndev->base_addr + EDRRR);
479
480 return 0;
481}
482
483/* error control function */
484static void sh_eth_error(struct net_device *ndev, int intr_status)
485{
486 struct sh_eth_private *mdp = netdev_priv(ndev);
487 u32 ioaddr = ndev->base_addr;
488 u32 felic_stat;
489
490 if (intr_status & EESR_ECI) {
491 felic_stat = ctrl_inl(ioaddr + ECSR);
492 ctrl_outl(felic_stat, ioaddr + ECSR); /* clear int */
493 if (felic_stat & ECSR_ICD)
494 mdp->stats.tx_carrier_errors++;
495 if (felic_stat & ECSR_LCHNG) {
496 /* Link Changed */
497 u32 link_stat = (ctrl_inl(ioaddr + PSR));
498 if (!(link_stat & PHY_ST_LINK)) {
499 /* Link Down : disable tx and rx */
500 ctrl_outl(ctrl_inl(ioaddr + ECMR) &
501 ~(ECMR_RE | ECMR_TE), ioaddr + ECMR);
502 } else {
503 /* Link Up */
504 ctrl_outl(ctrl_inl(ioaddr + EESIPR) &
505 ~DMAC_M_ECI, ioaddr + EESIPR);
506 /*clear int */
507 ctrl_outl(ctrl_inl(ioaddr + ECSR),
508 ioaddr + ECSR);
509 ctrl_outl(ctrl_inl(ioaddr + EESIPR) |
510 DMAC_M_ECI, ioaddr + EESIPR);
511 /* enable tx and rx */
512 ctrl_outl(ctrl_inl(ioaddr + ECMR) |
513 (ECMR_RE | ECMR_TE), ioaddr + ECMR);
514 }
515 }
516 }
517
518 if (intr_status & EESR_TWB) {
519 /* Write-back end; unused write-back interrupt */
520 if (intr_status & EESR_TABT) /* Transmit Abort int */
521 mdp->stats.tx_aborted_errors++;
522 }
523
524 if (intr_status & EESR_RABT) {
525 /* Receive Abort int */
526 if (intr_status & EESR_RFRMER) {
527 /* Receive Frame Overflow int */
528 mdp->stats.rx_frame_errors++;
529 printk(KERN_ERR "Receive Frame Overflow\n");
530 }
531 }
532
533 if (intr_status & EESR_ADE) {
534 if (intr_status & EESR_TDE) {
535 if (intr_status & EESR_TFE)
536 mdp->stats.tx_fifo_errors++;
537 }
538 }
539
540 if (intr_status & EESR_RDE) {
541 /* Receive Descriptor Empty int */
542 mdp->stats.rx_over_errors++;
543
544 if (ctrl_inl(ioaddr + EDRRR) ^ EDRRR_R)
545 ctrl_outl(EDRRR_R, ioaddr + EDRRR);
546 printk(KERN_ERR "Receive Descriptor Empty\n");
547 }
548 if (intr_status & EESR_RFE) {
549 /* Receive FIFO Overflow int */
550 mdp->stats.rx_fifo_errors++;
551 printk(KERN_ERR "Receive FIFO Overflow\n");
552 }
553 if (intr_status &
554 (EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE)) {
555 /* Tx error */
556 u32 edtrr = ctrl_inl(ndev->base_addr + EDTRR);
557 /* dmesg */
558 printk(KERN_ERR "%s:TX error. status=%8.8x cur_tx=%8.8x ",
559 ndev->name, intr_status, mdp->cur_tx);
560 printk(KERN_ERR "dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
561 mdp->dirty_tx, (u32) ndev->state, edtrr);
562 /* dirty buffer free */
563 sh_eth_txfree(ndev);
564
565 /* SH7712 BUG */
566 if (edtrr ^ EDTRR_TRNS) {
567 /* tx dma start */
568 ctrl_outl(EDTRR_TRNS, ndev->base_addr + EDTRR);
569 }
570 /* wakeup */
571 netif_wake_queue(ndev);
572 }
573}
574
575static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
576{
577 struct net_device *ndev = netdev;
578 struct sh_eth_private *mdp = netdev_priv(ndev);
579 u32 ioaddr, boguscnt = RX_RING_SIZE;
580 u32 intr_status = 0;
581
582 ioaddr = ndev->base_addr;
583 spin_lock(&mdp->lock);
584
585 intr_status = ctrl_inl(ioaddr + EESR);
586 /* Clear interrupt */
587 ctrl_outl(intr_status, ioaddr + EESR);
588
589 if (intr_status & (EESR_FRC | EESR_RINT8 |
590 EESR_RINT5 | EESR_RINT4 | EESR_RINT3 | EESR_RINT2 |
591 EESR_RINT1))
592 sh_eth_rx(ndev);
593 if (intr_status & (EESR_FTC |
594 EESR_TINT4 | EESR_TINT3 | EESR_TINT2 | EESR_TINT1)) {
595
596 sh_eth_txfree(ndev);
597 netif_wake_queue(ndev);
598 }
599
600 if (intr_status & EESR_ERR_CHECK)
601 sh_eth_error(ndev, intr_status);
602
603 if (--boguscnt < 0) {
604 printk(KERN_WARNING
605 "%s: Too much work at interrupt, status=0x%4.4x.\n",
606 ndev->name, intr_status);
607 }
608
609 spin_unlock(&mdp->lock);
610
611 return IRQ_HANDLED;
612}
613
614static void sh_eth_timer(unsigned long data)
615{
616 struct net_device *ndev = (struct net_device *)data;
617 struct sh_eth_private *mdp = netdev_priv(ndev);
618
619 mod_timer(&mdp->timer, jiffies + (10 * HZ));
620}
621
622/* PHY state control function */
623static void sh_eth_adjust_link(struct net_device *ndev)
624{
625 struct sh_eth_private *mdp = netdev_priv(ndev);
626 struct phy_device *phydev = mdp->phydev;
627 u32 ioaddr = ndev->base_addr;
628 int new_state = 0;
629
630 if (phydev->link != PHY_DOWN) {
631 if (phydev->duplex != mdp->duplex) {
632 new_state = 1;
633 mdp->duplex = phydev->duplex;
634 }
635
636 if (phydev->speed != mdp->speed) {
637 new_state = 1;
638 mdp->speed = phydev->speed;
639 }
640 if (mdp->link == PHY_DOWN) {
641 ctrl_outl((ctrl_inl(ioaddr + ECMR) & ~ECMR_TXF)
642 | ECMR_DM, ioaddr + ECMR);
643 new_state = 1;
644 mdp->link = phydev->link;
645 netif_schedule(ndev);
646 netif_carrier_on(ndev);
647 netif_start_queue(ndev);
648 }
649 } else if (mdp->link) {
650 new_state = 1;
651 mdp->link = PHY_DOWN;
652 mdp->speed = 0;
653 mdp->duplex = -1;
654 netif_stop_queue(ndev);
655 netif_carrier_off(ndev);
656 }
657
658 if (new_state)
659 phy_print_status(phydev);
660}
661
662/* PHY init function */
663static int sh_eth_phy_init(struct net_device *ndev)
664{
665 struct sh_eth_private *mdp = netdev_priv(ndev);
666 char phy_id[BUS_ID_SIZE];
667 struct phy_device *phydev = NULL;
668
669 snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT,
670 mdp->mii_bus->id , mdp->phy_id);
671
672 mdp->link = PHY_DOWN;
673 mdp->speed = 0;
674 mdp->duplex = -1;
675
676 /* Try connect to PHY */
677 phydev = phy_connect(ndev, phy_id, &sh_eth_adjust_link,
678 0, PHY_INTERFACE_MODE_MII);
679 if (IS_ERR(phydev)) {
680 dev_err(&ndev->dev, "phy_connect failed\n");
681 return PTR_ERR(phydev);
682 }
683 dev_info(&ndev->dev, "attached phy %i to driver %s\n",
684 phydev->addr, phydev->drv->name);
685
686 mdp->phydev = phydev;
687
688 return 0;
689}
690
691/* PHY control start function */
692static int sh_eth_phy_start(struct net_device *ndev)
693{
694 struct sh_eth_private *mdp = netdev_priv(ndev);
695 int ret;
696
697 ret = sh_eth_phy_init(ndev);
698 if (ret)
699 return ret;
700
701 /* reset phy - this also wakes it from PDOWN */
702 phy_write(mdp->phydev, MII_BMCR, BMCR_RESET);
703 phy_start(mdp->phydev);
704
705 return 0;
706}
707
708/* network device open function */
709static int sh_eth_open(struct net_device *ndev)
710{
711 int ret = 0;
712 struct sh_eth_private *mdp = netdev_priv(ndev);
713
714 ret = request_irq(ndev->irq, &sh_eth_interrupt, 0, ndev->name, ndev);
715 if (ret) {
716 printk(KERN_ERR "Cannot assign IRQ number to %s\n", CARDNAME);
717 return ret;
718 }
719
720 /* Descriptor set */
721 ret = sh_eth_ring_init(ndev);
722 if (ret)
723 goto out_free_irq;
724
725 /* device init */
726 ret = sh_eth_dev_init(ndev);
727 if (ret)
728 goto out_free_irq;
729
730 /* PHY control start*/
731 ret = sh_eth_phy_start(ndev);
732 if (ret)
733 goto out_free_irq;
734
735 /* Set the timer to check for link beat. */
736 init_timer(&mdp->timer);
737 mdp->timer.expires = jiffies + (24 * HZ) / 10; /* 2.4 sec. */
738 setup_timer(&mdp->timer, sh_eth_timer, ndev);
739
740 return ret;
741
742out_free_irq:
743 free_irq(ndev->irq, ndev);
744 return ret;
745}
746
747/* Timeout function */
748static void sh_eth_tx_timeout(struct net_device *ndev)
749{
750 struct sh_eth_private *mdp = netdev_priv(ndev);
751 u32 ioaddr = ndev->base_addr;
752 struct sh_eth_rxdesc *rxdesc;
753 int i;
754
755 netif_stop_queue(ndev);
756
757 /* warning message out */
758 printk(KERN_WARNING "%s: transmit timed out, status %8.8x,"
759 " resetting...\n", ndev->name, (int)ctrl_inl(ioaddr + EESR));
760
761 /* tx_errors count up */
762 mdp->stats.tx_errors++;
763
764 /* timer off */
765 del_timer_sync(&mdp->timer);
766
767 /* Free all the skbuffs in the Rx queue. */
768 for (i = 0; i < RX_RING_SIZE; i++) {
769 rxdesc = &mdp->rx_ring[i];
770 rxdesc->status = 0;
771 rxdesc->addr = 0xBADF00D0;
772 if (mdp->rx_skbuff[i])
773 dev_kfree_skb(mdp->rx_skbuff[i]);
774 mdp->rx_skbuff[i] = NULL;
775 }
776 for (i = 0; i < TX_RING_SIZE; i++) {
777 if (mdp->tx_skbuff[i])
778 dev_kfree_skb(mdp->tx_skbuff[i]);
779 mdp->tx_skbuff[i] = NULL;
780 }
781
782 /* device init */
783 sh_eth_dev_init(ndev);
784
785 /* timer on */
786 mdp->timer.expires = jiffies + (24 * HZ) / 10; /* 2.4 sec. */
787 add_timer(&mdp->timer);
788}
789
790/* Packet transmit function */
791static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
792{
793 struct sh_eth_private *mdp = netdev_priv(ndev);
794 struct sh_eth_txdesc *txdesc;
795 u32 entry;
796 unsigned long flags;
797
798 spin_lock_irqsave(&mdp->lock, flags);
799 if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) {
800 if (!sh_eth_txfree(ndev)) {
801 netif_stop_queue(ndev);
802 spin_unlock_irqrestore(&mdp->lock, flags);
803 return 1;
804 }
805 }
806 spin_unlock_irqrestore(&mdp->lock, flags);
807
808 entry = mdp->cur_tx % TX_RING_SIZE;
809 mdp->tx_skbuff[entry] = skb;
810 txdesc = &mdp->tx_ring[entry];
811 txdesc->addr = (u32)(skb->data);
812 /* soft swap. */
813 swaps((char *)(txdesc->addr & ~0x3), skb->len + 2);
814 /* write back */
815 __flush_purge_region(skb->data, skb->len);
816 if (skb->len < ETHERSMALL)
817 txdesc->buffer_length = ETHERSMALL;
818 else
819 txdesc->buffer_length = skb->len;
820
821 if (entry >= TX_RING_SIZE - 1)
822 txdesc->status |= cpu_to_le32(TD_TACT | TD_TDLE);
823 else
824 txdesc->status |= cpu_to_le32(TD_TACT);
825
826 mdp->cur_tx++;
827
828 ctrl_outl(EDTRR_TRNS, ndev->base_addr + EDTRR);
829 ndev->trans_start = jiffies;
830
831 return 0;
832}
833
834/* device close function */
835static int sh_eth_close(struct net_device *ndev)
836{
837 struct sh_eth_private *mdp = netdev_priv(ndev);
838 u32 ioaddr = ndev->base_addr;
839 int ringsize;
840
841 netif_stop_queue(ndev);
842
843 /* Disable interrupts by clearing the interrupt mask. */
844 ctrl_outl(0x0000, ioaddr + EESIPR);
845
846 /* Stop the chip's Tx and Rx processes. */
847 ctrl_outl(0, ioaddr + EDTRR);
848 ctrl_outl(0, ioaddr + EDRRR);
849
850 /* PHY Disconnect */
851 if (mdp->phydev) {
852 phy_stop(mdp->phydev);
853 phy_disconnect(mdp->phydev);
854 }
855
856 free_irq(ndev->irq, ndev);
857
858 del_timer_sync(&mdp->timer);
859
860 /* Free all the skbuffs in the Rx queue. */
861 sh_eth_ring_free(ndev);
862
863 /* free DMA buffer */
864 ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
865 dma_free_coherent(NULL, ringsize, mdp->rx_ring, mdp->rx_desc_dma);
866
867 /* free DMA buffer */
868 ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
869 dma_free_coherent(NULL, ringsize, mdp->tx_ring, mdp->tx_desc_dma);
870
871 return 0;
872}
873
874static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
875{
876 struct sh_eth_private *mdp = netdev_priv(ndev);
877 u32 ioaddr = ndev->base_addr;
878
879 mdp->stats.tx_dropped += ctrl_inl(ioaddr + TROCR);
880 ctrl_outl(0, ioaddr + TROCR); /* (write clear) */
881 mdp->stats.collisions += ctrl_inl(ioaddr + CDCR);
882 ctrl_outl(0, ioaddr + CDCR); /* (write clear) */
883 mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + LCCR);
884 ctrl_outl(0, ioaddr + LCCR); /* (write clear) */
885 mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + CNDCR);
886 ctrl_outl(0, ioaddr + CNDCR); /* (write clear) */
887
888 return &mdp->stats;
889}
890
891/* ioctl to device function */
892static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq,
893 int cmd)
894{
895 struct sh_eth_private *mdp = netdev_priv(ndev);
896 struct phy_device *phydev = mdp->phydev;
897
898 if (!netif_running(ndev))
899 return -EINVAL;
900
901 if (!phydev)
902 return -ENODEV;
903
904 return phy_mii_ioctl(phydev, if_mii(rq), cmd);
905}
906
907
908/* Set the multicast reception mode */
909static void sh_eth_set_multicast_list(struct net_device *ndev)
910{
911 u32 ioaddr = ndev->base_addr;
912
913 if (ndev->flags & IFF_PROMISC) {
914 /* Set promiscuous. */
915 ctrl_outl((ctrl_inl(ioaddr + ECMR) & ~ECMR_MCT) | ECMR_PRM,
916 ioaddr + ECMR);
917 } else {
918 /* Normal, unicast/broadcast-only mode. */
919 ctrl_outl((ctrl_inl(ioaddr + ECMR) & ~ECMR_PRM) | ECMR_MCT,
920 ioaddr + ECMR);
921 }
922}
923
924/* SuperH's TSU register init function */
925static void sh_eth_tsu_init(u32 ioaddr)
926{
927 ctrl_outl(0, ioaddr + TSU_FWEN0); /* Disable forward(0->1) */
928 ctrl_outl(0, ioaddr + TSU_FWEN1); /* Disable forward(1->0) */
929 ctrl_outl(0, ioaddr + TSU_FCM); /* forward fifo 3k-3k */
930 ctrl_outl(0xc, ioaddr + TSU_BSYSL0);
931 ctrl_outl(0xc, ioaddr + TSU_BSYSL1);
932 ctrl_outl(0, ioaddr + TSU_PRISL0);
933 ctrl_outl(0, ioaddr + TSU_PRISL1);
934 ctrl_outl(0, ioaddr + TSU_FWSL0);
935 ctrl_outl(0, ioaddr + TSU_FWSL1);
936 ctrl_outl(TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, ioaddr + TSU_FWSLC);
937 ctrl_outl(0, ioaddr + TSU_QTAGM0); /* Disable QTAG(0->1) */
938 ctrl_outl(0, ioaddr + TSU_QTAGM1); /* Disable QTAG(1->0) */
939 ctrl_outl(0, ioaddr + TSU_FWSR); /* all interrupt status clear */
940 ctrl_outl(0, ioaddr + TSU_FWINMK); /* Disable all interrupt */
941 ctrl_outl(0, ioaddr + TSU_TEN); /* Disable all CAM entry */
942 ctrl_outl(0, ioaddr + TSU_POST1); /* Disable CAM entry [ 0- 7] */
943 ctrl_outl(0, ioaddr + TSU_POST2); /* Disable CAM entry [ 8-15] */
944 ctrl_outl(0, ioaddr + TSU_POST3); /* Disable CAM entry [16-23] */
945 ctrl_outl(0, ioaddr + TSU_POST4); /* Disable CAM entry [24-31] */
946}
947
948/* MDIO bus release function */
949static int sh_mdio_release(struct net_device *ndev)
950{
951 struct mii_bus *bus = dev_get_drvdata(&ndev->dev);
952
953 /* unregister mdio bus */
954 mdiobus_unregister(bus);
955
956 /* remove mdio bus info from net_device */
957 dev_set_drvdata(&ndev->dev, NULL);
958
959 /* free bitbang info */
960 free_mdio_bitbang(bus);
961
962 return 0;
963}
964
965/* MDIO bus init function */
966static int sh_mdio_init(struct net_device *ndev, int id)
967{
968 int ret, i;
969 struct bb_info *bitbang;
970 struct sh_eth_private *mdp = netdev_priv(ndev);
971
972 /* create bit control struct for PHY */
973 bitbang = kzalloc(sizeof(struct bb_info), GFP_KERNEL);
974 if (!bitbang) {
975 ret = -ENOMEM;
976 goto out;
977 }
978
979 /* bitbang init */
980 bitbang->addr = ndev->base_addr + PIR;
981 bitbang->mdi_msk = 0x08;
982 bitbang->mdo_msk = 0x04;
983 bitbang->mmd_msk = 0x02;/* MMD */
984 bitbang->mdc_msk = 0x01;
985 bitbang->ctrl.ops = &bb_ops;
986
987 /* MII controller setting */
988 mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
989 if (!mdp->mii_bus) {
990 ret = -ENOMEM;
991 goto out_free_bitbang;
992 }
993
994 /* Hook up MII support for ethtool */
995 mdp->mii_bus->name = "sh_mii";
996 mdp->mii_bus->dev = &ndev->dev;
997 mdp->mii_bus->id = id;
998
999 /* PHY IRQ */
1000 mdp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
1001 if (!mdp->mii_bus->irq) {
1002 ret = -ENOMEM;
1003 goto out_free_bus;
1004 }
1005
1006 for (i = 0; i < PHY_MAX_ADDR; i++)
1007 mdp->mii_bus->irq[i] = PHY_POLL;
1008
1009 /* register mdio bus */
1010 ret = mdiobus_register(mdp->mii_bus);
1011 if (ret)
1012 goto out_free_irq;
1013
1014 dev_set_drvdata(&ndev->dev, mdp->mii_bus);
1015
1016 return 0;
1017
1018out_free_irq:
1019 kfree(mdp->mii_bus->irq);
1020
1021out_free_bus:
1022 kfree(mdp->mii_bus);
1023
1024out_free_bitbang:
1025 kfree(bitbang);
1026
1027out:
1028 return ret;
1029}
1030
1031static int sh_eth_drv_probe(struct platform_device *pdev)
1032{
1033 int ret, i, devno = 0;
1034 struct resource *res;
1035 struct net_device *ndev = NULL;
1036 struct sh_eth_private *mdp;
1037
1038 /* get base addr */
1039 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1040 if (unlikely(res == NULL)) {
1041 dev_err(&pdev->dev, "invalid resource\n");
1042 ret = -EINVAL;
1043 goto out;
1044 }
1045
1046 ndev = alloc_etherdev(sizeof(struct sh_eth_private));
1047 if (!ndev) {
1048 printk(KERN_ERR "%s: could not allocate device.\n", CARDNAME);
1049 ret = -ENOMEM;
1050 goto out;
1051 }
1052
1053 /* The sh Ether-specific entries in the device structure. */
1054 ndev->base_addr = res->start;
1055 devno = pdev->id;
1056 if (devno < 0)
1057 devno = 0;
1058
1059 ndev->dma = -1;
1060 ndev->irq = platform_get_irq(pdev, 0);
1061 if (ndev->irq < 0) {
1062 ret = -ENODEV;
1063 goto out_release;
1064 }
1065
1066 SET_NETDEV_DEV(ndev, &pdev->dev);
1067
1068 /* Fill in the fields of the device structure with ethernet values. */
1069 ether_setup(ndev);
1070
1071 mdp = netdev_priv(ndev);
1072 spin_lock_init(&mdp->lock);
1073
1074 /* get PHY ID */
1075 mdp->phy_id = (int)pdev->dev.platform_data;
1076
1077 /* set function */
1078 ndev->open = sh_eth_open;
1079 ndev->hard_start_xmit = sh_eth_start_xmit;
1080 ndev->stop = sh_eth_close;
1081 ndev->get_stats = sh_eth_get_stats;
1082 ndev->set_multicast_list = sh_eth_set_multicast_list;
1083 ndev->do_ioctl = sh_eth_do_ioctl;
1084 ndev->tx_timeout = sh_eth_tx_timeout;
1085 ndev->watchdog_timeo = TX_TIMEOUT;
1086
1087 mdp->post_rx = POST_RX >> (devno << 1);
1088 mdp->post_fw = POST_FW >> (devno << 1);
1089
1090 /* read and set MAC address */
1091 read_mac_address(ndev);
1092
1093 /* First device only init */
1094 if (!devno) {
1095 /* reset device */
1096 ctrl_outl(ARSTR_ARSTR, ndev->base_addr + ARSTR);
1097 mdelay(1);
1098
1099 /* TSU init (Init only)*/
1100 sh_eth_tsu_init(SH_TSU_ADDR);
1101 }
1102
1103 /* network device register */
1104 ret = register_netdev(ndev);
1105 if (ret)
1106 goto out_release;
1107
1108 /* mdio bus init */
1109 ret = sh_mdio_init(ndev, pdev->id);
1110 if (ret)
1111 goto out_unregister;
1112
1113 /* print device information */
1114 printk(KERN_INFO "%s: %s at 0x%x, ",
1115 ndev->name, CARDNAME, (u32) ndev->base_addr);
1116
1117 for (i = 0; i < 5; i++)
1118 printk(KERN_INFO "%2.2x:", ndev->dev_addr[i]);
1119 printk(KERN_INFO "%2.2x, IRQ %d.\n", ndev->dev_addr[i], ndev->irq);
1120
1121 platform_set_drvdata(pdev, ndev);
1122
1123 return ret;
1124
1125out_unregister:
1126 unregister_netdev(ndev);
1127
1128out_release:
1129 /* net_dev free */
1130 if (ndev)
1131 free_netdev(ndev);
1132
1133out:
1134 return ret;
1135}
1136
1137static int sh_eth_drv_remove(struct platform_device *pdev)
1138{
1139 struct net_device *ndev = platform_get_drvdata(pdev);
1140
1141 sh_mdio_release(ndev);
1142 unregister_netdev(ndev);
1143 flush_scheduled_work();
1144
1145 free_netdev(ndev);
1146 platform_set_drvdata(pdev, NULL);
1147
1148 return 0;
1149}
1150
1151static struct platform_driver sh_eth_driver = {
1152 .probe = sh_eth_drv_probe,
1153 .remove = sh_eth_drv_remove,
1154 .driver = {
1155 .name = CARDNAME,
1156 },
1157};
1158
1159static int __init sh_eth_init(void)
1160{
1161 return platform_driver_register(&sh_eth_driver);
1162}
1163
1164static void __exit sh_eth_cleanup(void)
1165{
1166 platform_driver_unregister(&sh_eth_driver);
1167}
1168
1169module_init(sh_eth_init);
1170module_exit(sh_eth_cleanup);
1171
1172MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
1173MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
1174MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/sh_eth.h b/drivers/net/sh_eth.h
new file mode 100644
index 000000000000..ca2db6bb3c61
--- /dev/null
+++ b/drivers/net/sh_eth.h
@@ -0,0 +1,464 @@
1/*
2 * SuperH Ethernet device driver
3 *
4 * Copyright (C) 2006-2008 Nobuhiro Iwamatsu
5 * Copyright (C) 2008 Renesas Solutions Corp.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 */
22
23#ifndef __SH_ETH_H__
24#define __SH_ETH_H__
25
26#include <linux/module.h>
27#include <linux/kernel.h>
28#include <linux/spinlock.h>
29#include <linux/workqueue.h>
30#include <linux/netdevice.h>
31#include <linux/phy.h>
32
33#define CARDNAME "sh-eth"
34#define TX_TIMEOUT (5*HZ)
35
36#define TX_RING_SIZE 128 /* Tx ring size */
37#define RX_RING_SIZE 128 /* Rx ring size */
38#define RX_OFFSET 2 /* skb offset */
39#define ETHERSMALL 60
40#define PKT_BUF_SZ 1538
41
42/* Chip Base Address */
43#define SH_ETH0_BASE 0xA7000000
44#define SH_ETH1_BASE 0xA7000400
45#define SH_TSU_ADDR 0xA7000804
46
47/* Chip Registers */
48/* E-DMAC */
49#define EDMR 0x0000
50#define EDTRR 0x0004
51#define EDRRR 0x0008
52#define TDLAR 0x000C
53#define RDLAR 0x0010
54#define EESR 0x0014
55#define EESIPR 0x0018
56#define TRSCER 0x001C
57#define RMFCR 0x0020
58#define TFTR 0x0024
59#define FDR 0x0028
60#define RMCR 0x002C
61#define EDOCR 0x0030
62#define FCFTR 0x0034
63#define RPADIR 0x0038
64#define TRIMD 0x003C
65#define RBWAR 0x0040
66#define RDFAR 0x0044
67#define TBRAR 0x004C
68#define TDFAR 0x0050
69/* Ether Register */
70#define ECMR 0x0160
71#define ECSR 0x0164
72#define ECSIPR 0x0168
73#define PIR 0x016C
74#define MAHR 0x0170
75#define MALR 0x0174
76#define RFLR 0x0178
77#define PSR 0x017C
78#define TROCR 0x0180
79#define CDCR 0x0184
80#define LCCR 0x0188
81#define CNDCR 0x018C
82#define CEFCR 0x0194
83#define FRECR 0x0198
84#define TSFRCR 0x019C
85#define TLFRCR 0x01A0
86#define RFCR 0x01A4
87#define MAFCR 0x01A8
88#define IPGR 0x01B4
89#if defined(CONFIG_CPU_SUBTYPE_SH7710)
90#define APR 0x01B8
91#define MPR 0x01BC
92#define TPAUSER 0x1C4
93#define BCFR 0x1CC
94#endif /* CONFIG_CPU_SUBTYPE_SH7710 */
95
96#define ARSTR 0x0800
97
98/* TSU */
99#define TSU_CTRST 0x004
100#define TSU_FWEN0 0x010
101#define TSU_FWEN1 0x014
102#define TSU_FCM 0x018
103#define TSU_BSYSL0 0x020
104#define TSU_BSYSL1 0x024
105#define TSU_PRISL0 0x028
106#define TSU_PRISL1 0x02C
107#define TSU_FWSL0 0x030
108#define TSU_FWSL1 0x034
109#define TSU_FWSLC 0x038
110#define TSU_QTAGM0 0x040
111#define TSU_QTAGM1 0x044
112#define TSU_ADQT0 0x048
113#define TSU_ADQT1 0x04C
114#define TSU_FWSR 0x050
115#define TSU_FWINMK 0x054
116#define TSU_ADSBSY 0x060
117#define TSU_TEN 0x064
118#define TSU_POST1 0x070
119#define TSU_POST2 0x074
120#define TSU_POST3 0x078
121#define TSU_POST4 0x07C
122#define TXNLCR0 0x080
123#define TXALCR0 0x084
124#define RXNLCR0 0x088
125#define RXALCR0 0x08C
126#define FWNLCR0 0x090
127#define FWALCR0 0x094
128#define TXNLCR1 0x0A0
129#define TXALCR1 0x0A4
130#define RXNLCR1 0x0A8
131#define RXALCR1 0x0AC
132#define FWNLCR1 0x0B0
133#define FWALCR1 0x0B4
134
135#define TSU_ADRH0 0x0100
136#define TSU_ADRL0 0x0104
137#define TSU_ADRL31 0x01FC
138
139/* Register's bits */
140
141/* EDMR */
142enum DMAC_M_BIT {
143 EDMR_DL1 = 0x20, EDMR_DL0 = 0x10, EDMR_SRST = 0x01,
144};
145
146/* EDTRR */
147enum DMAC_T_BIT {
148 EDTRR_TRNS = 0x01,
149};
150
151/* EDRRR*/
152enum EDRRR_R_BIT {
153 EDRRR_R = 0x01,
154};
155
156/* TPAUSER */
157enum TPAUSER_BIT {
158 TPAUSER_TPAUSE = 0x0000ffff,
159 TPAUSER_UNLIMITED = 0,
160};
161
162/* BCFR */
163enum BCFR_BIT {
164 BCFR_RPAUSE = 0x0000ffff,
165 BCFR_UNLIMITED = 0,
166};
167
168/* PIR */
169enum PIR_BIT {
170 PIR_MDI = 0x08, PIR_MDO = 0x04, PIR_MMD = 0x02, PIR_MDC = 0x01,
171};
172
173/* PSR */
174enum PHY_STATUS_BIT { PHY_ST_LINK = 0x01, };
175
176/* EESR */
177enum EESR_BIT {
178 EESR_TWB = 0x40000000, EESR_TABT = 0x04000000,
179 EESR_RABT = 0x02000000, EESR_RFRMER = 0x01000000,
180 EESR_ADE = 0x00800000, EESR_ECI = 0x00400000,
181 EESR_FTC = 0x00200000, EESR_TDE = 0x00100000,
182 EESR_TFE = 0x00080000, EESR_FRC = 0x00040000,
183 EESR_RDE = 0x00020000, EESR_RFE = 0x00010000,
184 EESR_TINT4 = 0x00000800, EESR_TINT3 = 0x00000400,
185 EESR_TINT2 = 0x00000200, EESR_TINT1 = 0x00000100,
186 EESR_RINT8 = 0x00000080, EESR_RINT5 = 0x00000010,
187 EESR_RINT4 = 0x00000008, EESR_RINT3 = 0x00000004,
188 EESR_RINT2 = 0x00000002, EESR_RINT1 = 0x00000001,
189};
190
191#define EESR_ERR_CHECK (EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE \
192 | EESR_RFRMER | EESR_ADE | EESR_TFE | EESR_TDE | EESR_ECI)
193
194/* EESIPR */
195enum DMAC_IM_BIT {
196 DMAC_M_TWB = 0x40000000, DMAC_M_TABT = 0x04000000,
197 DMAC_M_RABT = 0x02000000,
198 DMAC_M_RFRMER = 0x01000000, DMAC_M_ADF = 0x00800000,
199 DMAC_M_ECI = 0x00400000, DMAC_M_FTC = 0x00200000,
200 DMAC_M_TDE = 0x00100000, DMAC_M_TFE = 0x00080000,
201 DMAC_M_FRC = 0x00040000, DMAC_M_RDE = 0x00020000,
202 DMAC_M_RFE = 0x00010000, DMAC_M_TINT4 = 0x00000800,
203 DMAC_M_TINT3 = 0x00000400, DMAC_M_TINT2 = 0x00000200,
204 DMAC_M_TINT1 = 0x00000100, DMAC_M_RINT8 = 0x00000080,
205 DMAC_M_RINT5 = 0x00000010, DMAC_M_RINT4 = 0x00000008,
206 DMAC_M_RINT3 = 0x00000004, DMAC_M_RINT2 = 0x00000002,
207 DMAC_M_RINT1 = 0x00000001,
208};
209
210/* Receive descriptor bit */
211enum RD_STS_BIT {
212 RD_RACT = 0x80000000, RC_RDEL = 0x40000000,
213 RC_RFP1 = 0x20000000, RC_RFP0 = 0x10000000,
214 RD_RFE = 0x08000000, RD_RFS10 = 0x00000200,
215 RD_RFS9 = 0x00000100, RD_RFS8 = 0x00000080,
216 RD_RFS7 = 0x00000040, RD_RFS6 = 0x00000020,
217 RD_RFS5 = 0x00000010, RD_RFS4 = 0x00000008,
218 RD_RFS3 = 0x00000004, RD_RFS2 = 0x00000002,
219 RD_RFS1 = 0x00000001,
220};
221#define RDF1ST RC_RFP1
222#define RDFEND RC_RFP0
223#define RD_RFP (RC_RFP1|RC_RFP0)
224
225/* FCFTR */
226enum FCFTR_BIT {
227 FCFTR_RFF2 = 0x00040000, FCFTR_RFF1 = 0x00020000,
228 FCFTR_RFF0 = 0x00010000, FCFTR_RFD2 = 0x00000004,
229 FCFTR_RFD1 = 0x00000002, FCFTR_RFD0 = 0x00000001,
230};
231#define FIFO_F_D_RFF (FCFTR_RFF2|FCFTR_RFF1|FCFTR_RFF0)
232#define FIFO_F_D_RFD (FCFTR_RFD2|FCFTR_RFD1|FCFTR_RFD0)
233
234/* Transfer descriptor bit */
235enum TD_STS_BIT {
236 TD_TACT = 0x80000000, TD_TDLE = 0x40000000, TD_TFP1 = 0x20000000,
237 TD_TFP0 = 0x10000000,
238};
239#define TDF1ST TD_TFP1
240#define TDFEND TD_TFP0
241#define TD_TFP (TD_TFP1|TD_TFP0)
242
243/* RMCR */
244enum RECV_RST_BIT { RMCR_RST = 0x01, };
245/* ECMR */
246enum FELIC_MODE_BIT {
247 ECMR_ZPF = 0x00080000, ECMR_PFR = 0x00040000, ECMR_RXF = 0x00020000,
248 ECMR_TXF = 0x00010000, ECMR_MCT = 0x00002000, ECMR_PRCEF = 0x00001000,
249 ECMR_PMDE = 0x00000200, ECMR_RE = 0x00000040, ECMR_TE = 0x00000020,
250 ECMR_ILB = 0x00000008, ECMR_ELB = 0x00000004, ECMR_DM = 0x00000002,
251 ECMR_PRM = 0x00000001,
252};
253
254/* ECSR */
255enum ECSR_STATUS_BIT {
256 ECSR_BRCRX = 0x20, ECSR_PSRTO = 0x10, ECSR_LCHNG = 0x04,
257 ECSR_MPD = 0x02, ECSR_ICD = 0x01,
258};
259
260/* ECSIPR */
261enum ECSIPR_STATUS_MASK_BIT {
262 ECSIPR_BRCRXIP = 0x20, ECSIPR_PSRTOIP = 0x10, ECSIPR_LCHNGIP = 0x04,
263 ECSIPR_MPDIP = 0x02, ECSIPR_ICDIP = 0x01,
264};
265
266/* APR */
267enum APR_BIT {
268 APR_AP = 0x00000001,
269};
270
271/* MPR */
272enum MPR_BIT {
273 MPR_MP = 0x00000001,
274};
275
276/* TRSCER */
277enum DESC_I_BIT {
278 DESC_I_TINT4 = 0x0800, DESC_I_TINT3 = 0x0400, DESC_I_TINT2 = 0x0200,
279 DESC_I_TINT1 = 0x0100, DESC_I_RINT8 = 0x0080, DESC_I_RINT5 = 0x0010,
280 DESC_I_RINT4 = 0x0008, DESC_I_RINT3 = 0x0004, DESC_I_RINT2 = 0x0002,
281 DESC_I_RINT1 = 0x0001,
282};
283
284/* RPADIR */
285enum RPADIR_BIT {
286 RPADIR_PADS1 = 0x20000, RPADIR_PADS0 = 0x10000,
287 RPADIR_PADR = 0x0003f,
288};
289
290/* FDR */
291enum FIFO_SIZE_BIT {
292 FIFO_SIZE_T = 0x00000700, FIFO_SIZE_R = 0x00000007,
293};
294enum phy_offsets {
295 PHY_CTRL = 0, PHY_STAT = 1, PHY_IDT1 = 2, PHY_IDT2 = 3,
296 PHY_ANA = 4, PHY_ANL = 5, PHY_ANE = 6,
297 PHY_16 = 16,
298};
299
300/* PHY_CTRL */
301enum PHY_CTRL_BIT {
302 PHY_C_RESET = 0x8000, PHY_C_LOOPBK = 0x4000, PHY_C_SPEEDSL = 0x2000,
303 PHY_C_ANEGEN = 0x1000, PHY_C_PWRDN = 0x0800, PHY_C_ISO = 0x0400,
304 PHY_C_RANEG = 0x0200, PHY_C_DUPLEX = 0x0100, PHY_C_COLT = 0x0080,
305};
306#define DM9161_PHY_C_ANEGEN 0 /* auto nego special */
307
308/* PHY_STAT */
309enum PHY_STAT_BIT {
310 PHY_S_100T4 = 0x8000, PHY_S_100X_F = 0x4000, PHY_S_100X_H = 0x2000,
311 PHY_S_10T_F = 0x1000, PHY_S_10T_H = 0x0800, PHY_S_ANEGC = 0x0020,
312 PHY_S_RFAULT = 0x0010, PHY_S_ANEGA = 0x0008, PHY_S_LINK = 0x0004,
313 PHY_S_JAB = 0x0002, PHY_S_EXTD = 0x0001,
314};
315
316/* PHY_ANA */
317enum PHY_ANA_BIT {
318 PHY_A_NP = 0x8000, PHY_A_ACK = 0x4000, PHY_A_RF = 0x2000,
319 PHY_A_FCS = 0x0400, PHY_A_T4 = 0x0200, PHY_A_FDX = 0x0100,
320 PHY_A_HDX = 0x0080, PHY_A_10FDX = 0x0040, PHY_A_10HDX = 0x0020,
321 PHY_A_SEL = 0x001f,
322};
323/* PHY_ANL */
324enum PHY_ANL_BIT {
325 PHY_L_NP = 0x8000, PHY_L_ACK = 0x4000, PHY_L_RF = 0x2000,
326 PHY_L_FCS = 0x0400, PHY_L_T4 = 0x0200, PHY_L_FDX = 0x0100,
327 PHY_L_HDX = 0x0080, PHY_L_10FDX = 0x0040, PHY_L_10HDX = 0x0020,
328 PHY_L_SEL = 0x001f,
329};
330
331/* PHY_ANE */
332enum PHY_ANE_BIT {
333 PHY_E_PDF = 0x0010, PHY_E_LPNPA = 0x0008, PHY_E_NPA = 0x0004,
334 PHY_E_PRX = 0x0002, PHY_E_LPANEGA = 0x0001,
335};
336
337/* DM9161 */
338enum PHY_16_BIT {
339 PHY_16_BP4B45 = 0x8000, PHY_16_BPSCR = 0x4000, PHY_16_BPALIGN = 0x2000,
340 PHY_16_BP_ADPOK = 0x1000, PHY_16_Repeatmode = 0x0800,
341 PHY_16_TXselect = 0x0400,
342 PHY_16_Rsvd = 0x0200, PHY_16_RMIIEnable = 0x0100,
343 PHY_16_Force100LNK = 0x0080,
344 PHY_16_APDLED_CTL = 0x0040, PHY_16_COLLED_CTL = 0x0020,
345 PHY_16_RPDCTR_EN = 0x0010,
346 PHY_16_ResetStMch = 0x0008, PHY_16_PreamSupr = 0x0004,
347 PHY_16_Sleepmode = 0x0002,
348 PHY_16_RemoteLoopOut = 0x0001,
349};
350
351#define POST_RX 0x08
352#define POST_FW 0x04
353#define POST0_RX (POST_RX)
354#define POST0_FW (POST_FW)
355#define POST1_RX (POST_RX >> 2)
356#define POST1_FW (POST_FW >> 2)
357#define POST_ALL (POST0_RX | POST0_FW | POST1_RX | POST1_FW)
358
359/* ARSTR */
360enum ARSTR_BIT { ARSTR_ARSTR = 0x00000001, };
361
362/* TSU_FWEN0 */
363enum TSU_FWEN0_BIT {
364 TSU_FWEN0_0 = 0x00000001,
365};
366
367/* TSU_ADSBSY */
368enum TSU_ADSBSY_BIT {
369 TSU_ADSBSY_0 = 0x00000001,
370};
371
372/* TSU_TEN */
373enum TSU_TEN_BIT {
374 TSU_TEN_0 = 0x80000000,
375};
376
377/* TSU_FWSL0 */
378enum TSU_FWSL0_BIT {
379 TSU_FWSL0_FW50 = 0x1000, TSU_FWSL0_FW40 = 0x0800,
380 TSU_FWSL0_FW30 = 0x0400, TSU_FWSL0_FW20 = 0x0200,
381 TSU_FWSL0_FW10 = 0x0100, TSU_FWSL0_RMSA0 = 0x0010,
382};
383
384/* TSU_FWSLC */
385enum TSU_FWSLC_BIT {
386 TSU_FWSLC_POSTENU = 0x2000, TSU_FWSLC_POSTENL = 0x1000,
387 TSU_FWSLC_CAMSEL03 = 0x0080, TSU_FWSLC_CAMSEL02 = 0x0040,
388 TSU_FWSLC_CAMSEL01 = 0x0020, TSU_FWSLC_CAMSEL00 = 0x0010,
389 TSU_FWSLC_CAMSEL13 = 0x0008, TSU_FWSLC_CAMSEL12 = 0x0004,
390 TSU_FWSLC_CAMSEL11 = 0x0002, TSU_FWSLC_CAMSEL10 = 0x0001,
391};
392
393/*
394 * The sh ether Tx buffer descriptors.
395 * This structure should be 20 bytes.
396 */
397struct sh_eth_txdesc {
398 u32 status; /* TD0 */
399#if defined(CONFIG_CPU_LITTLE_ENDIAN)
400 u16 pad0; /* TD1 */
401 u16 buffer_length; /* TD1 */
402#else
403 u16 buffer_length; /* TD1 */
404 u16 pad0; /* TD1 */
405#endif
406 u32 addr; /* TD2 */
407 u32 pad1; /* padding data */
408};
409
410/*
411 * The sh ether Rx buffer descriptors.
412 * This structure should be 20 bytes.
413 */
414struct sh_eth_rxdesc {
415 u32 status; /* RD0 */
416#if defined(CONFIG_CPU_LITTLE_ENDIAN)
417 u16 frame_length; /* RD1 */
418 u16 buffer_length; /* RD1 */
419#else
420 u16 buffer_length; /* RD1 */
421 u16 frame_length; /* RD1 */
422#endif
423 u32 addr; /* RD2 */
424 u32 pad0; /* padding data */
425};
426
427struct sh_eth_private {
428 dma_addr_t rx_desc_dma;
429 dma_addr_t tx_desc_dma;
430 struct sh_eth_rxdesc *rx_ring;
431 struct sh_eth_txdesc *tx_ring;
432 struct sk_buff **rx_skbuff;
433 struct sk_buff **tx_skbuff;
434 struct net_device_stats stats;
435 struct timer_list timer;
436 spinlock_t lock;
437 u32 cur_rx, dirty_rx; /* Producer/consumer ring indices */
438 u32 cur_tx, dirty_tx;
439 u32 rx_buf_sz; /* Based on MTU+slack. */
440 /* MII transceiver section. */
441 u32 phy_id; /* PHY ID */
442 struct mii_bus *mii_bus; /* MDIO bus control */
443 struct phy_device *phydev; /* PHY device control */
444 enum phy_state link;
445 int msg_enable;
446 int speed;
447 int duplex;
448 u32 rx_int_var, tx_int_var; /* interrupt control variables */
449 char post_rx; /* POST receive */
450 char post_fw; /* POST forward */
451 struct net_device_stats tsu_stats; /* TSU forward status */
452};
453
454static void swaps(char *src, int len)
455{
456#ifdef __LITTLE_ENDIAN__
457 u32 *p = (u32 *)src;
458 u32 *maxp;
459 maxp = p + ((len + sizeof(u32) - 1) / sizeof(u32));
460
461 for (; p < maxp; p++)
462 *p = swab32(*p);
463#endif
464}
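
swaps() compensates for the byte order in which the E-DMAC reads and writes frame data: sh_eth_rx() and sh_eth_start_xmit() both call it on the packet buffer, and on little-endian kernels it byte-reverses every 32-bit word in place, while on big-endian builds it compiles to nothing. A small illustrative sketch of the effect, not part of the driver, assuming a little-endian CPU:

static void __maybe_unused swaps_demo(void)
{
	u8 buf[8] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88 };

	/* Each 32-bit word is byte-reversed in place on little-endian:
	 * buf becomes 44 33 22 11 88 77 66 55.  On big-endian kernels
	 * the call leaves buf untouched. */
	swaps((char *)buf, sizeof(buf));
}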