author     Jeff Kirsher <jeffrey.t.kirsher@intel.com>   2011-06-18 03:01:26 -0400
committer  Jeff Kirsher <jeffrey.t.kirsher@intel.com>   2011-08-12 15:38:13 -0400
commit     7b35f03338a8557122e62ea1a011f1628b978e8d (patch)
tree       3dc66cadbfa8f86c43bd19fac73c954f9b762b24 /drivers/net/ethernet/adi
parent     ae7668d03c4de78dd0be79278f410a1415786e67 (diff)
bfin_mac: Move the Analog Devices Inc driver

Move the Analog Devices Inc driver into drivers/net/ethernet/adi/ and make
the necessary Kconfig and Makefile changes.

CC: <uclinux-dist-devel@blackfin.uclinux.org>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Acked-by: Bob Liu <bob.liu@analog.com>
Diffstat (limited to 'drivers/net/ethernet/adi')
-rw-r--r--   drivers/net/ethernet/adi/Kconfig        68
-rw-r--r--   drivers/net/ethernet/adi/Makefile        5
-rw-r--r--   drivers/net/ethernet/adi/bfin_mac.c   1767
-rw-r--r--   drivers/net/ethernet/adi/bfin_mac.h    106
4 files changed, 1946 insertions, 0 deletions
diff --git a/drivers/net/ethernet/adi/Kconfig b/drivers/net/ethernet/adi/Kconfig
new file mode 100644
index 00000000000..6de9851045c
--- /dev/null
+++ b/drivers/net/ethernet/adi/Kconfig
@@ -0,0 +1,68 @@
1#
2# Blackfin device configuration
3#
4
5config NET_BFIN
6 bool "Blackfin devices"
7 depends on BF516 || BF518 || BF526 || BF527 || BF536 || BF537
8 ---help---
9 If you have a network (Ethernet) card belonging to this class, say Y.
10 Make sure you know the name of your card. Read the Ethernet-HOWTO,
11 available from <http://www.tldp.org/docs.html#howto>.
12
13 If unsure, say Y.
14
15 Note that the answer to this question doesn't directly affect the
16 kernel: saying N will just cause the configurator to skip all
17 the remaining Blackfin card questions. If you say Y, you will be
18 asked for your specific card in the following questions.
19
20if NET_BFIN
21
22config BFIN_MAC
23 tristate "Blackfin on-chip MAC support"
24 depends on (BF516 || BF518 || BF526 || BF527 || BF536 || BF537)
25 select CRC32
26 select MII
27 select PHYLIB
28 select BFIN_MAC_USE_L1 if DMA_UNCACHED_NONE
29 ---help---
 30 This is the driver for the Blackfin on-chip MAC device. Say Y if you want
31 it compiled into the kernel. This driver is also available as a
32 module ( = code which can be inserted in and removed from the running
33 kernel whenever you want). The module will be called bfin_mac.
34
35config BFIN_MAC_USE_L1
36 bool "Use L1 memory for rx/tx packets"
37 depends on BFIN_MAC && (BF527 || BF537)
38 default y
39 ---help---
40 To get maximum network performance, you should use L1 memory as rx/tx
41 buffers. Say N here if you want to reserve L1 memory for other uses.
42
43config BFIN_TX_DESC_NUM
44 int "Number of transmit buffer packets"
45 depends on BFIN_MAC
46 range 6 10 if BFIN_MAC_USE_L1
47 range 10 100
48 default "10"
49 ---help---
 50 Set the number of buffer packets used in the driver.
51
52config BFIN_RX_DESC_NUM
53 int "Number of receive buffer packets"
54 depends on BFIN_MAC
55 range 20 100 if BFIN_MAC_USE_L1
56 range 20 800
57 default "20"
58 ---help---
 59 Set the number of buffer packets used in the driver.
60
61config BFIN_MAC_USE_HWSTAMP
62 bool "Use IEEE 1588 hwstamp"
63 depends on BFIN_MAC && BF518
64 default y
65 ---help---
 66 To support the IEEE 1588 Precision Time Protocol (PTP), select Y here.
67
68endif # NET_BFIN
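
The BFIN_MAC_USE_HWSTAMP option above is exercised from user space through the standard SIOCSHWTSTAMP ioctl, which bfin_mac_hwtstamp_ioctl() later in this patch services. A minimal user-space sketch follows; it is illustrative only, not part of the patch, and the interface name "eth0" is an assumption.

/* Illustrative sketch, not part of the patch: request hardware TX timestamps
 * and PTPv1-over-UDP RX filtering from the bfin_mac driver. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

int main(void)
{
        struct hwtstamp_config cfg;
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return 1;

        memset(&cfg, 0, sizeof(cfg));
        cfg.tx_type = HWTSTAMP_TX_ON;
        cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);    /* assumed interface name */
        ifr.ifr_data = (char *)&cfg;

        if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
                perror("SIOCSHWTSTAMP");

        close(fd);
        return 0;
}
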
diff --git a/drivers/net/ethernet/adi/Makefile b/drivers/net/ethernet/adi/Makefile
new file mode 100644
index 00000000000..b1fbe195d0e
--- /dev/null
+++ b/drivers/net/ethernet/adi/Makefile
@@ -0,0 +1,5 @@
1#
2# Makefile for the Blackfin device drivers.
3#
4
5obj-$(CONFIG_BFIN_MAC) += bfin_mac.o
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
new file mode 100644
index 00000000000..6c019e14854
--- /dev/null
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -0,0 +1,1767 @@
1/*
2 * Blackfin On-Chip MAC Driver
3 *
4 * Copyright 2004-2010 Analog Devices Inc.
5 *
6 * Enter bugs at http://blackfin.uclinux.org/
7 *
8 * Licensed under the GPL-2 or later.
9 */
10
11#define DRV_VERSION "1.1"
12#define DRV_DESC "Blackfin on-chip Ethernet MAC driver"
13
14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15
16#include <linux/init.h>
17#include <linux/module.h>
18#include <linux/kernel.h>
19#include <linux/sched.h>
20#include <linux/slab.h>
21#include <linux/delay.h>
22#include <linux/timer.h>
23#include <linux/errno.h>
24#include <linux/irq.h>
25#include <linux/io.h>
26#include <linux/ioport.h>
27#include <linux/crc32.h>
28#include <linux/device.h>
29#include <linux/spinlock.h>
30#include <linux/mii.h>
31#include <linux/netdevice.h>
32#include <linux/etherdevice.h>
33#include <linux/ethtool.h>
34#include <linux/skbuff.h>
35#include <linux/platform_device.h>
36
37#include <asm/dma.h>
38#include <linux/dma-mapping.h>
39
40#include <asm/div64.h>
41#include <asm/dpmc.h>
42#include <asm/blackfin.h>
43#include <asm/cacheflush.h>
44#include <asm/portmux.h>
45#include <mach/pll.h>
46
47#include "bfin_mac.h"
48
49MODULE_AUTHOR("Bryan Wu, Luke Yang");
50MODULE_LICENSE("GPL");
51MODULE_DESCRIPTION(DRV_DESC);
52MODULE_ALIAS("platform:bfin_mac");
53
54#if defined(CONFIG_BFIN_MAC_USE_L1)
55# define bfin_mac_alloc(dma_handle, size, num) l1_data_sram_zalloc(size*num)
56# define bfin_mac_free(dma_handle, ptr, num) l1_data_sram_free(ptr)
57#else
58# define bfin_mac_alloc(dma_handle, size, num) \
59 dma_alloc_coherent(NULL, size*num, dma_handle, GFP_KERNEL)
60# define bfin_mac_free(dma_handle, ptr, num) \
61 dma_free_coherent(NULL, sizeof(*ptr)*num, ptr, dma_handle)
62#endif
63
64#define PKT_BUF_SZ 1580
65
66#define MAX_TIMEOUT_CNT 500
67
68/* pointers to maintain transmit list */
69static struct net_dma_desc_tx *tx_list_head;
70static struct net_dma_desc_tx *tx_list_tail;
71static struct net_dma_desc_rx *rx_list_head;
72static struct net_dma_desc_rx *rx_list_tail;
73static struct net_dma_desc_rx *current_rx_ptr;
74static struct net_dma_desc_tx *current_tx_ptr;
75static struct net_dma_desc_tx *tx_desc;
76static struct net_dma_desc_rx *rx_desc;
77
78static void desc_list_free(void)
79{
80 struct net_dma_desc_rx *r;
81 struct net_dma_desc_tx *t;
82 int i;
83#if !defined(CONFIG_BFIN_MAC_USE_L1)
84 dma_addr_t dma_handle = 0;
85#endif
86
87 if (tx_desc) {
88 t = tx_list_head;
89 for (i = 0; i < CONFIG_BFIN_TX_DESC_NUM; i++) {
90 if (t) {
91 if (t->skb) {
92 dev_kfree_skb(t->skb);
93 t->skb = NULL;
94 }
95 t = t->next;
96 }
97 }
98 bfin_mac_free(dma_handle, tx_desc, CONFIG_BFIN_TX_DESC_NUM);
99 }
100
101 if (rx_desc) {
102 r = rx_list_head;
103 for (i = 0; i < CONFIG_BFIN_RX_DESC_NUM; i++) {
104 if (r) {
105 if (r->skb) {
106 dev_kfree_skb(r->skb);
107 r->skb = NULL;
108 }
109 r = r->next;
110 }
111 }
112 bfin_mac_free(dma_handle, rx_desc, CONFIG_BFIN_RX_DESC_NUM);
113 }
114}
115
116static int desc_list_init(void)
117{
118 int i;
119 struct sk_buff *new_skb;
120#if !defined(CONFIG_BFIN_MAC_USE_L1)
121 /*
122 * This dma_handle is useless in Blackfin dma_alloc_coherent().
123 * The real dma handler is the return value of dma_alloc_coherent().
124 */
125 dma_addr_t dma_handle;
126#endif
127
128 tx_desc = bfin_mac_alloc(&dma_handle,
129 sizeof(struct net_dma_desc_tx),
130 CONFIG_BFIN_TX_DESC_NUM);
131 if (tx_desc == NULL)
132 goto init_error;
133
134 rx_desc = bfin_mac_alloc(&dma_handle,
135 sizeof(struct net_dma_desc_rx),
136 CONFIG_BFIN_RX_DESC_NUM);
137 if (rx_desc == NULL)
138 goto init_error;
139
140 /* init tx_list */
141 tx_list_head = tx_list_tail = tx_desc;
142
143 for (i = 0; i < CONFIG_BFIN_TX_DESC_NUM; i++) {
144 struct net_dma_desc_tx *t = tx_desc + i;
145 struct dma_descriptor *a = &(t->desc_a);
146 struct dma_descriptor *b = &(t->desc_b);
147
148 /*
149 * disable DMA
150 * read from memory WNR = 0
151 * wordsize is 32 bits
152 * 6 half words is desc size
153 * large desc flow
154 */
155 a->config = WDSIZE_32 | NDSIZE_6 | DMAFLOW_LARGE;
156 a->start_addr = (unsigned long)t->packet;
157 a->x_count = 0;
158 a->next_dma_desc = b;
159
160 /*
161 * enabled DMA
162 * write to memory WNR = 1
163 * wordsize is 32 bits
164 * disable interrupt
165 * 6 half words is desc size
166 * large desc flow
167 */
168 b->config = DMAEN | WNR | WDSIZE_32 | NDSIZE_6 | DMAFLOW_LARGE;
169 b->start_addr = (unsigned long)(&(t->status));
170 b->x_count = 0;
171
172 t->skb = NULL;
173 tx_list_tail->desc_b.next_dma_desc = a;
174 tx_list_tail->next = t;
175 tx_list_tail = t;
176 }
177 tx_list_tail->next = tx_list_head; /* tx_list is a circle */
178 tx_list_tail->desc_b.next_dma_desc = &(tx_list_head->desc_a);
179 current_tx_ptr = tx_list_head;
180
181 /* init rx_list */
182 rx_list_head = rx_list_tail = rx_desc;
183
184 for (i = 0; i < CONFIG_BFIN_RX_DESC_NUM; i++) {
185 struct net_dma_desc_rx *r = rx_desc + i;
186 struct dma_descriptor *a = &(r->desc_a);
187 struct dma_descriptor *b = &(r->desc_b);
188
189 /* allocate a new skb for next time receive */
190 new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN);
191 if (!new_skb) {
192 pr_notice("init: low on mem - packet dropped\n");
193 goto init_error;
194 }
195 skb_reserve(new_skb, NET_IP_ALIGN);
 196 /* Invalidate the data cache over the skb->data range when write-back
 197 * cache is enabled, so stale cache lines cannot overwrite the new data from DMA
 198 */
199 blackfin_dcache_invalidate_range((unsigned long)new_skb->head,
200 (unsigned long)new_skb->end);
201 r->skb = new_skb;
202
203 /*
204 * enabled DMA
205 * write to memory WNR = 1
206 * wordsize is 32 bits
207 * disable interrupt
208 * 6 half words is desc size
209 * large desc flow
210 */
211 a->config = DMAEN | WNR | WDSIZE_32 | NDSIZE_6 | DMAFLOW_LARGE;
212 /* since RXDWA is enabled */
213 a->start_addr = (unsigned long)new_skb->data - 2;
214 a->x_count = 0;
215 a->next_dma_desc = b;
216
217 /*
218 * enabled DMA
219 * write to memory WNR = 1
220 * wordsize is 32 bits
221 * enable interrupt
222 * 6 half words is desc size
223 * large desc flow
224 */
225 b->config = DMAEN | WNR | WDSIZE_32 | DI_EN |
226 NDSIZE_6 | DMAFLOW_LARGE;
227 b->start_addr = (unsigned long)(&(r->status));
228 b->x_count = 0;
229
230 rx_list_tail->desc_b.next_dma_desc = a;
231 rx_list_tail->next = r;
232 rx_list_tail = r;
233 }
234 rx_list_tail->next = rx_list_head; /* rx_list is a circle */
235 rx_list_tail->desc_b.next_dma_desc = &(rx_list_head->desc_a);
236 current_rx_ptr = rx_list_head;
237
238 return 0;
239
240init_error:
241 desc_list_free();
242 pr_err("kmalloc failed\n");
243 return -ENOMEM;
244}
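
Both rings built above are circular: each entry's status descriptor (desc_b) chains to the next entry's data descriptor (desc_a), and the tail wraps back to the head. A small sanity-check sketch, not part of the patch, which assumes the ring globals declared earlier in this file:

/* Illustrative sketch, not part of the patch: walk the circular TX ring once
 * and count its entries; the result should equal CONFIG_BFIN_TX_DESC_NUM. */
static unsigned int tx_ring_count(void)
{
        struct net_dma_desc_tx *t = tx_list_head;
        unsigned int n = 0;

        do {
                n++;
                t = t->next;
        } while (t != tx_list_head);

        return n;
}
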
245
246
247/*---PHY CONTROL AND CONFIGURATION-----------------------------------------*/
248
249/*
250 * MII operations
251 */
252/* Wait until the previous MDC/MDIO transaction has completed */
253static int bfin_mdio_poll(void)
254{
255 int timeout_cnt = MAX_TIMEOUT_CNT;
256
257 /* poll the STABUSY bit */
258 while ((bfin_read_EMAC_STAADD()) & STABUSY) {
259 udelay(1);
260 if (timeout_cnt-- < 0) {
261 pr_err("wait MDC/MDIO transaction to complete timeout\n");
262 return -ETIMEDOUT;
263 }
264 }
265
266 return 0;
267}
268
269/* Read an off-chip register in a PHY through the MDC/MDIO port */
270static int bfin_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
271{
272 int ret;
273
274 ret = bfin_mdio_poll();
275 if (ret)
276 return ret;
277
278 /* read mode */
279 bfin_write_EMAC_STAADD(SET_PHYAD((u16) phy_addr) |
280 SET_REGAD((u16) regnum) |
281 STABUSY);
282
283 ret = bfin_mdio_poll();
284 if (ret)
285 return ret;
286
287 return (int) bfin_read_EMAC_STADAT();
288}
289
290/* Write an off-chip register in a PHY through the MDC/MDIO port */
291static int bfin_mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum,
292 u16 value)
293{
294 int ret;
295
296 ret = bfin_mdio_poll();
297 if (ret)
298 return ret;
299
300 bfin_write_EMAC_STADAT((u32) value);
301
302 /* write mode */
303 bfin_write_EMAC_STAADD(SET_PHYAD((u16) phy_addr) |
304 SET_REGAD((u16) regnum) |
305 STAOP |
306 STABUSY);
307
308 return bfin_mdio_poll();
309}
310
311static int bfin_mdiobus_reset(struct mii_bus *bus)
312{
313 return 0;
314}
315
316static void bfin_mac_adjust_link(struct net_device *dev)
317{
318 struct bfin_mac_local *lp = netdev_priv(dev);
319 struct phy_device *phydev = lp->phydev;
320 unsigned long flags;
321 int new_state = 0;
322
323 spin_lock_irqsave(&lp->lock, flags);
324 if (phydev->link) {
325 /* Now we make sure that we can be in full duplex mode.
326 * If not, we operate in half-duplex mode. */
327 if (phydev->duplex != lp->old_duplex) {
328 u32 opmode = bfin_read_EMAC_OPMODE();
329 new_state = 1;
330
331 if (phydev->duplex)
332 opmode |= FDMODE;
333 else
334 opmode &= ~(FDMODE);
335
336 bfin_write_EMAC_OPMODE(opmode);
337 lp->old_duplex = phydev->duplex;
338 }
339
340 if (phydev->speed != lp->old_speed) {
341 if (phydev->interface == PHY_INTERFACE_MODE_RMII) {
342 u32 opmode = bfin_read_EMAC_OPMODE();
343 switch (phydev->speed) {
344 case 10:
345 opmode |= RMII_10;
346 break;
347 case 100:
348 opmode &= ~RMII_10;
349 break;
350 default:
351 netdev_warn(dev,
352 "Ack! Speed (%d) is not 10/100!\n",
353 phydev->speed);
354 break;
355 }
356 bfin_write_EMAC_OPMODE(opmode);
357 }
358
359 new_state = 1;
360 lp->old_speed = phydev->speed;
361 }
362
363 if (!lp->old_link) {
364 new_state = 1;
365 lp->old_link = 1;
366 }
367 } else if (lp->old_link) {
368 new_state = 1;
369 lp->old_link = 0;
370 lp->old_speed = 0;
371 lp->old_duplex = -1;
372 }
373
374 if (new_state) {
375 u32 opmode = bfin_read_EMAC_OPMODE();
376 phy_print_status(phydev);
377 pr_debug("EMAC_OPMODE = 0x%08x\n", opmode);
378 }
379
380 spin_unlock_irqrestore(&lp->lock, flags);
381}
382
383/* MDC = 2.5 MHz */
384#define MDC_CLK 2500000
385
386static int mii_probe(struct net_device *dev, int phy_mode)
387{
388 struct bfin_mac_local *lp = netdev_priv(dev);
389 struct phy_device *phydev = NULL;
390 unsigned short sysctl;
391 int i;
392 u32 sclk, mdc_div;
393
394 /* Enable PHY output early */
395 if (!(bfin_read_VR_CTL() & CLKBUFOE))
396 bfin_write_VR_CTL(bfin_read_VR_CTL() | CLKBUFOE);
397
398 sclk = get_sclk();
399 mdc_div = ((sclk / MDC_CLK) / 2) - 1;
400
401 sysctl = bfin_read_EMAC_SYSCTL();
402 sysctl = (sysctl & ~MDCDIV) | SET_MDCDIV(mdc_div);
403 bfin_write_EMAC_SYSCTL(sysctl);
404
405 /* search for connected PHY device */
406 for (i = 0; i < PHY_MAX_ADDR; ++i) {
407 struct phy_device *const tmp_phydev = lp->mii_bus->phy_map[i];
408
409 if (!tmp_phydev)
410 continue; /* no PHY here... */
411
412 phydev = tmp_phydev;
413 break; /* found it */
414 }
415
416 /* now we are supposed to have a proper phydev, to attach to... */
417 if (!phydev) {
418 netdev_err(dev, "no phy device found\n");
419 return -ENODEV;
420 }
421
422 if (phy_mode != PHY_INTERFACE_MODE_RMII &&
423 phy_mode != PHY_INTERFACE_MODE_MII) {
424 netdev_err(dev, "invalid phy interface mode\n");
425 return -EINVAL;
426 }
427
428 phydev = phy_connect(dev, dev_name(&phydev->dev), &bfin_mac_adjust_link,
429 0, phy_mode);
430
431 if (IS_ERR(phydev)) {
432 netdev_err(dev, "could not attach PHY\n");
433 return PTR_ERR(phydev);
434 }
435
436 /* mask with MAC supported features */
437 phydev->supported &= (SUPPORTED_10baseT_Half
438 | SUPPORTED_10baseT_Full
439 | SUPPORTED_100baseT_Half
440 | SUPPORTED_100baseT_Full
441 | SUPPORTED_Autoneg
442 | SUPPORTED_Pause | SUPPORTED_Asym_Pause
443 | SUPPORTED_MII
444 | SUPPORTED_TP);
445
446 phydev->advertising = phydev->supported;
447
448 lp->old_link = 0;
449 lp->old_speed = 0;
450 lp->old_duplex = -1;
451 lp->phydev = phydev;
452
453 pr_info("attached PHY driver [%s] "
454 "(mii_bus:phy_addr=%s, irq=%d, mdc_clk=%dHz(mdc_div=%d)@sclk=%dMHz)\n",
455 phydev->drv->name, dev_name(&phydev->dev), phydev->irq,
456 MDC_CLK, mdc_div, sclk/1000000);
457
458 return 0;
459}
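
The MDC divider programmed above implies MDC = SCLK / (2 * (mdc_div + 1)), since mdc_div is computed as ((sclk / MDC_CLK) / 2) - 1. A standalone check of that arithmetic, not part of the patch, using an assumed 125 MHz system clock:

/* Illustrative sketch, not part of the patch: reproduce the MDC divider
 * arithmetic from mii_probe() with an assumed SCLK of 125 MHz. */
#include <stdio.h>

int main(void)
{
        unsigned long sclk = 125000000UL;       /* assumed SCLK */
        unsigned long mdc_clk = 2500000UL;      /* MDC_CLK from the driver */
        unsigned long mdc_div = ((sclk / mdc_clk) / 2) - 1;

        printf("mdc_div = %lu\n", mdc_div);                     /* prints 24 */
        printf("MDC = %lu Hz\n", sclk / (2 * (mdc_div + 1)));   /* prints 2500000 */
        return 0;
}
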
460
461/*
462 * Ethtool support
463 */
464
465/*
466 * interrupt routine for magic packet wakeup
467 */
468static irqreturn_t bfin_mac_wake_interrupt(int irq, void *dev_id)
469{
470 return IRQ_HANDLED;
471}
472
473static int
474bfin_mac_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
475{
476 struct bfin_mac_local *lp = netdev_priv(dev);
477
478 if (lp->phydev)
479 return phy_ethtool_gset(lp->phydev, cmd);
480
481 return -EINVAL;
482}
483
484static int
485bfin_mac_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
486{
487 struct bfin_mac_local *lp = netdev_priv(dev);
488
489 if (!capable(CAP_NET_ADMIN))
490 return -EPERM;
491
492 if (lp->phydev)
493 return phy_ethtool_sset(lp->phydev, cmd);
494
495 return -EINVAL;
496}
497
498static void bfin_mac_ethtool_getdrvinfo(struct net_device *dev,
499 struct ethtool_drvinfo *info)
500{
501 strcpy(info->driver, KBUILD_MODNAME);
502 strcpy(info->version, DRV_VERSION);
503 strcpy(info->fw_version, "N/A");
504 strcpy(info->bus_info, dev_name(&dev->dev));
505}
506
507static void bfin_mac_ethtool_getwol(struct net_device *dev,
508 struct ethtool_wolinfo *wolinfo)
509{
510 struct bfin_mac_local *lp = netdev_priv(dev);
511
512 wolinfo->supported = WAKE_MAGIC;
513 wolinfo->wolopts = lp->wol;
514}
515
516static int bfin_mac_ethtool_setwol(struct net_device *dev,
517 struct ethtool_wolinfo *wolinfo)
518{
519 struct bfin_mac_local *lp = netdev_priv(dev);
520 int rc;
521
522 if (wolinfo->wolopts & (WAKE_MAGICSECURE |
523 WAKE_UCAST |
524 WAKE_MCAST |
525 WAKE_BCAST |
526 WAKE_ARP))
527 return -EOPNOTSUPP;
528
529 lp->wol = wolinfo->wolopts;
530
531 if (lp->wol && !lp->irq_wake_requested) {
532 /* register wake irq handler */
533 rc = request_irq(IRQ_MAC_WAKEDET, bfin_mac_wake_interrupt,
534 IRQF_DISABLED, "EMAC_WAKE", dev);
535 if (rc)
536 return rc;
537 lp->irq_wake_requested = true;
538 }
539
540 if (!lp->wol && lp->irq_wake_requested) {
541 free_irq(IRQ_MAC_WAKEDET, dev);
542 lp->irq_wake_requested = false;
543 }
544
545 /* Make sure the PHY driver doesn't suspend */
546 device_init_wakeup(&dev->dev, lp->wol);
547
548 return 0;
549}
550
551static const struct ethtool_ops bfin_mac_ethtool_ops = {
552 .get_settings = bfin_mac_ethtool_getsettings,
553 .set_settings = bfin_mac_ethtool_setsettings,
554 .get_link = ethtool_op_get_link,
555 .get_drvinfo = bfin_mac_ethtool_getdrvinfo,
556 .get_wol = bfin_mac_ethtool_getwol,
557 .set_wol = bfin_mac_ethtool_setwol,
558};
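
The set_wol hook above is reached from user space through the ETHTOOL_SWOL command (the ethtool utility's "ethtool -s eth0 wol g" does the same thing). A minimal sketch, not part of the patch, with the interface name "eth0" as an assumption:

/* Illustrative sketch, not part of the patch: enable magic-packet wake-on-LAN,
 * which lands in bfin_mac_ethtool_setwol() above. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/ethtool.h>

int main(void)
{
        struct ethtool_wolinfo wol;
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return 1;

        memset(&wol, 0, sizeof(wol));
        wol.cmd = ETHTOOL_SWOL;
        wol.wolopts = WAKE_MAGIC;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);    /* assumed interface name */
        ifr.ifr_data = (char *)&wol;

        if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
                perror("ETHTOOL_SWOL");

        close(fd);
        return 0;
}
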
559
560/**************************************************************************/
561static void setup_system_regs(struct net_device *dev)
562{
563 struct bfin_mac_local *lp = netdev_priv(dev);
564 int i;
565 unsigned short sysctl;
566
567 /*
568 * Odd word alignment for Receive Frame DMA word
569 * Configure checksum support and rcve frame word alignment
570 */
571 sysctl = bfin_read_EMAC_SYSCTL();
572 /*
573 * check if interrupt is requested for any PHY,
574 * enable PHY interrupt only if needed
575 */
576 for (i = 0; i < PHY_MAX_ADDR; ++i)
577 if (lp->mii_bus->irq[i] != PHY_POLL)
578 break;
579 if (i < PHY_MAX_ADDR)
580 sysctl |= PHYIE;
581 sysctl |= RXDWA;
582#if defined(BFIN_MAC_CSUM_OFFLOAD)
583 sysctl |= RXCKS;
584#else
585 sysctl &= ~RXCKS;
586#endif
587 bfin_write_EMAC_SYSCTL(sysctl);
588
589 bfin_write_EMAC_MMC_CTL(RSTC | CROLL);
590
591 /* Set vlan regs to let 1522 bytes long packets pass through */
592 bfin_write_EMAC_VLAN1(lp->vlan1_mask);
593 bfin_write_EMAC_VLAN2(lp->vlan2_mask);
594
595 /* Initialize the TX DMA channel registers */
596 bfin_write_DMA2_X_COUNT(0);
597 bfin_write_DMA2_X_MODIFY(4);
598 bfin_write_DMA2_Y_COUNT(0);
599 bfin_write_DMA2_Y_MODIFY(0);
600
601 /* Initialize the RX DMA channel registers */
602 bfin_write_DMA1_X_COUNT(0);
603 bfin_write_DMA1_X_MODIFY(4);
604 bfin_write_DMA1_Y_COUNT(0);
605 bfin_write_DMA1_Y_MODIFY(0);
606}
607
608static void setup_mac_addr(u8 *mac_addr)
609{
610 u32 addr_low = le32_to_cpu(*(__le32 *) & mac_addr[0]);
611 u16 addr_hi = le16_to_cpu(*(__le16 *) & mac_addr[4]);
612
613 /* this depends on a little-endian machine */
614 bfin_write_EMAC_ADDRLO(addr_low);
615 bfin_write_EMAC_ADDRHI(addr_hi);
616}
617
618static int bfin_mac_set_mac_address(struct net_device *dev, void *p)
619{
620 struct sockaddr *addr = p;
621 if (netif_running(dev))
622 return -EBUSY;
623 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
624 setup_mac_addr(dev->dev_addr);
625 return 0;
626}
627
628#ifdef CONFIG_BFIN_MAC_USE_HWSTAMP
629#define bfin_mac_hwtstamp_is_none(cfg) ((cfg) == HWTSTAMP_FILTER_NONE)
630
631static int bfin_mac_hwtstamp_ioctl(struct net_device *netdev,
632 struct ifreq *ifr, int cmd)
633{
634 struct hwtstamp_config config;
635 struct bfin_mac_local *lp = netdev_priv(netdev);
636 u16 ptpctl;
637 u32 ptpfv1, ptpfv2, ptpfv3, ptpfoff;
638
639 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
640 return -EFAULT;
641
642 pr_debug("%s config flag:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
643 __func__, config.flags, config.tx_type, config.rx_filter);
644
645 /* reserved for future extensions */
646 if (config.flags)
647 return -EINVAL;
648
649 if ((config.tx_type != HWTSTAMP_TX_OFF) &&
650 (config.tx_type != HWTSTAMP_TX_ON))
651 return -ERANGE;
652
653 ptpctl = bfin_read_EMAC_PTP_CTL();
654
655 switch (config.rx_filter) {
656 case HWTSTAMP_FILTER_NONE:
657 /*
 658 * Don't allow any timestamping
659 */
660 ptpfv3 = 0xFFFFFFFF;
661 bfin_write_EMAC_PTP_FV3(ptpfv3);
662 break;
663 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
664 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
665 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
666 /*
 667 * Clear the five comparison mask bits (bits[12:8]) in EMAC_PTP_CTL
668 * to enable all the field matches.
669 */
670 ptpctl &= ~0x1F00;
671 bfin_write_EMAC_PTP_CTL(ptpctl);
672 /*
673 * Keep the default values of the EMAC_PTP_FOFF register.
674 */
675 ptpfoff = 0x4A24170C;
676 bfin_write_EMAC_PTP_FOFF(ptpfoff);
677 /*
678 * Keep the default values of the EMAC_PTP_FV1 and EMAC_PTP_FV2
679 * registers.
680 */
681 ptpfv1 = 0x11040800;
682 bfin_write_EMAC_PTP_FV1(ptpfv1);
683 ptpfv2 = 0x0140013F;
684 bfin_write_EMAC_PTP_FV2(ptpfv2);
685 /*
686 * The default value (0xFFFC) allows the timestamping of both
687 * received Sync messages and Delay_Req messages.
688 */
689 ptpfv3 = 0xFFFFFFFC;
690 bfin_write_EMAC_PTP_FV3(ptpfv3);
691
692 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
693 break;
694 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
695 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
696 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
697 /* Clear all five comparison mask bits (bits[12:8]) in the
698 * EMAC_PTP_CTL register to enable all the field matches.
699 */
700 ptpctl &= ~0x1F00;
701 bfin_write_EMAC_PTP_CTL(ptpctl);
702 /*
703 * Keep the default values of the EMAC_PTP_FOFF register, except set
704 * the PTPCOF field to 0x2A.
705 */
706 ptpfoff = 0x2A24170C;
707 bfin_write_EMAC_PTP_FOFF(ptpfoff);
708 /*
709 * Keep the default values of the EMAC_PTP_FV1 and EMAC_PTP_FV2
710 * registers.
711 */
712 ptpfv1 = 0x11040800;
713 bfin_write_EMAC_PTP_FV1(ptpfv1);
714 ptpfv2 = 0x0140013F;
715 bfin_write_EMAC_PTP_FV2(ptpfv2);
716 /*
717 * To allow the timestamping of Pdelay_Req and Pdelay_Resp, set
718 * the value to 0xFFF0.
719 */
720 ptpfv3 = 0xFFFFFFF0;
721 bfin_write_EMAC_PTP_FV3(ptpfv3);
722
723 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
724 break;
725 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
726 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
727 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
728 /*
729 * Clear bits 8 and 12 of the EMAC_PTP_CTL register to enable only the
730 * EFTM and PTPCM field comparison.
731 */
732 ptpctl &= ~0x1100;
733 bfin_write_EMAC_PTP_CTL(ptpctl);
734 /*
735 * Keep the default values of all the fields of the EMAC_PTP_FOFF
736 * register, except set the PTPCOF field to 0x0E.
737 */
738 ptpfoff = 0x0E24170C;
739 bfin_write_EMAC_PTP_FOFF(ptpfoff);
740 /*
741 * Program bits [15:0] of the EMAC_PTP_FV1 register to 0x88F7, which
742 * corresponds to PTP messages on the MAC layer.
743 */
744 ptpfv1 = 0x110488F7;
745 bfin_write_EMAC_PTP_FV1(ptpfv1);
746 ptpfv2 = 0x0140013F;
747 bfin_write_EMAC_PTP_FV2(ptpfv2);
748 /*
749 * To allow the timestamping of Pdelay_Req and Pdelay_Resp
750 * messages, set the value to 0xFFF0.
751 */
752 ptpfv3 = 0xFFFFFFF0;
753 bfin_write_EMAC_PTP_FV3(ptpfv3);
754
755 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
756 break;
757 default:
758 return -ERANGE;
759 }
760
761 if (config.tx_type == HWTSTAMP_TX_OFF &&
762 bfin_mac_hwtstamp_is_none(config.rx_filter)) {
763 ptpctl &= ~PTP_EN;
764 bfin_write_EMAC_PTP_CTL(ptpctl);
765
766 SSYNC();
767 } else {
768 ptpctl |= PTP_EN;
769 bfin_write_EMAC_PTP_CTL(ptpctl);
770
771 /*
772 * clear any existing timestamp
773 */
774 bfin_read_EMAC_PTP_RXSNAPLO();
775 bfin_read_EMAC_PTP_RXSNAPHI();
776
777 bfin_read_EMAC_PTP_TXSNAPLO();
778 bfin_read_EMAC_PTP_TXSNAPHI();
779
780 /*
781 * Set registers so that rollover occurs soon to test this.
782 */
783 bfin_write_EMAC_PTP_TIMELO(0x00000000);
784 bfin_write_EMAC_PTP_TIMEHI(0xFF800000);
785
786 SSYNC();
787
788 lp->compare.last_update = 0;
789 timecounter_init(&lp->clock,
790 &lp->cycles,
791 ktime_to_ns(ktime_get_real()));
792 timecompare_update(&lp->compare, 0);
793 }
794
795 lp->stamp_cfg = config;
796 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
797 -EFAULT : 0;
798}
799
800static void bfin_dump_hwtamp(char *s, ktime_t *hw, ktime_t *ts, struct timecompare *cmp)
801{
802 ktime_t sys = ktime_get_real();
803
804 pr_debug("%s %s hardware:%d,%d transform system:%d,%d system:%d,%d, cmp:%lld, %lld\n",
805 __func__, s, hw->tv.sec, hw->tv.nsec, ts->tv.sec, ts->tv.nsec, sys.tv.sec,
806 sys.tv.nsec, cmp->offset, cmp->skew);
807}
808
809static void bfin_tx_hwtstamp(struct net_device *netdev, struct sk_buff *skb)
810{
811 struct bfin_mac_local *lp = netdev_priv(netdev);
812
813 if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
814 int timeout_cnt = MAX_TIMEOUT_CNT;
815
816 /* When doing time stamping, keep the connection to the socket
817 * a while longer
818 */
819 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
820
821 /*
822 * The timestamping is done at the EMAC module's MII/RMII interface
823 * when the module sees the Start of Frame of an event message packet. This
824 * interface is the closest possible place to the physical Ethernet transmission
825 * medium, providing the best timing accuracy.
826 */
827 while ((!(bfin_read_EMAC_PTP_ISTAT() & TXTL)) && (--timeout_cnt))
828 udelay(1);
829 if (timeout_cnt == 0)
 830 netdev_err(netdev, "timestamping the TX packet failed\n");
831 else {
832 struct skb_shared_hwtstamps shhwtstamps;
833 u64 ns;
834 u64 regval;
835
836 regval = bfin_read_EMAC_PTP_TXSNAPLO();
837 regval |= (u64)bfin_read_EMAC_PTP_TXSNAPHI() << 32;
838 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
839 ns = timecounter_cyc2time(&lp->clock,
840 regval);
841 timecompare_update(&lp->compare, ns);
842 shhwtstamps.hwtstamp = ns_to_ktime(ns);
843 shhwtstamps.syststamp =
844 timecompare_transform(&lp->compare, ns);
845 skb_tstamp_tx(skb, &shhwtstamps);
846
847 bfin_dump_hwtamp("TX", &shhwtstamps.hwtstamp, &shhwtstamps.syststamp, &lp->compare);
848 }
849 }
850}
851
852static void bfin_rx_hwtstamp(struct net_device *netdev, struct sk_buff *skb)
853{
854 struct bfin_mac_local *lp = netdev_priv(netdev);
855 u32 valid;
856 u64 regval, ns;
857 struct skb_shared_hwtstamps *shhwtstamps;
858
859 if (bfin_mac_hwtstamp_is_none(lp->stamp_cfg.rx_filter))
860 return;
861
862 valid = bfin_read_EMAC_PTP_ISTAT() & RXEL;
863 if (!valid)
864 return;
865
866 shhwtstamps = skb_hwtstamps(skb);
867
868 regval = bfin_read_EMAC_PTP_RXSNAPLO();
869 regval |= (u64)bfin_read_EMAC_PTP_RXSNAPHI() << 32;
870 ns = timecounter_cyc2time(&lp->clock, regval);
871 timecompare_update(&lp->compare, ns);
872 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
873 shhwtstamps->hwtstamp = ns_to_ktime(ns);
874 shhwtstamps->syststamp = timecompare_transform(&lp->compare, ns);
875
876 bfin_dump_hwtamp("RX", &shhwtstamps->hwtstamp, &shhwtstamps->syststamp, &lp->compare);
877}
878
879/*
880 * bfin_read_clock - read raw cycle counter (to be used by time counter)
881 */
882static cycle_t bfin_read_clock(const struct cyclecounter *tc)
883{
884 u64 stamp;
885
886 stamp = bfin_read_EMAC_PTP_TIMELO();
887 stamp |= (u64)bfin_read_EMAC_PTP_TIMEHI() << 32ULL;
888
889 return stamp;
890}
891
892#define PTP_CLK 25000000
893
894static void bfin_mac_hwtstamp_init(struct net_device *netdev)
895{
896 struct bfin_mac_local *lp = netdev_priv(netdev);
897 u64 append;
898
899 /* Initialize hardware timer */
900 append = PTP_CLK * (1ULL << 32);
901 do_div(append, get_sclk());
902 bfin_write_EMAC_PTP_ADDEND((u32)append);
903
904 memset(&lp->cycles, 0, sizeof(lp->cycles));
905 lp->cycles.read = bfin_read_clock;
906 lp->cycles.mask = CLOCKSOURCE_MASK(64);
907 lp->cycles.mult = 1000000000 / PTP_CLK;
908 lp->cycles.shift = 0;
909
910 /* Synchronize our NIC clock against system wall clock */
911 memset(&lp->compare, 0, sizeof(lp->compare));
912 lp->compare.source = &lp->clock;
913 lp->compare.target = ktime_get_real;
914 lp->compare.num_samples = 10;
915
916 /* Initialize hwstamp config */
917 lp->stamp_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
918 lp->stamp_cfg.tx_type = HWTSTAMP_TX_OFF;
919}
920
921#else
922# define bfin_mac_hwtstamp_is_none(cfg) 0
923# define bfin_mac_hwtstamp_init(dev)
924# define bfin_mac_hwtstamp_ioctl(dev, ifr, cmd) (-EOPNOTSUPP)
925# define bfin_rx_hwtstamp(dev, skb)
926# define bfin_tx_hwtstamp(dev, skb)
927#endif
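
The EMAC_PTP_ADDEND programmed in bfin_mac_hwtstamp_init() above is PTP_CLK * 2^32 / SCLK, and the matching cyclecounter mult of 1000000000 / PTP_CLK = 40 means each PTP tick is counted as 40 ns. A standalone check of the addend arithmetic, not part of the patch, with an assumed 125 MHz system clock:

/* Illustrative sketch, not part of the patch: reproduce the addend computed
 * in bfin_mac_hwtstamp_init(); 125 MHz SCLK is an assumed example value. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t sclk = 125000000ULL;           /* assumed SCLK */
        uint64_t ptp_clk = 25000000ULL;         /* PTP_CLK from the driver */
        uint64_t append = (ptp_clk << 32) / sclk;

        printf("EMAC_PTP_ADDEND = 0x%08llx\n", (unsigned long long)append);     /* 0x33333333 */
        return 0;
}
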
928
929static inline void _tx_reclaim_skb(void)
930{
931 do {
932 tx_list_head->desc_a.config &= ~DMAEN;
933 tx_list_head->status.status_word = 0;
934 if (tx_list_head->skb) {
935 dev_kfree_skb(tx_list_head->skb);
936 tx_list_head->skb = NULL;
937 }
938 tx_list_head = tx_list_head->next;
939
940 } while (tx_list_head->status.status_word != 0);
941}
942
943static void tx_reclaim_skb(struct bfin_mac_local *lp)
944{
945 int timeout_cnt = MAX_TIMEOUT_CNT;
946
947 if (tx_list_head->status.status_word != 0)
948 _tx_reclaim_skb();
949
950 if (current_tx_ptr->next == tx_list_head) {
951 while (tx_list_head->status.status_word == 0) {
 952 /* slow down polling to avoid too many queue stops. */
953 udelay(10);
954 /* reclaim skb if DMA is not running. */
955 if (!(bfin_read_DMA2_IRQ_STATUS() & DMA_RUN))
956 break;
957 if (timeout_cnt-- < 0)
958 break;
959 }
960
961 if (timeout_cnt >= 0)
962 _tx_reclaim_skb();
963 else
964 netif_stop_queue(lp->ndev);
965 }
966
967 if (current_tx_ptr->next != tx_list_head &&
968 netif_queue_stopped(lp->ndev))
969 netif_wake_queue(lp->ndev);
970
971 if (tx_list_head != current_tx_ptr) {
972 /* shorten the timer interval if tx queue is stopped */
973 if (netif_queue_stopped(lp->ndev))
974 lp->tx_reclaim_timer.expires =
975 jiffies + (TX_RECLAIM_JIFFIES >> 4);
976 else
977 lp->tx_reclaim_timer.expires =
978 jiffies + TX_RECLAIM_JIFFIES;
979
980 mod_timer(&lp->tx_reclaim_timer,
981 lp->tx_reclaim_timer.expires);
982 }
983
984 return;
985}
986
987static void tx_reclaim_skb_timeout(unsigned long lp)
988{
989 tx_reclaim_skb((struct bfin_mac_local *)lp);
990}
991
992static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
993 struct net_device *dev)
994{
995 struct bfin_mac_local *lp = netdev_priv(dev);
996 u16 *data;
997 u32 data_align = (unsigned long)(skb->data) & 0x3;
998
999 current_tx_ptr->skb = skb;
1000
1001 if (data_align == 0x2) {
1002 /* move skb->data to current_tx_ptr payload */
1003 data = (u16 *)(skb->data) - 1;
1004 *data = (u16)(skb->len);
1005 /*
1006 * When transmitting an Ethernet packet, the PTP_TSYNC module requires
1007 * a DMA_Length_Word field associated with the packet. The lower 12 bits
1008 * of this field are the length of the packet payload in bytes and the higher
1009 * 4 bits are the timestamping enable field.
1010 */
1011 if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
1012 *data |= 0x1000;
1013
1014 current_tx_ptr->desc_a.start_addr = (u32)data;
1015 /* this is important! */
1016 blackfin_dcache_flush_range((u32)data,
1017 (u32)((u8 *)data + skb->len + 4));
1018 } else {
1019 *((u16 *)(current_tx_ptr->packet)) = (u16)(skb->len);
1020 /* enable timestamping for the sent packet */
1021 if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
1022 *((u16 *)(current_tx_ptr->packet)) |= 0x1000;
1023 memcpy((u8 *)(current_tx_ptr->packet + 2), skb->data,
1024 skb->len);
1025 current_tx_ptr->desc_a.start_addr =
1026 (u32)current_tx_ptr->packet;
1027 blackfin_dcache_flush_range(
1028 (u32)current_tx_ptr->packet,
1029 (u32)(current_tx_ptr->packet + skb->len + 2));
1030 }
1031
1032 /* make sure the internal data buffers in the core are drained
1033 * so that the DMA descriptors are completely written when the
1034 * DMA engine goes to fetch them below
1035 */
1036 SSYNC();
1037
1038 /* always clear status buffer before start tx dma */
1039 current_tx_ptr->status.status_word = 0;
1040
1041 /* enable this packet's dma */
1042 current_tx_ptr->desc_a.config |= DMAEN;
1043
1044 /* tx dma is running, just return */
1045 if (bfin_read_DMA2_IRQ_STATUS() & DMA_RUN)
1046 goto out;
1047
1048 /* tx dma is not running */
1049 bfin_write_DMA2_NEXT_DESC_PTR(&(current_tx_ptr->desc_a));
1050 /* dma enabled, read from memory, size is 6 */
1051 bfin_write_DMA2_CONFIG(current_tx_ptr->desc_a.config);
1052 /* Turn on the EMAC tx */
1053 bfin_write_EMAC_OPMODE(bfin_read_EMAC_OPMODE() | TE);
1054
1055out:
1056 bfin_tx_hwtstamp(dev, skb);
1057
1058 current_tx_ptr = current_tx_ptr->next;
1059 dev->stats.tx_packets++;
1060 dev->stats.tx_bytes += (skb->len);
1061
1062 tx_reclaim_skb(lp);
1063
1064 return NETDEV_TX_OK;
1065}
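
As the DMA_Length_Word comment in bfin_mac_hard_start_xmit() above describes, the low 12 bits carry the payload length and bit 12 enables timestamping; for example, a 60-byte payload with timestamping requested becomes 60 | 0x1000 = 0x103c, and plain 0x003c without it. (Worked example only, not from the patch.)
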
1066
1067#define IP_HEADER_OFF 0
1068#define RX_ERROR_MASK (RX_LONG | RX_ALIGN | RX_CRC | RX_LEN | \
1069 RX_FRAG | RX_ADDR | RX_DMAO | RX_PHY | RX_LATE | RX_RANGE)
1070
1071static void bfin_mac_rx(struct net_device *dev)
1072{
1073 struct sk_buff *skb, *new_skb;
1074 unsigned short len;
1075 struct bfin_mac_local *lp __maybe_unused = netdev_priv(dev);
1076#if defined(BFIN_MAC_CSUM_OFFLOAD)
1077 unsigned int i;
1078 unsigned char fcs[ETH_FCS_LEN + 1];
1079#endif
1080
1081 /* check if frame status word reports an error condition
 1082 * in which case we simply drop the packet
1083 */
1084 if (current_rx_ptr->status.status_word & RX_ERROR_MASK) {
1085 netdev_notice(dev, "rx: receive error - packet dropped\n");
1086 dev->stats.rx_dropped++;
1087 goto out;
1088 }
1089
1090 /* allocate a new skb for next time receive */
1091 skb = current_rx_ptr->skb;
1092
1093 new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN);
1094 if (!new_skb) {
1095 netdev_notice(dev, "rx: low on mem - packet dropped\n");
1096 dev->stats.rx_dropped++;
1097 goto out;
1098 }
1099 /* reserve 2 bytes for RXDWA padding */
1100 skb_reserve(new_skb, NET_IP_ALIGN);
 1101 /* Invalidate the data cache over the skb->data range when write-back
 1102 * cache is enabled, so stale cache lines cannot overwrite the new data from DMA
 1103 */
1104 blackfin_dcache_invalidate_range((unsigned long)new_skb->head,
1105 (unsigned long)new_skb->end);
1106
1107 current_rx_ptr->skb = new_skb;
1108 current_rx_ptr->desc_a.start_addr = (unsigned long)new_skb->data - 2;
1109
1110 len = (unsigned short)((current_rx_ptr->status.status_word) & RX_FRLEN);
 1111 /* Deduct the Ethernet FCS length from the received frame length */
1112 len -= ETH_FCS_LEN;
1113 skb_put(skb, len);
1114
1115 skb->protocol = eth_type_trans(skb, dev);
1116
1117 bfin_rx_hwtstamp(dev, skb);
1118
1119#if defined(BFIN_MAC_CSUM_OFFLOAD)
1120 /* Checksum offloading only works for IPv4 packets with the standard IP header
 1121 * length of 20 bytes, because the Blackfin MAC checksum calculation is
 1122 * based on that assumption. We must NOT use the calculated checksum if our
 1123 * IP version or header breaks that assumption.
1124 */
1125 if (skb->data[IP_HEADER_OFF] == 0x45) {
1126 skb->csum = current_rx_ptr->status.ip_payload_csum;
1127 /*
 1128 * Remove the Ethernet FCS from the hardware-generated IP payload checksum.
 1129 * The IP checksum uses 16-bit one's complement arithmetic, so removing
 1130 * a value from the checksum is equivalent to adding its inverse.
 1131 * If the IP payload length is odd, the inverted FCS must also start at
 1132 * an odd offset, so the first byte is left zero.
1133 */
1134 if (skb->len % 2) {
1135 fcs[0] = 0;
1136 for (i = 0; i < ETH_FCS_LEN; i++)
1137 fcs[i + 1] = ~skb->data[skb->len + i];
1138 skb->csum = csum_partial(fcs, ETH_FCS_LEN + 1, skb->csum);
1139 } else {
1140 for (i = 0; i < ETH_FCS_LEN; i++)
1141 fcs[i] = ~skb->data[skb->len + i];
1142 skb->csum = csum_partial(fcs, ETH_FCS_LEN, skb->csum);
1143 }
1144 skb->ip_summed = CHECKSUM_COMPLETE;
1145 }
1146#endif
1147
1148 netif_rx(skb);
1149 dev->stats.rx_packets++;
1150 dev->stats.rx_bytes += len;
1151out:
1152 current_rx_ptr->status.status_word = 0x00000000;
1153 current_rx_ptr = current_rx_ptr->next;
1154}
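
The checksum fix-up above relies on a property of 16-bit one's-complement sums: folding in the bitwise inverse of a value cancels that value's contribution (up to the equivalence of 0x0000 and 0xFFFF). A standalone demonstration, not part of the patch:

/* Illustrative sketch, not part of the patch: adding ~x into a one's-complement
 * sum removes x, which is how the FCS is stripped from the hardware checksum. */
#include <stdio.h>
#include <stdint.h>

static uint16_t ones_add(uint32_t a, uint32_t b)
{
        uint32_t s = a + b;

        while (s >> 16)
                s = (s & 0xffff) + (s >> 16);
        return (uint16_t)s;
}

int main(void)
{
        uint16_t payload = 0x1234;      /* arbitrary example values */
        uint16_t fcs = 0xabcd;
        uint16_t with_fcs = ones_add(payload, fcs);
        uint16_t without = ones_add(with_fcs, (uint16_t)~fcs);

        printf("with FCS: 0x%04x, after removing FCS: 0x%04x\n", with_fcs, without);   /* 0xbe01, 0x1234 */
        return 0;
}
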
1155
1156/* interrupt routine to handle rx and error signal */
1157static irqreturn_t bfin_mac_interrupt(int irq, void *dev_id)
1158{
1159 struct net_device *dev = dev_id;
1160 int number = 0;
1161
1162get_one_packet:
1163 if (current_rx_ptr->status.status_word == 0) {
1164 /* no more new packet received */
1165 if (number == 0) {
1166 if (current_rx_ptr->next->status.status_word != 0) {
1167 current_rx_ptr = current_rx_ptr->next;
1168 goto real_rx;
1169 }
1170 }
1171 bfin_write_DMA1_IRQ_STATUS(bfin_read_DMA1_IRQ_STATUS() |
1172 DMA_DONE | DMA_ERR);
1173 return IRQ_HANDLED;
1174 }
1175
1176real_rx:
1177 bfin_mac_rx(dev);
1178 number++;
1179 goto get_one_packet;
1180}
1181
1182#ifdef CONFIG_NET_POLL_CONTROLLER
1183static void bfin_mac_poll(struct net_device *dev)
1184{
1185 struct bfin_mac_local *lp = netdev_priv(dev);
1186
1187 disable_irq(IRQ_MAC_RX);
1188 bfin_mac_interrupt(IRQ_MAC_RX, dev);
1189 tx_reclaim_skb(lp);
1190 enable_irq(IRQ_MAC_RX);
1191}
1192#endif /* CONFIG_NET_POLL_CONTROLLER */
1193
1194static void bfin_mac_disable(void)
1195{
1196 unsigned int opmode;
1197
1198 opmode = bfin_read_EMAC_OPMODE();
1199 opmode &= (~RE);
1200 opmode &= (~TE);
1201 /* Turn off the EMAC */
1202 bfin_write_EMAC_OPMODE(opmode);
1203}
1204
1205/*
1206 * Enable Interrupts, Receive, and Transmit
1207 */
1208static int bfin_mac_enable(struct phy_device *phydev)
1209{
1210 int ret;
1211 u32 opmode;
1212
1213 pr_debug("%s\n", __func__);
1214
1215 /* Set RX DMA */
1216 bfin_write_DMA1_NEXT_DESC_PTR(&(rx_list_head->desc_a));
1217 bfin_write_DMA1_CONFIG(rx_list_head->desc_a.config);
1218
1219 /* Wait MII done */
1220 ret = bfin_mdio_poll();
1221 if (ret)
1222 return ret;
1223
1224 /* We enable only RX here */
1225 /* ASTP : Enable Automatic Pad Stripping
1226 PR : Promiscuous Mode for test
1227 PSF : Receive frames with total length less than 64 bytes.
1228 FDMODE : Full Duplex Mode
1229 LB : Internal Loopback for test
1230 RE : Receiver Enable */
1231 opmode = bfin_read_EMAC_OPMODE();
1232 if (opmode & FDMODE)
1233 opmode |= PSF;
1234 else
1235 opmode |= DRO | DC | PSF;
1236 opmode |= RE;
1237
1238 if (phydev->interface == PHY_INTERFACE_MODE_RMII) {
1239 opmode |= RMII; /* For Now only 100MBit are supported */
1240#if defined(CONFIG_BF537) || defined(CONFIG_BF536)
1241 if (__SILICON_REVISION__ < 3) {
1242 /*
1243 * This isn't publicly documented (fun times!), but in
1244 * silicon <=0.2, the RX and TX pins are clocked together.
1245 * So in order to recv, we must enable the transmit side
1246 * as well. This will cause a spurious TX interrupt too,
1247 * but we can easily consume that.
1248 */
1249 opmode |= TE;
1250 }
1251#endif
1252 }
1253
1254 /* Turn on the EMAC rx */
1255 bfin_write_EMAC_OPMODE(opmode);
1256
1257 return 0;
1258}
1259
1260/* Our watchdog timed out. Called by the networking layer */
1261static void bfin_mac_timeout(struct net_device *dev)
1262{
1263 struct bfin_mac_local *lp = netdev_priv(dev);
1264
1265 pr_debug("%s: %s\n", dev->name, __func__);
1266
1267 bfin_mac_disable();
1268
1269 del_timer(&lp->tx_reclaim_timer);
1270
1271 /* reset tx queue and free skb */
1272 while (tx_list_head != current_tx_ptr) {
1273 tx_list_head->desc_a.config &= ~DMAEN;
1274 tx_list_head->status.status_word = 0;
1275 if (tx_list_head->skb) {
1276 dev_kfree_skb(tx_list_head->skb);
1277 tx_list_head->skb = NULL;
1278 }
1279 tx_list_head = tx_list_head->next;
1280 }
1281
1282 if (netif_queue_stopped(lp->ndev))
1283 netif_wake_queue(lp->ndev);
1284
1285 bfin_mac_enable(lp->phydev);
1286
1287 /* We can accept TX packets again */
1288 dev->trans_start = jiffies; /* prevent tx timeout */
1289 netif_wake_queue(dev);
1290}
1291
1292static void bfin_mac_multicast_hash(struct net_device *dev)
1293{
1294 u32 emac_hashhi, emac_hashlo;
1295 struct netdev_hw_addr *ha;
1296 u32 crc;
1297
1298 emac_hashhi = emac_hashlo = 0;
1299
1300 netdev_for_each_mc_addr(ha, dev) {
1301 crc = ether_crc(ETH_ALEN, ha->addr);
1302 crc >>= 26;
1303
1304 if (crc & 0x20)
1305 emac_hashhi |= 1 << (crc & 0x1f);
1306 else
1307 emac_hashlo |= 1 << (crc & 0x1f);
1308 }
1309
1310 bfin_write_EMAC_HASHHI(emac_hashhi);
1311 bfin_write_EMAC_HASHLO(emac_hashlo);
1312}
1313
1314/*
1315 * This routine will, depending on the values passed to it,
1316 * either make it accept multicast packets, go into
1317 * promiscuous mode (for TCPDUMP and cousins) or accept
1318 * a select set of multicast packets
1319 */
1320static void bfin_mac_set_multicast_list(struct net_device *dev)
1321{
1322 u32 sysctl;
1323
1324 if (dev->flags & IFF_PROMISC) {
1325 netdev_info(dev, "set promisc mode\n");
1326 sysctl = bfin_read_EMAC_OPMODE();
1327 sysctl |= PR;
1328 bfin_write_EMAC_OPMODE(sysctl);
1329 } else if (dev->flags & IFF_ALLMULTI) {
1330 /* accept all multicast */
1331 sysctl = bfin_read_EMAC_OPMODE();
1332 sysctl |= PAM;
1333 bfin_write_EMAC_OPMODE(sysctl);
1334 } else if (!netdev_mc_empty(dev)) {
1335 /* set up multicast hash table */
1336 sysctl = bfin_read_EMAC_OPMODE();
1337 sysctl |= HM;
1338 bfin_write_EMAC_OPMODE(sysctl);
1339 bfin_mac_multicast_hash(dev);
1340 } else {
1341 /* clear promisc or multicast mode */
1342 sysctl = bfin_read_EMAC_OPMODE();
1343 sysctl &= ~(RAF | PAM);
1344 bfin_write_EMAC_OPMODE(sysctl);
1345 }
1346}
1347
1348static int bfin_mac_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1349{
1350 struct bfin_mac_local *lp = netdev_priv(netdev);
1351
1352 if (!netif_running(netdev))
1353 return -EINVAL;
1354
1355 switch (cmd) {
1356 case SIOCSHWTSTAMP:
1357 return bfin_mac_hwtstamp_ioctl(netdev, ifr, cmd);
1358 default:
1359 if (lp->phydev)
1360 return phy_mii_ioctl(lp->phydev, ifr, cmd);
1361 else
1362 return -EOPNOTSUPP;
1363 }
1364}
1365
1366/*
1367 * this puts the device in an inactive state
1368 */
1369static void bfin_mac_shutdown(struct net_device *dev)
1370{
1371 /* Turn off the EMAC */
1372 bfin_write_EMAC_OPMODE(0x00000000);
1373 /* Turn off the EMAC RX DMA */
1374 bfin_write_DMA1_CONFIG(0x0000);
1375 bfin_write_DMA2_CONFIG(0x0000);
1376}
1377
1378/*
1379 * Open and Initialize the interface
1380 *
1381 * Set up everything, reset the card, etc..
1382 */
1383static int bfin_mac_open(struct net_device *dev)
1384{
1385 struct bfin_mac_local *lp = netdev_priv(dev);
1386 int ret;
1387 pr_debug("%s: %s\n", dev->name, __func__);
1388
1389 /*
 1390 * Check that the address is valid. If it's not, refuse
1391 * to bring the device up. The user must specify an
1392 * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx
1393 */
1394 if (!is_valid_ether_addr(dev->dev_addr)) {
1395 netdev_warn(dev, "no valid ethernet hw addr\n");
1396 return -EINVAL;
1397 }
1398
1399 /* initial rx and tx list */
1400 ret = desc_list_init();
1401 if (ret)
1402 return ret;
1403
1404 phy_start(lp->phydev);
1405 phy_write(lp->phydev, MII_BMCR, BMCR_RESET);
1406 setup_system_regs(dev);
1407 setup_mac_addr(dev->dev_addr);
1408
1409 bfin_mac_disable();
1410 ret = bfin_mac_enable(lp->phydev);
1411 if (ret)
1412 return ret;
1413 pr_debug("hardware init finished\n");
1414
1415 netif_start_queue(dev);
1416 netif_carrier_on(dev);
1417
1418 return 0;
1419}
1420
1421/*
1422 * this makes the board clean up everything that it can
1423 * and not talk to the outside world. Caused by
1424 * an 'ifconfig ethX down'
1425 */
1426static int bfin_mac_close(struct net_device *dev)
1427{
1428 struct bfin_mac_local *lp = netdev_priv(dev);
1429 pr_debug("%s: %s\n", dev->name, __func__);
1430
1431 netif_stop_queue(dev);
1432 netif_carrier_off(dev);
1433
1434 phy_stop(lp->phydev);
1435 phy_write(lp->phydev, MII_BMCR, BMCR_PDOWN);
1436
1437 /* clear everything */
1438 bfin_mac_shutdown(dev);
1439
1440 /* free the rx/tx buffers */
1441 desc_list_free();
1442
1443 return 0;
1444}
1445
1446static const struct net_device_ops bfin_mac_netdev_ops = {
1447 .ndo_open = bfin_mac_open,
1448 .ndo_stop = bfin_mac_close,
1449 .ndo_start_xmit = bfin_mac_hard_start_xmit,
1450 .ndo_set_mac_address = bfin_mac_set_mac_address,
1451 .ndo_tx_timeout = bfin_mac_timeout,
1452 .ndo_set_multicast_list = bfin_mac_set_multicast_list,
1453 .ndo_do_ioctl = bfin_mac_ioctl,
1454 .ndo_validate_addr = eth_validate_addr,
1455 .ndo_change_mtu = eth_change_mtu,
1456#ifdef CONFIG_NET_POLL_CONTROLLER
1457 .ndo_poll_controller = bfin_mac_poll,
1458#endif
1459};
1460
1461static int __devinit bfin_mac_probe(struct platform_device *pdev)
1462{
1463 struct net_device *ndev;
1464 struct bfin_mac_local *lp;
1465 struct platform_device *pd;
1466 struct bfin_mii_bus_platform_data *mii_bus_data;
1467 int rc;
1468
1469 ndev = alloc_etherdev(sizeof(struct bfin_mac_local));
1470 if (!ndev) {
1471 dev_err(&pdev->dev, "Cannot allocate net device!\n");
1472 return -ENOMEM;
1473 }
1474
1475 SET_NETDEV_DEV(ndev, &pdev->dev);
1476 platform_set_drvdata(pdev, ndev);
1477 lp = netdev_priv(ndev);
1478 lp->ndev = ndev;
1479
1480 /* Grab the MAC address in the MAC */
1481 *(__le32 *) (&(ndev->dev_addr[0])) = cpu_to_le32(bfin_read_EMAC_ADDRLO());
1482 *(__le16 *) (&(ndev->dev_addr[4])) = cpu_to_le16((u16) bfin_read_EMAC_ADDRHI());
1483
1484 /* probe mac */
 1485 /* todo: how to probe? which register is the revision register? */
1486 bfin_write_EMAC_ADDRLO(0x12345678);
1487 if (bfin_read_EMAC_ADDRLO() != 0x12345678) {
1488 dev_err(&pdev->dev, "Cannot detect Blackfin on-chip ethernet MAC controller!\n");
1489 rc = -ENODEV;
1490 goto out_err_probe_mac;
1491 }
1492
1493
1494 /*
 1495 * Is it valid? (Did the bootloader initialize it?)
 1496 * If not, grab the MAC address from the board; this is done in
 1497 * arch/blackfin/mach-bfxxx/boards/eth_mac.c.
1498 */
1499 if (!is_valid_ether_addr(ndev->dev_addr))
1500 bfin_get_ether_addr(ndev->dev_addr);
1501
1502 /* If still not valid, get a random one */
1503 if (!is_valid_ether_addr(ndev->dev_addr))
1504 random_ether_addr(ndev->dev_addr);
1505
1506 setup_mac_addr(ndev->dev_addr);
1507
1508 if (!pdev->dev.platform_data) {
1509 dev_err(&pdev->dev, "Cannot get platform device bfin_mii_bus!\n");
1510 rc = -ENODEV;
1511 goto out_err_probe_mac;
1512 }
1513 pd = pdev->dev.platform_data;
1514 lp->mii_bus = platform_get_drvdata(pd);
1515 if (!lp->mii_bus) {
1516 dev_err(&pdev->dev, "Cannot get mii_bus!\n");
1517 rc = -ENODEV;
1518 goto out_err_probe_mac;
1519 }
1520 lp->mii_bus->priv = ndev;
1521 mii_bus_data = pd->dev.platform_data;
1522
1523 rc = mii_probe(ndev, mii_bus_data->phy_mode);
1524 if (rc) {
1525 dev_err(&pdev->dev, "MII Probe failed!\n");
1526 goto out_err_mii_probe;
1527 }
1528
1529 lp->vlan1_mask = ETH_P_8021Q | mii_bus_data->vlan1_mask;
1530 lp->vlan2_mask = ETH_P_8021Q | mii_bus_data->vlan2_mask;
1531
1532 /* Fill in the fields of the device structure with ethernet values. */
1533 ether_setup(ndev);
1534
1535 ndev->netdev_ops = &bfin_mac_netdev_ops;
1536 ndev->ethtool_ops = &bfin_mac_ethtool_ops;
1537
1538 init_timer(&lp->tx_reclaim_timer);
1539 lp->tx_reclaim_timer.data = (unsigned long)lp;
1540 lp->tx_reclaim_timer.function = tx_reclaim_skb_timeout;
1541
1542 spin_lock_init(&lp->lock);
1543
1544 /* now, enable interrupts */
1545 /* register irq handler */
1546 rc = request_irq(IRQ_MAC_RX, bfin_mac_interrupt,
1547 IRQF_DISABLED, "EMAC_RX", ndev);
1548 if (rc) {
1549 dev_err(&pdev->dev, "Cannot request Blackfin MAC RX IRQ!\n");
1550 rc = -EBUSY;
1551 goto out_err_request_irq;
1552 }
1553
1554 rc = register_netdev(ndev);
1555 if (rc) {
1556 dev_err(&pdev->dev, "Cannot register net device!\n");
1557 goto out_err_reg_ndev;
1558 }
1559
1560 bfin_mac_hwtstamp_init(ndev);
1561
1562 /* now, print out the card info, in a short format.. */
1563 netdev_info(ndev, "%s, Version %s\n", DRV_DESC, DRV_VERSION);
1564
1565 return 0;
1566
1567out_err_reg_ndev:
1568 free_irq(IRQ_MAC_RX, ndev);
1569out_err_request_irq:
1570out_err_mii_probe:
1571 mdiobus_unregister(lp->mii_bus);
1572 mdiobus_free(lp->mii_bus);
1573out_err_probe_mac:
1574 platform_set_drvdata(pdev, NULL);
1575 free_netdev(ndev);
1576
1577 return rc;
1578}
1579
1580static int __devexit bfin_mac_remove(struct platform_device *pdev)
1581{
1582 struct net_device *ndev = platform_get_drvdata(pdev);
1583 struct bfin_mac_local *lp = netdev_priv(ndev);
1584
1585 platform_set_drvdata(pdev, NULL);
1586
1587 lp->mii_bus->priv = NULL;
1588
1589 unregister_netdev(ndev);
1590
1591 free_irq(IRQ_MAC_RX, ndev);
1592
1593 free_netdev(ndev);
1594
1595 return 0;
1596}
1597
1598#ifdef CONFIG_PM
1599static int bfin_mac_suspend(struct platform_device *pdev, pm_message_t mesg)
1600{
1601 struct net_device *net_dev = platform_get_drvdata(pdev);
1602 struct bfin_mac_local *lp = netdev_priv(net_dev);
1603
1604 if (lp->wol) {
1605 bfin_write_EMAC_OPMODE((bfin_read_EMAC_OPMODE() & ~TE) | RE);
1606 bfin_write_EMAC_WKUP_CTL(MPKE);
1607 enable_irq_wake(IRQ_MAC_WAKEDET);
1608 } else {
1609 if (netif_running(net_dev))
1610 bfin_mac_close(net_dev);
1611 }
1612
1613 return 0;
1614}
1615
1616static int bfin_mac_resume(struct platform_device *pdev)
1617{
1618 struct net_device *net_dev = platform_get_drvdata(pdev);
1619 struct bfin_mac_local *lp = netdev_priv(net_dev);
1620
1621 if (lp->wol) {
1622 bfin_write_EMAC_OPMODE(bfin_read_EMAC_OPMODE() | TE);
1623 bfin_write_EMAC_WKUP_CTL(0);
1624 disable_irq_wake(IRQ_MAC_WAKEDET);
1625 } else {
1626 if (netif_running(net_dev))
1627 bfin_mac_open(net_dev);
1628 }
1629
1630 return 0;
1631}
1632#else
1633#define bfin_mac_suspend NULL
1634#define bfin_mac_resume NULL
1635#endif /* CONFIG_PM */
1636
1637static int __devinit bfin_mii_bus_probe(struct platform_device *pdev)
1638{
1639 struct mii_bus *miibus;
1640 struct bfin_mii_bus_platform_data *mii_bus_pd;
1641 const unsigned short *pin_req;
1642 int rc, i;
1643
1644 mii_bus_pd = dev_get_platdata(&pdev->dev);
1645 if (!mii_bus_pd) {
1646 dev_err(&pdev->dev, "No peripherals in platform data!\n");
1647 return -EINVAL;
1648 }
1649
1650 /*
1651 * We are setting up a network card,
1652 * so set the GPIO pins to Ethernet mode
1653 */
1654 pin_req = mii_bus_pd->mac_peripherals;
1655 rc = peripheral_request_list(pin_req, KBUILD_MODNAME);
1656 if (rc) {
1657 dev_err(&pdev->dev, "Requesting peripherals failed!\n");
1658 return rc;
1659 }
1660
1661 rc = -ENOMEM;
1662 miibus = mdiobus_alloc();
1663 if (miibus == NULL)
1664 goto out_err_alloc;
1665 miibus->read = bfin_mdiobus_read;
1666 miibus->write = bfin_mdiobus_write;
1667 miibus->reset = bfin_mdiobus_reset;
1668
1669 miibus->parent = &pdev->dev;
1670 miibus->name = "bfin_mii_bus";
1671 miibus->phy_mask = mii_bus_pd->phy_mask;
1672
1673 snprintf(miibus->id, MII_BUS_ID_SIZE, "0");
1674 miibus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
1675 if (!miibus->irq)
1676 goto out_err_irq_alloc;
1677
1678 for (i = rc; i < PHY_MAX_ADDR; ++i)
1679 miibus->irq[i] = PHY_POLL;
1680
1681 rc = clamp(mii_bus_pd->phydev_number, 0, PHY_MAX_ADDR);
1682 if (rc != mii_bus_pd->phydev_number)
1683 dev_err(&pdev->dev, "Invalid number (%i) of phydevs\n",
1684 mii_bus_pd->phydev_number);
1685 for (i = 0; i < rc; ++i) {
1686 unsigned short phyaddr = mii_bus_pd->phydev_data[i].addr;
1687 if (phyaddr < PHY_MAX_ADDR)
1688 miibus->irq[phyaddr] = mii_bus_pd->phydev_data[i].irq;
1689 else
1690 dev_err(&pdev->dev,
1691 "Invalid PHY address %i for phydev %i\n",
1692 phyaddr, i);
1693 }
1694
1695 rc = mdiobus_register(miibus);
1696 if (rc) {
1697 dev_err(&pdev->dev, "Cannot register MDIO bus!\n");
1698 goto out_err_mdiobus_register;
1699 }
1700
1701 platform_set_drvdata(pdev, miibus);
1702 return 0;
1703
1704out_err_mdiobus_register:
1705 kfree(miibus->irq);
1706out_err_irq_alloc:
1707 mdiobus_free(miibus);
1708out_err_alloc:
1709 peripheral_free_list(pin_req);
1710
1711 return rc;
1712}
1713
1714static int __devexit bfin_mii_bus_remove(struct platform_device *pdev)
1715{
1716 struct mii_bus *miibus = platform_get_drvdata(pdev);
1717 struct bfin_mii_bus_platform_data *mii_bus_pd =
1718 dev_get_platdata(&pdev->dev);
1719
1720 platform_set_drvdata(pdev, NULL);
1721 mdiobus_unregister(miibus);
1722 kfree(miibus->irq);
1723 mdiobus_free(miibus);
1724 peripheral_free_list(mii_bus_pd->mac_peripherals);
1725
1726 return 0;
1727}
1728
1729static struct platform_driver bfin_mii_bus_driver = {
1730 .probe = bfin_mii_bus_probe,
1731 .remove = __devexit_p(bfin_mii_bus_remove),
1732 .driver = {
1733 .name = "bfin_mii_bus",
1734 .owner = THIS_MODULE,
1735 },
1736};
1737
1738static struct platform_driver bfin_mac_driver = {
1739 .probe = bfin_mac_probe,
1740 .remove = __devexit_p(bfin_mac_remove),
1741 .resume = bfin_mac_resume,
1742 .suspend = bfin_mac_suspend,
1743 .driver = {
1744 .name = KBUILD_MODNAME,
1745 .owner = THIS_MODULE,
1746 },
1747};
1748
1749static int __init bfin_mac_init(void)
1750{
1751 int ret;
1752 ret = platform_driver_register(&bfin_mii_bus_driver);
1753 if (!ret)
1754 return platform_driver_register(&bfin_mac_driver);
1755 return -ENODEV;
1756}
1757
1758module_init(bfin_mac_init);
1759
1760static void __exit bfin_mac_cleanup(void)
1761{
1762 platform_driver_unregister(&bfin_mac_driver);
1763 platform_driver_unregister(&bfin_mii_bus_driver);
1764}
1765
1766module_exit(bfin_mac_cleanup);
1767
diff --git a/drivers/net/ethernet/adi/bfin_mac.h b/drivers/net/ethernet/adi/bfin_mac.h
new file mode 100644
index 00000000000..f8559ac9a40
--- /dev/null
+++ b/drivers/net/ethernet/adi/bfin_mac.h
@@ -0,0 +1,106 @@
1/*
2 * Blackfin On-Chip MAC Driver
3 *
4 * Copyright 2004-2007 Analog Devices Inc.
5 *
6 * Enter bugs at http://blackfin.uclinux.org/
7 *
8 * Licensed under the GPL-2 or later.
9 */
10#ifndef _BFIN_MAC_H_
11#define _BFIN_MAC_H_
12
13#include <linux/net_tstamp.h>
14#include <linux/clocksource.h>
15#include <linux/timecompare.h>
16#include <linux/timer.h>
17#include <linux/etherdevice.h>
18#include <linux/bfin_mac.h>
19
20/*
 21 * Disable hardware checksum for bug #5600 if write-back cache is
 22 * enabled. Otherwise, corrupted RX packets will be sent up the stack
 23 * without an error mark.
24 */
25#ifndef CONFIG_BFIN_EXTMEM_WRITEBACK
26#define BFIN_MAC_CSUM_OFFLOAD
27#endif
28
29#define TX_RECLAIM_JIFFIES (HZ / 5)
30
31struct dma_descriptor {
32 struct dma_descriptor *next_dma_desc;
33 unsigned long start_addr;
34 unsigned short config;
35 unsigned short x_count;
36};
37
38struct status_area_rx {
39#if defined(BFIN_MAC_CSUM_OFFLOAD)
40 unsigned short ip_hdr_csum; /* ip header checksum */
41 /* ip payload(udp or tcp or others) checksum */
42 unsigned short ip_payload_csum;
43#endif
44 unsigned long status_word; /* the frame status word */
45};
46
47struct status_area_tx {
48 unsigned long status_word; /* the frame status word */
49};
50
51/* use two descriptors for a packet */
52struct net_dma_desc_rx {
53 struct net_dma_desc_rx *next;
54 struct sk_buff *skb;
55 struct dma_descriptor desc_a;
56 struct dma_descriptor desc_b;
57 struct status_area_rx status;
58};
59
60/* use two descriptors for a packet */
61struct net_dma_desc_tx {
62 struct net_dma_desc_tx *next;
63 struct sk_buff *skb;
64 struct dma_descriptor desc_a;
65 struct dma_descriptor desc_b;
66 unsigned char packet[1560];
67 struct status_area_tx status;
68};
69
70struct bfin_mac_local {
71 /*
72 * these are things that the kernel wants me to keep, so users
73 * can find out semi-useless statistics of how well the card is
74 * performing
75 */
76 struct net_device_stats stats;
77
78 spinlock_t lock;
79
80 int wol; /* Wake On Lan */
81 int irq_wake_requested;
82 struct timer_list tx_reclaim_timer;
83 struct net_device *ndev;
84
85 /* Data for EMAC_VLAN1 regs */
86 u16 vlan1_mask, vlan2_mask;
87
88 /* MII and PHY stuffs */
 89 int old_link; /* used by bfin_mac_adjust_link */
90 int old_speed;
91 int old_duplex;
92
93 struct phy_device *phydev;
94 struct mii_bus *mii_bus;
95
96#if defined(CONFIG_BFIN_MAC_USE_HWSTAMP)
97 struct cyclecounter cycles;
98 struct timecounter clock;
99 struct timecompare compare;
100 struct hwtstamp_config stamp_cfg;
101#endif
102};
103
104extern void bfin_get_ether_addr(char *addr);
105
106#endif