-rw-r--r--  drivers/net/Kconfig            8
-rw-r--r--  drivers/net/Makefile           2
-rw-r--r--  drivers/net/ll_temac.h       374
-rw-r--r--  drivers/net/ll_temac_main.c  969
-rw-r--r--  drivers/net/ll_temac_mdio.c  120
5 files changed, 1473 insertions, 0 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 214a92d1ef75..3320e7761576 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2362,6 +2362,14 @@ config MV643XX_ETH
2362 Some boards that use the Discovery chipset are the Momenco
2363 Ocelot C and Jaguar ATX and Pegasos II.
2364
2365config XILINX_LL_TEMAC
2366 tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver"
2367 select PHYLIB
2368 depends on PPC_DCR_NATIVE
2369 help
2370 This driver supports the Xilinx 10/100/1000 LocalLink TEMAC
2371 core used in Xilinx Spartan and Virtex FPGAs
2372
2373config QLA3XXX
2374 tristate "QLogic QLA3XXX Network Driver Support"
2375 depends on PCI
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 1fc4602a6ff2..80420f6d0795 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -134,6 +134,8 @@ obj-$(CONFIG_AX88796) += ax88796.o
134
135obj-$(CONFIG_TSI108_ETH) += tsi108_eth.o
136obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o
137ll_temac-objs := ll_temac_main.o ll_temac_mdio.o
138obj-$(CONFIG_XILINX_LL_TEMAC) += ll_temac.o
139obj-$(CONFIG_QLA3XXX) += qla3xxx.o
140obj-$(CONFIG_QLGE) += qlge/
141
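
With the two hunks above, CONFIG_XILINX_LL_TEMAC links ll_temac_main.o and
ll_temac_mdio.o into a single ll_temac.ko module. A minimal configuration
sketch (assuming a Virtex-5 target where PPC_DCR_NATIVE is set; PHYLIB is
pulled in by the select above):

    CONFIG_XILINX_LL_TEMAC=m
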
diff --git a/drivers/net/ll_temac.h b/drivers/net/ll_temac.h
new file mode 100644
index 000000000000..1af66a1e6911
--- /dev/null
+++ b/drivers/net/ll_temac.h
@@ -0,0 +1,374 @@
1
2#ifndef XILINX_LL_TEMAC_H
3#define XILINX_LL_TEMAC_H
4
5#include <linux/netdevice.h>
6#include <linux/of.h>
7#include <linux/spinlock.h>
8#include <asm/dcr.h>
9#include <asm/dcr-regs.h>
10
11/* packet size info */
12#define XTE_HDR_SIZE 14 /* size of Ethernet header */
13#define XTE_TRL_SIZE 4 /* size of Ethernet trailer (FCS) */
14#define XTE_JUMBO_MTU 9000
15#define XTE_MAX_JUMBO_FRAME_SIZE (XTE_JUMBO_MTU + XTE_HDR_SIZE + XTE_TRL_SIZE)
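/* For reference: with the values above, a maximal jumbo frame is
 * 9000 + 14 + 4 = 9018 octets, which is also the receive buffer size the
 * driver maps for each rx skb. */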
16
17/* Configuration options */
18
19/* Accept all incoming packets.
20 * This option defaults to disabled (cleared) */
21#define XTE_OPTION_PROMISC (1 << 0)
22/* Jumbo frame support for Tx & Rx.
23 * This option defaults to disabled (cleared) */
24#define XTE_OPTION_JUMBO (1 << 1)
25/* VLAN Rx & Tx frame support.
26 * This option defaults to disabled (cleared) */
27#define XTE_OPTION_VLAN (1 << 2)
28/* Enable recognition of flow control frames on Rx
29 * This option defaults to enabled (set) */
30#define XTE_OPTION_FLOW_CONTROL (1 << 4)
31/* Strip FCS and PAD from incoming frames.
32 * Note: PAD from VLAN frames is not stripped.
33 * This option defaults to disabled (cleared) */
34#define XTE_OPTION_FCS_STRIP (1 << 5)
35/* Generate FCS field and add PAD automatically for outgoing frames.
36 * This option defaults to enabled (set) */
37#define XTE_OPTION_FCS_INSERT (1 << 6)
38/* Enable Length/Type error checking for incoming frames. When this option is
39set, the MAC will filter frames that have a mismatched type/length field
40and if XTE_OPTION_REPORT_RXERR is set, the user is notified when these
41types of frames are encountered. When this option is cleared, the MAC will
42allow these types of frames to be received.
43This option defaults to enabled (set) */
44#define XTE_OPTION_LENTYPE_ERR (1 << 7)
45/* Enable the transmitter.
46 * This option defaults to enabled (set) */
47#define XTE_OPTION_TXEN (1 << 11)
48/* Enable the receiver
49 * This option defaults to enabled (set) */
50#define XTE_OPTION_RXEN (1 << 12)
51
52/* Default options set when device is initialized or reset */
53#define XTE_OPTION_DEFAULTS \
54 (XTE_OPTION_TXEN | \
55 XTE_OPTION_FLOW_CONTROL | \
56 XTE_OPTION_RXEN)
57
58/* XPS_LL_TEMAC SDMA registers definition */
59
60#define TX_NXTDESC_PTR 0x00 /* r */
61#define TX_CURBUF_ADDR 0x01 /* r */
62#define TX_CURBUF_LENGTH 0x02 /* r */
63#define TX_CURDESC_PTR 0x03 /* rw */
64#define TX_TAILDESC_PTR 0x04 /* rw */
65#define TX_CHNL_CTRL 0x05 /* rw */
66/*
67 0:7 24:31 IRQTimeout
68 8:15 16:23 IRQCount
69 16:20 11:15 Reserved
70 21 10 0
71 22 9 UseIntOnEnd
72 23 8 LdIRQCnt
73 24 7 IRQEn
74 25:28 3:6 Reserved
75 29 2 IrqErrEn
76 30 1 IrqDlyEn
77 31 0 IrqCoalEn
78*/
79#define CHNL_CTRL_IRQ_IOE (1 << 9)
80#define CHNL_CTRL_IRQ_EN (1 << 7)
81#define CHNL_CTRL_IRQ_ERR_EN (1 << 2)
82#define CHNL_CTRL_IRQ_DLY_EN (1 << 1)
83#define CHNL_CTRL_IRQ_COAL_EN (1 << 0)
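/* Illustrative helpers (not part of the original register definitions): per
 * the layout above, the IRQ timeout and IRQ count fields occupy bits 31:24
 * and 23:16 of the channel control word, so a complete value could be built
 * as, e.g., CHNL_CTRL_IRQ_TIMEOUT(0x10) | CHNL_CTRL_IRQ_COUNT(2) |
 * CHNL_CTRL_IRQ_EN | CHNL_CTRL_IRQ_DLY_EN | CHNL_CTRL_IRQ_COAL_EN. */
#define CHNL_CTRL_IRQ_TIMEOUT(x)	(((x) & 0xff) << 24)
#define CHNL_CTRL_IRQ_COUNT(x)		(((x) & 0xff) << 16)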
84#define TX_IRQ_REG 0x06 /* rw */
85/*
86 0:7 24:31 DltTmrValue
87 8:15 16:23 ClscCntrValue
88 16:17 14:15 Reserved
89 18:21 10:13 ClscCnt
90 22:23 8:9 DlyCnt
91 24:28 3::7 Reserved
92 29 2 ErrIrq
93 30 1 DlyIrq
94 31 0 CoalIrq
95 */
96#define TX_CHNL_STS 0x07 /* r */
97/*
98 0:9 22:31 Reserved
99 10 21 TailPErr
100 11 20 CmpErr
101 12 19 AddrErr
102 13 18 NxtPErr
103 14 17 CurPErr
104 15 16 BsyWr
105 16:23 8:15 Reserved
106 24 7 Error
107 25 6 IOE
108 26 5 SOE
109 27 4 Cmplt
110 28 3 SOP
111 29 2 EOP
112 30 1 EngBusy
113 31 0 Reserved
114*/
115
116#define RX_NXTDESC_PTR 0x08 /* r */
117#define RX_CURBUF_ADDR 0x09 /* r */
118#define RX_CURBUF_LENGTH 0x0a /* r */
119#define RX_CURDESC_PTR 0x0b /* rw */
120#define RX_TAILDESC_PTR 0x0c /* rw */
121#define RX_CHNL_CTRL 0x0d /* rw */
122/*
123 0:7 24:31 IRQTimeout
124 8:15 16:23 IRQCount
125 16:20 11:15 Reserved
126 21 10 0
127 22 9 UseIntOnEnd
128 23 8 LdIRQCnt
129 24 7 IRQEn
130 25:28 3:6 Reserved
131 29 2 IrqErrEn
132 30 1 IrqDlyEn
133 31 0 IrqCoalEn
134 */
135#define RX_IRQ_REG 0x0e /* rw */
136#define IRQ_COAL (1 << 0)
137#define IRQ_DLY (1 << 1)
138#define IRQ_ERR (1 << 2)
139#define IRQ_DMAERR (1 << 7) /* this is not documented ??? */
140/*
141 0:7 24:31 DltTmrValue
142 8:15 16:23 ClscCntrValue
143 16:17 14:15 Reserved
144 18:21 10:13 ClscCnt
145 22:23 8:9 DlyCnt
146 24:28 3:7 Reserved
147*/
148#define RX_CHNL_STS 0x0f /* r */
149#define CHNL_STS_ENGBUSY (1 << 1)
150#define CHNL_STS_EOP (1 << 2)
151#define CHNL_STS_SOP (1 << 3)
152#define CHNL_STS_CMPLT (1 << 4)
153#define CHNL_STS_SOE (1 << 5)
154#define CHNL_STS_IOE (1 << 6)
155#define CHNL_STS_ERR (1 << 7)
156
157#define CHNL_STS_BSYWR (1 << 16)
158#define CHNL_STS_CURPERR (1 << 17)
159#define CHNL_STS_NXTPERR (1 << 18)
160#define CHNL_STS_ADDRERR (1 << 19)
161#define CHNL_STS_CMPERR (1 << 20)
162#define CHNL_STS_TAILERR (1 << 21)
163/*
164 0:9 22:31 Reserved
165 10 21 TailPErr
166 11 20 CmpErr
167 12 19 AddrErr
168 13 18 NxtPErr
169 14 17 CurPErr
170 15 16 BsyWr
171 16:23 8:15 Reserved
172 24 7 Error
173 25 6 IOE
174 26 5 SOE
175 27 4 Cmplt
176 28 3 SOP
177 29 2 EOP
178 30 1 EngBusy
179 31 0 Reserved
180*/
181
182#define DMA_CONTROL_REG 0x10 /* rw */
183#define DMA_CONTROL_RST (1 << 0)
184#define DMA_TAIL_ENABLE (1 << 2)
185
186/* XPS_LL_TEMAC direct registers definition */
187
188#define XTE_RAF0_OFFSET 0x00
189#define RAF0_RST (1 << 0)
190#define RAF0_MCSTREJ (1 << 1)
191#define RAF0_BCSTREJ (1 << 2)
192#define XTE_TPF0_OFFSET 0x04
193#define XTE_IFGP0_OFFSET 0x08
194#define XTE_ISR0_OFFSET 0x0c
195#define ISR0_HARDACSCMPLT (1 << 0)
196#define ISR0_AUTONEG (1 << 1)
197#define ISR0_RXCMPLT (1 << 2)
198#define ISR0_RXREJ (1 << 3)
199#define ISR0_RXFIFOOVR (1 << 4)
200#define ISR0_TXCMPLT (1 << 5)
201#define ISR0_RXDCMLCK (1 << 6)
202
203#define XTE_IPR0_OFFSET 0x10
204#define XTE_IER0_OFFSET 0x14
205
206#define XTE_MSW0_OFFSET 0x20
207#define XTE_LSW0_OFFSET 0x24
208#define XTE_CTL0_OFFSET 0x28
209#define XTE_RDY0_OFFSET 0x2c
210
211#define XTE_RSE_MIIM_RR_MASK 0x0002
212#define XTE_RSE_MIIM_WR_MASK 0x0004
213#define XTE_RSE_CFG_RR_MASK 0x0020
214#define XTE_RSE_CFG_WR_MASK 0x0040
215#define XTE_RDY0_HARD_ACS_RDY_MASK (0x10000)
216
217/* XPS_LL_TEMAC indirect registers offset definition */
218
219#define XTE_RXC0_OFFSET 0x00000200 /* Rx configuration word 0 */
220#define XTE_RXC1_OFFSET 0x00000240 /* Rx configuration word 1 */
221#define XTE_RXC1_RXRST_MASK (1 << 31) /* Receiver reset */
222#define XTE_RXC1_RXJMBO_MASK (1 << 30) /* Jumbo frame enable */
223#define XTE_RXC1_RXFCS_MASK (1 << 29) /* FCS not stripped */
224#define XTE_RXC1_RXEN_MASK (1 << 28) /* Receiver enable */
225#define XTE_RXC1_RXVLAN_MASK (1 << 27) /* VLAN enable */
226#define XTE_RXC1_RXHD_MASK (1 << 26) /* Half duplex */
227#define XTE_RXC1_RXLT_MASK (1 << 25) /* Length/type check disable */
228
229#define XTE_TXC_OFFSET 0x00000280 /* Tx configuration */
230#define XTE_TXC_TXRST_MASK (1 << 31) /* Transmitter reset */
231#define XTE_TXC_TXJMBO_MASK (1 << 30) /* Jumbo frame enable */
232#define XTE_TXC_TXFCS_MASK (1 << 29) /* Generate FCS */
233#define XTE_TXC_TXEN_MASK (1 << 28) /* Transmitter enable */
234#define XTE_TXC_TXVLAN_MASK (1 << 27) /* VLAN enable */
235#define XTE_TXC_TXHD_MASK (1 << 26) /* Half duplex */
236
237#define XTE_FCC_OFFSET 0x000002C0 /* Flow control config */
238#define XTE_FCC_RXFLO_MASK (1 << 29) /* Rx flow control enable */
239#define XTE_FCC_TXFLO_MASK (1 << 30) /* Tx flow control enable */
240
241#define XTE_EMCFG_OFFSET 0x00000300 /* EMAC configuration */
242#define XTE_EMCFG_LINKSPD_MASK 0xC0000000 /* Link speed */
243#define XTE_EMCFG_HOSTEN_MASK (1 << 26) /* Host interface enable */
244#define XTE_EMCFG_LINKSPD_10 0x00000000 /* 10 Mbit LINKSPD_MASK */
245#define XTE_EMCFG_LINKSPD_100 (1 << 30) /* 100 Mbit LINKSPD_MASK */
246#define XTE_EMCFG_LINKSPD_1000 (1 << 31) /* 1000 Mbit LINKSPD_MASK */
247
248#define XTE_GMIC_OFFSET 0x00000320 /* RGMII/SGMII config */
249#define XTE_MC_OFFSET 0x00000340 /* MDIO configuration */
250#define XTE_UAW0_OFFSET 0x00000380 /* Unicast address word 0 */
251#define XTE_UAW1_OFFSET 0x00000384 /* Unicast address word 1 */
252
253#define XTE_MAW0_OFFSET 0x00000388 /* Multicast addr word 0 */
254#define XTE_MAW1_OFFSET 0x0000038C /* Multicast addr word 1 */
255#define XTE_AFM_OFFSET 0x00000390 /* Promiscuous mode */
256#define XTE_AFM_EPPRM_MASK (1 << 31) /* Promiscuous mode enable */
257
258/* Interrupt Request status */
259#define XTE_TIS_OFFSET 0x000003A0
260#define TIS_FRIS (1 << 0)
261#define TIS_MRIS (1 << 1)
262#define TIS_MWIS (1 << 2)
263#define TIS_ARIS (1 << 3)
264#define TIS_AWIS (1 << 4)
265#define TIS_CRIS (1 << 5)
266#define TIS_CWIS (1 << 6)
267
268#define XTE_TIE_OFFSET 0x000003A4 /* Interrupt enable */
269
270/** MII Management Control register (MGTCR) */
271#define XTE_MGTDR_OFFSET 0x000003B0 /* MII data */
272#define XTE_MIIMAI_OFFSET 0x000003B4 /* MII control */
273
274#define CNTLREG_WRITE_ENABLE_MASK 0x8000
275#define CNTLREG_EMAC1SEL_MASK 0x0400
276#define CNTLREG_ADDRESSCODE_MASK 0x03ff
277
278/* CDMAC descriptor status bit definitions */
279
280#define STS_CTRL_APP0_ERR (1 << 31)
281#define STS_CTRL_APP0_IRQONEND (1 << 30)
282/* undocumented */
283#define STS_CTRL_APP0_STOPONEND (1 << 29)
284#define STS_CTRL_APP0_CMPLT (1 << 28)
285#define STS_CTRL_APP0_SOP (1 << 27)
286#define STS_CTRL_APP0_EOP (1 << 26)
287#define STS_CTRL_APP0_ENGBUSY (1 << 25)
288/* undocumented */
289#define STS_CTRL_APP0_ENGRST (1 << 24)
290
291#define TX_CONTROL_CALC_CSUM_MASK 1
292
293#define XTE_ALIGN 32
294#define BUFFER_ALIGN(adr) ((XTE_ALIGN - ((u32) adr)) % XTE_ALIGN)
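/* Worked example: with XTE_ALIGN = 32, an skb->data address ending in 0x04
 * yields BUFFER_ALIGN(adr) = (32 - 4) % 32 = 28, so skb_reserve(skb, 28)
 * advances the data pointer to the next 32-byte boundary. */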
295
296#define MULTICAST_CAM_TABLE_NUM 4
297
298/* TX/RX CURDESC_PTR points to first descriptor */
299/* TX/RX TAILDESC_PTR points to last descriptor in linked list */
300
301/**
302 * struct cdmac_bd - LocalLink buffer descriptor format
303 *
304 * app0 bits:
305 * 0 Error
306 * 1 IrqOnEnd generate an interrupt at completion of DMA op
307 * 2 reserved
308 * 3 completed Current descriptor completed
309 * 4 SOP TX marks first desc / RX marks first desc
310 * 5 EOP TX marks last desc/RX marks last desc
311 * 6 EngBusy DMA is processing
312 * 7 reserved
313 * 8:31 application specific
314 */
315struct cdmac_bd {
316 u32 next; /* Physical address of next buffer descriptor */
317 u32 phys;
318 u32 len;
319 u32 app0;
320 u32 app1; /* TX start << 16 | insert */
321 u32 app2; /* TX csum */
322 u32 app3;
323 u32 app4; /* skb for TX length for RX */
324};
325
326struct temac_local {
327 struct net_device *ndev;
328 struct device *dev;
329
330 /* Connection to PHY device */
331 struct phy_device *phy_dev; /* Pointer to PHY device */
332 struct device_node *phy_node;
333
334 /* MDIO bus data */
335 struct mii_bus *mii_bus; /* MII bus reference */
336 int mdio_irqs[PHY_MAX_ADDR]; /* IRQs table for MDIO bus */
337
338 /* IO registers and IRQs */
339 void __iomem *regs;
340 dcr_host_t sdma_dcrs;
341 int tx_irq;
342 int rx_irq;
343 int emac_num;
344
345 struct sk_buff **rx_skb;
346 spinlock_t rx_lock;
347 struct mutex indirect_mutex;
348 u32 options; /* Current options word */
349 int last_link;
350
351 /* Buffer descriptors */
352 struct cdmac_bd *tx_bd_v;
353 dma_addr_t tx_bd_p;
354 struct cdmac_bd *rx_bd_v;
355 dma_addr_t rx_bd_p;
356 int tx_bd_ci;
357 int tx_bd_next;
358 int tx_bd_tail;
359 int rx_bd_ci;
360};
361
362/* ll_temac_main.c */
363u32 temac_ior(struct temac_local *lp, int offset);
364void temac_iow(struct temac_local *lp, int offset, u32 value);
365int temac_indirect_busywait(struct temac_local *lp);
366u32 temac_indirect_in32(struct temac_local *lp, int reg);
367void temac_indirect_out32(struct temac_local *lp, int reg, u32 value);
368
369
370/* ll_temac_mdio.c */
371int temac_mdio_setup(struct temac_local *lp, struct device_node *np);
372void temac_mdio_teardown(struct temac_local *lp);
373
374#endif /* XILINX_LL_TEMAC_H */
diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c
new file mode 100644
index 000000000000..96e7248876c1
--- /dev/null
+++ b/drivers/net/ll_temac_main.c
@@ -0,0 +1,969 @@
1/*
2 * Driver for Xilinx TEMAC Ethernet device
3 *
4 * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi
5 * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net>
6 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
7 *
8 * This is a driver for the Xilinx ll_temac ipcore which is often used
9 * in the Virtex and Spartan series of chips.
10 *
11 * Notes:
12 * - The ll_temac hardware uses indirect access for many of the TEMAC
13 * registers, including the MDIO bus. However, indirect access to MDIO
14 * registers takes considerably more clock cycles than access to TEMAC registers.
15 * MDIO accesses are long, so threads doing them should probably sleep
16 * rather than busywait. However, since only one indirect access can be
17 * in progress at any given time, that means that *all* indirect accesses
18 * could end up sleeping (to wait for an MDIO access to complete).
19 * Fortunately none of the indirect accesses are on the 'hot' path for tx
20 * or rx, so this should be okay.
21 *
22 * TODO:
23 * - Fix driver to work on more than just Virtex5. Right now the driver
24 * assumes that the locallink DMA registers are accessed via DCR
25 * instructions.
26 * - Factor out locallink DMA code into separate driver
27 * - Fix multicast assignment.
28 * - Fix support for hardware checksumming.
29 * - Testing. Lots and lots of testing.
30 *
31 */
32
33#include <linux/delay.h>
34#include <linux/etherdevice.h>
35#include <linux/init.h>
36#include <linux/mii.h>
37#include <linux/module.h>
38#include <linux/mutex.h>
39#include <linux/netdevice.h>
40#include <linux/of.h>
41#include <linux/of_device.h>
42#include <linux/of_mdio.h>
43#include <linux/of_platform.h>
44#include <linux/skbuff.h>
45#include <linux/spinlock.h>
46#include <linux/tcp.h> /* needed for sizeof(tcphdr) */
47#include <linux/udp.h> /* needed for sizeof(udphdr) */
48#include <linux/phy.h>
49#include <linux/in.h>
50#include <linux/io.h>
51#include <linux/ip.h>
52
53#include "ll_temac.h"
54
55#define TX_BD_NUM 64
56#define RX_BD_NUM 128
57
58/* ---------------------------------------------------------------------
59 * Low level register access functions
60 */
61
62u32 temac_ior(struct temac_local *lp, int offset)
63{
64 return in_be32((u32 *)(lp->regs + offset));
65}
66
67void temac_iow(struct temac_local *lp, int offset, u32 value)
68{
69 out_be32((u32 *) (lp->regs + offset), value);
70}
71
72int temac_indirect_busywait(struct temac_local *lp)
73{
74 long end = jiffies + 2;
75
76 while (!(temac_ior(lp, XTE_RDY0_OFFSET) & XTE_RDY0_HARD_ACS_RDY_MASK)) {
77 if (end - jiffies <= 0) {
78 WARN_ON(1);
79 return -ETIMEDOUT;
80 }
81 msleep(1);
82 }
83 return 0;
84}
85
86/**
87 * temac_indirect_in32
88 *
89 * lp->indirect_mutex must be held when calling this function
90 */
91u32 temac_indirect_in32(struct temac_local *lp, int reg)
92{
93 u32 val;
94
95 if (temac_indirect_busywait(lp))
96 return -ETIMEDOUT;
97 temac_iow(lp, XTE_CTL0_OFFSET, reg);
98 if (temac_indirect_busywait(lp))
99 return -ETIMEDOUT;
100 val = temac_ior(lp, XTE_LSW0_OFFSET);
101
102 return val;
103}
104
105/**
106 * temac_indirect_out32
107 *
108 * lp->indirect_mutex must be held when calling this function
109 */
110void temac_indirect_out32(struct temac_local *lp, int reg, u32 value)
111{
112 if (temac_indirect_busywait(lp))
113 return;
114 temac_iow(lp, XTE_LSW0_OFFSET, value);
115 temac_iow(lp, XTE_CTL0_OFFSET, CNTLREG_WRITE_ENABLE_MASK | reg);
116}
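
/* A minimal usage sketch (illustrative, not called by this driver): any
 * read-modify-write of an indirect TEMAC register must hold
 * lp->indirect_mutex around the paired in32/out32 calls, for example: */
static inline void temac_indirect_set_bits(struct temac_local *lp, int reg,
					   u32 mask)
{
	u32 val;

	mutex_lock(&lp->indirect_mutex);
	val = temac_indirect_in32(lp, reg);
	temac_indirect_out32(lp, reg, val | mask);
	mutex_unlock(&lp->indirect_mutex);
}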
117
118static u32 temac_dma_in32(struct temac_local *lp, int reg)
119{
120 return dcr_read(lp->sdma_dcrs, reg);
121}
122
123static void temac_dma_out32(struct temac_local *lp, int reg, u32 value)
124{
125 dcr_write(lp->sdma_dcrs, reg, value);
126}
127
128/**
129 * temac_dma_bd_init - Setup buffer descriptor rings
130 */
131static int temac_dma_bd_init(struct net_device *ndev)
132{
133 struct temac_local *lp = netdev_priv(ndev);
134 struct sk_buff *skb;
135 int i;
136
137 lp->rx_skb = kzalloc(sizeof(*lp->rx_skb) * RX_BD_NUM, GFP_KERNEL);
138 /* allocate the tx and rx ring buffer descriptors. */
139 /* returns a virtual address and a physical address. */
140 lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
141 sizeof(*lp->tx_bd_v) * TX_BD_NUM,
142 &lp->tx_bd_p, GFP_KERNEL);
143 lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
144 sizeof(*lp->rx_bd_v) * RX_BD_NUM,
145 &lp->rx_bd_p, GFP_KERNEL);
146
147 memset(lp->tx_bd_v, 0, sizeof(*lp->tx_bd_v) * TX_BD_NUM);
148 for (i = 0; i < TX_BD_NUM; i++) {
149 lp->tx_bd_v[i].next = lp->tx_bd_p +
150 sizeof(*lp->tx_bd_v) * ((i + 1) % TX_BD_NUM);
151 }
152
153 memset(lp->rx_bd_v, 0, sizeof(*lp->rx_bd_v) * RX_BD_NUM);
154 for (i = 0; i < RX_BD_NUM; i++) {
155 lp->rx_bd_v[i].next = lp->rx_bd_p +
156 sizeof(*lp->rx_bd_v) * ((i + 1) % RX_BD_NUM);
157
158 skb = alloc_skb(XTE_MAX_JUMBO_FRAME_SIZE
159 + XTE_ALIGN, GFP_ATOMIC);
160 if (!skb) {
161 dev_err(&ndev->dev, "alloc_skb error %d\n", i);
162 return -ENOMEM;
163 }
164 lp->rx_skb[i] = skb;
165 skb_reserve(skb, BUFFER_ALIGN(skb->data));
166 /* returns physical address of skb->data */
167 lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
168 skb->data,
169 XTE_MAX_JUMBO_FRAME_SIZE,
170 DMA_FROM_DEVICE);
171 lp->rx_bd_v[i].len = XTE_MAX_JUMBO_FRAME_SIZE;
172 lp->rx_bd_v[i].app0 = STS_CTRL_APP0_IRQONEND;
173 }
174
175 temac_dma_out32(lp, TX_CHNL_CTRL, 0x10220400 |
176 CHNL_CTRL_IRQ_EN |
177 CHNL_CTRL_IRQ_DLY_EN |
178 CHNL_CTRL_IRQ_COAL_EN);
179 /* 0x10220483 */
180 /* 0x00100483 */
181 temac_dma_out32(lp, RX_CHNL_CTRL, 0xff010000 |
182 CHNL_CTRL_IRQ_EN |
183 CHNL_CTRL_IRQ_DLY_EN |
184 CHNL_CTRL_IRQ_COAL_EN |
185 CHNL_CTRL_IRQ_IOE);
186 /* 0xff010283 */
187
188 temac_dma_out32(lp, RX_CURDESC_PTR, lp->rx_bd_p);
189 temac_dma_out32(lp, RX_TAILDESC_PTR,
190 lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
191 temac_dma_out32(lp, TX_CURDESC_PTR, lp->tx_bd_p);
192
193 return 0;
194}
195
196/* ---------------------------------------------------------------------
197 * net_device_ops
198 */
199
200static int temac_set_mac_address(struct net_device *ndev, void *address)
201{
202 struct temac_local *lp = netdev_priv(ndev);
203
204 if (address)
205 memcpy(ndev->dev_addr, address, ETH_ALEN);
206
207 if (!is_valid_ether_addr(ndev->dev_addr))
208 random_ether_addr(ndev->dev_addr);
209
210 /* Set up the unicast MAC address filter with the given MAC address */
211 mutex_lock(&lp->indirect_mutex);
212 temac_indirect_out32(lp, XTE_UAW0_OFFSET,
213 (ndev->dev_addr[0]) |
214 (ndev->dev_addr[1] << 8) |
215 (ndev->dev_addr[2] << 16) |
216 (ndev->dev_addr[3] << 24));
217 /* There are reserved bits in EUAW1
218 * so don't affect them. Set MAC bits [47:32] in EUAW1 */
219 temac_indirect_out32(lp, XTE_UAW1_OFFSET,
220 (ndev->dev_addr[4] & 0x000000ff) |
221 (ndev->dev_addr[5] << 8));
222 mutex_unlock(&lp->indirect_mutex);
223
224 return 0;
225}
226
227static void temac_set_multicast_list(struct net_device *ndev)
228{
229 struct temac_local *lp = netdev_priv(ndev);
230 u32 multi_addr_msw, multi_addr_lsw, val;
231 int i;
232
233 mutex_lock(&lp->indirect_mutex);
234 if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC)
235 || ndev->mc_count > MULTICAST_CAM_TABLE_NUM) {
236 /*
237 * We must make the kernel realise we had to move
238 * into promisc mode or we start all out war on
239 * the cable. If it was a promisc request the
240 * flag is already set. If not we assert it.
241 */
242 ndev->flags |= IFF_PROMISC;
243 temac_indirect_out32(lp, XTE_AFM_OFFSET, XTE_AFM_EPPRM_MASK);
244 dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
245 } else if (ndev->mc_count) {
246 struct dev_mc_list *mclist = ndev->mc_list;
247 for (i = 0; mclist && i < ndev->mc_count; i++) {
248
249 if (i >= MULTICAST_CAM_TABLE_NUM)
250 break;
251 multi_addr_msw = ((mclist->dmi_addr[3] << 24) |
252 (mclist->dmi_addr[2] << 16) |
253 (mclist->dmi_addr[1] << 8) |
254 (mclist->dmi_addr[0]));
255 temac_indirect_out32(lp, XTE_MAW0_OFFSET,
256 multi_addr_msw);
257 multi_addr_lsw = ((mclist->dmi_addr[5] << 8) |
258 (mclist->dmi_addr[4]) | (i << 16));
259 temac_indirect_out32(lp, XTE_MAW1_OFFSET,
260 multi_addr_lsw);
261 mclist = mclist->next;
262 }
263 } else {
264 val = temac_indirect_in32(lp, XTE_AFM_OFFSET);
265 temac_indirect_out32(lp, XTE_AFM_OFFSET,
266 val & ~XTE_AFM_EPPRM_MASK);
267 temac_indirect_out32(lp, XTE_MAW0_OFFSET, 0);
268 temac_indirect_out32(lp, XTE_MAW1_OFFSET, 0);
269 dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
270 }
271 mutex_unlock(&lp->indirect_mutex);
272}
273
274struct temac_option {
275 int flg;
276 u32 opt;
277 u32 reg;
278 u32 m_or;
279 u32 m_and;
280} temac_options[] = {
281 /* Turn on jumbo packet support for both Rx and Tx */
282 {
283 .opt = XTE_OPTION_JUMBO,
284 .reg = XTE_TXC_OFFSET,
285 .m_or = XTE_TXC_TXJMBO_MASK,
286 },
287 {
288 .opt = XTE_OPTION_JUMBO,
289 .reg = XTE_RXC1_OFFSET,
290 .m_or = XTE_RXC1_RXJMBO_MASK,
291 },
292 /* Turn on VLAN packet support for both Rx and Tx */
293 {
294 .opt = XTE_OPTION_VLAN,
295 .reg = XTE_TXC_OFFSET,
296 .m_or = XTE_TXC_TXVLAN_MASK,
297 },
298 {
299 .opt = XTE_OPTION_VLAN,
300 .reg = XTE_RXC1_OFFSET,
301 .m_or = XTE_RXC1_RXVLAN_MASK,
302 },
303 /* Turn on FCS stripping on receive packets */
304 {
305 .opt = XTE_OPTION_FCS_STRIP,
306 .reg = XTE_RXC1_OFFSET,
307 .m_or = XTE_RXC1_RXFCS_MASK,
308 },
309 /* Turn on FCS insertion on transmit packets */
310 {
311 .opt = XTE_OPTION_FCS_INSERT,
312 .reg = XTE_TXC_OFFSET,
313 .m_or = XTE_TXC_TXFCS_MASK,
314 },
315 /* Turn on length/type field checking on receive packets */
316 {
317 .opt = XTE_OPTION_LENTYPE_ERR,
318 .reg = XTE_RXC1_OFFSET,
319 .m_or = XTE_RXC1_RXLT_MASK,
320 },
321 /* Turn on flow control */
322 {
323 .opt = XTE_OPTION_FLOW_CONTROL,
324 .reg = XTE_FCC_OFFSET,
325 .m_or = XTE_FCC_RXFLO_MASK,
326 },
327 /* Turn on flow control */
328 {
329 .opt = XTE_OPTION_FLOW_CONTROL,
330 .reg = XTE_FCC_OFFSET,
331 .m_or = XTE_FCC_TXFLO_MASK,
332 },
333 /* Turn on promiscuous frame filtering (all frames are received) */
334 {
335 .opt = XTE_OPTION_PROMISC,
336 .reg = XTE_AFM_OFFSET,
337 .m_or = XTE_AFM_EPPRM_MASK,
338 },
339 /* Enable transmitter if not already enabled */
340 {
341 .opt = XTE_OPTION_TXEN,
342 .reg = XTE_TXC_OFFSET,
343 .m_or = XTE_TXC_TXEN_MASK,
344 },
345 /* Enable receiver if not already enabled */
346 {
347 .opt = XTE_OPTION_RXEN,
348 .reg = XTE_RXC1_OFFSET,
349 .m_or = XTE_RXC1_RXEN_MASK,
350 },
351 {}
352};
353
354/**
355 * temac_setoptions
356 */
357static u32 temac_setoptions(struct net_device *ndev, u32 options)
358{
359 struct temac_local *lp = netdev_priv(ndev);
360 struct temac_option *tp = &temac_options[0];
361 int reg;
362
363 mutex_lock(&lp->indirect_mutex);
364 while (tp->opt) {
365 reg = temac_indirect_in32(lp, tp->reg) & ~tp->m_or;
366 if (options & tp->opt)
367 reg |= tp->m_or;
368 temac_indirect_out32(lp, tp->reg, reg);
369 tp++;
370 }
371 lp->options |= options;
372 mutex_unlock(&lp->indirect_mutex);
373
374 return 0;
375}
376
377/* Initialize temac */
378static void temac_device_reset(struct net_device *ndev)
379{
380 struct temac_local *lp = netdev_priv(ndev);
381 u32 timeout;
382 u32 val;
383
384 /* Perform a software reset */
385
386 /* 0x300 host enable bit ? */
387 /* reset PHY through control register ?:1 */
388
389 dev_dbg(&ndev->dev, "%s()\n", __func__);
390
391 mutex_lock(&lp->indirect_mutex);
392 /* Reset the receiver and wait for it to finish reset */
393 temac_indirect_out32(lp, XTE_RXC1_OFFSET, XTE_RXC1_RXRST_MASK);
394 timeout = 1000;
395 while (temac_indirect_in32(lp, XTE_RXC1_OFFSET) & XTE_RXC1_RXRST_MASK) {
396 udelay(1);
397 if (--timeout == 0) {
398 dev_err(&ndev->dev,
399 "temac_device_reset RX reset timeout!!\n");
400 break;
401 }
402 }
403
404 /* Reset the transmitter and wait for it to finish reset */
405 temac_indirect_out32(lp, XTE_TXC_OFFSET, XTE_TXC_TXRST_MASK);
406 timeout = 1000;
407 while (temac_indirect_in32(lp, XTE_TXC_OFFSET) & XTE_TXC_TXRST_MASK) {
408 udelay(1);
409 if (--timeout == 0) {
410 dev_err(&ndev->dev,
411 "temac_device_reset TX reset timeout!!\n");
412 break;
413 }
414 }
415
416 /* Disable the receiver */
417 val = temac_indirect_in32(lp, XTE_RXC1_OFFSET);
418 temac_indirect_out32(lp, XTE_RXC1_OFFSET, val & ~XTE_RXC1_RXEN_MASK);
419
420 /* Reset Local Link (DMA) */
421 temac_dma_out32(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);
422 timeout = 1000;
423 while (temac_dma_in32(lp, DMA_CONTROL_REG) & DMA_CONTROL_RST) {
424 udelay(1);
425 if (--timeout == 0) {
426 dev_err(&ndev->dev,
427 "temac_device_reset DMA reset timeout!!\n");
428 break;
429 }
430 }
431 temac_dma_out32(lp, DMA_CONTROL_REG, DMA_TAIL_ENABLE);
432
433 temac_dma_bd_init(ndev);
434
435 temac_indirect_out32(lp, XTE_RXC0_OFFSET, 0);
436 temac_indirect_out32(lp, XTE_RXC1_OFFSET, 0);
437 temac_indirect_out32(lp, XTE_TXC_OFFSET, 0);
438 temac_indirect_out32(lp, XTE_FCC_OFFSET, XTE_FCC_RXFLO_MASK);
439
440 mutex_unlock(&lp->indirect_mutex);
441
442 /* Sync default options with HW
443 * but leave receiver and transmitter disabled. */
444 temac_setoptions(ndev,
445 lp->options & ~(XTE_OPTION_TXEN | XTE_OPTION_RXEN));
446
447 temac_set_mac_address(ndev, NULL);
448
449 /* Set address filter table */
450 temac_set_multicast_list(ndev);
451 if (temac_setoptions(ndev, lp->options))
452 dev_err(&ndev->dev, "Error setting TEMAC options\n");
453
454 /* Init Driver variable */
455 ndev->trans_start = 0;
456}
457
458void temac_adjust_link(struct net_device *ndev)
459{
460 struct temac_local *lp = netdev_priv(ndev);
461 struct phy_device *phy = lp->phy_dev;
462 u32 mii_speed;
463 int link_state;
464
465 /* hash together the state values to decide if something has changed */
466 link_state = phy->speed | (phy->duplex << 1) | phy->link;
467
468 mutex_lock(&lp->indirect_mutex);
469 if (lp->last_link != link_state) {
470 mii_speed = temac_indirect_in32(lp, XTE_EMCFG_OFFSET);
471 mii_speed &= ~XTE_EMCFG_LINKSPD_MASK;
472
473 switch (phy->speed) {
474 case SPEED_1000: mii_speed |= XTE_EMCFG_LINKSPD_1000; break;
475 case SPEED_100: mii_speed |= XTE_EMCFG_LINKSPD_100; break;
476 case SPEED_10: mii_speed |= XTE_EMCFG_LINKSPD_10; break;
477 }
478
479 /* Write new speed setting out to TEMAC */
480 temac_indirect_out32(lp, XTE_EMCFG_OFFSET, mii_speed);
481 lp->last_link = link_state;
482 phy_print_status(phy);
483 }
484 mutex_unlock(&lp->indirect_mutex);
485}
486
487static void temac_start_xmit_done(struct net_device *ndev)
488{
489 struct temac_local *lp = netdev_priv(ndev);
490 struct cdmac_bd *cur_p;
491 unsigned int stat = 0;
492
493 cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
494 stat = cur_p->app0;
495
496 while (stat & STS_CTRL_APP0_CMPLT) {
497 dma_unmap_single(ndev->dev.parent, cur_p->phys, cur_p->len,
498 DMA_TO_DEVICE);
499 if (cur_p->app4)
500 dev_kfree_skb_irq((struct sk_buff *)cur_p->app4);
501 cur_p->app0 = 0;
502
503 ndev->stats.tx_packets++;
504 ndev->stats.tx_bytes += cur_p->len;
505
506 lp->tx_bd_ci++;
507 if (lp->tx_bd_ci >= TX_BD_NUM)
508 lp->tx_bd_ci = 0;
509
510 cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
511 stat = cur_p->app0;
512 }
513
514 netif_wake_queue(ndev);
515}
516
517static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
518{
519 struct temac_local *lp = netdev_priv(ndev);
520 struct cdmac_bd *cur_p;
521 dma_addr_t start_p, tail_p;
522 int ii;
523 unsigned long num_frag;
524 skb_frag_t *frag;
525
526 num_frag = skb_shinfo(skb)->nr_frags;
527 frag = &skb_shinfo(skb)->frags[0];
528 start_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
529 cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
530
531 if (cur_p->app0 & STS_CTRL_APP0_CMPLT) {
532 if (!netif_queue_stopped(ndev)) {
533 netif_stop_queue(ndev);
534 return NETDEV_TX_BUSY;
535 }
536 return NETDEV_TX_BUSY;
537 }
538
539 cur_p->app0 = 0;
540 if (skb->ip_summed == CHECKSUM_PARTIAL) {
541 const struct iphdr *ip = ip_hdr(skb);
542 int length = 0, start = 0, insert = 0;
543
544 switch (ip->protocol) {
545 case IPPROTO_TCP:
546 start = sizeof(struct iphdr) + ETH_HLEN;
547 insert = sizeof(struct iphdr) + ETH_HLEN + 16;
548 length = ip->tot_len - sizeof(struct iphdr);
549 break;
550 case IPPROTO_UDP:
551 start = sizeof(struct iphdr) + ETH_HLEN;
552 insert = sizeof(struct iphdr) + ETH_HLEN + 6;
553 length = ip->tot_len - sizeof(struct iphdr);
554 break;
555 default:
556 break;
557 }
558 cur_p->app1 = ((start << 16) | insert);
559 cur_p->app2 = csum_tcpudp_magic(ip->saddr, ip->daddr,
560 length, ip->protocol, 0);
561 skb->data[insert] = 0;
562 skb->data[insert + 1] = 0;
563 }
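	/* Note: the 'insert' offsets above are the byte position of the L4
	 * checksum field within the frame, assuming no VLAN tag and no IP
	 * options: ETH_HLEN (14) + 20-byte IP header + offsetof(struct
	 * tcphdr, check) (16) = 50 for TCP, and 14 + 20 + offsetof(struct
	 * udphdr, check) (6) = 40 for UDP. */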
564 cur_p->app0 |= STS_CTRL_APP0_SOP;
565 cur_p->len = skb_headlen(skb);
566 cur_p->phys = dma_map_single(ndev->dev.parent, skb->data, skb_headlen(skb),
567 DMA_TO_DEVICE);
568 cur_p->app4 = (unsigned long)skb;
569
570 for (ii = 0; ii < num_frag; ii++) {
571 lp->tx_bd_tail++;
572 if (lp->tx_bd_tail >= TX_BD_NUM)
573 lp->tx_bd_tail = 0;
574
575 cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
576 cur_p->phys = dma_map_single(ndev->dev.parent,
577 (void *)page_address(frag->page) +
578 frag->page_offset,
579 frag->size, DMA_TO_DEVICE);
580 cur_p->len = frag->size;
581 cur_p->app0 = 0;
582 frag++;
583 }
584 cur_p->app0 |= STS_CTRL_APP0_EOP;
585
586 tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
587 lp->tx_bd_tail++;
588 if (lp->tx_bd_tail >= TX_BD_NUM)
589 lp->tx_bd_tail = 0;
590
591 /* Kick off the transfer */
592 temac_dma_out32(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */
593
594 return NETDEV_TX_OK;
595}
596
597
598static void ll_temac_recv(struct net_device *ndev)
599{
600 struct temac_local *lp = netdev_priv(ndev);
601 struct sk_buff *skb, *new_skb;
602 unsigned int bdstat;
603 struct cdmac_bd *cur_p;
604 dma_addr_t tail_p;
605 int length;
607 unsigned long flags;
608
609 spin_lock_irqsave(&lp->rx_lock, flags);
610
611 tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
612 cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
613
614 bdstat = cur_p->app0;
615 while ((bdstat & STS_CTRL_APP0_CMPLT)) {
616
617 skb = lp->rx_skb[lp->rx_bd_ci];
618 length = cur_p->app4;
619
620 dma_unmap_single(ndev->dev.parent, cur_p->phys,
621 XTE_MAX_JUMBO_FRAME_SIZE,
622 DMA_FROM_DEVICE);
623
624 skb_put(skb, length);
625 skb->dev = ndev;
626 skb->protocol = eth_type_trans(skb, ndev);
627 skb->ip_summed = CHECKSUM_NONE;
628
629 netif_rx(skb);
630
631 ndev->stats.rx_packets++;
632 ndev->stats.rx_bytes += length;
633
634 new_skb = alloc_skb(XTE_MAX_JUMBO_FRAME_SIZE + XTE_ALIGN,
635 GFP_ATOMIC);
636 if (!new_skb) {
637 dev_err(&ndev->dev, "no memory for new sk_buff\n");
638 spin_unlock_irqrestore(&lp->rx_lock, flags);
639 return;
640 }
641
642 skb_reserve(new_skb, BUFFER_ALIGN(new_skb->data));
643
644 cur_p->app0 = STS_CTRL_APP0_IRQONEND;
645 cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
646 XTE_MAX_JUMBO_FRAME_SIZE,
647 DMA_FROM_DEVICE);
648 cur_p->len = XTE_MAX_JUMBO_FRAME_SIZE;
649 lp->rx_skb[lp->rx_bd_ci] = new_skb;
650
651 lp->rx_bd_ci++;
652 if (lp->rx_bd_ci >= RX_BD_NUM)
653 lp->rx_bd_ci = 0;
654
655 cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
656 bdstat = cur_p->app0;
657 }
658 temac_dma_out32(lp, RX_TAILDESC_PTR, tail_p);
659
660 spin_unlock_irqrestore(&lp->rx_lock, flags);
661}
662
663static irqreturn_t ll_temac_tx_irq(int irq, void *_ndev)
664{
665 struct net_device *ndev = _ndev;
666 struct temac_local *lp = netdev_priv(ndev);
667 unsigned int status;
668
669 status = temac_dma_in32(lp, TX_IRQ_REG);
670 temac_dma_out32(lp, TX_IRQ_REG, status);
671
672 if (status & (IRQ_COAL | IRQ_DLY))
673 temac_start_xmit_done(lp->ndev);
674 if (status & IRQ_DMAERR)
675 dev_err(&ndev->dev, "DMA error 0x%x\n", status);
676
677 return IRQ_HANDLED;
678}
679
680static irqreturn_t ll_temac_rx_irq(int irq, void *_ndev)
681{
682 struct net_device *ndev = _ndev;
683 struct temac_local *lp = netdev_priv(ndev);
684 unsigned int status;
685
686 /* Read and clear the status registers */
687 status = temac_dma_in32(lp, RX_IRQ_REG);
688 temac_dma_out32(lp, RX_IRQ_REG, status);
689
690 if (status & (IRQ_COAL | IRQ_DLY))
691 ll_temac_recv(lp->ndev);
692
693 return IRQ_HANDLED;
694}
695
696static int temac_open(struct net_device *ndev)
697{
698 struct temac_local *lp = netdev_priv(ndev);
699 int rc;
700
701 dev_dbg(&ndev->dev, "temac_open()\n");
702
703 if (lp->phy_node) {
704 lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
705 temac_adjust_link, 0, 0);
706 if (!lp->phy_dev) {
707 dev_err(lp->dev, "of_phy_connect() failed\n");
708 return -ENODEV;
709 }
710
711 phy_start(lp->phy_dev);
712 }
713
714 rc = request_irq(lp->tx_irq, ll_temac_tx_irq, 0, ndev->name, ndev);
715 if (rc)
716 goto err_tx_irq;
717 rc = request_irq(lp->rx_irq, ll_temac_rx_irq, 0, ndev->name, ndev);
718 if (rc)
719 goto err_rx_irq;
720
721 temac_device_reset(ndev);
722 return 0;
723
724 err_rx_irq:
725 free_irq(lp->tx_irq, ndev);
726 err_tx_irq:
727 if (lp->phy_dev)
728 phy_disconnect(lp->phy_dev);
729 lp->phy_dev = NULL;
730 dev_err(lp->dev, "request_irq() failed\n");
731 return rc;
732}
733
734static int temac_stop(struct net_device *ndev)
735{
736 struct temac_local *lp = netdev_priv(ndev);
737
738 dev_dbg(&ndev->dev, "temac_close()\n");
739
740 free_irq(lp->tx_irq, ndev);
741 free_irq(lp->rx_irq, ndev);
742
743 if (lp->phy_dev)
744 phy_disconnect(lp->phy_dev);
745 lp->phy_dev = NULL;
746
747 return 0;
748}
749
750#ifdef CONFIG_NET_POLL_CONTROLLER
751static void
752temac_poll_controller(struct net_device *ndev)
753{
754 struct temac_local *lp = netdev_priv(ndev);
755
756 disable_irq(lp->tx_irq);
757 disable_irq(lp->rx_irq);
758
759 ll_temac_rx_irq(lp->rx_irq, ndev);
760 ll_temac_tx_irq(lp->tx_irq, ndev);
761
762 enable_irq(lp->tx_irq);
763 enable_irq(lp->rx_irq);
764}
765#endif
766
767static const struct net_device_ops temac_netdev_ops = {
768 .ndo_open = temac_open,
769 .ndo_stop = temac_stop,
770 .ndo_start_xmit = temac_start_xmit,
771 .ndo_set_mac_address = temac_set_mac_address,
772 //.ndo_set_multicast_list = temac_set_multicast_list,
773#ifdef CONFIG_NET_POLL_CONTROLLER
774 .ndo_poll_controller = temac_poll_controller,
775#endif
776};
777
778/* ---------------------------------------------------------------------
779 * SYSFS device attributes
780 */
781static ssize_t temac_show_llink_regs(struct device *dev,
782 struct device_attribute *attr, char *buf)
783{
784 struct net_device *ndev = dev_get_drvdata(dev);
785 struct temac_local *lp = netdev_priv(ndev);
786 int i, len = 0;
787
788 for (i = 0; i < 0x11; i++)
789 len += sprintf(buf + len, "%.8x%s", temac_dma_in32(lp, i),
790 (i % 8) == 7 ? "\n" : " ");
791 len += sprintf(buf + len, "\n");
792
793 return len;
794}
795
796static DEVICE_ATTR(llink_regs, 0440, temac_show_llink_regs, NULL);
797
798static struct attribute *temac_device_attrs[] = {
799 &dev_attr_llink_regs.attr,
800 NULL,
801};
802
803static const struct attribute_group temac_attr_group = {
804 .attrs = temac_device_attrs,
805};
806
807static int __devinit
808temac_of_probe(struct of_device *op, const struct of_device_id *match)
809{
810 struct device_node *np;
811 struct temac_local *lp;
812 struct net_device *ndev;
813 const void *addr;
814 int size, rc = 0;
815 unsigned int dcrs;
816
817 /* Init network device structure */
818 ndev = alloc_etherdev(sizeof(*lp));
819 if (!ndev) {
820 dev_err(&op->dev, "could not allocate device.\n");
821 return -ENOMEM;
822 }
823 ether_setup(ndev);
824 dev_set_drvdata(&op->dev, ndev);
825 SET_NETDEV_DEV(ndev, &op->dev);
826 ndev->flags &= ~IFF_MULTICAST; /* clear multicast */
827 ndev->features = NETIF_F_SG | NETIF_F_FRAGLIST;
828 ndev->netdev_ops = &temac_netdev_ops;
829#if 0
830 ndev->features |= NETIF_F_IP_CSUM; /* Can checksum TCP/UDP over IPv4. */
831 ndev->features |= NETIF_F_HW_CSUM; /* Can checksum all the packets. */
832 ndev->features |= NETIF_F_IPV6_CSUM; /* Can checksum IPV6 TCP/UDP */
833 ndev->features |= NETIF_F_HIGHDMA; /* Can DMA to high memory. */
834 ndev->features |= NETIF_F_HW_VLAN_TX; /* Transmit VLAN hw accel */
835 ndev->features |= NETIF_F_HW_VLAN_RX; /* Receive VLAN hw acceleration */
836 ndev->features |= NETIF_F_HW_VLAN_FILTER; /* Receive VLAN filtering */
837 ndev->features |= NETIF_F_VLAN_CHALLENGED; /* cannot handle VLAN pkts */
838 ndev->features |= NETIF_F_GSO; /* Enable software GSO. */
839 ndev->features |= NETIF_F_MULTI_QUEUE; /* Has multiple TX/RX queues */
840 ndev->features |= NETIF_F_LRO; /* large receive offload */
841#endif
842
843 /* setup temac private info structure */
844 lp = netdev_priv(ndev);
845 lp->ndev = ndev;
846 lp->dev = &op->dev;
847 lp->options = XTE_OPTION_DEFAULTS;
848 spin_lock_init(&lp->rx_lock);
849 mutex_init(&lp->indirect_mutex);
850
851 /* map device registers */
852 lp->regs = of_iomap(op->node, 0);
853 if (!lp->regs) {
854 dev_err(&op->dev, "could not map temac regs.\n");
855 goto nodev;
856 }
857
858 /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
859 np = of_parse_phandle(op->node, "llink-connected", 0);
860 if (!np) {
861 dev_err(&op->dev, "could not find DMA node\n");
862 goto nodev;
863 }
864
865 dcrs = dcr_resource_start(np, 0);
866 if (dcrs == 0) {
867 dev_err(&op->dev, "could not get DMA register address\n");
868 goto nodev;
869 }
870 lp->sdma_dcrs = dcr_map(np, dcrs, dcr_resource_len(np, 0));
871 dev_dbg(&op->dev, "DCR base: %x\n", dcrs);
872
873 lp->rx_irq = irq_of_parse_and_map(np, 0);
874 lp->tx_irq = irq_of_parse_and_map(np, 1);
875 if (!lp->rx_irq || !lp->tx_irq) {
876 dev_err(&op->dev, "could not determine irqs\n");
877 rc = -ENOMEM;
878 goto nodev;
879 }
880
881 of_node_put(np); /* Finished with the DMA node; drop the reference */
882
883 /* Retrieve the MAC address */
884 addr = of_get_property(op->node, "local-mac-address", &size);
885 if ((!addr) || (size != 6)) {
886 dev_err(&op->dev, "could not find MAC address\n");
887 rc = -ENODEV;
888 goto nodev;
889 }
890 temac_set_mac_address(ndev, (void *)addr);
891
892 rc = temac_mdio_setup(lp, op->node);
893 if (rc)
894 dev_warn(&op->dev, "error registering MDIO bus\n");
895
896 lp->phy_node = of_parse_phandle(op->node, "phy-handle", 0);
897 if (lp->phy_node)
898 dev_dbg(lp->dev, "using PHY node %s (%p)\n", lp->phy_node->full_name, lp->phy_node);
899
900 /* Add the device attributes */
901 rc = sysfs_create_group(&lp->dev->kobj, &temac_attr_group);
902 if (rc) {
903 dev_err(lp->dev, "Error creating sysfs files\n");
904 goto nodev;
905 }
906
907 rc = register_netdev(lp->ndev);
908 if (rc) {
909 dev_err(lp->dev, "register_netdev() error (%i)\n", rc);
910 goto err_register_ndev;
911 }
912
913 return 0;
914
915 err_register_ndev:
916 sysfs_remove_group(&lp->dev->kobj, &temac_attr_group);
917 nodev:
918 free_netdev(ndev);
919 ndev = NULL;
920 return rc;
921}
922
923static int __devexit temac_of_remove(struct of_device *op)
924{
925 struct net_device *ndev = dev_get_drvdata(&op->dev);
926 struct temac_local *lp = netdev_priv(ndev);
927
928 temac_mdio_teardown(lp);
929 unregister_netdev(ndev);
930 sysfs_remove_group(&lp->dev->kobj, &temac_attr_group);
931 if (lp->phy_node)
932 of_node_put(lp->phy_node);
933 lp->phy_node = NULL;
934 dev_set_drvdata(&op->dev, NULL);
935 free_netdev(ndev);
936 return 0;
937}
938
939static struct of_device_id temac_of_match[] __devinitdata = {
940 { .compatible = "xlnx,xps-ll-temac-1.01.b", },
941 {},
942};
943MODULE_DEVICE_TABLE(of, temac_of_match);
944
945static struct of_platform_driver temac_of_driver = {
946 .match_table = temac_of_match,
947 .probe = temac_of_probe,
948 .remove = __devexit_p(temac_of_remove),
949 .driver = {
950 .owner = THIS_MODULE,
951 .name = "xilinx_temac",
952 },
953};
954
955static int __init temac_init(void)
956{
957 return of_register_platform_driver(&temac_of_driver);
958}
959module_init(temac_init);
960
961static void __exit temac_exit(void)
962{
963 of_unregister_platform_driver(&temac_of_driver);
964}
965module_exit(temac_exit);
966
967MODULE_DESCRIPTION("Xilinx LL_TEMAC Ethernet driver");
968MODULE_AUTHOR("Yoshio Kashiwagi");
969MODULE_LICENSE("GPL");
diff --git a/drivers/net/ll_temac_mdio.c b/drivers/net/ll_temac_mdio.c
new file mode 100644
index 000000000000..da0e462308d5
--- /dev/null
+++ b/drivers/net/ll_temac_mdio.c
@@ -0,0 +1,120 @@
1/*
2 * MDIO bus driver for the Xilinx TEMAC device
3 *
4 * Copyright (c) 2009 Secret Lab Technologies, Ltd.
5 */
6
7#include <linux/io.h>
8#include <linux/netdevice.h>
9#include <linux/mutex.h>
10#include <linux/phy.h>
11#include <linux/of.h>
12#include <linux/of_device.h>
13#include <linux/of_mdio.h>
14
15#include "ll_temac.h"
16
17/* ---------------------------------------------------------------------
18 * MDIO Bus functions
19 */
20static int temac_mdio_read(struct mii_bus *bus, int phy_id, int reg)
21{
22 struct temac_local *lp = bus->priv;
23 u32 rc;
24
25 /* Write the PHY address to the MIIM Access Initiator register.
26 * When the transfer completes, the PHY register value will appear
27 * in the LSW0 register */
28 mutex_lock(&lp->indirect_mutex);
29 temac_iow(lp, XTE_LSW0_OFFSET, (phy_id << 5) | reg);
30 rc = temac_indirect_in32(lp, XTE_MIIMAI_OFFSET);
31 mutex_unlock(&lp->indirect_mutex);
32
33 dev_dbg(lp->dev, "temac_mdio_read(phy_id=%i, reg=%x) == %x\n",
34 phy_id, reg, rc);
35
36 return rc;
37}
38
39static int temac_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val)
40{
41 struct temac_local *lp = bus->priv;
42
43 dev_dbg(lp->dev, "temac_mdio_write(phy_id=%i, reg=%x, val=%x)\n",
44 phy_id, reg, val);
45
46 /* First write the desired value into the write data register
47 * and then write the address into the access initiator register
48 */
49 mutex_lock(&lp->indirect_mutex);
50 temac_indirect_out32(lp, XTE_MGTDR_OFFSET, val);
51 temac_indirect_out32(lp, XTE_MIIMAI_OFFSET, (phy_id << 5) | reg);
52 mutex_unlock(&lp->indirect_mutex);
53
54 return 0;
55}
56
57int temac_mdio_setup(struct temac_local *lp, struct device_node *np)
58{
59 struct mii_bus *bus;
60 const u32 *bus_hz;
61 int clk_div;
62 int rc, size;
63 struct resource res;
64
65 /* Calculate a reasonable divisor for the clock rate */
66 clk_div = 0x3f; /* worst-case default setting */
67 bus_hz = of_get_property(np, "clock-frequency", &size);
68 if (bus_hz && size >= sizeof(*bus_hz)) {
69 clk_div = (*bus_hz) / (2500 * 1000 * 2) - 1;
70 if (clk_div < 1)
71 clk_div = 1;
72 if (clk_div > 0x3f)
73 clk_div = 0x3f;
74 }
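	/* Example: a 100 MHz clock-frequency property gives
	 * clk_div = 100000000 / 5000000 - 1 = 19, i.e. an MDC rate of
	 * 100 MHz / (2 * (19 + 1)) = 2.5 MHz, the target rate used in the
	 * formula above. */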
75
76 /* Enable the MDIO bus by asserting the enable bit and writing
77 * in the clock config */
78 mutex_lock(&lp->indirect_mutex);
79 temac_indirect_out32(lp, XTE_MC_OFFSET, 1 << 6 | clk_div);
80 mutex_unlock(&lp->indirect_mutex);
81
82 bus = mdiobus_alloc();
83 if (!bus)
84 return -ENOMEM;
85
86 of_address_to_resource(np, 0, &res);
87 snprintf(bus->id, MII_BUS_ID_SIZE, "%.8llx",
88 (unsigned long long)res.start);
89 bus->priv = lp;
90 bus->name = "Xilinx TEMAC MDIO";
91 bus->read = temac_mdio_read;
92 bus->write = temac_mdio_write;
93 bus->parent = lp->dev;
94 bus->irq = lp->mdio_irqs; /* preallocated IRQ table */
95
96 lp->mii_bus = bus;
97
98 rc = of_mdiobus_register(bus, np);
99 if (rc)
100 goto err_register;
101
102 mutex_lock(&lp->indirect_mutex);
103 dev_dbg(lp->dev, "MDIO bus registered; MC:%x\n",
104 temac_indirect_in32(lp, XTE_MC_OFFSET));
105 mutex_unlock(&lp->indirect_mutex);
106 return 0;
107
108 err_register:
109 mdiobus_free(bus);
110 return rc;
111}
112
113void temac_mdio_teardown(struct temac_local *lp)
114{
115 mdiobus_unregister(lp->mii_bus);
117 mdiobus_free(lp->mii_bus);
118 lp->mii_bus = NULL;
119}
120