author     Jeff Kirsher <jeffrey.t.kirsher@intel.com>  2011-06-11 05:29:36 -0400
committer  Jeff Kirsher <jeffrey.t.kirsher@intel.com>  2011-08-12 03:22:12 -0400
commit     ec21e2ec367697b4803e82662bdff6c8567745fc (patch)
tree       cdb42f597ff5e60516bdab59d306006fd1fafd1c /drivers/net/ethernet
parent     5346ebf6db077d963e9d81af9df290d7f5532492 (diff)
freescale: Move the Freescale drivers
Move the Freescale drivers into drivers/net/ethernet/freescale/ and
make the necessary Kconfig and Makefile changes.
CC: Sandeep Gopalpet <sandeep.kumar@freescale.com>
CC: Andy Fleming <afleming@freescale.com>
CC: Shlomi Gridish <gridish@freescale.com>
CC: Li Yang <leoli@freescale.com>
CC: Pantelis Antoniou <pantelis.antoniou@gmail.com>
CC: Vitaly Bordug <vbordug@ru.mvista.com>
CC: Dan Malek <dmalek@jlc.net>
CC: Sylvain Munaut <tnt@246tNt.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers/net/ethernet')
29 files changed, 20483 insertions, 0 deletions
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index d3aff7456bae..924c287aaaa9 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -24,6 +24,7 @@ source "drivers/net/ethernet/dlink/Kconfig"
 source "drivers/net/ethernet/emulex/Kconfig"
 source "drivers/net/ethernet/neterion/Kconfig"
 source "drivers/net/ethernet/faraday/Kconfig"
+source "drivers/net/ethernet/freescale/Kconfig"
 source "drivers/net/ethernet/fujitsu/Kconfig"
 source "drivers/net/ethernet/ibm/Kconfig"
 source "drivers/net/ethernet/intel/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index b098c5e1fa2c..025d7b763b91 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_NET_VENDOR_DLINK) += dlink/
 obj-$(CONFIG_NET_VENDOR_EMULEX) += emulex/
 obj-$(CONFIG_NET_VENDOR_EXAR) += neterion/
 obj-$(CONFIG_NET_VENDOR_FARADAY) += faraday/
+obj-$(CONFIG_NET_VENDOR_FREESCALE) += freescale/
 obj-$(CONFIG_NET_VENDOR_FUJITSU) += fujitsu/
 obj-$(CONFIG_NET_VENDOR_IBM) += ibm/
 obj-$(CONFIG_NET_VENDOR_INTEL) += intel/
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
new file mode 100644
index 000000000000..2fd2c614c08b
--- /dev/null
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -0,0 +1,88 @@
#
# Freescale device configuration
#

config NET_VENDOR_FREESCALE
	bool "Freescale devices"
	depends on FSL_SOC || QUICC_ENGINE || CPM1 || CPM2 || PPC_MPC512x || \
		   M523x || M527x || M5272 || M528x || M520x || M532x || \
		   IMX_HAVE_PLATFORM_FEC || MXS_HAVE_PLATFORM_FEC || \
		   (PPC_MPC52xx && PPC_BESTCOMM)
	---help---
	  If you have a network (Ethernet) card belonging to this class, say Y
	  and read the Ethernet-HOWTO, available from
	  <http://www.tldp.org/docs.html#howto>.

	  Note that the answer to this question doesn't directly affect the
	  kernel: saying N will just cause the configurator to skip all
	  the questions about Freescale devices. If you say Y, you will be
	  asked for your specific card in the following questions.

if NET_VENDOR_FREESCALE

config FEC
	bool "FEC ethernet controller (on ColdFire and some i.MX CPUs)"
	depends on (M523x || M527x || M5272 || M528x || M520x || M532x || \
		   IMX_HAVE_PLATFORM_FEC || MXS_HAVE_PLATFORM_FEC)
	default IMX_HAVE_PLATFORM_FEC || MXS_HAVE_PLATFORM_FEC if ARM
	select PHYLIB
	---help---
	  Say Y here if you want to use the built-in 10/100 Fast Ethernet
	  controller on some Motorola ColdFire and Freescale i.MX processors.

config FEC_MPC52xx
	tristate "FEC MPC52xx driver"
	depends on PPC_MPC52xx && PPC_BESTCOMM
	select CRC32
	select PHYLIB
	select PPC_BESTCOMM_FEC
	---help---
	  This option enables support for the MPC5200's on-chip
	  Fast Ethernet Controller.
	  If compiled as a module, it will be called fec_mpc52xx.

config FEC_MPC52xx_MDIO
	bool "FEC MPC52xx MDIO bus driver"
	depends on FEC_MPC52xx
	default y
	---help---
	  The MPC5200's FEC can connect to the Ethernet either with
	  an external MII PHY chip or a 10 Mbps 7-wire interface
	  (Motorola industry standard).
	  If your board uses an external PHY connected to the FEC, enable this.
	  If unsure, enable it.
	  If compiled as a module, it will be called fec_mpc52xx_phy.

source "drivers/net/ethernet/freescale/fs_enet/Kconfig"

config FSL_PQ_MDIO
	tristate "Freescale PQ MDIO"
	depends on FSL_SOC
	select PHYLIB
	---help---
	  This driver supports the MDIO bus used by the gianfar and UCC drivers.

config UCC_GETH
	tristate "Freescale QE Gigabit Ethernet"
	depends on QUICC_ENGINE
	select FSL_PQ_MDIO
	select PHYLIB
	---help---
	  This driver supports the Gigabit Ethernet mode of the QUICC Engine,
	  which is available on some Freescale SoCs.

config UGETH_TX_ON_DEMAND
	bool "Transmit on Demand support"
	depends on UCC_GETH

config GIANFAR
	tristate "Gianfar Ethernet"
	depends on FSL_SOC
	select FSL_PQ_MDIO
	select PHYLIB
	select CRC32
	---help---
	  This driver supports the Gigabit TSEC on the MPC83xx, MPC85xx,
	  and MPC86xx family of chips, and the FEC on the 8540.

endif # NET_VENDOR_FREESCALE
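
# A board configuration would typically enable the vendor gate plus the
# specific driver, e.g. CONFIG_NET_VENDOR_FREESCALE=y together with
# CONFIG_GIANFAR=y (or =m) in its defconfig; the driver options above
# only become visible once NET_VENDOR_FREESCALE is set.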
diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile
new file mode 100644
index 000000000000..1752488c9ee5
--- /dev/null
+++ b/drivers/net/ethernet/freescale/Makefile
@@ -0,0 +1,18 @@
#
# Makefile for the Freescale network device drivers.
#

obj-$(CONFIG_FEC) += fec.o
obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx.o
ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y)
	obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx_phy.o
endif
obj-$(CONFIG_FS_ENET) += fs_enet/
obj-$(CONFIG_FSL_PQ_MDIO) += fsl_pq_mdio.o
obj-$(CONFIG_GIANFAR) += gianfar_driver.o
obj-$(CONFIG_PTP_1588_CLOCK_GIANFAR) += gianfar_ptp.o
gianfar_driver-objs := gianfar.o \
		gianfar_ethtool.o \
		gianfar_sysfs.o
obj-$(CONFIG_UCC_GETH) += ucc_geth_driver.o
ucc_geth_driver-objs := ucc_geth.o ucc_geth_ethtool.o
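# gianfar_driver and ucc_geth_driver are kbuild composite objects: the
# <name>-objs lists above are linked into a single <name>.o, so each
# driver builds as one module from several source files.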
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c
new file mode 100644
index 000000000000..e8266ccf818a
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fec.c
@@ -0,0 +1,1663 @@
/*
 * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
 *
 * Right now, I am very wasteful with the buffers. I allocate memory
 * pages and then divide them into 2K frame buffers. This way I know I
 * have buffers large enough to hold one frame within one buffer descriptor.
 * Once I get this working, I will use 64 or 128 byte CPM buffers, which
 * will be much more memory efficient and will easily handle lots of
 * small packets.
 *
 * Much better multiple PHY support by Magnus Damm.
 * Copyright (c) 2000 Ericsson Radio Systems AB.
 *
 * Support for FEC controller of ColdFire processors.
 * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com)
 *
 * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
 * Copyright (c) 2004-2006 Macq Electronique SA.
 *
 * Copyright (C) 2010 Freescale Semiconductor, Inc.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/fec.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_net.h>

#include <asm/cacheflush.h>

#ifndef CONFIG_ARM
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#endif

#include "fec.h"

#if defined(CONFIG_ARM)
#define FEC_ALIGNMENT	0xf
#else
#define FEC_ALIGNMENT	0x3
#endif

#define DRIVER_NAME	"fec"

/* Controller is ENET-MAC */
#define FEC_QUIRK_ENET_MAC	(1 << 0)
/* Controller needs driver to swap frame */
#define FEC_QUIRK_SWAP_FRAME	(1 << 1)
/* Controller uses gasket */
#define FEC_QUIRK_USE_GASKET	(1 << 2)

static struct platform_device_id fec_devtype[] = {
	{
		/* keep it for coldfire */
		.name = DRIVER_NAME,
		.driver_data = 0,
	}, {
		.name = "imx25-fec",
		.driver_data = FEC_QUIRK_USE_GASKET,
	}, {
		.name = "imx27-fec",
		.driver_data = 0,
	}, {
		.name = "imx28-fec",
		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(platform, fec_devtype);

enum imx_fec_type {
	IMX25_FEC = 1,	/* runs on i.mx25/50/53 */
	IMX27_FEC,	/* runs on i.mx27/35/51 */
	IMX28_FEC,
};

static const struct of_device_id fec_dt_ids[] = {
	{ .compatible = "fsl,imx25-fec", .data = &fec_devtype[IMX25_FEC], },
	{ .compatible = "fsl,imx27-fec", .data = &fec_devtype[IMX27_FEC], },
	{ .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fec_dt_ids);

static unsigned char macaddr[ETH_ALEN];
module_param_array(macaddr, byte, NULL, 0);
MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");

#if defined(CONFIG_M5272)
/*
 * Some hardware gets its MAC address out of local flash memory;
 * if this is non-zero then assume it is the address to get the MAC from.
 */
#if defined(CONFIG_NETtel)
#define FEC_FLASHMAC	0xf0006006
#elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES)
#define FEC_FLASHMAC	0xf0006000
#elif defined(CONFIG_CANCam)
#define FEC_FLASHMAC	0xf0020000
#elif defined(CONFIG_M5272C3)
#define FEC_FLASHMAC	(0xffe04000 + 4)
#elif defined(CONFIG_MOD5272)
#define FEC_FLASHMAC	0xffc0406b
#else
#define FEC_FLASHMAC	0
#endif
#endif /* CONFIG_M5272 */

/* The number of Tx and Rx buffers. These are allocated from the page
 * pool. The code may assume these are a power of two, so it is best
 * to keep them that size.
 * We don't need to allocate pages for the transmitter. We just use
 * the skbuff directly.
 */
#define FEC_ENET_RX_PAGES	8
#define FEC_ENET_RX_FRSIZE	2048
#define FEC_ENET_RX_FRPPG	(PAGE_SIZE / FEC_ENET_RX_FRSIZE)
#define RX_RING_SIZE		(FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
#define FEC_ENET_TX_FRSIZE	2048
#define FEC_ENET_TX_FRPPG	(PAGE_SIZE / FEC_ENET_TX_FRSIZE)
#define TX_RING_SIZE		16	/* Must be power of two */
#define TX_RING_MOD_MASK	15	/*   for this to work */

#if (((RX_RING_SIZE + TX_RING_SIZE) * 8) > PAGE_SIZE)
#error "FEC: descriptor ring size constants too large"
#endif
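
/*
 * With 4 KiB pages the arithmetic above yields FEC_ENET_RX_FRPPG = 2 and
 * RX_RING_SIZE = 16, so the 16 + 16 descriptors (8 bytes each, as the
 * check above assumes) consume only 256 bytes of the single page that
 * fec_enet_init() later allocates for them.
 */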

/* Interrupt events/masks. */
#define FEC_ENET_HBERR	((uint)0x80000000)	/* Heartbeat error */
#define FEC_ENET_BABR	((uint)0x40000000)	/* Babbling receiver */
#define FEC_ENET_BABT	((uint)0x20000000)	/* Babbling transmitter */
#define FEC_ENET_GRA	((uint)0x10000000)	/* Graceful stop complete */
#define FEC_ENET_TXF	((uint)0x08000000)	/* Full frame transmitted */
#define FEC_ENET_TXB	((uint)0x04000000)	/* A buffer was transmitted */
#define FEC_ENET_RXF	((uint)0x02000000)	/* Full frame received */
#define FEC_ENET_RXB	((uint)0x01000000)	/* A buffer was received */
#define FEC_ENET_MII	((uint)0x00800000)	/* MII interrupt */
#define FEC_ENET_EBERR	((uint)0x00400000)	/* SDMA bus error */

#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII)

/* The FEC stores dest/src/type, data, and checksum for receive packets. */
#define PKT_MAXBUF_SIZE		1518
#define PKT_MINBUF_SIZE		64
#define PKT_MAXBLR_SIZE		1520

/*
 * The 5270/5271/5280/5282/532x RX control register also contains maximum frame
 * size bits. Other FEC hardware does not, so we need to take that into
 * account when setting it.
 */
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
#define OPT_FRAME_SIZE	(PKT_MAXBUF_SIZE << 16)
#else
#define OPT_FRAME_SIZE	0
#endif

/* The FEC buffer descriptors track the ring buffers. The rx_bd_base and
 * tx_bd_base always point to the base of the buffer descriptors. The
 * cur_rx and cur_tx point to the currently available buffer.
 * The dirty_tx tracks the current buffer that is being sent by the
 * controller. The cur_tx and dirty_tx are equal under both completely
 * empty and completely full conditions. The empty/ready indicator in
 * the buffer descriptor determines the actual condition.
 */
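
/*
 * Example: cur_tx == dirty_tx with no descriptor marked BD_ENET_TX_READY
 * means the ring is completely empty, while the same pointer equality
 * with every descriptor still marked READY means it is completely full;
 * the READY bit (together with the tx_full flag below) disambiguates.
 */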
struct fec_enet_private {
	/* Hardware registers of the FEC device */
	void __iomem *hwp;

	struct net_device *netdev;

	struct clk *clk;

	/* The saved address of a sent-in-place packet/buffer, for skfree(). */
	unsigned char *tx_bounce[TX_RING_SIZE];
	struct sk_buff *tx_skbuff[TX_RING_SIZE];
	struct sk_buff *rx_skbuff[RX_RING_SIZE];
	ushort	skb_cur;
	ushort	skb_dirty;

	/* CPM dual port RAM relative addresses */
	dma_addr_t	bd_dma;
	/* Address of Rx and Tx buffers */
	struct bufdesc	*rx_bd_base;
	struct bufdesc	*tx_bd_base;
	/* The next free ring entry */
	struct bufdesc	*cur_rx, *cur_tx;
	/* The ring entries to be free()ed */
	struct bufdesc	*dirty_tx;

	uint	tx_full;
	/* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
	spinlock_t hw_lock;

	struct platform_device *pdev;

	int	opened;

	/* Phylib and MDIO interface */
	struct mii_bus *mii_bus;
	struct phy_device *phy_dev;
	int	mii_timeout;
	uint	phy_speed;
	phy_interface_t	phy_interface;
	int	link;
	int	full_duplex;
	struct	completion mdio_done;
};

/* FEC MII MMFR bits definition */
#define FEC_MMFR_ST		(1 << 30)
#define FEC_MMFR_OP_READ	(2 << 28)
#define FEC_MMFR_OP_WRITE	(1 << 28)
#define FEC_MMFR_PA(v)		((v & 0x1f) << 23)
#define FEC_MMFR_RA(v)		((v & 0x1f) << 18)
#define FEC_MMFR_TA		(2 << 16)
#define FEC_MMFR_DATA(v)	(v & 0xffff)
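
/*
 * Example encoding: reading register 1 (BMSR) of the PHY at address 2
 * writes FEC_MMFR_ST | FEC_MMFR_OP_READ | FEC_MMFR_PA(2) |
 * FEC_MMFR_RA(1) | FEC_MMFR_TA to the FEC_MII_DATA register;
 * fec_enet_mdio_read() below then extracts the 16-bit result from the
 * same register with FEC_MMFR_DATA().
 */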

#define FEC_MII_TIMEOUT		1000 /* us */

/* Transmitter timeout */
#define TX_TIMEOUT (2 * HZ)

static void *swap_buffer(void *bufaddr, int len)
{
	int i;
	unsigned int *buf = bufaddr;

	for (i = 0; i < (len + 3) / 4; i++, buf++)
		*buf = cpu_to_be32(*buf);

	return bufaddr;
}

static netdev_tx_t
fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	const struct platform_device_id *id_entry =
				platform_get_device_id(fep->pdev);
	struct bufdesc *bdp;
	void *bufaddr;
	unsigned short status;
	unsigned long flags;

	if (!fep->link) {
		/* Link is down or autonegotiation is in progress. */
		return NETDEV_TX_BUSY;
	}

	spin_lock_irqsave(&fep->hw_lock, flags);
	/* Fill in a Tx ring entry */
	bdp = fep->cur_tx;

	status = bdp->cbd_sc;

	if (status & BD_ENET_TX_READY) {
		/* Ooops. All transmit buffers are full. Bail out.
		 * This should not happen, since ndev->tbusy should be set.
		 */
		printk("%s: tx queue full!\n", ndev->name);
		spin_unlock_irqrestore(&fep->hw_lock, flags);
		return NETDEV_TX_BUSY;
	}

	/* Clear all of the status flags */
	status &= ~BD_ENET_TX_STATS;

	/* Set buffer length and buffer pointer */
	bufaddr = skb->data;
	bdp->cbd_datlen = skb->len;

	/*
	 * On some FEC implementations data must be aligned on
	 * 4-byte boundaries. Use bounce buffers to copy data
	 * and get it aligned. Ugh.
	 */
	if (((unsigned long) bufaddr) & FEC_ALIGNMENT) {
		unsigned int index;
		index = bdp - fep->tx_bd_base;
		memcpy(fep->tx_bounce[index], skb->data, skb->len);
		bufaddr = fep->tx_bounce[index];
	}

	/*
	 * Some designs made an incorrect assumption about the endianness of
	 * the system they run on. As a result, the driver has to swap every
	 * frame going to and coming from the controller.
	 */
	if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
		swap_buffer(bufaddr, skb->len);

	/* Save skb pointer */
	fep->tx_skbuff[fep->skb_cur] = skb;

	ndev->stats.tx_bytes += skb->len;
	fep->skb_cur = (fep->skb_cur + 1) & TX_RING_MOD_MASK;

	/* Push the data cache so the CPM does not get stale memory data. */
	bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr,
			FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);

	/* Send it on its way. Tell FEC it's ready, interrupt when done,
	 * it's the last BD of the frame, and to put the CRC on the end.
	 */
	status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
			| BD_ENET_TX_LAST | BD_ENET_TX_TC);
	bdp->cbd_sc = status;

	/* Trigger transmission start */
	writel(0, fep->hwp + FEC_X_DES_ACTIVE);

	/* If this was the last BD in the ring, start at the beginning again. */
	if (status & BD_ENET_TX_WRAP)
		bdp = fep->tx_bd_base;
	else
		bdp++;

	if (bdp == fep->dirty_tx) {
		fep->tx_full = 1;
		netif_stop_queue(ndev);
	}

	fep->cur_tx = bdp;

	skb_tx_timestamp(skb);

	spin_unlock_irqrestore(&fep->hw_lock, flags);

	return NETDEV_TX_OK;
}

/* This function is called to start or restart the FEC during a link
 * change. This only happens when switching between half and full
 * duplex.
 */
static void
fec_restart(struct net_device *ndev, int duplex)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	const struct platform_device_id *id_entry =
				platform_get_device_id(fep->pdev);
	int i;
	u32 temp_mac[2];
	u32 rcntl = OPT_FRAME_SIZE | 0x04;

	/* Whack a reset. We should wait for this. */
	writel(1, fep->hwp + FEC_ECNTRL);
	udelay(10);

	/*
	 * An enet-mac reset resets the MAC address registers too,
	 * so we need to reconfigure them.
	 */
	if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
		memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
		writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
		writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);
	}

	/* Clear any outstanding interrupt. */
	writel(0xffc00000, fep->hwp + FEC_IEVENT);

	/* Reset all multicast. */
	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
#ifndef CONFIG_M5272
	writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
	writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
#endif

	/* Set maximum receive buffer size. */
	writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);

	/* Set receive and transmit descriptor base. */
	writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
	writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) * RX_RING_SIZE,
			fep->hwp + FEC_X_DES_START);

	fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
	fep->cur_rx = fep->rx_bd_base;

	/* Reset SKB transmit buffers. */
	fep->skb_cur = fep->skb_dirty = 0;
	for (i = 0; i <= TX_RING_MOD_MASK; i++) {
		if (fep->tx_skbuff[i]) {
			dev_kfree_skb_any(fep->tx_skbuff[i]);
			fep->tx_skbuff[i] = NULL;
		}
	}

	/* Enable MII mode */
	if (duplex) {
		/* FD enable */
		writel(0x04, fep->hwp + FEC_X_CNTRL);
	} else {
		/* No Rcv on Xmit */
		rcntl |= 0x02;
		writel(0x0, fep->hwp + FEC_X_CNTRL);
	}

	fep->full_duplex = duplex;

	/* Set MII speed */
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);

	/*
	 * The phy interface and speed need to get configured
	 * differently on enet-mac.
	 */
	if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
		/* Enable flow control and length check */
		rcntl |= 0x40000000 | 0x00000020;

		/* MII or RMII */
		if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
			rcntl |= (1 << 8);
		else
			rcntl &= ~(1 << 8);

		/* 10M or 100M */
		if (fep->phy_dev && fep->phy_dev->speed == SPEED_100)
			rcntl &= ~(1 << 9);
		else
			rcntl |= (1 << 9);

	} else {
#ifdef FEC_MIIGSK_ENR
		if (id_entry->driver_data & FEC_QUIRK_USE_GASKET) {
			/* disable the gasket and wait */
			writel(0, fep->hwp + FEC_MIIGSK_ENR);
			while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
				udelay(1);

			/*
			 * Configure the gasket:
			 *   RMII, 50 MHz, no loopback, no echo
			 *   MII,  25 MHz, no loopback, no echo
			 */
			writel((fep->phy_interface == PHY_INTERFACE_MODE_RMII) ?
					1 : 0, fep->hwp + FEC_MIIGSK_CFGR);

			/* re-enable the gasket */
			writel(2, fep->hwp + FEC_MIIGSK_ENR);
		}
#endif
	}
	writel(rcntl, fep->hwp + FEC_R_CNTRL);

	/* And last, enable the transmit and receive processing */
	writel(2, fep->hwp + FEC_ECNTRL);
	writel(0, fep->hwp + FEC_R_DES_ACTIVE);

	/* Enable interrupts we wish to service */
	writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
}

static void
fec_stop(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	/* We cannot expect a graceful transmit stop without link! */
	if (fep->link) {
		writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
		udelay(10);
		if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
			printk("fec_stop: Graceful transmit stop did not complete!\n");
	}

	/* Whack a reset. We should wait for this. */
	writel(1, fep->hwp + FEC_ECNTRL);
	udelay(10);
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
	writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
}


static void
fec_timeout(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	ndev->stats.tx_errors++;

	fec_restart(ndev, fep->full_duplex);
	netif_wake_queue(ndev);
}

static void
fec_enet_tx(struct net_device *ndev)
{
	struct fec_enet_private *fep;
	struct bufdesc *bdp;
	unsigned short status;
	struct sk_buff *skb;

	fep = netdev_priv(ndev);
	spin_lock(&fep->hw_lock);
	bdp = fep->dirty_tx;

	while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
		if (bdp == fep->cur_tx && fep->tx_full == 0)
			break;

		dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
				FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
		bdp->cbd_bufaddr = 0;

		skb = fep->tx_skbuff[fep->skb_dirty];
		/* Check for errors. */
		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
				   BD_ENET_TX_RL | BD_ENET_TX_UN |
				   BD_ENET_TX_CSL)) {
			ndev->stats.tx_errors++;
			if (status & BD_ENET_TX_HB)  /* No heartbeat */
				ndev->stats.tx_heartbeat_errors++;
			if (status & BD_ENET_TX_LC)  /* Late collision */
				ndev->stats.tx_window_errors++;
			if (status & BD_ENET_TX_RL)  /* Retrans limit */
				ndev->stats.tx_aborted_errors++;
			if (status & BD_ENET_TX_UN)  /* Underrun */
				ndev->stats.tx_fifo_errors++;
			if (status & BD_ENET_TX_CSL) /* Carrier lost */
				ndev->stats.tx_carrier_errors++;
		} else {
			ndev->stats.tx_packets++;
		}

		if (status & BD_ENET_TX_READY)
			printk("HEY! Enet xmit interrupt and TX_READY.\n");

		/* Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (status & BD_ENET_TX_DEF)
			ndev->stats.collisions++;

		/* Free the sk buffer associated with this last transmit */
		dev_kfree_skb_any(skb);
		fep->tx_skbuff[fep->skb_dirty] = NULL;
		fep->skb_dirty = (fep->skb_dirty + 1) & TX_RING_MOD_MASK;

		/* Update pointer to next buffer descriptor to be transmitted */
		if (status & BD_ENET_TX_WRAP)
			bdp = fep->tx_bd_base;
		else
			bdp++;

		/* Since we have freed up a buffer, the ring is no longer full. */
		if (fep->tx_full) {
			fep->tx_full = 0;
			if (netif_queue_stopped(ndev))
				netif_wake_queue(ndev);
		}
	}
	fep->dirty_tx = bdp;
	spin_unlock(&fep->hw_lock);
}

593 | |||
594 | /* During a receive, the cur_rx points to the current incoming buffer. | ||
595 | * When we update through the ring, if the next incoming buffer has | ||
596 | * not been given to the system, we just set the empty indicator, | ||
597 | * effectively tossing the packet. | ||
598 | */ | ||
599 | static void | ||
600 | fec_enet_rx(struct net_device *ndev) | ||
601 | { | ||
602 | struct fec_enet_private *fep = netdev_priv(ndev); | ||
603 | const struct platform_device_id *id_entry = | ||
604 | platform_get_device_id(fep->pdev); | ||
605 | struct bufdesc *bdp; | ||
606 | unsigned short status; | ||
607 | struct sk_buff *skb; | ||
608 | ushort pkt_len; | ||
609 | __u8 *data; | ||
610 | |||
611 | #ifdef CONFIG_M532x | ||
612 | flush_cache_all(); | ||
613 | #endif | ||
614 | |||
615 | spin_lock(&fep->hw_lock); | ||
616 | |||
617 | /* First, grab all of the stats for the incoming packet. | ||
618 | * These get messed up if we get called due to a busy condition. | ||
619 | */ | ||
620 | bdp = fep->cur_rx; | ||
621 | |||
622 | while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) { | ||
623 | |||
624 | /* Since we have allocated space to hold a complete frame, | ||
625 | * the last indicator should be set. | ||
626 | */ | ||
627 | if ((status & BD_ENET_RX_LAST) == 0) | ||
628 | printk("FEC ENET: rcv is not +last\n"); | ||
629 | |||
630 | if (!fep->opened) | ||
631 | goto rx_processing_done; | ||
632 | |||
633 | /* Check for errors. */ | ||
634 | if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO | | ||
635 | BD_ENET_RX_CR | BD_ENET_RX_OV)) { | ||
636 | ndev->stats.rx_errors++; | ||
637 | if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) { | ||
638 | /* Frame too long or too short. */ | ||
639 | ndev->stats.rx_length_errors++; | ||
640 | } | ||
641 | if (status & BD_ENET_RX_NO) /* Frame alignment */ | ||
642 | ndev->stats.rx_frame_errors++; | ||
643 | if (status & BD_ENET_RX_CR) /* CRC Error */ | ||
644 | ndev->stats.rx_crc_errors++; | ||
645 | if (status & BD_ENET_RX_OV) /* FIFO overrun */ | ||
646 | ndev->stats.rx_fifo_errors++; | ||
647 | } | ||
648 | |||
649 | /* Report late collisions as a frame error. | ||
650 | * On this error, the BD is closed, but we don't know what we | ||
651 | * have in the buffer. So, just drop this frame on the floor. | ||
652 | */ | ||
653 | if (status & BD_ENET_RX_CL) { | ||
654 | ndev->stats.rx_errors++; | ||
655 | ndev->stats.rx_frame_errors++; | ||
656 | goto rx_processing_done; | ||
657 | } | ||
658 | |||
659 | /* Process the incoming frame. */ | ||
660 | ndev->stats.rx_packets++; | ||
661 | pkt_len = bdp->cbd_datlen; | ||
662 | ndev->stats.rx_bytes += pkt_len; | ||
663 | data = (__u8*)__va(bdp->cbd_bufaddr); | ||
664 | |||
665 | dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, | ||
666 | FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE); | ||
667 | |||
668 | if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) | ||
669 | swap_buffer(data, pkt_len); | ||
670 | |||
671 | /* This does 16 byte alignment, exactly what we need. | ||
672 | * The packet length includes FCS, but we don't want to | ||
673 | * include that when passing upstream as it messes up | ||
674 | * bridging applications. | ||
675 | */ | ||
676 | skb = dev_alloc_skb(pkt_len - 4 + NET_IP_ALIGN); | ||
677 | |||
678 | if (unlikely(!skb)) { | ||
679 | printk("%s: Memory squeeze, dropping packet.\n", | ||
680 | ndev->name); | ||
681 | ndev->stats.rx_dropped++; | ||
682 | } else { | ||
683 | skb_reserve(skb, NET_IP_ALIGN); | ||
684 | skb_put(skb, pkt_len - 4); /* Make room */ | ||
685 | skb_copy_to_linear_data(skb, data, pkt_len - 4); | ||
686 | skb->protocol = eth_type_trans(skb, ndev); | ||
687 | if (!skb_defer_rx_timestamp(skb)) | ||
688 | netif_rx(skb); | ||
689 | } | ||
690 | |||
691 | bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data, | ||
692 | FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE); | ||
693 | rx_processing_done: | ||
694 | /* Clear the status flags for this buffer */ | ||
695 | status &= ~BD_ENET_RX_STATS; | ||
696 | |||
697 | /* Mark the buffer empty */ | ||
698 | status |= BD_ENET_RX_EMPTY; | ||
699 | bdp->cbd_sc = status; | ||
700 | |||
701 | /* Update BD pointer to next entry */ | ||
702 | if (status & BD_ENET_RX_WRAP) | ||
703 | bdp = fep->rx_bd_base; | ||
704 | else | ||
705 | bdp++; | ||
706 | /* Doing this here will keep the FEC running while we process | ||
707 | * incoming frames. On a heavily loaded network, we should be | ||
708 | * able to keep up at the expense of system resources. | ||
709 | */ | ||
710 | writel(0, fep->hwp + FEC_R_DES_ACTIVE); | ||
711 | } | ||
712 | fep->cur_rx = bdp; | ||
713 | |||
714 | spin_unlock(&fep->hw_lock); | ||
715 | } | ||
716 | |||
static irqreturn_t
fec_enet_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct fec_enet_private *fep = netdev_priv(ndev);
	uint int_events;
	irqreturn_t ret = IRQ_NONE;

	do {
		int_events = readl(fep->hwp + FEC_IEVENT);
		writel(int_events, fep->hwp + FEC_IEVENT);

		if (int_events & FEC_ENET_RXF) {
			ret = IRQ_HANDLED;
			fec_enet_rx(ndev);
		}

		/* Transmit OK, or non-fatal error. Update the buffer
		 * descriptors. FEC handles all errors, we just discover
		 * them as part of the transmit process.
		 */
		if (int_events & FEC_ENET_TXF) {
			ret = IRQ_HANDLED;
			fec_enet_tx(ndev);
		}

		if (int_events & FEC_ENET_MII) {
			ret = IRQ_HANDLED;
			complete(&fep->mdio_done);
		}
	} while (int_events);

	return ret;
}



/* ------------------------------------------------------------------------- */
static void __inline__ fec_get_mac(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
	unsigned char *iap, tmpaddr[ETH_ALEN];

	/*
	 * Try to get the MAC address in the following order:
	 *
	 * 1) module parameter via kernel command line in the form
	 *    fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0
	 */
	iap = macaddr;

#ifdef CONFIG_OF
	/*
	 * 2) from device tree data
	 */
	if (!is_valid_ether_addr(iap)) {
		struct device_node *np = fep->pdev->dev.of_node;
		if (np) {
			const char *mac = of_get_mac_address(np);
			if (mac)
				iap = (unsigned char *) mac;
		}
	}
#endif

	/*
	 * 3) from flash or fuse (via platform data)
	 */
	if (!is_valid_ether_addr(iap)) {
#ifdef CONFIG_M5272
		if (FEC_FLASHMAC)
			iap = (unsigned char *)FEC_FLASHMAC;
#else
		if (pdata)
			memcpy(iap, pdata->mac, ETH_ALEN);
#endif
	}

	/*
	 * 4) FEC MAC registers set by the bootloader
	 */
	if (!is_valid_ether_addr(iap)) {
		*((unsigned long *) &tmpaddr[0]) =
			be32_to_cpu(readl(fep->hwp + FEC_ADDR_LOW));
		*((unsigned short *) &tmpaddr[4]) =
			be16_to_cpu(readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
		iap = &tmpaddr[0];
	}

	memcpy(ndev->dev_addr, iap, ETH_ALEN);

	/* Adjust MAC if using the macaddr module parameter: each FEC
	 * instance derives a unique address by adding pdev->id to the
	 * last octet.
	 */
	if (iap == macaddr)
		ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->pdev->id;
}

/* ------------------------------------------------------------------------- */

/*
 * Phy section
 */
static void fec_enet_adjust_link(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct phy_device *phy_dev = fep->phy_dev;
	unsigned long flags;

	int status_change = 0;

	spin_lock_irqsave(&fep->hw_lock, flags);

	/* Prevent a state halted on mii error */
	if (fep->mii_timeout && phy_dev->state == PHY_HALTED) {
		phy_dev->state = PHY_RESUMING;
		goto spin_unlock;
	}

	/* Duplex link change */
	if (phy_dev->link) {
		if (fep->full_duplex != phy_dev->duplex) {
			fec_restart(ndev, phy_dev->duplex);
			status_change = 1;
		}
	}

	/* Link on or off change */
	if (phy_dev->link != fep->link) {
		fep->link = phy_dev->link;
		if (phy_dev->link)
			fec_restart(ndev, phy_dev->duplex);
		else
			fec_stop(ndev);
		status_change = 1;
	}

spin_unlock:
	spin_unlock_irqrestore(&fep->hw_lock, flags);

	if (status_change)
		phy_print_status(phy_dev);
}

static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct fec_enet_private *fep = bus->priv;
	unsigned long time_left;

	fep->mii_timeout = 0;
	init_completion(&fep->mdio_done);

	/* start a read op */
	writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
		FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
		FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);

	/* wait for end of transfer */
	time_left = wait_for_completion_timeout(&fep->mdio_done,
			usecs_to_jiffies(FEC_MII_TIMEOUT));
	if (time_left == 0) {
		fep->mii_timeout = 1;
		printk(KERN_ERR "FEC: MDIO read timeout\n");
		return -ETIMEDOUT;
	}

	/* return value */
	return FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
}

static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
			   u16 value)
{
	struct fec_enet_private *fep = bus->priv;
	unsigned long time_left;

	fep->mii_timeout = 0;
	init_completion(&fep->mdio_done);

	/* start a write op */
	writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
		FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
		FEC_MMFR_TA | FEC_MMFR_DATA(value),
		fep->hwp + FEC_MII_DATA);

	/* wait for end of transfer */
	time_left = wait_for_completion_timeout(&fep->mdio_done,
			usecs_to_jiffies(FEC_MII_TIMEOUT));
	if (time_left == 0) {
		fep->mii_timeout = 1;
		printk(KERN_ERR "FEC: MDIO write timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static int fec_enet_mdio_reset(struct mii_bus *bus)
{
	return 0;
}

static int fec_enet_mii_probe(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct phy_device *phy_dev = NULL;
	char mdio_bus_id[MII_BUS_ID_SIZE];
	char phy_name[MII_BUS_ID_SIZE + 3];
	int phy_id;
	int dev_id = fep->pdev->id;

	fep->phy_dev = NULL;

	/* check for attached phy */
	for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) {
		if ((fep->mii_bus->phy_mask & (1 << phy_id)))
			continue;
		if (fep->mii_bus->phy_map[phy_id] == NULL)
			continue;
		if (fep->mii_bus->phy_map[phy_id]->phy_id == 0)
			continue;
		if (dev_id--)
			continue;
		strncpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
		break;
	}

	if (phy_id >= PHY_MAX_ADDR) {
		printk(KERN_INFO "%s: no PHY, assuming direct connection "
			"to switch\n", ndev->name);
		strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE);
		phy_id = 0;
	}

	snprintf(phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, mdio_bus_id, phy_id);
	phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link, 0,
			      PHY_INTERFACE_MODE_MII);
	if (IS_ERR(phy_dev)) {
		printk(KERN_ERR "%s: could not attach to PHY\n", ndev->name);
		return PTR_ERR(phy_dev);
	}

	/* mask with MAC supported features */
	phy_dev->supported &= PHY_BASIC_FEATURES;
	phy_dev->advertising = phy_dev->supported;

	fep->phy_dev = phy_dev;
	fep->link = 0;
	fep->full_duplex = 0;

	printk(KERN_INFO "%s: Freescale FEC PHY driver [%s] "
		"(mii_bus:phy_addr=%s, irq=%d)\n", ndev->name,
		fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev),
		fep->phy_dev->irq);

	return 0;
}

static int fec_enet_mii_init(struct platform_device *pdev)
{
	static struct mii_bus *fec0_mii_bus;
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct fec_enet_private *fep = netdev_priv(ndev);
	const struct platform_device_id *id_entry =
				platform_get_device_id(fep->pdev);
	int err = -ENXIO, i;

	/*
	 * The dual fec interfaces are not equivalent with enet-mac.
	 * Here are the differences:
	 *
	 *  - fec0 supports MII & RMII modes while fec1 only supports RMII
	 *  - fec0 acts as the 1588 time master while fec1 is slave
	 *  - external PHYs can only be configured by fec0
	 *
	 * That is to say fec1 cannot work independently. It only works
	 * when fec0 is working. The reason behind this design is that the
	 * second interface is added primarily for Switch mode.
	 *
	 * Because of the last point above, both PHYs are attached to the
	 * fec0 MDIO interface in the board design, and need to be
	 * configured by the fec0 mii_bus.
	 */
	if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && pdev->id) {
		/* fec1 uses fec0 mii_bus */
		fep->mii_bus = fec0_mii_bus;
		return 0;
	}

	fep->mii_timeout = 0;

	/*
	 * Set MII speed to 2.5 MHz (= clk_get_rate() / 2 * phy_speed)
	 */
	fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk), 5000000) << 1;
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
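	/*
	 * Worked example: with a 50 MHz peripheral clock the line above
	 * computes DIV_ROUND_UP(50000000, 5000000) << 1 = 20, and
	 * 50 MHz / 20 gives the 2.5 MHz MDC rate named in the comment.
	 */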

	fep->mii_bus = mdiobus_alloc();
	if (fep->mii_bus == NULL) {
		err = -ENOMEM;
		goto err_out;
	}

	fep->mii_bus->name = "fec_enet_mii_bus";
	fep->mii_bus->read = fec_enet_mdio_read;
	fep->mii_bus->write = fec_enet_mdio_write;
	fep->mii_bus->reset = fec_enet_mdio_reset;
	snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id + 1);
	fep->mii_bus->priv = fep;
	fep->mii_bus->parent = &pdev->dev;

	fep->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
	if (!fep->mii_bus->irq) {
		err = -ENOMEM;
		goto err_out_free_mdiobus;
	}

	for (i = 0; i < PHY_MAX_ADDR; i++)
		fep->mii_bus->irq[i] = PHY_POLL;

	if (mdiobus_register(fep->mii_bus))
		goto err_out_free_mdio_irq;

	/* save fec0 mii_bus */
	if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
		fec0_mii_bus = fep->mii_bus;

	return 0;

err_out_free_mdio_irq:
	kfree(fep->mii_bus->irq);
err_out_free_mdiobus:
	mdiobus_free(fep->mii_bus);
err_out:
	return err;
}

static void fec_enet_mii_remove(struct fec_enet_private *fep)
{
	if (fep->phy_dev)
		phy_disconnect(fep->phy_dev);
	mdiobus_unregister(fep->mii_bus);
	kfree(fep->mii_bus->irq);
	mdiobus_free(fep->mii_bus);
}

static int fec_enet_get_settings(struct net_device *ndev,
				  struct ethtool_cmd *cmd)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct phy_device *phydev = fep->phy_dev;

	if (!phydev)
		return -ENODEV;

	return phy_ethtool_gset(phydev, cmd);
}

static int fec_enet_set_settings(struct net_device *ndev,
				 struct ethtool_cmd *cmd)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct phy_device *phydev = fep->phy_dev;

	if (!phydev)
		return -ENODEV;

	return phy_ethtool_sset(phydev, cmd);
}

static void fec_enet_get_drvinfo(struct net_device *ndev,
				 struct ethtool_drvinfo *info)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	strcpy(info->driver, fep->pdev->dev.driver->name);
	strcpy(info->version, "Revision: 1.0");
	strcpy(info->bus_info, dev_name(&ndev->dev));
}

static struct ethtool_ops fec_enet_ethtool_ops = {
	.get_settings	= fec_enet_get_settings,
	.set_settings	= fec_enet_set_settings,
	.get_drvinfo	= fec_enet_get_drvinfo,
	.get_link	= ethtool_op_get_link,
};

static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct phy_device *phydev = fep->phy_dev;

	if (!netif_running(ndev))
		return -EINVAL;

	if (!phydev)
		return -ENODEV;

	return phy_mii_ioctl(phydev, rq, cmd);
}

static void fec_enet_free_buffers(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int i;
	struct sk_buff *skb;
	struct bufdesc *bdp;

	bdp = fep->rx_bd_base;
	for (i = 0; i < RX_RING_SIZE; i++) {
		skb = fep->rx_skbuff[i];

		if (bdp->cbd_bufaddr)
			dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
					FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
		if (skb)
			dev_kfree_skb(skb);
		bdp++;
	}

	bdp = fep->tx_bd_base;
	for (i = 0; i < TX_RING_SIZE; i++)
		kfree(fep->tx_bounce[i]);
}

static int fec_enet_alloc_buffers(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int i;
	struct sk_buff *skb;
	struct bufdesc *bdp;

	bdp = fep->rx_bd_base;
	for (i = 0; i < RX_RING_SIZE; i++) {
		skb = dev_alloc_skb(FEC_ENET_RX_FRSIZE);
		if (!skb) {
			fec_enet_free_buffers(ndev);
			return -ENOMEM;
		}
		fep->rx_skbuff[i] = skb;

		bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data,
				FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
		bdp->cbd_sc = BD_ENET_RX_EMPTY;
		bdp++;
	}

	/* Set the last buffer to wrap. */
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;

	bdp = fep->tx_bd_base;
	for (i = 0; i < TX_RING_SIZE; i++) {
		fep->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);

		bdp->cbd_sc = 0;
		bdp->cbd_bufaddr = 0;
		bdp++;
	}

	/* Set the last buffer to wrap. */
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;

	return 0;
}

static int
fec_enet_open(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int ret;

	/* I should reset the ring buffers here, but I don't yet know
	 * a simple way to do that.
	 */

	ret = fec_enet_alloc_buffers(ndev);
	if (ret)
		return ret;

	/* Probe and connect to the PHY when opening the interface */
	ret = fec_enet_mii_probe(ndev);
	if (ret) {
		fec_enet_free_buffers(ndev);
		return ret;
	}
	phy_start(fep->phy_dev);
	netif_start_queue(ndev);
	fep->opened = 1;
	return 0;
}

static int
fec_enet_close(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	/* Don't know what to do yet. */
	fep->opened = 0;
	netif_stop_queue(ndev);
	fec_stop(ndev);

	if (fep->phy_dev) {
		phy_stop(fep->phy_dev);
		phy_disconnect(fep->phy_dev);
	}

	fec_enet_free_buffers(ndev);

	return 0;
}

/* Set or clear the multicast filter for this adaptor.
 * Skeleton taken from sunlance driver.
 * The CPM Ethernet implementation allows Multicast as well as individual
 * MAC address filtering. Some of the drivers check to make sure it is
 * a group multicast address, and discard those that are not. I guess I
 * will do the same for now, but just remove the test if you want
 * individual filtering as well (do the upper net layers want or support
 * this kind of feature?).
 */

#define HASH_BITS	6	/* #bits in hash */
#define CRC32_POLY	0xEDB88320

static void set_multicast_list(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct netdev_hw_addr *ha;
	unsigned int i, bit, data, crc, tmp;
	unsigned char hash;

	if (ndev->flags & IFF_PROMISC) {
		tmp = readl(fep->hwp + FEC_R_CNTRL);
		tmp |= 0x8;
		writel(tmp, fep->hwp + FEC_R_CNTRL);
		return;
	}

	tmp = readl(fep->hwp + FEC_R_CNTRL);
	tmp &= ~0x8;
	writel(tmp, fep->hwp + FEC_R_CNTRL);

	if (ndev->flags & IFF_ALLMULTI) {
		/* Catch all multicast addresses, so set the
		 * filter to all 1's
		 */
		writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
		writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW);

		return;
	}

	/* Clear the filter and add the addresses in the hash register */
	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);

	netdev_for_each_mc_addr(ha, ndev) {
		/* calculate crc32 value of mac address */
		crc = 0xffffffff;

		for (i = 0; i < ndev->addr_len; i++) {
			data = ha->addr[i];
			for (bit = 0; bit < 8; bit++, data >>= 1) {
				crc = (crc >> 1) ^
				(((crc ^ data) & 1) ? CRC32_POLY : 0);
			}
		}

		/* only the upper 6 bits (HASH_BITS) are used,
		 * which point to a specific bit in the hash registers
		 */
		hash = (crc >> (32 - HASH_BITS)) & 0x3f;

		if (hash > 31) {
			tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
			tmp |= 1 << (hash - 32);
			writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
		} else {
			tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_LOW);
			tmp |= 1 << hash;
			writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
		}
	}
}
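
/*
 * The two GRP_HASH registers thus form one 64-bit filter: the top six
 * CRC bits pick one of 64 bit positions, so a hash of 35 sets bit 3 of
 * FEC_GRP_HASH_TABLE_HIGH while a hash of 5 sets bit 5 of
 * FEC_GRP_HASH_TABLE_LOW.
 */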
1303 | |||
1304 | /* Set a MAC change in hardware. */ | ||
1305 | static int | ||
1306 | fec_set_mac_address(struct net_device *ndev, void *p) | ||
1307 | { | ||
1308 | struct fec_enet_private *fep = netdev_priv(ndev); | ||
1309 | struct sockaddr *addr = p; | ||
1310 | |||
1311 | if (!is_valid_ether_addr(addr->sa_data)) | ||
1312 | return -EADDRNOTAVAIL; | ||
1313 | |||
1314 | memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); | ||
1315 | |||
1316 | writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) | | ||
1317 | (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24), | ||
1318 | fep->hwp + FEC_ADDR_LOW); | ||
1319 | writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24), | ||
1320 | fep->hwp + FEC_ADDR_HIGH); | ||
1321 | return 0; | ||
1322 | } | ||
1323 | |||
1324 | static const struct net_device_ops fec_netdev_ops = { | ||
1325 | .ndo_open = fec_enet_open, | ||
1326 | .ndo_stop = fec_enet_close, | ||
1327 | .ndo_start_xmit = fec_enet_start_xmit, | ||
1328 | .ndo_set_multicast_list = set_multicast_list, | ||
1329 | .ndo_change_mtu = eth_change_mtu, | ||
1330 | .ndo_validate_addr = eth_validate_addr, | ||
1331 | .ndo_tx_timeout = fec_timeout, | ||
1332 | .ndo_set_mac_address = fec_set_mac_address, | ||
1333 | .ndo_do_ioctl = fec_enet_ioctl, | ||
1334 | }; | ||
1335 | |||
1336 | /* | ||
1337 | * XXX: We need to clean up on failure exits here. | ||
1338 | * | ||
1339 | */ | ||
1340 | static int fec_enet_init(struct net_device *ndev) | ||
1341 | { | ||
1342 | struct fec_enet_private *fep = netdev_priv(ndev); | ||
1343 | struct bufdesc *cbd_base; | ||
1344 | struct bufdesc *bdp; | ||
1345 | int i; | ||
1346 | |||
1347 | /* Allocate memory for buffer descriptors. */ | ||
1348 | cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma, | ||
1349 | GFP_KERNEL); | ||
1350 | if (!cbd_base) { | ||
1351 | printk("FEC: allocate descriptor memory failed?\n"); | ||
1352 | return -ENOMEM; | ||
1353 | } | ||
1354 | |||
1355 | spin_lock_init(&fep->hw_lock); | ||
1356 | |||
1357 | fep->netdev = ndev; | ||
1358 | |||
1359 | /* Get the Ethernet address */ | ||
1360 | fec_get_mac(ndev); | ||
1361 | |||
1362 | /* Set receive and transmit descriptor base. */ | ||
1363 | fep->rx_bd_base = cbd_base; | ||
1364 | fep->tx_bd_base = cbd_base + RX_RING_SIZE; | ||
1365 | |||
1366 | /* The FEC Ethernet specific entries in the device structure */ | ||
1367 | ndev->watchdog_timeo = TX_TIMEOUT; | ||
1368 | ndev->netdev_ops = &fec_netdev_ops; | ||
1369 | ndev->ethtool_ops = &fec_enet_ethtool_ops; | ||
1370 | |||
1371 | /* Initialize the receive buffer descriptors. */ | ||
1372 | bdp = fep->rx_bd_base; | ||
1373 | for (i = 0; i < RX_RING_SIZE; i++) { | ||
1374 | |||
1375 | /* Initialize the BD for every fragment in the page. */ | ||
1376 | bdp->cbd_sc = 0; | ||
1377 | bdp++; | ||
1378 | } | ||
1379 | |||
1380 | /* Set the last buffer to wrap */ | ||
1381 | bdp--; | ||
1382 | bdp->cbd_sc |= BD_SC_WRAP; | ||
1383 | |||
1384 | /* ...and the same for transmit */ | ||
1385 | bdp = fep->tx_bd_base; | ||
1386 | for (i = 0; i < TX_RING_SIZE; i++) { | ||
1387 | |||
1388 | /* Initialize the BD for every fragment in the page. */ | ||
1389 | bdp->cbd_sc = 0; | ||
1390 | bdp->cbd_bufaddr = 0; | ||
1391 | bdp++; | ||
1392 | } | ||
1393 | |||
1394 | /* Set the last buffer to wrap */ | ||
1395 | bdp--; | ||
1396 | bdp->cbd_sc |= BD_SC_WRAP; | ||
1397 | |||
1398 | fec_restart(ndev, 0); | ||
1399 | |||
1400 | return 0; | ||
1401 | } | ||
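Both rings are terminated with BD_SC_WRAP rather than a stored length, so a consumer advances linearly and jumps back to the base at the wrap bit. A minimal sketch, assuming the fields initialized above (the helper fec_next_rx_bd is not part of the driver):

static struct bufdesc *fec_next_rx_bd(struct fec_enet_private *fep,
				      struct bufdesc *bdp)
{
	/* wrap back to the ring base once the last descriptor is reached */
	if (bdp->cbd_sc & BD_SC_WRAP)
		return fep->rx_bd_base;
	return bdp + 1;
}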
1402 | |||
1403 | #ifdef CONFIG_OF | ||
1404 | static int __devinit fec_get_phy_mode_dt(struct platform_device *pdev) | ||
1405 | { | ||
1406 | struct device_node *np = pdev->dev.of_node; | ||
1407 | |||
1408 | if (np) | ||
1409 | return of_get_phy_mode(np); | ||
1410 | |||
1411 | return -ENODEV; | ||
1412 | } | ||
1413 | |||
1414 | static int __devinit fec_reset_phy(struct platform_device *pdev) | ||
1415 | { | ||
1416 | int err, phy_reset; | ||
1417 | struct device_node *np = pdev->dev.of_node; | ||
1418 | |||
1419 | if (!np) | ||
1420 | return -ENODEV; | ||
1421 | |||
1422 | phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0); | ||
1423 | err = gpio_request_one(phy_reset, GPIOF_OUT_INIT_LOW, "phy-reset"); | ||
1424 | if (err) { | ||
1425 | pr_warn("FEC: failed to get gpio phy-reset: %d\n", err); | ||
1426 | return err; | ||
1427 | } | ||
1428 | msleep(1); | ||
1429 | gpio_set_value(phy_reset, 1); | ||
1430 | |||
1431 | return 0; | ||
1432 | } | ||
1433 | #else /* CONFIG_OF */ | ||
1434 | static inline int fec_get_phy_mode_dt(struct platform_device *pdev) | ||
1435 | { | ||
1436 | return -ENODEV; | ||
1437 | } | ||
1438 | |||
1439 | static inline int fec_reset_phy(struct platform_device *pdev) | ||
1440 | { | ||
1441 | /* | ||
1442 | * In case of platform probe, the reset has been done | ||
1443 | * by machine code. | ||
1444 | */ | ||
1445 | return 0; | ||
1446 | } | ||
1447 | #endif /* CONFIG_OF */ | ||
1448 | |||
1449 | static int __devinit | ||
1450 | fec_probe(struct platform_device *pdev) | ||
1451 | { | ||
1452 | struct fec_enet_private *fep; | ||
1453 | struct fec_platform_data *pdata; | ||
1454 | struct net_device *ndev; | ||
1455 | int i, irq, ret = 0; | ||
1456 | struct resource *r; | ||
1457 | const struct of_device_id *of_id; | ||
1458 | |||
1459 | of_id = of_match_device(fec_dt_ids, &pdev->dev); | ||
1460 | if (of_id) | ||
1461 | pdev->id_entry = of_id->data; | ||
1462 | |||
1463 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1464 | if (!r) | ||
1465 | return -ENXIO; | ||
1466 | |||
1467 | r = request_mem_region(r->start, resource_size(r), pdev->name); | ||
1468 | if (!r) | ||
1469 | return -EBUSY; | ||
1470 | |||
1471 | /* Init network device */ | ||
1472 | ndev = alloc_etherdev(sizeof(struct fec_enet_private)); | ||
1473 | if (!ndev) { | ||
1474 | ret = -ENOMEM; | ||
1475 | goto failed_alloc_etherdev; | ||
1476 | } | ||
1477 | |||
1478 | SET_NETDEV_DEV(ndev, &pdev->dev); | ||
1479 | |||
1480 | /* setup board info structure */ | ||
1481 | fep = netdev_priv(ndev); | ||
1482 | |||
1483 | fep->hwp = ioremap(r->start, resource_size(r)); | ||
1484 | fep->pdev = pdev; | ||
1485 | |||
1486 | if (!fep->hwp) { | ||
1487 | ret = -ENOMEM; | ||
1488 | goto failed_ioremap; | ||
1489 | } | ||
1490 | |||
1491 | platform_set_drvdata(pdev, ndev); | ||
1492 | |||
1493 | ret = fec_get_phy_mode_dt(pdev); | ||
1494 | if (ret < 0) { | ||
1495 | pdata = pdev->dev.platform_data; | ||
1496 | if (pdata) | ||
1497 | fep->phy_interface = pdata->phy; | ||
1498 | else | ||
1499 | fep->phy_interface = PHY_INTERFACE_MODE_MII; | ||
1500 | } else { | ||
1501 | fep->phy_interface = ret; | ||
1502 | } | ||
1503 | |||
1504 | fec_reset_phy(pdev); | ||
1505 | |||
1506 | /* This device has up to three irqs on some platforms */ | ||
1507 | for (i = 0; i < 3; i++) { | ||
1508 | irq = platform_get_irq(pdev, i); | ||
1509 | if (i && irq < 0) | ||
1510 | break; | ||
1511 | ret = request_irq(irq, fec_enet_interrupt, IRQF_DISABLED, pdev->name, ndev); | ||
1512 | if (ret) { | ||
1513 | while (--i >= 0) { | ||
1514 | irq = platform_get_irq(pdev, i); | ||
1515 | free_irq(irq, ndev); | ||
1516 | } | ||
1517 | goto failed_irq; | ||
1518 | } | ||
1519 | } | ||
1520 | |||
1521 | fep->clk = clk_get(&pdev->dev, "fec_clk"); | ||
1522 | if (IS_ERR(fep->clk)) { | ||
1523 | ret = PTR_ERR(fep->clk); | ||
1524 | goto failed_clk; | ||
1525 | } | ||
1526 | clk_enable(fep->clk); | ||
1527 | |||
1528 | ret = fec_enet_init(ndev); | ||
1529 | if (ret) | ||
1530 | goto failed_init; | ||
1531 | |||
1532 | ret = fec_enet_mii_init(pdev); | ||
1533 | if (ret) | ||
1534 | goto failed_mii_init; | ||
1535 | |||
1536 | /* Carrier starts down, phylib will bring it up */ | ||
1537 | netif_carrier_off(ndev); | ||
1538 | |||
1539 | ret = register_netdev(ndev); | ||
1540 | if (ret) | ||
1541 | goto failed_register; | ||
1542 | |||
1543 | return 0; | ||
1544 | |||
1545 | failed_register: | ||
1546 | fec_enet_mii_remove(fep); | ||
1547 | failed_mii_init: | ||
1548 | failed_init: | ||
1549 | clk_disable(fep->clk); | ||
1550 | clk_put(fep->clk); | ||
1551 | failed_clk: | ||
1552 | for (i = 0; i < 3; i++) { | ||
1553 | irq = platform_get_irq(pdev, i); | ||
1554 | if (irq > 0) | ||
1555 | free_irq(irq, ndev); | ||
1556 | } | ||
1557 | failed_irq: | ||
1558 | iounmap(fep->hwp); | ||
1559 | failed_ioremap: | ||
1560 | free_netdev(ndev); | ||
1561 | failed_alloc_etherdev: | ||
1562 | release_mem_region(r->start, resource_size(r)); | ||
1563 | |||
1564 | return ret; | ||
1565 | } | ||
1566 | |||
1567 | static int __devexit | ||
1568 | fec_drv_remove(struct platform_device *pdev) | ||
1569 | { | ||
1570 | struct net_device *ndev = platform_get_drvdata(pdev); | ||
1571 | struct fec_enet_private *fep = netdev_priv(ndev); | ||
1572 | struct resource *r; | ||
1573 | |||
1574 | fec_stop(ndev); | ||
1575 | fec_enet_mii_remove(fep); | ||
1576 | clk_disable(fep->clk); | ||
1577 | clk_put(fep->clk); | ||
1578 | iounmap(fep->hwp); | ||
1579 | unregister_netdev(ndev); | ||
1580 | free_netdev(ndev); | ||
1581 | |||
1582 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1583 | BUG_ON(!r); | ||
1584 | release_mem_region(r->start, resource_size(r)); | ||
1585 | |||
1586 | platform_set_drvdata(pdev, NULL); | ||
1587 | |||
1588 | return 0; | ||
1589 | } | ||
1590 | |||
1591 | #ifdef CONFIG_PM | ||
1592 | static int | ||
1593 | fec_suspend(struct device *dev) | ||
1594 | { | ||
1595 | struct net_device *ndev = dev_get_drvdata(dev); | ||
1596 | struct fec_enet_private *fep = netdev_priv(ndev); | ||
1597 | |||
1598 | if (netif_running(ndev)) { | ||
1599 | fec_stop(ndev); | ||
1600 | netif_device_detach(ndev); | ||
1601 | } | ||
1602 | clk_disable(fep->clk); | ||
1603 | |||
1604 | return 0; | ||
1605 | } | ||
1606 | |||
1607 | static int | ||
1608 | fec_resume(struct device *dev) | ||
1609 | { | ||
1610 | struct net_device *ndev = dev_get_drvdata(dev); | ||
1611 | struct fec_enet_private *fep = netdev_priv(ndev); | ||
1612 | |||
1613 | clk_enable(fep->clk); | ||
1614 | if (netif_running(ndev)) { | ||
1615 | fec_restart(ndev, fep->full_duplex); | ||
1616 | netif_device_attach(ndev); | ||
1617 | } | ||
1618 | |||
1619 | return 0; | ||
1620 | } | ||
1621 | |||
1622 | static const struct dev_pm_ops fec_pm_ops = { | ||
1623 | .suspend = fec_suspend, | ||
1624 | .resume = fec_resume, | ||
1625 | .freeze = fec_suspend, | ||
1626 | .thaw = fec_resume, | ||
1627 | .poweroff = fec_suspend, | ||
1628 | .restore = fec_resume, | ||
1629 | }; | ||
1630 | #endif | ||
1631 | |||
1632 | static struct platform_driver fec_driver = { | ||
1633 | .driver = { | ||
1634 | .name = DRIVER_NAME, | ||
1635 | .owner = THIS_MODULE, | ||
1636 | #ifdef CONFIG_PM | ||
1637 | .pm = &fec_pm_ops, | ||
1638 | #endif | ||
1639 | .of_match_table = fec_dt_ids, | ||
1640 | }, | ||
1641 | .id_table = fec_devtype, | ||
1642 | .probe = fec_probe, | ||
1643 | .remove = __devexit_p(fec_drv_remove), | ||
1644 | }; | ||
1645 | |||
1646 | static int __init | ||
1647 | fec_enet_module_init(void) | ||
1648 | { | ||
1649 | printk(KERN_INFO "FEC Ethernet Driver\n"); | ||
1650 | |||
1651 | return platform_driver_register(&fec_driver); | ||
1652 | } | ||
1653 | |||
1654 | static void __exit | ||
1655 | fec_enet_cleanup(void) | ||
1656 | { | ||
1657 | platform_driver_unregister(&fec_driver); | ||
1658 | } | ||
1659 | |||
1660 | module_exit(fec_enet_cleanup); | ||
1661 | module_init(fec_enet_module_init); | ||
1662 | |||
1663 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h new file mode 100644 index 000000000000..8b2c6d797e6d --- /dev/null +++ b/drivers/net/ethernet/freescale/fec.h | |||
@@ -0,0 +1,148 @@ | |||
1 | /****************************************************************************/ | ||
2 | |||
3 | /* | ||
4 | * fec.h -- Fast Ethernet Controller for Motorola ColdFire SoC | ||
5 | * processors. | ||
6 | * | ||
7 | * (C) Copyright 2000-2005, Greg Ungerer (gerg@snapgear.com) | ||
8 | * (C) Copyright 2000-2001, Lineo (www.lineo.com) | ||
9 | */ | ||
10 | |||
11 | /****************************************************************************/ | ||
12 | #ifndef FEC_H | ||
13 | #define FEC_H | ||
14 | /****************************************************************************/ | ||
15 | |||
16 | #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ | ||
17 | defined(CONFIG_M520x) || defined(CONFIG_M532x) || \ | ||
18 | defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28) | ||
19 | /* | ||
20 | * Just figures, Motorola would have to change the offsets for | ||
21 | * registers in the same peripheral device on different models | ||
22 | * of the ColdFire! | ||
23 | */ | ||
24 | #define FEC_IEVENT 0x004 /* Interrupt event reg */ | ||
25 | #define FEC_IMASK 0x008 /* Interrupt mask reg */ | ||
26 | #define FEC_R_DES_ACTIVE 0x010 /* Receive descriptor reg */ | ||
27 | #define FEC_X_DES_ACTIVE 0x014 /* Transmit descriptor reg */ | ||
28 | #define FEC_ECNTRL 0x024 /* Ethernet control reg */ | ||
29 | #define FEC_MII_DATA 0x040 /* MII manage frame reg */ | ||
30 | #define FEC_MII_SPEED 0x044 /* MII speed control reg */ | ||
31 | #define FEC_MIB_CTRLSTAT 0x064 /* MIB control/status reg */ | ||
32 | #define FEC_R_CNTRL 0x084 /* Receive control reg */ | ||
33 | #define FEC_X_CNTRL 0x0c4 /* Transmit Control reg */ | ||
34 | #define FEC_ADDR_LOW 0x0e4 /* Low 32bits MAC address */ | ||
35 | #define FEC_ADDR_HIGH 0x0e8 /* High 16bits MAC address */ | ||
36 | #define FEC_OPD 0x0ec /* Opcode + Pause duration */ | ||
37 | #define FEC_HASH_TABLE_HIGH 0x118 /* High 32bits hash table */ | ||
38 | #define FEC_HASH_TABLE_LOW 0x11c /* Low 32bits hash table */ | ||
39 | #define FEC_GRP_HASH_TABLE_HIGH 0x120 /* High 32bits hash table */ | ||
40 | #define FEC_GRP_HASH_TABLE_LOW 0x124 /* Low 32bits hash table */ | ||
41 | #define FEC_X_WMRK 0x144 /* FIFO transmit water mark */ | ||
42 | #define FEC_R_BOUND 0x14c /* FIFO receive bound reg */ | ||
43 | #define FEC_R_FSTART 0x150 /* FIFO receive start reg */ | ||
44 | #define FEC_R_DES_START 0x180 /* Receive descriptor ring */ | ||
45 | #define FEC_X_DES_START 0x184 /* Transmit descriptor ring */ | ||
46 | #define FEC_R_BUFF_SIZE 0x188 /* Maximum receive buff size */ | ||
47 | #define FEC_MIIGSK_CFGR 0x300 /* MIIGSK Configuration reg */ | ||
48 | #define FEC_MIIGSK_ENR 0x308 /* MIIGSK Enable reg */ | ||
49 | |||
50 | #else | ||
51 | |||
52 | #define FEC_ECNTRL 0x000 /* Ethernet control reg */ | ||
53 | #define FEC_IEVENT 0x004 /* Interrupt event reg */ | ||
54 | #define FEC_IMASK 0x008 /* Interrupt mask reg */ | ||
55 | #define FEC_IVEC 0x00c /* Interrupt vec status reg */ | ||
56 | #define FEC_R_DES_ACTIVE 0x010 /* Receive descriptor reg */ | ||
57 | #define FEC_X_DES_ACTIVE 0x014 /* Transmit descriptor reg */ | ||
58 | #define FEC_MII_DATA 0x040 /* MII manage frame reg */ | ||
59 | #define FEC_MII_SPEED 0x044 /* MII speed control reg */ | ||
60 | #define FEC_R_BOUND 0x08c /* FIFO receive bound reg */ | ||
61 | #define FEC_R_FSTART 0x090 /* FIFO receive start reg */ | ||
62 | #define FEC_X_WMRK 0x0a4 /* FIFO transmit water mark */ | ||
63 | #define FEC_X_FSTART 0x0ac /* FIFO transmit start reg */ | ||
64 | #define FEC_R_CNTRL 0x104 /* Receive control reg */ | ||
65 | #define FEC_MAX_FRM_LEN 0x108 /* Maximum frame length reg */ | ||
66 | #define FEC_X_CNTRL 0x144 /* Transmit Control reg */ | ||
67 | #define FEC_ADDR_LOW 0x3c0 /* Low 32bits MAC address */ | ||
68 | #define FEC_ADDR_HIGH 0x3c4 /* High 16bits MAC address */ | ||
69 | #define FEC_GRP_HASH_TABLE_HIGH 0x3c8 /* High 32bits hash table */ | ||
70 | #define FEC_GRP_HASH_TABLE_LOW 0x3cc /* Low 32bits hash table */ | ||
71 | #define FEC_R_DES_START 0x3d0 /* Receive descriptor ring */ | ||
72 | #define FEC_X_DES_START 0x3d4 /* Transmit descriptor ring */ | ||
73 | #define FEC_R_BUFF_SIZE 0x3d8 /* Maximum receive buff size */ | ||
74 | #define FEC_FIFO_RAM 0x400 /* FIFO RAM buffer */ | ||
75 | |||
76 | #endif /* CONFIG_M523x || CONFIG_M527x || CONFIG_M528x || CONFIG_M520x || CONFIG_M532x || CONFIG_ARCH_MXC || CONFIG_SOC_IMX28 */ | ||
77 | |||
78 | |||
79 | /* | ||
80 | * Define the buffer descriptor structure. | ||
81 | */ | ||
82 | #if defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28) | ||
83 | struct bufdesc { | ||
84 | unsigned short cbd_datlen; /* Data length */ | ||
85 | unsigned short cbd_sc; /* Control and status info */ | ||
86 | unsigned long cbd_bufaddr; /* Buffer address */ | ||
87 | }; | ||
88 | #else | ||
89 | struct bufdesc { | ||
90 | unsigned short cbd_sc; /* Control and status info */ | ||
91 | unsigned short cbd_datlen; /* Data length */ | ||
92 | unsigned long cbd_bufaddr; /* Buffer address */ | ||
93 | }; | ||
94 | #endif | ||
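Both variants describe the same 8-byte descriptor the FEC DMA engine steps through; only the order of the two halfwords follows the controller's byte ordering. A hedged compile-time check, assuming the 32-bit platforms this header targets (where unsigned long is 4 bytes):

#include <linux/bug.h>	/* BUILD_BUG_ON() */

/* Sketch only: either bufdesc layout must remain exactly 8 bytes. */
static inline void bufdesc_layout_check(void)
{
	BUILD_BUG_ON(sizeof(struct bufdesc) != 8);
}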
95 | |||
96 | /* | ||
97 | * The following definitions courtesy of commproc.h, which were | ||
98 | * Copyright (c) 1997 Dan Malek (dmalek@jlc.net). | ||
99 | */ | ||
100 | #define BD_SC_EMPTY ((ushort)0x8000) /* Receive is empty */ | ||
101 | #define BD_SC_READY ((ushort)0x8000) /* Transmit is ready */ | ||
102 | #define BD_SC_WRAP ((ushort)0x2000) /* Last buffer descriptor */ | ||
103 | #define BD_SC_INTRPT ((ushort)0x1000) /* Interrupt on change */ | ||
104 | #define BD_SC_CM ((ushort)0x0200) /* Continuous mode */ | ||
105 | #define BD_SC_ID ((ushort)0x0100) /* Rec'd too many idles */ | ||
106 | #define BD_SC_P ((ushort)0x0100) /* xmt preamble */ | ||
107 | #define BD_SC_BR ((ushort)0x0020) /* Break received */ | ||
108 | #define BD_SC_FR ((ushort)0x0010) /* Framing error */ | ||
109 | #define BD_SC_PR ((ushort)0x0008) /* Parity error */ | ||
110 | #define BD_SC_OV ((ushort)0x0002) /* Overrun */ | ||
111 | #define BD_SC_CD ((ushort)0x0001) /* ?? */ | ||
112 | |||
113 | /* Buffer descriptor control/status used by Ethernet receive. | ||
114 | */ | ||
115 | #define BD_ENET_RX_EMPTY ((ushort)0x8000) | ||
116 | #define BD_ENET_RX_WRAP ((ushort)0x2000) | ||
117 | #define BD_ENET_RX_INTR ((ushort)0x1000) | ||
118 | #define BD_ENET_RX_LAST ((ushort)0x0800) | ||
119 | #define BD_ENET_RX_FIRST ((ushort)0x0400) | ||
120 | #define BD_ENET_RX_MISS ((ushort)0x0100) | ||
121 | #define BD_ENET_RX_LG ((ushort)0x0020) | ||
122 | #define BD_ENET_RX_NO ((ushort)0x0010) | ||
123 | #define BD_ENET_RX_SH ((ushort)0x0008) | ||
124 | #define BD_ENET_RX_CR ((ushort)0x0004) | ||
125 | #define BD_ENET_RX_OV ((ushort)0x0002) | ||
126 | #define BD_ENET_RX_CL ((ushort)0x0001) | ||
127 | #define BD_ENET_RX_STATS ((ushort)0x013f) /* All status bits */ | ||
128 | |||
129 | /* Buffer descriptor control/status used by Ethernet transmit. | ||
130 | */ | ||
131 | #define BD_ENET_TX_READY ((ushort)0x8000) | ||
132 | #define BD_ENET_TX_PAD ((ushort)0x4000) | ||
133 | #define BD_ENET_TX_WRAP ((ushort)0x2000) | ||
134 | #define BD_ENET_TX_INTR ((ushort)0x1000) | ||
135 | #define BD_ENET_TX_LAST ((ushort)0x0800) | ||
136 | #define BD_ENET_TX_TC ((ushort)0x0400) | ||
137 | #define BD_ENET_TX_DEF ((ushort)0x0200) | ||
138 | #define BD_ENET_TX_HB ((ushort)0x0100) | ||
139 | #define BD_ENET_TX_LC ((ushort)0x0080) | ||
140 | #define BD_ENET_TX_RL ((ushort)0x0040) | ||
141 | #define BD_ENET_TX_RCMASK ((ushort)0x003c) | ||
142 | #define BD_ENET_TX_UN ((ushort)0x0002) | ||
143 | #define BD_ENET_TX_CSL ((ushort)0x0001) | ||
144 | #define BD_ENET_TX_STATS ((ushort)0x03ff) /* All status bits */ | ||
145 | |||
146 | |||
147 | /****************************************************************************/ | ||
148 | #endif /* FEC_H */ | ||
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c new file mode 100644 index 000000000000..cb4416e591f1 --- /dev/null +++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c | |||
@@ -0,0 +1,1104 @@ | |||
1 | /* | ||
2 | * Driver for the MPC5200 Fast Ethernet Controller | ||
3 | * | ||
4 | * Originally written by Dale Farnsworth <dfarnsworth@mvista.com> and | ||
5 | * now maintained by Sylvain Munaut <tnt@246tNt.com> | ||
6 | * | ||
7 | * Copyright (C) 2007 Domen Puncer, Telargo, Inc. | ||
8 | * Copyright (C) 2007 Sylvain Munaut <tnt@246tNt.com> | ||
9 | * Copyright (C) 2003-2004 MontaVista, Software, Inc. | ||
10 | * | ||
11 | * This file is licensed under the terms of the GNU General Public License | ||
12 | * version 2. This program is licensed "as is" without any warranty of any | ||
13 | * kind, whether express or implied. | ||
14 | * | ||
15 | */ | ||
16 | |||
17 | #include <linux/dma-mapping.h> | ||
18 | #include <linux/module.h> | ||
19 | |||
20 | #include <linux/kernel.h> | ||
21 | #include <linux/types.h> | ||
22 | #include <linux/spinlock.h> | ||
23 | #include <linux/slab.h> | ||
24 | #include <linux/errno.h> | ||
25 | #include <linux/init.h> | ||
26 | #include <linux/interrupt.h> | ||
27 | #include <linux/crc32.h> | ||
28 | #include <linux/hardirq.h> | ||
29 | #include <linux/delay.h> | ||
30 | #include <linux/of_device.h> | ||
31 | #include <linux/of_mdio.h> | ||
32 | #include <linux/of_platform.h> | ||
33 | |||
34 | #include <linux/netdevice.h> | ||
35 | #include <linux/etherdevice.h> | ||
36 | #include <linux/ethtool.h> | ||
37 | #include <linux/skbuff.h> | ||
38 | |||
39 | #include <asm/io.h> | ||
40 | #include <asm/delay.h> | ||
41 | #include <asm/mpc52xx.h> | ||
42 | |||
43 | #include <sysdev/bestcomm/bestcomm.h> | ||
44 | #include <sysdev/bestcomm/fec.h> | ||
45 | |||
46 | #include "fec_mpc52xx.h" | ||
47 | |||
48 | #define DRIVER_NAME "mpc52xx-fec" | ||
49 | |||
50 | /* Private driver data structure */ | ||
51 | struct mpc52xx_fec_priv { | ||
52 | struct net_device *ndev; | ||
53 | int duplex; | ||
54 | int speed; | ||
55 | int r_irq; | ||
56 | int t_irq; | ||
57 | struct mpc52xx_fec __iomem *fec; | ||
58 | struct bcom_task *rx_dmatsk; | ||
59 | struct bcom_task *tx_dmatsk; | ||
60 | spinlock_t lock; | ||
61 | int msg_enable; | ||
62 | |||
63 | /* MDIO link details */ | ||
64 | unsigned int mdio_speed; | ||
65 | struct device_node *phy_node; | ||
66 | struct phy_device *phydev; | ||
67 | enum phy_state link; | ||
68 | int seven_wire_mode; | ||
69 | }; | ||
70 | |||
71 | |||
72 | static irqreturn_t mpc52xx_fec_interrupt(int, void *); | ||
73 | static irqreturn_t mpc52xx_fec_rx_interrupt(int, void *); | ||
74 | static irqreturn_t mpc52xx_fec_tx_interrupt(int, void *); | ||
75 | static void mpc52xx_fec_stop(struct net_device *dev); | ||
76 | static void mpc52xx_fec_start(struct net_device *dev); | ||
77 | static void mpc52xx_fec_reset(struct net_device *dev); | ||
78 | |||
79 | static u8 mpc52xx_fec_mac_addr[6]; | ||
80 | module_param_array_named(mac, mpc52xx_fec_mac_addr, byte, NULL, 0); | ||
81 | MODULE_PARM_DESC(mac, "six hex byte values, e.g. 0x1,0x2,0xc0,0x01,0xba,0xbe"); | ||
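Usage note (module name assumed from the file name): the station address can be supplied at load time, e.g. "modprobe fec_mpc52xx mac=0x00,0x04,0x9f,0x01,0x02,0x03"; otherwise the driver falls back to the address already programmed into the controller via mpc52xx_fec_get_paddr() (see the probe path below).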
82 | |||
83 | #define MPC52xx_MESSAGES_DEFAULT ( NETIF_MSG_DRV | NETIF_MSG_PROBE | \ | ||
84 | NETIF_MSG_LINK | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP) | ||
85 | static int debug = -1; /* the above default */ | ||
86 | module_param(debug, int, 0); | ||
87 | MODULE_PARM_DESC(debug, "debugging messages level"); | ||
88 | |||
89 | static void mpc52xx_fec_tx_timeout(struct net_device *dev) | ||
90 | { | ||
91 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
92 | unsigned long flags; | ||
93 | |||
94 | dev_warn(&dev->dev, "transmit timed out\n"); | ||
95 | |||
96 | spin_lock_irqsave(&priv->lock, flags); | ||
97 | mpc52xx_fec_reset(dev); | ||
98 | dev->stats.tx_errors++; | ||
99 | spin_unlock_irqrestore(&priv->lock, flags); | ||
100 | |||
101 | netif_wake_queue(dev); | ||
102 | } | ||
103 | |||
104 | static void mpc52xx_fec_set_paddr(struct net_device *dev, u8 *mac) | ||
105 | { | ||
106 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
107 | struct mpc52xx_fec __iomem *fec = priv->fec; | ||
108 | |||
109 | out_be32(&fec->paddr1, *(u32 *)(&mac[0])); | ||
110 | out_be32(&fec->paddr2, (*(u16 *)(&mac[4]) << 16) | FEC_PADDR2_TYPE); | ||
111 | } | ||
112 | |||
113 | static void mpc52xx_fec_get_paddr(struct net_device *dev, u8 *mac) | ||
114 | { | ||
115 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
116 | struct mpc52xx_fec __iomem *fec = priv->fec; | ||
117 | |||
118 | *(u32 *)(&mac[0]) = in_be32(&fec->paddr1); | ||
119 | *(u16 *)(&mac[4]) = in_be32(&fec->paddr2) >> 16; | ||
120 | } | ||
121 | |||
122 | static int mpc52xx_fec_set_mac_address(struct net_device *dev, void *addr) | ||
123 | { | ||
124 | struct sockaddr *sock = addr; | ||
125 | |||
126 | memcpy(dev->dev_addr, sock->sa_data, dev->addr_len); | ||
127 | |||
128 | mpc52xx_fec_set_paddr(dev, sock->sa_data); | ||
129 | return 0; | ||
130 | } | ||
131 | |||
132 | static void mpc52xx_fec_free_rx_buffers(struct net_device *dev, struct bcom_task *s) | ||
133 | { | ||
134 | while (!bcom_queue_empty(s)) { | ||
135 | struct bcom_fec_bd *bd; | ||
136 | struct sk_buff *skb; | ||
137 | |||
138 | skb = bcom_retrieve_buffer(s, NULL, (struct bcom_bd **)&bd); | ||
139 | dma_unmap_single(dev->dev.parent, bd->skb_pa, skb->len, | ||
140 | DMA_FROM_DEVICE); | ||
141 | kfree_skb(skb); | ||
142 | } | ||
143 | } | ||
144 | |||
145 | static void | ||
146 | mpc52xx_fec_rx_submit(struct net_device *dev, struct sk_buff *rskb) | ||
147 | { | ||
148 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
149 | struct bcom_fec_bd *bd; | ||
150 | |||
151 | bd = (struct bcom_fec_bd *) bcom_prepare_next_buffer(priv->rx_dmatsk); | ||
152 | bd->status = FEC_RX_BUFFER_SIZE; | ||
153 | bd->skb_pa = dma_map_single(dev->dev.parent, rskb->data, | ||
154 | FEC_RX_BUFFER_SIZE, DMA_FROM_DEVICE); | ||
155 | bcom_submit_next_buffer(priv->rx_dmatsk, rskb); | ||
156 | } | ||
157 | |||
158 | static int mpc52xx_fec_alloc_rx_buffers(struct net_device *dev, struct bcom_task *rxtsk) | ||
159 | { | ||
160 | struct sk_buff *skb; | ||
161 | |||
162 | while (!bcom_queue_full(rxtsk)) { | ||
163 | skb = dev_alloc_skb(FEC_RX_BUFFER_SIZE); | ||
164 | if (!skb) | ||
165 | return -EAGAIN; | ||
166 | |||
167 | /* zero out the initial receive buffers to aid debugging */ | ||
168 | memset(skb->data, 0, FEC_RX_BUFFER_SIZE); | ||
169 | mpc52xx_fec_rx_submit(dev, skb); | ||
170 | } | ||
171 | return 0; | ||
172 | } | ||
173 | |||
174 | /* based on generic_adjust_link from fs_enet-main.c */ | ||
175 | static void mpc52xx_fec_adjust_link(struct net_device *dev) | ||
176 | { | ||
177 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
178 | struct phy_device *phydev = priv->phydev; | ||
179 | int new_state = 0; | ||
180 | |||
181 | if (phydev->link != PHY_DOWN) { | ||
182 | if (phydev->duplex != priv->duplex) { | ||
183 | struct mpc52xx_fec __iomem *fec = priv->fec; | ||
184 | u32 rcntrl; | ||
185 | u32 tcntrl; | ||
186 | |||
187 | new_state = 1; | ||
188 | priv->duplex = phydev->duplex; | ||
189 | |||
190 | rcntrl = in_be32(&fec->r_cntrl); | ||
191 | tcntrl = in_be32(&fec->x_cntrl); | ||
192 | |||
193 | rcntrl &= ~FEC_RCNTRL_DRT; | ||
194 | tcntrl &= ~FEC_TCNTRL_FDEN; | ||
195 | if (phydev->duplex == DUPLEX_FULL) | ||
196 | tcntrl |= FEC_TCNTRL_FDEN; /* FD enable */ | ||
197 | else | ||
198 | rcntrl |= FEC_RCNTRL_DRT; /* disable Rx on Tx (HD) */ | ||
199 | |||
200 | out_be32(&fec->r_cntrl, rcntrl); | ||
201 | out_be32(&fec->x_cntrl, tcntrl); | ||
202 | } | ||
203 | |||
204 | if (phydev->speed != priv->speed) { | ||
205 | new_state = 1; | ||
206 | priv->speed = phydev->speed; | ||
207 | } | ||
208 | |||
209 | if (priv->link == PHY_DOWN) { | ||
210 | new_state = 1; | ||
211 | priv->link = phydev->link; | ||
212 | } | ||
213 | |||
214 | } else if (priv->link) { | ||
215 | new_state = 1; | ||
216 | priv->link = PHY_DOWN; | ||
217 | priv->speed = 0; | ||
218 | priv->duplex = -1; | ||
219 | } | ||
220 | |||
221 | if (new_state && netif_msg_link(priv)) | ||
222 | phy_print_status(phydev); | ||
223 | } | ||
224 | |||
225 | static int mpc52xx_fec_open(struct net_device *dev) | ||
226 | { | ||
227 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
228 | int err = -EBUSY; | ||
229 | |||
230 | if (priv->phy_node) { | ||
231 | priv->phydev = of_phy_connect(priv->ndev, priv->phy_node, | ||
232 | mpc52xx_fec_adjust_link, 0, 0); | ||
233 | if (!priv->phydev) { | ||
234 | dev_err(&dev->dev, "of_phy_connect failed\n"); | ||
235 | return -ENODEV; | ||
236 | } | ||
237 | phy_start(priv->phydev); | ||
238 | } | ||
239 | |||
240 | if (request_irq(dev->irq, mpc52xx_fec_interrupt, IRQF_SHARED, | ||
241 | DRIVER_NAME "_ctrl", dev)) { | ||
242 | dev_err(&dev->dev, "ctrl interrupt request failed\n"); | ||
243 | goto free_phy; | ||
244 | } | ||
245 | if (request_irq(priv->r_irq, mpc52xx_fec_rx_interrupt, 0, | ||
246 | DRIVER_NAME "_rx", dev)) { | ||
247 | dev_err(&dev->dev, "rx interrupt request failed\n"); | ||
248 | goto free_ctrl_irq; | ||
249 | } | ||
250 | if (request_irq(priv->t_irq, mpc52xx_fec_tx_interrupt, 0, | ||
251 | DRIVER_NAME "_tx", dev)) { | ||
252 | dev_err(&dev->dev, "tx interrupt request failed\n"); | ||
253 | goto free_2irqs; | ||
254 | } | ||
255 | |||
256 | bcom_fec_rx_reset(priv->rx_dmatsk); | ||
257 | bcom_fec_tx_reset(priv->tx_dmatsk); | ||
258 | |||
259 | err = mpc52xx_fec_alloc_rx_buffers(dev, priv->rx_dmatsk); | ||
260 | if (err) { | ||
261 | dev_err(&dev->dev, "mpc52xx_fec_alloc_rx_buffers failed\n"); | ||
262 | goto free_irqs; | ||
263 | } | ||
264 | |||
265 | bcom_enable(priv->rx_dmatsk); | ||
266 | bcom_enable(priv->tx_dmatsk); | ||
267 | |||
268 | mpc52xx_fec_start(dev); | ||
269 | |||
270 | netif_start_queue(dev); | ||
271 | |||
272 | return 0; | ||
273 | |||
274 | free_irqs: | ||
275 | free_irq(priv->t_irq, dev); | ||
276 | free_2irqs: | ||
277 | free_irq(priv->r_irq, dev); | ||
278 | free_ctrl_irq: | ||
279 | free_irq(dev->irq, dev); | ||
280 | free_phy: | ||
281 | if (priv->phydev) { | ||
282 | phy_stop(priv->phydev); | ||
283 | phy_disconnect(priv->phydev); | ||
284 | priv->phydev = NULL; | ||
285 | } | ||
286 | |||
287 | return err; | ||
288 | } | ||
289 | |||
290 | static int mpc52xx_fec_close(struct net_device *dev) | ||
291 | { | ||
292 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
293 | |||
294 | netif_stop_queue(dev); | ||
295 | |||
296 | mpc52xx_fec_stop(dev); | ||
297 | |||
298 | mpc52xx_fec_free_rx_buffers(dev, priv->rx_dmatsk); | ||
299 | |||
300 | free_irq(dev->irq, dev); | ||
301 | free_irq(priv->r_irq, dev); | ||
302 | free_irq(priv->t_irq, dev); | ||
303 | |||
304 | if (priv->phydev) { | ||
305 | /* power down phy */ | ||
306 | phy_stop(priv->phydev); | ||
307 | phy_disconnect(priv->phydev); | ||
308 | priv->phydev = NULL; | ||
309 | } | ||
310 | |||
311 | return 0; | ||
312 | } | ||
313 | |||
314 | /* This will only be invoked if your driver is _not_ in XOFF state. | ||
315 | * What this means is that you need not check it, and that this | ||
316 | * invariant will hold if you make sure that the netif_*_queue() | ||
317 | * calls are done at the proper times. | ||
318 | */ | ||
319 | static int mpc52xx_fec_start_xmit(struct sk_buff *skb, struct net_device *dev) | ||
320 | { | ||
321 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
322 | struct bcom_fec_bd *bd; | ||
323 | unsigned long flags; | ||
324 | |||
325 | if (bcom_queue_full(priv->tx_dmatsk)) { | ||
326 | if (net_ratelimit()) | ||
327 | dev_err(&dev->dev, "transmit queue overrun\n"); | ||
328 | return NETDEV_TX_BUSY; | ||
329 | } | ||
330 | |||
331 | spin_lock_irqsave(&priv->lock, flags); | ||
332 | |||
333 | bd = (struct bcom_fec_bd *) | ||
334 | bcom_prepare_next_buffer(priv->tx_dmatsk); | ||
335 | |||
336 | bd->status = skb->len | BCOM_FEC_TX_BD_TFD | BCOM_FEC_TX_BD_TC; | ||
337 | bd->skb_pa = dma_map_single(dev->dev.parent, skb->data, skb->len, | ||
338 | DMA_TO_DEVICE); | ||
339 | |||
340 | skb_tx_timestamp(skb); | ||
341 | bcom_submit_next_buffer(priv->tx_dmatsk, skb); | ||
342 | spin_unlock_irqrestore(&priv->lock, flags); | ||
343 | |||
344 | if (bcom_queue_full(priv->tx_dmatsk)) { | ||
345 | netif_stop_queue(dev); | ||
346 | } | ||
347 | |||
348 | return NETDEV_TX_OK; | ||
349 | } | ||
350 | |||
351 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
352 | static void mpc52xx_fec_poll_controller(struct net_device *dev) | ||
353 | { | ||
354 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
355 | |||
356 | disable_irq(priv->t_irq); | ||
357 | mpc52xx_fec_tx_interrupt(priv->t_irq, dev); | ||
358 | enable_irq(priv->t_irq); | ||
359 | disable_irq(priv->r_irq); | ||
360 | mpc52xx_fec_rx_interrupt(priv->r_irq, dev); | ||
361 | enable_irq(priv->r_irq); | ||
362 | } | ||
363 | #endif | ||
364 | |||
365 | |||
366 | /* This handles BestComm transmit task interrupts | ||
367 | */ | ||
368 | static irqreturn_t mpc52xx_fec_tx_interrupt(int irq, void *dev_id) | ||
369 | { | ||
370 | struct net_device *dev = dev_id; | ||
371 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
372 | |||
373 | spin_lock(&priv->lock); | ||
374 | while (bcom_buffer_done(priv->tx_dmatsk)) { | ||
375 | struct sk_buff *skb; | ||
376 | struct bcom_fec_bd *bd; | ||
377 | skb = bcom_retrieve_buffer(priv->tx_dmatsk, NULL, | ||
378 | (struct bcom_bd **)&bd); | ||
379 | dma_unmap_single(dev->dev.parent, bd->skb_pa, skb->len, | ||
380 | DMA_TO_DEVICE); | ||
381 | |||
382 | dev_kfree_skb_irq(skb); | ||
383 | } | ||
384 | spin_unlock(&priv->lock); | ||
385 | |||
386 | netif_wake_queue(dev); | ||
387 | |||
388 | return IRQ_HANDLED; | ||
389 | } | ||
390 | |||
391 | static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id) | ||
392 | { | ||
393 | struct net_device *dev = dev_id; | ||
394 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
395 | struct sk_buff *rskb; /* received sk_buff */ | ||
396 | struct sk_buff *skb; /* new sk_buff to enqueue in its place */ | ||
397 | struct bcom_fec_bd *bd; | ||
398 | u32 status, physaddr; | ||
399 | int length; | ||
400 | |||
401 | spin_lock(&priv->lock); | ||
402 | |||
403 | while (bcom_buffer_done(priv->rx_dmatsk)) { | ||
404 | |||
405 | rskb = bcom_retrieve_buffer(priv->rx_dmatsk, &status, | ||
406 | (struct bcom_bd **)&bd); | ||
407 | physaddr = bd->skb_pa; | ||
408 | |||
409 | /* Test for errors in received frame */ | ||
410 | if (status & BCOM_FEC_RX_BD_ERRORS) { | ||
411 | /* Drop packet and reuse the buffer */ | ||
412 | mpc52xx_fec_rx_submit(dev, rskb); | ||
413 | dev->stats.rx_dropped++; | ||
414 | continue; | ||
415 | } | ||
416 | |||
417 | /* skbs are allocated on open, so now we allocate a new one, | ||
418 | * and remove the old (with the packet) */ | ||
419 | skb = dev_alloc_skb(FEC_RX_BUFFER_SIZE); | ||
420 | if (!skb) { | ||
421 | /* Can't get a new one: reuse the same buffer & drop the packet */ | ||
422 | dev_notice(&dev->dev, "Low memory - dropped packet.\n"); | ||
423 | mpc52xx_fec_rx_submit(dev, rskb); | ||
424 | dev->stats.rx_dropped++; | ||
425 | continue; | ||
426 | } | ||
427 | |||
428 | /* Enqueue the new sk_buff back on the hardware */ | ||
429 | mpc52xx_fec_rx_submit(dev, skb); | ||
430 | |||
431 | /* Process the received skb - Drop the spin lock while | ||
432 | * calling into the network stack */ | ||
433 | spin_unlock(&priv->lock); | ||
434 | |||
435 | dma_unmap_single(dev->dev.parent, physaddr, rskb->len, | ||
436 | DMA_FROM_DEVICE); | ||
437 | length = status & BCOM_FEC_RX_BD_LEN_MASK; | ||
438 | skb_put(rskb, length - 4); /* length without CRC32 */ | ||
439 | rskb->protocol = eth_type_trans(rskb, dev); | ||
440 | if (!skb_defer_rx_timestamp(rskb)) | ||
441 | netif_rx(rskb); | ||
442 | |||
443 | spin_lock(&priv->lock); | ||
444 | } | ||
445 | |||
446 | spin_unlock(&priv->lock); | ||
447 | |||
448 | return IRQ_HANDLED; | ||
449 | } | ||
450 | |||
451 | static irqreturn_t mpc52xx_fec_interrupt(int irq, void *dev_id) | ||
452 | { | ||
453 | struct net_device *dev = dev_id; | ||
454 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
455 | struct mpc52xx_fec __iomem *fec = priv->fec; | ||
456 | u32 ievent; | ||
457 | |||
458 | ievent = in_be32(&fec->ievent); | ||
459 | |||
460 | ievent &= ~FEC_IEVENT_MII; /* mii is handled separately */ | ||
461 | if (!ievent) | ||
462 | return IRQ_NONE; | ||
463 | |||
464 | out_be32(&fec->ievent, ievent); /* clear pending events */ | ||
465 | |||
466 | /* on fifo error, soft-reset fec */ | ||
467 | if (ievent & (FEC_IEVENT_RFIFO_ERROR | FEC_IEVENT_XFIFO_ERROR)) { | ||
468 | |||
469 | if (net_ratelimit() && (ievent & FEC_IEVENT_RFIFO_ERROR)) | ||
470 | dev_warn(&dev->dev, "FEC_IEVENT_RFIFO_ERROR\n"); | ||
471 | if (net_ratelimit() && (ievent & FEC_IEVENT_XFIFO_ERROR)) | ||
472 | dev_warn(&dev->dev, "FEC_IEVENT_XFIFO_ERROR\n"); | ||
473 | |||
474 | spin_lock(&priv->lock); | ||
475 | mpc52xx_fec_reset(dev); | ||
476 | spin_unlock(&priv->lock); | ||
477 | |||
478 | return IRQ_HANDLED; | ||
479 | } | ||
480 | |||
481 | if (ievent & ~FEC_IEVENT_TFINT) | ||
482 | dev_dbg(&dev->dev, "ievent: %08x\n", ievent); | ||
483 | |||
484 | return IRQ_HANDLED; | ||
485 | } | ||
486 | |||
487 | /* | ||
488 | * Get the current statistics. | ||
489 | * This may be called with the card open or closed. | ||
490 | */ | ||
491 | static struct net_device_stats *mpc52xx_fec_get_stats(struct net_device *dev) | ||
492 | { | ||
493 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
494 | struct net_device_stats *stats = &dev->stats; | ||
495 | struct mpc52xx_fec __iomem *fec = priv->fec; | ||
496 | |||
497 | stats->rx_bytes = in_be32(&fec->rmon_r_octets); | ||
498 | stats->rx_packets = in_be32(&fec->rmon_r_packets); | ||
499 | stats->rx_errors = in_be32(&fec->rmon_r_crc_align) + | ||
500 | in_be32(&fec->rmon_r_undersize) + | ||
501 | in_be32(&fec->rmon_r_oversize) + | ||
502 | in_be32(&fec->rmon_r_frag) + | ||
503 | in_be32(&fec->rmon_r_jab); | ||
504 | |||
505 | stats->tx_bytes = in_be32(&fec->rmon_t_octets); | ||
506 | stats->tx_packets = in_be32(&fec->rmon_t_packets); | ||
507 | stats->tx_errors = in_be32(&fec->rmon_t_crc_align) + | ||
508 | in_be32(&fec->rmon_t_undersize) + | ||
509 | in_be32(&fec->rmon_t_oversize) + | ||
510 | in_be32(&fec->rmon_t_frag) + | ||
511 | in_be32(&fec->rmon_t_jab); | ||
512 | |||
513 | stats->multicast = in_be32(&fec->rmon_r_mc_pkt); | ||
514 | stats->collisions = in_be32(&fec->rmon_t_col); | ||
515 | |||
516 | /* detailed rx_errors: */ | ||
517 | stats->rx_length_errors = in_be32(&fec->rmon_r_undersize) | ||
518 | + in_be32(&fec->rmon_r_oversize) | ||
519 | + in_be32(&fec->rmon_r_frag) | ||
520 | + in_be32(&fec->rmon_r_jab); | ||
521 | stats->rx_over_errors = in_be32(&fec->r_macerr); | ||
522 | stats->rx_crc_errors = in_be32(&fec->ieee_r_crc); | ||
523 | stats->rx_frame_errors = in_be32(&fec->ieee_r_align); | ||
524 | stats->rx_fifo_errors = in_be32(&fec->rmon_r_drop); | ||
525 | stats->rx_missed_errors = in_be32(&fec->rmon_r_drop); | ||
526 | |||
527 | /* detailed tx_errors: */ | ||
528 | stats->tx_aborted_errors = 0; | ||
529 | stats->tx_carrier_errors = in_be32(&fec->ieee_t_cserr); | ||
530 | stats->tx_fifo_errors = in_be32(&fec->rmon_t_drop); | ||
531 | stats->tx_heartbeat_errors = in_be32(&fec->ieee_t_sqe); | ||
532 | stats->tx_window_errors = in_be32(&fec->ieee_t_lcol); | ||
533 | |||
534 | return stats; | ||
535 | } | ||
536 | |||
537 | /* | ||
538 | * Read MIB counters in order to reset them, | ||
539 | * then zero all the stats fields in memory | ||
540 | */ | ||
541 | static void mpc52xx_fec_reset_stats(struct net_device *dev) | ||
542 | { | ||
543 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
544 | struct mpc52xx_fec __iomem *fec = priv->fec; | ||
545 | |||
546 | out_be32(&fec->mib_control, FEC_MIB_DISABLE); | ||
547 | memset_io(&fec->rmon_t_drop, 0, | ||
548 | offsetof(struct mpc52xx_fec, reserved10) - | ||
549 | offsetof(struct mpc52xx_fec, rmon_t_drop)); | ||
550 | out_be32(&fec->mib_control, 0); | ||
551 | |||
552 | memset(&dev->stats, 0, sizeof(dev->stats)); | ||
553 | } | ||
554 | |||
555 | /* | ||
556 | * Set or clear the multicast filter for this adaptor. | ||
557 | */ | ||
558 | static void mpc52xx_fec_set_multicast_list(struct net_device *dev) | ||
559 | { | ||
560 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
561 | struct mpc52xx_fec __iomem *fec = priv->fec; | ||
562 | u32 rx_control; | ||
563 | |||
564 | rx_control = in_be32(&fec->r_cntrl); | ||
565 | |||
566 | if (dev->flags & IFF_PROMISC) { | ||
567 | rx_control |= FEC_RCNTRL_PROM; | ||
568 | out_be32(&fec->r_cntrl, rx_control); | ||
569 | } else { | ||
570 | rx_control &= ~FEC_RCNTRL_PROM; | ||
571 | out_be32(&fec->r_cntrl, rx_control); | ||
572 | |||
573 | if (dev->flags & IFF_ALLMULTI) { | ||
574 | out_be32(&fec->gaddr1, 0xffffffff); | ||
575 | out_be32(&fec->gaddr2, 0xffffffff); | ||
576 | } else { | ||
577 | u32 crc; | ||
578 | struct netdev_hw_addr *ha; | ||
579 | u32 gaddr1 = 0x00000000; | ||
580 | u32 gaddr2 = 0x00000000; | ||
581 | |||
582 | netdev_for_each_mc_addr(ha, dev) { | ||
583 | crc = ether_crc_le(6, ha->addr) >> 26; | ||
584 | if (crc >= 32) | ||
585 | gaddr1 |= 1 << (crc-32); | ||
586 | else | ||
587 | gaddr2 |= 1 << crc; | ||
588 | } | ||
589 | out_be32(&fec->gaddr1, gaddr1); | ||
590 | out_be32(&fec->gaddr2, gaddr2); | ||
591 | } | ||
592 | } | ||
593 | } | ||
594 | |||
595 | /** | ||
596 | * mpc52xx_fec_hw_init | ||
597 | * @dev: network device | ||
598 | * | ||
599 | * Setup various hardware setting, only needed once on start | ||
600 | */ | ||
601 | static void mpc52xx_fec_hw_init(struct net_device *dev) | ||
602 | { | ||
603 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
604 | struct mpc52xx_fec __iomem *fec = priv->fec; | ||
605 | int i; | ||
606 | |||
607 | /* Whack a reset. We should wait for this. */ | ||
608 | out_be32(&fec->ecntrl, FEC_ECNTRL_RESET); | ||
609 | for (i = 0; i < FEC_RESET_DELAY; ++i) { | ||
610 | if ((in_be32(&fec->ecntrl) & FEC_ECNTRL_RESET) == 0) | ||
611 | break; | ||
612 | udelay(1); | ||
613 | } | ||
614 | if (i == FEC_RESET_DELAY) | ||
615 | dev_err(&dev->dev, "FEC Reset timeout!\n"); | ||
616 | |||
617 | /* set pause to 0x20 frames */ | ||
618 | out_be32(&fec->op_pause, FEC_OP_PAUSE_OPCODE | 0x20); | ||
619 | |||
620 | /* high service request will be deasserted when there's < 7 bytes in fifo | ||
621 | * low service request will be deasserted when there's < 4*7 bytes in fifo | ||
622 | */ | ||
623 | out_be32(&fec->rfifo_cntrl, FEC_FIFO_CNTRL_FRAME | FEC_FIFO_CNTRL_LTG_7); | ||
624 | out_be32(&fec->tfifo_cntrl, FEC_FIFO_CNTRL_FRAME | FEC_FIFO_CNTRL_LTG_7); | ||
625 | |||
626 | /* alarm when <= x bytes in FIFO */ | ||
627 | out_be32(&fec->rfifo_alarm, 0x0000030c); | ||
628 | out_be32(&fec->tfifo_alarm, 0x00000100); | ||
629 | |||
630 | /* begin transmission when 256 bytes are in FIFO (or EOF or FIFO full) */ | ||
631 | out_be32(&fec->x_wmrk, FEC_FIFO_WMRK_256B); | ||
632 | |||
633 | /* enable crc generation */ | ||
634 | out_be32(&fec->xmit_fsm, FEC_XMIT_FSM_APPEND_CRC | FEC_XMIT_FSM_ENABLE_CRC); | ||
635 | out_be32(&fec->iaddr1, 0x00000000); /* No individual filter */ | ||
636 | out_be32(&fec->iaddr2, 0x00000000); /* No individual filter */ | ||
637 | |||
638 | /* set phy speed. | ||
639 | * this can't be done in phy driver, since it needs to be called | ||
640 | * before the FEC itself is brought up (even on resume) */ | ||
641 | out_be32(&fec->mii_speed, priv->mdio_speed); | ||
642 | } | ||
643 | |||
644 | /** | ||
645 | * mpc52xx_fec_start | ||
646 | * @dev: network device | ||
647 | * | ||
648 | * This function is called to start or restart the FEC during a link | ||
649 | * change. This happens on fifo errors or when switching between half | ||
650 | * and full duplex. | ||
651 | */ | ||
652 | static void mpc52xx_fec_start(struct net_device *dev) | ||
653 | { | ||
654 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
655 | struct mpc52xx_fec __iomem *fec = priv->fec; | ||
656 | u32 rcntrl; | ||
657 | u32 tcntrl; | ||
658 | u32 tmp; | ||
659 | |||
660 | /* clear sticky error bits */ | ||
661 | tmp = FEC_FIFO_STATUS_ERR | FEC_FIFO_STATUS_UF | FEC_FIFO_STATUS_OF; | ||
662 | out_be32(&fec->rfifo_status, in_be32(&fec->rfifo_status) & tmp); | ||
663 | out_be32(&fec->tfifo_status, in_be32(&fec->tfifo_status) & tmp); | ||
664 | |||
665 | /* FIFOs will reset on mpc52xx_fec_enable */ | ||
666 | out_be32(&fec->reset_cntrl, FEC_RESET_CNTRL_ENABLE_IS_RESET); | ||
667 | |||
668 | /* Set station address. */ | ||
669 | mpc52xx_fec_set_paddr(dev, dev->dev_addr); | ||
670 | |||
671 | mpc52xx_fec_set_multicast_list(dev); | ||
672 | |||
673 | /* set max frame len, enable flow control, select mii mode */ | ||
674 | rcntrl = FEC_RX_BUFFER_SIZE << 16; /* max frame length */ | ||
675 | rcntrl |= FEC_RCNTRL_FCE; | ||
676 | |||
677 | if (!priv->seven_wire_mode) | ||
678 | rcntrl |= FEC_RCNTRL_MII_MODE; | ||
679 | |||
680 | if (priv->duplex == DUPLEX_FULL) | ||
681 | tcntrl = FEC_TCNTRL_FDEN; /* FD enable */ | ||
682 | else { | ||
683 | rcntrl |= FEC_RCNTRL_DRT; /* disable Rx on Tx (HD) */ | ||
684 | tcntrl = 0; | ||
685 | } | ||
686 | out_be32(&fec->r_cntrl, rcntrl); | ||
687 | out_be32(&fec->x_cntrl, tcntrl); | ||
688 | |||
689 | /* Clear any outstanding interrupt. */ | ||
690 | out_be32(&fec->ievent, 0xffffffff); | ||
691 | |||
692 | /* Enable interrupts we wish to service. */ | ||
693 | out_be32(&fec->imask, FEC_IMASK_ENABLE); | ||
694 | |||
695 | /* And last, enable the transmit and receive processing. */ | ||
696 | out_be32(&fec->ecntrl, FEC_ECNTRL_ETHER_EN); | ||
697 | out_be32(&fec->r_des_active, 0x01000000); | ||
698 | } | ||
699 | |||
700 | /** | ||
701 | * mpc52xx_fec_stop | ||
702 | * @dev: network device | ||
703 | * | ||
704 | * stop all activity on fec and empty dma buffers | ||
705 | */ | ||
706 | static void mpc52xx_fec_stop(struct net_device *dev) | ||
707 | { | ||
708 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
709 | struct mpc52xx_fec __iomem *fec = priv->fec; | ||
710 | unsigned long timeout; | ||
711 | |||
712 | /* disable all interrupts */ | ||
713 | out_be32(&fec->imask, 0); | ||
714 | |||
715 | /* Disable the rx task. */ | ||
716 | bcom_disable(priv->rx_dmatsk); | ||
717 | |||
718 | /* Wait for tx queue to drain, but only if we're in process context */ | ||
719 | if (!in_interrupt()) { | ||
720 | timeout = jiffies + msecs_to_jiffies(2000); | ||
721 | while (time_before(jiffies, timeout) && | ||
722 | !bcom_queue_empty(priv->tx_dmatsk)) | ||
723 | msleep(100); | ||
724 | |||
725 | if (time_after_eq(jiffies, timeout)) | ||
726 | dev_err(&dev->dev, "queues didn't drain\n"); | ||
727 | #if 1 | ||
728 | if (time_after_eq(jiffies, timeout)) { | ||
729 | dev_err(&dev->dev, " tx: index: %i, outdex: %i\n", | ||
730 | priv->tx_dmatsk->index, | ||
731 | priv->tx_dmatsk->outdex); | ||
732 | dev_err(&dev->dev, " rx: index: %i, outdex: %i\n", | ||
733 | priv->rx_dmatsk->index, | ||
734 | priv->rx_dmatsk->outdex); | ||
735 | } | ||
736 | #endif | ||
737 | } | ||
738 | |||
739 | bcom_disable(priv->tx_dmatsk); | ||
740 | |||
741 | /* Stop FEC */ | ||
742 | out_be32(&fec->ecntrl, in_be32(&fec->ecntrl) & ~FEC_ECNTRL_ETHER_EN); | ||
743 | } | ||
744 | |||
745 | /* reset fec and bestcomm tasks */ | ||
746 | static void mpc52xx_fec_reset(struct net_device *dev) | ||
747 | { | ||
748 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
749 | struct mpc52xx_fec __iomem *fec = priv->fec; | ||
750 | |||
751 | mpc52xx_fec_stop(dev); | ||
752 | |||
753 | out_be32(&fec->rfifo_status, in_be32(&fec->rfifo_status)); | ||
754 | out_be32(&fec->reset_cntrl, FEC_RESET_CNTRL_RESET_FIFO); | ||
755 | |||
756 | mpc52xx_fec_free_rx_buffers(dev, priv->rx_dmatsk); | ||
757 | |||
758 | mpc52xx_fec_hw_init(dev); | ||
759 | |||
760 | bcom_fec_rx_reset(priv->rx_dmatsk); | ||
761 | bcom_fec_tx_reset(priv->tx_dmatsk); | ||
762 | |||
763 | mpc52xx_fec_alloc_rx_buffers(dev, priv->rx_dmatsk); | ||
764 | |||
765 | bcom_enable(priv->rx_dmatsk); | ||
766 | bcom_enable(priv->tx_dmatsk); | ||
767 | |||
768 | mpc52xx_fec_start(dev); | ||
769 | |||
770 | netif_wake_queue(dev); | ||
771 | } | ||
772 | |||
773 | |||
774 | /* ethtool interface */ | ||
775 | |||
776 | static int mpc52xx_fec_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
777 | { | ||
778 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
779 | |||
780 | if (!priv->phydev) | ||
781 | return -ENODEV; | ||
782 | |||
783 | return phy_ethtool_gset(priv->phydev, cmd); | ||
784 | } | ||
785 | |||
786 | static int mpc52xx_fec_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
787 | { | ||
788 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
789 | |||
790 | if (!priv->phydev) | ||
791 | return -ENODEV; | ||
792 | |||
793 | return phy_ethtool_sset(priv->phydev, cmd); | ||
794 | } | ||
795 | |||
796 | static u32 mpc52xx_fec_get_msglevel(struct net_device *dev) | ||
797 | { | ||
798 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
799 | return priv->msg_enable; | ||
800 | } | ||
801 | |||
802 | static void mpc52xx_fec_set_msglevel(struct net_device *dev, u32 level) | ||
803 | { | ||
804 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
805 | priv->msg_enable = level; | ||
806 | } | ||
807 | |||
808 | static const struct ethtool_ops mpc52xx_fec_ethtool_ops = { | ||
809 | .get_settings = mpc52xx_fec_get_settings, | ||
810 | .set_settings = mpc52xx_fec_set_settings, | ||
811 | .get_link = ethtool_op_get_link, | ||
812 | .get_msglevel = mpc52xx_fec_get_msglevel, | ||
813 | .set_msglevel = mpc52xx_fec_set_msglevel, | ||
814 | }; | ||
815 | |||
816 | |||
817 | static int mpc52xx_fec_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | ||
818 | { | ||
819 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
820 | |||
821 | if (!priv->phydev) | ||
822 | return -ENOTSUPP; | ||
823 | |||
824 | return phy_mii_ioctl(priv->phydev, rq, cmd); | ||
825 | } | ||
826 | |||
827 | static const struct net_device_ops mpc52xx_fec_netdev_ops = { | ||
828 | .ndo_open = mpc52xx_fec_open, | ||
829 | .ndo_stop = mpc52xx_fec_close, | ||
830 | .ndo_start_xmit = mpc52xx_fec_start_xmit, | ||
831 | .ndo_set_multicast_list = mpc52xx_fec_set_multicast_list, | ||
832 | .ndo_set_mac_address = mpc52xx_fec_set_mac_address, | ||
833 | .ndo_validate_addr = eth_validate_addr, | ||
834 | .ndo_do_ioctl = mpc52xx_fec_ioctl, | ||
835 | .ndo_change_mtu = eth_change_mtu, | ||
836 | .ndo_tx_timeout = mpc52xx_fec_tx_timeout, | ||
837 | .ndo_get_stats = mpc52xx_fec_get_stats, | ||
838 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
839 | .ndo_poll_controller = mpc52xx_fec_poll_controller, | ||
840 | #endif | ||
841 | }; | ||
842 | |||
843 | /* ======================================================================== */ | ||
844 | /* OF Driver */ | ||
845 | /* ======================================================================== */ | ||
846 | |||
847 | static int __devinit mpc52xx_fec_probe(struct platform_device *op) | ||
848 | { | ||
849 | int rv; | ||
850 | struct net_device *ndev; | ||
851 | struct mpc52xx_fec_priv *priv = NULL; | ||
852 | struct resource mem; | ||
853 | const u32 *prop; | ||
854 | int prop_size; | ||
855 | |||
856 | phys_addr_t rx_fifo; | ||
857 | phys_addr_t tx_fifo; | ||
858 | |||
859 | /* Get the ether ndev & its private zone */ | ||
860 | ndev = alloc_etherdev(sizeof(struct mpc52xx_fec_priv)); | ||
861 | if (!ndev) | ||
862 | return -ENOMEM; | ||
863 | |||
864 | priv = netdev_priv(ndev); | ||
865 | priv->ndev = ndev; | ||
866 | |||
867 | /* Reserve FEC control zone */ | ||
868 | rv = of_address_to_resource(op->dev.of_node, 0, &mem); | ||
869 | if (rv) { | ||
870 | printk(KERN_ERR DRIVER_NAME ": " | ||
871 | "Error while parsing device node resource\n" ); | ||
872 | goto err_netdev; | ||
873 | } | ||
874 | if (resource_size(&mem) < sizeof(struct mpc52xx_fec)) { | ||
875 | printk(KERN_ERR DRIVER_NAME | ||
876 | " - invalid resource size (%lx < %x), check mpc52xx_devices.c\n", | ||
877 | (unsigned long)resource_size(&mem), | ||
878 | sizeof(struct mpc52xx_fec)); | ||
879 | rv = -EINVAL; | ||
880 | goto err_netdev; | ||
881 | } | ||
882 | |||
883 | if (!request_mem_region(mem.start, sizeof(struct mpc52xx_fec), | ||
884 | DRIVER_NAME)) { | ||
885 | rv = -EBUSY; | ||
886 | goto err_netdev; | ||
887 | } | ||
888 | |||
889 | /* Init ether ndev with what we have */ | ||
890 | ndev->netdev_ops = &mpc52xx_fec_netdev_ops; | ||
891 | ndev->ethtool_ops = &mpc52xx_fec_ethtool_ops; | ||
892 | ndev->watchdog_timeo = FEC_WATCHDOG_TIMEOUT; | ||
893 | ndev->base_addr = mem.start; | ||
894 | SET_NETDEV_DEV(ndev, &op->dev); | ||
895 | |||
896 | spin_lock_init(&priv->lock); | ||
897 | |||
898 | /* ioremap the zones */ | ||
899 | priv->fec = ioremap(mem.start, sizeof(struct mpc52xx_fec)); | ||
900 | |||
901 | if (!priv->fec) { | ||
902 | rv = -ENOMEM; | ||
903 | goto err_mem_region; | ||
904 | } | ||
905 | |||
906 | /* Bestcomm init */ | ||
907 | rx_fifo = ndev->base_addr + offsetof(struct mpc52xx_fec, rfifo_data); | ||
908 | tx_fifo = ndev->base_addr + offsetof(struct mpc52xx_fec, tfifo_data); | ||
909 | |||
910 | priv->rx_dmatsk = bcom_fec_rx_init(FEC_RX_NUM_BD, rx_fifo, FEC_RX_BUFFER_SIZE); | ||
911 | priv->tx_dmatsk = bcom_fec_tx_init(FEC_TX_NUM_BD, tx_fifo); | ||
912 | |||
913 | if (!priv->rx_dmatsk || !priv->tx_dmatsk) { | ||
914 | printk(KERN_ERR DRIVER_NAME ": Cannot init SDMA tasks\n"); | ||
915 | rv = -ENOMEM; | ||
916 | goto err_rx_tx_dmatsk; | ||
917 | } | ||
918 | |||
919 | /* Get the IRQs we need, one by one */ | ||
920 | /* Control */ | ||
921 | ndev->irq = irq_of_parse_and_map(op->dev.of_node, 0); | ||
922 | |||
923 | /* RX */ | ||
924 | priv->r_irq = bcom_get_task_irq(priv->rx_dmatsk); | ||
925 | |||
926 | /* TX */ | ||
927 | priv->t_irq = bcom_get_task_irq(priv->tx_dmatsk); | ||
928 | |||
929 | /* MAC address init */ | ||
930 | if (!is_zero_ether_addr(mpc52xx_fec_mac_addr)) | ||
931 | memcpy(ndev->dev_addr, mpc52xx_fec_mac_addr, 6); | ||
932 | else | ||
933 | mpc52xx_fec_get_paddr(ndev, ndev->dev_addr); | ||
934 | |||
935 | priv->msg_enable = netif_msg_init(debug, MPC52xx_MESSAGES_DEFAULT); | ||
936 | |||
937 | /* | ||
938 | * Link mode configuration | ||
939 | */ | ||
940 | |||
941 | /* Start with safe defaults for link connection */ | ||
942 | priv->speed = 100; | ||
943 | priv->duplex = DUPLEX_HALF; | ||
944 | priv->mdio_speed = ((mpc5xxx_get_bus_frequency(op->dev.of_node) >> 20) / 5) << 1; | ||
945 | |||
946 | /* The current speed preconfigures the speed of the MII link */ | ||
947 | prop = of_get_property(op->dev.of_node, "current-speed", &prop_size); | ||
948 | if (prop && (prop_size >= sizeof(u32) * 2)) { | ||
949 | priv->speed = prop[0]; | ||
950 | priv->duplex = prop[1] ? DUPLEX_FULL : DUPLEX_HALF; | ||
951 | } | ||
952 | |||
953 | /* If there is a phy handle, then get the PHY node */ | ||
954 | priv->phy_node = of_parse_phandle(op->dev.of_node, "phy-handle", 0); | ||
955 | |||
956 | /* the 7-wire property means don't use MII mode */ | ||
957 | if (of_find_property(op->dev.of_node, "fsl,7-wire-mode", NULL)) { | ||
958 | priv->seven_wire_mode = 1; | ||
959 | dev_info(&ndev->dev, "using 7-wire PHY mode\n"); | ||
960 | } | ||
961 | |||
962 | /* Hardware init */ | ||
963 | mpc52xx_fec_hw_init(ndev); | ||
964 | mpc52xx_fec_reset_stats(ndev); | ||
965 | |||
966 | rv = register_netdev(ndev); | ||
967 | if (rv < 0) | ||
968 | goto err_node; | ||
969 | |||
970 | /* We're done! */ | ||
971 | dev_set_drvdata(&op->dev, ndev); | ||
972 | |||
973 | return 0; | ||
974 | |||
975 | err_node: | ||
976 | of_node_put(priv->phy_node); | ||
977 | irq_dispose_mapping(ndev->irq); | ||
978 | err_rx_tx_dmatsk: | ||
979 | if (priv->rx_dmatsk) | ||
980 | bcom_fec_rx_release(priv->rx_dmatsk); | ||
981 | if (priv->tx_dmatsk) | ||
982 | bcom_fec_tx_release(priv->tx_dmatsk); | ||
983 | iounmap(priv->fec); | ||
984 | err_mem_region: | ||
985 | release_mem_region(mem.start, sizeof(struct mpc52xx_fec)); | ||
986 | err_netdev: | ||
987 | free_netdev(ndev); | ||
988 | |||
989 | return rv; | ||
990 | } | ||
991 | |||
992 | static int | ||
993 | mpc52xx_fec_remove(struct platform_device *op) | ||
994 | { | ||
995 | struct net_device *ndev; | ||
996 | struct mpc52xx_fec_priv *priv; | ||
997 | |||
998 | ndev = dev_get_drvdata(&op->dev); | ||
999 | priv = netdev_priv(ndev); | ||
1000 | |||
1001 | unregister_netdev(ndev); | ||
1002 | |||
1003 | if (priv->phy_node) | ||
1004 | of_node_put(priv->phy_node); | ||
1005 | priv->phy_node = NULL; | ||
1006 | |||
1007 | irq_dispose_mapping(ndev->irq); | ||
1008 | |||
1009 | bcom_fec_rx_release(priv->rx_dmatsk); | ||
1010 | bcom_fec_tx_release(priv->tx_dmatsk); | ||
1011 | |||
1012 | iounmap(priv->fec); | ||
1013 | |||
1014 | release_mem_region(ndev->base_addr, sizeof(struct mpc52xx_fec)); | ||
1015 | |||
1016 | free_netdev(ndev); | ||
1017 | |||
1018 | dev_set_drvdata(&op->dev, NULL); | ||
1019 | return 0; | ||
1020 | } | ||
1021 | |||
1022 | #ifdef CONFIG_PM | ||
1023 | static int mpc52xx_fec_of_suspend(struct platform_device *op, pm_message_t state) | ||
1024 | { | ||
1025 | struct net_device *dev = dev_get_drvdata(&op->dev); | ||
1026 | |||
1027 | if (netif_running(dev)) | ||
1028 | mpc52xx_fec_close(dev); | ||
1029 | |||
1030 | return 0; | ||
1031 | } | ||
1032 | |||
1033 | static int mpc52xx_fec_of_resume(struct platform_device *op) | ||
1034 | { | ||
1035 | struct net_device *dev = dev_get_drvdata(&op->dev); | ||
1036 | |||
1037 | mpc52xx_fec_hw_init(dev); | ||
1038 | mpc52xx_fec_reset_stats(dev); | ||
1039 | |||
1040 | if (netif_running(dev)) | ||
1041 | mpc52xx_fec_open(dev); | ||
1042 | |||
1043 | return 0; | ||
1044 | } | ||
1045 | #endif | ||
1046 | |||
1047 | static struct of_device_id mpc52xx_fec_match[] = { | ||
1048 | { .compatible = "fsl,mpc5200b-fec", }, | ||
1049 | { .compatible = "fsl,mpc5200-fec", }, | ||
1050 | { .compatible = "mpc5200-fec", }, | ||
1051 | { } | ||
1052 | }; | ||
1053 | |||
1054 | MODULE_DEVICE_TABLE(of, mpc52xx_fec_match); | ||
1055 | |||
1056 | static struct platform_driver mpc52xx_fec_driver = { | ||
1057 | .driver = { | ||
1058 | .name = DRIVER_NAME, | ||
1059 | .owner = THIS_MODULE, | ||
1060 | .of_match_table = mpc52xx_fec_match, | ||
1061 | }, | ||
1062 | .probe = mpc52xx_fec_probe, | ||
1063 | .remove = mpc52xx_fec_remove, | ||
1064 | #ifdef CONFIG_PM | ||
1065 | .suspend = mpc52xx_fec_of_suspend, | ||
1066 | .resume = mpc52xx_fec_of_resume, | ||
1067 | #endif | ||
1068 | }; | ||
1069 | |||
1070 | |||
1071 | /* ======================================================================== */ | ||
1072 | /* Module */ | ||
1073 | /* ======================================================================== */ | ||
1074 | |||
1075 | static int __init | ||
1076 | mpc52xx_fec_init(void) | ||
1077 | { | ||
1078 | #ifdef CONFIG_FEC_MPC52xx_MDIO | ||
1079 | int ret; | ||
1080 | ret = platform_driver_register(&mpc52xx_fec_mdio_driver); | ||
1081 | if (ret) { | ||
1082 | printk(KERN_ERR DRIVER_NAME ": failed to register mdio driver\n"); | ||
1083 | return ret; | ||
1084 | } | ||
1085 | #endif | ||
1086 | return platform_driver_register(&mpc52xx_fec_driver); | ||
1087 | } | ||
1088 | |||
1089 | static void __exit | ||
1090 | mpc52xx_fec_exit(void) | ||
1091 | { | ||
1092 | platform_driver_unregister(&mpc52xx_fec_driver); | ||
1093 | #ifdef CONFIG_FEC_MPC52xx_MDIO | ||
1094 | platform_driver_unregister(&mpc52xx_fec_mdio_driver); | ||
1095 | #endif | ||
1096 | } | ||
1097 | |||
1098 | |||
1099 | module_init(mpc52xx_fec_init); | ||
1100 | module_exit(mpc52xx_fec_exit); | ||
1101 | |||
1102 | MODULE_LICENSE("GPL"); | ||
1103 | MODULE_AUTHOR("Dale Farnsworth"); | ||
1104 | MODULE_DESCRIPTION("Ethernet driver for the Freescale MPC52xx FEC"); | ||
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.h b/drivers/net/ethernet/freescale/fec_mpc52xx.h new file mode 100644 index 000000000000..41d2dffde55b --- /dev/null +++ b/drivers/net/ethernet/freescale/fec_mpc52xx.h | |||
@@ -0,0 +1,294 @@ | |||
1 | /* | ||
2 | * drivers/net/ethernet/freescale/fec_mpc52xx.h | ||
3 | * | ||
4 | * Driver for the MPC5200 Fast Ethernet Controller | ||
5 | * | ||
6 | * Author: Dale Farnsworth <dfarnsworth@mvista.com> | ||
7 | * | ||
8 | * 2003-2004 (c) MontaVista, Software, Inc. This file is licensed under | ||
9 | * the terms of the GNU General Public License version 2. This program | ||
10 | * is licensed "as is" without any warranty of any kind, whether express | ||
11 | * or implied. | ||
12 | */ | ||
13 | |||
14 | #ifndef __DRIVERS_NET_MPC52XX_FEC_H__ | ||
15 | #define __DRIVERS_NET_MPC52XX_FEC_H__ | ||
16 | |||
17 | #include <linux/phy.h> | ||
18 | |||
19 | /* Tunable constant */ | ||
20 | /* FEC_RX_BUFFER_SIZE includes 4 bytes for CRC32 */ | ||
21 | #define FEC_RX_BUFFER_SIZE 1522 /* max receive packet size */ | ||
22 | #define FEC_RX_NUM_BD 256 | ||
23 | #define FEC_TX_NUM_BD 64 | ||
24 | |||
25 | #define FEC_RESET_DELAY 50 /* us */ | ||
26 | |||
27 | #define FEC_WATCHDOG_TIMEOUT ((400*HZ)/1000) | ||
28 | |||
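Editor's note: FEC_WATCHDOG_TIMEOUT is just 400 ms converted to jiffies; with HZ=250, for instance, (400*250)/1000 = 100 jiffies. A hedged one-line alternative (msecs_to_jiffies() is the stock kernel helper; this rewrite is illustrative, not part of the driver):

	#define FEC_WATCHDOG_TIMEOUT msecs_to_jiffies(400)	/* also rounds correctly when HZ does not divide 1000 */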
29 | /* ======================================================================== */ | ||
30 | /* Hardware register sets & bits */ | ||
31 | /* ======================================================================== */ | ||
32 | |||
33 | struct mpc52xx_fec { | ||
34 | u32 fec_id; /* FEC + 0x000 */ | ||
35 | u32 ievent; /* FEC + 0x004 */ | ||
36 | u32 imask; /* FEC + 0x008 */ | ||
37 | |||
38 | u32 reserved0[1]; /* FEC + 0x00C */ | ||
39 | u32 r_des_active; /* FEC + 0x010 */ | ||
40 | u32 x_des_active; /* FEC + 0x014 */ | ||
41 | u32 r_des_active_cl; /* FEC + 0x018 */ | ||
42 | u32 x_des_active_cl; /* FEC + 0x01C */ | ||
43 | u32 ivent_set; /* FEC + 0x020 */ | ||
44 | u32 ecntrl; /* FEC + 0x024 */ | ||
45 | |||
46 | u32 reserved1[6]; /* FEC + 0x028-03C */ | ||
47 | u32 mii_data; /* FEC + 0x040 */ | ||
48 | u32 mii_speed; /* FEC + 0x044 */ | ||
49 | u32 mii_status; /* FEC + 0x048 */ | ||
50 | |||
51 | u32 reserved2[5]; /* FEC + 0x04C-05C */ | ||
52 | u32 mib_data; /* FEC + 0x060 */ | ||
53 | u32 mib_control; /* FEC + 0x064 */ | ||
54 | |||
55 | u32 reserved3[6]; /* FEC + 0x068-7C */ | ||
56 | u32 r_activate; /* FEC + 0x080 */ | ||
57 | u32 r_cntrl; /* FEC + 0x084 */ | ||
58 | u32 r_hash; /* FEC + 0x088 */ | ||
59 | u32 r_data; /* FEC + 0x08C */ | ||
60 | u32 ar_done; /* FEC + 0x090 */ | ||
61 | u32 r_test; /* FEC + 0x094 */ | ||
62 | u32 r_mib; /* FEC + 0x098 */ | ||
63 | u32 r_da_low; /* FEC + 0x09C */ | ||
64 | u32 r_da_high; /* FEC + 0x0A0 */ | ||
65 | |||
66 | u32 reserved4[7]; /* FEC + 0x0A4-0BC */ | ||
67 | u32 x_activate; /* FEC + 0x0C0 */ | ||
68 | u32 x_cntrl; /* FEC + 0x0C4 */ | ||
69 | u32 backoff; /* FEC + 0x0C8 */ | ||
70 | u32 x_data; /* FEC + 0x0CC */ | ||
71 | u32 x_status; /* FEC + 0x0D0 */ | ||
72 | u32 x_mib; /* FEC + 0x0D4 */ | ||
73 | u32 x_test; /* FEC + 0x0D8 */ | ||
74 | u32 fdxfc_da1; /* FEC + 0x0DC */ | ||
75 | u32 fdxfc_da2; /* FEC + 0x0E0 */ | ||
76 | u32 paddr1; /* FEC + 0x0E4 */ | ||
77 | u32 paddr2; /* FEC + 0x0E8 */ | ||
78 | u32 op_pause; /* FEC + 0x0EC */ | ||
79 | |||
80 | u32 reserved5[4]; /* FEC + 0x0F0-0FC */ | ||
81 | u32 instr_reg; /* FEC + 0x100 */ | ||
82 | u32 context_reg; /* FEC + 0x104 */ | ||
83 | u32 test_cntrl; /* FEC + 0x108 */ | ||
84 | u32 acc_reg; /* FEC + 0x10C */ | ||
85 | u32 ones; /* FEC + 0x110 */ | ||
86 | u32 zeros; /* FEC + 0x114 */ | ||
87 | u32 iaddr1; /* FEC + 0x118 */ | ||
88 | u32 iaddr2; /* FEC + 0x11C */ | ||
89 | u32 gaddr1; /* FEC + 0x120 */ | ||
90 | u32 gaddr2; /* FEC + 0x124 */ | ||
91 | u32 random; /* FEC + 0x128 */ | ||
92 | u32 rand1; /* FEC + 0x12C */ | ||
93 | u32 tmp; /* FEC + 0x130 */ | ||
94 | |||
95 | u32 reserved6[3]; /* FEC + 0x134-13C */ | ||
96 | u32 fifo_id; /* FEC + 0x140 */ | ||
97 | u32 x_wmrk; /* FEC + 0x144 */ | ||
98 | u32 fcntrl; /* FEC + 0x148 */ | ||
99 | u32 r_bound; /* FEC + 0x14C */ | ||
100 | u32 r_fstart; /* FEC + 0x150 */ | ||
101 | u32 r_count; /* FEC + 0x154 */ | ||
102 | u32 r_lag; /* FEC + 0x158 */ | ||
103 | u32 r_read; /* FEC + 0x15C */ | ||
104 | u32 r_write; /* FEC + 0x160 */ | ||
105 | u32 x_count; /* FEC + 0x164 */ | ||
106 | u32 x_lag; /* FEC + 0x168 */ | ||
107 | u32 x_retry; /* FEC + 0x16C */ | ||
108 | u32 x_write; /* FEC + 0x170 */ | ||
109 | u32 x_read; /* FEC + 0x174 */ | ||
110 | |||
111 | u32 reserved7[2]; /* FEC + 0x178-17C */ | ||
112 | u32 fm_cntrl; /* FEC + 0x180 */ | ||
113 | u32 rfifo_data; /* FEC + 0x184 */ | ||
114 | u32 rfifo_status; /* FEC + 0x188 */ | ||
115 | u32 rfifo_cntrl; /* FEC + 0x18C */ | ||
116 | u32 rfifo_lrf_ptr; /* FEC + 0x190 */ | ||
117 | u32 rfifo_lwf_ptr; /* FEC + 0x194 */ | ||
118 | u32 rfifo_alarm; /* FEC + 0x198 */ | ||
119 | u32 rfifo_rdptr; /* FEC + 0x19C */ | ||
120 | u32 rfifo_wrptr; /* FEC + 0x1A0 */ | ||
121 | u32 tfifo_data; /* FEC + 0x1A4 */ | ||
122 | u32 tfifo_status; /* FEC + 0x1A8 */ | ||
123 | u32 tfifo_cntrl; /* FEC + 0x1AC */ | ||
124 | u32 tfifo_lrf_ptr; /* FEC + 0x1B0 */ | ||
125 | u32 tfifo_lwf_ptr; /* FEC + 0x1B4 */ | ||
126 | u32 tfifo_alarm; /* FEC + 0x1B8 */ | ||
127 | u32 tfifo_rdptr; /* FEC + 0x1BC */ | ||
128 | u32 tfifo_wrptr; /* FEC + 0x1C0 */ | ||
129 | |||
130 | u32 reset_cntrl; /* FEC + 0x1C4 */ | ||
131 | u32 xmit_fsm; /* FEC + 0x1C8 */ | ||
132 | |||
133 | u32 reserved8[3]; /* FEC + 0x1CC-1D4 */ | ||
134 | u32 rdes_data0; /* FEC + 0x1D8 */ | ||
135 | u32 rdes_data1; /* FEC + 0x1DC */ | ||
136 | u32 r_length; /* FEC + 0x1E0 */ | ||
137 | u32 x_length; /* FEC + 0x1E4 */ | ||
138 | u32 x_addr; /* FEC + 0x1E8 */ | ||
139 | u32 cdes_data; /* FEC + 0x1EC */ | ||
140 | u32 status; /* FEC + 0x1F0 */ | ||
141 | u32 dma_control; /* FEC + 0x1F4 */ | ||
142 | u32 des_cmnd; /* FEC + 0x1F8 */ | ||
143 | u32 data; /* FEC + 0x1FC */ | ||
144 | |||
145 | u32 rmon_t_drop; /* FEC + 0x200 */ | ||
146 | u32 rmon_t_packets; /* FEC + 0x204 */ | ||
147 | u32 rmon_t_bc_pkt; /* FEC + 0x208 */ | ||
148 | u32 rmon_t_mc_pkt; /* FEC + 0x20C */ | ||
149 | u32 rmon_t_crc_align; /* FEC + 0x210 */ | ||
150 | u32 rmon_t_undersize; /* FEC + 0x214 */ | ||
151 | u32 rmon_t_oversize; /* FEC + 0x218 */ | ||
152 | u32 rmon_t_frag; /* FEC + 0x21C */ | ||
153 | u32 rmon_t_jab; /* FEC + 0x220 */ | ||
154 | u32 rmon_t_col; /* FEC + 0x224 */ | ||
155 | u32 rmon_t_p64; /* FEC + 0x228 */ | ||
156 | u32 rmon_t_p65to127; /* FEC + 0x22C */ | ||
157 | u32 rmon_t_p128to255; /* FEC + 0x230 */ | ||
158 | u32 rmon_t_p256to511; /* FEC + 0x234 */ | ||
159 | u32 rmon_t_p512to1023; /* FEC + 0x238 */ | ||
160 | u32 rmon_t_p1024to2047; /* FEC + 0x23C */ | ||
161 | u32 rmon_t_p_gte2048; /* FEC + 0x240 */ | ||
162 | u32 rmon_t_octets; /* FEC + 0x244 */ | ||
163 | u32 ieee_t_drop; /* FEC + 0x248 */ | ||
164 | u32 ieee_t_frame_ok; /* FEC + 0x24C */ | ||
165 | u32 ieee_t_1col; /* FEC + 0x250 */ | ||
166 | u32 ieee_t_mcol; /* FEC + 0x254 */ | ||
167 | u32 ieee_t_def; /* FEC + 0x258 */ | ||
168 | u32 ieee_t_lcol; /* FEC + 0x25C */ | ||
169 | u32 ieee_t_excol; /* FEC + 0x260 */ | ||
170 | u32 ieee_t_macerr; /* FEC + 0x264 */ | ||
171 | u32 ieee_t_cserr; /* FEC + 0x268 */ | ||
172 | u32 ieee_t_sqe; /* FEC + 0x26C */ | ||
173 | u32 t_fdxfc; /* FEC + 0x270 */ | ||
174 | u32 ieee_t_octets_ok; /* FEC + 0x274 */ | ||
175 | |||
176 | u32 reserved9[2]; /* FEC + 0x278-27C */ | ||
177 | u32 rmon_r_drop; /* FEC + 0x280 */ | ||
178 | u32 rmon_r_packets; /* FEC + 0x284 */ | ||
179 | u32 rmon_r_bc_pkt; /* FEC + 0x288 */ | ||
180 | u32 rmon_r_mc_pkt; /* FEC + 0x28C */ | ||
181 | u32 rmon_r_crc_align; /* FEC + 0x290 */ | ||
182 | u32 rmon_r_undersize; /* FEC + 0x294 */ | ||
183 | u32 rmon_r_oversize; /* FEC + 0x298 */ | ||
184 | u32 rmon_r_frag; /* FEC + 0x29C */ | ||
185 | u32 rmon_r_jab; /* FEC + 0x2A0 */ | ||
186 | |||
187 | u32 rmon_r_resvd_0; /* FEC + 0x2A4 */ | ||
188 | |||
189 | u32 rmon_r_p64; /* FEC + 0x2A8 */ | ||
190 | u32 rmon_r_p65to127; /* FEC + 0x2AC */ | ||
191 | u32 rmon_r_p128to255; /* FEC + 0x2B0 */ | ||
192 | u32 rmon_r_p256to511; /* FEC + 0x2B4 */ | ||
193 | u32 rmon_r_p512to1023; /* FEC + 0x2B8 */ | ||
194 | u32 rmon_r_p1024to2047; /* FEC + 0x2BC */ | ||
195 | u32 rmon_r_p_gte2048; /* FEC + 0x2C0 */ | ||
196 | u32 rmon_r_octets; /* FEC + 0x2C4 */ | ||
197 | u32 ieee_r_drop; /* FEC + 0x2C8 */ | ||
198 | u32 ieee_r_frame_ok; /* FEC + 0x2CC */ | ||
199 | u32 ieee_r_crc; /* FEC + 0x2D0 */ | ||
200 | u32 ieee_r_align; /* FEC + 0x2D4 */ | ||
201 | u32 r_macerr; /* FEC + 0x2D8 */ | ||
202 | u32 r_fdxfc; /* FEC + 0x2DC */ | ||
203 | u32 ieee_r_octets_ok; /* FEC + 0x2E0 */ | ||
204 | |||
205 | u32 reserved10[7]; /* FEC + 0x2E4-2FC */ | ||
206 | |||
207 | u32 reserved11[64]; /* FEC + 0x300-3FF */ | ||
208 | }; | ||
209 | |||
210 | #define FEC_MIB_DISABLE 0x80000000 | ||
211 | |||
212 | #define FEC_IEVENT_HBERR 0x80000000 | ||
213 | #define FEC_IEVENT_BABR 0x40000000 | ||
214 | #define FEC_IEVENT_BABT 0x20000000 | ||
215 | #define FEC_IEVENT_GRA 0x10000000 | ||
216 | #define FEC_IEVENT_TFINT 0x08000000 | ||
217 | #define FEC_IEVENT_MII 0x00800000 | ||
218 | #define FEC_IEVENT_LATE_COL 0x00200000 | ||
219 | #define FEC_IEVENT_COL_RETRY_LIM 0x00100000 | ||
220 | #define FEC_IEVENT_XFIFO_UN 0x00080000 | ||
221 | #define FEC_IEVENT_XFIFO_ERROR 0x00040000 | ||
222 | #define FEC_IEVENT_RFIFO_ERROR 0x00020000 | ||
223 | |||
224 | #define FEC_IMASK_HBERR 0x80000000 | ||
225 | #define FEC_IMASK_BABR 0x40000000 | ||
226 | #define FEC_IMASK_BABT 0x20000000 | ||
227 | #define FEC_IMASK_GRA 0x10000000 | ||
228 | #define FEC_IMASK_MII 0x00800000 | ||
229 | #define FEC_IMASK_LATE_COL 0x00200000 | ||
230 | #define FEC_IMASK_COL_RETRY_LIM 0x00100000 | ||
231 | #define FEC_IMASK_XFIFO_UN 0x00080000 | ||
232 | #define FEC_IMASK_XFIFO_ERROR 0x00040000 | ||
233 | #define FEC_IMASK_RFIFO_ERROR 0x00020000 | ||
234 | |||
235 | /* all but MII, which is enabled separately */ | ||
236 | #define FEC_IMASK_ENABLE (FEC_IMASK_HBERR | FEC_IMASK_BABR | \ | ||
237 | FEC_IMASK_BABT | FEC_IMASK_GRA | FEC_IMASK_LATE_COL | \ | ||
238 | FEC_IMASK_COL_RETRY_LIM | FEC_IMASK_XFIFO_UN | \ | ||
239 | FEC_IMASK_XFIFO_ERROR | FEC_IMASK_RFIFO_ERROR) | ||
240 | |||
241 | #define FEC_RCNTRL_MAX_FL_SHIFT 16 | ||
242 | #define FEC_RCNTRL_LOOP 0x01 | ||
243 | #define FEC_RCNTRL_DRT 0x02 | ||
244 | #define FEC_RCNTRL_MII_MODE 0x04 | ||
245 | #define FEC_RCNTRL_PROM 0x08 | ||
246 | #define FEC_RCNTRL_BC_REJ 0x10 | ||
247 | #define FEC_RCNTRL_FCE 0x20 | ||
248 | |||
249 | #define FEC_TCNTRL_GTS 0x00000001 | ||
250 | #define FEC_TCNTRL_HBC 0x00000002 | ||
251 | #define FEC_TCNTRL_FDEN 0x00000004 | ||
252 | #define FEC_TCNTRL_TFC_PAUSE 0x00000008 | ||
253 | #define FEC_TCNTRL_RFC_PAUSE 0x00000010 | ||
254 | |||
255 | #define FEC_ECNTRL_RESET 0x00000001 | ||
256 | #define FEC_ECNTRL_ETHER_EN 0x00000002 | ||
257 | |||
258 | #define FEC_MII_DATA_ST 0x40000000 /* Start frame */ | ||
259 | #define FEC_MII_DATA_OP_RD 0x20000000 /* Perform read */ | ||
260 | #define FEC_MII_DATA_OP_WR 0x10000000 /* Perform write */ | ||
261 | #define FEC_MII_DATA_PA_MSK 0x0f800000 /* PHY Address mask */ | ||
262 | #define FEC_MII_DATA_RA_MSK 0x007c0000 /* PHY Register mask */ | ||
263 | #define FEC_MII_DATA_TA 0x00020000 /* Turnaround */ | ||
264 | #define FEC_MII_DATA_DATAMSK 0x0000ffff /* PHY data mask */ | ||
265 | |||
266 | #define FEC_MII_READ_FRAME (FEC_MII_DATA_ST | FEC_MII_DATA_OP_RD | FEC_MII_DATA_TA) | ||
267 | #define FEC_MII_WRITE_FRAME (FEC_MII_DATA_ST | FEC_MII_DATA_OP_WR | FEC_MII_DATA_TA) | ||
268 | |||
269 | #define FEC_MII_DATA_RA_SHIFT 0x12 /* MII reg addr bits */ | ||
270 | #define FEC_MII_DATA_PA_SHIFT 0x17 /* MII PHY addr bits */ | ||
271 | |||
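Editor's note: both shifts are hexadecimal, i.e. 23 and 18 decimal, which lines the 5-bit PHY and register numbers up under FEC_MII_DATA_PA_MSK and FEC_MII_DATA_RA_MSK above. A minimal sketch of the frame assembly (mii_read_frame() is a hypothetical helper; the real driver composes this inline in fec_mpc52xx_phy.c below):

	static inline u32 mii_read_frame(int phy_id, int reg)
	{
		return FEC_MII_READ_FRAME |
		       ((phy_id << FEC_MII_DATA_PA_SHIFT) & FEC_MII_DATA_PA_MSK) |
		       ((reg << FEC_MII_DATA_RA_SHIFT) & FEC_MII_DATA_RA_MSK);
	}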
272 | #define FEC_PADDR2_TYPE 0x8808 | ||
273 | |||
274 | #define FEC_OP_PAUSE_OPCODE 0x00010000 | ||
275 | |||
276 | #define FEC_FIFO_WMRK_256B 0x3 | ||
277 | |||
278 | #define FEC_FIFO_STATUS_ERR 0x00400000 | ||
279 | #define FEC_FIFO_STATUS_UF 0x00200000 | ||
280 | #define FEC_FIFO_STATUS_OF 0x00100000 | ||
281 | |||
282 | #define FEC_FIFO_CNTRL_FRAME 0x08000000 | ||
283 | #define FEC_FIFO_CNTRL_LTG_7 0x07000000 | ||
284 | |||
285 | #define FEC_RESET_CNTRL_RESET_FIFO 0x02000000 | ||
286 | #define FEC_RESET_CNTRL_ENABLE_IS_RESET 0x01000000 | ||
287 | |||
288 | #define FEC_XMIT_FSM_APPEND_CRC 0x02000000 | ||
289 | #define FEC_XMIT_FSM_ENABLE_CRC 0x01000000 | ||
290 | |||
291 | |||
292 | extern struct platform_driver mpc52xx_fec_mdio_driver; | ||
293 | |||
294 | #endif /* __DRIVERS_NET_MPC52XX_FEC_H__ */ | ||
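Editor's note: every register in struct mpc52xx_fec is pinned to a documented offset by its comment. A hypothetical compile-time check (not in the driver) that would catch padding mistakes if the layout were ever edited:

	/* illustration only; BUILD_BUG_ON comes from <linux/bug.h> */
	static inline void mpc52xx_fec_layout_check(void)
	{
		BUILD_BUG_ON(offsetof(struct mpc52xx_fec, ievent) != 0x004);
		BUILD_BUG_ON(offsetof(struct mpc52xx_fec, mii_data) != 0x040);
		BUILD_BUG_ON(offsetof(struct mpc52xx_fec, rmon_t_drop) != 0x200);
		BUILD_BUG_ON(sizeof(struct mpc52xx_fec) != 0x400);
	}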
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c new file mode 100644 index 000000000000..360a578c2bb7 --- /dev/null +++ b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c | |||
@@ -0,0 +1,160 @@ | |||
1 | /* | ||
2 | * Driver for the MPC5200 Fast Ethernet Controller - MDIO bus driver | ||
3 | * | ||
4 | * Copyright (C) 2007 Domen Puncer, Telargo, Inc. | ||
5 | * Copyright (C) 2008 Wolfram Sang, Pengutronix | ||
6 | * | ||
7 | * This file is licensed under the terms of the GNU General Public License | ||
8 | * version 2. This program is licensed "as is" without any warranty of any | ||
9 | * kind, whether express or implied. | ||
10 | */ | ||
11 | |||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/netdevice.h> | ||
15 | #include <linux/phy.h> | ||
16 | #include <linux/of_platform.h> | ||
17 | #include <linux/slab.h> | ||
18 | #include <linux/of_mdio.h> | ||
19 | #include <asm/io.h> | ||
20 | #include <asm/mpc52xx.h> | ||
21 | #include "fec_mpc52xx.h" | ||
22 | |||
23 | struct mpc52xx_fec_mdio_priv { | ||
24 | struct mpc52xx_fec __iomem *regs; | ||
25 | int mdio_irqs[PHY_MAX_ADDR]; | ||
26 | }; | ||
27 | |||
28 | static int mpc52xx_fec_mdio_transfer(struct mii_bus *bus, int phy_id, | ||
29 | int reg, u32 value) | ||
30 | { | ||
31 | struct mpc52xx_fec_mdio_priv *priv = bus->priv; | ||
32 | struct mpc52xx_fec __iomem *fec = priv->regs; | ||
33 | int tries = 3; | ||
34 | |||
35 | value |= (phy_id << FEC_MII_DATA_PA_SHIFT) & FEC_MII_DATA_PA_MSK; | ||
36 | value |= (reg << FEC_MII_DATA_RA_SHIFT) & FEC_MII_DATA_RA_MSK; | ||
37 | |||
38 | out_be32(&fec->ievent, FEC_IEVENT_MII); | ||
39 | out_be32(&fec->mii_data, value); | ||
40 | |||
41 | /* wait for it to finish; this takes about 23 us on a lite5200b */ | ||
42 | while (!(in_be32(&fec->ievent) & FEC_IEVENT_MII) && --tries) | ||
43 | msleep(1); | ||
44 | |||
45 | if (!tries) | ||
46 | return -ETIMEDOUT; | ||
47 | |||
48 | return value & FEC_MII_DATA_OP_RD ? | ||
49 | in_be32(&fec->mii_data) & FEC_MII_DATA_DATAMSK : 0; | ||
50 | } | ||
51 | |||
52 | static int mpc52xx_fec_mdio_read(struct mii_bus *bus, int phy_id, int reg) | ||
53 | { | ||
54 | return mpc52xx_fec_mdio_transfer(bus, phy_id, reg, FEC_MII_READ_FRAME); | ||
55 | } | ||
56 | |||
57 | static int mpc52xx_fec_mdio_write(struct mii_bus *bus, int phy_id, int reg, | ||
58 | u16 data) | ||
59 | { | ||
60 | return mpc52xx_fec_mdio_transfer(bus, phy_id, reg, | ||
61 | data | FEC_MII_WRITE_FRAME); | ||
62 | } | ||
63 | |||
64 | static int mpc52xx_fec_mdio_probe(struct platform_device *of) | ||
65 | { | ||
66 | struct device *dev = &of->dev; | ||
67 | struct device_node *np = of->dev.of_node; | ||
68 | struct mii_bus *bus; | ||
69 | struct mpc52xx_fec_mdio_priv *priv; | ||
70 | struct resource res; | ||
71 | int err; | ||
72 | |||
73 | bus = mdiobus_alloc(); | ||
74 | if (bus == NULL) | ||
75 | return -ENOMEM; | ||
76 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); | ||
77 | if (priv == NULL) { | ||
78 | err = -ENOMEM; | ||
79 | goto out_free; | ||
80 | } | ||
81 | |||
82 | bus->name = "mpc52xx MII bus"; | ||
83 | bus->read = mpc52xx_fec_mdio_read; | ||
84 | bus->write = mpc52xx_fec_mdio_write; | ||
85 | |||
86 | /* setup irqs */ | ||
87 | bus->irq = priv->mdio_irqs; | ||
88 | |||
89 | /* setup registers */ | ||
90 | err = of_address_to_resource(np, 0, &res); | ||
91 | if (err) | ||
92 | goto out_free; | ||
93 | priv->regs = ioremap(res.start, resource_size(&res)); | ||
94 | if (priv->regs == NULL) { | ||
95 | err = -ENOMEM; | ||
96 | goto out_free; | ||
97 | } | ||
98 | |||
99 | snprintf(bus->id, MII_BUS_ID_SIZE, "%x", res.start); | ||
100 | bus->priv = priv; | ||
101 | |||
102 | bus->parent = dev; | ||
103 | dev_set_drvdata(dev, bus); | ||
104 | |||
105 | /* set MII speed */ | ||
106 | out_be32(&priv->regs->mii_speed, | ||
107 | ((mpc5xxx_get_bus_frequency(of->dev.of_node) >> 20) / 5) << 1); | ||
108 | |||
109 | err = of_mdiobus_register(bus, np); | ||
110 | if (err) | ||
111 | goto out_unmap; | ||
112 | |||
113 | return 0; | ||
114 | |||
115 | out_unmap: | ||
116 | iounmap(priv->regs); | ||
117 | out_free: | ||
118 | kfree(priv); | ||
119 | mdiobus_free(bus); | ||
120 | |||
121 | return err; | ||
122 | } | ||
123 | |||
124 | static int mpc52xx_fec_mdio_remove(struct platform_device *of) | ||
125 | { | ||
126 | struct device *dev = &of->dev; | ||
127 | struct mii_bus *bus = dev_get_drvdata(dev); | ||
128 | struct mpc52xx_fec_mdio_priv *priv = bus->priv; | ||
129 | |||
130 | mdiobus_unregister(bus); | ||
131 | dev_set_drvdata(dev, NULL); | ||
132 | iounmap(priv->regs); | ||
133 | kfree(priv); | ||
134 | mdiobus_free(bus); | ||
135 | |||
136 | return 0; | ||
137 | } | ||
138 | |||
139 | static struct of_device_id mpc52xx_fec_mdio_match[] = { | ||
140 | { .compatible = "fsl,mpc5200b-mdio", }, | ||
141 | { .compatible = "fsl,mpc5200-mdio", }, | ||
142 | { .compatible = "mpc5200b-fec-phy", }, | ||
143 | {} | ||
144 | }; | ||
145 | MODULE_DEVICE_TABLE(of, mpc52xx_fec_mdio_match); | ||
146 | |||
147 | struct platform_driver mpc52xx_fec_mdio_driver = { | ||
148 | .driver = { | ||
149 | .name = "mpc5200b-fec-phy", | ||
150 | .owner = THIS_MODULE, | ||
151 | .of_match_table = mpc52xx_fec_mdio_match, | ||
152 | }, | ||
153 | .probe = mpc52xx_fec_mdio_probe, | ||
154 | .remove = mpc52xx_fec_mdio_remove, | ||
155 | }; | ||
156 | |||
157 | /* exported so the fec driver can register it; the MDIO bus driver must be registered before the MAC driver */ | ||
158 | EXPORT_SYMBOL_GPL(mpc52xx_fec_mdio_driver); | ||
159 | |||
160 | MODULE_LICENSE("Dual BSD/GPL"); | ||
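Editor's note: the mii_speed computation in mpc52xx_fec_mdio_probe() above is worth a worked example. Assuming an illustrative 66 MHz IPB bus clock: (66000000 >> 20) = 62 approximates the clock in MHz on the cheap, 62 / 5 = 12 targets one MDC period per roughly 5 bus-MHz, and 12 << 1 = 24 drops the divider into bits [6:1] of mii_speed. Taking MDC = ipb_clk / (divider * 2) from the MPC5200 FEC documentation as an assumption, that yields about 2.75 MHz, in the neighbourhood of the nominal 2.5 MHz MII management clock.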
diff --git a/drivers/net/ethernet/freescale/fs_enet/Kconfig b/drivers/net/ethernet/freescale/fs_enet/Kconfig new file mode 100644 index 000000000000..be92229f2c2a --- /dev/null +++ b/drivers/net/ethernet/freescale/fs_enet/Kconfig | |||
@@ -0,0 +1,34 @@ | |||
1 | config FS_ENET | ||
2 | tristate "Freescale Ethernet Driver" | ||
3 | depends on NET_VENDOR_FREESCALE && (CPM1 || CPM2 || PPC_MPC512x) | ||
4 | select MII | ||
5 | select PHYLIB | ||
6 | |||
7 | config FS_ENET_MPC5121_FEC | ||
8 | def_bool y if (FS_ENET && PPC_MPC512x) | ||
9 | select FS_ENET_HAS_FEC | ||
10 | |||
11 | config FS_ENET_HAS_SCC | ||
12 | bool "Chip has an SCC usable for ethernet" | ||
13 | depends on FS_ENET && (CPM1 || CPM2) | ||
14 | default y | ||
15 | |||
16 | config FS_ENET_HAS_FCC | ||
17 | bool "Chip has an FCC usable for ethernet" | ||
18 | depends on FS_ENET && CPM2 | ||
19 | default y | ||
20 | |||
21 | config FS_ENET_HAS_FEC | ||
22 | bool "Chip has an FEC usable for ethernet" | ||
23 | depends on FS_ENET && (CPM1 || FS_ENET_MPC5121_FEC) | ||
24 | select FS_ENET_MDIO_FEC | ||
25 | default y | ||
26 | |||
27 | config FS_ENET_MDIO_FEC | ||
28 | tristate "MDIO driver for FEC" | ||
29 | depends on FS_ENET && (CPM1 || FS_ENET_MPC5121_FEC) | ||
30 | |||
31 | config FS_ENET_MDIO_FCC | ||
32 | tristate "MDIO driver for FCC" | ||
33 | depends on FS_ENET && CPM2 | ||
34 | select MDIO_BITBANG | ||
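Editor's note: for orientation, a plausible .config fragment for a CPM1 (MPC8xx) platform built only from the symbols above; FS_ENET_MDIO_FEC arrives via the select on FS_ENET_HAS_FEC:

	CONFIG_NET_VENDOR_FREESCALE=y
	CONFIG_FS_ENET=y
	CONFIG_FS_ENET_HAS_SCC=y
	CONFIG_FS_ENET_HAS_FEC=y
	CONFIG_FS_ENET_MDIO_FEC=y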
diff --git a/drivers/net/ethernet/freescale/fs_enet/Makefile b/drivers/net/ethernet/freescale/fs_enet/Makefile new file mode 100644 index 000000000000..d4a305ee3455 --- /dev/null +++ b/drivers/net/ethernet/freescale/fs_enet/Makefile | |||
@@ -0,0 +1,14 @@ | |||
1 | # | ||
2 | # Makefile for the Freescale Ethernet controllers | ||
3 | # | ||
4 | |||
5 | obj-$(CONFIG_FS_ENET) += fs_enet.o | ||
6 | |||
7 | fs_enet-$(CONFIG_FS_ENET_HAS_SCC) += mac-scc.o | ||
8 | fs_enet-$(CONFIG_FS_ENET_HAS_FEC) += mac-fec.o | ||
9 | fs_enet-$(CONFIG_FS_ENET_HAS_FCC) += mac-fcc.o | ||
10 | |||
11 | obj-$(CONFIG_FS_ENET_MDIO_FEC) += mii-fec.o | ||
12 | obj-$(CONFIG_FS_ENET_MDIO_FCC) += mii-bitbang.o | ||
13 | |||
14 | fs_enet-objs := fs_enet-main.o $(fs_enet-m) | ||
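Editor's note: with that CPM1 fragment, the composite object resolves as sketched below, assuming the usual kbuild rule that both fs_enet-objs and fs_enet-y are linked into fs_enet.o. The bool options expand to fs_enet-y, so $(fs_enet-m) on the last line is empty in such a configuration:

	fs_enet-y    := mac-scc.o mac-fec.o	# from fs_enet-$(CONFIG_...) with =y
	fs_enet-objs := fs_enet-main.o		# $(fs_enet-m) expands to nothing
	# fs_enet.o is then linked from fs_enet-main.o mac-scc.o mac-fec.o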
diff --git a/drivers/net/ethernet/freescale/fs_enet/fec.h b/drivers/net/ethernet/freescale/fs_enet/fec.h new file mode 100644 index 000000000000..e980527e2b99 --- /dev/null +++ b/drivers/net/ethernet/freescale/fs_enet/fec.h | |||
@@ -0,0 +1,42 @@ | |||
1 | #ifndef FS_ENET_FEC_H | ||
2 | #define FS_ENET_FEC_H | ||
3 | |||
4 | /* CRC polynomial used by the FEC for multicast group filtering */ | ||
5 | #define FEC_CRC_POLY 0x04C11DB7 | ||
6 | |||
7 | #define FEC_MAX_MULTICAST_ADDRS 64 | ||
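Editor's note: FEC_CRC_POLY is the standard CRC-32 polynomial, and the 64 filter slots suggest the usual FEC scheme: hash each multicast address and let the top six CRC bits select one of 64 group-filter bits. A hedged sketch of that common pattern (the function name is illustrative, and 0xedb88320 is simply FEC_CRC_POLY bit-reversed for the reflected formulation):

	static u32 fec_mc_hash(const u8 mac[6])
	{
		u32 crc = 0xffffffff;
		int i, j;

		for (i = 0; i < 6; i++) {
			crc ^= mac[i];
			for (j = 0; j < 8; j++)
				crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
		}
		return crc >> 26;	/* top 6 bits: one of 64 filter positions */
	}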
8 | |||
9 | /* Interrupt events/masks. | ||
10 | */ | ||
11 | #define FEC_ENET_HBERR 0x80000000U /* Heartbeat error */ | ||
12 | #define FEC_ENET_BABR 0x40000000U /* Babbling receiver */ | ||
13 | #define FEC_ENET_BABT 0x20000000U /* Babbling transmitter */ | ||
14 | #define FEC_ENET_GRA 0x10000000U /* Graceful stop complete */ | ||
15 | #define FEC_ENET_TXF 0x08000000U /* Full frame transmitted */ | ||
16 | #define FEC_ENET_TXB 0x04000000U /* A buffer was transmitted */ | ||
17 | #define FEC_ENET_RXF 0x02000000U /* Full frame received */ | ||
18 | #define FEC_ENET_RXB 0x01000000U /* A buffer was received */ | ||
19 | #define FEC_ENET_MII 0x00800000U /* MII interrupt */ | ||
20 | #define FEC_ENET_EBERR 0x00400000U /* SDMA bus error */ | ||
21 | |||
22 | #define FEC_ECNTRL_PINMUX 0x00000004 | ||
23 | #define FEC_ECNTRL_ETHER_EN 0x00000002 | ||
24 | #define FEC_ECNTRL_RESET 0x00000001 | ||
25 | |||
26 | #define FEC_RCNTRL_BC_REJ 0x00000010 | ||
27 | #define FEC_RCNTRL_PROM 0x00000008 | ||
28 | #define FEC_RCNTRL_MII_MODE 0x00000004 | ||
29 | #define FEC_RCNTRL_DRT 0x00000002 | ||
30 | #define FEC_RCNTRL_LOOP 0x00000001 | ||
31 | |||
32 | #define FEC_TCNTRL_FDEN 0x00000004 | ||
33 | #define FEC_TCNTRL_HBC 0x00000002 | ||
34 | #define FEC_TCNTRL_GTS 0x00000001 | ||
35 | |||
36 | |||
37 | |||
38 | /* | ||
39 | * Delay to wait for FEC reset command to complete (in us) | ||
40 | */ | ||
41 | #define FEC_RESET_DELAY 50 | ||
42 | #endif | ||
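Editor's note: FEC_RESET_DELAY bounds a whole-controller reset. A hedged sketch of typical use (fecp and the fec_ecntrl field follow the struct fec naming used elsewhere in fs_enet and are assumptions here, as is the warning text):

	out_be32(&fecp->fec_ecntrl, FEC_ECNTRL_RESET);	/* request reset */
	udelay(FEC_RESET_DELAY);			/* allow up to 50 us */
	if (in_be32(&fecp->fec_ecntrl) & FEC_ECNTRL_RESET)
		pr_warn("fs_enet: FEC reset did not complete\n");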
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c new file mode 100644 index 000000000000..329ef231a096 --- /dev/null +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c | |||
@@ -0,0 +1,1196 @@ | |||
1 | /* | ||
2 | * Combined Ethernet driver for Motorola MPC8xx and MPC82xx. | ||
3 | * | ||
4 | * Copyright (c) 2003 Intracom S.A. | ||
5 | * by Pantelis Antoniou <panto@intracom.gr> | ||
6 | * | ||
7 | * 2005 (c) MontaVista Software, Inc. | ||
8 | * Vitaly Bordug <vbordug@ru.mvista.com> | ||
9 | * | ||
10 | * Heavily based on original FEC driver by Dan Malek <dan@embeddededge.com> | ||
11 | * and modifications by Joakim Tjernlund <joakim.tjernlund@lumentis.se> | ||
12 | * | ||
13 | * This file is licensed under the terms of the GNU General Public License | ||
14 | * version 2. This program is licensed "as is" without any warranty of any | ||
15 | * kind, whether express or implied. | ||
16 | */ | ||
17 | |||
18 | #include <linux/module.h> | ||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/types.h> | ||
21 | #include <linux/string.h> | ||
22 | #include <linux/ptrace.h> | ||
23 | #include <linux/errno.h> | ||
24 | #include <linux/ioport.h> | ||
25 | #include <linux/slab.h> | ||
26 | #include <linux/interrupt.h> | ||
27 | #include <linux/init.h> | ||
28 | #include <linux/delay.h> | ||
29 | #include <linux/netdevice.h> | ||
30 | #include <linux/etherdevice.h> | ||
31 | #include <linux/skbuff.h> | ||
32 | #include <linux/spinlock.h> | ||
33 | #include <linux/mii.h> | ||
34 | #include <linux/ethtool.h> | ||
35 | #include <linux/bitops.h> | ||
36 | #include <linux/fs.h> | ||
37 | #include <linux/platform_device.h> | ||
38 | #include <linux/phy.h> | ||
39 | #include <linux/of.h> | ||
40 | #include <linux/of_mdio.h> | ||
41 | #include <linux/of_platform.h> | ||
42 | #include <linux/of_gpio.h> | ||
43 | #include <linux/of_net.h> | ||
44 | |||
45 | #include <linux/vmalloc.h> | ||
46 | #include <asm/pgtable.h> | ||
47 | #include <asm/irq.h> | ||
48 | #include <asm/uaccess.h> | ||
49 | |||
50 | #include "fs_enet.h" | ||
51 | |||
52 | /*************************************************/ | ||
53 | |||
54 | MODULE_AUTHOR("Pantelis Antoniou <panto@intracom.gr>"); | ||
55 | MODULE_DESCRIPTION("Freescale Ethernet Driver"); | ||
56 | MODULE_LICENSE("GPL"); | ||
57 | MODULE_VERSION(DRV_MODULE_VERSION); | ||
58 | |||
59 | static int fs_enet_debug = -1; /* -1 == use FS_ENET_DEF_MSG_ENABLE as value */ | ||
60 | module_param(fs_enet_debug, int, 0); | ||
61 | MODULE_PARM_DESC(fs_enet_debug, | ||
62 | "Freescale bitmapped debugging message enable value"); | ||
63 | |||
64 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
65 | static void fs_enet_netpoll(struct net_device *dev); | ||
66 | #endif | ||
67 | |||
68 | static void fs_set_multicast_list(struct net_device *dev) | ||
69 | { | ||
70 | struct fs_enet_private *fep = netdev_priv(dev); | ||
71 | |||
72 | (*fep->ops->set_multicast_list)(dev); | ||
73 | } | ||
74 | |||
75 | static void skb_align(struct sk_buff *skb, int align) | ||
76 | { | ||
77 | int off = ((unsigned long)skb->data) & (align - 1); | ||
78 | |||
79 | if (off) | ||
80 | skb_reserve(skb, align - off); | ||
81 | } | ||
82 | |||
83 | /* NAPI receive function */ | ||
84 | static int fs_enet_rx_napi(struct napi_struct *napi, int budget) | ||
85 | { | ||
86 | struct fs_enet_private *fep = container_of(napi, struct fs_enet_private, napi); | ||
87 | struct net_device *dev = fep->ndev; | ||
88 | const struct fs_platform_info *fpi = fep->fpi; | ||
89 | cbd_t __iomem *bdp; | ||
90 | struct sk_buff *skb, *skbn, *skbt; | ||
91 | int received = 0; | ||
92 | u16 pkt_len, sc; | ||
93 | int curidx; | ||
94 | |||
95 | /* | ||
96 | * First, grab all of the stats for the incoming packet. | ||
97 | * These get messed up if we get called due to a busy condition. | ||
98 | */ | ||
99 | bdp = fep->cur_rx; | ||
100 | |||
101 | /* clear RX status bits for napi*/ | ||
102 | (*fep->ops->napi_clear_rx_event)(dev); | ||
103 | |||
104 | while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) { | ||
105 | curidx = bdp - fep->rx_bd_base; | ||
106 | |||
107 | /* | ||
108 | * Since we have allocated space to hold a complete frame, | ||
109 | * the last indicator should be set. | ||
110 | */ | ||
111 | if ((sc & BD_ENET_RX_LAST) == 0) | ||
112 | dev_warn(fep->dev, "rcv is not +last\n"); | ||
113 | |||
114 | /* | ||
115 | * Check for errors. | ||
116 | */ | ||
117 | if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL | | ||
118 | BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) { | ||
119 | fep->stats.rx_errors++; | ||
120 | /* Frame too long or too short. */ | ||
121 | if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH)) | ||
122 | fep->stats.rx_length_errors++; | ||
123 | /* Frame alignment */ | ||
124 | if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL)) | ||
125 | fep->stats.rx_frame_errors++; | ||
126 | /* CRC Error */ | ||
127 | if (sc & BD_ENET_RX_CR) | ||
128 | fep->stats.rx_crc_errors++; | ||
129 | /* FIFO overrun */ | ||
130 | if (sc & BD_ENET_RX_OV) | ||
131 | fep->stats.rx_crc_errors++; | ||
132 | |||
133 | skb = fep->rx_skbuff[curidx]; | ||
134 | |||
135 | dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), | ||
136 | L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), | ||
137 | DMA_FROM_DEVICE); | ||
138 | |||
139 | skbn = skb; | ||
140 | |||
141 | } else { | ||
142 | skb = fep->rx_skbuff[curidx]; | ||
143 | |||
144 | dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), | ||
145 | L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), | ||
146 | DMA_FROM_DEVICE); | ||
147 | |||
148 | /* | ||
149 | * Process the incoming frame. | ||
150 | */ | ||
151 | fep->stats.rx_packets++; | ||
152 | pkt_len = CBDR_DATLEN(bdp) - 4; /* remove CRC */ | ||
153 | fep->stats.rx_bytes += pkt_len + 4; | ||
154 | |||
155 | if (pkt_len <= fpi->rx_copybreak) { | ||
156 | /* +2 to make IP header L1 cache aligned */ | ||
157 | skbn = dev_alloc_skb(pkt_len + 2); | ||
158 | if (skbn != NULL) { | ||
159 | skb_reserve(skbn, 2); /* align IP header */ | ||
160 | skb_copy_from_linear_data(skb, | ||
161 | skbn->data, pkt_len); | ||
162 | /* swap */ | ||
163 | skbt = skb; | ||
164 | skb = skbn; | ||
165 | skbn = skbt; | ||
166 | } | ||
167 | } else { | ||
168 | skbn = dev_alloc_skb(ENET_RX_FRSIZE); | ||
169 | |||
170 | if (skbn) | ||
171 | skb_align(skbn, ENET_RX_ALIGN); | ||
172 | } | ||
173 | |||
174 | if (skbn != NULL) { | ||
175 | skb_put(skb, pkt_len); /* Make room */ | ||
176 | skb->protocol = eth_type_trans(skb, dev); | ||
177 | received++; | ||
178 | netif_receive_skb(skb); | ||
179 | } else { | ||
180 | dev_warn(fep->dev, | ||
181 | "Memory squeeze, dropping packet.\n"); | ||
182 | fep->stats.rx_dropped++; | ||
183 | skbn = skb; | ||
184 | } | ||
185 | } | ||
186 | |||
187 | fep->rx_skbuff[curidx] = skbn; | ||
188 | CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data, | ||
189 | L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), | ||
190 | DMA_FROM_DEVICE)); | ||
191 | CBDW_DATLEN(bdp, 0); | ||
192 | CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY); | ||
193 | |||
194 | /* | ||
195 | * Update BD pointer to next entry. | ||
196 | */ | ||
197 | if ((sc & BD_ENET_RX_WRAP) == 0) | ||
198 | bdp++; | ||
199 | else | ||
200 | bdp = fep->rx_bd_base; | ||
201 | |||
202 | (*fep->ops->rx_bd_done)(dev); | ||
203 | |||
204 | if (received >= budget) | ||
205 | break; | ||
206 | } | ||
207 | |||
208 | fep->cur_rx = bdp; | ||
209 | |||
210 | if (received < budget) { | ||
211 | /* done */ | ||
212 | napi_complete(napi); | ||
213 | (*fep->ops->napi_enable_rx)(dev); | ||
214 | } | ||
215 | return received; | ||
216 | } | ||
217 | |||
218 | /* non NAPI receive function */ | ||
219 | static int fs_enet_rx_non_napi(struct net_device *dev) | ||
220 | { | ||
221 | struct fs_enet_private *fep = netdev_priv(dev); | ||
222 | const struct fs_platform_info *fpi = fep->fpi; | ||
223 | cbd_t __iomem *bdp; | ||
224 | struct sk_buff *skb, *skbn, *skbt; | ||
225 | int received = 0; | ||
226 | u16 pkt_len, sc; | ||
227 | int curidx; | ||
228 | /* | ||
229 | * First, grab all of the stats for the incoming packet. | ||
230 | * These get messed up if we get called due to a busy condition. | ||
231 | */ | ||
232 | bdp = fep->cur_rx; | ||
233 | |||
234 | while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) { | ||
235 | |||
236 | curidx = bdp - fep->rx_bd_base; | ||
237 | |||
238 | /* | ||
239 | * Since we have allocated space to hold a complete frame, | ||
240 | * the last indicator should be set. | ||
241 | */ | ||
242 | if ((sc & BD_ENET_RX_LAST) == 0) | ||
243 | dev_warn(fep->dev, "rcv is not +last\n"); | ||
244 | |||
245 | /* | ||
246 | * Check for errors. | ||
247 | */ | ||
248 | if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL | | ||
249 | BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) { | ||
250 | fep->stats.rx_errors++; | ||
251 | /* Frame too long or too short. */ | ||
252 | if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH)) | ||
253 | fep->stats.rx_length_errors++; | ||
254 | /* Frame alignment */ | ||
255 | if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL)) | ||
256 | fep->stats.rx_frame_errors++; | ||
257 | /* CRC Error */ | ||
258 | if (sc & BD_ENET_RX_CR) | ||
259 | fep->stats.rx_crc_errors++; | ||
260 | /* FIFO overrun */ | ||
261 | if (sc & BD_ENET_RX_OV) | ||
262 | fep->stats.rx_crc_errors++; | ||
263 | |||
264 | skb = fep->rx_skbuff[curidx]; | ||
265 | |||
266 | dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), | ||
267 | L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), | ||
268 | DMA_FROM_DEVICE); | ||
269 | |||
270 | skbn = skb; | ||
271 | |||
272 | } else { | ||
273 | |||
274 | skb = fep->rx_skbuff[curidx]; | ||
275 | |||
276 | dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), | ||
277 | L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), | ||
278 | DMA_FROM_DEVICE); | ||
279 | |||
280 | /* | ||
281 | * Process the incoming frame. | ||
282 | */ | ||
283 | fep->stats.rx_packets++; | ||
284 | pkt_len = CBDR_DATLEN(bdp) - 4; /* remove CRC */ | ||
285 | fep->stats.rx_bytes += pkt_len + 4; | ||
286 | |||
287 | if (pkt_len <= fpi->rx_copybreak) { | ||
288 | /* +2 to make IP header L1 cache aligned */ | ||
289 | skbn = dev_alloc_skb(pkt_len + 2); | ||
290 | if (skbn != NULL) { | ||
291 | skb_reserve(skbn, 2); /* align IP header */ | ||
292 | skb_copy_from_linear_data(skb, | ||
293 | skbn->data, pkt_len); | ||
294 | /* swap */ | ||
295 | skbt = skb; | ||
296 | skb = skbn; | ||
297 | skbn = skbt; | ||
298 | } | ||
299 | } else { | ||
300 | skbn = dev_alloc_skb(ENET_RX_FRSIZE); | ||
301 | |||
302 | if (skbn) | ||
303 | skb_align(skbn, ENET_RX_ALIGN); | ||
304 | } | ||
305 | |||
306 | if (skbn != NULL) { | ||
307 | skb_put(skb, pkt_len); /* Make room */ | ||
308 | skb->protocol = eth_type_trans(skb, dev); | ||
309 | received++; | ||
310 | netif_rx(skb); | ||
311 | } else { | ||
312 | dev_warn(fep->dev, | ||
313 | "Memory squeeze, dropping packet.\n"); | ||
314 | fep->stats.rx_dropped++; | ||
315 | skbn = skb; | ||
316 | } | ||
317 | } | ||
318 | |||
319 | fep->rx_skbuff[curidx] = skbn; | ||
320 | CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data, | ||
321 | L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), | ||
322 | DMA_FROM_DEVICE)); | ||
323 | CBDW_DATLEN(bdp, 0); | ||
324 | CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY); | ||
325 | |||
326 | /* | ||
327 | * Update BD pointer to next entry. | ||
328 | */ | ||
329 | if ((sc & BD_ENET_RX_WRAP) == 0) | ||
330 | bdp++; | ||
331 | else | ||
332 | bdp = fep->rx_bd_base; | ||
333 | |||
334 | (*fep->ops->rx_bd_done)(dev); | ||
335 | } | ||
336 | |||
337 | fep->cur_rx = bdp; | ||
338 | |||
339 | return 0; | ||
340 | } | ||
341 | |||
342 | static void fs_enet_tx(struct net_device *dev) | ||
343 | { | ||
344 | struct fs_enet_private *fep = netdev_priv(dev); | ||
345 | cbd_t __iomem *bdp; | ||
346 | struct sk_buff *skb; | ||
347 | int dirtyidx, do_wake, do_restart; | ||
348 | u16 sc; | ||
349 | |||
350 | spin_lock(&fep->tx_lock); | ||
351 | bdp = fep->dirty_tx; | ||
352 | |||
353 | do_wake = do_restart = 0; | ||
354 | while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0) { | ||
355 | dirtyidx = bdp - fep->tx_bd_base; | ||
356 | |||
357 | if (fep->tx_free == fep->tx_ring) | ||
358 | break; | ||
359 | |||
360 | skb = fep->tx_skbuff[dirtyidx]; | ||
361 | |||
362 | /* | ||
363 | * Check for errors. | ||
364 | */ | ||
365 | if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC | | ||
366 | BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) { | ||
367 | |||
368 | if (sc & BD_ENET_TX_HB) /* No heartbeat */ | ||
369 | fep->stats.tx_heartbeat_errors++; | ||
370 | if (sc & BD_ENET_TX_LC) /* Late collision */ | ||
371 | fep->stats.tx_window_errors++; | ||
372 | if (sc & BD_ENET_TX_RL) /* Retrans limit */ | ||
373 | fep->stats.tx_aborted_errors++; | ||
374 | if (sc & BD_ENET_TX_UN) /* Underrun */ | ||
375 | fep->stats.tx_fifo_errors++; | ||
376 | if (sc & BD_ENET_TX_CSL) /* Carrier lost */ | ||
377 | fep->stats.tx_carrier_errors++; | ||
378 | |||
379 | if (sc & (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) { | ||
380 | fep->stats.tx_errors++; | ||
381 | do_restart = 1; | ||
382 | } | ||
383 | } else | ||
384 | fep->stats.tx_packets++; | ||
385 | |||
386 | if (sc & BD_ENET_TX_READY) { | ||
387 | dev_warn(fep->dev, | ||
388 | "HEY! Enet xmit interrupt and TX_READY.\n"); | ||
389 | } | ||
390 | |||
391 | /* | ||
392 | * Deferred means some collisions occurred during transmit, | ||
393 | * but we eventually sent the packet OK. | ||
394 | */ | ||
395 | if (sc & BD_ENET_TX_DEF) | ||
396 | fep->stats.collisions++; | ||
397 | |||
398 | /* unmap */ | ||
399 | dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), | ||
400 | skb->len, DMA_TO_DEVICE); | ||
401 | |||
402 | /* | ||
403 | * Free the sk buffer associated with this last transmit. | ||
404 | */ | ||
405 | dev_kfree_skb_irq(skb); | ||
406 | fep->tx_skbuff[dirtyidx] = NULL; | ||
407 | |||
408 | /* | ||
409 | * Update pointer to next buffer descriptor to be transmitted. | ||
410 | */ | ||
411 | if ((sc & BD_ENET_TX_WRAP) == 0) | ||
412 | bdp++; | ||
413 | else | ||
414 | bdp = fep->tx_bd_base; | ||
415 | |||
416 | /* | ||
417 | * Since we have freed up a buffer, the ring is no longer | ||
418 | * full. | ||
419 | */ | ||
420 | if (!fep->tx_free++) | ||
421 | do_wake = 1; | ||
422 | } | ||
423 | |||
424 | fep->dirty_tx = bdp; | ||
425 | |||
426 | if (do_restart) | ||
427 | (*fep->ops->tx_restart)(dev); | ||
428 | |||
429 | spin_unlock(&fep->tx_lock); | ||
430 | |||
431 | if (do_wake) | ||
432 | netif_wake_queue(dev); | ||
433 | } | ||
434 | |||
435 | /* | ||
436 | * The interrupt handler. | ||
437 | * This is called from the MPC core interrupt. | ||
438 | */ | ||
439 | static irqreturn_t | ||
440 | fs_enet_interrupt(int irq, void *dev_id) | ||
441 | { | ||
442 | struct net_device *dev = dev_id; | ||
443 | struct fs_enet_private *fep; | ||
444 | const struct fs_platform_info *fpi; | ||
445 | u32 int_events; | ||
446 | u32 int_clr_events; | ||
447 | int nr, napi_ok; | ||
448 | int handled; | ||
449 | |||
450 | fep = netdev_priv(dev); | ||
451 | fpi = fep->fpi; | ||
452 | |||
453 | nr = 0; | ||
454 | while ((int_events = (*fep->ops->get_int_events)(dev)) != 0) { | ||
455 | nr++; | ||
456 | |||
457 | int_clr_events = int_events; | ||
458 | if (fpi->use_napi) | ||
459 | int_clr_events &= ~fep->ev_napi_rx; | ||
460 | |||
461 | (*fep->ops->clear_int_events)(dev, int_clr_events); | ||
462 | |||
463 | if (int_events & fep->ev_err) | ||
464 | (*fep->ops->ev_error)(dev, int_events); | ||
465 | |||
466 | if (int_events & fep->ev_rx) { | ||
467 | if (!fpi->use_napi) | ||
468 | fs_enet_rx_non_napi(dev); | ||
469 | else { | ||
470 | napi_ok = napi_schedule_prep(&fep->napi); | ||
471 | |||
472 | (*fep->ops->napi_disable_rx)(dev); | ||
473 | (*fep->ops->clear_int_events)(dev, fep->ev_napi_rx); | ||
474 | |||
475 | /* NOTE: it is possible for FCCs in NAPI mode */ | ||
476 | /* to submit a spurious interrupt while in poll */ | ||
477 | if (napi_ok) | ||
478 | __napi_schedule(&fep->napi); | ||
479 | } | ||
480 | } | ||
481 | |||
482 | if (int_events & fep->ev_tx) | ||
483 | fs_enet_tx(dev); | ||
484 | } | ||
485 | |||
486 | handled = nr > 0; | ||
487 | return IRQ_RETVAL(handled); | ||
488 | } | ||
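Editor's note: the RX branch above is the split-phase NAPI hand-off; restated as a minimal sketch, with mask_rx_events() as a hypothetical stand-in for (*fep->ops->napi_disable_rx):

	if (napi_schedule_prep(&fep->napi)) {	/* atomically claim SCHED */
		mask_rx_events(dev);		/* keep RX irqs off while polling */
		__napi_schedule(&fep->napi);	/* run the poll from softirq */
	}

The driver masks and clears events even when prep fails because, per the comment above, FCCs can raise a spurious RX interrupt while a poll is already in flight.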
489 | |||
490 | void fs_init_bds(struct net_device *dev) | ||
491 | { | ||
492 | struct fs_enet_private *fep = netdev_priv(dev); | ||
493 | cbd_t __iomem *bdp; | ||
494 | struct sk_buff *skb; | ||
495 | int i; | ||
496 | |||
497 | fs_cleanup_bds(dev); | ||
498 | |||
499 | fep->dirty_tx = fep->cur_tx = fep->tx_bd_base; | ||
500 | fep->tx_free = fep->tx_ring; | ||
501 | fep->cur_rx = fep->rx_bd_base; | ||
502 | |||
503 | /* | ||
504 | * Initialize the receive buffer descriptors. | ||
505 | */ | ||
506 | for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) { | ||
507 | skb = dev_alloc_skb(ENET_RX_FRSIZE); | ||
508 | if (skb == NULL) { | ||
509 | dev_warn(fep->dev, | ||
510 | "Memory squeeze, unable to allocate skb\n"); | ||
511 | break; | ||
512 | } | ||
513 | skb_align(skb, ENET_RX_ALIGN); | ||
514 | fep->rx_skbuff[i] = skb; | ||
515 | CBDW_BUFADDR(bdp, | ||
516 | dma_map_single(fep->dev, skb->data, | ||
517 | L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), | ||
518 | DMA_FROM_DEVICE)); | ||
519 | CBDW_DATLEN(bdp, 0); /* zero */ | ||
520 | CBDW_SC(bdp, BD_ENET_RX_EMPTY | | ||
521 | ((i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP)); | ||
522 | } | ||
523 | /* | ||
524 | * if we failed, fill up the remainder | ||
525 | */ | ||
526 | for (; i < fep->rx_ring; i++, bdp++) { | ||
527 | fep->rx_skbuff[i] = NULL; | ||
528 | CBDW_SC(bdp, (i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP); | ||
529 | } | ||
530 | |||
531 | /* | ||
532 | * ...and the same for transmit. | ||
533 | */ | ||
534 | for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) { | ||
535 | fep->tx_skbuff[i] = NULL; | ||
536 | CBDW_BUFADDR(bdp, 0); | ||
537 | CBDW_DATLEN(bdp, 0); | ||
538 | CBDW_SC(bdp, (i < fep->tx_ring - 1) ? 0 : BD_SC_WRAP); | ||
539 | } | ||
540 | } | ||
541 | |||
542 | void fs_cleanup_bds(struct net_device *dev) | ||
543 | { | ||
544 | struct fs_enet_private *fep = netdev_priv(dev); | ||
545 | struct sk_buff *skb; | ||
546 | cbd_t __iomem *bdp; | ||
547 | int i; | ||
548 | |||
549 | /* | ||
550 | * Reset SKB transmit buffers. | ||
551 | */ | ||
552 | for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) { | ||
553 | if ((skb = fep->tx_skbuff[i]) == NULL) | ||
554 | continue; | ||
555 | |||
556 | /* unmap */ | ||
557 | dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), | ||
558 | skb->len, DMA_TO_DEVICE); | ||
559 | |||
560 | fep->tx_skbuff[i] = NULL; | ||
561 | dev_kfree_skb(skb); | ||
562 | } | ||
563 | |||
564 | /* | ||
565 | * Reset SKB receive buffers | ||
566 | */ | ||
567 | for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) { | ||
568 | if ((skb = fep->rx_skbuff[i]) == NULL) | ||
569 | continue; | ||
570 | |||
571 | /* unmap */ | ||
572 | dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), | ||
573 | L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), | ||
574 | DMA_FROM_DEVICE); | ||
575 | |||
576 | fep->rx_skbuff[i] = NULL; | ||
577 | |||
578 | dev_kfree_skb(skb); | ||
579 | } | ||
580 | } | ||
581 | |||
582 | /**********************************************************************************/ | ||
583 | |||
584 | #ifdef CONFIG_FS_ENET_MPC5121_FEC | ||
585 | /* | ||
586 | * The MPC5121 FEC requires 4-byte alignment for the TX data buffer! | ||
587 | */ | ||
588 | static struct sk_buff *tx_skb_align_workaround(struct net_device *dev, | ||
589 | struct sk_buff *skb) | ||
590 | { | ||
591 | struct sk_buff *new_skb; | ||
592 | struct fs_enet_private *fep = netdev_priv(dev); | ||
593 | |||
594 | /* Alloc new skb */ | ||
595 | new_skb = dev_alloc_skb(skb->len + 4); | ||
596 | if (!new_skb) { | ||
597 | if (net_ratelimit()) { | ||
598 | dev_warn(fep->dev, | ||
599 | "Memory squeeze, dropping tx packet.\n"); | ||
600 | } | ||
601 | return NULL; | ||
602 | } | ||
603 | |||
604 | /* Make sure new skb is properly aligned */ | ||
605 | skb_align(new_skb, 4); | ||
606 | |||
607 | /* Copy data to new skb ... */ | ||
608 | skb_copy_from_linear_data(skb, new_skb->data, skb->len); | ||
609 | skb_put(new_skb, skb->len); | ||
610 | |||
611 | /* ... and free an old one */ | ||
612 | dev_kfree_skb_any(skb); | ||
613 | |||
614 | return new_skb; | ||
615 | } | ||
616 | #endif | ||
617 | |||
618 | static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) | ||
619 | { | ||
620 | struct fs_enet_private *fep = netdev_priv(dev); | ||
621 | cbd_t __iomem *bdp; | ||
622 | int curidx; | ||
623 | u16 sc; | ||
624 | unsigned long flags; | ||
625 | |||
626 | #ifdef CONFIG_FS_ENET_MPC5121_FEC | ||
627 | if (((unsigned long)skb->data) & 0x3) { | ||
628 | skb = tx_skb_align_workaround(dev, skb); | ||
629 | if (!skb) { | ||
630 | /* | ||
631 | * Memory allocation failed in tx_skb_align_workaround(), | ||
632 | * so no aligned copy exists. The original skb is still | ||
633 | * valid, so ask the stack to retry the transmit later. | ||
634 | */ | ||
635 | return NETDEV_TX_BUSY; | ||
636 | } | ||
637 | } | ||
638 | #endif | ||
639 | spin_lock_irqsave(&fep->tx_lock, flags); | ||
640 | |||
641 | /* | ||
642 | * Fill in a Tx ring entry | ||
643 | */ | ||
644 | bdp = fep->cur_tx; | ||
645 | |||
646 | if (!fep->tx_free || (CBDR_SC(bdp) & BD_ENET_TX_READY)) { | ||
647 | netif_stop_queue(dev); | ||
648 | spin_unlock_irqrestore(&fep->tx_lock, flags); | ||
649 | |||
650 | /* | ||
651 | * Oops. All transmit buffers are full. Bail out. | ||
652 | * This should not happen, since the tx queue should be stopped. | ||
653 | */ | ||
654 | dev_warn(fep->dev, "tx queue full!\n"); | ||
655 | return NETDEV_TX_BUSY; | ||
656 | } | ||
657 | |||
658 | curidx = bdp - fep->tx_bd_base; | ||
659 | /* | ||
660 | * Clear all of the status flags. | ||
661 | */ | ||
662 | CBDC_SC(bdp, BD_ENET_TX_STATS); | ||
663 | |||
664 | /* | ||
665 | * Save skb pointer. | ||
666 | */ | ||
667 | fep->tx_skbuff[curidx] = skb; | ||
668 | |||
669 | fep->stats.tx_bytes += skb->len; | ||
670 | |||
671 | /* | ||
672 | * Push the data cache so the CPM does not get stale memory data. | ||
673 | */ | ||
674 | CBDW_BUFADDR(bdp, dma_map_single(fep->dev, | ||
675 | skb->data, skb->len, DMA_TO_DEVICE)); | ||
676 | CBDW_DATLEN(bdp, skb->len); | ||
677 | |||
678 | /* | ||
679 | * If this was the last BD in the ring, start at the beginning again. | ||
680 | */ | ||
681 | if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0) | ||
682 | fep->cur_tx++; | ||
683 | else | ||
684 | fep->cur_tx = fep->tx_bd_base; | ||
685 | |||
686 | if (!--fep->tx_free) | ||
687 | netif_stop_queue(dev); | ||
688 | |||
689 | /* Trigger transmission start */ | ||
690 | sc = BD_ENET_TX_READY | BD_ENET_TX_INTR | | ||
691 | BD_ENET_TX_LAST | BD_ENET_TX_TC; | ||
692 | |||
693 | /* note that while FEC does not have this bit | ||
694 | * it marks it as available for software use | ||
695 | * yay for hw reuse :) */ | ||
696 | if (skb->len <= 60) | ||
697 | sc |= BD_ENET_TX_PAD; | ||
698 | CBDS_SC(bdp, sc); | ||
699 | |||
700 | skb_tx_timestamp(skb); | ||
701 | |||
702 | (*fep->ops->tx_kickstart)(dev); | ||
703 | |||
704 | spin_unlock_irqrestore(&fep->tx_lock, flags); | ||
705 | |||
706 | return NETDEV_TX_OK; | ||
707 | } | ||
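Editor's note: the flow control here pairs with fs_enet_tx() earlier in the file; the two halves of the tx_free invariant, quoted side by side:

	if (!--fep->tx_free)	/* xmit: just handed out the last free BD */
		netif_stop_queue(dev);

	if (!fep->tx_free++)	/* reclaim: pool was empty, now holds one */
		do_wake = 1;	/* fs_enet_tx() then calls netif_wake_queue() */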
708 | |||
709 | static void fs_timeout(struct net_device *dev) | ||
710 | { | ||
711 | struct fs_enet_private *fep = netdev_priv(dev); | ||
712 | unsigned long flags; | ||
713 | int wake = 0; | ||
714 | |||
715 | fep->stats.tx_errors++; | ||
716 | |||
717 | spin_lock_irqsave(&fep->lock, flags); | ||
718 | |||
719 | if (dev->flags & IFF_UP) { | ||
720 | phy_stop(fep->phydev); | ||
721 | (*fep->ops->stop)(dev); | ||
722 | (*fep->ops->restart)(dev); | ||
723 | phy_start(fep->phydev); | ||
724 | } | ||
725 | |||
726 | phy_start(fep->phydev); | ||
727 | wake = fep->tx_free && !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY); | ||
728 | spin_unlock_irqrestore(&fep->lock, flags); | ||
729 | |||
730 | if (wake) | ||
731 | netif_wake_queue(dev); | ||
732 | } | ||
733 | |||
734 | /*----------------------------------------------------------------------------- | ||
735 | * generic link-change handler - should be sufficient for most cases | ||
736 | *-----------------------------------------------------------------------------*/ | ||
737 | static void generic_adjust_link(struct net_device *dev) | ||
738 | { | ||
739 | struct fs_enet_private *fep = netdev_priv(dev); | ||
740 | struct phy_device *phydev = fep->phydev; | ||
741 | int new_state = 0; | ||
742 | |||
743 | if (phydev->link) { | ||
744 | /* adjust to duplex mode */ | ||
745 | if (phydev->duplex != fep->oldduplex) { | ||
746 | new_state = 1; | ||
747 | fep->oldduplex = phydev->duplex; | ||
748 | } | ||
749 | |||
750 | if (phydev->speed != fep->oldspeed) { | ||
751 | new_state = 1; | ||
752 | fep->oldspeed = phydev->speed; | ||
753 | } | ||
754 | |||
755 | if (!fep->oldlink) { | ||
756 | new_state = 1; | ||
757 | fep->oldlink = 1; | ||
758 | } | ||
759 | |||
760 | if (new_state) | ||
761 | fep->ops->restart(dev); | ||
762 | } else if (fep->oldlink) { | ||
763 | new_state = 1; | ||
764 | fep->oldlink = 0; | ||
765 | fep->oldspeed = 0; | ||
766 | fep->oldduplex = -1; | ||
767 | } | ||
768 | |||
769 | if (new_state && netif_msg_link(fep)) | ||
770 | phy_print_status(phydev); | ||
771 | } | ||
772 | |||
773 | |||
774 | static void fs_adjust_link(struct net_device *dev) | ||
775 | { | ||
776 | struct fs_enet_private *fep = netdev_priv(dev); | ||
777 | unsigned long flags; | ||
778 | |||
779 | spin_lock_irqsave(&fep->lock, flags); | ||
780 | |||
781 | if(fep->ops->adjust_link) | ||
782 | fep->ops->adjust_link(dev); | ||
783 | else | ||
784 | generic_adjust_link(dev); | ||
785 | |||
786 | spin_unlock_irqrestore(&fep->lock, flags); | ||
787 | } | ||
788 | |||
789 | static int fs_init_phy(struct net_device *dev) | ||
790 | { | ||
791 | struct fs_enet_private *fep = netdev_priv(dev); | ||
792 | struct phy_device *phydev; | ||
793 | |||
794 | fep->oldlink = 0; | ||
795 | fep->oldspeed = 0; | ||
796 | fep->oldduplex = -1; | ||
797 | |||
798 | phydev = of_phy_connect(dev, fep->fpi->phy_node, &fs_adjust_link, 0, | ||
799 | PHY_INTERFACE_MODE_MII); | ||
800 | if (!phydev) { | ||
801 | phydev = of_phy_connect_fixed_link(dev, &fs_adjust_link, | ||
802 | PHY_INTERFACE_MODE_MII); | ||
803 | } | ||
804 | if (!phydev) { | ||
805 | dev_err(&dev->dev, "Could not attach to PHY\n"); | ||
806 | return -ENODEV; | ||
807 | } | ||
808 | |||
809 | fep->phydev = phydev; | ||
810 | |||
811 | return 0; | ||
812 | } | ||
813 | |||
814 | static int fs_enet_open(struct net_device *dev) | ||
815 | { | ||
816 | struct fs_enet_private *fep = netdev_priv(dev); | ||
817 | int r; | ||
818 | int err; | ||
819 | |||
820 | /* to initialize the fep->cur_rx,... */ | ||
821 | /* not doing this will cause a crash in fs_enet_rx_napi */ | ||
822 | fs_init_bds(fep->ndev); | ||
823 | |||
824 | if (fep->fpi->use_napi) | ||
825 | napi_enable(&fep->napi); | ||
826 | |||
827 | /* Install our interrupt handler. */ | ||
828 | r = request_irq(fep->interrupt, fs_enet_interrupt, IRQF_SHARED, | ||
829 | "fs_enet-mac", dev); | ||
830 | if (r != 0) { | ||
831 | dev_err(fep->dev, "Could not allocate FS_ENET IRQ!"); | ||
832 | if (fep->fpi->use_napi) | ||
833 | napi_disable(&fep->napi); | ||
834 | return -EINVAL; | ||
835 | } | ||
836 | |||
837 | err = fs_init_phy(dev); | ||
838 | if (err) { | ||
839 | free_irq(fep->interrupt, dev); | ||
840 | if (fep->fpi->use_napi) | ||
841 | napi_disable(&fep->napi); | ||
842 | return err; | ||
843 | } | ||
844 | phy_start(fep->phydev); | ||
845 | |||
846 | netif_start_queue(dev); | ||
847 | |||
848 | return 0; | ||
849 | } | ||
850 | |||
851 | static int fs_enet_close(struct net_device *dev) | ||
852 | { | ||
853 | struct fs_enet_private *fep = netdev_priv(dev); | ||
854 | unsigned long flags; | ||
855 | |||
856 | netif_stop_queue(dev); | ||
857 | netif_carrier_off(dev); | ||
858 | if (fep->fpi->use_napi) | ||
859 | napi_disable(&fep->napi); | ||
860 | phy_stop(fep->phydev); | ||
861 | |||
862 | spin_lock_irqsave(&fep->lock, flags); | ||
863 | spin_lock(&fep->tx_lock); | ||
864 | (*fep->ops->stop)(dev); | ||
865 | spin_unlock(&fep->tx_lock); | ||
866 | spin_unlock_irqrestore(&fep->lock, flags); | ||
867 | |||
868 | /* release any irqs */ | ||
869 | phy_disconnect(fep->phydev); | ||
870 | fep->phydev = NULL; | ||
871 | free_irq(fep->interrupt, dev); | ||
872 | |||
873 | return 0; | ||
874 | } | ||
875 | |||
876 | static struct net_device_stats *fs_enet_get_stats(struct net_device *dev) | ||
877 | { | ||
878 | struct fs_enet_private *fep = netdev_priv(dev); | ||
879 | return &fep->stats; | ||
880 | } | ||
881 | |||
882 | /*************************************************************************/ | ||
883 | |||
884 | static void fs_get_drvinfo(struct net_device *dev, | ||
885 | struct ethtool_drvinfo *info) | ||
886 | { | ||
887 | strcpy(info->driver, DRV_MODULE_NAME); | ||
888 | strcpy(info->version, DRV_MODULE_VERSION); | ||
889 | } | ||
890 | |||
891 | static int fs_get_regs_len(struct net_device *dev) | ||
892 | { | ||
893 | struct fs_enet_private *fep = netdev_priv(dev); | ||
894 | |||
895 | return (*fep->ops->get_regs_len)(dev); | ||
896 | } | ||
897 | |||
898 | static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs, | ||
899 | void *p) | ||
900 | { | ||
901 | struct fs_enet_private *fep = netdev_priv(dev); | ||
902 | unsigned long flags; | ||
903 | int r, len; | ||
904 | |||
905 | len = regs->len; | ||
906 | |||
907 | spin_lock_irqsave(&fep->lock, flags); | ||
908 | r = (*fep->ops->get_regs)(dev, p, &len); | ||
909 | spin_unlock_irqrestore(&fep->lock, flags); | ||
910 | |||
911 | if (r == 0) | ||
912 | regs->version = 0; | ||
913 | } | ||
914 | |||
915 | static int fs_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
916 | { | ||
917 | struct fs_enet_private *fep = netdev_priv(dev); | ||
918 | |||
919 | if (!fep->phydev) | ||
920 | return -ENODEV; | ||
921 | |||
922 | return phy_ethtool_gset(fep->phydev, cmd); | ||
923 | } | ||
924 | |||
925 | static int fs_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
926 | { | ||
927 | struct fs_enet_private *fep = netdev_priv(dev); | ||
928 | |||
929 | if (!fep->phydev) | ||
930 | return -ENODEV; | ||
931 | |||
932 | return phy_ethtool_sset(fep->phydev, cmd); | ||
933 | } | ||
934 | |||
935 | static int fs_nway_reset(struct net_device *dev) | ||
936 | { | ||
937 | return 0; | ||
938 | } | ||
939 | |||
940 | static u32 fs_get_msglevel(struct net_device *dev) | ||
941 | { | ||
942 | struct fs_enet_private *fep = netdev_priv(dev); | ||
943 | return fep->msg_enable; | ||
944 | } | ||
945 | |||
946 | static void fs_set_msglevel(struct net_device *dev, u32 value) | ||
947 | { | ||
948 | struct fs_enet_private *fep = netdev_priv(dev); | ||
949 | fep->msg_enable = value; | ||
950 | } | ||
951 | |||
952 | static const struct ethtool_ops fs_ethtool_ops = { | ||
953 | .get_drvinfo = fs_get_drvinfo, | ||
954 | .get_regs_len = fs_get_regs_len, | ||
955 | .get_settings = fs_get_settings, | ||
956 | .set_settings = fs_set_settings, | ||
957 | .nway_reset = fs_nway_reset, | ||
958 | .get_link = ethtool_op_get_link, | ||
959 | .get_msglevel = fs_get_msglevel, | ||
960 | .set_msglevel = fs_set_msglevel, | ||
961 | .get_regs = fs_get_regs, | ||
962 | }; | ||
963 | |||
964 | static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | ||
965 | { | ||
966 | struct fs_enet_private *fep = netdev_priv(dev); | ||
967 | |||
968 | if (!netif_running(dev)) | ||
969 | return -EINVAL; | ||
970 | |||
971 | return phy_mii_ioctl(fep->phydev, rq, cmd); | ||
972 | } | ||
973 | |||
974 | extern int fs_mii_connect(struct net_device *dev); | ||
975 | extern void fs_mii_disconnect(struct net_device *dev); | ||
976 | |||
977 | /**************************************************************************************/ | ||
978 | |||
979 | #ifdef CONFIG_FS_ENET_HAS_FEC | ||
980 | #define IS_FEC(match) ((match)->data == &fs_fec_ops) | ||
981 | #else | ||
982 | #define IS_FEC(match) 0 | ||
983 | #endif | ||
984 | |||
985 | static const struct net_device_ops fs_enet_netdev_ops = { | ||
986 | .ndo_open = fs_enet_open, | ||
987 | .ndo_stop = fs_enet_close, | ||
988 | .ndo_get_stats = fs_enet_get_stats, | ||
989 | .ndo_start_xmit = fs_enet_start_xmit, | ||
990 | .ndo_tx_timeout = fs_timeout, | ||
991 | .ndo_set_multicast_list = fs_set_multicast_list, | ||
992 | .ndo_do_ioctl = fs_ioctl, | ||
993 | .ndo_validate_addr = eth_validate_addr, | ||
994 | .ndo_set_mac_address = eth_mac_addr, | ||
995 | .ndo_change_mtu = eth_change_mtu, | ||
996 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
997 | .ndo_poll_controller = fs_enet_netpoll, | ||
998 | #endif | ||
999 | }; | ||
1000 | |||
1001 | static struct of_device_id fs_enet_match[]; | ||
1002 | static int __devinit fs_enet_probe(struct platform_device *ofdev) | ||
1003 | { | ||
1004 | const struct of_device_id *match; | ||
1005 | struct net_device *ndev; | ||
1006 | struct fs_enet_private *fep; | ||
1007 | struct fs_platform_info *fpi; | ||
1008 | const u32 *data; | ||
1009 | const u8 *mac_addr; | ||
1010 | int privsize, len, ret = -ENODEV; | ||
1011 | |||
1012 | match = of_match_device(fs_enet_match, &ofdev->dev); | ||
1013 | if (!match) | ||
1014 | return -EINVAL; | ||
1015 | |||
1016 | fpi = kzalloc(sizeof(*fpi), GFP_KERNEL); | ||
1017 | if (!fpi) | ||
1018 | return -ENOMEM; | ||
1019 | |||
1020 | if (!IS_FEC(match)) { | ||
1021 | data = of_get_property(ofdev->dev.of_node, "fsl,cpm-command", &len); | ||
1022 | if (!data || len != 4) | ||
1023 | goto out_free_fpi; | ||
1024 | |||
1025 | fpi->cp_command = *data; | ||
1026 | } | ||
1027 | |||
1028 | fpi->rx_ring = 32; | ||
1029 | fpi->tx_ring = 32; | ||
1030 | fpi->rx_copybreak = 240; | ||
1031 | fpi->use_napi = 1; | ||
1032 | fpi->napi_weight = 17; | ||
1033 | fpi->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0); | ||
1034 | if ((!fpi->phy_node) && (!of_get_property(ofdev->dev.of_node, "fixed-link", | ||
1035 | NULL))) | ||
1036 | goto out_free_fpi; | ||
1037 | |||
1038 | privsize = sizeof(*fep) + | ||
1039 | sizeof(struct sk_buff **) * | ||
1040 | (fpi->rx_ring + fpi->tx_ring); | ||
1041 | |||
1042 | ndev = alloc_etherdev(privsize); | ||
1043 | if (!ndev) { | ||
1044 | ret = -ENOMEM; | ||
1045 | goto out_put; | ||
1046 | } | ||
1047 | |||
1048 | SET_NETDEV_DEV(ndev, &ofdev->dev); | ||
1049 | dev_set_drvdata(&ofdev->dev, ndev); | ||
1050 | |||
1051 | fep = netdev_priv(ndev); | ||
1052 | fep->dev = &ofdev->dev; | ||
1053 | fep->ndev = ndev; | ||
1054 | fep->fpi = fpi; | ||
1055 | fep->ops = match->data; | ||
1056 | |||
1057 | ret = fep->ops->setup_data(ndev); | ||
1058 | if (ret) | ||
1059 | goto out_free_dev; | ||
1060 | |||
1061 | fep->rx_skbuff = (struct sk_buff **)&fep[1]; | ||
1062 | fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring; | ||
1063 | |||
1064 | spin_lock_init(&fep->lock); | ||
1065 | spin_lock_init(&fep->tx_lock); | ||
1066 | |||
1067 | mac_addr = of_get_mac_address(ofdev->dev.of_node); | ||
1068 | if (mac_addr) | ||
1069 | memcpy(ndev->dev_addr, mac_addr, 6); | ||
1070 | |||
1071 | ret = fep->ops->allocate_bd(ndev); | ||
1072 | if (ret) | ||
1073 | goto out_cleanup_data; | ||
1074 | |||
1075 | fep->rx_bd_base = fep->ring_base; | ||
1076 | fep->tx_bd_base = fep->rx_bd_base + fpi->rx_ring; | ||
1077 | |||
1078 | fep->tx_ring = fpi->tx_ring; | ||
1079 | fep->rx_ring = fpi->rx_ring; | ||
1080 | |||
1081 | ndev->netdev_ops = &fs_enet_netdev_ops; | ||
1082 | ndev->watchdog_timeo = 2 * HZ; | ||
1083 | if (fpi->use_napi) | ||
1084 | netif_napi_add(ndev, &fep->napi, fs_enet_rx_napi, | ||
1085 | fpi->napi_weight); | ||
1086 | |||
1087 | ndev->ethtool_ops = &fs_ethtool_ops; | ||
1088 | |||
1089 | init_timer(&fep->phy_timer_list); | ||
1090 | |||
1091 | netif_carrier_off(ndev); | ||
1092 | |||
1093 | ret = register_netdev(ndev); | ||
1094 | if (ret) | ||
1095 | goto out_free_bd; | ||
1096 | |||
1097 | pr_info("%s: fs_enet: %pM\n", ndev->name, ndev->dev_addr); | ||
1098 | |||
1099 | return 0; | ||
1100 | |||
1101 | out_free_bd: | ||
1102 | fep->ops->free_bd(ndev); | ||
1103 | out_cleanup_data: | ||
1104 | fep->ops->cleanup_data(ndev); | ||
1105 | out_free_dev: | ||
1106 | free_netdev(ndev); | ||
1107 | dev_set_drvdata(&ofdev->dev, NULL); | ||
1108 | out_put: | ||
1109 | of_node_put(fpi->phy_node); | ||
1110 | out_free_fpi: | ||
1111 | kfree(fpi); | ||
1112 | return ret; | ||
1113 | } | ||
1114 | |||
1115 | static int fs_enet_remove(struct platform_device *ofdev) | ||
1116 | { | ||
1117 | struct net_device *ndev = dev_get_drvdata(&ofdev->dev); | ||
1118 | struct fs_enet_private *fep = netdev_priv(ndev); | ||
1119 | |||
1120 | unregister_netdev(ndev); | ||
1121 | |||
1122 | fep->ops->free_bd(ndev); | ||
1123 | fep->ops->cleanup_data(ndev); | ||
1124 | dev_set_drvdata(fep->dev, NULL); | ||
1125 | of_node_put(fep->fpi->phy_node); | ||
1126 | free_netdev(ndev); | ||
1127 | return 0; | ||
1128 | } | ||
1129 | |||
1130 | static struct of_device_id fs_enet_match[] = { | ||
1131 | #ifdef CONFIG_FS_ENET_HAS_SCC | ||
1132 | { | ||
1133 | .compatible = "fsl,cpm1-scc-enet", | ||
1134 | .data = (void *)&fs_scc_ops, | ||
1135 | }, | ||
1136 | { | ||
1137 | .compatible = "fsl,cpm2-scc-enet", | ||
1138 | .data = (void *)&fs_scc_ops, | ||
1139 | }, | ||
1140 | #endif | ||
1141 | #ifdef CONFIG_FS_ENET_HAS_FCC | ||
1142 | { | ||
1143 | .compatible = "fsl,cpm2-fcc-enet", | ||
1144 | .data = (void *)&fs_fcc_ops, | ||
1145 | }, | ||
1146 | #endif | ||
1147 | #ifdef CONFIG_FS_ENET_HAS_FEC | ||
1148 | #ifdef CONFIG_FS_ENET_MPC5121_FEC | ||
1149 | { | ||
1150 | .compatible = "fsl,mpc5121-fec", | ||
1151 | .data = (void *)&fs_fec_ops, | ||
1152 | }, | ||
1153 | #else | ||
1154 | { | ||
1155 | .compatible = "fsl,pq1-fec-enet", | ||
1156 | .data = (void *)&fs_fec_ops, | ||
1157 | }, | ||
1158 | #endif | ||
1159 | #endif | ||
1160 | {} | ||
1161 | }; | ||
1162 | MODULE_DEVICE_TABLE(of, fs_enet_match); | ||
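At probe time, of_match_device() (called at the top of fs_enet_probe() above) returns the entry whose compatible string matches the device-tree node, and its ->data pointer selects the corresponding fs_ops (fs_scc_ops, fs_fcc_ops or fs_fec_ops), so one platform driver services all three MAC flavors. The IS_FEC() macro keys off the same pointer to decide whether a "fsl,cpm-command" property must be present.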
1163 | |||
1164 | static struct platform_driver fs_enet_driver = { | ||
1165 | .driver = { | ||
1166 | .owner = THIS_MODULE, | ||
1167 | .name = "fs_enet", | ||
1168 | .of_match_table = fs_enet_match, | ||
1169 | }, | ||
1170 | .probe = fs_enet_probe, | ||
1171 | .remove = fs_enet_remove, | ||
1172 | }; | ||
1173 | |||
1174 | static int __init fs_init(void) | ||
1175 | { | ||
1176 | return platform_driver_register(&fs_enet_driver); | ||
1177 | } | ||
1178 | |||
1179 | static void __exit fs_cleanup(void) | ||
1180 | { | ||
1181 | platform_driver_unregister(&fs_enet_driver); | ||
1182 | } | ||
1183 | |||
1184 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
1185 | static void fs_enet_netpoll(struct net_device *dev) | ||
1186 | { | ||
1187 | disable_irq(dev->irq); | ||
1188 | fs_enet_interrupt(dev->irq, dev); | ||
1189 | enable_irq(dev->irq); | ||
1190 | } | ||
1191 | #endif | ||
1192 | |||
1193 | /**************************************************************************************/ | ||
1194 | |||
1195 | module_init(fs_init); | ||
1196 | module_exit(fs_cleanup); | ||
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h new file mode 100644 index 000000000000..1ece4b1a689e --- /dev/null +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h | |||
@@ -0,0 +1,244 @@ | |||
1 | #ifndef FS_ENET_H | ||
2 | #define FS_ENET_H | ||
3 | |||
4 | #include <linux/mii.h> | ||
5 | #include <linux/netdevice.h> | ||
6 | #include <linux/types.h> | ||
7 | #include <linux/list.h> | ||
8 | #include <linux/phy.h> | ||
9 | #include <linux/dma-mapping.h> | ||
10 | |||
11 | #include <linux/fs_enet_pd.h> | ||
12 | #include <asm/fs_pd.h> | ||
13 | |||
14 | #ifdef CONFIG_CPM1 | ||
15 | #include <asm/cpm1.h> | ||
16 | #endif | ||
17 | |||
18 | #if defined(CONFIG_FS_ENET_HAS_FEC) | ||
19 | #include <asm/cpm.h> | ||
20 | |||
21 | #if defined(CONFIG_FS_ENET_MPC5121_FEC) | ||
22 | /* MPC5121 FEC has different register layout */ | ||
23 | struct fec { | ||
24 | u32 fec_reserved0; | ||
25 | u32 fec_ievent; /* Interrupt event reg */ | ||
26 | u32 fec_imask; /* Interrupt mask reg */ | ||
27 | u32 fec_reserved1; | ||
28 | u32 fec_r_des_active; /* Receive descriptor reg */ | ||
29 | u32 fec_x_des_active; /* Transmit descriptor reg */ | ||
30 | u32 fec_reserved2[3]; | ||
31 | u32 fec_ecntrl; /* Ethernet control reg */ | ||
32 | u32 fec_reserved3[6]; | ||
33 | u32 fec_mii_data; /* MII manage frame reg */ | ||
34 | u32 fec_mii_speed; /* MII speed control reg */ | ||
35 | u32 fec_reserved4[7]; | ||
36 | u32 fec_mib_ctrlstat; /* MIB control/status reg */ | ||
37 | u32 fec_reserved5[7]; | ||
38 | u32 fec_r_cntrl; /* Receive control reg */ | ||
39 | u32 fec_reserved6[15]; | ||
40 | u32 fec_x_cntrl; /* Transmit Control reg */ | ||
41 | u32 fec_reserved7[7]; | ||
42 | u32 fec_addr_low; /* Low 32bits MAC address */ | ||
43 | u32 fec_addr_high; /* High 16bits MAC address */ | ||
44 | u32 fec_opd; /* Opcode + Pause duration */ | ||
45 | u32 fec_reserved8[10]; | ||
46 | u32 fec_hash_table_high; /* High 32bits hash table */ | ||
47 | u32 fec_hash_table_low; /* Low 32bits hash table */ | ||
48 | u32 fec_grp_hash_table_high; /* High 32bits hash table */ | ||
49 | u32 fec_grp_hash_table_low; /* Low 32bits hash table */ | ||
50 | u32 fec_reserved9[7]; | ||
51 | u32 fec_x_wmrk; /* FIFO transmit water mark */ | ||
52 | u32 fec_reserved10; | ||
53 | u32 fec_r_bound; /* FIFO receive bound reg */ | ||
54 | u32 fec_r_fstart; /* FIFO receive start reg */ | ||
55 | u32 fec_reserved11[11]; | ||
56 | u32 fec_r_des_start; /* Receive descriptor ring */ | ||
57 | u32 fec_x_des_start; /* Transmit descriptor ring */ | ||
58 | u32 fec_r_buff_size; /* Maximum receive buff size */ | ||
59 | u32 fec_reserved12[26]; | ||
60 | u32 fec_dma_control; /* DMA Endian and other ctrl */ | ||
61 | }; | ||
62 | #endif | ||
63 | |||
64 | struct fec_info { | ||
65 | struct fec __iomem *fecp; | ||
66 | u32 mii_speed; | ||
67 | }; | ||
68 | #endif | ||
69 | |||
70 | #ifdef CONFIG_CPM2 | ||
71 | #include <asm/cpm2.h> | ||
72 | #endif | ||
73 | |||
74 | /* hw driver ops */ | ||
75 | struct fs_ops { | ||
76 | int (*setup_data)(struct net_device *dev); | ||
77 | int (*allocate_bd)(struct net_device *dev); | ||
78 | void (*free_bd)(struct net_device *dev); | ||
79 | void (*cleanup_data)(struct net_device *dev); | ||
80 | void (*set_multicast_list)(struct net_device *dev); | ||
81 | void (*adjust_link)(struct net_device *dev); | ||
82 | void (*restart)(struct net_device *dev); | ||
83 | void (*stop)(struct net_device *dev); | ||
84 | void (*napi_clear_rx_event)(struct net_device *dev); | ||
85 | void (*napi_enable_rx)(struct net_device *dev); | ||
86 | void (*napi_disable_rx)(struct net_device *dev); | ||
87 | void (*rx_bd_done)(struct net_device *dev); | ||
88 | void (*tx_kickstart)(struct net_device *dev); | ||
89 | u32 (*get_int_events)(struct net_device *dev); | ||
90 | void (*clear_int_events)(struct net_device *dev, u32 int_events); | ||
91 | void (*ev_error)(struct net_device *dev, u32 int_events); | ||
92 | int (*get_regs)(struct net_device *dev, void *p, int *sizep); | ||
93 | int (*get_regs_len)(struct net_device *dev); | ||
94 | void (*tx_restart)(struct net_device *dev); | ||
95 | }; | ||
96 | |||
97 | struct phy_info { | ||
98 | unsigned int id; | ||
99 | const char *name; | ||
100 | void (*startup) (struct net_device * dev); | ||
101 | void (*shutdown) (struct net_device * dev); | ||
102 | void (*ack_int) (struct net_device * dev); | ||
103 | }; | ||
104 | |||
105 | /* The FEC stores dest/src/type, data, and checksum for receive packets. | ||
106 | */ | ||
107 | #define MAX_MTU 1508 /* Allow fullsized pppoe packets over VLAN */ | ||
108 | #define MIN_MTU 46 /* minimum Ethernet payload size */ | ||
109 | #define CRC_LEN 4 | ||
110 | |||
111 | #define PKT_MAXBUF_SIZE (MAX_MTU+ETH_HLEN+CRC_LEN) | ||
112 | #define PKT_MINBUF_SIZE (MIN_MTU+ETH_HLEN+CRC_LEN) | ||
113 | |||
114 | /* Must be a multiple of 32 (to cover both FEC & FCC) */ | ||
115 | #define PKT_MAXBLR_SIZE ((PKT_MAXBUF_SIZE + 31) & ~31) | ||
116 | /* This is needed so that invalidate_xxx won't invalidate too much */ | ||
117 | #define ENET_RX_ALIGN 16 | ||
118 | #define ENET_RX_FRSIZE L1_CACHE_ALIGN(PKT_MAXBUF_SIZE + ENET_RX_ALIGN - 1) | ||
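/* Worked example: with ETH_HLEN == 14, PKT_MAXBUF_SIZE = 1508 + 14 + 4
 * = 1526, which PKT_MAXBLR_SIZE rounds up to 1536 (48 * 32). */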
119 | |||
120 | struct fs_enet_private { | ||
121 | struct napi_struct napi; | ||
122 | struct device *dev; /* pointer back to the device (must be initialized first) */ | ||
123 | struct net_device *ndev; | ||
124 | spinlock_t lock; /* during all ops except TX pckt processing */ | ||
125 | spinlock_t tx_lock; /* during fs_start_xmit and fs_tx */ | ||
126 | struct fs_platform_info *fpi; | ||
127 | const struct fs_ops *ops; | ||
128 | int rx_ring, tx_ring; | ||
129 | dma_addr_t ring_mem_addr; | ||
130 | void __iomem *ring_base; | ||
131 | struct sk_buff **rx_skbuff; | ||
132 | struct sk_buff **tx_skbuff; | ||
133 | cbd_t __iomem *rx_bd_base; /* Address of Rx and Tx buffers. */ | ||
134 | cbd_t __iomem *tx_bd_base; | ||
135 | cbd_t __iomem *dirty_tx; /* ring entries to be free()ed. */ | ||
136 | cbd_t __iomem *cur_rx; | ||
137 | cbd_t __iomem *cur_tx; | ||
138 | int tx_free; | ||
139 | struct net_device_stats stats; | ||
140 | struct timer_list phy_timer_list; | ||
141 | const struct phy_info *phy; | ||
142 | u32 msg_enable; | ||
143 | struct mii_if_info mii_if; | ||
144 | unsigned int last_mii_status; | ||
145 | int interrupt; | ||
146 | |||
147 | struct phy_device *phydev; | ||
148 | int oldduplex, oldspeed, oldlink; /* current settings */ | ||
149 | |||
150 | /* event masks */ | ||
151 | u32 ev_napi_rx; /* mask of NAPI rx events */ | ||
152 | u32 ev_rx; /* rx event mask */ | ||
153 | u32 ev_tx; /* tx event mask */ | ||
154 | u32 ev_err; /* error event mask */ | ||
155 | |||
156 | u16 bd_rx_empty; /* mask of BD rx empty */ | ||
157 | u16 bd_rx_err; /* mask of BD rx errors */ | ||
158 | |||
159 | union { | ||
160 | struct { | ||
161 | int idx; /* FEC1 = 0, FEC2 = 1 */ | ||
162 | void __iomem *fecp; /* hw registers */ | ||
163 | u32 hthi, htlo; /* state for multicast */ | ||
164 | } fec; | ||
165 | |||
166 | struct { | ||
167 | int idx; /* FCC1-3 = 0-2 */ | ||
168 | void __iomem *fccp; /* hw registers */ | ||
169 | void __iomem *ep; /* parameter ram */ | ||
170 | void __iomem *fcccp; /* hw registers cont. */ | ||
171 | void __iomem *mem; /* FCC DPRAM */ | ||
172 | u32 gaddrh, gaddrl; /* group address */ | ||
173 | } fcc; | ||
174 | |||
175 | struct { | ||
176 | int idx; /* SCC1 = 0, SCC2 = 1, ... */ | ||
177 | void __iomem *sccp; /* hw registers */ | ||
178 | void __iomem *ep; /* parameter ram */ | ||
179 | u32 hthi, htlo; /* state for multicast */ | ||
180 | } scc; | ||
181 | |||
182 | }; | ||
183 | }; | ||
184 | |||
185 | /***************************************************************************/ | ||
186 | |||
187 | void fs_init_bds(struct net_device *dev); | ||
188 | void fs_cleanup_bds(struct net_device *dev); | ||
189 | |||
190 | /***************************************************************************/ | ||
191 | |||
192 | #define DRV_MODULE_NAME "fs_enet" | ||
193 | #define PFX DRV_MODULE_NAME ": " | ||
194 | #define DRV_MODULE_VERSION "1.0" | ||
195 | #define DRV_MODULE_RELDATE "Aug 8, 2005" | ||
196 | |||
197 | /***************************************************************************/ | ||
198 | |||
199 | int fs_enet_platform_init(void); | ||
200 | void fs_enet_platform_cleanup(void); | ||
201 | |||
202 | /***************************************************************************/ | ||
203 | /* buffer descriptor access macros */ | ||
204 | |||
205 | /* access macros */ | ||
206 | #if defined(CONFIG_CPM1) | ||
207 | /* for a CPM1 __raw_xxx's are sufficient */ | ||
208 | #define __cbd_out32(addr, x) __raw_writel(x, addr) | ||
209 | #define __cbd_out16(addr, x) __raw_writew(x, addr) | ||
210 | #define __cbd_in32(addr) __raw_readl(addr) | ||
211 | #define __cbd_in16(addr) __raw_readw(addr) | ||
212 | #else | ||
213 | /* for others play it safe */ | ||
214 | #define __cbd_out32(addr, x) out_be32(addr, x) | ||
215 | #define __cbd_out16(addr, x) out_be16(addr, x) | ||
216 | #define __cbd_in32(addr) in_be32(addr) | ||
217 | #define __cbd_in16(addr) in_be16(addr) | ||
218 | #endif | ||
219 | |||
220 | /* write */ | ||
221 | #define CBDW_SC(_cbd, _sc) __cbd_out16(&(_cbd)->cbd_sc, (_sc)) | ||
222 | #define CBDW_DATLEN(_cbd, _datlen) __cbd_out16(&(_cbd)->cbd_datlen, (_datlen)) | ||
223 | #define CBDW_BUFADDR(_cbd, _bufaddr) __cbd_out32(&(_cbd)->cbd_bufaddr, (_bufaddr)) | ||
224 | |||
225 | /* read */ | ||
226 | #define CBDR_SC(_cbd) __cbd_in16(&(_cbd)->cbd_sc) | ||
227 | #define CBDR_DATLEN(_cbd) __cbd_in16(&(_cbd)->cbd_datlen) | ||
228 | #define CBDR_BUFADDR(_cbd) __cbd_in32(&(_cbd)->cbd_bufaddr) | ||
229 | |||
230 | /* set bits */ | ||
231 | #define CBDS_SC(_cbd, _sc) CBDW_SC(_cbd, CBDR_SC(_cbd) | (_sc)) | ||
232 | |||
233 | /* clear bits */ | ||
234 | #define CBDC_SC(_cbd, _sc) CBDW_SC(_cbd, CBDR_SC(_cbd) & ~(_sc)) | ||
235 | |||
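As a usage illustration, here is a minimal sketch (hypothetical as written, though loosely modeled on what fs_init_bds() does) that uses the accessors above to hand a ring of receive buffers to the controller. BD_ENET_RX_EMPTY and BD_SC_WRAP are the status bits the driver uses elsewhere; bufs[] is assumed to already hold addresses mapped with dma_map_single():

/* Hedged sketch: publish nr_bds receive buffers to the hardware. */
static void example_init_rx_ring(cbd_t __iomem *bd, dma_addr_t *bufs, int nr_bds)
{
	int i;

	for (i = 0; i < nr_bds; i++, bd++) {
		CBDW_BUFADDR(bd, bufs[i]);	/* where the controller will DMA */
		CBDW_DATLEN(bd, 0);		/* hardware fills in the length */
		/* mark the BD empty (hardware owned); the wrap bit closes the ring */
		CBDW_SC(bd, BD_ENET_RX_EMPTY |
			    ((i == nr_bds - 1) ? BD_SC_WRAP : 0));
	}
}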
236 | /*******************************************************************/ | ||
237 | |||
238 | extern const struct fs_ops fs_fec_ops; | ||
239 | extern const struct fs_ops fs_fcc_ops; | ||
240 | extern const struct fs_ops fs_scc_ops; | ||
241 | |||
242 | /*******************************************************************/ | ||
243 | |||
244 | #endif | ||
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c new file mode 100644 index 000000000000..7583a9572bcc --- /dev/null +++ b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c | |||
@@ -0,0 +1,584 @@ | |||
1 | /* | ||
2 | * FCC driver for Motorola MPC82xx (PQ2). | ||
3 | * | ||
4 | * Copyright (c) 2003 Intracom S.A. | ||
5 | * by Pantelis Antoniou <panto@intracom.gr> | ||
6 | * | ||
7 | * 2005 (c) MontaVista Software, Inc. | ||
8 | * Vitaly Bordug <vbordug@ru.mvista.com> | ||
9 | * | ||
10 | * This file is licensed under the terms of the GNU General Public License | ||
11 | * version 2. This program is licensed "as is" without any warranty of any | ||
12 | * kind, whether express or implied. | ||
13 | */ | ||
14 | |||
15 | #include <linux/module.h> | ||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/types.h> | ||
18 | #include <linux/string.h> | ||
19 | #include <linux/ptrace.h> | ||
20 | #include <linux/errno.h> | ||
21 | #include <linux/ioport.h> | ||
22 | #include <linux/interrupt.h> | ||
23 | #include <linux/init.h> | ||
24 | #include <linux/delay.h> | ||
25 | #include <linux/netdevice.h> | ||
26 | #include <linux/etherdevice.h> | ||
27 | #include <linux/skbuff.h> | ||
28 | #include <linux/spinlock.h> | ||
29 | #include <linux/mii.h> | ||
30 | #include <linux/ethtool.h> | ||
31 | #include <linux/bitops.h> | ||
32 | #include <linux/fs.h> | ||
33 | #include <linux/platform_device.h> | ||
34 | #include <linux/phy.h> | ||
35 | #include <linux/of_device.h> | ||
36 | #include <linux/gfp.h> | ||
37 | |||
38 | #include <asm/immap_cpm2.h> | ||
39 | #include <asm/mpc8260.h> | ||
40 | #include <asm/cpm2.h> | ||
41 | |||
42 | #include <asm/pgtable.h> | ||
43 | #include <asm/irq.h> | ||
44 | #include <asm/uaccess.h> | ||
45 | |||
46 | #include "fs_enet.h" | ||
47 | |||
48 | /*************************************************/ | ||
49 | |||
50 | /* FCC access macros */ | ||
51 | |||
52 | /* write, read, set bits, clear bits */ | ||
53 | #define W32(_p, _m, _v) out_be32(&(_p)->_m, (_v)) | ||
54 | #define R32(_p, _m) in_be32(&(_p)->_m) | ||
55 | #define S32(_p, _m, _v) W32(_p, _m, R32(_p, _m) | (_v)) | ||
56 | #define C32(_p, _m, _v) W32(_p, _m, R32(_p, _m) & ~(_v)) | ||
57 | |||
58 | #define W16(_p, _m, _v) out_be16(&(_p)->_m, (_v)) | ||
59 | #define R16(_p, _m) in_be16(&(_p)->_m) | ||
60 | #define S16(_p, _m, _v) W16(_p, _m, R16(_p, _m) | (_v)) | ||
61 | #define C16(_p, _m, _v) W16(_p, _m, R16(_p, _m) & ~(_v)) | ||
62 | |||
63 | #define W8(_p, _m, _v) out_8(&(_p)->_m, (_v)) | ||
64 | #define R8(_p, _m) in_8(&(_p)->_m) | ||
65 | #define S8(_p, _m, _v) W8(_p, _m, R8(_p, _m) | (_v)) | ||
66 | #define C8(_p, _m, _v) W8(_p, _m, R8(_p, _m) & ~(_v)) | ||
67 | |||
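These macros expand to read-modify-write accesses on the memory-mapped FCC registers; for instance, the S32(fccp, fcc_gfmr, FCC_GFMR_ENR | FCC_GFMR_ENT) at the end of restart() below sets the receiver and transmitter enable bits without disturbing the rest of the general mode register.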
68 | /*************************************************/ | ||
69 | |||
70 | #define FCC_MAX_MULTICAST_ADDRS 64 | ||
71 | |||
72 | #define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18)) | ||
73 | #define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | (VAL & 0xffff)) | ||
74 | #define mk_mii_end 0 | ||
75 | |||
76 | #define MAX_CR_CMD_LOOPS 10000 | ||
77 | |||
78 | static inline int fcc_cr_cmd(struct fs_enet_private *fep, u32 op) | ||
79 | { | ||
80 | const struct fs_platform_info *fpi = fep->fpi; | ||
81 | |||
82 | return cpm_command(fpi->cp_command, op); | ||
83 | } | ||
84 | |||
85 | static int do_pd_setup(struct fs_enet_private *fep) | ||
86 | { | ||
87 | struct platform_device *ofdev = to_platform_device(fep->dev); | ||
88 | struct fs_platform_info *fpi = fep->fpi; | ||
89 | int ret = -EINVAL; | ||
90 | |||
91 | fep->interrupt = of_irq_to_resource(ofdev->dev.of_node, 0, NULL); | ||
92 | if (fep->interrupt == NO_IRQ) | ||
93 | goto out; | ||
94 | |||
95 | fep->fcc.fccp = of_iomap(ofdev->dev.of_node, 0); | ||
96 | if (!fep->fcc.fccp) | ||
97 | goto out; | ||
98 | |||
99 | fep->fcc.ep = of_iomap(ofdev->dev.of_node, 1); | ||
100 | if (!fep->fcc.ep) | ||
101 | goto out_fccp; | ||
102 | |||
103 | fep->fcc.fcccp = of_iomap(ofdev->dev.of_node, 2); | ||
104 | if (!fep->fcc.fcccp) | ||
105 | goto out_ep; | ||
106 | |||
107 | fep->fcc.mem = (void __iomem *)cpm2_immr; | ||
108 | fpi->dpram_offset = cpm_dpalloc(128, 32); | ||
109 | if (IS_ERR_VALUE(fpi->dpram_offset)) { | ||
110 | ret = fpi->dpram_offset; | ||
111 | goto out_fcccp; | ||
112 | } | ||
113 | |||
114 | return 0; | ||
115 | |||
116 | out_fcccp: | ||
117 | iounmap(fep->fcc.fcccp); | ||
118 | out_ep: | ||
119 | iounmap(fep->fcc.ep); | ||
120 | out_fccp: | ||
121 | iounmap(fep->fcc.fccp); | ||
122 | out: | ||
123 | return ret; | ||
124 | } | ||
125 | |||
126 | #define FCC_NAPI_RX_EVENT_MSK (FCC_ENET_RXF | FCC_ENET_RXB) | ||
127 | #define FCC_RX_EVENT (FCC_ENET_RXF) | ||
128 | #define FCC_TX_EVENT (FCC_ENET_TXB) | ||
129 | #define FCC_ERR_EVENT_MSK (FCC_ENET_TXE) | ||
130 | |||
131 | static int setup_data(struct net_device *dev) | ||
132 | { | ||
133 | struct fs_enet_private *fep = netdev_priv(dev); | ||
134 | |||
135 | if (do_pd_setup(fep) != 0) | ||
136 | return -EINVAL; | ||
137 | |||
138 | fep->ev_napi_rx = FCC_NAPI_RX_EVENT_MSK; | ||
139 | fep->ev_rx = FCC_RX_EVENT; | ||
140 | fep->ev_tx = FCC_TX_EVENT; | ||
141 | fep->ev_err = FCC_ERR_EVENT_MSK; | ||
142 | |||
143 | return 0; | ||
144 | } | ||
145 | |||
146 | static int allocate_bd(struct net_device *dev) | ||
147 | { | ||
148 | struct fs_enet_private *fep = netdev_priv(dev); | ||
149 | const struct fs_platform_info *fpi = fep->fpi; | ||
150 | |||
151 | fep->ring_base = (void __iomem __force *)dma_alloc_coherent(fep->dev, | ||
152 | (fpi->tx_ring + fpi->rx_ring) * | ||
153 | sizeof(cbd_t), &fep->ring_mem_addr, | ||
154 | GFP_KERNEL); | ||
155 | if (fep->ring_base == NULL) | ||
156 | return -ENOMEM; | ||
157 | |||
158 | return 0; | ||
159 | } | ||
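The single coherent allocation places the receive ring first and the transmit ring immediately after it; restart() below and the tx_bd_base computation in fs_enet_probe() both rely on that layout.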
160 | |||
161 | static void free_bd(struct net_device *dev) | ||
162 | { | ||
163 | struct fs_enet_private *fep = netdev_priv(dev); | ||
164 | const struct fs_platform_info *fpi = fep->fpi; | ||
165 | |||
166 | if (fep->ring_base) | ||
167 | dma_free_coherent(fep->dev, | ||
168 | (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t), | ||
169 | (void __force *)fep->ring_base, fep->ring_mem_addr); | ||
170 | } | ||
171 | |||
172 | static void cleanup_data(struct net_device *dev) | ||
173 | { | ||
174 | /* nothing */ | ||
175 | } | ||
176 | |||
177 | static void set_promiscuous_mode(struct net_device *dev) | ||
178 | { | ||
179 | struct fs_enet_private *fep = netdev_priv(dev); | ||
180 | fcc_t __iomem *fccp = fep->fcc.fccp; | ||
181 | |||
182 | S32(fccp, fcc_fpsmr, FCC_PSMR_PRO); | ||
183 | } | ||
184 | |||
185 | static void set_multicast_start(struct net_device *dev) | ||
186 | { | ||
187 | struct fs_enet_private *fep = netdev_priv(dev); | ||
188 | fcc_enet_t __iomem *ep = fep->fcc.ep; | ||
189 | |||
190 | W32(ep, fen_gaddrh, 0); | ||
191 | W32(ep, fen_gaddrl, 0); | ||
192 | } | ||
193 | |||
194 | static void set_multicast_one(struct net_device *dev, const u8 *mac) | ||
195 | { | ||
196 | struct fs_enet_private *fep = netdev_priv(dev); | ||
197 | fcc_enet_t __iomem *ep = fep->fcc.ep; | ||
198 | u16 taddrh, taddrm, taddrl; | ||
199 | |||
200 | taddrh = ((u16)mac[5] << 8) | mac[4]; | ||
201 | taddrm = ((u16)mac[3] << 8) | mac[2]; | ||
202 | taddrl = ((u16)mac[1] << 8) | mac[0]; | ||
203 | |||
204 | W16(ep, fen_taddrh, taddrh); | ||
205 | W16(ep, fen_taddrm, taddrm); | ||
206 | W16(ep, fen_taddrl, taddrl); | ||
207 | fcc_cr_cmd(fep, CPM_CR_SET_GADDR); | ||
208 | } | ||
209 | |||
210 | static void set_multicast_finish(struct net_device *dev) | ||
211 | { | ||
212 | struct fs_enet_private *fep = netdev_priv(dev); | ||
213 | fcc_t __iomem *fccp = fep->fcc.fccp; | ||
214 | fcc_enet_t __iomem *ep = fep->fcc.ep; | ||
215 | |||
216 | /* clear promiscuous always */ | ||
217 | C32(fccp, fcc_fpsmr, FCC_PSMR_PRO); | ||
218 | |||
219 | /* if all multi or too many multicasts; just enable all */ | ||
220 | if ((dev->flags & IFF_ALLMULTI) != 0 || | ||
221 | netdev_mc_count(dev) > FCC_MAX_MULTICAST_ADDRS) { | ||
222 | |||
223 | W32(ep, fen_gaddrh, 0xffffffff); | ||
224 | W32(ep, fen_gaddrl, 0xffffffff); | ||
225 | } | ||
226 | |||
227 | /* read back */ | ||
228 | fep->fcc.gaddrh = R32(ep, fen_gaddrh); | ||
229 | fep->fcc.gaddrl = R32(ep, fen_gaddrl); | ||
230 | } | ||
231 | |||
232 | static void set_multicast_list(struct net_device *dev) | ||
233 | { | ||
234 | struct netdev_hw_addr *ha; | ||
235 | |||
236 | if ((dev->flags & IFF_PROMISC) == 0) { | ||
237 | set_multicast_start(dev); | ||
238 | netdev_for_each_mc_addr(ha, dev) | ||
239 | set_multicast_one(dev, ha->addr); | ||
240 | set_multicast_finish(dev); | ||
241 | } else | ||
242 | set_promiscuous_mode(dev); | ||
243 | } | ||
244 | |||
245 | static void restart(struct net_device *dev) | ||
246 | { | ||
247 | struct fs_enet_private *fep = netdev_priv(dev); | ||
248 | const struct fs_platform_info *fpi = fep->fpi; | ||
249 | fcc_t __iomem *fccp = fep->fcc.fccp; | ||
250 | fcc_c_t __iomem *fcccp = fep->fcc.fcccp; | ||
251 | fcc_enet_t __iomem *ep = fep->fcc.ep; | ||
252 | dma_addr_t rx_bd_base_phys, tx_bd_base_phys; | ||
253 | u16 paddrh, paddrm, paddrl; | ||
254 | const unsigned char *mac; | ||
255 | int i; | ||
256 | |||
257 | C32(fccp, fcc_gfmr, FCC_GFMR_ENR | FCC_GFMR_ENT); | ||
258 | |||
259 | /* clear everything (slow & steady does it) */ | ||
260 | for (i = 0; i < sizeof(*ep); i++) | ||
261 | out_8((u8 __iomem *)ep + i, 0); | ||
262 | |||
263 | /* get physical address */ | ||
264 | rx_bd_base_phys = fep->ring_mem_addr; | ||
265 | tx_bd_base_phys = rx_bd_base_phys + sizeof(cbd_t) * fpi->rx_ring; | ||
266 | |||
267 | /* point to bds */ | ||
268 | W32(ep, fen_genfcc.fcc_rbase, rx_bd_base_phys); | ||
269 | W32(ep, fen_genfcc.fcc_tbase, tx_bd_base_phys); | ||
270 | |||
271 | /* Set maximum bytes per receive buffer. | ||
272 | * It must be a multiple of 32. | ||
273 | */ | ||
274 | W16(ep, fen_genfcc.fcc_mrblr, PKT_MAXBLR_SIZE); | ||
275 | |||
276 | W32(ep, fen_genfcc.fcc_rstate, (CPMFCR_GBL | CPMFCR_EB) << 24); | ||
277 | W32(ep, fen_genfcc.fcc_tstate, (CPMFCR_GBL | CPMFCR_EB) << 24); | ||
278 | |||
279 | /* Allocate space in the reserved FCC area of DPRAM for the | ||
280 | * internal buffers. No one uses this space (yet), so we | ||
281 | * can do this. Later, we will add resource management for | ||
282 | * this area. | ||
283 | */ | ||
284 | |||
285 | W16(ep, fen_genfcc.fcc_riptr, fpi->dpram_offset); | ||
286 | W16(ep, fen_genfcc.fcc_tiptr, fpi->dpram_offset + 32); | ||
287 | |||
288 | W16(ep, fen_padptr, fpi->dpram_offset + 64); | ||
289 | |||
290 | /* fill with special symbol... */ | ||
291 | memset_io(fep->fcc.mem + fpi->dpram_offset + 64, 0x88, 32); | ||
292 | |||
293 | W32(ep, fen_genfcc.fcc_rbptr, 0); | ||
294 | W32(ep, fen_genfcc.fcc_tbptr, 0); | ||
295 | W32(ep, fen_genfcc.fcc_rcrc, 0); | ||
296 | W32(ep, fen_genfcc.fcc_tcrc, 0); | ||
297 | W16(ep, fen_genfcc.fcc_res1, 0); | ||
298 | W32(ep, fen_genfcc.fcc_res2, 0); | ||
299 | |||
300 | /* no CAM */ | ||
301 | W32(ep, fen_camptr, 0); | ||
302 | |||
303 | /* Set CRC preset and mask */ | ||
304 | W32(ep, fen_cmask, 0xdebb20e3); | ||
305 | W32(ep, fen_cpres, 0xffffffff); | ||
306 | |||
307 | W32(ep, fen_crcec, 0); /* CRC Error counter */ | ||
308 | W32(ep, fen_alec, 0); /* alignment error counter */ | ||
309 | W32(ep, fen_disfc, 0); /* discard frame counter */ | ||
310 | W16(ep, fen_retlim, 15); /* Retry limit threshold */ | ||
311 | W16(ep, fen_pper, 0); /* Normal persistence */ | ||
312 | |||
313 | /* set group address */ | ||
314 | W32(ep, fen_gaddrh, fep->fcc.gaddrh); | ||
315 | W32(ep, fen_gaddrl, fep->fcc.gaddrl); | ||
316 | |||
317 | /* Clear hash filter tables */ | ||
318 | W32(ep, fen_iaddrh, 0); | ||
319 | W32(ep, fen_iaddrl, 0); | ||
320 | |||
321 | /* Clear the Out-of-sequence TxBD */ | ||
322 | W16(ep, fen_tfcstat, 0); | ||
323 | W16(ep, fen_tfclen, 0); | ||
324 | W32(ep, fen_tfcptr, 0); | ||
325 | |||
326 | W16(ep, fen_mflr, PKT_MAXBUF_SIZE); /* maximum frame length register */ | ||
327 | W16(ep, fen_minflr, PKT_MINBUF_SIZE); /* minimum frame length register */ | ||
328 | |||
329 | /* set address */ | ||
330 | mac = dev->dev_addr; | ||
331 | paddrh = ((u16)mac[5] << 8) | mac[4]; | ||
332 | paddrm = ((u16)mac[3] << 8) | mac[2]; | ||
333 | paddrl = ((u16)mac[1] << 8) | mac[0]; | ||
334 | |||
335 | W16(ep, fen_paddrh, paddrh); | ||
336 | W16(ep, fen_paddrm, paddrm); | ||
337 | W16(ep, fen_paddrl, paddrl); | ||
338 | |||
339 | W16(ep, fen_taddrh, 0); | ||
340 | W16(ep, fen_taddrm, 0); | ||
341 | W16(ep, fen_taddrl, 0); | ||
342 | |||
343 | W16(ep, fen_maxd1, 1520); /* maximum DMA1 length */ | ||
344 | W16(ep, fen_maxd2, 1520); /* maximum DMA2 length */ | ||
345 | |||
346 | /* Clear stat counters, in case we ever enable RMON */ | ||
347 | W32(ep, fen_octc, 0); | ||
348 | W32(ep, fen_colc, 0); | ||
349 | W32(ep, fen_broc, 0); | ||
350 | W32(ep, fen_mulc, 0); | ||
351 | W32(ep, fen_uspc, 0); | ||
352 | W32(ep, fen_frgc, 0); | ||
353 | W32(ep, fen_ospc, 0); | ||
354 | W32(ep, fen_jbrc, 0); | ||
355 | W32(ep, fen_p64c, 0); | ||
356 | W32(ep, fen_p65c, 0); | ||
357 | W32(ep, fen_p128c, 0); | ||
358 | W32(ep, fen_p256c, 0); | ||
359 | W32(ep, fen_p512c, 0); | ||
360 | W32(ep, fen_p1024c, 0); | ||
361 | |||
362 | W16(ep, fen_rfthr, 0); /* Suggested by manual */ | ||
363 | W16(ep, fen_rfcnt, 0); | ||
364 | W16(ep, fen_cftype, 0); | ||
365 | |||
366 | fs_init_bds(dev); | ||
367 | |||
368 | /* adjust to speed (for RMII mode) */ | ||
369 | if (fpi->use_rmii) { | ||
370 | if (fep->phydev->speed == 100) | ||
371 | C8(fcccp, fcc_gfemr, 0x20); | ||
372 | else | ||
373 | S8(fcccp, fcc_gfemr, 0x20); | ||
374 | } | ||
375 | |||
376 | fcc_cr_cmd(fep, CPM_CR_INIT_TRX); | ||
377 | |||
378 | /* clear events */ | ||
379 | W16(fccp, fcc_fcce, 0xffff); | ||
380 | |||
381 | /* Enable interrupts we wish to service */ | ||
382 | W16(fccp, fcc_fccm, FCC_ENET_TXE | FCC_ENET_RXF | FCC_ENET_TXB); | ||
383 | |||
384 | /* Set GFMR to enable Ethernet operating mode */ | ||
385 | W32(fccp, fcc_gfmr, FCC_GFMR_TCI | FCC_GFMR_MODE_ENET); | ||
386 | |||
387 | /* set sync/delimiters */ | ||
388 | W16(fccp, fcc_fdsr, 0xd555); | ||
389 | |||
390 | W32(fccp, fcc_fpsmr, FCC_PSMR_ENCRC); | ||
391 | |||
392 | if (fpi->use_rmii) | ||
393 | S32(fccp, fcc_fpsmr, FCC_PSMR_RMII); | ||
394 | |||
395 | /* adjust to duplex mode */ | ||
396 | if (fep->phydev->duplex) | ||
397 | S32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB); | ||
398 | else | ||
399 | C32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB); | ||
400 | |||
401 | /* Restore multicast and promiscuous settings */ | ||
402 | set_multicast_list(dev); | ||
403 | |||
404 | S32(fccp, fcc_gfmr, FCC_GFMR_ENR | FCC_GFMR_ENT); | ||
405 | } | ||
406 | |||
407 | static void stop(struct net_device *dev) | ||
408 | { | ||
409 | struct fs_enet_private *fep = netdev_priv(dev); | ||
410 | fcc_t __iomem *fccp = fep->fcc.fccp; | ||
411 | |||
412 | /* stop ethernet */ | ||
413 | C32(fccp, fcc_gfmr, FCC_GFMR_ENR | FCC_GFMR_ENT); | ||
414 | |||
415 | /* clear events */ | ||
416 | W16(fccp, fcc_fcce, 0xffff); | ||
417 | |||
418 | /* clear interrupt mask */ | ||
419 | W16(fccp, fcc_fccm, 0); | ||
420 | |||
421 | fs_cleanup_bds(dev); | ||
422 | } | ||
423 | |||
424 | static void napi_clear_rx_event(struct net_device *dev) | ||
425 | { | ||
426 | struct fs_enet_private *fep = netdev_priv(dev); | ||
427 | fcc_t __iomem *fccp = fep->fcc.fccp; | ||
428 | |||
429 | W16(fccp, fcc_fcce, FCC_NAPI_RX_EVENT_MSK); | ||
430 | } | ||
431 | |||
432 | static void napi_enable_rx(struct net_device *dev) | ||
433 | { | ||
434 | struct fs_enet_private *fep = netdev_priv(dev); | ||
435 | fcc_t __iomem *fccp = fep->fcc.fccp; | ||
436 | |||
437 | S16(fccp, fcc_fccm, FCC_NAPI_RX_EVENT_MSK); | ||
438 | } | ||
439 | |||
440 | static void napi_disable_rx(struct net_device *dev) | ||
441 | { | ||
442 | struct fs_enet_private *fep = netdev_priv(dev); | ||
443 | fcc_t __iomem *fccp = fep->fcc.fccp; | ||
444 | |||
445 | C16(fccp, fcc_fccm, FCC_NAPI_RX_EVENT_MSK); | ||
446 | } | ||
447 | |||
448 | static void rx_bd_done(struct net_device *dev) | ||
449 | { | ||
450 | /* nothing */ | ||
451 | } | ||
452 | |||
453 | static void tx_kickstart(struct net_device *dev) | ||
454 | { | ||
455 | struct fs_enet_private *fep = netdev_priv(dev); | ||
456 | fcc_t __iomem *fccp = fep->fcc.fccp; | ||
457 | |||
458 | S16(fccp, fcc_ftodr, 0x8000); | ||
459 | } | ||
460 | |||
461 | static u32 get_int_events(struct net_device *dev) | ||
462 | { | ||
463 | struct fs_enet_private *fep = netdev_priv(dev); | ||
464 | fcc_t __iomem *fccp = fep->fcc.fccp; | ||
465 | |||
466 | return (u32)R16(fccp, fcc_fcce); | ||
467 | } | ||
468 | |||
469 | static void clear_int_events(struct net_device *dev, u32 int_events) | ||
470 | { | ||
471 | struct fs_enet_private *fep = netdev_priv(dev); | ||
472 | fcc_t __iomem *fccp = fep->fcc.fccp; | ||
473 | |||
474 | W16(fccp, fcc_fcce, int_events & 0xffff); | ||
475 | } | ||
476 | |||
477 | static void ev_error(struct net_device *dev, u32 int_events) | ||
478 | { | ||
479 | struct fs_enet_private *fep = netdev_priv(dev); | ||
480 | |||
481 | dev_warn(fep->dev, "FS_ENET ERROR(s) 0x%x\n", int_events); | ||
482 | } | ||
483 | |||
484 | static int get_regs(struct net_device *dev, void *p, int *sizep) | ||
485 | { | ||
486 | struct fs_enet_private *fep = netdev_priv(dev); | ||
487 | |||
488 | if (*sizep < sizeof(fcc_t) + sizeof(fcc_enet_t) + 1) | ||
489 | return -EINVAL; | ||
490 | |||
491 | memcpy_fromio(p, fep->fcc.fccp, sizeof(fcc_t)); | ||
492 | p = (char *)p + sizeof(fcc_t); | ||
493 | |||
494 | memcpy_fromio(p, fep->fcc.ep, sizeof(fcc_enet_t)); | ||
495 | p = (char *)p + sizeof(fcc_enet_t); | ||
496 | |||
497 | memcpy_fromio(p, fep->fcc.fcccp, 1); | ||
498 | return 0; | ||
499 | } | ||
500 | |||
501 | static int get_regs_len(struct net_device *dev) | ||
502 | { | ||
503 | return sizeof(fcc_t) + sizeof(fcc_enet_t) + 1; | ||
504 | } | ||
505 | |||
506 | /* Some transmit errors cause the transmitter to shut | ||
507 | * down, so we issue a restart-transmit command. | ||
508 | * Also, to work around 8260 device erratum CPM37, we must | ||
509 | * disable and then re-enable the transmitter following a | ||
510 | * Late Collision, Underrun, or Retry Limit error. | ||
511 | * In addition, tbptr may point beyond BDs still marked | ||
512 | * as ready due to internal pipelining, so we need to look back | ||
513 | * through the BDs and adjust tbptr to point to the last BD | ||
514 | * marked as ready. This may result in some buffers being | ||
515 | * retransmitted. | ||
516 | */ | ||
517 | static void tx_restart(struct net_device *dev) | ||
518 | { | ||
519 | struct fs_enet_private *fep = netdev_priv(dev); | ||
520 | fcc_t __iomem *fccp = fep->fcc.fccp; | ||
521 | const struct fs_platform_info *fpi = fep->fpi; | ||
522 | fcc_enet_t __iomem *ep = fep->fcc.ep; | ||
523 | cbd_t __iomem *curr_tbptr; | ||
524 | cbd_t __iomem *recheck_bd; | ||
525 | cbd_t __iomem *prev_bd; | ||
526 | cbd_t __iomem *last_tx_bd; | ||
527 | |||
528 | last_tx_bd = fep->tx_bd_base + (fpi->tx_ring - 1); /* last BD; pointer arithmetic, not bytes */ | ||
529 | |||
530 | /* get the current bd held in TBPTR and scan back from this point */ | ||
531 | recheck_bd = curr_tbptr = (cbd_t __iomem *) | ||
532 | ((R32(ep, fen_genfcc.fcc_tbptr) - fep->ring_mem_addr) + | ||
533 | fep->ring_base); | ||
534 | |||
535 | prev_bd = (recheck_bd == fep->tx_bd_base) ? last_tx_bd : recheck_bd - 1; | ||
536 | |||
537 | /* Move through the bds in reverse, look for the earliest buffer | ||
538 | * that is not ready. Adjust TBPTR to the following buffer */ | ||
539 | while ((CBDR_SC(prev_bd) & BD_ENET_TX_READY) != 0) { | ||
540 | /* Go back one buffer */ | ||
541 | recheck_bd = prev_bd; | ||
542 | |||
543 | /* update the previous buffer */ | ||
544 | prev_bd = (prev_bd == fep->tx_bd_base) ? last_tx_bd : prev_bd - 1; | ||
545 | |||
546 | /* We should never see all bds marked as ready, check anyway */ | ||
547 | if (recheck_bd == curr_tbptr) | ||
548 | break; | ||
549 | } | ||
550 | /* Now update the TBPTR and dirty flag to the current buffer */ | ||
551 | W32(ep, fen_genfcc.fcc_tbptr, | ||
552 | (uint) (((void *)recheck_bd - fep->ring_base) + | ||
553 | fep->ring_mem_addr)); | ||
554 | fep->dirty_tx = recheck_bd; | ||
555 | |||
556 | C32(fccp, fcc_gfmr, FCC_GFMR_ENT); | ||
557 | udelay(10); | ||
558 | S32(fccp, fcc_gfmr, FCC_GFMR_ENT); | ||
559 | |||
560 | fcc_cr_cmd(fep, CPM_CR_RESTART_TX); | ||
561 | } | ||
562 | |||
563 | /*************************************************************************/ | ||
564 | |||
565 | const struct fs_ops fs_fcc_ops = { | ||
566 | .setup_data = setup_data, | ||
567 | .cleanup_data = cleanup_data, | ||
568 | .set_multicast_list = set_multicast_list, | ||
569 | .restart = restart, | ||
570 | .stop = stop, | ||
571 | .napi_clear_rx_event = napi_clear_rx_event, | ||
572 | .napi_enable_rx = napi_enable_rx, | ||
573 | .napi_disable_rx = napi_disable_rx, | ||
574 | .rx_bd_done = rx_bd_done, | ||
575 | .tx_kickstart = tx_kickstart, | ||
576 | .get_int_events = get_int_events, | ||
577 | .clear_int_events = clear_int_events, | ||
578 | .ev_error = ev_error, | ||
579 | .get_regs = get_regs, | ||
580 | .get_regs_len = get_regs_len, | ||
581 | .tx_restart = tx_restart, | ||
582 | .allocate_bd = allocate_bd, | ||
583 | .free_bd = free_bd, | ||
584 | }; | ||
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c new file mode 100644 index 000000000000..b9fbc83d64a7 --- /dev/null +++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c | |||
@@ -0,0 +1,498 @@ | |||
1 | /* | ||
2 | * Freescale Ethernet controllers | ||
3 | * | ||
4 | * Copyright (c) 2005 Intracom S.A. | ||
5 | * by Pantelis Antoniou <panto@intracom.gr> | ||
6 | * | ||
7 | * 2005 (c) MontaVista Software, Inc. | ||
8 | * Vitaly Bordug <vbordug@ru.mvista.com> | ||
9 | * | ||
10 | * This file is licensed under the terms of the GNU General Public License | ||
11 | * version 2. This program is licensed "as is" without any warranty of any | ||
12 | * kind, whether express or implied. | ||
13 | */ | ||
14 | |||
15 | #include <linux/module.h> | ||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/types.h> | ||
18 | #include <linux/string.h> | ||
19 | #include <linux/ptrace.h> | ||
20 | #include <linux/errno.h> | ||
21 | #include <linux/ioport.h> | ||
22 | #include <linux/interrupt.h> | ||
23 | #include <linux/init.h> | ||
24 | #include <linux/delay.h> | ||
25 | #include <linux/netdevice.h> | ||
26 | #include <linux/etherdevice.h> | ||
27 | #include <linux/skbuff.h> | ||
28 | #include <linux/spinlock.h> | ||
29 | #include <linux/mii.h> | ||
30 | #include <linux/ethtool.h> | ||
31 | #include <linux/bitops.h> | ||
32 | #include <linux/fs.h> | ||
33 | #include <linux/platform_device.h> | ||
34 | #include <linux/of_device.h> | ||
35 | #include <linux/gfp.h> | ||
36 | |||
37 | #include <asm/irq.h> | ||
38 | #include <asm/uaccess.h> | ||
39 | |||
40 | #ifdef CONFIG_8xx | ||
41 | #include <asm/8xx_immap.h> | ||
42 | #include <asm/pgtable.h> | ||
43 | #include <asm/mpc8xx.h> | ||
44 | #include <asm/cpm1.h> | ||
45 | #endif | ||
46 | |||
47 | #include "fs_enet.h" | ||
48 | #include "fec.h" | ||
49 | |||
50 | /*************************************************/ | ||
51 | |||
52 | #if defined(CONFIG_CPM1) | ||
53 | /* for a CPM1 __raw_xxx's are sufficient */ | ||
54 | #define __fs_out32(addr, x) __raw_writel(x, addr) | ||
55 | #define __fs_out16(addr, x) __raw_writew(x, addr) | ||
56 | #define __fs_in32(addr) __raw_readl(addr) | ||
57 | #define __fs_in16(addr) __raw_readw(addr) | ||
58 | #else | ||
59 | /* for others play it safe */ | ||
60 | #define __fs_out32(addr, x) out_be32(addr, x) | ||
61 | #define __fs_out16(addr, x) out_be16(addr, x) | ||
62 | #define __fs_in32(addr) in_be32(addr) | ||
63 | #define __fs_in16(addr) in_be16(addr) | ||
64 | #endif | ||
65 | |||
66 | /* write */ | ||
67 | #define FW(_fecp, _reg, _v) __fs_out32(&(_fecp)->fec_ ## _reg, (_v)) | ||
68 | |||
69 | /* read */ | ||
70 | #define FR(_fecp, _reg) __fs_in32(&(_fecp)->fec_ ## _reg) | ||
71 | |||
72 | /* set bits */ | ||
73 | #define FS(_fecp, _reg, _v) FW(_fecp, _reg, FR(_fecp, _reg) | (_v)) | ||
74 | |||
75 | /* clear bits */ | ||
76 | #define FC(_fecp, _reg, _v) FW(_fecp, _reg, FR(_fecp, _reg) & ~(_v)) | ||
77 | |||
78 | /* | ||
79 | * Delay to wait for FEC reset command to complete (in us) | ||
80 | */ | ||
81 | #define FEC_RESET_DELAY 50 | ||
82 | |||
83 | static int whack_reset(struct fec __iomem *fecp) | ||
84 | { | ||
85 | int i; | ||
86 | |||
87 | FW(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_RESET); | ||
88 | for (i = 0; i < FEC_RESET_DELAY; i++) { | ||
89 | if ((FR(fecp, ecntrl) & FEC_ECNTRL_RESET) == 0) | ||
90 | return 0; /* OK */ | ||
91 | udelay(1); | ||
92 | } | ||
93 | |||
94 | return -1; | ||
95 | } | ||
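whack_reset() polls for at most FEC_RESET_DELAY microseconds for the self-clearing RESET bit to drop; restart() below treats a nonzero return as a controller fault and logs an error.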
96 | |||
97 | static int do_pd_setup(struct fs_enet_private *fep) | ||
98 | { | ||
99 | struct platform_device *ofdev = to_platform_device(fep->dev); | ||
100 | |||
101 | fep->interrupt = of_irq_to_resource(ofdev->dev.of_node, 0, NULL); | ||
102 | if (fep->interrupt == NO_IRQ) | ||
103 | return -EINVAL; | ||
104 | |||
105 | fep->fec.fecp = of_iomap(ofdev->dev.of_node, 0); | ||
106 | if (!fep->fec.fecp) | ||
107 | return -EINVAL; | ||
108 | |||
109 | return 0; | ||
110 | } | ||
111 | |||
112 | #define FEC_NAPI_RX_EVENT_MSK (FEC_ENET_RXF | FEC_ENET_RXB) | ||
113 | #define FEC_RX_EVENT (FEC_ENET_RXF) | ||
114 | #define FEC_TX_EVENT (FEC_ENET_TXF) | ||
115 | #define FEC_ERR_EVENT_MSK (FEC_ENET_HBERR | FEC_ENET_BABR | \ | ||
116 | FEC_ENET_BABT | FEC_ENET_EBERR) | ||
117 | |||
118 | static int setup_data(struct net_device *dev) | ||
119 | { | ||
120 | struct fs_enet_private *fep = netdev_priv(dev); | ||
121 | |||
122 | if (do_pd_setup(fep) != 0) | ||
123 | return -EINVAL; | ||
124 | |||
125 | fep->fec.hthi = 0; | ||
126 | fep->fec.htlo = 0; | ||
127 | |||
128 | fep->ev_napi_rx = FEC_NAPI_RX_EVENT_MSK; | ||
129 | fep->ev_rx = FEC_RX_EVENT; | ||
130 | fep->ev_tx = FEC_TX_EVENT; | ||
131 | fep->ev_err = FEC_ERR_EVENT_MSK; | ||
132 | |||
133 | return 0; | ||
134 | } | ||
135 | |||
136 | static int allocate_bd(struct net_device *dev) | ||
137 | { | ||
138 | struct fs_enet_private *fep = netdev_priv(dev); | ||
139 | const struct fs_platform_info *fpi = fep->fpi; | ||
140 | |||
141 | fep->ring_base = (void __force __iomem *)dma_alloc_coherent(fep->dev, | ||
142 | (fpi->tx_ring + fpi->rx_ring) * | ||
143 | sizeof(cbd_t), &fep->ring_mem_addr, | ||
144 | GFP_KERNEL); | ||
145 | if (fep->ring_base == NULL) | ||
146 | return -ENOMEM; | ||
147 | |||
148 | return 0; | ||
149 | } | ||
150 | |||
151 | static void free_bd(struct net_device *dev) | ||
152 | { | ||
153 | struct fs_enet_private *fep = netdev_priv(dev); | ||
154 | const struct fs_platform_info *fpi = fep->fpi; | ||
155 | |||
156 | if (fep->ring_base) | ||
157 | dma_free_coherent(fep->dev, (fpi->tx_ring + fpi->rx_ring) | ||
158 | * sizeof(cbd_t), | ||
159 | (void __force *)fep->ring_base, | ||
160 | fep->ring_mem_addr); | ||
161 | } | ||
162 | |||
163 | static void cleanup_data(struct net_device *dev) | ||
164 | { | ||
165 | /* nothing */ | ||
166 | } | ||
167 | |||
168 | static void set_promiscuous_mode(struct net_device *dev) | ||
169 | { | ||
170 | struct fs_enet_private *fep = netdev_priv(dev); | ||
171 | struct fec __iomem *fecp = fep->fec.fecp; | ||
172 | |||
173 | FS(fecp, r_cntrl, FEC_RCNTRL_PROM); | ||
174 | } | ||
175 | |||
176 | static void set_multicast_start(struct net_device *dev) | ||
177 | { | ||
178 | struct fs_enet_private *fep = netdev_priv(dev); | ||
179 | |||
180 | fep->fec.hthi = 0; | ||
181 | fep->fec.htlo = 0; | ||
182 | } | ||
183 | |||
184 | static void set_multicast_one(struct net_device *dev, const u8 *mac) | ||
185 | { | ||
186 | struct fs_enet_private *fep = netdev_priv(dev); | ||
187 | int temp, hash_index, i, j; | ||
188 | u32 crc, csrVal; | ||
189 | u8 byte, msb; | ||
190 | |||
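	/* Bit-serial CRC-32 of the six MAC octets, LSB first, using FEC_CRC_POLY. */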
191 | crc = 0xffffffff; | ||
192 | for (i = 0; i < 6; i++) { | ||
193 | byte = mac[i]; | ||
194 | for (j = 0; j < 8; j++) { | ||
195 | msb = crc >> 31; | ||
196 | crc <<= 1; | ||
197 | if (msb ^ (byte & 0x1)) | ||
198 | crc ^= FEC_CRC_POLY; | ||
199 | byte >>= 1; | ||
200 | } | ||
201 | } | ||
202 | |||
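	/* The five CRC bits above bit 0, bit-reversed, pick a bit position
	 * within a register; CRC bit 0 selects the high or low 32-bit
	 * group-hash register. */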
203 | temp = (crc & 0x3f) >> 1; | ||
204 | hash_index = ((temp & 0x01) << 4) | | ||
205 | ((temp & 0x02) << 2) | | ||
206 | ((temp & 0x04)) | | ||
207 | ((temp & 0x08) >> 2) | | ||
208 | ((temp & 0x10) >> 4); | ||
209 | csrVal = 1 << hash_index; | ||
210 | if (crc & 1) | ||
211 | fep->fec.hthi |= csrVal; | ||
212 | else | ||
213 | fep->fec.htlo |= csrVal; | ||
214 | } | ||
215 | |||
216 | static void set_multicast_finish(struct net_device *dev) | ||
217 | { | ||
218 | struct fs_enet_private *fep = netdev_priv(dev); | ||
219 | struct fec __iomem *fecp = fep->fec.fecp; | ||
220 | |||
221 | /* if all multi or too many multicasts; just enable all */ | ||
222 | if ((dev->flags & IFF_ALLMULTI) != 0 || | ||
223 | netdev_mc_count(dev) > FEC_MAX_MULTICAST_ADDRS) { | ||
224 | fep->fec.hthi = 0xffffffffU; | ||
225 | fep->fec.htlo = 0xffffffffU; | ||
226 | } | ||
227 | |||
228 | FC(fecp, r_cntrl, FEC_RCNTRL_PROM); | ||
229 | FW(fecp, grp_hash_table_high, fep->fec.hthi); | ||
230 | FW(fecp, grp_hash_table_low, fep->fec.htlo); | ||
231 | } | ||
232 | |||
233 | static void set_multicast_list(struct net_device *dev) | ||
234 | { | ||
235 | struct netdev_hw_addr *ha; | ||
236 | |||
237 | if ((dev->flags & IFF_PROMISC) == 0) { | ||
238 | set_multicast_start(dev); | ||
239 | netdev_for_each_mc_addr(ha, dev) | ||
240 | set_multicast_one(dev, ha->addr); | ||
241 | set_multicast_finish(dev); | ||
242 | } else | ||
243 | set_promiscuous_mode(dev); | ||
244 | } | ||
245 | |||
246 | static void restart(struct net_device *dev) | ||
247 | { | ||
248 | struct fs_enet_private *fep = netdev_priv(dev); | ||
249 | struct fec __iomem *fecp = fep->fec.fecp; | ||
250 | const struct fs_platform_info *fpi = fep->fpi; | ||
251 | dma_addr_t rx_bd_base_phys, tx_bd_base_phys; | ||
252 | int r; | ||
253 | u32 addrhi, addrlo; | ||
254 | |||
255 | struct mii_bus *mii = fep->phydev->bus; | ||
256 | struct fec_info *fec_inf = mii->priv; | ||
257 | |||
258 | r = whack_reset(fep->fec.fecp); | ||
259 | if (r != 0) | ||
260 | dev_err(fep->dev, "FEC Reset FAILED!\n"); | ||
261 | /* | ||
262 | * Set station address. | ||
263 | */ | ||
264 | addrhi = ((u32) dev->dev_addr[0] << 24) | | ||
265 | ((u32) dev->dev_addr[1] << 16) | | ||
266 | ((u32) dev->dev_addr[2] << 8) | | ||
267 | (u32) dev->dev_addr[3]; | ||
268 | addrlo = ((u32) dev->dev_addr[4] << 24) | | ||
269 | ((u32) dev->dev_addr[5] << 16); | ||
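	/* Per the FEC register map, "addr_low" holds the first four octets
	 * of the station address and "addr_high" the last two, so writing
	 * addrhi to addr_low below is intentional. */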
270 | FW(fecp, addr_low, addrhi); | ||
271 | FW(fecp, addr_high, addrlo); | ||
272 | |||
273 | /* | ||
274 | * Restore the multicast hash filter (cleared by the reset above). | ||
275 | */ | ||
276 | FW(fecp, grp_hash_table_high, fep->fec.hthi); | ||
277 | FW(fecp, grp_hash_table_low, fep->fec.htlo); | ||
278 | |||
279 | /* | ||
280 | * Set maximum receive buffer size. | ||
281 | */ | ||
282 | FW(fecp, r_buff_size, PKT_MAXBLR_SIZE); | ||
283 | #ifdef CONFIG_FS_ENET_MPC5121_FEC | ||
284 | FW(fecp, r_cntrl, PKT_MAXBUF_SIZE << 16); | ||
285 | #else | ||
286 | FW(fecp, r_hash, PKT_MAXBUF_SIZE); | ||
287 | #endif | ||
288 | |||
289 | /* get physical address */ | ||
290 | rx_bd_base_phys = fep->ring_mem_addr; | ||
291 | tx_bd_base_phys = rx_bd_base_phys + sizeof(cbd_t) * fpi->rx_ring; | ||
292 | |||
293 | /* | ||
294 | * Set receive and transmit descriptor base. | ||
295 | */ | ||
296 | FW(fecp, r_des_start, rx_bd_base_phys); | ||
297 | FW(fecp, x_des_start, tx_bd_base_phys); | ||
298 | |||
299 | fs_init_bds(dev); | ||
300 | |||
301 | /* | ||
302 | * Enable big endian and don't care about SDMA FC. | ||
303 | */ | ||
304 | #ifdef CONFIG_FS_ENET_MPC5121_FEC | ||
305 | FS(fecp, dma_control, 0xC0000000); | ||
306 | #else | ||
307 | FW(fecp, fun_code, 0x78000000); | ||
308 | #endif | ||
309 | |||
310 | /* | ||
311 | * Set MII speed. | ||
312 | */ | ||
313 | FW(fecp, mii_speed, fec_inf->mii_speed); | ||
314 | |||
315 | /* | ||
316 | * Clear any outstanding interrupt. | ||
317 | */ | ||
318 | FW(fecp, ievent, 0xffc0); | ||
319 | #ifndef CONFIG_FS_ENET_MPC5121_FEC | ||
320 | FW(fecp, ivec, (virq_to_hw(fep->interrupt) / 2) << 29); | ||
321 | |||
322 | FW(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */ | ||
323 | #else | ||
324 | /* | ||
325 | * Only set MII mode - do not touch maximum frame length | ||
326 | * configured before. | ||
327 | */ | ||
328 | FS(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); | ||
329 | #endif | ||
330 | /* | ||
331 | * adjust to duplex mode | ||
332 | */ | ||
333 | if (fep->phydev->duplex) { | ||
334 | FC(fecp, r_cntrl, FEC_RCNTRL_DRT); | ||
335 | FS(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD enable */ | ||
336 | } else { | ||
337 | FS(fecp, r_cntrl, FEC_RCNTRL_DRT); | ||
338 | FC(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD disable */ | ||
339 | } | ||
340 | |||
341 | /* | ||
342 | * Enable interrupts we wish to service. | ||
343 | */ | ||
344 | FW(fecp, imask, FEC_ENET_TXF | FEC_ENET_TXB | | ||
345 | FEC_ENET_RXF | FEC_ENET_RXB); | ||
346 | |||
347 | /* | ||
348 | * And last, enable the transmit and receive processing. | ||
349 | */ | ||
350 | FW(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN); | ||
351 | FW(fecp, r_des_active, 0x01000000); | ||
352 | } | ||
353 | |||
354 | static void stop(struct net_device *dev) | ||
355 | { | ||
356 | struct fs_enet_private *fep = netdev_priv(dev); | ||
357 | const struct fs_platform_info *fpi = fep->fpi; | ||
358 | struct fec __iomem *fecp = fep->fec.fecp; | ||
359 | |||
360 | struct fec_info *feci = fep->phydev->bus->priv; | ||
361 | |||
362 | int i; | ||
363 | |||
364 | if ((FR(fecp, ecntrl) & FEC_ECNTRL_ETHER_EN) == 0) | ||
365 | return; /* already down */ | ||
366 | |||
367 | FW(fecp, x_cntrl, 0x01); /* Graceful transmit stop */ | ||
368 | for (i = 0; ((FR(fecp, ievent) & 0x10000000) == 0) && | ||
369 | i < FEC_RESET_DELAY; i++) | ||
370 | udelay(1); | ||
371 | |||
372 | if (i == FEC_RESET_DELAY) | ||
373 | dev_warn(fep->dev, "FEC timeout on graceful transmit stop\n"); | ||
374 | /* | ||
375 | * Disable the FEC and mask all of its interrupts. | ||
376 | */ | ||
377 | FW(fecp, imask, 0); | ||
378 | FC(fecp, ecntrl, FEC_ECNTRL_ETHER_EN); | ||
379 | |||
380 | fs_cleanup_bds(dev); | ||
381 | |||
382 | /* don't fully shut down the controller that hosts the MII bus */ | ||
383 | if (fpi->has_phy) { | ||
384 | FS(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */ | ||
385 | FS(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN); | ||
386 | FW(fecp, ievent, FEC_ENET_MII); | ||
387 | FW(fecp, mii_speed, feci->mii_speed); | ||
388 | } | ||
389 | } | ||
390 | |||
391 | static void napi_clear_rx_event(struct net_device *dev) | ||
392 | { | ||
393 | struct fs_enet_private *fep = netdev_priv(dev); | ||
394 | struct fec __iomem *fecp = fep->fec.fecp; | ||
395 | |||
396 | FW(fecp, ievent, FEC_NAPI_RX_EVENT_MSK); | ||
397 | } | ||
398 | |||
399 | static void napi_enable_rx(struct net_device *dev) | ||
400 | { | ||
401 | struct fs_enet_private *fep = netdev_priv(dev); | ||
402 | struct fec __iomem *fecp = fep->fec.fecp; | ||
403 | |||
404 | FS(fecp, imask, FEC_NAPI_RX_EVENT_MSK); | ||
405 | } | ||
406 | |||
407 | static void napi_disable_rx(struct net_device *dev) | ||
408 | { | ||
409 | struct fs_enet_private *fep = netdev_priv(dev); | ||
410 | struct fec __iomem *fecp = fep->fec.fecp; | ||
411 | |||
412 | FC(fecp, imask, FEC_NAPI_RX_EVENT_MSK); | ||
413 | } | ||
414 | |||
415 | static void rx_bd_done(struct net_device *dev) | ||
416 | { | ||
417 | struct fs_enet_private *fep = netdev_priv(dev); | ||
418 | struct fec __iomem *fecp = fep->fec.fecp; | ||
419 | |||
420 | FW(fecp, r_des_active, 0x01000000); | ||
421 | } | ||
422 | |||
423 | static void tx_kickstart(struct net_device *dev) | ||
424 | { | ||
425 | struct fs_enet_private *fep = netdev_priv(dev); | ||
426 | struct fec __iomem *fecp = fep->fec.fecp; | ||
427 | |||
428 | FW(fecp, x_des_active, 0x01000000); | ||
429 | } | ||
430 | |||
431 | static u32 get_int_events(struct net_device *dev) | ||
432 | { | ||
433 | struct fs_enet_private *fep = netdev_priv(dev); | ||
434 | struct fec __iomem *fecp = fep->fec.fecp; | ||
435 | |||
436 | return FR(fecp, ievent) & FR(fecp, imask); | ||
437 | } | ||
438 | |||
439 | static void clear_int_events(struct net_device *dev, u32 int_events) | ||
440 | { | ||
441 | struct fs_enet_private *fep = netdev_priv(dev); | ||
442 | struct fec __iomem *fecp = fep->fec.fecp; | ||
443 | |||
444 | FW(fecp, ievent, int_events); | ||
445 | } | ||
446 | |||
447 | static void ev_error(struct net_device *dev, u32 int_events) | ||
448 | { | ||
449 | struct fs_enet_private *fep = netdev_priv(dev); | ||
450 | |||
451 | dev_warn(fep->dev, "FEC ERROR(s) 0x%x\n", int_events); | ||
452 | } | ||
453 | |||
454 | static int get_regs(struct net_device *dev, void *p, int *sizep) | ||
455 | { | ||
456 | struct fs_enet_private *fep = netdev_priv(dev); | ||
457 | |||
458 | if (*sizep < sizeof(struct fec)) | ||
459 | return -EINVAL; | ||
460 | |||
461 | memcpy_fromio(p, fep->fec.fecp, sizeof(struct fec)); | ||
462 | |||
463 | return 0; | ||
464 | } | ||
465 | |||
466 | static int get_regs_len(struct net_device *dev) | ||
467 | { | ||
468 | return sizeof(struct fec); | ||
469 | } | ||
470 | |||
471 | static void tx_restart(struct net_device *dev) | ||
472 | { | ||
473 | /* nothing */ | ||
474 | } | ||
475 | |||
476 | /*************************************************************************/ | ||
477 | |||
478 | const struct fs_ops fs_fec_ops = { | ||
479 | .setup_data = setup_data, | ||
480 | .cleanup_data = cleanup_data, | ||
481 | .set_multicast_list = set_multicast_list, | ||
482 | .restart = restart, | ||
483 | .stop = stop, | ||
484 | .napi_clear_rx_event = napi_clear_rx_event, | ||
485 | .napi_enable_rx = napi_enable_rx, | ||
486 | .napi_disable_rx = napi_disable_rx, | ||
487 | .rx_bd_done = rx_bd_done, | ||
488 | .tx_kickstart = tx_kickstart, | ||
489 | .get_int_events = get_int_events, | ||
490 | .clear_int_events = clear_int_events, | ||
491 | .ev_error = ev_error, | ||
492 | .get_regs = get_regs, | ||
493 | .get_regs_len = get_regs_len, | ||
494 | .tx_restart = tx_restart, | ||
495 | .allocate_bd = allocate_bd, | ||
496 | .free_bd = free_bd, | ||
497 | }; | ||
498 | |||
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c new file mode 100644 index 000000000000..22a02a767069 --- /dev/null +++ b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c | |||
@@ -0,0 +1,484 @@ | |||
1 | /* | ||
2 | * Ethernet on Serial Communications Controller (SCC) driver for Motorola MPC8xx and MPC82xx. | ||
3 | * | ||
4 | * Copyright (c) 2003 Intracom S.A. | ||
5 | * by Pantelis Antoniou <panto@intracom.gr> | ||
6 | * | ||
7 | * 2005 (c) MontaVista Software, Inc. | ||
8 | * Vitaly Bordug <vbordug@ru.mvista.com> | ||
9 | * | ||
10 | * This file is licensed under the terms of the GNU General Public License | ||
11 | * version 2. This program is licensed "as is" without any warranty of any | ||
12 | * kind, whether express or implied. | ||
13 | */ | ||
14 | |||
15 | #include <linux/module.h> | ||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/types.h> | ||
18 | #include <linux/string.h> | ||
19 | #include <linux/ptrace.h> | ||
20 | #include <linux/errno.h> | ||
21 | #include <linux/ioport.h> | ||
22 | #include <linux/interrupt.h> | ||
23 | #include <linux/init.h> | ||
24 | #include <linux/delay.h> | ||
25 | #include <linux/netdevice.h> | ||
26 | #include <linux/etherdevice.h> | ||
27 | #include <linux/skbuff.h> | ||
28 | #include <linux/spinlock.h> | ||
29 | #include <linux/mii.h> | ||
30 | #include <linux/ethtool.h> | ||
31 | #include <linux/bitops.h> | ||
32 | #include <linux/fs.h> | ||
33 | #include <linux/platform_device.h> | ||
34 | #include <linux/of_platform.h> | ||
35 | |||
36 | #include <asm/irq.h> | ||
37 | #include <asm/uaccess.h> | ||
38 | |||
39 | #ifdef CONFIG_8xx | ||
40 | #include <asm/8xx_immap.h> | ||
41 | #include <asm/pgtable.h> | ||
42 | #include <asm/mpc8xx.h> | ||
43 | #include <asm/cpm1.h> | ||
44 | #endif | ||
45 | |||
46 | #include "fs_enet.h" | ||
47 | |||
48 | /*************************************************/ | ||
49 | #if defined(CONFIG_CPM1) | ||
50 | /* for an 8xx __raw_xxx's are sufficient */ | ||
51 | #define __fs_out32(addr, x) __raw_writel(x, addr) | ||
52 | #define __fs_out16(addr, x) __raw_writew(x, addr) | ||
53 | #define __fs_out8(addr, x) __raw_writeb(x, addr) | ||
54 | #define __fs_in32(addr) __raw_readl(addr) | ||
55 | #define __fs_in16(addr) __raw_readw(addr) | ||
56 | #define __fs_in8(addr) __raw_readb(addr) | ||
57 | #else | ||
58 | /* for others play it safe */ | ||
59 | #define __fs_out32(addr, x) out_be32(addr, x) | ||
60 | #define __fs_out16(addr, x) out_be16(addr, x) | ||
61 | #define __fs_in32(addr) in_be32(addr) | ||
62 | #define __fs_in16(addr) in_be16(addr) | ||
63 | #define __fs_out8(addr, x) out_8(addr, x) | ||
64 | #define __fs_in8(addr) in_8(addr) | ||
65 | #endif | ||
66 | |||
67 | /* write, read, set bits, clear bits */ | ||
68 | #define W32(_p, _m, _v) __fs_out32(&(_p)->_m, (_v)) | ||
69 | #define R32(_p, _m) __fs_in32(&(_p)->_m) | ||
70 | #define S32(_p, _m, _v) W32(_p, _m, R32(_p, _m) | (_v)) | ||
71 | #define C32(_p, _m, _v) W32(_p, _m, R32(_p, _m) & ~(_v)) | ||
72 | |||
73 | #define W16(_p, _m, _v) __fs_out16(&(_p)->_m, (_v)) | ||
74 | #define R16(_p, _m) __fs_in16(&(_p)->_m) | ||
75 | #define S16(_p, _m, _v) W16(_p, _m, R16(_p, _m) | (_v)) | ||
76 | #define C16(_p, _m, _v) W16(_p, _m, R16(_p, _m) & ~(_v)) | ||
77 | |||
78 | #define W8(_p, _m, _v) __fs_out8(&(_p)->_m, (_v)) | ||
79 | #define R8(_p, _m) __fs_in8(&(_p)->_m) | ||
80 | #define S8(_p, _m, _v) W8(_p, _m, R8(_p, _m) | (_v)) | ||
81 | #define C8(_p, _m, _v) W8(_p, _m, R8(_p, _m) & ~(_v)) | ||
82 | |||
83 | #define SCC_MAX_MULTICAST_ADDRS 64 | ||
84 | |||
85 | /* | ||
86 | * Delay to wait for SCC reset command to complete (in us) | ||
87 | */ | ||
88 | #define SCC_RESET_DELAY 50 | ||
89 | |||
90 | static inline int scc_cr_cmd(struct fs_enet_private *fep, u32 op) | ||
91 | { | ||
92 | const struct fs_platform_info *fpi = fep->fpi; | ||
93 | |||
94 | return cpm_command(fpi->cp_command, op); | ||
95 | } | ||
96 | |||
97 | static int do_pd_setup(struct fs_enet_private *fep) | ||
98 | { | ||
99 | struct platform_device *ofdev = to_platform_device(fep->dev); | ||
100 | |||
101 | fep->interrupt = of_irq_to_resource(ofdev->dev.of_node, 0, NULL); | ||
102 | if (fep->interrupt == NO_IRQ) | ||
103 | return -EINVAL; | ||
104 | |||
105 | fep->scc.sccp = of_iomap(ofdev->dev.of_node, 0); | ||
106 | if (!fep->scc.sccp) | ||
107 | return -EINVAL; | ||
108 | |||
109 | fep->scc.ep = of_iomap(ofdev->dev.of_node, 1); | ||
110 | if (!fep->scc.ep) { | ||
111 | iounmap(fep->scc.sccp); | ||
112 | return -EINVAL; | ||
113 | } | ||
114 | |||
115 | return 0; | ||
116 | } | ||
117 | |||
118 | #define SCC_NAPI_RX_EVENT_MSK (SCCE_ENET_RXF | SCCE_ENET_RXB) | ||
119 | #define SCC_RX_EVENT (SCCE_ENET_RXF) | ||
120 | #define SCC_TX_EVENT (SCCE_ENET_TXB) | ||
121 | #define SCC_ERR_EVENT_MSK (SCCE_ENET_TXE | SCCE_ENET_BSY) | ||
122 | |||
123 | static int setup_data(struct net_device *dev) | ||
124 | { | ||
125 | struct fs_enet_private *fep = netdev_priv(dev); | ||
126 | |||
127 | do_pd_setup(fep); | ||
128 | |||
129 | fep->scc.hthi = 0; | ||
130 | fep->scc.htlo = 0; | ||
131 | |||
132 | fep->ev_napi_rx = SCC_NAPI_RX_EVENT_MSK; | ||
133 | fep->ev_rx = SCC_RX_EVENT; | ||
134 | fep->ev_tx = SCC_TX_EVENT | SCCE_ENET_TXE; | ||
135 | fep->ev_err = SCC_ERR_EVENT_MSK; | ||
136 | |||
137 | return 0; | ||
138 | } | ||
139 | |||
140 | static int allocate_bd(struct net_device *dev) | ||
141 | { | ||
142 | struct fs_enet_private *fep = netdev_priv(dev); | ||
143 | const struct fs_platform_info *fpi = fep->fpi; | ||
144 | |||
145 | fep->ring_mem_addr = cpm_dpalloc((fpi->tx_ring + fpi->rx_ring) * | ||
146 | sizeof(cbd_t), 8); | ||
147 | if (IS_ERR_VALUE(fep->ring_mem_addr)) | ||
148 | return -ENOMEM; | ||
149 | |||
150 | fep->ring_base = (void __iomem __force *) | ||
151 | cpm_dpram_addr(fep->ring_mem_addr); | ||
152 | |||
153 | return 0; | ||
154 | } | ||
155 | |||
156 | static void free_bd(struct net_device *dev) | ||
157 | { | ||
158 | struct fs_enet_private *fep = netdev_priv(dev); | ||
159 | |||
160 | if (fep->ring_base) | ||
161 | cpm_dpfree(fep->ring_mem_addr); | ||
162 | } | ||
163 | |||
164 | static void cleanup_data(struct net_device *dev) | ||
165 | { | ||
166 | /* nothing */ | ||
167 | } | ||
168 | |||
169 | static void set_promiscuous_mode(struct net_device *dev) | ||
170 | { | ||
171 | struct fs_enet_private *fep = netdev_priv(dev); | ||
172 | scc_t __iomem *sccp = fep->scc.sccp; | ||
173 | |||
174 | S16(sccp, scc_psmr, SCC_PSMR_PRO); | ||
175 | } | ||
176 | |||
177 | static void set_multicast_start(struct net_device *dev) | ||
178 | { | ||
179 | struct fs_enet_private *fep = netdev_priv(dev); | ||
180 | scc_enet_t __iomem *ep = fep->scc.ep; | ||
181 | |||
182 | W16(ep, sen_gaddr1, 0); | ||
183 | W16(ep, sen_gaddr2, 0); | ||
184 | W16(ep, sen_gaddr3, 0); | ||
185 | W16(ep, sen_gaddr4, 0); | ||
186 | } | ||
187 | |||
188 | static void set_multicast_one(struct net_device *dev, const u8 * mac) | ||
189 | { | ||
190 | struct fs_enet_private *fep = netdev_priv(dev); | ||
191 | scc_enet_t __iomem *ep = fep->scc.ep; | ||
192 | u16 taddrh, taddrm, taddrl; | ||
193 | |||
194 | taddrh = ((u16) mac[5] << 8) | mac[4]; | ||
195 | taddrm = ((u16) mac[3] << 8) | mac[2]; | ||
196 | taddrl = ((u16) mac[1] << 8) | mac[0]; | ||
197 | |||
198 | W16(ep, sen_taddrh, taddrh); | ||
199 | W16(ep, sen_taddrm, taddrm); | ||
200 | W16(ep, sen_taddrl, taddrl); | ||
201 | scc_cr_cmd(fep, CPM_CR_SET_GADDR); | ||
202 | } | ||
203 | |||
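The byte order here deserves a note: the CPM wants the address spread across three 16-bit registers with the last octet in the top byte of TADDR_H and the first octet in the low byte of TADDR_L. A tiny standalone check of the packing (the sample address is arbitrary):

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* pack 00:11:22:33:44:55 the way set_multicast_one() does */
    const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

    uint16_t taddrh = ((uint16_t)mac[5] << 8) | mac[4];
    uint16_t taddrm = ((uint16_t)mac[3] << 8) | mac[2];
    uint16_t taddrl = ((uint16_t)mac[1] << 8) | mac[0];

    printf("taddrh=0x%04x taddrm=0x%04x taddrl=0x%04x\n",
           taddrh, taddrm, taddrl);  /* 0x5544 0x3322 0x1100 */
    return 0;
}
```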
204 | static void set_multicast_finish(struct net_device *dev) | ||
205 | { | ||
206 | struct fs_enet_private *fep = netdev_priv(dev); | ||
207 | scc_t __iomem *sccp = fep->scc.sccp; | ||
208 | scc_enet_t __iomem *ep = fep->scc.ep; | ||
209 | |||
210 | /* clear promiscuous always */ | ||
211 | C16(sccp, scc_psmr, SCC_PSMR_PRO); | ||
212 | |||
213 | /* if all-multicast, or too many multicast addresses, just enable all */ | ||
214 | if ((dev->flags & IFF_ALLMULTI) != 0 || | ||
215 | netdev_mc_count(dev) > SCC_MAX_MULTICAST_ADDRS) { | ||
216 | |||
217 | W16(ep, sen_gaddr1, 0xffff); | ||
218 | W16(ep, sen_gaddr2, 0xffff); | ||
219 | W16(ep, sen_gaddr3, 0xffff); | ||
220 | W16(ep, sen_gaddr4, 0xffff); | ||
221 | } | ||
222 | } | ||
223 | |||
224 | static void set_multicast_list(struct net_device *dev) | ||
225 | { | ||
226 | struct netdev_hw_addr *ha; | ||
227 | |||
228 | if ((dev->flags & IFF_PROMISC) == 0) { | ||
229 | set_multicast_start(dev); | ||
230 | netdev_for_each_mc_addr(ha, dev) | ||
231 | set_multicast_one(dev, ha->addr); | ||
232 | set_multicast_finish(dev); | ||
233 | } else | ||
234 | set_promiscuous_mode(dev); | ||
235 | } | ||
236 | |||
237 | /* | ||
238 | * This function is called to start or restart the SCC during a link | ||
239 | * change. This only happens when switching between half and full | ||
240 | * duplex. | ||
241 | */ | ||
242 | static void restart(struct net_device *dev) | ||
243 | { | ||
244 | struct fs_enet_private *fep = netdev_priv(dev); | ||
245 | scc_t __iomem *sccp = fep->scc.sccp; | ||
246 | scc_enet_t __iomem *ep = fep->scc.ep; | ||
247 | const struct fs_platform_info *fpi = fep->fpi; | ||
248 | u16 paddrh, paddrm, paddrl; | ||
249 | const unsigned char *mac; | ||
250 | int i; | ||
251 | |||
252 | C32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT); | ||
253 | |||
254 | /* clear everything (slow & steady does it) */ | ||
255 | for (i = 0; i < sizeof(*ep); i++) | ||
256 | __fs_out8((u8 __iomem *)ep + i, 0); | ||
257 | |||
258 | /* point to bds */ | ||
259 | W16(ep, sen_genscc.scc_rbase, fep->ring_mem_addr); | ||
260 | W16(ep, sen_genscc.scc_tbase, | ||
261 | fep->ring_mem_addr + sizeof(cbd_t) * fpi->rx_ring); | ||
262 | |||
263 | /* Initialize function code registers for big-endian. | ||
264 | */ | ||
265 | #ifndef CONFIG_NOT_COHERENT_CACHE | ||
266 | W8(ep, sen_genscc.scc_rfcr, SCC_EB | SCC_GBL); | ||
267 | W8(ep, sen_genscc.scc_tfcr, SCC_EB | SCC_GBL); | ||
268 | #else | ||
269 | W8(ep, sen_genscc.scc_rfcr, SCC_EB); | ||
270 | W8(ep, sen_genscc.scc_tfcr, SCC_EB); | ||
271 | #endif | ||
272 | |||
273 | /* Set maximum bytes per receive buffer. | ||
274 | * This appears to be an Ethernet frame size, not the buffer | ||
275 | * fragment size. It must be a multiple of four. | ||
276 | */ | ||
277 | W16(ep, sen_genscc.scc_mrblr, 0x5f0); | ||
278 | |||
279 | /* Set CRC preset and mask. | ||
280 | */ | ||
281 | W32(ep, sen_cpres, 0xffffffff); | ||
282 | W32(ep, sen_cmask, 0xdebb20e3); | ||
283 | |||
284 | W32(ep, sen_crcec, 0); /* CRC Error counter */ | ||
285 | W32(ep, sen_alec, 0); /* alignment error counter */ | ||
286 | W32(ep, sen_disfc, 0); /* discard frame counter */ | ||
287 | |||
288 | W16(ep, sen_pads, 0x8888); /* Tx short frame pad character */ | ||
289 | W16(ep, sen_retlim, 15); /* Retry limit threshold */ | ||
290 | |||
291 | W16(ep, sen_maxflr, 0x5ee); /* maximum frame length register */ | ||
292 | |||
293 | W16(ep, sen_minflr, PKT_MINBUF_SIZE); /* minimum frame length register */ | ||
294 | |||
295 | W16(ep, sen_maxd1, 0x000005f0); /* maximum DMA1 length */ | ||
296 | W16(ep, sen_maxd2, 0x000005f0); /* maximum DMA2 length */ | ||
297 | |||
298 | /* Clear hash tables. | ||
299 | */ | ||
300 | W16(ep, sen_gaddr1, 0); | ||
301 | W16(ep, sen_gaddr2, 0); | ||
302 | W16(ep, sen_gaddr3, 0); | ||
303 | W16(ep, sen_gaddr4, 0); | ||
304 | W16(ep, sen_iaddr1, 0); | ||
305 | W16(ep, sen_iaddr2, 0); | ||
306 | W16(ep, sen_iaddr3, 0); | ||
307 | W16(ep, sen_iaddr4, 0); | ||
308 | |||
309 | /* set address | ||
310 | */ | ||
311 | mac = dev->dev_addr; | ||
312 | paddrh = ((u16) mac[5] << 8) | mac[4]; | ||
313 | paddrm = ((u16) mac[3] << 8) | mac[2]; | ||
314 | paddrl = ((u16) mac[1] << 8) | mac[0]; | ||
315 | |||
316 | W16(ep, sen_paddrh, paddrh); | ||
317 | W16(ep, sen_paddrm, paddrm); | ||
318 | W16(ep, sen_paddrl, paddrl); | ||
319 | |||
320 | W16(ep, sen_pper, 0); | ||
321 | W16(ep, sen_taddrl, 0); | ||
322 | W16(ep, sen_taddrm, 0); | ||
323 | W16(ep, sen_taddrh, 0); | ||
324 | |||
325 | fs_init_bds(dev); | ||
326 | |||
327 | scc_cr_cmd(fep, CPM_CR_INIT_TRX); | ||
328 | |||
329 | W16(sccp, scc_scce, 0xffff); | ||
330 | |||
331 | /* Enable interrupts we wish to service. | ||
332 | */ | ||
333 | W16(sccp, scc_sccm, SCCE_ENET_TXE | SCCE_ENET_RXF | SCCE_ENET_TXB); | ||
334 | |||
335 | /* Set GSMR_H to enable all normal operating modes. | ||
336 | * Set GSMR_L to enable Ethernet to MC68160. | ||
337 | */ | ||
338 | W32(sccp, scc_gsmrh, 0); | ||
339 | W32(sccp, scc_gsmrl, | ||
340 | SCC_GSMRL_TCI | SCC_GSMRL_TPL_48 | SCC_GSMRL_TPP_10 | | ||
341 | SCC_GSMRL_MODE_ENET); | ||
342 | |||
343 | /* Set sync/delimiters. | ||
344 | */ | ||
345 | W16(sccp, scc_dsr, 0xd555); | ||
346 | |||
347 | /* Set processing mode. Use Ethernet CRC, catch broadcast, and | ||
348 | * start frame search 22 bit times after RENA. | ||
349 | */ | ||
350 | W16(sccp, scc_psmr, SCC_PSMR_ENCRC | SCC_PSMR_NIB22); | ||
351 | |||
352 | /* Set full duplex mode if needed */ | ||
353 | if (fep->phydev->duplex) | ||
354 | S16(sccp, scc_psmr, SCC_PSMR_LPB | SCC_PSMR_FDE); | ||
355 | |||
356 | S32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT); | ||
357 | } | ||
358 | |||
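The CRC preset/mask pair programmed in restart() above (all-ones into sen_cpres, 0xdebb20e3 into sen_cmask) matches the standard Ethernet FCS convention: a reflected CRC-32 seeded with all ones, run over a good frame plus its trailing FCS, always settles on the residue 0xdebb20e3, which is what the comparator checks for. A self-contained demonstration using nothing beyond the textbook bitwise CRC-32:

```c
#include <stdint.h>
#include <stdio.h>

/* Reflected CRC-32 (poly 0xEDB88320), init passed in, no final XOR:
 * this mirrors the raw value the SCC's CRC checker accumulates. */
static uint32_t crc32_raw(uint32_t crc, const uint8_t *p, unsigned len)
{
    while (len--) {
        crc ^= *p++;
        for (int i = 0; i < 8; i++)
            crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1u));
    }
    return crc;
}

int main(void)
{
    uint8_t frame[64] = "an arbitrary payload";
    unsigned len = 20;

    /* Transmitter: the FCS is the complemented register, appended
     * least-significant byte first. */
    uint32_t fcs = crc32_raw(0xFFFFFFFFu, frame, len) ^ 0xFFFFFFFFu;
    frame[len + 0] = fcs & 0xff;
    frame[len + 1] = (fcs >> 8) & 0xff;
    frame[len + 2] = (fcs >> 16) & 0xff;
    frame[len + 3] = (fcs >> 24) & 0xff;

    /* Receiver: running the checker over frame + FCS always lands on
     * the residue programmed into sen_cmask. */
    printf("residue = 0x%08x\n",
           crc32_raw(0xFFFFFFFFu, frame, len + 4));  /* 0xdebb20e3 */
    return 0;
}
```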
359 | static void stop(struct net_device *dev) | ||
360 | { | ||
361 | struct fs_enet_private *fep = netdev_priv(dev); | ||
362 | scc_t __iomem *sccp = fep->scc.sccp; | ||
363 | int i; | ||
364 | |||
365 | for (i = 0; (R16(sccp, scc_sccm) == 0) && i < SCC_RESET_DELAY; i++) | ||
366 | udelay(1); | ||
367 | |||
368 | if (i == SCC_RESET_DELAY) | ||
369 | dev_warn(fep->dev, "SCC timeout on graceful transmit stop\n"); | ||
370 | |||
371 | W16(sccp, scc_sccm, 0); | ||
372 | C32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT); | ||
373 | |||
374 | fs_cleanup_bds(dev); | ||
375 | } | ||
376 | |||
377 | static void napi_clear_rx_event(struct net_device *dev) | ||
378 | { | ||
379 | struct fs_enet_private *fep = netdev_priv(dev); | ||
380 | scc_t __iomem *sccp = fep->scc.sccp; | ||
381 | |||
382 | W16(sccp, scc_scce, SCC_NAPI_RX_EVENT_MSK); | ||
383 | } | ||
384 | |||
385 | static void napi_enable_rx(struct net_device *dev) | ||
386 | { | ||
387 | struct fs_enet_private *fep = netdev_priv(dev); | ||
388 | scc_t __iomem *sccp = fep->scc.sccp; | ||
389 | |||
390 | S16(sccp, scc_sccm, SCC_NAPI_RX_EVENT_MSK); | ||
391 | } | ||
392 | |||
393 | static void napi_disable_rx(struct net_device *dev) | ||
394 | { | ||
395 | struct fs_enet_private *fep = netdev_priv(dev); | ||
396 | scc_t __iomem *sccp = fep->scc.sccp; | ||
397 | |||
398 | C16(sccp, scc_sccm, SCC_NAPI_RX_EVENT_MSK); | ||
399 | } | ||
400 | |||
401 | static void rx_bd_done(struct net_device *dev) | ||
402 | { | ||
403 | /* nothing */ | ||
404 | } | ||
405 | |||
406 | static void tx_kickstart(struct net_device *dev) | ||
407 | { | ||
408 | /* nothing */ | ||
409 | } | ||
410 | |||
411 | static u32 get_int_events(struct net_device *dev) | ||
412 | { | ||
413 | struct fs_enet_private *fep = netdev_priv(dev); | ||
414 | scc_t __iomem *sccp = fep->scc.sccp; | ||
415 | |||
416 | return (u32) R16(sccp, scc_scce); | ||
417 | } | ||
418 | |||
419 | static void clear_int_events(struct net_device *dev, u32 int_events) | ||
420 | { | ||
421 | struct fs_enet_private *fep = netdev_priv(dev); | ||
422 | scc_t __iomem *sccp = fep->scc.sccp; | ||
423 | |||
424 | W16(sccp, scc_scce, int_events & 0xffff); | ||
425 | } | ||
426 | |||
427 | static void ev_error(struct net_device *dev, u32 int_events) | ||
428 | { | ||
429 | struct fs_enet_private *fep = netdev_priv(dev); | ||
430 | |||
431 | dev_warn(fep->dev, "SCC ERROR(s) 0x%x\n", int_events); | ||
432 | } | ||
433 | |||
434 | static int get_regs(struct net_device *dev, void *p, int *sizep) | ||
435 | { | ||
436 | struct fs_enet_private *fep = netdev_priv(dev); | ||
437 | |||
438 | if (*sizep < sizeof(scc_t) + sizeof(scc_enet_t __iomem *)) | ||
439 | return -EINVAL; | ||
440 | |||
441 | memcpy_fromio(p, fep->scc.sccp, sizeof(scc_t)); | ||
442 | p = (char *)p + sizeof(scc_t); | ||
443 | |||
444 | memcpy_fromio(p, fep->scc.ep, sizeof(scc_enet_t __iomem *)); | ||
445 | |||
446 | return 0; | ||
447 | } | ||
448 | |||
449 | static int get_regs_len(struct net_device *dev) | ||
450 | { | ||
451 | return sizeof(scc_t) + sizeof(scc_enet_t __iomem *); | ||
452 | } | ||
453 | |||
454 | static void tx_restart(struct net_device *dev) | ||
455 | { | ||
456 | struct fs_enet_private *fep = netdev_priv(dev); | ||
457 | |||
458 | scc_cr_cmd(fep, CPM_CR_RESTART_TX); | ||
459 | } | ||
460 | |||
461 | |||
462 | |||
463 | /*************************************************************************/ | ||
464 | |||
465 | const struct fs_ops fs_scc_ops = { | ||
466 | .setup_data = setup_data, | ||
467 | .cleanup_data = cleanup_data, | ||
468 | .set_multicast_list = set_multicast_list, | ||
469 | .restart = restart, | ||
470 | .stop = stop, | ||
471 | .napi_clear_rx_event = napi_clear_rx_event, | ||
472 | .napi_enable_rx = napi_enable_rx, | ||
473 | .napi_disable_rx = napi_disable_rx, | ||
474 | .rx_bd_done = rx_bd_done, | ||
475 | .tx_kickstart = tx_kickstart, | ||
476 | .get_int_events = get_int_events, | ||
477 | .clear_int_events = clear_int_events, | ||
478 | .ev_error = ev_error, | ||
479 | .get_regs = get_regs, | ||
480 | .get_regs_len = get_regs_len, | ||
481 | .tx_restart = tx_restart, | ||
482 | .allocate_bd = allocate_bd, | ||
483 | .free_bd = free_bd, | ||
484 | }; | ||
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c new file mode 100644 index 000000000000..b09270b5d0a5 --- /dev/null +++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c | |||
@@ -0,0 +1,246 @@ | |||
1 | /* | ||
2 | * Combined Ethernet driver for Motorola MPC8xx and MPC82xx. | ||
3 | * | ||
4 | * Copyright (c) 2003 Intracom S.A. | ||
5 | * by Pantelis Antoniou <panto@intracom.gr> | ||
6 | * | ||
7 | * 2005 (c) MontaVista Software, Inc. | ||
8 | * Vitaly Bordug <vbordug@ru.mvista.com> | ||
9 | * | ||
10 | * This file is licensed under the terms of the GNU General Public License | ||
11 | * version 2. This program is licensed "as is" without any warranty of any | ||
12 | * kind, whether express or implied. | ||
13 | */ | ||
14 | |||
15 | #include <linux/module.h> | ||
16 | #include <linux/ioport.h> | ||
17 | #include <linux/slab.h> | ||
18 | #include <linux/init.h> | ||
19 | #include <linux/interrupt.h> | ||
20 | #include <linux/netdevice.h> | ||
21 | #include <linux/etherdevice.h> | ||
22 | #include <linux/mii.h> | ||
23 | #include <linux/platform_device.h> | ||
24 | #include <linux/mdio-bitbang.h> | ||
25 | #include <linux/of_mdio.h> | ||
26 | #include <linux/of_platform.h> | ||
27 | |||
28 | #include "fs_enet.h" | ||
29 | |||
30 | struct bb_info { | ||
31 | struct mdiobb_ctrl ctrl; | ||
32 | __be32 __iomem *dir; | ||
33 | __be32 __iomem *dat; | ||
34 | u32 mdio_msk; | ||
35 | u32 mdc_msk; | ||
36 | }; | ||
37 | |||
38 | /* FIXME: If any other users of GPIO crop up, then these will have to | ||
39 | * have some sort of global synchronization to avoid races with other | ||
40 | * pins on the same port. The ideal solution would probably be to | ||
41 | * bind the ports to a GPIO driver, and have this be a client of it. | ||
42 | */ | ||
43 | static inline void bb_set(u32 __iomem *p, u32 m) | ||
44 | { | ||
45 | out_be32(p, in_be32(p) | m); | ||
46 | } | ||
47 | |||
48 | static inline void bb_clr(u32 __iomem *p, u32 m) | ||
49 | { | ||
50 | out_be32(p, in_be32(p) & ~m); | ||
51 | } | ||
52 | |||
53 | static inline int bb_read(u32 __iomem *p, u32 m) | ||
54 | { | ||
55 | return (in_be32(p) & m) != 0; | ||
56 | } | ||
57 | |||
58 | static inline void mdio_dir(struct mdiobb_ctrl *ctrl, int dir) | ||
59 | { | ||
60 | struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); | ||
61 | |||
62 | if (dir) | ||
63 | bb_set(bitbang->dir, bitbang->mdio_msk); | ||
64 | else | ||
65 | bb_clr(bitbang->dir, bitbang->mdio_msk); | ||
66 | |||
67 | /* Read back to flush the write. */ | ||
68 | in_be32(bitbang->dir); | ||
69 | } | ||
70 | |||
71 | static inline int mdio_read(struct mdiobb_ctrl *ctrl) | ||
72 | { | ||
73 | struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); | ||
74 | return bb_read(bitbang->dat, bitbang->mdio_msk); | ||
75 | } | ||
76 | |||
77 | static inline void mdio(struct mdiobb_ctrl *ctrl, int what) | ||
78 | { | ||
79 | struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); | ||
80 | |||
81 | if (what) | ||
82 | bb_set(bitbang->dat, bitbang->mdio_msk); | ||
83 | else | ||
84 | bb_clr(bitbang->dat, bitbang->mdio_msk); | ||
85 | |||
86 | /* Read back to flush the write. */ | ||
87 | in_be32(bitbang->dat); | ||
88 | } | ||
89 | |||
90 | static inline void mdc(struct mdiobb_ctrl *ctrl, int what) | ||
91 | { | ||
92 | struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); | ||
93 | |||
94 | if (what) | ||
95 | bb_set(bitbang->dat, bitbang->mdc_msk); | ||
96 | else | ||
97 | bb_clr(bitbang->dat, bitbang->mdc_msk); | ||
98 | |||
99 | /* Read back to flush the write. */ | ||
100 | in_be32(bitbang->dat); | ||
101 | } | ||
102 | |||
103 | static struct mdiobb_ops bb_ops = { | ||
104 | .owner = THIS_MODULE, | ||
105 | .set_mdc = mdc, | ||
106 | .set_mdio_dir = mdio_dir, | ||
107 | .set_mdio_data = mdio, | ||
108 | .get_mdio_data = mdio_read, | ||
109 | }; | ||
110 | |||
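These four callbacks are everything the mdio-bitbang core needs; the core itself sequences them into complete IEEE 802.3 Clause 22 management frames. The sketch below shows the bit order of a read frame as the core would clock it out through set_mdc/set_mdio_data; it only prints the bits, and the timing and turnaround sampling details are simplified:

```c
#include <stdio.h>

/* Clause 22 read frame, as the mdio-bitbang core would sequence it:
 * 32 preamble 1s, start (01), read opcode (10), 5-bit PHY address,
 * 5-bit register address, then the direction flips (set_mdio_dir)
 * and 16 data bits are sampled with get_mdio_data.  Here the "pin"
 * is just a printed character. */
static void clock_out_bit(int bit)
{
    putchar(bit ? '1' : '0');  /* real driver wiggles MDIO, pulses MDC */
}

static void mdio_read_frame(int phy, int reg)
{
    int i;

    for (i = 0; i < 32; i++)
        clock_out_bit(1);                     /* preamble  */
    clock_out_bit(0); clock_out_bit(1);       /* start: 01 */
    clock_out_bit(1); clock_out_bit(0);       /* read:  10 */
    for (i = 4; i >= 0; i--)
        clock_out_bit((phy >> i) & 1);        /* PHY addr, MSB first */
    for (i = 4; i >= 0; i--)
        clock_out_bit((reg >> i) & 1);        /* reg addr, MSB first */
    printf(" <turnaround> + 16 data bits driven by the PHY\n");
}

int main(void)
{
    mdio_read_frame(1, 2);  /* PHY address 1, register 2 (MII_PHYSID1) */
    return 0;
}
```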
111 | static int __devinit fs_mii_bitbang_init(struct mii_bus *bus, | ||
112 | struct device_node *np) | ||
113 | { | ||
114 | struct resource res; | ||
115 | const u32 *data; | ||
116 | int mdio_pin, mdc_pin, len; | ||
117 | struct bb_info *bitbang = bus->priv; | ||
118 | |||
119 | int ret = of_address_to_resource(np, 0, &res); | ||
120 | if (ret) | ||
121 | return ret; | ||
122 | |||
123 | if (resource_size(&res) <= 13) | ||
124 | return -ENODEV; | ||
125 | |||
126 | /* This should really encode the pin number as well, but all | ||
127 | * we get is an int, and the odds of multiple bitbang mdio buses | ||
128 | * are low enough that it's not worth going too crazy. | ||
129 | */ | ||
130 | snprintf(bus->id, MII_BUS_ID_SIZE, "%x", res.start); | ||
131 | |||
132 | data = of_get_property(np, "fsl,mdio-pin", &len); | ||
133 | if (!data || len != 4) | ||
134 | return -ENODEV; | ||
135 | mdio_pin = *data; | ||
136 | |||
137 | data = of_get_property(np, "fsl,mdc-pin", &len); | ||
138 | if (!data || len != 4) | ||
139 | return -ENODEV; | ||
140 | mdc_pin = *data; | ||
141 | |||
142 | bitbang->dir = ioremap(res.start, resource_size(&res)); | ||
143 | if (!bitbang->dir) | ||
144 | return -ENOMEM; | ||
145 | |||
146 | bitbang->dat = bitbang->dir + 4; | ||
147 | bitbang->mdio_msk = 1 << (31 - mdio_pin); | ||
148 | bitbang->mdc_msk = 1 << (31 - mdc_pin); | ||
149 | |||
150 | return 0; | ||
151 | } | ||
152 | |||
153 | static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev) | ||
154 | { | ||
155 | struct mii_bus *new_bus; | ||
156 | struct bb_info *bitbang; | ||
157 | int ret = -ENOMEM; | ||
158 | |||
159 | bitbang = kzalloc(sizeof(struct bb_info), GFP_KERNEL); | ||
160 | if (!bitbang) | ||
161 | goto out; | ||
162 | |||
163 | bitbang->ctrl.ops = &bb_ops; | ||
164 | |||
165 | new_bus = alloc_mdio_bitbang(&bitbang->ctrl); | ||
166 | if (!new_bus) | ||
167 | goto out_free_priv; | ||
168 | |||
169 | new_bus->name = "CPM2 Bitbanged MII"; | ||
170 | |||
171 | ret = fs_mii_bitbang_init(new_bus, ofdev->dev.of_node); | ||
172 | if (ret) | ||
173 | goto out_free_bus; | ||
174 | |||
175 | new_bus->phy_mask = ~0; | ||
176 | new_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); | ||
177 | if (!new_bus->irq) | ||
178 | goto out_unmap_regs; | ||
179 | |||
180 | new_bus->parent = &ofdev->dev; | ||
181 | dev_set_drvdata(&ofdev->dev, new_bus); | ||
182 | |||
183 | ret = of_mdiobus_register(new_bus, ofdev->dev.of_node); | ||
184 | if (ret) | ||
185 | goto out_free_irqs; | ||
186 | |||
187 | return 0; | ||
188 | |||
189 | out_free_irqs: | ||
190 | dev_set_drvdata(&ofdev->dev, NULL); | ||
191 | kfree(new_bus->irq); | ||
192 | out_unmap_regs: | ||
193 | iounmap(bitbang->dir); | ||
194 | out_free_bus: | ||
195 | free_mdio_bitbang(new_bus); | ||
196 | out_free_priv: | ||
197 | kfree(bitbang); | ||
198 | out: | ||
199 | return ret; | ||
200 | } | ||
201 | |||
202 | static int fs_enet_mdio_remove(struct platform_device *ofdev) | ||
203 | { | ||
204 | struct mii_bus *bus = dev_get_drvdata(&ofdev->dev); | ||
205 | struct bb_info *bitbang = bus->priv; | ||
206 | |||
207 | mdiobus_unregister(bus); | ||
208 | dev_set_drvdata(&ofdev->dev, NULL); | ||
209 | kfree(bus->irq); | ||
210 | free_mdio_bitbang(bus); | ||
211 | iounmap(bitbang->dir); | ||
212 | kfree(bitbang); | ||
213 | |||
214 | return 0; | ||
215 | } | ||
216 | |||
217 | static struct of_device_id fs_enet_mdio_bb_match[] = { | ||
218 | { | ||
219 | .compatible = "fsl,cpm2-mdio-bitbang", | ||
220 | }, | ||
221 | {}, | ||
222 | }; | ||
223 | MODULE_DEVICE_TABLE(of, fs_enet_mdio_bb_match); | ||
224 | |||
225 | static struct platform_driver fs_enet_bb_mdio_driver = { | ||
226 | .driver = { | ||
227 | .name = "fsl-bb-mdio", | ||
228 | .owner = THIS_MODULE, | ||
229 | .of_match_table = fs_enet_mdio_bb_match, | ||
230 | }, | ||
231 | .probe = fs_enet_mdio_probe, | ||
232 | .remove = fs_enet_mdio_remove, | ||
233 | }; | ||
234 | |||
235 | static int fs_enet_mdio_bb_init(void) | ||
236 | { | ||
237 | return platform_driver_register(&fs_enet_bb_mdio_driver); | ||
238 | } | ||
239 | |||
240 | static void fs_enet_mdio_bb_exit(void) | ||
241 | { | ||
242 | platform_driver_unregister(&fs_enet_bb_mdio_driver); | ||
243 | } | ||
244 | |||
245 | module_init(fs_enet_mdio_bb_init); | ||
246 | module_exit(fs_enet_mdio_bb_exit); | ||
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c new file mode 100644 index 000000000000..e0e9d6c35d83 --- /dev/null +++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c | |||
@@ -0,0 +1,251 @@ | |||
1 | /* | ||
2 | * Combined Ethernet driver for Motorola MPC8xx and MPC82xx. | ||
3 | * | ||
4 | * Copyright (c) 2003 Intracom S.A. | ||
5 | * by Pantelis Antoniou <panto@intracom.gr> | ||
6 | * | ||
7 | * 2005 (c) MontaVista Software, Inc. | ||
8 | * Vitaly Bordug <vbordug@ru.mvista.com> | ||
9 | * | ||
10 | * This file is licensed under the terms of the GNU General Public License | ||
11 | * version 2. This program is licensed "as is" without any warranty of any | ||
12 | * kind, whether express or implied. | ||
13 | */ | ||
14 | |||
15 | #include <linux/module.h> | ||
16 | #include <linux/types.h> | ||
17 | #include <linux/kernel.h> | ||
18 | #include <linux/string.h> | ||
19 | #include <linux/ptrace.h> | ||
20 | #include <linux/errno.h> | ||
21 | #include <linux/ioport.h> | ||
22 | #include <linux/slab.h> | ||
23 | #include <linux/interrupt.h> | ||
24 | #include <linux/init.h> | ||
25 | #include <linux/delay.h> | ||
26 | #include <linux/netdevice.h> | ||
27 | #include <linux/etherdevice.h> | ||
28 | #include <linux/skbuff.h> | ||
29 | #include <linux/spinlock.h> | ||
30 | #include <linux/mii.h> | ||
31 | #include <linux/ethtool.h> | ||
32 | #include <linux/bitops.h> | ||
33 | #include <linux/platform_device.h> | ||
34 | #include <linux/of_platform.h> | ||
35 | |||
36 | #include <asm/pgtable.h> | ||
37 | #include <asm/irq.h> | ||
38 | #include <asm/uaccess.h> | ||
39 | #include <asm/mpc5xxx.h> | ||
40 | |||
41 | #include "fs_enet.h" | ||
42 | #include "fec.h" | ||
43 | |||
44 | /* Make MII read/write commands for the FEC. | ||
45 | */ | ||
46 | #define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18)) | ||
47 | #define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | (VAL & 0xffff)) | ||
48 | #define mk_mii_end 0 | ||
49 | |||
50 | #define FEC_MII_LOOPS 10000 | ||
51 | |||
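The magic constants in mk_mii_read/mk_mii_write encode a complete Clause 22 frame in a single register write: start bits (01) in bits 31:30, the opcode in 29:28 (10 = read, 01 = write), the PHY address in 27:23, the register in 22:18, and the turnaround pattern (10) in 17:16, which is where 0x60020000 and 0x50020000 come from. A quick decomposition (the sample PHY and register values are arbitrary):

```c
#include <stdint.h>
#include <stdio.h>

/* Same packing as mk_mii_read/mk_mii_write: ST (01) in bits 31:30,
 * OP in 29:28 (10 = read, 01 = write), PHY address in 27:23,
 * register in 22:18, turnaround (10) in 17:16, data in 15:0.
 * 0x60020000 is ST|OP(read)|TA; 0x50020000 is ST|OP(write)|TA. */
#define MK_MII_READ(reg)       (0x60020000u | (((reg) & 0x1f) << 18))
#define MK_MII_WRITE(reg, val) (0x50020000u | (((reg) & 0x1f) << 18) | \
                                ((val) & 0xffff))

int main(void)
{
    int phy_id = 3, reg = 1;  /* sample values; reg 1 is MII_BMSR */
    uint32_t cmd = ((uint32_t)phy_id << 23) | MK_MII_READ(reg);

    printf("fec_mii_data word = 0x%08x\n", cmd);  /* 0x61860000 */
    printf("ST=%u OP=%u PA=%u RA=%u TA=%u\n",
           (cmd >> 30) & 3, (cmd >> 28) & 3,
           (cmd >> 23) & 0x1f, (cmd >> 18) & 0x1f, (cmd >> 16) & 3);
    return 0;
}
```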
52 | static int fs_enet_fec_mii_read(struct mii_bus *bus , int phy_id, int location) | ||
53 | { | ||
54 | struct fec_info *fec = bus->priv; | ||
55 | struct fec __iomem *fecp = fec->fecp; | ||
56 | int i, ret = -1; | ||
57 | |||
58 | BUG_ON((in_be32(&fecp->fec_r_cntrl) & FEC_RCNTRL_MII_MODE) == 0); | ||
59 | |||
60 | /* Add PHY address to register command. */ | ||
61 | out_be32(&fecp->fec_mii_data, (phy_id << 23) | mk_mii_read(location)); | ||
62 | |||
63 | for (i = 0; i < FEC_MII_LOOPS; i++) | ||
64 | if ((in_be32(&fecp->fec_ievent) & FEC_ENET_MII) != 0) | ||
65 | break; | ||
66 | |||
67 | if (i < FEC_MII_LOOPS) { | ||
68 | out_be32(&fecp->fec_ievent, FEC_ENET_MII); | ||
69 | ret = in_be32(&fecp->fec_mii_data) & 0xffff; | ||
70 | } | ||
71 | |||
72 | return ret; | ||
73 | } | ||
74 | |||
75 | static int fs_enet_fec_mii_write(struct mii_bus *bus, int phy_id, int location, u16 val) | ||
76 | { | ||
77 | struct fec_info *fec = bus->priv; | ||
78 | struct fec __iomem *fecp = fec->fecp; | ||
79 | int i; | ||
80 | |||
81 | /* this must never happen */ | ||
82 | BUG_ON((in_be32(&fecp->fec_r_cntrl) & FEC_RCNTRL_MII_MODE) == 0); | ||
83 | |||
84 | /* Add PHY address to register command. */ | ||
85 | out_be32(&fecp->fec_mii_data, (phy_id << 23) | mk_mii_write(location, val)); | ||
86 | |||
87 | for (i = 0; i < FEC_MII_LOOPS; i++) | ||
88 | if ((in_be32(&fecp->fec_ievent) & FEC_ENET_MII) != 0) | ||
89 | break; | ||
90 | |||
91 | if (i < FEC_MII_LOOPS) | ||
92 | out_be32(&fecp->fec_ievent, FEC_ENET_MII); | ||
93 | |||
94 | return 0; | ||
95 | |||
96 | } | ||
97 | |||
98 | static int fs_enet_fec_mii_reset(struct mii_bus *bus) | ||
99 | { | ||
100 | /* nothing here - for now */ | ||
101 | return 0; | ||
102 | } | ||
103 | |||
104 | static struct of_device_id fs_enet_mdio_fec_match[]; | ||
105 | static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev) | ||
106 | { | ||
107 | const struct of_device_id *match; | ||
108 | struct resource res; | ||
109 | struct mii_bus *new_bus; | ||
110 | struct fec_info *fec; | ||
111 | int (*get_bus_freq)(struct device_node *); | ||
112 | int ret = -ENOMEM, clock, speed; | ||
113 | |||
114 | match = of_match_device(fs_enet_mdio_fec_match, &ofdev->dev); | ||
115 | if (!match) | ||
116 | return -EINVAL; | ||
117 | get_bus_freq = match->data; | ||
118 | |||
119 | new_bus = mdiobus_alloc(); | ||
120 | if (!new_bus) | ||
121 | goto out; | ||
122 | |||
123 | fec = kzalloc(sizeof(struct fec_info), GFP_KERNEL); | ||
124 | if (!fec) | ||
125 | goto out_mii; | ||
126 | |||
127 | new_bus->priv = fec; | ||
128 | new_bus->name = "FEC MII Bus"; | ||
129 | new_bus->read = &fs_enet_fec_mii_read; | ||
130 | new_bus->write = &fs_enet_fec_mii_write; | ||
131 | new_bus->reset = &fs_enet_fec_mii_reset; | ||
132 | |||
133 | ret = of_address_to_resource(ofdev->dev.of_node, 0, &res); | ||
134 | if (ret) | ||
135 | goto out_res; | ||
136 | |||
137 | snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", res.start); | ||
138 | |||
139 | fec->fecp = ioremap(res.start, resource_size(&res)); | ||
140 | if (!fec->fecp) | ||
141 | goto out_fec; | ||
142 | |||
143 | if (get_bus_freq) { | ||
144 | clock = get_bus_freq(ofdev->dev.of_node); | ||
145 | if (!clock) { | ||
146 | /* Use maximum divider if clock is unknown */ | ||
147 | dev_warn(&ofdev->dev, "could not determine IPS clock\n"); | ||
148 | clock = 0x3F * 5000000; | ||
149 | } | ||
150 | } else | ||
151 | clock = ppc_proc_freq; | ||
152 | |||
153 | /* | ||
154 | * Scale for a MII clock <= 2.5 MHz | ||
155 | * Note that only 6 bits (25:30) are available for MII speed. | ||
156 | */ | ||
157 | speed = (clock + 4999999) / 5000000; | ||
158 | if (speed > 0x3F) { | ||
159 | speed = 0x3F; | ||
160 | dev_err(&ofdev->dev, | ||
161 | "MII clock (%d Hz) exceeds max (2.5 MHz)\n", | ||
162 | clock / speed); | ||
163 | } | ||
164 | |||
165 | fec->mii_speed = speed << 1; | ||
166 | |||
167 | setbits32(&fec->fecp->fec_r_cntrl, FEC_RCNTRL_MII_MODE); | ||
168 | setbits32(&fec->fecp->fec_ecntrl, FEC_ECNTRL_PINMUX | | ||
169 | FEC_ECNTRL_ETHER_EN); | ||
170 | out_be32(&fec->fecp->fec_ievent, FEC_ENET_MII); | ||
171 | clrsetbits_be32(&fec->fecp->fec_mii_speed, 0x7E, fec->mii_speed); | ||
172 | |||
173 | new_bus->phy_mask = ~0; | ||
174 | new_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); | ||
175 | if (!new_bus->irq) | ||
176 | goto out_unmap_regs; | ||
177 | |||
178 | new_bus->parent = &ofdev->dev; | ||
179 | dev_set_drvdata(&ofdev->dev, new_bus); | ||
180 | |||
181 | ret = of_mdiobus_register(new_bus, ofdev->dev.of_node); | ||
182 | if (ret) | ||
183 | goto out_free_irqs; | ||
184 | |||
185 | return 0; | ||
186 | |||
187 | out_free_irqs: | ||
188 | dev_set_drvdata(&ofdev->dev, NULL); | ||
189 | kfree(new_bus->irq); | ||
190 | out_unmap_regs: | ||
191 | iounmap(fec->fecp); | ||
192 | out_res: | ||
193 | out_fec: | ||
194 | kfree(fec); | ||
195 | out_mii: | ||
196 | mdiobus_free(new_bus); | ||
197 | out: | ||
198 | return ret; | ||
199 | } | ||
200 | |||
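To make the divider arithmetic above concrete: rounding the clock up to the next multiple of 5 MHz guarantees MDC = clock / (2 * speed) stays at or below 2.5 MHz, and the 6-bit field saturates at 0x3F exactly as the error path reports. A worked check with sample clock rates (the rates themselves are illustrative):

```c
#include <stdio.h>

int main(void)
{
    /* sample bus clocks; illustrative values only */
    long clocks[] = { 66000000L, 133000000L, 400000000L };

    for (int i = 0; i < 3; i++) {
        long clock = clocks[i];
        int speed = (int)((clock + 4999999L) / 5000000L);

        if (speed > 0x3F)  /* only 6 bits of divider available */
            speed = 0x3F;

        /* 66 MHz  -> speed 14,   MDC ~2.36 MHz (in spec)
         * 133 MHz -> speed 27,   MDC ~2.46 MHz (in spec)
         * 400 MHz -> clamped 63, MDC ~3.17 MHz (over spec: the
         *            driver's dev_err() case) */
        printf("clock %9ld Hz -> speed 0x%02x, MDC %ld Hz, reg 0x%02x\n",
               clock, (unsigned)speed, clock / (2L * speed),
               (unsigned)(speed << 1));
    }
    return 0;
}
```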
201 | static int fs_enet_mdio_remove(struct platform_device *ofdev) | ||
202 | { | ||
203 | struct mii_bus *bus = dev_get_drvdata(&ofdev->dev); | ||
204 | struct fec_info *fec = bus->priv; | ||
205 | |||
206 | mdiobus_unregister(bus); | ||
207 | dev_set_drvdata(&ofdev->dev, NULL); | ||
208 | kfree(bus->irq); | ||
209 | iounmap(fec->fecp); | ||
210 | kfree(fec); | ||
211 | mdiobus_free(bus); | ||
212 | |||
213 | return 0; | ||
214 | } | ||
215 | |||
216 | static struct of_device_id fs_enet_mdio_fec_match[] = { | ||
217 | { | ||
218 | .compatible = "fsl,pq1-fec-mdio", | ||
219 | }, | ||
220 | #if defined(CONFIG_PPC_MPC512x) | ||
221 | { | ||
222 | .compatible = "fsl,mpc5121-fec-mdio", | ||
223 | .data = mpc5xxx_get_bus_frequency, | ||
224 | }, | ||
225 | #endif | ||
226 | {}, | ||
227 | }; | ||
228 | MODULE_DEVICE_TABLE(of, fs_enet_mdio_fec_match); | ||
229 | |||
230 | static struct platform_driver fs_enet_fec_mdio_driver = { | ||
231 | .driver = { | ||
232 | .name = "fsl-fec-mdio", | ||
233 | .owner = THIS_MODULE, | ||
234 | .of_match_table = fs_enet_mdio_fec_match, | ||
235 | }, | ||
236 | .probe = fs_enet_mdio_probe, | ||
237 | .remove = fs_enet_mdio_remove, | ||
238 | }; | ||
239 | |||
240 | static int fs_enet_mdio_fec_init(void) | ||
241 | { | ||
242 | return platform_driver_register(&fs_enet_fec_mdio_driver); | ||
243 | } | ||
244 | |||
245 | static void fs_enet_mdio_fec_exit(void) | ||
246 | { | ||
247 | platform_driver_unregister(&fs_enet_fec_mdio_driver); | ||
248 | } | ||
249 | |||
250 | module_init(fs_enet_mdio_fec_init); | ||
251 | module_exit(fs_enet_mdio_fec_exit); | ||
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c new file mode 100644 index 000000000000..52f4e8ad48e7 --- /dev/null +++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c | |||
@@ -0,0 +1,494 @@ | |||
1 | /* | ||
2 | * Freescale PowerQUICC Ethernet Driver -- MIIM bus implementation | ||
3 | * Provides Bus interface for MIIM regs | ||
4 | * | ||
5 | * Author: Andy Fleming <afleming@freescale.com> | ||
6 | * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com> | ||
7 | * | ||
8 | * Copyright 2002-2004, 2008-2009 Freescale Semiconductor, Inc. | ||
9 | * | ||
10 | * Based on gianfar_mii.c and ucc_geth_mii.c (Li Yang, Kim Phillips) | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or modify it | ||
13 | * under the terms of the GNU General Public License as published by the | ||
14 | * Free Software Foundation; either version 2 of the License, or (at your | ||
15 | * option) any later version. | ||
16 | * | ||
17 | */ | ||
18 | |||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/string.h> | ||
21 | #include <linux/errno.h> | ||
22 | #include <linux/unistd.h> | ||
23 | #include <linux/slab.h> | ||
24 | #include <linux/interrupt.h> | ||
25 | #include <linux/init.h> | ||
26 | #include <linux/delay.h> | ||
27 | #include <linux/netdevice.h> | ||
28 | #include <linux/etherdevice.h> | ||
29 | #include <linux/skbuff.h> | ||
30 | #include <linux/spinlock.h> | ||
31 | #include <linux/mm.h> | ||
32 | #include <linux/module.h> | ||
33 | #include <linux/platform_device.h> | ||
34 | #include <linux/crc32.h> | ||
35 | #include <linux/mii.h> | ||
36 | #include <linux/phy.h> | ||
37 | #include <linux/of.h> | ||
38 | #include <linux/of_address.h> | ||
39 | #include <linux/of_mdio.h> | ||
40 | #include <linux/of_platform.h> | ||
41 | |||
42 | #include <asm/io.h> | ||
43 | #include <asm/irq.h> | ||
44 | #include <asm/uaccess.h> | ||
45 | #include <asm/ucc.h> | ||
46 | |||
47 | #include "gianfar.h" | ||
48 | #include "fsl_pq_mdio.h" | ||
49 | |||
50 | struct fsl_pq_mdio_priv { | ||
51 | void __iomem *map; | ||
52 | struct fsl_pq_mdio __iomem *regs; | ||
53 | }; | ||
54 | |||
55 | /* | ||
56 | * Write value to the PHY at mii_id, register regnum, | ||
57 | * on the bus attached to the local interface, which may be different from the | ||
58 | * generic mdio bus (tied to a single interface), waiting until the write is | ||
59 | * done before returning. This is helpful in programming interfaces like | ||
60 | * the TBI, which control interfaces like on-chip SERDES and are always tied | ||
61 | * to the local mdio pins, which may not be the same as the system mdio bus | ||
62 | * used for controlling the external PHYs, for example. | ||
63 | */ | ||
64 | int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id, | ||
65 | int regnum, u16 value) | ||
66 | { | ||
67 | /* Set the PHY address and the register address we want to write */ | ||
68 | out_be32(®s->miimadd, (mii_id << 8) | regnum); | ||
69 | |||
70 | /* Write out the value we want */ | ||
71 | out_be32(®s->miimcon, value); | ||
72 | |||
73 | /* Wait for the transaction to finish */ | ||
74 | while (in_be32(®s->miimind) & MIIMIND_BUSY) | ||
75 | cpu_relax(); | ||
76 | |||
77 | return 0; | ||
78 | } | ||
79 | |||
80 | /* | ||
81 | * Read the bus for PHY at addr mii_id, register regnum, and | ||
82 | * return the value. Clears miimcom first. All PHY operations are | ||
83 | * done on the bus attached to the local interface, | ||
84 | * which may be different from the generic mdio bus. | ||
85 | * This is helpful in programming interfaces like | ||
86 | * the TBI which, in turn, control interfaces like on-chip SERDES | ||
87 | * and are always tied to the local mdio pins, which may not be the | ||
88 | * same as the system mdio bus used for controlling the external | ||
89 | * PHYs, for example. | ||
90 | int fsl_pq_local_mdio_read(struct fsl_pq_mdio __iomem *regs, | ||
91 | int mii_id, int regnum) | ||
92 | { | ||
93 | u16 value; | ||
94 | |||
95 | /* Set the PHY address and the register address we want to read */ | ||
96 | out_be32(®s->miimadd, (mii_id << 8) | regnum); | ||
97 | |||
98 | /* Clear miimcom, and then initiate a read */ | ||
99 | out_be32(®s->miimcom, 0); | ||
100 | out_be32(®s->miimcom, MII_READ_COMMAND); | ||
101 | |||
102 | /* Wait for the transaction to finish */ | ||
103 | while (in_be32(®s->miimind) & (MIIMIND_NOTVALID | MIIMIND_BUSY)) | ||
104 | cpu_relax(); | ||
105 | |||
106 | /* Grab the value of the register from miimstat */ | ||
107 | value = in_be32(®s->miimstat); | ||
108 | |||
109 | return value; | ||
110 | } | ||
111 | |||
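Both helpers follow the same MIIM sequence: pack the PHY and register numbers into miimadd as (mii_id << 8) | regnum, kick off the transaction, and spin until the BUSY indication clears. A host-side model of that handshake with the hardware stubbed out (field names mirror the real block, but the instant completion is obviously a stand-in):

```c
#include <stdint.h>
#include <stdio.h>

#define MIIMIND_BUSY 0x00000001

/* Host-side model of the MIIM handshake; the instant completion in
 * model_write() is a stub for real hardware. */
struct miim_model {
    uint32_t miimadd, miimcon, miimind;
};

static void model_write(struct miim_model *r, int mii_id, int regnum,
                        uint16_t value)
{
    r->miimadd = (mii_id << 8) | regnum; /* PHY addr above, reg below */
    r->miimcon = value;                  /* writing miimcon starts it */
    r->miimind = 0;                      /* stub: completes at once   */

    while (r->miimind & MIIMIND_BUSY)
        ;                                /* real code: cpu_relax()    */
}

int main(void)
{
    struct miim_model m = { 0 };

    /* PHY 5, register 0 (BMCR), enable + restart autonegotiation */
    model_write(&m, 5, 0, 0x1200);
    printf("miimadd = 0x%04x\n", (unsigned)m.miimadd);  /* 0x0500 */
    return 0;
}
```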
112 | static struct fsl_pq_mdio __iomem *fsl_pq_mdio_get_regs(struct mii_bus *bus) | ||
113 | { | ||
114 | struct fsl_pq_mdio_priv *priv = bus->priv; | ||
115 | |||
116 | return priv->regs; | ||
117 | } | ||
118 | |||
119 | /* | ||
120 | * Write value to the PHY at mii_id at register regnum, | ||
121 | * on the bus, waiting until the write is done before returning. | ||
122 | */ | ||
123 | int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value) | ||
124 | { | ||
125 | struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus); | ||
126 | |||
127 | /* Write to the local MII regs */ | ||
128 | return fsl_pq_local_mdio_write(regs, mii_id, regnum, value); | ||
129 | } | ||
130 | |||
131 | /* | ||
132 | * Read the bus for PHY at addr mii_id, register regnum, and | ||
133 | * return the value. Clears miimcom first. | ||
134 | */ | ||
135 | int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum) | ||
136 | { | ||
137 | struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus); | ||
138 | |||
139 | /* Read the local MII regs */ | ||
140 | return fsl_pq_local_mdio_read(regs, mii_id, regnum); | ||
141 | } | ||
142 | |||
143 | /* Reset the MIIM registers, and wait for the bus to free */ | ||
144 | static int fsl_pq_mdio_reset(struct mii_bus *bus) | ||
145 | { | ||
146 | struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus); | ||
147 | int timeout = PHY_INIT_TIMEOUT; | ||
148 | |||
149 | mutex_lock(&bus->mdio_lock); | ||
150 | |||
151 | /* Reset the management interface */ | ||
152 | out_be32(®s->miimcfg, MIIMCFG_RESET); | ||
153 | |||
154 | /* Setup the MII Mgmt clock speed */ | ||
155 | out_be32(®s->miimcfg, MIIMCFG_INIT_VALUE); | ||
156 | |||
157 | /* Wait until the bus is free */ | ||
158 | while ((in_be32(®s->miimind) & MIIMIND_BUSY) && timeout--) | ||
159 | cpu_relax(); | ||
160 | |||
161 | mutex_unlock(&bus->mdio_lock); | ||
162 | |||
163 | if (timeout < 0) { | ||
164 | printk(KERN_ERR "%s: The MII Bus is stuck!\n", | ||
165 | bus->name); | ||
166 | return -EBUSY; | ||
167 | } | ||
168 | |||
169 | return 0; | ||
170 | } | ||
171 | |||
172 | void fsl_pq_mdio_bus_name(char *name, struct device_node *np) | ||
173 | { | ||
174 | const u32 *addr; | ||
175 | u64 taddr = OF_BAD_ADDR; | ||
176 | |||
177 | addr = of_get_address(np, 0, NULL, NULL); | ||
178 | if (addr) | ||
179 | taddr = of_translate_address(np, addr); | ||
180 | |||
181 | snprintf(name, MII_BUS_ID_SIZE, "%s@%llx", np->name, | ||
182 | (unsigned long long)taddr); | ||
183 | } | ||
184 | EXPORT_SYMBOL_GPL(fsl_pq_mdio_bus_name); | ||
185 | |||
186 | /* Scan the bus in reverse, looking for an empty spot */ | ||
187 | static int fsl_pq_mdio_find_free(struct mii_bus *new_bus) | ||
188 | { | ||
189 | int i; | ||
190 | |||
191 | for (i = PHY_MAX_ADDR; i > 0; i--) { | ||
192 | u32 phy_id; | ||
193 | |||
194 | if (get_phy_id(new_bus, i, &phy_id)) | ||
195 | return -1; | ||
196 | |||
197 | if (phy_id == 0xffffffff) | ||
198 | break; | ||
199 | } | ||
200 | |||
201 | return i; | ||
202 | } | ||
203 | |||
204 | |||
205 | #if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE) | ||
206 | static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct device_node *np) | ||
207 | { | ||
208 | struct gfar __iomem *enet_regs; | ||
209 | |||
210 | /* | ||
211 | * This is mildly evil, but so is our hardware for doing this. | ||
212 | * Also, we have to cast back to struct gfar because of | ||
213 | * definition weirdness done in gianfar.h. | ||
214 | */ | ||
215 | if (of_device_is_compatible(np, "fsl,gianfar-mdio") || | ||
216 | of_device_is_compatible(np, "fsl,gianfar-tbi") || | ||
217 | of_device_is_compatible(np, "gianfar")) { | ||
218 | enet_regs = (struct gfar __iomem *)regs; | ||
219 | return &enet_regs->tbipa; | ||
220 | } else if (of_device_is_compatible(np, "fsl,etsec2-mdio") || | ||
221 | of_device_is_compatible(np, "fsl,etsec2-tbi")) { | ||
222 | return of_iomap(np, 1); | ||
223 | } else | ||
224 | return NULL; | ||
225 | } | ||
226 | #endif | ||
227 | |||
228 | |||
229 | #if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE) | ||
230 | static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id) | ||
231 | { | ||
232 | struct device_node *np = NULL; | ||
233 | int err = 0; | ||
234 | |||
235 | for_each_compatible_node(np, NULL, "ucc_geth") { | ||
236 | struct resource tempres; | ||
237 | |||
238 | err = of_address_to_resource(np, 0, &tempres); | ||
239 | if (err) | ||
240 | continue; | ||
241 | |||
242 | /* if our mdio regs fall within this UCC regs range */ | ||
243 | if ((start >= tempres.start) && (end <= tempres.end)) { | ||
244 | /* Find the id of the UCC */ | ||
245 | const u32 *id; | ||
246 | |||
247 | id = of_get_property(np, "cell-index", NULL); | ||
248 | if (!id) { | ||
249 | id = of_get_property(np, "device-id", NULL); | ||
250 | if (!id) | ||
251 | continue; | ||
252 | } | ||
253 | |||
254 | *ucc_id = *id; | ||
255 | |||
256 | return 0; | ||
257 | } | ||
258 | } | ||
259 | |||
260 | if (err) | ||
261 | return err; | ||
262 | else | ||
263 | return -EINVAL; | ||
264 | } | ||
265 | #endif | ||
266 | |||
267 | |||
268 | static int fsl_pq_mdio_probe(struct platform_device *ofdev) | ||
269 | { | ||
270 | struct device_node *np = ofdev->dev.of_node; | ||
271 | struct device_node *tbi; | ||
272 | struct fsl_pq_mdio_priv *priv; | ||
273 | struct fsl_pq_mdio __iomem *regs = NULL; | ||
274 | void __iomem *map; | ||
275 | u32 __iomem *tbipa; | ||
276 | struct mii_bus *new_bus; | ||
277 | int tbiaddr = -1; | ||
278 | const u32 *addrp; | ||
279 | u64 addr = 0, size = 0; | ||
280 | int err; | ||
281 | |||
282 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); | ||
283 | if (!priv) | ||
284 | return -ENOMEM; | ||
285 | |||
286 | new_bus = mdiobus_alloc(); | ||
287 | if (!new_bus) { | ||
288 | err = -ENOMEM; | ||
289 | goto err_free_priv; | ||
290 | } | ||
291 | |||
292 | new_bus->name = "Freescale PowerQUICC MII Bus"; | ||
293 | new_bus->read = &fsl_pq_mdio_read; | ||
294 | new_bus->write = &fsl_pq_mdio_write; | ||
295 | new_bus->reset = &fsl_pq_mdio_reset; | ||
296 | new_bus->priv = priv; | ||
297 | fsl_pq_mdio_bus_name(new_bus->id, np); | ||
298 | |||
299 | addrp = of_get_address(np, 0, &size, NULL); | ||
300 | if (!addrp) { | ||
301 | err = -EINVAL; | ||
302 | goto err_free_bus; | ||
303 | } | ||
304 | |||
305 | /* Set the PHY base address */ | ||
306 | addr = of_translate_address(np, addrp); | ||
307 | if (addr == OF_BAD_ADDR) { | ||
308 | err = -EINVAL; | ||
309 | goto err_free_bus; | ||
310 | } | ||
311 | |||
312 | map = ioremap(addr, size); | ||
313 | if (!map) { | ||
314 | err = -ENOMEM; | ||
315 | goto err_free_bus; | ||
316 | } | ||
317 | priv->map = map; | ||
318 | |||
319 | if (of_device_is_compatible(np, "fsl,gianfar-mdio") || | ||
320 | of_device_is_compatible(np, "fsl,gianfar-tbi") || | ||
321 | of_device_is_compatible(np, "fsl,ucc-mdio") || | ||
322 | of_device_is_compatible(np, "ucc_geth_phy")) | ||
323 | map -= offsetof(struct fsl_pq_mdio, miimcfg); | ||
324 | regs = map; | ||
325 | priv->regs = regs; | ||
326 | |||
327 | new_bus->irq = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL); | ||
328 | |||
329 | if (!new_bus->irq) { | ||
330 | err = -ENOMEM; | ||
331 | goto err_unmap_regs; | ||
332 | } | ||
333 | |||
334 | new_bus->parent = &ofdev->dev; | ||
335 | dev_set_drvdata(&ofdev->dev, new_bus); | ||
336 | |||
337 | if (of_device_is_compatible(np, "fsl,gianfar-mdio") || | ||
338 | of_device_is_compatible(np, "fsl,gianfar-tbi") || | ||
339 | of_device_is_compatible(np, "fsl,etsec2-mdio") || | ||
340 | of_device_is_compatible(np, "fsl,etsec2-tbi") || | ||
341 | of_device_is_compatible(np, "gianfar")) { | ||
342 | #if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE) | ||
343 | tbipa = get_gfar_tbipa(regs, np); | ||
344 | if (!tbipa) { | ||
345 | err = -EINVAL; | ||
346 | goto err_free_irqs; | ||
347 | } | ||
348 | #else | ||
349 | err = -ENODEV; | ||
350 | goto err_free_irqs; | ||
351 | #endif | ||
352 | } else if (of_device_is_compatible(np, "fsl,ucc-mdio") || | ||
353 | of_device_is_compatible(np, "ucc_geth_phy")) { | ||
354 | #if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE) | ||
355 | u32 id; | ||
356 | static u32 mii_mng_master; | ||
357 | |||
358 | tbipa = ®s->utbipar; | ||
359 | |||
360 | if ((err = get_ucc_id_for_range(addr, addr + size, &id))) | ||
361 | goto err_free_irqs; | ||
362 | |||
363 | if (!mii_mng_master) { | ||
364 | mii_mng_master = id; | ||
365 | ucc_set_qe_mux_mii_mng(id - 1); | ||
366 | } | ||
367 | #else | ||
368 | err = -ENODEV; | ||
369 | goto err_free_irqs; | ||
370 | #endif | ||
371 | } else { | ||
372 | err = -ENODEV; | ||
373 | goto err_free_irqs; | ||
374 | } | ||
375 | |||
376 | for_each_child_of_node(np, tbi) { | ||
377 | if (!strncmp(tbi->type, "tbi-phy", 8)) | ||
378 | break; | ||
379 | } | ||
380 | |||
381 | if (tbi) { | ||
382 | const u32 *prop = of_get_property(tbi, "reg", NULL); | ||
383 | |||
384 | if (prop) | ||
385 | tbiaddr = *prop; | ||
386 | } | ||
387 | |||
388 | if (tbiaddr == -1) { | ||
389 | out_be32(tbipa, 0); | ||
390 | |||
391 | tbiaddr = fsl_pq_mdio_find_free(new_bus); | ||
392 | } | ||
393 | |||
394 | /* | ||
395 | * We define TBIPA at 0 to be illegal, opting to fail for boards that | ||
396 | * have PHYs at 1-31, rather than change tbipa and rescan. | ||
397 | */ | ||
398 | if (tbiaddr == 0) { | ||
399 | err = -EBUSY; | ||
400 | |||
401 | goto err_free_irqs; | ||
402 | } | ||
403 | |||
404 | out_be32(tbipa, tbiaddr); | ||
405 | |||
406 | err = of_mdiobus_register(new_bus, np); | ||
407 | if (err) { | ||
408 | printk(KERN_ERR "%s: Cannot register as MDIO bus\n", | ||
409 | new_bus->name); | ||
410 | goto err_free_irqs; | ||
411 | } | ||
412 | |||
413 | return 0; | ||
414 | |||
415 | err_free_irqs: | ||
416 | kfree(new_bus->irq); | ||
417 | err_unmap_regs: | ||
418 | iounmap(priv->map); | ||
419 | err_free_bus: | ||
420 | kfree(new_bus); | ||
421 | err_free_priv: | ||
422 | kfree(priv); | ||
423 | return err; | ||
424 | } | ||
425 | |||
426 | |||
427 | static int fsl_pq_mdio_remove(struct platform_device *ofdev) | ||
428 | { | ||
429 | struct device *device = &ofdev->dev; | ||
430 | struct mii_bus *bus = dev_get_drvdata(device); | ||
431 | struct fsl_pq_mdio_priv *priv = bus->priv; | ||
432 | |||
433 | mdiobus_unregister(bus); | ||
434 | |||
435 | dev_set_drvdata(device, NULL); | ||
436 | |||
437 | iounmap(priv->map); | ||
438 | bus->priv = NULL; | ||
439 | mdiobus_free(bus); | ||
440 | kfree(priv); | ||
441 | |||
442 | return 0; | ||
443 | } | ||
444 | |||
445 | static struct of_device_id fsl_pq_mdio_match[] = { | ||
446 | { | ||
447 | .type = "mdio", | ||
448 | .compatible = "ucc_geth_phy", | ||
449 | }, | ||
450 | { | ||
451 | .type = "mdio", | ||
452 | .compatible = "gianfar", | ||
453 | }, | ||
454 | { | ||
455 | .compatible = "fsl,ucc-mdio", | ||
456 | }, | ||
457 | { | ||
458 | .compatible = "fsl,gianfar-tbi", | ||
459 | }, | ||
460 | { | ||
461 | .compatible = "fsl,gianfar-mdio", | ||
462 | }, | ||
463 | { | ||
464 | .compatible = "fsl,etsec2-tbi", | ||
465 | }, | ||
466 | { | ||
467 | .compatible = "fsl,etsec2-mdio", | ||
468 | }, | ||
469 | {}, | ||
470 | }; | ||
471 | MODULE_DEVICE_TABLE(of, fsl_pq_mdio_match); | ||
472 | |||
473 | static struct platform_driver fsl_pq_mdio_driver = { | ||
474 | .driver = { | ||
475 | .name = "fsl-pq_mdio", | ||
476 | .owner = THIS_MODULE, | ||
477 | .of_match_table = fsl_pq_mdio_match, | ||
478 | }, | ||
479 | .probe = fsl_pq_mdio_probe, | ||
480 | .remove = fsl_pq_mdio_remove, | ||
481 | }; | ||
482 | |||
483 | int __init fsl_pq_mdio_init(void) | ||
484 | { | ||
485 | return platform_driver_register(&fsl_pq_mdio_driver); | ||
486 | } | ||
487 | module_init(fsl_pq_mdio_init); | ||
488 | |||
489 | void fsl_pq_mdio_exit(void) | ||
490 | { | ||
491 | platform_driver_unregister(&fsl_pq_mdio_driver); | ||
492 | } | ||
493 | module_exit(fsl_pq_mdio_exit); | ||
494 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.h b/drivers/net/ethernet/freescale/fsl_pq_mdio.h new file mode 100644 index 000000000000..bd17a2a0139b --- /dev/null +++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.h | |||
@@ -0,0 +1,52 @@ | |||
1 | /* | ||
2 | * Freescale PowerQUICC MDIO Driver -- MII Management Bus Implementation | ||
3 | * Driver for the MDIO bus controller on Freescale PowerQUICC processors | ||
4 | * | ||
5 | * Author: Andy Fleming | ||
6 | * Modifier: Sandeep Gopalpet | ||
7 | * | ||
8 | * Copyright 2002-2004, 2008-2009 Freescale Semiconductor, Inc. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify it | ||
11 | * under the terms of the GNU General Public License as published by the | ||
12 | * Free Software Foundation; either version 2 of the License, or (at your | ||
13 | * option) any later version. | ||
14 | * | ||
15 | */ | ||
16 | #ifndef __FSL_PQ_MDIO_H | ||
17 | #define __FSL_PQ_MDIO_H | ||
18 | |||
19 | #define MIIMIND_BUSY 0x00000001 | ||
20 | #define MIIMIND_NOTVALID 0x00000004 | ||
21 | #define MIIMCFG_INIT_VALUE 0x00000007 | ||
22 | #define MIIMCFG_RESET 0x80000000 | ||
23 | |||
24 | #define MII_READ_COMMAND 0x00000001 | ||
25 | |||
26 | struct fsl_pq_mdio { | ||
27 | u8 res1[16]; | ||
28 | u32 ieventm; /* MDIO Interrupt event register (for etsec2)*/ | ||
29 | u32 imaskm; /* MDIO Interrupt mask register (for etsec2)*/ | ||
30 | u8 res2[4]; | ||
31 | u32 emapm; /* MDIO Event mapping register (for etsec2)*/ | ||
32 | u8 res3[1280]; | ||
33 | u32 miimcfg; /* MII management configuration reg */ | ||
34 | u32 miimcom; /* MII management command reg */ | ||
35 | u32 miimadd; /* MII management address reg */ | ||
36 | u32 miimcon; /* MII management control reg */ | ||
37 | u32 miimstat; /* MII management status reg */ | ||
38 | u32 miimind; /* MII management indication reg */ | ||
39 | u8 reserved[28]; /* Space holder */ | ||
40 | u32 utbipar; /* TBI phy address reg (only on UCC) */ | ||
41 | u8 res4[2728]; | ||
42 | } __packed; | ||
43 | |||
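The reserved padding in this struct is load-bearing: it places the MIIM register block at offset 0x520 and makes the whole layout span exactly one 4 KiB page, which is why fsl_pq_mdio_probe can rewind its mapping by offsetof(struct fsl_pq_mdio, miimcfg) for nodes whose reg property points straight at the MIIM registers. A compile-time sanity check of the layout as shown (a userspace re-declaration, assuming only what appears above):

```c
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

typedef uint8_t  u8;
typedef uint32_t u32;

struct fsl_pq_mdio {
    u8  res1[16];
    u32 ieventm, imaskm;
    u8  res2[4];
    u32 emapm;
    u8  res3[1280];
    u32 miimcfg, miimcom, miimadd, miimcon, miimstat, miimind;
    u8  reserved[28];
    u32 utbipar;
    u8  res4[2728];
} __attribute__((packed));  /* gcc/clang spelling of __packed */

/* 16 + 4 + 4 + 4 + 4 + 1280 = 1312 = 0x520, so the MIIM block sits
 * at 0x520; utbipar follows at 0x554 after six u32s and 28 reserved
 * bytes, and res4 pads the whole thing out to one page. */
static_assert(offsetof(struct fsl_pq_mdio, miimcfg) == 0x520, "miimcfg");
static_assert(offsetof(struct fsl_pq_mdio, utbipar) == 0x554, "utbipar");
static_assert(sizeof(struct fsl_pq_mdio) == 4096, "one 4 KiB page");

int main(void) { return 0; }
```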
44 | int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum); | ||
45 | int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value); | ||
46 | int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id, | ||
47 | int regnum, u16 value); | ||
48 | int fsl_pq_local_mdio_read(struct fsl_pq_mdio __iomem *regs, int mii_id, int regnum); | ||
49 | int __init fsl_pq_mdio_init(void); | ||
50 | void fsl_pq_mdio_exit(void); | ||
51 | void fsl_pq_mdio_bus_name(char *name, struct device_node *np); | ||
52 | #endif /* __FSL_PQ_MDIO_H */ | ||
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c new file mode 100644 index 000000000000..2659daad783d --- /dev/null +++ b/drivers/net/ethernet/freescale/gianfar.c | |||
@@ -0,0 +1,3291 @@ | |||
1 | /* | ||
2 | * drivers/net/gianfar.c | ||
3 | * | ||
4 | * Gianfar Ethernet Driver | ||
5 | * This driver is designed for the non-CPM ethernet controllers | ||
6 | * on the 85xx and 83xx family of integrated processors | ||
7 | * Based on 8260_io/fcc_enet.c | ||
8 | * | ||
9 | * Author: Andy Fleming | ||
10 | * Maintainer: Kumar Gala | ||
11 | * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com> | ||
12 | * | ||
13 | * Copyright 2002-2009, 2011 Freescale Semiconductor, Inc. | ||
14 | * Copyright 2007 MontaVista Software, Inc. | ||
15 | * | ||
16 | * This program is free software; you can redistribute it and/or modify it | ||
17 | * under the terms of the GNU General Public License as published by the | ||
18 | * Free Software Foundation; either version 2 of the License, or (at your | ||
19 | * option) any later version. | ||
20 | * | ||
21 | * Gianfar: AKA Lambda Draconis, "Dragon" | ||
22 | * RA 11 31 24.2 | ||
23 | * Dec +69 19 52 | ||
24 | * V 3.84 | ||
25 | * B-V +1.62 | ||
26 | * | ||
27 | * Theory of operation | ||
28 | * | ||
29 | * The driver is initialized through of_device. Configuration information | ||
30 | * is therefore conveyed through an OF-style device tree. | ||
31 | * | ||
32 | * The Gianfar Ethernet Controller uses a ring of buffer | ||
33 | * descriptors. The beginning is indicated by a register | ||
34 | * pointing to the physical address of the start of the ring. | ||
35 | * The end is determined by a "wrap" bit being set in the | ||
36 | * last descriptor of the ring. | ||
37 | * | ||
38 | * When a packet is received, the RXF bit in the | ||
39 | * IEVENT register is set, triggering an interrupt when the | ||
40 | * corresponding bit in the IMASK register is also set (if | ||
41 | * interrupt coalescing is active, then the interrupt may not | ||
42 | * happen immediately, but will wait until either a set number | ||
43 | * of frames or amount of time have passed). In NAPI, the | ||
44 | * interrupt handler will signal there is work to be done, and | ||
45 | * exit. This method will start at the last known empty | ||
46 | * descriptor, and process every subsequent descriptor until there | ||
47 | * are none left with data (NAPI will stop after a set number of | ||
48 | * packets to give time to other tasks, but will eventually | ||
49 | * process all the packets). The data arrives inside a | ||
50 | * pre-allocated skb, and so after the skb is passed up to the | ||
51 | * stack, a new skb must be allocated, and the address field in | ||
52 | * the buffer descriptor must be updated to indicate this new | ||
53 | * skb. | ||
54 | * | ||
55 | * When the kernel requests that a packet be transmitted, the | ||
56 | * driver starts where it left off last time, and points the | ||
57 | * descriptor at the buffer which was passed in. The driver | ||
58 | * then informs the DMA engine that there are packets ready to | ||
59 | * be transmitted. Once the controller is finished transmitting | ||
60 | * the packet, an interrupt may be triggered (under the same | ||
61 | * conditions as for reception, but depending on the TXF bit). | ||
62 | * The driver then cleans up the buffer. | ||
63 | */ | ||
64 | |||
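The ring-plus-wrap-bit scheme described above is easy to model: the consumer walks descriptors until it hits one still owned by hardware or exhausts its budget, and the wrap bit on the last descriptor folds the walk back to the ring base. A stripped-down sketch (the flag values are placeholders, not the real RXBD_* constants, and real processing would pass skbs up the stack rather than print):

```c
#include <stdint.h>
#include <stdio.h>

#define BD_EMPTY 0x8000  /* placeholder flag values for the sketch */
#define BD_WRAP  0x2000

struct rxbd { uint16_t status; uint16_t length; };

/* Walk the RX ring as the NAPI poll loop does: process descriptors
 * until one is still owned by hardware (EMPTY) or the budget runs
 * out; the WRAP bit on the last BD sends us back to the ring base. */
static int rx_ring_walk(struct rxbd *base, struct rxbd *cur, int budget)
{
    int done = 0;

    while (done < budget && !(cur->status & BD_EMPTY)) {
        printf("frame of %d bytes\n", (int)cur->length);
        cur->status |= BD_EMPTY;                 /* hand BD back to hw */
        cur = (cur->status & BD_WRAP) ? base : cur + 1;
        done++;
    }
    return done;
}

int main(void)
{
    struct rxbd ring[4] = {
        { 0, 60 }, { 0, 64 }, { BD_EMPTY, 0 }, { BD_EMPTY | BD_WRAP, 0 },
    };

    printf("processed %d frames\n", rx_ring_walk(ring, ring, 8));  /* 2 */
    return 0;
}
```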
65 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
66 | #define DEBUG | ||
67 | |||
68 | #include <linux/kernel.h> | ||
69 | #include <linux/string.h> | ||
70 | #include <linux/errno.h> | ||
71 | #include <linux/unistd.h> | ||
72 | #include <linux/slab.h> | ||
73 | #include <linux/interrupt.h> | ||
74 | #include <linux/init.h> | ||
75 | #include <linux/delay.h> | ||
76 | #include <linux/netdevice.h> | ||
77 | #include <linux/etherdevice.h> | ||
78 | #include <linux/skbuff.h> | ||
79 | #include <linux/if_vlan.h> | ||
80 | #include <linux/spinlock.h> | ||
81 | #include <linux/mm.h> | ||
82 | #include <linux/of_mdio.h> | ||
83 | #include <linux/of_platform.h> | ||
84 | #include <linux/ip.h> | ||
85 | #include <linux/tcp.h> | ||
86 | #include <linux/udp.h> | ||
87 | #include <linux/in.h> | ||
88 | #include <linux/net_tstamp.h> | ||
89 | |||
90 | #include <asm/io.h> | ||
91 | #include <asm/reg.h> | ||
92 | #include <asm/irq.h> | ||
93 | #include <asm/uaccess.h> | ||
94 | #include <linux/module.h> | ||
95 | #include <linux/dma-mapping.h> | ||
96 | #include <linux/crc32.h> | ||
97 | #include <linux/mii.h> | ||
98 | #include <linux/phy.h> | ||
99 | #include <linux/phy_fixed.h> | ||
100 | #include <linux/of.h> | ||
101 | #include <linux/of_net.h> | ||
102 | |||
103 | #include "gianfar.h" | ||
104 | #include "fsl_pq_mdio.h" | ||
105 | |||
106 | #define TX_TIMEOUT (1*HZ) | ||
107 | #undef BRIEF_GFAR_ERRORS | ||
108 | #undef VERBOSE_GFAR_ERRORS | ||
109 | |||
110 | const char gfar_driver_name[] = "Gianfar Ethernet"; | ||
111 | const char gfar_driver_version[] = "1.3"; | ||
112 | |||
113 | static int gfar_enet_open(struct net_device *dev); | ||
114 | static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev); | ||
115 | static void gfar_reset_task(struct work_struct *work); | ||
116 | static void gfar_timeout(struct net_device *dev); | ||
117 | static int gfar_close(struct net_device *dev); | ||
118 | struct sk_buff *gfar_new_skb(struct net_device *dev); | ||
119 | static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, | ||
120 | struct sk_buff *skb); | ||
121 | static int gfar_set_mac_address(struct net_device *dev); | ||
122 | static int gfar_change_mtu(struct net_device *dev, int new_mtu); | ||
123 | static irqreturn_t gfar_error(int irq, void *dev_id); | ||
124 | static irqreturn_t gfar_transmit(int irq, void *dev_id); | ||
125 | static irqreturn_t gfar_interrupt(int irq, void *dev_id); | ||
126 | static void adjust_link(struct net_device *dev); | ||
127 | static void init_registers(struct net_device *dev); | ||
128 | static int init_phy(struct net_device *dev); | ||
129 | static int gfar_probe(struct platform_device *ofdev); | ||
130 | static int gfar_remove(struct platform_device *ofdev); | ||
131 | static void free_skb_resources(struct gfar_private *priv); | ||
132 | static void gfar_set_multi(struct net_device *dev); | ||
133 | static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr); | ||
134 | static void gfar_configure_serdes(struct net_device *dev); | ||
135 | static int gfar_poll(struct napi_struct *napi, int budget); | ||
136 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
137 | static void gfar_netpoll(struct net_device *dev); | ||
138 | #endif | ||
139 | int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit); | ||
140 | static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue); | ||
141 | static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, | ||
142 | int amount_pull); | ||
143 | void gfar_halt(struct net_device *dev); | ||
144 | static void gfar_halt_nodisable(struct net_device *dev); | ||
145 | void gfar_start(struct net_device *dev); | ||
146 | static void gfar_clear_exact_match(struct net_device *dev); | ||
147 | static void gfar_set_mac_for_addr(struct net_device *dev, int num, | ||
148 | const u8 *addr); | ||
149 | static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); | ||
150 | |||
151 | MODULE_AUTHOR("Freescale Semiconductor, Inc"); | ||
152 | MODULE_DESCRIPTION("Gianfar Ethernet Driver"); | ||
153 | MODULE_LICENSE("GPL"); | ||
154 | |||
155 | static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, | ||
156 | dma_addr_t buf) | ||
157 | { | ||
158 | u32 lstatus; | ||
159 | |||
160 | bdp->bufPtr = buf; | ||
161 | |||
162 | lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT); | ||
163 | if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1) | ||
164 | lstatus |= BD_LFLAG(RXBD_WRAP); | ||
165 | |||
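| /* On PowerPC, eieio() orders the bufPtr store above before the | ||
| * lstatus store below, so the controller never sees a descriptor | ||
| * flagged EMPTY while its buffer pointer is still stale. | ||
| */ | ||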
166 | eieio(); | ||
167 | |||
168 | bdp->lstatus = lstatus; | ||
169 | } | ||
170 | |||
171 | static int gfar_init_bds(struct net_device *ndev) | ||
172 | { | ||
173 | struct gfar_private *priv = netdev_priv(ndev); | ||
174 | struct gfar_priv_tx_q *tx_queue = NULL; | ||
175 | struct gfar_priv_rx_q *rx_queue = NULL; | ||
176 | struct txbd8 *txbdp; | ||
177 | struct rxbd8 *rxbdp; | ||
178 | int i, j; | ||
179 | |||
180 | for (i = 0; i < priv->num_tx_queues; i++) { | ||
181 | tx_queue = priv->tx_queue[i]; | ||
182 | /* Initialize some variables in our dev structure */ | ||
183 | tx_queue->num_txbdfree = tx_queue->tx_ring_size; | ||
184 | tx_queue->dirty_tx = tx_queue->tx_bd_base; | ||
185 | tx_queue->cur_tx = tx_queue->tx_bd_base; | ||
186 | tx_queue->skb_curtx = 0; | ||
187 | tx_queue->skb_dirtytx = 0; | ||
188 | |||
189 | /* Initialize Transmit Descriptor Ring */ | ||
190 | txbdp = tx_queue->tx_bd_base; | ||
191 | for (j = 0; j < tx_queue->tx_ring_size; j++) { | ||
192 | txbdp->lstatus = 0; | ||
193 | txbdp->bufPtr = 0; | ||
194 | txbdp++; | ||
195 | } | ||
196 | |||
197 | /* Set the last descriptor in the ring to indicate wrap */ | ||
198 | txbdp--; | ||
199 | txbdp->status |= TXBD_WRAP; | ||
200 | } | ||
201 | |||
202 | for (i = 0; i < priv->num_rx_queues; i++) { | ||
203 | rx_queue = priv->rx_queue[i]; | ||
204 | rx_queue->cur_rx = rx_queue->rx_bd_base; | ||
205 | rx_queue->skb_currx = 0; | ||
206 | rxbdp = rx_queue->rx_bd_base; | ||
207 | |||
208 | for (j = 0; j < rx_queue->rx_ring_size; j++) { | ||
209 | struct sk_buff *skb = rx_queue->rx_skbuff[j]; | ||
210 | |||
211 | if (skb) { | ||
212 | gfar_init_rxbdp(rx_queue, rxbdp, | ||
213 | rxbdp->bufPtr); | ||
214 | } else { | ||
215 | skb = gfar_new_skb(ndev); | ||
216 | if (!skb) { | ||
217 | netdev_err(ndev, "Can't allocate RX buffers\n"); | ||
218 | goto err_rxalloc_fail; | ||
219 | } | ||
220 | rx_queue->rx_skbuff[j] = skb; | ||
221 | |||
222 | gfar_new_rxbdp(rx_queue, rxbdp, skb); | ||
223 | } | ||
224 | |||
225 | rxbdp++; | ||
226 | } | ||
227 | |||
228 | } | ||
229 | |||
230 | return 0; | ||
231 | |||
232 | err_rxalloc_fail: | ||
233 | free_skb_resources(priv); | ||
234 | return -ENOMEM; | ||
235 | } | ||
236 | |||
237 | static int gfar_alloc_skb_resources(struct net_device *ndev) | ||
238 | { | ||
239 | void *vaddr; | ||
240 | dma_addr_t addr; | ||
241 | int i, j, k; | ||
242 | struct gfar_private *priv = netdev_priv(ndev); | ||
243 | struct device *dev = &priv->ofdev->dev; | ||
244 | struct gfar_priv_tx_q *tx_queue = NULL; | ||
245 | struct gfar_priv_rx_q *rx_queue = NULL; | ||
246 | |||
247 | priv->total_tx_ring_size = 0; | ||
248 | for (i = 0; i < priv->num_tx_queues; i++) | ||
249 | priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size; | ||
250 | |||
251 | priv->total_rx_ring_size = 0; | ||
252 | for (i = 0; i < priv->num_rx_queues; i++) | ||
253 | priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size; | ||
254 | |||
255 | /* Allocate memory for the buffer descriptors */ | ||
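| /* A single coherent region holds every TX ring back-to-back, | ||
| * followed by every RX ring; free_skb_resources() later releases | ||
| * it with one matching dma_free_coherent() call. | ||
| */ | ||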
256 | vaddr = dma_alloc_coherent(dev, | ||
257 | sizeof(struct txbd8) * priv->total_tx_ring_size + | ||
258 | sizeof(struct rxbd8) * priv->total_rx_ring_size, | ||
259 | &addr, GFP_KERNEL); | ||
260 | if (!vaddr) { | ||
261 | netif_err(priv, ifup, ndev, | ||
262 | "Could not allocate buffer descriptors!\n"); | ||
263 | return -ENOMEM; | ||
264 | } | ||
265 | |||
266 | for (i = 0; i < priv->num_tx_queues; i++) { | ||
267 | tx_queue = priv->tx_queue[i]; | ||
268 | tx_queue->tx_bd_base = vaddr; | ||
269 | tx_queue->tx_bd_dma_base = addr; | ||
270 | tx_queue->dev = ndev; | ||
271 | /* enet DMA only understands physical addresses */ | ||
272 | addr += sizeof(struct txbd8) * tx_queue->tx_ring_size; | ||
273 | vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size; | ||
274 | } | ||
275 | |||
276 | /* Start the rx descriptor ring where the tx ring leaves off */ | ||
277 | for (i = 0; i < priv->num_rx_queues; i++) { | ||
278 | rx_queue = priv->rx_queue[i]; | ||
279 | rx_queue->rx_bd_base = vaddr; | ||
280 | rx_queue->rx_bd_dma_base = addr; | ||
281 | rx_queue->dev = ndev; | ||
282 | addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size; | ||
283 | vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size; | ||
284 | } | ||
285 | |||
286 | /* Setup the skbuff rings */ | ||
287 | for (i = 0; i < priv->num_tx_queues; i++) { | ||
288 | tx_queue = priv->tx_queue[i]; | ||
289 | tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) * | ||
290 | tx_queue->tx_ring_size, GFP_KERNEL); | ||
291 | if (!tx_queue->tx_skbuff) { | ||
292 | netif_err(priv, ifup, ndev, | ||
293 | "Could not allocate tx_skbuff\n"); | ||
294 | goto cleanup; | ||
295 | } | ||
296 | |||
297 | for (k = 0; k < tx_queue->tx_ring_size; k++) | ||
298 | tx_queue->tx_skbuff[k] = NULL; | ||
299 | } | ||
300 | |||
301 | for (i = 0; i < priv->num_rx_queues; i++) { | ||
302 | rx_queue = priv->rx_queue[i]; | ||
303 | rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) * | ||
304 | rx_queue->rx_ring_size, GFP_KERNEL); | ||
305 | |||
306 | if (!rx_queue->rx_skbuff) { | ||
307 | netif_err(priv, ifup, ndev, | ||
308 | "Could not allocate rx_skbuff\n"); | ||
309 | goto cleanup; | ||
310 | } | ||
311 | |||
312 | for (j = 0; j < rx_queue->rx_ring_size; j++) | ||
313 | rx_queue->rx_skbuff[j] = NULL; | ||
314 | } | ||
315 | |||
316 | if (gfar_init_bds(ndev)) | ||
317 | goto cleanup; | ||
318 | |||
319 | return 0; | ||
320 | |||
321 | cleanup: | ||
322 | free_skb_resources(priv); | ||
323 | return -ENOMEM; | ||
324 | } | ||
325 | |||
326 | static void gfar_init_tx_rx_base(struct gfar_private *priv) | ||
327 | { | ||
328 | struct gfar __iomem *regs = priv->gfargrp[0].regs; | ||
329 | u32 __iomem *baddr; | ||
330 | int i; | ||
331 | |||
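| /* Successive per-queue base registers sit 8 bytes apart, hence | ||
| * the u32-pointer stride of 2 used below. | ||
| */ | ||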
332 | baddr = ®s->tbase0; | ||
333 | for (i = 0; i < priv->num_tx_queues; i++) { | ||
334 | gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base); | ||
335 | baddr += 2; | ||
336 | } | ||
337 | |||
338 | baddr = ®s->rbase0; | ||
339 | for (i = 0; i < priv->num_rx_queues; i++) { | ||
340 | gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base); | ||
341 | baddr += 2; | ||
342 | } | ||
343 | } | ||
344 | |||
345 | static void gfar_init_mac(struct net_device *ndev) | ||
346 | { | ||
347 | struct gfar_private *priv = netdev_priv(ndev); | ||
348 | struct gfar __iomem *regs = priv->gfargrp[0].regs; | ||
349 | u32 rctrl = 0; | ||
350 | u32 tctrl = 0; | ||
351 | u32 attrs = 0; | ||
352 | |||
353 | /* write the tx/rx base registers */ | ||
354 | gfar_init_tx_rx_base(priv); | ||
355 | |||
356 | /* Configure the coalescing support */ | ||
357 | gfar_configure_coalescing(priv, 0xFF, 0xFF); | ||
358 | |||
359 | if (priv->rx_filer_enable) { | ||
360 | rctrl |= RCTRL_FILREN; | ||
361 | /* Program the RIR0 reg with the required distribution */ | ||
362 | gfar_write(®s->rir0, DEFAULT_RIR0); | ||
363 | } | ||
364 | |||
365 | if (ndev->features & NETIF_F_RXCSUM) | ||
366 | rctrl |= RCTRL_CHECKSUMMING; | ||
367 | |||
368 | if (priv->extended_hash) { | ||
369 | rctrl |= RCTRL_EXTHASH; | ||
370 | |||
371 | gfar_clear_exact_match(ndev); | ||
372 | rctrl |= RCTRL_EMEN; | ||
373 | } | ||
374 | |||
375 | if (priv->padding) { | ||
376 | rctrl &= ~RCTRL_PAL_MASK; | ||
377 | rctrl |= RCTRL_PADDING(priv->padding); | ||
378 | } | ||
379 | |||
380 | /* Insert receive time stamps into padding alignment bytes */ | ||
381 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) { | ||
382 | rctrl &= ~RCTRL_PAL_MASK; | ||
383 | rctrl |= RCTRL_PADDING(8); | ||
384 | priv->padding = 8; | ||
385 | } | ||
386 | |||
387 | /* Enable HW time stamping if requested from user space */ | ||
388 | if (priv->hwts_rx_en) | ||
389 | rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE; | ||
390 | |||
391 | if (ndev->features & NETIF_F_HW_VLAN_RX) | ||
392 | rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT; | ||
393 | |||
394 | /* Init rctrl based on our settings */ | ||
395 | gfar_write(®s->rctrl, rctrl); | ||
396 | |||
397 | if (ndev->features & NETIF_F_IP_CSUM) | ||
398 | tctrl |= TCTRL_INIT_CSUM; | ||
399 | |||
400 | tctrl |= TCTRL_TXSCHED_PRIO; | ||
401 | |||
402 | gfar_write(®s->tctrl, tctrl); | ||
403 | |||
404 | /* Set the extraction length and index */ | ||
405 | attrs = ATTRELI_EL(priv->rx_stash_size) | | ||
406 | ATTRELI_EI(priv->rx_stash_index); | ||
407 | |||
408 | gfar_write(®s->attreli, attrs); | ||
409 | |||
410 | /* Start with defaults, and add stashing or locking | ||
411 | * depending on the appropriate variables */ | ||
412 | attrs = ATTR_INIT_SETTINGS; | ||
413 | |||
414 | if (priv->bd_stash_en) | ||
415 | attrs |= ATTR_BDSTASH; | ||
416 | |||
417 | if (priv->rx_stash_size != 0) | ||
418 | attrs |= ATTR_BUFSTASH; | ||
419 | |||
420 | gfar_write(®s->attr, attrs); | ||
421 | |||
422 | gfar_write(®s->fifo_tx_thr, priv->fifo_threshold); | ||
423 | gfar_write(®s->fifo_tx_starve, priv->fifo_starve); | ||
424 | gfar_write(®s->fifo_tx_starve_shutoff, priv->fifo_starve_off); | ||
425 | } | ||
426 | |||
427 | static struct net_device_stats *gfar_get_stats(struct net_device *dev) | ||
428 | { | ||
429 | struct gfar_private *priv = netdev_priv(dev); | ||
430 | unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0; | ||
431 | unsigned long tx_packets = 0, tx_bytes = 0; | ||
432 | int i = 0; | ||
433 | |||
434 | for (i = 0; i < priv->num_rx_queues; i++) { | ||
435 | rx_packets += priv->rx_queue[i]->stats.rx_packets; | ||
436 | rx_bytes += priv->rx_queue[i]->stats.rx_bytes; | ||
437 | rx_dropped += priv->rx_queue[i]->stats.rx_dropped; | ||
438 | } | ||
439 | |||
440 | dev->stats.rx_packets = rx_packets; | ||
441 | dev->stats.rx_bytes = rx_bytes; | ||
442 | dev->stats.rx_dropped = rx_dropped; | ||
443 | |||
444 | for (i = 0; i < priv->num_tx_queues; i++) { | ||
445 | tx_bytes += priv->tx_queue[i]->stats.tx_bytes; | ||
446 | tx_packets += priv->tx_queue[i]->stats.tx_packets; | ||
447 | } | ||
448 | |||
449 | dev->stats.tx_bytes = tx_bytes; | ||
450 | dev->stats.tx_packets = tx_packets; | ||
451 | |||
452 | return &dev->stats; | ||
453 | } | ||
454 | |||
455 | static const struct net_device_ops gfar_netdev_ops = { | ||
456 | .ndo_open = gfar_enet_open, | ||
457 | .ndo_start_xmit = gfar_start_xmit, | ||
458 | .ndo_stop = gfar_close, | ||
459 | .ndo_change_mtu = gfar_change_mtu, | ||
460 | .ndo_set_features = gfar_set_features, | ||
461 | .ndo_set_multicast_list = gfar_set_multi, | ||
462 | .ndo_tx_timeout = gfar_timeout, | ||
463 | .ndo_do_ioctl = gfar_ioctl, | ||
464 | .ndo_get_stats = gfar_get_stats, | ||
465 | .ndo_set_mac_address = eth_mac_addr, | ||
466 | .ndo_validate_addr = eth_validate_addr, | ||
467 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
468 | .ndo_poll_controller = gfar_netpoll, | ||
469 | #endif | ||
470 | }; | ||
471 | |||
472 | void lock_rx_qs(struct gfar_private *priv) | ||
473 | { | ||
474 | int i = 0x0; | ||
475 | |||
476 | for (i = 0; i < priv->num_rx_queues; i++) | ||
477 | spin_lock(&priv->rx_queue[i]->rxlock); | ||
478 | } | ||
479 | |||
480 | void lock_tx_qs(struct gfar_private *priv) | ||
481 | { | ||
482 | int i = 0x0; | ||
483 | |||
484 | for (i = 0; i < priv->num_tx_queues; i++) | ||
485 | spin_lock(&priv->tx_queue[i]->txlock); | ||
486 | } | ||
487 | |||
488 | void unlock_rx_qs(struct gfar_private *priv) | ||
489 | { | ||
490 | int i = 0x0; | ||
491 | |||
492 | for (i = 0; i < priv->num_rx_queues; i++) | ||
493 | spin_unlock(&priv->rx_queue[i]->rxlock); | ||
494 | } | ||
495 | |||
496 | void unlock_tx_qs(struct gfar_private *priv) | ||
497 | { | ||
498 | int i = 0x0; | ||
499 | |||
500 | for (i = 0; i < priv->num_tx_queues; i++) | ||
501 | spin_unlock(&priv->tx_queue[i]->txlock); | ||
502 | } | ||
503 | |||
504 | static bool gfar_is_vlan_on(struct gfar_private *priv) | ||
505 | { | ||
506 | return (priv->ndev->features & NETIF_F_HW_VLAN_RX) || | ||
507 | (priv->ndev->features & NETIF_F_HW_VLAN_TX); | ||
508 | } | ||
509 | |||
510 | /* Returns 1 if incoming frames use an FCB */ | ||
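| /* (The FCB is the 8-byte Frame Control Block, GMAC_FCB_LEN, that | ||
| * the controller prepends to a frame to carry checksum, VLAN and | ||
| * timestamp parse results.) | ||
| */ | ||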
511 | static inline int gfar_uses_fcb(struct gfar_private *priv) | ||
512 | { | ||
513 | return gfar_is_vlan_on(priv) || | ||
514 | (priv->ndev->features & NETIF_F_RXCSUM) || | ||
515 | (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER); | ||
516 | } | ||
517 | |||
518 | static void free_tx_pointers(struct gfar_private *priv) | ||
519 | { | ||
520 | int i = 0; | ||
521 | |||
522 | for (i = 0; i < priv->num_tx_queues; i++) | ||
523 | kfree(priv->tx_queue[i]); | ||
524 | } | ||
525 | |||
526 | static void free_rx_pointers(struct gfar_private *priv) | ||
527 | { | ||
528 | int i = 0; | ||
529 | |||
530 | for (i = 0; i < priv->num_rx_queues; i++) | ||
531 | kfree(priv->rx_queue[i]); | ||
532 | } | ||
533 | |||
534 | static void unmap_group_regs(struct gfar_private *priv) | ||
535 | { | ||
536 | int i = 0; | ||
537 | |||
538 | for (i = 0; i < MAXGROUPS; i++) | ||
539 | if (priv->gfargrp[i].regs) | ||
540 | iounmap(priv->gfargrp[i].regs); | ||
541 | } | ||
542 | |||
543 | static void disable_napi(struct gfar_private *priv) | ||
544 | { | ||
545 | int i = 0; | ||
546 | |||
547 | for (i = 0; i < priv->num_grps; i++) | ||
548 | napi_disable(&priv->gfargrp[i].napi); | ||
549 | } | ||
550 | |||
551 | static void enable_napi(struct gfar_private *priv) | ||
552 | { | ||
553 | int i = 0; | ||
554 | |||
555 | for (i = 0; i < priv->num_grps; i++) | ||
556 | napi_enable(&priv->gfargrp[i].napi); | ||
557 | } | ||
558 | |||
559 | static int gfar_parse_group(struct device_node *np, | ||
560 | struct gfar_private *priv, const char *model) | ||
561 | { | ||
562 | u32 *queue_mask; | ||
563 | |||
564 | priv->gfargrp[priv->num_grps].regs = of_iomap(np, 0); | ||
565 | if (!priv->gfargrp[priv->num_grps].regs) | ||
566 | return -ENOMEM; | ||
567 | |||
568 | priv->gfargrp[priv->num_grps].interruptTransmit = | ||
569 | irq_of_parse_and_map(np, 0); | ||
570 | |||
571 | /* If we aren't the FEC we have multiple interrupts */ | ||
572 | if (model && strcasecmp(model, "FEC")) { | ||
573 | priv->gfargrp[priv->num_grps].interruptReceive = | ||
574 | irq_of_parse_and_map(np, 1); | ||
575 | priv->gfargrp[priv->num_grps].interruptError = | ||
576 | irq_of_parse_and_map(np, 2); | ||
577 | if (priv->gfargrp[priv->num_grps].interruptTransmit == NO_IRQ || | ||
578 | priv->gfargrp[priv->num_grps].interruptReceive == NO_IRQ || | ||
579 | priv->gfargrp[priv->num_grps].interruptError == NO_IRQ) | ||
580 | return -EINVAL; | ||
581 | } | ||
582 | |||
583 | priv->gfargrp[priv->num_grps].grp_id = priv->num_grps; | ||
584 | priv->gfargrp[priv->num_grps].priv = priv; | ||
585 | spin_lock_init(&priv->gfargrp[priv->num_grps].grplock); | ||
586 | if (priv->mode == MQ_MG_MODE) { | ||
587 | queue_mask = (u32 *)of_get_property(np, | ||
588 | "fsl,rx-bit-map", NULL); | ||
589 | priv->gfargrp[priv->num_grps].rx_bit_map = | ||
590 | queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps); | ||
591 | queue_mask = (u32 *)of_get_property(np, | ||
592 | "fsl,tx-bit-map", NULL); | ||
593 | priv->gfargrp[priv->num_grps].tx_bit_map = | ||
594 | queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps); | ||
595 | } else { | ||
596 | priv->gfargrp[priv->num_grps].rx_bit_map = 0xFF; | ||
597 | priv->gfargrp[priv->num_grps].tx_bit_map = 0xFF; | ||
598 | } | ||
599 | priv->num_grps++; | ||
600 | |||
601 | return 0; | ||
602 | } | ||
603 | |||
604 | static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) | ||
605 | { | ||
606 | const char *model; | ||
607 | const char *ctype; | ||
608 | const void *mac_addr; | ||
609 | int err = 0, i; | ||
610 | struct net_device *dev = NULL; | ||
611 | struct gfar_private *priv = NULL; | ||
612 | struct device_node *np = ofdev->dev.of_node; | ||
613 | struct device_node *child = NULL; | ||
614 | const u32 *stash; | ||
615 | const u32 *stash_len; | ||
616 | const u32 *stash_idx; | ||
617 | unsigned int num_tx_qs, num_rx_qs; | ||
618 | u32 *tx_queues, *rx_queues; | ||
619 | |||
620 | if (!np || !of_device_is_available(np)) | ||
621 | return -ENODEV; | ||
622 | |||
623 | /* Parse the number of tx and rx queues */ | ||
624 | tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL); | ||
625 | num_tx_qs = tx_queues ? *tx_queues : 1; | ||
626 | |||
627 | if (num_tx_qs > MAX_TX_QS) { | ||
628 | pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n", | ||
629 | num_tx_qs, MAX_TX_QS); | ||
630 | pr_err("Cannot do alloc_etherdev, aborting\n"); | ||
631 | return -EINVAL; | ||
632 | } | ||
633 | |||
634 | rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL); | ||
635 | num_rx_qs = rx_queues ? *rx_queues : 1; | ||
636 | |||
637 | if (num_rx_qs > MAX_RX_QS) { | ||
638 | pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n", | ||
639 | num_rx_qs, MAX_RX_QS); | ||
640 | pr_err("Cannot do alloc_etherdev, aborting\n"); | ||
641 | return -EINVAL; | ||
642 | } | ||
643 | |||
644 | *pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs); | ||
645 | dev = *pdev; | ||
646 | if (!dev) | ||
647 | return -ENOMEM; | ||
648 | |||
649 | priv = netdev_priv(dev); | ||
650 | priv->node = ofdev->dev.of_node; | ||
651 | priv->ndev = dev; | ||
652 | |||
653 | priv->num_tx_queues = num_tx_qs; | ||
654 | netif_set_real_num_rx_queues(dev, num_rx_qs); | ||
655 | priv->num_rx_queues = num_rx_qs; | ||
656 | priv->num_grps = 0x0; | ||
657 | |||
658 | /* Init Rx queue filer rule set linked list */ | ||
659 | INIT_LIST_HEAD(&priv->rx_list.list); | ||
660 | priv->rx_list.count = 0; | ||
661 | mutex_init(&priv->rx_queue_access); | ||
662 | |||
663 | model = of_get_property(np, "model", NULL); | ||
664 | |||
665 | for (i = 0; i < MAXGROUPS; i++) | ||
666 | priv->gfargrp[i].regs = NULL; | ||
667 | |||
668 | /* Parse and initialize group specific information */ | ||
669 | if (of_device_is_compatible(np, "fsl,etsec2")) { | ||
670 | priv->mode = MQ_MG_MODE; | ||
671 | for_each_child_of_node(np, child) { | ||
672 | err = gfar_parse_group(child, priv, model); | ||
673 | if (err) | ||
674 | goto err_grp_init; | ||
675 | } | ||
676 | } else { | ||
677 | priv->mode = SQ_SG_MODE; | ||
678 | err = gfar_parse_group(np, priv, model); | ||
679 | if (err) | ||
680 | goto err_grp_init; | ||
681 | } | ||
682 | |||
683 | for (i = 0; i < priv->num_tx_queues; i++) | ||
684 | priv->tx_queue[i] = NULL; | ||
685 | for (i = 0; i < priv->num_rx_queues; i++) | ||
686 | priv->rx_queue[i] = NULL; | ||
687 | |||
688 | for (i = 0; i < priv->num_tx_queues; i++) { | ||
689 | priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q), | ||
690 | GFP_KERNEL); | ||
691 | if (!priv->tx_queue[i]) { | ||
692 | err = -ENOMEM; | ||
693 | goto tx_alloc_failed; | ||
694 | } | ||
695 | priv->tx_queue[i]->tx_skbuff = NULL; | ||
696 | priv->tx_queue[i]->qindex = i; | ||
697 | priv->tx_queue[i]->dev = dev; | ||
698 | spin_lock_init(&(priv->tx_queue[i]->txlock)); | ||
699 | } | ||
700 | |||
701 | for (i = 0; i < priv->num_rx_queues; i++) { | ||
702 | priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q), | ||
703 | GFP_KERNEL); | ||
704 | if (!priv->rx_queue[i]) { | ||
705 | err = -ENOMEM; | ||
706 | goto rx_alloc_failed; | ||
707 | } | ||
708 | priv->rx_queue[i]->rx_skbuff = NULL; | ||
709 | priv->rx_queue[i]->qindex = i; | ||
710 | priv->rx_queue[i]->dev = dev; | ||
711 | spin_lock_init(&(priv->rx_queue[i]->rxlock)); | ||
712 | } | ||
713 | |||
715 | stash = of_get_property(np, "bd-stash", NULL); | ||
716 | |||
717 | if (stash) { | ||
718 | priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING; | ||
719 | priv->bd_stash_en = 1; | ||
720 | } | ||
721 | |||
722 | stash_len = of_get_property(np, "rx-stash-len", NULL); | ||
723 | |||
724 | if (stash_len) | ||
725 | priv->rx_stash_size = *stash_len; | ||
726 | |||
727 | stash_idx = of_get_property(np, "rx-stash-idx", NULL); | ||
728 | |||
729 | if (stash_idx) | ||
730 | priv->rx_stash_index = *stash_idx; | ||
731 | |||
732 | if (stash_len || stash_idx) | ||
733 | priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING; | ||
734 | |||
735 | mac_addr = of_get_mac_address(np); | ||
736 | if (mac_addr) | ||
737 | memcpy(dev->dev_addr, mac_addr, MAC_ADDR_LEN); | ||
738 | |||
739 | if (model && !strcasecmp(model, "TSEC")) | ||
740 | priv->device_flags = | ||
741 | FSL_GIANFAR_DEV_HAS_GIGABIT | | ||
742 | FSL_GIANFAR_DEV_HAS_COALESCE | | ||
743 | FSL_GIANFAR_DEV_HAS_RMON | | ||
744 | FSL_GIANFAR_DEV_HAS_MULTI_INTR; | ||
745 | if (model && !strcasecmp(model, "eTSEC")) | ||
746 | priv->device_flags = | ||
747 | FSL_GIANFAR_DEV_HAS_GIGABIT | | ||
748 | FSL_GIANFAR_DEV_HAS_COALESCE | | ||
749 | FSL_GIANFAR_DEV_HAS_RMON | | ||
750 | FSL_GIANFAR_DEV_HAS_MULTI_INTR | | ||
751 | FSL_GIANFAR_DEV_HAS_PADDING | | ||
752 | FSL_GIANFAR_DEV_HAS_CSUM | | ||
753 | FSL_GIANFAR_DEV_HAS_VLAN | | ||
754 | FSL_GIANFAR_DEV_HAS_MAGIC_PACKET | | ||
755 | FSL_GIANFAR_DEV_HAS_EXTENDED_HASH | | ||
756 | FSL_GIANFAR_DEV_HAS_TIMER; | ||
757 | |||
758 | ctype = of_get_property(np, "phy-connection-type", NULL); | ||
759 | |||
760 | /* We only care about rgmii-id. The rest are autodetected */ | ||
761 | if (ctype && !strcmp(ctype, "rgmii-id")) | ||
762 | priv->interface = PHY_INTERFACE_MODE_RGMII_ID; | ||
763 | else | ||
764 | priv->interface = PHY_INTERFACE_MODE_MII; | ||
765 | |||
766 | if (of_get_property(np, "fsl,magic-packet", NULL)) | ||
767 | priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET; | ||
768 | |||
769 | priv->phy_node = of_parse_phandle(np, "phy-handle", 0); | ||
770 | |||
771 | /* Find the TBI PHY. If it's not there, we don't support SGMII */ | ||
772 | priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0); | ||
773 | |||
774 | return 0; | ||
775 | |||
776 | rx_alloc_failed: | ||
777 | free_rx_pointers(priv); | ||
778 | tx_alloc_failed: | ||
779 | free_tx_pointers(priv); | ||
780 | err_grp_init: | ||
781 | unmap_group_regs(priv); | ||
782 | free_netdev(dev); | ||
783 | return err; | ||
784 | } | ||
785 | |||
786 | static int gfar_hwtstamp_ioctl(struct net_device *netdev, | ||
787 | struct ifreq *ifr, int cmd) | ||
788 | { | ||
789 | struct hwtstamp_config config; | ||
790 | struct gfar_private *priv = netdev_priv(netdev); | ||
791 | |||
792 | if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) | ||
793 | return -EFAULT; | ||
794 | |||
795 | /* reserved for future extensions */ | ||
796 | if (config.flags) | ||
797 | return -EINVAL; | ||
798 | |||
799 | switch (config.tx_type) { | ||
800 | case HWTSTAMP_TX_OFF: | ||
801 | priv->hwts_tx_en = 0; | ||
802 | break; | ||
803 | case HWTSTAMP_TX_ON: | ||
804 | if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) | ||
805 | return -ERANGE; | ||
806 | priv->hwts_tx_en = 1; | ||
807 | break; | ||
808 | default: | ||
809 | return -ERANGE; | ||
810 | } | ||
811 | |||
812 | switch (config.rx_filter) { | ||
813 | case HWTSTAMP_FILTER_NONE: | ||
814 | if (priv->hwts_rx_en) { | ||
815 | stop_gfar(netdev); | ||
816 | priv->hwts_rx_en = 0; | ||
817 | startup_gfar(netdev); | ||
818 | } | ||
819 | break; | ||
820 | default: | ||
821 | if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) | ||
822 | return -ERANGE; | ||
823 | if (!priv->hwts_rx_en) { | ||
824 | stop_gfar(netdev); | ||
825 | priv->hwts_rx_en = 1; | ||
826 | startup_gfar(netdev); | ||
827 | } | ||
828 | config.rx_filter = HWTSTAMP_FILTER_ALL; | ||
829 | break; | ||
830 | } | ||
831 | |||
832 | return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? | ||
833 | -EFAULT : 0; | ||
834 | } | ||
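| |||
| /* Illustration (not driver code): user space reaches the handler | ||
| * above via the SIOCSHWTSTAMP ioctl, using struct hwtstamp_config | ||
| * from <linux/net_tstamp.h>; "eth0" and fd below are hypothetical: | ||
| * | ||
| *   struct hwtstamp_config cfg = { 0 }; | ||
| *   struct ifreq ifr = { 0 }; | ||
| *   cfg.tx_type = HWTSTAMP_TX_ON; | ||
| *   cfg.rx_filter = HWTSTAMP_FILTER_ALL; | ||
| *   strncpy(ifr.ifr_name, "eth0", IFNAMSIZ); | ||
| *   ifr.ifr_data = (void *)&cfg; | ||
| *   ioctl(fd, SIOCSHWTSTAMP, &ifr); | ||
| * | ||
| * On return the driver copies the (possibly adjusted) config back | ||
| * to user space, as the copy_to_user() above shows. | ||
| */ | ||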
835 | |||
836 | /* Ioctl MII Interface */ | ||
837 | static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | ||
838 | { | ||
839 | struct gfar_private *priv = netdev_priv(dev); | ||
840 | |||
841 | if (!netif_running(dev)) | ||
842 | return -EINVAL; | ||
843 | |||
844 | if (cmd == SIOCSHWTSTAMP) | ||
845 | return gfar_hwtstamp_ioctl(dev, rq, cmd); | ||
846 | |||
847 | if (!priv->phydev) | ||
848 | return -ENODEV; | ||
849 | |||
850 | return phy_mii_ioctl(priv->phydev, rq, cmd); | ||
851 | } | ||
852 | |||
853 | static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs) | ||
854 | { | ||
855 | unsigned int new_bit_map = 0; | ||
856 | int mask = 1 << (max_qs - 1), i; | ||
857 | for (i = 0; i < max_qs; i++) { | ||
858 | if (bit_map & mask) | ||
859 | new_bit_map |= 1 << i; | ||
860 | mask >>= 1; | ||
861 | } | ||
862 | return new_bit_map; | ||
863 | } | ||
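| |||
| /* For example, reverse_bitmap(0x01, 8) == 0x80 and | ||
| * reverse_bitmap(0xC0, 8) == 0x03: an MSB-first queue map becomes | ||
| * LSB-first, matching what for_each_set_bit() expects. | ||
| */ | ||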
864 | |||
865 | static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar, | ||
866 | u32 class) | ||
867 | { | ||
868 | u32 rqfpr = FPR_FILER_MASK; | ||
869 | u32 rqfcr = 0x0; | ||
870 | |||
871 | rqfar--; | ||
872 | rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT; | ||
873 | priv->ftp_rqfpr[rqfar] = rqfpr; | ||
874 | priv->ftp_rqfcr[rqfar] = rqfcr; | ||
875 | gfar_write_filer(priv, rqfar, rqfcr, rqfpr); | ||
876 | |||
877 | rqfar--; | ||
878 | rqfcr = RQFCR_CMP_NOMATCH; | ||
879 | priv->ftp_rqfpr[rqfar] = rqfpr; | ||
880 | priv->ftp_rqfcr[rqfar] = rqfcr; | ||
881 | gfar_write_filer(priv, rqfar, rqfcr, rqfpr); | ||
882 | |||
883 | rqfar--; | ||
884 | rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND; | ||
885 | rqfpr = class; | ||
886 | priv->ftp_rqfcr[rqfar] = rqfcr; | ||
887 | priv->ftp_rqfpr[rqfar] = rqfpr; | ||
888 | gfar_write_filer(priv, rqfar, rqfcr, rqfpr); | ||
889 | |||
890 | rqfar--; | ||
891 | rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND; | ||
892 | rqfpr = class; | ||
893 | priv->ftp_rqfcr[rqfar] = rqfcr; | ||
894 | priv->ftp_rqfpr[rqfar] = rqfpr; | ||
895 | gfar_write_filer(priv, rqfar, rqfcr, rqfpr); | ||
896 | |||
897 | return rqfar; | ||
898 | } | ||
899 | |||
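| /* Layout built below: entry MAX_FILER_IDX holds a catch-all | ||
| * default rule, six four-entry cluster blocks (IPv6 and IPv4, | ||
| * each plain, with UDP and with TCP) sit directly beneath it, | ||
| * and every remaining entry is a masked no-match rule. | ||
| */ | ||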
900 | static void gfar_init_filer_table(struct gfar_private *priv) | ||
901 | { | ||
902 | int i = 0x0; | ||
903 | u32 rqfar = MAX_FILER_IDX; | ||
904 | u32 rqfcr = 0x0; | ||
905 | u32 rqfpr = FPR_FILER_MASK; | ||
906 | |||
907 | /* Default rule */ | ||
908 | rqfcr = RQFCR_CMP_MATCH; | ||
909 | priv->ftp_rqfcr[rqfar] = rqfcr; | ||
910 | priv->ftp_rqfpr[rqfar] = rqfpr; | ||
911 | gfar_write_filer(priv, rqfar, rqfcr, rqfpr); | ||
912 | |||
913 | rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6); | ||
914 | rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP); | ||
915 | rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP); | ||
916 | rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4); | ||
917 | rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP); | ||
918 | rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP); | ||
919 | |||
920 | /* cur_filer_idx indicates the first non-masked rule */ | ||
921 | priv->cur_filer_idx = rqfar; | ||
922 | |||
923 | /* Rest are masked rules */ | ||
924 | rqfcr = RQFCR_CMP_NOMATCH; | ||
925 | for (i = 0; i < rqfar; i++) { | ||
926 | priv->ftp_rqfcr[i] = rqfcr; | ||
927 | priv->ftp_rqfpr[i] = rqfpr; | ||
928 | gfar_write_filer(priv, i, rqfcr, rqfpr); | ||
929 | } | ||
930 | } | ||
931 | |||
932 | static void gfar_detect_errata(struct gfar_private *priv) | ||
933 | { | ||
934 | struct device *dev = &priv->ofdev->dev; | ||
935 | unsigned int pvr = mfspr(SPRN_PVR); | ||
936 | unsigned int svr = mfspr(SPRN_SVR); | ||
937 | unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */ | ||
938 | unsigned int rev = svr & 0xffff; | ||
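| /* For instance, a hypothetical SVR of 0x80b00021 decodes to | ||
| * mod == 0x80b0 and rev == 0x0021; the 0xfff6 mask drops the | ||
| * bits that distinguish the security ("E") variants. | ||
| */ | ||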
939 | |||
940 | /* MPC8313 Rev 2.0 and higher; All MPC837x */ | ||
941 | if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) || | ||
942 | (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0)) | ||
943 | priv->errata |= GFAR_ERRATA_74; | ||
944 | |||
945 | /* MPC8313 and MPC837x all rev */ | ||
946 | if ((pvr == 0x80850010 && mod == 0x80b0) || | ||
947 | (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0)) | ||
948 | priv->errata |= GFAR_ERRATA_76; | ||
949 | |||
950 | /* MPC8313 and MPC837x all rev */ | ||
951 | if ((pvr == 0x80850010 && mod == 0x80b0) || | ||
952 | (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0)) | ||
953 | priv->errata |= GFAR_ERRATA_A002; | ||
954 | |||
955 | /* MPC8313 Rev < 2.0, MPC8548 rev 2.0 */ | ||
956 | if ((pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020) || | ||
957 | (pvr == 0x80210020 && mod == 0x8030 && rev == 0x0020)) | ||
958 | priv->errata |= GFAR_ERRATA_12; | ||
959 | |||
960 | if (priv->errata) | ||
961 | dev_info(dev, "enabled errata workarounds, flags: 0x%x\n", | ||
962 | priv->errata); | ||
963 | } | ||
964 | |||
965 | /* Set up the ethernet device structure, private data, | ||
966 | * and anything else we need before we start */ | ||
967 | static int gfar_probe(struct platform_device *ofdev) | ||
968 | { | ||
969 | u32 tempval; | ||
970 | struct net_device *dev = NULL; | ||
971 | struct gfar_private *priv = NULL; | ||
972 | struct gfar __iomem *regs = NULL; | ||
973 | int err = 0, i, grp_idx = 0; | ||
974 | int len_devname; | ||
975 | u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0; | ||
976 | u32 isrg = 0; | ||
977 | u32 __iomem *baddr; | ||
978 | |||
979 | err = gfar_of_init(ofdev, &dev); | ||
980 | |||
981 | if (err) | ||
982 | return err; | ||
983 | |||
984 | priv = netdev_priv(dev); | ||
985 | priv->ndev = dev; | ||
986 | priv->ofdev = ofdev; | ||
987 | priv->node = ofdev->dev.of_node; | ||
988 | SET_NETDEV_DEV(dev, &ofdev->dev); | ||
989 | |||
990 | spin_lock_init(&priv->bflock); | ||
991 | INIT_WORK(&priv->reset_task, gfar_reset_task); | ||
992 | |||
993 | dev_set_drvdata(&ofdev->dev, priv); | ||
994 | regs = priv->gfargrp[0].regs; | ||
995 | |||
996 | gfar_detect_errata(priv); | ||
997 | |||
998 | /* Stop the DMA engine now, in case it was running before */ | ||
999 | /* (The firmware could have used it, and left it running). */ | ||
1000 | gfar_halt(dev); | ||
1001 | |||
1002 | /* Reset MAC layer */ | ||
1003 | gfar_write(®s->maccfg1, MACCFG1_SOFT_RESET); | ||
1004 | |||
1005 | /* We need to delay at least 3 TX clocks */ | ||
1006 | udelay(2); | ||
1007 | |||
1008 | tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW); | ||
1009 | gfar_write(®s->maccfg1, tempval); | ||
1010 | |||
1011 | /* Initialize MACCFG2. */ | ||
1012 | tempval = MACCFG2_INIT_SETTINGS; | ||
1013 | if (gfar_has_errata(priv, GFAR_ERRATA_74)) | ||
1014 | tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK; | ||
1015 | gfar_write(®s->maccfg2, tempval); | ||
1016 | |||
1017 | /* Initialize ECNTRL */ | ||
1018 | gfar_write(®s->ecntrl, ECNTRL_INIT_SETTINGS); | ||
1019 | |||
1020 | /* Set the dev->base_addr to the gfar reg region */ | ||
1021 | dev->base_addr = (unsigned long) regs; | ||
1022 | |||
1025 | /* Fill in the dev structure */ | ||
1026 | dev->watchdog_timeo = TX_TIMEOUT; | ||
1027 | dev->mtu = 1500; | ||
1028 | dev->netdev_ops = &gfar_netdev_ops; | ||
1029 | dev->ethtool_ops = &gfar_ethtool_ops; | ||
1030 | |||
1031 | /* Register for NAPI; we register one NAPI context per group */ | ||
1032 | for (i = 0; i < priv->num_grps; i++) | ||
1033 | netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll, GFAR_DEV_WEIGHT); | ||
1034 | |||
1035 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) { | ||
1036 | dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | | ||
1037 | NETIF_F_RXCSUM; | ||
1038 | dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | | ||
1039 | NETIF_F_RXCSUM | NETIF_F_HIGHDMA; | ||
1040 | } | ||
1041 | |||
1042 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) { | ||
1043 | dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; | ||
1044 | dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; | ||
1045 | } | ||
1046 | |||
1047 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) { | ||
1048 | priv->extended_hash = 1; | ||
1049 | priv->hash_width = 9; | ||
1050 | |||
1051 | priv->hash_regs[0] = ®s->igaddr0; | ||
1052 | priv->hash_regs[1] = ®s->igaddr1; | ||
1053 | priv->hash_regs[2] = ®s->igaddr2; | ||
1054 | priv->hash_regs[3] = ®s->igaddr3; | ||
1055 | priv->hash_regs[4] = ®s->igaddr4; | ||
1056 | priv->hash_regs[5] = ®s->igaddr5; | ||
1057 | priv->hash_regs[6] = ®s->igaddr6; | ||
1058 | priv->hash_regs[7] = ®s->igaddr7; | ||
1059 | priv->hash_regs[8] = ®s->gaddr0; | ||
1060 | priv->hash_regs[9] = ®s->gaddr1; | ||
1061 | priv->hash_regs[10] = ®s->gaddr2; | ||
1062 | priv->hash_regs[11] = ®s->gaddr3; | ||
1063 | priv->hash_regs[12] = ®s->gaddr4; | ||
1064 | priv->hash_regs[13] = ®s->gaddr5; | ||
1065 | priv->hash_regs[14] = ®s->gaddr6; | ||
1066 | priv->hash_regs[15] = ®s->gaddr7; | ||
1067 | |||
1068 | } else { | ||
1069 | priv->extended_hash = 0; | ||
1070 | priv->hash_width = 8; | ||
1071 | |||
1072 | priv->hash_regs[0] = ®s->gaddr0; | ||
1073 | priv->hash_regs[1] = ®s->gaddr1; | ||
1074 | priv->hash_regs[2] = ®s->gaddr2; | ||
1075 | priv->hash_regs[3] = ®s->gaddr3; | ||
1076 | priv->hash_regs[4] = ®s->gaddr4; | ||
1077 | priv->hash_regs[5] = ®s->gaddr5; | ||
1078 | priv->hash_regs[6] = ®s->gaddr6; | ||
1079 | priv->hash_regs[7] = ®s->gaddr7; | ||
1080 | } | ||
1081 | |||
1082 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING) | ||
1083 | priv->padding = DEFAULT_PADDING; | ||
1084 | else | ||
1085 | priv->padding = 0; | ||
1086 | |||
1087 | if (dev->features & NETIF_F_IP_CSUM || | ||
1088 | priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) | ||
1089 | dev->hard_header_len += GMAC_FCB_LEN; | ||
1090 | |||
1091 | /* Program the isrg regs only if number of grps > 1 */ | ||
1092 | if (priv->num_grps > 1) { | ||
1093 | baddr = ®s->isrg0; | ||
1094 | for (i = 0; i < priv->num_grps; i++) { | ||
1095 | isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX); | ||
1096 | isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX); | ||
1097 | gfar_write(baddr, isrg); | ||
1098 | baddr++; | ||
1099 | isrg = 0x0; | ||
1100 | } | ||
1101 | } | ||
1102 | |||
1103 | /* Need to reverse the bit maps as bit_map's MSB is q0, | ||
1104 | * but for_each_set_bit parses from right to left, which | ||
1105 | * basically reverses the queue numbers */ | ||
1106 | for (i = 0; i < priv->num_grps; i++) { | ||
1107 | priv->gfargrp[i].tx_bit_map = reverse_bitmap( | ||
1108 | priv->gfargrp[i].tx_bit_map, MAX_TX_QS); | ||
1109 | priv->gfargrp[i].rx_bit_map = reverse_bitmap( | ||
1110 | priv->gfargrp[i].rx_bit_map, MAX_RX_QS); | ||
1111 | } | ||
1112 | |||
1113 | /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values, | ||
1114 | * also assign queues to groups */ | ||
1115 | for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) { | ||
1116 | priv->gfargrp[grp_idx].num_rx_queues = 0x0; | ||
1117 | for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map, | ||
1118 | priv->num_rx_queues) { | ||
1119 | priv->gfargrp[grp_idx].num_rx_queues++; | ||
1120 | priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx]; | ||
1121 | rstat = rstat | (RSTAT_CLEAR_RHALT >> i); | ||
1122 | rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i); | ||
1123 | } | ||
1124 | priv->gfargrp[grp_idx].num_tx_queues = 0x0; | ||
1125 | for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map, | ||
1126 | priv->num_tx_queues) { | ||
1127 | priv->gfargrp[grp_idx].num_tx_queues++; | ||
1128 | priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx]; | ||
1129 | tstat = tstat | (TSTAT_CLEAR_THALT >> i); | ||
1130 | tqueue = tqueue | (TQUEUE_EN0 >> i); | ||
1131 | } | ||
1132 | priv->gfargrp[grp_idx].rstat = rstat; | ||
1133 | priv->gfargrp[grp_idx].tstat = tstat; | ||
1134 | rstat = tstat = 0; | ||
1135 | } | ||
1136 | |||
1137 | gfar_write(®s->rqueue, rqueue); | ||
1138 | gfar_write(®s->tqueue, tqueue); | ||
1139 | |||
1140 | priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE; | ||
1141 | |||
1142 | /* Initializing some of the rx/tx queue level parameters */ | ||
1143 | for (i = 0; i < priv->num_tx_queues; i++) { | ||
1144 | priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE; | ||
1145 | priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE; | ||
1146 | priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE; | ||
1147 | priv->tx_queue[i]->txic = DEFAULT_TXIC; | ||
1148 | } | ||
1149 | |||
1150 | for (i = 0; i < priv->num_rx_queues; i++) { | ||
1151 | priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE; | ||
1152 | priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE; | ||
1153 | priv->rx_queue[i]->rxic = DEFAULT_RXIC; | ||
1154 | } | ||
1155 | |||
1156 | /* Always enable the rx filer */ | ||
1157 | priv->rx_filer_enable = 1; | ||
1158 | /* Enable most messages by default */ | ||
1159 | priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1; | ||
1160 | |||
1161 | /* Carrier starts down, phylib will bring it up */ | ||
1162 | netif_carrier_off(dev); | ||
1163 | |||
1164 | err = register_netdev(dev); | ||
1165 | |||
1166 | if (err) { | ||
1167 | pr_err("%s: Cannot register net device, aborting\n", dev->name); | ||
1168 | goto register_fail; | ||
1169 | } | ||
1170 | |||
1171 | device_init_wakeup(&dev->dev, | ||
1172 | priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); | ||
1173 | |||
1174 | /* fill out IRQ number and name fields */ | ||
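| /* The names built below look like "eth0_g0_tx", "eth0_g0_rx" and | ||
| * "eth0_g0_er" for group 0 of a multi-interrupt device ("eth0" | ||
| * stands in for the real dev->name); '0' + i turns the group | ||
| * index into its ASCII digit. | ||
| */ | ||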
1175 | len_devname = strlen(dev->name); | ||
1176 | for (i = 0; i < priv->num_grps; i++) { | ||
1177 | strncpy(&priv->gfargrp[i].int_name_tx[0], dev->name, | ||
1178 | len_devname); | ||
1179 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { | ||
1180 | strncpy(&priv->gfargrp[i].int_name_tx[len_devname], | ||
1181 | "_g", sizeof("_g")); | ||
1182 | priv->gfargrp[i].int_name_tx[ | ||
1183 | strlen(priv->gfargrp[i].int_name_tx)] = '0' + i; | ||
1184 | strncpy(&priv->gfargrp[i].int_name_tx[strlen( | ||
1185 | priv->gfargrp[i].int_name_tx)], | ||
1186 | "_tx", sizeof("_tx") + 1); | ||
1187 | |||
1188 | strncpy(&priv->gfargrp[i].int_name_rx[0], dev->name, | ||
1189 | len_devname); | ||
1190 | strncpy(&priv->gfargrp[i].int_name_rx[len_devname], | ||
1191 | "_g", sizeof("_g")); | ||
1192 | priv->gfargrp[i].int_name_rx[ | ||
1193 | strlen(priv->gfargrp[i].int_name_rx)] = '0' + i; | ||
1194 | strncpy(&priv->gfargrp[i].int_name_rx[strlen( | ||
1195 | priv->gfargrp[i].int_name_rx)], | ||
1196 | "_rx", sizeof("_rx") + 1); | ||
1197 | |||
1198 | strncpy(&priv->gfargrp[i].int_name_er[0], dev->name, | ||
1199 | len_devname); | ||
1200 | strncpy(&priv->gfargrp[i].int_name_er[len_devname], | ||
1201 | "_g", sizeof("_g")); | ||
1202 | priv->gfargrp[i].int_name_er[strlen( | ||
1203 | priv->gfargrp[i].int_name_er)] = '0' + i; | ||
1204 | strncpy(&priv->gfargrp[i].int_name_er[strlen( | ||
1205 | priv->gfargrp[i].int_name_er)], | ||
1206 | "_er", sizeof("_er") + 1); | ||
1207 | } else | ||
1208 | priv->gfargrp[i].int_name_tx[len_devname] = '\0'; | ||
1209 | } | ||
1210 | |||
1211 | /* Initialize the filer table */ | ||
1212 | gfar_init_filer_table(priv); | ||
1213 | |||
1214 | /* Create all the sysfs files */ | ||
1215 | gfar_init_sysfs(dev); | ||
1216 | |||
1217 | /* Print out the device info */ | ||
1218 | netdev_info(dev, "mac: %pM\n", dev->dev_addr); | ||
1219 | |||
1220 | /* Even more device info helps when determining which kernel */ | ||
1221 | /* provided which set of benchmarks. */ | ||
1222 | netdev_info(dev, "Running with NAPI enabled\n"); | ||
1223 | for (i = 0; i < priv->num_rx_queues; i++) | ||
1224 | netdev_info(dev, "RX BD ring size for Q[%d]: %d\n", | ||
1225 | i, priv->rx_queue[i]->rx_ring_size); | ||
1226 | for (i = 0; i < priv->num_tx_queues; i++) | ||
1227 | netdev_info(dev, "TX BD ring size for Q[%d]: %d\n", | ||
1228 | i, priv->tx_queue[i]->tx_ring_size); | ||
1229 | |||
1230 | return 0; | ||
1231 | |||
1232 | register_fail: | ||
1233 | unmap_group_regs(priv); | ||
1234 | free_tx_pointers(priv); | ||
1235 | free_rx_pointers(priv); | ||
1236 | if (priv->phy_node) | ||
1237 | of_node_put(priv->phy_node); | ||
1238 | if (priv->tbi_node) | ||
1239 | of_node_put(priv->tbi_node); | ||
1240 | free_netdev(dev); | ||
1241 | return err; | ||
1242 | } | ||
1243 | |||
1244 | static int gfar_remove(struct platform_device *ofdev) | ||
1245 | { | ||
1246 | struct gfar_private *priv = dev_get_drvdata(&ofdev->dev); | ||
1247 | |||
1248 | if (priv->phy_node) | ||
1249 | of_node_put(priv->phy_node); | ||
1250 | if (priv->tbi_node) | ||
1251 | of_node_put(priv->tbi_node); | ||
1252 | |||
1253 | dev_set_drvdata(&ofdev->dev, NULL); | ||
1254 | |||
1255 | unregister_netdev(priv->ndev); | ||
1256 | unmap_group_regs(priv); | ||
1257 | free_netdev(priv->ndev); | ||
1258 | |||
1259 | return 0; | ||
1260 | } | ||
1261 | |||
1262 | #ifdef CONFIG_PM | ||
1263 | |||
1264 | static int gfar_suspend(struct device *dev) | ||
1265 | { | ||
1266 | struct gfar_private *priv = dev_get_drvdata(dev); | ||
1267 | struct net_device *ndev = priv->ndev; | ||
1268 | struct gfar __iomem *regs = priv->gfargrp[0].regs; | ||
1269 | unsigned long flags; | ||
1270 | u32 tempval; | ||
1271 | |||
1272 | int magic_packet = priv->wol_en && | ||
1273 | (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); | ||
1274 | |||
1275 | netif_device_detach(ndev); | ||
1276 | |||
1277 | if (netif_running(ndev)) { | ||
1278 | |||
1279 | local_irq_save(flags); | ||
1280 | lock_tx_qs(priv); | ||
1281 | lock_rx_qs(priv); | ||
1282 | |||
1283 | gfar_halt_nodisable(ndev); | ||
1284 | |||
1285 | /* Disable Tx, and Rx if wake-on-LAN is disabled. */ | ||
1286 | tempval = gfar_read(®s->maccfg1); | ||
1287 | |||
1288 | tempval &= ~MACCFG1_TX_EN; | ||
1289 | |||
1290 | if (!magic_packet) | ||
1291 | tempval &= ~MACCFG1_RX_EN; | ||
1292 | |||
1293 | gfar_write(®s->maccfg1, tempval); | ||
1294 | |||
1295 | unlock_rx_qs(priv); | ||
1296 | unlock_tx_qs(priv); | ||
1297 | local_irq_restore(flags); | ||
1298 | |||
1299 | disable_napi(priv); | ||
1300 | |||
1301 | if (magic_packet) { | ||
1302 | /* Enable interrupt on Magic Packet */ | ||
1303 | gfar_write(®s->imask, IMASK_MAG); | ||
1304 | |||
1305 | /* Enable Magic Packet mode */ | ||
1306 | tempval = gfar_read(®s->maccfg2); | ||
1307 | tempval |= MACCFG2_MPEN; | ||
1308 | gfar_write(®s->maccfg2, tempval); | ||
1309 | } else { | ||
1310 | phy_stop(priv->phydev); | ||
1311 | } | ||
1312 | } | ||
1313 | |||
1314 | return 0; | ||
1315 | } | ||
1316 | |||
1317 | static int gfar_resume(struct device *dev) | ||
1318 | { | ||
1319 | struct gfar_private *priv = dev_get_drvdata(dev); | ||
1320 | struct net_device *ndev = priv->ndev; | ||
1321 | struct gfar __iomem *regs = priv->gfargrp[0].regs; | ||
1322 | unsigned long flags; | ||
1323 | u32 tempval; | ||
1324 | int magic_packet = priv->wol_en && | ||
1325 | (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); | ||
1326 | |||
1327 | if (!netif_running(ndev)) { | ||
1328 | netif_device_attach(ndev); | ||
1329 | return 0; | ||
1330 | } | ||
1331 | |||
1332 | if (!magic_packet && priv->phydev) | ||
1333 | phy_start(priv->phydev); | ||
1334 | |||
1335 | /* Disable Magic Packet mode, in case something | ||
1336 | * else woke us up. | ||
1337 | */ | ||
1338 | local_irq_save(flags); | ||
1339 | lock_tx_qs(priv); | ||
1340 | lock_rx_qs(priv); | ||
1341 | |||
1342 | tempval = gfar_read(®s->maccfg2); | ||
1343 | tempval &= ~MACCFG2_MPEN; | ||
1344 | gfar_write(®s->maccfg2, tempval); | ||
1345 | |||
1346 | gfar_start(ndev); | ||
1347 | |||
1348 | unlock_rx_qs(priv); | ||
1349 | unlock_tx_qs(priv); | ||
1350 | local_irq_restore(flags); | ||
1351 | |||
1352 | netif_device_attach(ndev); | ||
1353 | |||
1354 | enable_napi(priv); | ||
1355 | |||
1356 | return 0; | ||
1357 | } | ||
1358 | |||
1359 | static int gfar_restore(struct device *dev) | ||
1360 | { | ||
1361 | struct gfar_private *priv = dev_get_drvdata(dev); | ||
1362 | struct net_device *ndev = priv->ndev; | ||
1363 | |||
1364 | if (!netif_running(ndev)) | ||
1365 | return 0; | ||
1366 | |||
1367 | gfar_init_bds(ndev); | ||
1368 | init_registers(ndev); | ||
1369 | gfar_set_mac_address(ndev); | ||
1370 | gfar_init_mac(ndev); | ||
1371 | gfar_start(ndev); | ||
1372 | |||
1373 | priv->oldlink = 0; | ||
1374 | priv->oldspeed = 0; | ||
1375 | priv->oldduplex = -1; | ||
1376 | |||
1377 | if (priv->phydev) | ||
1378 | phy_start(priv->phydev); | ||
1379 | |||
1380 | netif_device_attach(ndev); | ||
1381 | enable_napi(priv); | ||
1382 | |||
1383 | return 0; | ||
1384 | } | ||
1385 | |||
1386 | static struct dev_pm_ops gfar_pm_ops = { | ||
1387 | .suspend = gfar_suspend, | ||
1388 | .resume = gfar_resume, | ||
1389 | .freeze = gfar_suspend, | ||
1390 | .thaw = gfar_resume, | ||
1391 | .restore = gfar_restore, | ||
1392 | }; | ||
1393 | |||
1394 | #define GFAR_PM_OPS (&gfar_pm_ops) | ||
1395 | |||
1396 | #else | ||
1397 | |||
1398 | #define GFAR_PM_OPS NULL | ||
1399 | |||
1400 | #endif | ||
1401 | |||
1402 | /* Reads the controller's registers to determine what interface | ||
1403 | * connects it to the PHY. | ||
1404 | */ | ||
1405 | static phy_interface_t gfar_get_interface(struct net_device *dev) | ||
1406 | { | ||
1407 | struct gfar_private *priv = netdev_priv(dev); | ||
1408 | struct gfar __iomem *regs = priv->gfargrp[0].regs; | ||
1409 | u32 ecntrl; | ||
1410 | |||
1411 | ecntrl = gfar_read(®s->ecntrl); | ||
1412 | |||
1413 | if (ecntrl & ECNTRL_SGMII_MODE) | ||
1414 | return PHY_INTERFACE_MODE_SGMII; | ||
1415 | |||
1416 | if (ecntrl & ECNTRL_TBI_MODE) { | ||
1417 | if (ecntrl & ECNTRL_REDUCED_MODE) | ||
1418 | return PHY_INTERFACE_MODE_RTBI; | ||
1419 | else | ||
1420 | return PHY_INTERFACE_MODE_TBI; | ||
1421 | } | ||
1422 | |||
1423 | if (ecntrl & ECNTRL_REDUCED_MODE) { | ||
1424 | if (ecntrl & ECNTRL_REDUCED_MII_MODE) | ||
1425 | return PHY_INTERFACE_MODE_RMII; | ||
1426 | else { | ||
1427 | phy_interface_t interface = priv->interface; | ||
1428 | |||
1429 | /* | ||
1430 | * This isn't autodetected right now, so it must | ||
1431 | * be set by the device tree or platform code. | ||
1432 | */ | ||
1433 | if (interface == PHY_INTERFACE_MODE_RGMII_ID) | ||
1434 | return PHY_INTERFACE_MODE_RGMII_ID; | ||
1435 | |||
1436 | return PHY_INTERFACE_MODE_RGMII; | ||
1437 | } | ||
1438 | } | ||
1439 | |||
1440 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT) | ||
1441 | return PHY_INTERFACE_MODE_GMII; | ||
1442 | |||
1443 | return PHY_INTERFACE_MODE_MII; | ||
1444 | } | ||
1445 | |||
1446 | |||
1447 | /* Initializes driver's PHY state, and attaches to the PHY. | ||
1448 | * Returns 0 on success. | ||
1449 | */ | ||
1450 | static int init_phy(struct net_device *dev) | ||
1451 | { | ||
1452 | struct gfar_private *priv = netdev_priv(dev); | ||
1453 | uint gigabit_support = | ||
1454 | priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ? | ||
1455 | SUPPORTED_1000baseT_Full : 0; | ||
1456 | phy_interface_t interface; | ||
1457 | |||
1458 | priv->oldlink = 0; | ||
1459 | priv->oldspeed = 0; | ||
1460 | priv->oldduplex = -1; | ||
1461 | |||
1462 | interface = gfar_get_interface(dev); | ||
1463 | |||
1464 | priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0, | ||
1465 | interface); | ||
1466 | if (!priv->phydev) | ||
1467 | priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link, | ||
1468 | interface); | ||
1469 | if (!priv->phydev) { | ||
1470 | dev_err(&dev->dev, "could not attach to PHY\n"); | ||
1471 | return -ENODEV; | ||
1472 | } | ||
1473 | |||
1474 | if (interface == PHY_INTERFACE_MODE_SGMII) | ||
1475 | gfar_configure_serdes(dev); | ||
1476 | |||
1477 | /* Remove any features not supported by the controller */ | ||
1478 | priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support); | ||
1479 | priv->phydev->advertising = priv->phydev->supported; | ||
1480 | |||
1481 | return 0; | ||
1482 | } | ||
1483 | |||
1484 | /* | ||
1485 | * Initialize TBI PHY interface for communicating with the | ||
1486 | * SERDES lynx PHY on the chip. We communicate with this PHY | ||
1487 | * through the MDIO bus on each controller, treating it as a | ||
1488 | * "normal" PHY at the address found in the TBIPA register. We assume | ||
1489 | * that the TBIPA register is valid. Either the MDIO bus code will set | ||
1490 | * it to a value that doesn't conflict with other PHYs on the bus, or the | ||
1491 | * value doesn't matter, as there are no other PHYs on the bus. | ||
1492 | */ | ||
1493 | static void gfar_configure_serdes(struct net_device *dev) | ||
1494 | { | ||
1495 | struct gfar_private *priv = netdev_priv(dev); | ||
1496 | struct phy_device *tbiphy; | ||
1497 | |||
1498 | if (!priv->tbi_node) { | ||
1499 | dev_warn(&dev->dev, "error: SGMII mode requires that the " | ||
1500 | "device tree specify a tbi-handle\n"); | ||
1501 | return; | ||
1502 | } | ||
1503 | |||
1504 | tbiphy = of_phy_find_device(priv->tbi_node); | ||
1505 | if (!tbiphy) { | ||
1506 | dev_err(&dev->dev, "error: Could not get TBI device\n"); | ||
1507 | return; | ||
1508 | } | ||
1509 | |||
1510 | /* | ||
1511 | * If the link is already up, we must already be ok, and don't need to | ||
1512 | * configure and reset the TBI<->SerDes link. Maybe U-Boot configured | ||
1513 | * everything for us? Resetting it takes the link down and requires | ||
1514 | * several seconds for it to come back. | ||
1515 | */ | ||
1516 | if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) | ||
1517 | return; | ||
1518 | |||
1519 | /* Single clk mode, MII mode off (for SerDes communication) */ | ||
1520 | phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT); | ||
1521 | |||
1522 | phy_write(tbiphy, MII_ADVERTISE, | ||
1523 | ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE | | ||
1524 | ADVERTISE_1000XPSE_ASYM); | ||
1525 | |||
1526 | phy_write(tbiphy, MII_BMCR, BMCR_ANENABLE | | ||
1527 | BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000); | ||
1528 | } | ||
1529 | |||
1530 | static void init_registers(struct net_device *dev) | ||
1531 | { | ||
1532 | struct gfar_private *priv = netdev_priv(dev); | ||
1533 | struct gfar __iomem *regs = NULL; | ||
1534 | int i = 0; | ||
1535 | |||
1536 | for (i = 0; i < priv->num_grps; i++) { | ||
1537 | regs = priv->gfargrp[i].regs; | ||
1538 | /* Clear IEVENT */ | ||
1539 | gfar_write(®s->ievent, IEVENT_INIT_CLEAR); | ||
1540 | |||
1541 | /* Initialize IMASK */ | ||
1542 | gfar_write(®s->imask, IMASK_INIT_CLEAR); | ||
1543 | } | ||
1544 | |||
1545 | regs = priv->gfargrp[0].regs; | ||
1546 | /* Init hash registers to zero */ | ||
1547 | gfar_write(®s->igaddr0, 0); | ||
1548 | gfar_write(®s->igaddr1, 0); | ||
1549 | gfar_write(®s->igaddr2, 0); | ||
1550 | gfar_write(®s->igaddr3, 0); | ||
1551 | gfar_write(®s->igaddr4, 0); | ||
1552 | gfar_write(®s->igaddr5, 0); | ||
1553 | gfar_write(®s->igaddr6, 0); | ||
1554 | gfar_write(®s->igaddr7, 0); | ||
1555 | |||
1556 | gfar_write(®s->gaddr0, 0); | ||
1557 | gfar_write(®s->gaddr1, 0); | ||
1558 | gfar_write(®s->gaddr2, 0); | ||
1559 | gfar_write(®s->gaddr3, 0); | ||
1560 | gfar_write(®s->gaddr4, 0); | ||
1561 | gfar_write(®s->gaddr5, 0); | ||
1562 | gfar_write(®s->gaddr6, 0); | ||
1563 | gfar_write(®s->gaddr7, 0); | ||
1564 | |||
1565 | /* Zero out the rmon mib registers if it has them */ | ||
1566 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) { | ||
1567 | memset_io(&regs->rmon, 0, sizeof(struct rmon_mib)); | ||
1568 | |||
1569 | /* Mask off the CAM interrupts */ | ||
1570 | gfar_write(®s->rmon.cam1, 0xffffffff); | ||
1571 | gfar_write(®s->rmon.cam2, 0xffffffff); | ||
1572 | } | ||
1573 | |||
1574 | /* Initialize the max receive buffer length */ | ||
1575 | gfar_write(®s->mrblr, priv->rx_buffer_size); | ||
1576 | |||
1577 | /* Initialize the Minimum Frame Length Register */ | ||
1578 | gfar_write(®s->minflr, MINFLR_INIT_SETTINGS); | ||
1579 | } | ||
1580 | |||
1581 | static int __gfar_is_rx_idle(struct gfar_private *priv) | ||
1582 | { | ||
1583 | u32 res; | ||
1584 | |||
1585 | /* | ||
1586 | * Normally the TSEC should not hang on GRS commands, so we should | ||
1587 | * actually wait for the IEVENT_GRSC flag. | ||
1588 | */ | ||
1589 | if (likely(!gfar_has_errata(priv, GFAR_ERRATA_A002))) | ||
1590 | return 0; | ||
1591 | |||
1592 | /* | ||
1593 | * Read the eTSEC register at offset 0xD1C. If bits 7-14 are | ||
1594 | * the same as bits 23-30, the eTSEC Rx is assumed to be idle | ||
1595 | * and the Rx can be safely reset. | ||
1596 | */ | ||
1597 | res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c); | ||
1598 | res &= 0x7f807f80; | ||
1599 | if ((res & 0xffff) == (res >> 16)) | ||
1600 | return 1; | ||
1601 | |||
1602 | return 0; | ||
1603 | } | ||
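| |||
| /* Worked example: a masked readback of 0x43004300 has equal | ||
| * halves (0x4300 == 0x4300), so the Rx side is reported idle; | ||
| * 0x12005600 does not (0x5600 != 0x1200), so we keep waiting. | ||
| */ | ||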
1604 | |||
1605 | /* Halt the receive and transmit queues */ | ||
1606 | static void gfar_halt_nodisable(struct net_device *dev) | ||
1607 | { | ||
1608 | struct gfar_private *priv = netdev_priv(dev); | ||
1609 | struct gfar __iomem *regs = NULL; | ||
1610 | u32 tempval; | ||
1611 | int i = 0; | ||
1612 | |||
1613 | for (i = 0; i < priv->num_grps; i++) { | ||
1614 | regs = priv->gfargrp[i].regs; | ||
1615 | /* Mask all interrupts */ | ||
1616 | gfar_write(®s->imask, IMASK_INIT_CLEAR); | ||
1617 | |||
1618 | /* Clear all interrupts */ | ||
1619 | gfar_write(®s->ievent, IEVENT_INIT_CLEAR); | ||
1620 | } | ||
1621 | |||
1622 | regs = priv->gfargrp[0].regs; | ||
1623 | /* Stop the DMA, and wait for it to stop */ | ||
1624 | tempval = gfar_read(®s->dmactrl); | ||
1625 | if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) | ||
1626 | != (DMACTRL_GRS | DMACTRL_GTS)) { | ||
1627 | int ret; | ||
1628 | |||
1629 | tempval |= (DMACTRL_GRS | DMACTRL_GTS); | ||
1630 | gfar_write(®s->dmactrl, tempval); | ||
1631 | |||
1632 | do { | ||
1633 | ret = spin_event_timeout(((gfar_read(®s->ievent) & | ||
1634 | (IEVENT_GRSC | IEVENT_GTSC)) == | ||
1635 | (IEVENT_GRSC | IEVENT_GTSC)), 1000000, 0); | ||
1636 | if (!ret && !(gfar_read(®s->ievent) & IEVENT_GRSC)) | ||
1637 | ret = __gfar_is_rx_idle(priv); | ||
1638 | } while (!ret); | ||
1639 | } | ||
1640 | } | ||
1641 | |||
1642 | /* Halt the receive and transmit queues */ | ||
1643 | void gfar_halt(struct net_device *dev) | ||
1644 | { | ||
1645 | struct gfar_private *priv = netdev_priv(dev); | ||
1646 | struct gfar __iomem *regs = priv->gfargrp[0].regs; | ||
1647 | u32 tempval; | ||
1648 | |||
1649 | gfar_halt_nodisable(dev); | ||
1650 | |||
1651 | /* Disable Rx and Tx */ | ||
1652 | tempval = gfar_read(®s->maccfg1); | ||
1653 | tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN); | ||
1654 | gfar_write(®s->maccfg1, tempval); | ||
1655 | } | ||
1656 | |||
1657 | static void free_grp_irqs(struct gfar_priv_grp *grp) | ||
1658 | { | ||
1659 | free_irq(grp->interruptError, grp); | ||
1660 | free_irq(grp->interruptTransmit, grp); | ||
1661 | free_irq(grp->interruptReceive, grp); | ||
1662 | } | ||
1663 | |||
1664 | void stop_gfar(struct net_device *dev) | ||
1665 | { | ||
1666 | struct gfar_private *priv = netdev_priv(dev); | ||
1667 | unsigned long flags; | ||
1668 | int i; | ||
1669 | |||
1670 | phy_stop(priv->phydev); | ||
1671 | |||
1673 | /* Lock it down */ | ||
1674 | local_irq_save(flags); | ||
1675 | lock_tx_qs(priv); | ||
1676 | lock_rx_qs(priv); | ||
1677 | |||
1678 | gfar_halt(dev); | ||
1679 | |||
1680 | unlock_rx_qs(priv); | ||
1681 | unlock_tx_qs(priv); | ||
1682 | local_irq_restore(flags); | ||
1683 | |||
1684 | /* Free the IRQs */ | ||
1685 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { | ||
1686 | for (i = 0; i < priv->num_grps; i++) | ||
1687 | free_grp_irqs(&priv->gfargrp[i]); | ||
1688 | } else { | ||
1689 | for (i = 0; i < priv->num_grps; i++) | ||
1690 | free_irq(priv->gfargrp[i].interruptTransmit, | ||
1691 | &priv->gfargrp[i]); | ||
1692 | } | ||
1693 | |||
1694 | free_skb_resources(priv); | ||
1695 | } | ||
1696 | |||
1697 | static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue) | ||
1698 | { | ||
1699 | struct txbd8 *txbdp; | ||
1700 | struct gfar_private *priv = netdev_priv(tx_queue->dev); | ||
1701 | int i, j; | ||
1702 | |||
1703 | txbdp = tx_queue->tx_bd_base; | ||
1704 | |||
1705 | for (i = 0; i < tx_queue->tx_ring_size; i++) { | ||
1706 | if (!tx_queue->tx_skbuff[i]) | ||
1707 | continue; | ||
1708 | |||
1709 | dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr, | ||
1710 | txbdp->length, DMA_TO_DEVICE); | ||
1711 | txbdp->lstatus = 0; | ||
1712 | for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags; | ||
1713 | j++) { | ||
1714 | txbdp++; | ||
1715 | dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr, | ||
1716 | txbdp->length, DMA_TO_DEVICE); | ||
1717 | } | ||
1718 | txbdp++; | ||
1719 | dev_kfree_skb_any(tx_queue->tx_skbuff[i]); | ||
1720 | tx_queue->tx_skbuff[i] = NULL; | ||
1721 | } | ||
1722 | kfree(tx_queue->tx_skbuff); | ||
1723 | } | ||
1724 | |||
1725 | static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue) | ||
1726 | { | ||
1727 | struct rxbd8 *rxbdp; | ||
1728 | struct gfar_private *priv = netdev_priv(rx_queue->dev); | ||
1729 | int i; | ||
1730 | |||
1731 | rxbdp = rx_queue->rx_bd_base; | ||
1732 | |||
1733 | for (i = 0; i < rx_queue->rx_ring_size; i++) { | ||
1734 | if (rx_queue->rx_skbuff[i]) { | ||
1735 | dma_unmap_single(&priv->ofdev->dev, | ||
1736 | rxbdp->bufPtr, priv->rx_buffer_size, | ||
1737 | DMA_FROM_DEVICE); | ||
1738 | dev_kfree_skb_any(rx_queue->rx_skbuff[i]); | ||
1739 | rx_queue->rx_skbuff[i] = NULL; | ||
1740 | } | ||
1741 | rxbdp->lstatus = 0; | ||
1742 | rxbdp->bufPtr = 0; | ||
1743 | rxbdp++; | ||
1744 | } | ||
1745 | kfree(rx_queue->rx_skbuff); | ||
1746 | } | ||
1747 | |||
1748 | /* If there are any tx skbs or rx skbs still around, free them. | ||
1749 | * Then free tx_skbuff and rx_skbuff */ | ||
1750 | static void free_skb_resources(struct gfar_private *priv) | ||
1751 | { | ||
1752 | struct gfar_priv_tx_q *tx_queue = NULL; | ||
1753 | struct gfar_priv_rx_q *rx_queue = NULL; | ||
1754 | int i; | ||
1755 | |||
1756 | /* Go through all the buffer descriptors and free their data buffers */ | ||
1757 | for (i = 0; i < priv->num_tx_queues; i++) { | ||
1758 | tx_queue = priv->tx_queue[i]; | ||
1759 | if (tx_queue->tx_skbuff) | ||
1760 | free_skb_tx_queue(tx_queue); | ||
1761 | } | ||
1762 | |||
1763 | for (i = 0; i < priv->num_rx_queues; i++) { | ||
1764 | rx_queue = priv->rx_queue[i]; | ||
1765 | if (rx_queue->rx_skbuff) | ||
1766 | free_skb_rx_queue(rx_queue); | ||
1767 | } | ||
1768 | |||
1769 | dma_free_coherent(&priv->ofdev->dev, | ||
1770 | sizeof(struct txbd8) * priv->total_tx_ring_size + | ||
1771 | sizeof(struct rxbd8) * priv->total_rx_ring_size, | ||
1772 | priv->tx_queue[0]->tx_bd_base, | ||
1773 | priv->tx_queue[0]->tx_bd_dma_base); | ||
1774 | skb_queue_purge(&priv->rx_recycle); | ||
1775 | } | ||
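
The single dma_free_coherent() call above can release every ring at once because the allocation side (gfar_alloc_skb_resources(), outside this hunk) carves all Tx and Rx BD rings out of one coherent block anchored at tx_queue[0]. A minimal sketch of that layout, assuming the same structures this driver uses:

    /* Sketch: one coherent block holds every TxBD ring followed by
     * every RxBD ring; each queue's base is an offset into it. */
    void *vaddr;
    dma_addr_t addr;
    int i;

    vaddr = dma_alloc_coherent(&priv->ofdev->dev,
            sizeof(struct txbd8) * priv->total_tx_ring_size +
            sizeof(struct rxbd8) * priv->total_rx_ring_size,
            &addr, GFP_KERNEL);

    for (i = 0; i < priv->num_tx_queues; i++) {
        priv->tx_queue[i]->tx_bd_base = vaddr;
        priv->tx_queue[i]->tx_bd_dma_base = addr;
        addr  += sizeof(struct txbd8) * priv->tx_queue[i]->tx_ring_size;
        vaddr += sizeof(struct txbd8) * priv->tx_queue[i]->tx_ring_size;
    }
    /* the Rx rings continue from here in the same block */

Freeing from tx_queue[0]->tx_bd_dma_base therefore returns the whole block.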
1776 | |||
1777 | void gfar_start(struct net_device *dev) | ||
1778 | { | ||
1779 | struct gfar_private *priv = netdev_priv(dev); | ||
1780 | struct gfar __iomem *regs = priv->gfargrp[0].regs; | ||
1781 | u32 tempval; | ||
1782 | int i = 0; | ||
1783 | |||
1784 | /* Enable Rx and Tx in MACCFG1 */ | ||
1785 | tempval = gfar_read(®s->maccfg1); | ||
1786 | tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN); | ||
1787 | gfar_write(®s->maccfg1, tempval); | ||
1788 | |||
1789 | /* Initialize DMACTRL to have WWR and WOP */ | ||
1790 | tempval = gfar_read(®s->dmactrl); | ||
1791 | tempval |= DMACTRL_INIT_SETTINGS; | ||
1792 | gfar_write(®s->dmactrl, tempval); | ||
1793 | |||
1794 | /* Make sure we aren't stopped */ | ||
1795 | tempval = gfar_read(®s->dmactrl); | ||
1796 | tempval &= ~(DMACTRL_GRS | DMACTRL_GTS); | ||
1797 | gfar_write(®s->dmactrl, tempval); | ||
1798 | |||
1799 | for (i = 0; i < priv->num_grps; i++) { | ||
1800 | regs = priv->gfargrp[i].regs; | ||
1801 | /* Clear THLT/RHLT, so that the DMA starts polling now */ | ||
1802 | gfar_write(®s->tstat, priv->gfargrp[i].tstat); | ||
1803 | gfar_write(®s->rstat, priv->gfargrp[i].rstat); | ||
1804 | /* Unmask the interrupts we look for */ | ||
1805 | gfar_write(®s->imask, IMASK_DEFAULT); | ||
1806 | } | ||
1807 | |||
1808 | dev->trans_start = jiffies; /* prevent tx timeout */ | ||
1809 | } | ||
1810 | |||
1811 | void gfar_configure_coalescing(struct gfar_private *priv, | ||
1812 | unsigned long tx_mask, unsigned long rx_mask) | ||
1813 | { | ||
1814 | struct gfar __iomem *regs = priv->gfargrp[0].regs; | ||
1815 | u32 __iomem *baddr; | ||
1816 | int i = 0; | ||
1817 | |||
1818 | /* Backward compatible case -- even if we enable | ||
1819 | * multiple queues, there's only a single reg to program | ||
1820 | */ | ||
1821 | gfar_write(®s->txic, 0); | ||
1822 | if (likely(priv->tx_queue[0]->txcoalescing)) | ||
1823 | gfar_write(®s->txic, priv->tx_queue[0]->txic); | ||
1824 | |||
1825 | gfar_write(®s->rxic, 0); | ||
1826 | if (unlikely(priv->rx_queue[0]->rxcoalescing)) | ||
1827 | gfar_write(®s->rxic, priv->rx_queue[0]->rxic); | ||
1828 | |||
1829 | if (priv->mode == MQ_MG_MODE) { | ||
1830 | baddr = ®s->txic0; | ||
1831 | for_each_set_bit(i, &tx_mask, priv->num_tx_queues) { | ||
1832 | if (likely(priv->tx_queue[i]->txcoalescing)) { | ||
1833 | gfar_write(baddr + i, 0); | ||
1834 | gfar_write(baddr + i, priv->tx_queue[i]->txic); | ||
1835 | } | ||
1836 | } | ||
1837 | |||
1838 | baddr = ®s->rxic0; | ||
1839 | for_each_set_bit(i, &rx_mask, priv->num_rx_queues) { | ||
1840 | if (likely(priv->rx_queue[i]->rxcoalescing)) { | ||
1841 | gfar_write(baddr + i, 0); | ||
1842 | gfar_write(baddr + i, priv->rx_queue[i]->rxic); | ||
1843 | } | ||
1844 | } | ||
1845 | } | ||
1846 | } | ||
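
Each txic/rxic value packs an enable bit, a frame-count threshold and a timer threshold into one register. A usage sketch, assuming the mk_ic_value(count, time) helper from gianfar.h (the 8 and 100 figures are illustrative, not tuned recommendations):

    /* Coalesce Tx completion interrupts: raise one after 8 frames,
     * or when the timer threshold of 100 ticks expires first. */
    priv->tx_queue[0]->txcoalescing = 1;
    priv->tx_queue[0]->txic = mk_ic_value(8, 100);
    gfar_configure_coalescing(priv, 0xFF, 0xFF);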
1847 | |||
1848 | static int register_grp_irqs(struct gfar_priv_grp *grp) | ||
1849 | { | ||
1850 | struct gfar_private *priv = grp->priv; | ||
1851 | struct net_device *dev = priv->ndev; | ||
1852 | int err; | ||
1853 | |||
1854 | /* If the device has multiple interrupts, register for | ||
1855 | * them. Otherwise, only register for the one */ | ||
1856 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { | ||
1857 | /* Install our interrupt handlers for Error, | ||
1858 | * Transmit, and Receive */ | ||
1859 | if ((err = request_irq(grp->interruptError, gfar_error, 0, | ||
1860 | grp->int_name_er, grp)) < 0) { | ||
1861 | netif_err(priv, intr, dev, "Can't get IRQ %d\n", | ||
1862 | grp->interruptError); | ||
1863 | |||
1864 | goto err_irq_fail; | ||
1865 | } | ||
1866 | |||
1867 | if ((err = request_irq(grp->interruptTransmit, gfar_transmit, | ||
1868 | 0, grp->int_name_tx, grp)) < 0) { | ||
1869 | netif_err(priv, intr, dev, "Can't get IRQ %d\n", | ||
1870 | grp->interruptTransmit); | ||
1871 | goto tx_irq_fail; | ||
1872 | } | ||
1873 | |||
1874 | if ((err = request_irq(grp->interruptReceive, gfar_receive, 0, | ||
1875 | grp->int_name_rx, grp)) < 0) { | ||
1876 | netif_err(priv, intr, dev, "Can't get IRQ %d\n", | ||
1877 | grp->interruptReceive); | ||
1878 | goto rx_irq_fail; | ||
1879 | } | ||
1880 | } else { | ||
1881 | if ((err = request_irq(grp->interruptTransmit, gfar_interrupt, 0, | ||
1882 | grp->int_name_tx, grp)) < 0) { | ||
1883 | netif_err(priv, intr, dev, "Can't get IRQ %d\n", | ||
1884 | grp->interruptTransmit); | ||
1885 | goto err_irq_fail; | ||
1886 | } | ||
1887 | } | ||
1888 | |||
1889 | return 0; | ||
1890 | |||
1891 | rx_irq_fail: | ||
1892 | free_irq(grp->interruptTransmit, grp); | ||
1893 | tx_irq_fail: | ||
1894 | free_irq(grp->interruptError, grp); | ||
1895 | err_irq_fail: | ||
1896 | return err; | ||
1897 | |||
1898 | } | ||
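
The three labels at the bottom form the usual kernel unwind ladder: a failure jumps to the matching label and falls through the frees for everything acquired before it. A generic sketch of the idiom (the acquire/release names are hypothetical):

    err = acquire_a();      /* e.g. the first request_irq() */
    if (err < 0)
            goto fail_a;
    err = acquire_b();
    if (err < 0)
            goto fail_b;    /* a is held; fall-through frees it */
    return 0;

    fail_b:
            release_a();
    fail_a:
            return err;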
1899 | |||
1900 | /* Bring the controller up and running */ | ||
1901 | int startup_gfar(struct net_device *ndev) | ||
1902 | { | ||
1903 | struct gfar_private *priv = netdev_priv(ndev); | ||
1904 | struct gfar __iomem *regs = NULL; | ||
1905 | int err, i, j; | ||
1906 | |||
1907 | for (i = 0; i < priv->num_grps; i++) { | ||
1908 | regs = priv->gfargrp[i].regs; | ||
1909 | gfar_write(®s->imask, IMASK_INIT_CLEAR); | ||
1910 | } | ||
1911 | |||
1912 | regs = priv->gfargrp[0].regs; | ||
1913 | err = gfar_alloc_skb_resources(ndev); | ||
1914 | if (err) | ||
1915 | return err; | ||
1916 | |||
1917 | gfar_init_mac(ndev); | ||
1918 | |||
1919 | for (i = 0; i < priv->num_grps; i++) { | ||
1920 | err = register_grp_irqs(&priv->gfargrp[i]); | ||
1921 | if (err) { | ||
1922 | for (j = 0; j < i; j++) | ||
1923 | free_grp_irqs(&priv->gfargrp[j]); | ||
1924 | goto irq_fail; | ||
1925 | } | ||
1926 | } | ||
1927 | |||
1928 | /* Start the controller */ | ||
1929 | gfar_start(ndev); | ||
1930 | |||
1931 | phy_start(priv->phydev); | ||
1932 | |||
1933 | gfar_configure_coalescing(priv, 0xFF, 0xFF); | ||
1934 | |||
1935 | return 0; | ||
1936 | |||
1937 | irq_fail: | ||
1938 | free_skb_resources(priv); | ||
1939 | return err; | ||
1940 | } | ||
1941 | |||
1942 | /* Called when something needs to use the ethernet device */ | ||
1943 | /* Returns 0 for success. */ | ||
1944 | static int gfar_enet_open(struct net_device *dev) | ||
1945 | { | ||
1946 | struct gfar_private *priv = netdev_priv(dev); | ||
1947 | int err; | ||
1948 | |||
1949 | enable_napi(priv); | ||
1950 | |||
1951 | skb_queue_head_init(&priv->rx_recycle); | ||
1952 | |||
1953 | /* Initialize a bunch of registers */ | ||
1954 | init_registers(dev); | ||
1955 | |||
1956 | gfar_set_mac_address(dev); | ||
1957 | |||
1958 | err = init_phy(dev); | ||
1959 | |||
1960 | if (err) { | ||
1961 | disable_napi(priv); | ||
1962 | return err; | ||
1963 | } | ||
1964 | |||
1965 | err = startup_gfar(dev); | ||
1966 | if (err) { | ||
1967 | disable_napi(priv); | ||
1968 | return err; | ||
1969 | } | ||
1970 | |||
1971 | netif_tx_start_all_queues(dev); | ||
1972 | |||
1973 | device_set_wakeup_enable(&dev->dev, priv->wol_en); | ||
1974 | |||
1975 | return err; | ||
1976 | } | ||
1977 | |||
1978 | static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb) | ||
1979 | { | ||
1980 | struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN); | ||
1981 | |||
1982 | memset(fcb, 0, GMAC_FCB_LEN); | ||
1983 | |||
1984 | return fcb; | ||
1985 | } | ||
1986 | |||
1987 | static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb) | ||
1988 | { | ||
1989 | u8 flags = 0; | ||
1990 | |||
1991 | /* If we're here, it's an IP packet with a TCP or UDP | ||
1992 | * payload. We set it to checksum, using a pseudo-header | ||
1993 | * we provide | ||
1994 | */ | ||
1995 | flags = TXFCB_DEFAULT; | ||
1996 | |||
1997 | /* Tell the controller what the protocol is */ | ||
1998 | /* And provide the already calculated phcs */ | ||
1999 | if (ip_hdr(skb)->protocol == IPPROTO_UDP) { | ||
2000 | flags |= TXFCB_UDP; | ||
2001 | fcb->phcs = udp_hdr(skb)->check; | ||
2002 | } else | ||
2003 | fcb->phcs = tcp_hdr(skb)->check; | ||
2004 | |||
2005 | /* l3os is the distance from the start of the Ethernet | ||
2006 | * frame (just past the FCB) to the start of the IP hdr; | ||
2007 | * l4os is the distance from the start of the l3 hdr to | ||
2008 | * the start of the l4 hdr */ | ||
2009 | fcb->l3os = (u16)(skb_network_offset(skb) - GMAC_FCB_LEN); | ||
2010 | fcb->l4os = skb_network_header_len(skb); | ||
2011 | |||
2012 | fcb->flags = flags; | ||
2013 | } | ||
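
A worked example of the two offsets, assuming an untagged IPv4/TCP frame with no IP options, after gfar_add_fcb() has pushed the 8-byte FCB:

    /* skb->data -> [FCB: 8][Ethernet: 14][IPv4: 20][TCP ...]
     * skb_network_offset(skb)        == 8 + 14 = 22
     * fcb->l3os = 22 - GMAC_FCB_LEN  == 14  (the Ethernet header)
     * fcb->l4os = skb_network_header_len(skb) == 20 (IPv4 header) */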
2014 | |||
2015 | static inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb) | ||
2016 | { | ||
2017 | fcb->flags |= TXFCB_VLN; | ||
2018 | fcb->vlctl = vlan_tx_tag_get(skb); | ||
2019 | } | ||
2020 | |||
2021 | static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride, | ||
2022 | struct txbd8 *base, int ring_size) | ||
2023 | { | ||
2024 | struct txbd8 *new_bd = bdp + stride; | ||
2025 | |||
2026 | return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd; | ||
2027 | } | ||
2028 | |||
2029 | static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base, | ||
2030 | int ring_size) | ||
2031 | { | ||
2032 | return skip_txbd(bdp, 1, base, ring_size); | ||
2033 | } | ||
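
skip_txbd() wraps the ring by pointer arithmetic instead of a modulo. For example:

    /* ring_size = 256, bdp = base + 255, stride = 1:
     *   new_bd = base + 256          (one past the end)
     *   new_bd >= base + ring_size, so return new_bd - 256 = base
     * i.e. the descriptor pointer wraps back to the ring start. */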
2034 | |||
2035 | /* This is called by the kernel when a frame is ready for transmission. */ | ||
2036 | /* It is pointed to by the dev->hard_start_xmit function pointer */ | ||
2037 | static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) | ||
2038 | { | ||
2039 | struct gfar_private *priv = netdev_priv(dev); | ||
2040 | struct gfar_priv_tx_q *tx_queue = NULL; | ||
2041 | struct netdev_queue *txq; | ||
2042 | struct gfar __iomem *regs = NULL; | ||
2043 | struct txfcb *fcb = NULL; | ||
2044 | struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL; | ||
2045 | u32 lstatus; | ||
2046 | int i, rq = 0, do_tstamp = 0; | ||
2047 | u32 bufaddr; | ||
2048 | unsigned long flags; | ||
2049 | unsigned int nr_frags, nr_txbds, length; | ||
2050 | |||
2051 | /* | ||
2052 | * TOE=1 frames larger than 2500 bytes may see excess delays | ||
2053 | * before start of transmission. | ||
2054 | */ | ||
2055 | if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_76) && | ||
2056 | skb->ip_summed == CHECKSUM_PARTIAL && | ||
2057 | skb->len > 2500)) { | ||
2058 | int ret; | ||
2059 | |||
2060 | ret = skb_checksum_help(skb); | ||
2061 | if (ret) | ||
2062 | return ret; | ||
2063 | } | ||
2064 | |||
2065 | rq = skb->queue_mapping; | ||
2066 | tx_queue = priv->tx_queue[rq]; | ||
2067 | txq = netdev_get_tx_queue(dev, rq); | ||
2068 | base = tx_queue->tx_bd_base; | ||
2069 | regs = tx_queue->grp->regs; | ||
2070 | |||
2071 | /* check if time stamp should be generated */ | ||
2072 | if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && | ||
2073 | priv->hwts_tx_en)) | ||
2074 | do_tstamp = 1; | ||
2075 | |||
2076 | /* make space for additional header when fcb is needed */ | ||
2077 | if (((skb->ip_summed == CHECKSUM_PARTIAL) || | ||
2078 | vlan_tx_tag_present(skb) || | ||
2079 | unlikely(do_tstamp)) && | ||
2080 | (skb_headroom(skb) < GMAC_FCB_LEN)) { | ||
2081 | struct sk_buff *skb_new; | ||
2082 | |||
2083 | skb_new = skb_realloc_headroom(skb, GMAC_FCB_LEN); | ||
2084 | if (!skb_new) { | ||
2085 | dev->stats.tx_errors++; | ||
2086 | kfree_skb(skb); | ||
2087 | return NETDEV_TX_OK; | ||
2088 | } | ||
2089 | kfree_skb(skb); | ||
2090 | skb = skb_new; | ||
2091 | } | ||
2092 | |||
2093 | /* total number of fragments in the SKB */ | ||
2094 | nr_frags = skb_shinfo(skb)->nr_frags; | ||
2095 | |||
2096 | /* calculate the required number of TxBDs for this skb */ | ||
2097 | if (unlikely(do_tstamp)) | ||
2098 | nr_txbds = nr_frags + 2; | ||
2099 | else | ||
2100 | nr_txbds = nr_frags + 1; | ||
2101 | |||
2102 | /* check if there is space to queue this packet */ | ||
2103 | if (nr_txbds > tx_queue->num_txbdfree) { | ||
2104 | /* no space, stop the queue */ | ||
2105 | netif_tx_stop_queue(txq); | ||
2106 | dev->stats.tx_fifo_errors++; | ||
2107 | return NETDEV_TX_BUSY; | ||
2108 | } | ||
2109 | |||
2110 | /* Update transmit stats */ | ||
2111 | tx_queue->stats.tx_bytes += skb->len; | ||
2112 | tx_queue->stats.tx_packets++; | ||
2113 | |||
2114 | txbdp = txbdp_start = tx_queue->cur_tx; | ||
2115 | lstatus = txbdp->lstatus; | ||
2116 | |||
2117 | /* Time stamp insertion requires one additional TxBD */ | ||
2118 | if (unlikely(do_tstamp)) | ||
2119 | txbdp_tstamp = txbdp = next_txbd(txbdp, base, | ||
2120 | tx_queue->tx_ring_size); | ||
2121 | |||
2122 | if (nr_frags == 0) { | ||
2123 | if (unlikely(do_tstamp)) | ||
2124 | txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST | | ||
2125 | TXBD_INTERRUPT); | ||
2126 | else | ||
2127 | lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); | ||
2128 | } else { | ||
2129 | /* Place the fragment addresses and lengths into the TxBDs */ | ||
2130 | for (i = 0; i < nr_frags; i++) { | ||
2131 | /* Point at the next BD, wrapping as needed */ | ||
2132 | txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); | ||
2133 | |||
2134 | length = skb_shinfo(skb)->frags[i].size; | ||
2135 | |||
2136 | lstatus = txbdp->lstatus | length | | ||
2137 | BD_LFLAG(TXBD_READY); | ||
2138 | |||
2139 | /* Handle the last BD specially */ | ||
2140 | if (i == nr_frags - 1) | ||
2141 | lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); | ||
2142 | |||
2143 | bufaddr = dma_map_page(&priv->ofdev->dev, | ||
2144 | skb_shinfo(skb)->frags[i].page, | ||
2145 | skb_shinfo(skb)->frags[i].page_offset, | ||
2146 | length, | ||
2147 | DMA_TO_DEVICE); | ||
2148 | |||
2149 | /* set the TxBD length and buffer pointer */ | ||
2150 | txbdp->bufPtr = bufaddr; | ||
2151 | txbdp->lstatus = lstatus; | ||
2152 | } | ||
2153 | |||
2154 | lstatus = txbdp_start->lstatus; | ||
2155 | } | ||
2156 | |||
2157 | /* Set up checksumming */ | ||
2158 | if (CHECKSUM_PARTIAL == skb->ip_summed) { | ||
2159 | fcb = gfar_add_fcb(skb); | ||
2160 | /* as specified by errata */ | ||
2161 | if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_12) | ||
2162 | && ((unsigned long)fcb % 0x20) > 0x18)) { | ||
2163 | __skb_pull(skb, GMAC_FCB_LEN); | ||
2164 | skb_checksum_help(skb); | ||
2165 | } else { | ||
2166 | lstatus |= BD_LFLAG(TXBD_TOE); | ||
2167 | gfar_tx_checksum(skb, fcb); | ||
2168 | } | ||
2169 | } | ||
2170 | |||
2171 | if (vlan_tx_tag_present(skb)) { | ||
2172 | if (unlikely(NULL == fcb)) { | ||
2173 | fcb = gfar_add_fcb(skb); | ||
2174 | lstatus |= BD_LFLAG(TXBD_TOE); | ||
2175 | } | ||
2176 | |||
2177 | gfar_tx_vlan(skb, fcb); | ||
2178 | } | ||
2179 | |||
2180 | /* Setup tx hardware time stamping if requested */ | ||
2181 | if (unlikely(do_tstamp)) { | ||
2182 | skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; | ||
2183 | if (fcb == NULL) | ||
2184 | fcb = gfar_add_fcb(skb); | ||
2185 | fcb->ptp = 1; | ||
2186 | lstatus |= BD_LFLAG(TXBD_TOE); | ||
2187 | } | ||
2188 | |||
2189 | txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data, | ||
2190 | skb_headlen(skb), DMA_TO_DEVICE); | ||
2191 | |||
2192 | /* | ||
2193 | * If time stamping is requested one additional TxBD must be set up. The | ||
2194 | * first TxBD points to the FCB and must have a data length of | ||
2195 | * GMAC_FCB_LEN. The second TxBD points to the actual frame data with | ||
2196 | * the full frame length. | ||
2197 | */ | ||
2198 | if (unlikely(do_tstamp)) { | ||
2199 | txbdp_tstamp->bufPtr = txbdp_start->bufPtr + GMAC_FCB_LEN; | ||
2200 | txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) | | ||
2201 | (skb_headlen(skb) - GMAC_FCB_LEN); | ||
2202 | lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN; | ||
2203 | } else { | ||
2204 | lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb); | ||
2205 | } | ||
2206 | |||
2207 | /* | ||
2208 | * We can work in parallel with gfar_clean_tx_ring(), except | ||
2209 | * when modifying num_txbdfree. Note that we didn't grab the lock | ||
2210 | * when we were reading the num_txbdfree and checking for available | ||
2211 | * space, that's because outside of this function it can only grow, | ||
2212 | * and once we've got needed space, it cannot suddenly disappear. | ||
2213 | * | ||
2214 | * The lock also protects us from gfar_error(), which can modify | ||
2215 | * regs->tstat and thus retrigger the transfers, which is why we | ||
2216 | * must also grab the lock before setting the ready bit for | ||
2217 | * the first BD to be transmitted. | ||
2218 | */ | ||
2219 | spin_lock_irqsave(&tx_queue->txlock, flags); | ||
2220 | |||
2221 | /* | ||
2222 | * The powerpc-specific eieio() is used, as wmb() has too strong | ||
2223 | * semantics (it requires synchronization between cacheable and | ||
2224 | * uncacheable mappings, which eieio doesn't provide and which we | ||
2225 | * don't need), thus requiring a more expensive sync instruction. At | ||
2226 | * some point, the set of architecture-independent barrier functions | ||
2227 | * should be expanded to include weaker barriers. | ||
2228 | */ | ||
2229 | eieio(); | ||
2230 | |||
2231 | txbdp_start->lstatus = lstatus; | ||
2232 | |||
2233 | eieio(); /* force lstatus write before tx_skbuff */ | ||
2234 | |||
2235 | tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb; | ||
2236 | |||
2237 | /* Update the current skb pointer to the next entry we will use | ||
2238 | * (wrapping if necessary) */ | ||
2239 | tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) & | ||
2240 | TX_RING_MOD_MASK(tx_queue->tx_ring_size); | ||
2241 | |||
2242 | tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size); | ||
2243 | |||
2244 | /* reduce TxBD free count */ | ||
2245 | tx_queue->num_txbdfree -= (nr_txbds); | ||
2246 | |||
2247 | /* If the next BD still needs to be cleaned up, then the BDs | ||
2248 | * are full. We need to tell the kernel to stop sending us stuff. */ | ||
2249 | if (!tx_queue->num_txbdfree) { | ||
2250 | netif_tx_stop_queue(txq); | ||
2251 | |||
2252 | dev->stats.tx_fifo_errors++; | ||
2253 | } | ||
2254 | |||
2255 | /* Tell the DMA to go go go */ | ||
2256 | gfar_write(®s->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex); | ||
2257 | |||
2258 | /* Unlock priv */ | ||
2259 | spin_unlock_irqrestore(&tx_queue->txlock, flags); | ||
2260 | |||
2261 | return NETDEV_TX_OK; | ||
2262 | } | ||
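
Putting the pieces together, the descriptor chain built above for a time-stamped skb with two page fragments (nr_txbds = 2 + 2 = 4) looks like this (illustrative):

    /* txbdp_start : FCB only, GMAC_FCB_LEN bytes, TXBD_CRC|TXBD_READY
     * txbdp_tstamp: skb_headlen() - GMAC_FCB_LEN bytes of frame data
     * BD 3        : fragment 0 (dma_map_page)
     * BD 4        : fragment 1, TXBD_LAST|TXBD_INTERRUPT
     *
     * num_txbdfree drops by 4 under txlock; gfar_clean_tx_ring()
     * returns the BDs under the same lock once the frame completes. */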
2263 | |||
2264 | /* Stops the kernel queue, and halts the controller */ | ||
2265 | static int gfar_close(struct net_device *dev) | ||
2266 | { | ||
2267 | struct gfar_private *priv = netdev_priv(dev); | ||
2268 | |||
2269 | disable_napi(priv); | ||
2270 | |||
2271 | cancel_work_sync(&priv->reset_task); | ||
2272 | stop_gfar(dev); | ||
2273 | |||
2274 | /* Disconnect from the PHY */ | ||
2275 | phy_disconnect(priv->phydev); | ||
2276 | priv->phydev = NULL; | ||
2277 | |||
2278 | netif_tx_stop_all_queues(dev); | ||
2279 | |||
2280 | return 0; | ||
2281 | } | ||
2282 | |||
2283 | /* Changes the mac address if the controller is not running. */ | ||
2284 | static int gfar_set_mac_address(struct net_device *dev) | ||
2285 | { | ||
2286 | gfar_set_mac_for_addr(dev, 0, dev->dev_addr); | ||
2287 | |||
2288 | return 0; | ||
2289 | } | ||
2290 | |||
2291 | /* Check if rx parser should be activated */ | ||
2292 | void gfar_check_rx_parser_mode(struct gfar_private *priv) | ||
2293 | { | ||
2294 | struct gfar __iomem *regs; | ||
2295 | u32 tempval; | ||
2296 | |||
2297 | regs = priv->gfargrp[0].regs; | ||
2298 | |||
2299 | tempval = gfar_read(®s->rctrl); | ||
2300 | /* Enable the parser if any feature requires it, else disable it */ | ||
2301 | if (tempval & RCTRL_REQ_PARSER) | ||
2302 | tempval |= RCTRL_PRSDEP_INIT; | ||
2303 | else | ||
2304 | tempval &= ~RCTRL_PRSDEP_INIT; | ||
2305 | gfar_write(®s->rctrl, tempval); | ||
2306 | } | ||
2307 | |||
2308 | /* Enables and disables VLAN insertion/extraction */ | ||
2309 | void gfar_vlan_mode(struct net_device *dev, u32 features) | ||
2310 | { | ||
2311 | struct gfar_private *priv = netdev_priv(dev); | ||
2312 | struct gfar __iomem *regs = NULL; | ||
2313 | unsigned long flags; | ||
2314 | u32 tempval; | ||
2315 | |||
2316 | regs = priv->gfargrp[0].regs; | ||
2317 | local_irq_save(flags); | ||
2318 | lock_rx_qs(priv); | ||
2319 | |||
2320 | if (features & NETIF_F_HW_VLAN_TX) { | ||
2321 | /* Enable VLAN tag insertion */ | ||
2322 | tempval = gfar_read(®s->tctrl); | ||
2323 | tempval |= TCTRL_VLINS; | ||
2324 | gfar_write(®s->tctrl, tempval); | ||
2325 | } else { | ||
2326 | /* Disable VLAN tag insertion */ | ||
2327 | tempval = gfar_read(®s->tctrl); | ||
2328 | tempval &= ~TCTRL_VLINS; | ||
2329 | gfar_write(®s->tctrl, tempval); | ||
2330 | } | ||
2331 | |||
2332 | if (features & NETIF_F_HW_VLAN_RX) { | ||
2333 | /* Enable VLAN tag extraction */ | ||
2334 | tempval = gfar_read(®s->rctrl); | ||
2335 | tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT); | ||
2336 | gfar_write(®s->rctrl, tempval); | ||
2337 | } else { | ||
2338 | /* Disable VLAN tag extraction */ | ||
2339 | tempval = gfar_read(®s->rctrl); | ||
2340 | tempval &= ~RCTRL_VLEX; | ||
2341 | gfar_write(®s->rctrl, tempval); | ||
2342 | |||
2343 | gfar_check_rx_parser_mode(priv); | ||
2344 | } | ||
2345 | |||
2346 | gfar_change_mtu(dev, dev->mtu); | ||
2347 | |||
2348 | unlock_rx_qs(priv); | ||
2349 | local_irq_restore(flags); | ||
2350 | } | ||
2351 | |||
2352 | static int gfar_change_mtu(struct net_device *dev, int new_mtu) | ||
2353 | { | ||
2354 | int tempsize, tempval; | ||
2355 | struct gfar_private *priv = netdev_priv(dev); | ||
2356 | struct gfar __iomem *regs = priv->gfargrp[0].regs; | ||
2357 | int oldsize = priv->rx_buffer_size; | ||
2358 | int frame_size = new_mtu + ETH_HLEN; | ||
2359 | |||
2360 | if (gfar_is_vlan_on(priv)) | ||
2361 | frame_size += VLAN_HLEN; | ||
2362 | |||
2363 | if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) { | ||
2364 | netif_err(priv, drv, dev, "Invalid MTU setting\n"); | ||
2365 | return -EINVAL; | ||
2366 | } | ||
2367 | |||
2368 | if (gfar_uses_fcb(priv)) | ||
2369 | frame_size += GMAC_FCB_LEN; | ||
2370 | |||
2371 | frame_size += priv->padding; | ||
2372 | |||
2373 | tempsize = | ||
2374 | (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) + | ||
2375 | INCREMENTAL_BUFFER_SIZE; | ||
2376 | |||
2377 | /* Only stop and start the controller if it isn't already | ||
2378 | * stopped, and we changed something */ | ||
2379 | if ((oldsize != tempsize) && (dev->flags & IFF_UP)) | ||
2380 | stop_gfar(dev); | ||
2381 | |||
2382 | priv->rx_buffer_size = tempsize; | ||
2383 | |||
2384 | dev->mtu = new_mtu; | ||
2385 | |||
2386 | gfar_write(®s->mrblr, priv->rx_buffer_size); | ||
2387 | gfar_write(®s->maxfrm, priv->rx_buffer_size); | ||
2388 | |||
2389 | /* If the mtu is larger than the max size for standard | ||
2390 | * ethernet frames (ie, a jumbo frame), then set maccfg2 | ||
2391 | * to allow huge frames, and to check the length */ | ||
2392 | tempval = gfar_read(®s->maccfg2); | ||
2393 | |||
2394 | if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE || | ||
2395 | gfar_has_errata(priv, GFAR_ERRATA_74)) | ||
2396 | tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK); | ||
2397 | else | ||
2398 | tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK); | ||
2399 | |||
2400 | gfar_write(®s->maccfg2, tempval); | ||
2401 | |||
2402 | if ((oldsize != tempsize) && (dev->flags & IFF_UP)) | ||
2403 | startup_gfar(dev); | ||
2404 | |||
2405 | return 0; | ||
2406 | } | ||
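
The tempsize expression rounds frame_size up to the next multiple of INCREMENTAL_BUFFER_SIZE (512, per gianfar.h); note that an exact multiple is still bumped up by one full increment. A worked example:

    /* frame_size = 1514 (MTU 1500 + ETH_HLEN, no VLAN/FCB/padding):
     *   1514 & ~(512 - 1) = 1024
     *   1024 + 512        = 1536  -> rx_buffer_size
     * frame_size = 1536 would yield 1536 + 512 = 2048. */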
2407 | |||
2408 | /* gfar_reset_task gets scheduled when a packet has not been | ||
2409 | * transmitted after a set amount of time. | ||
2410 | * For now, assume that clearing out all the structures and | ||
2411 | * starting over will fix the problem. | ||
2412 | */ | ||
2413 | static void gfar_reset_task(struct work_struct *work) | ||
2414 | { | ||
2415 | struct gfar_private *priv = container_of(work, struct gfar_private, | ||
2416 | reset_task); | ||
2417 | struct net_device *dev = priv->ndev; | ||
2418 | |||
2419 | if (dev->flags & IFF_UP) { | ||
2420 | netif_tx_stop_all_queues(dev); | ||
2421 | stop_gfar(dev); | ||
2422 | startup_gfar(dev); | ||
2423 | netif_tx_start_all_queues(dev); | ||
2424 | } | ||
2425 | |||
2426 | netif_tx_schedule_all(dev); | ||
2427 | } | ||
2428 | |||
2429 | static void gfar_timeout(struct net_device *dev) | ||
2430 | { | ||
2431 | struct gfar_private *priv = netdev_priv(dev); | ||
2432 | |||
2433 | dev->stats.tx_errors++; | ||
2434 | schedule_work(&priv->reset_task); | ||
2435 | } | ||
2436 | |||
2437 | static void gfar_align_skb(struct sk_buff *skb) | ||
2438 | { | ||
2439 | /* Reserve as many bytes as needed to align skb->data | ||
2440 | * on an RXBUF_ALIGNMENT boundary | ||
2441 | */ | ||
2442 | skb_reserve(skb, RXBUF_ALIGNMENT - | ||
2443 | (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1))); | ||
2444 | } | ||
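
A worked example of the reserve amount, with RXBUF_ALIGNMENT = 64 and an illustrative address:

    /* skb->data = 0x1028:  0x1028 & 63 = 40,  reserve 64 - 40 = 24
     * skb->data becomes 0x1040, a 64-byte boundary.
     * An already-aligned buffer reserves a full 64 bytes, which is
     * why gfar_alloc_skb() asks for RXBUF_ALIGNMENT extra bytes. */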
2445 | |||
2446 | /* Interrupt Handler for Transmit complete */ | ||
2447 | static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) | ||
2448 | { | ||
2449 | struct net_device *dev = tx_queue->dev; | ||
2450 | struct gfar_private *priv = netdev_priv(dev); | ||
2451 | struct gfar_priv_rx_q *rx_queue = NULL; | ||
2452 | struct txbd8 *bdp, *next = NULL; | ||
2453 | struct txbd8 *lbdp = NULL; | ||
2454 | struct txbd8 *base = tx_queue->tx_bd_base; | ||
2455 | struct sk_buff *skb; | ||
2456 | int skb_dirtytx; | ||
2457 | int tx_ring_size = tx_queue->tx_ring_size; | ||
2458 | int frags = 0, nr_txbds = 0; | ||
2459 | int i; | ||
2460 | int howmany = 0; | ||
2461 | u32 lstatus; | ||
2462 | size_t buflen; | ||
2463 | |||
2464 | rx_queue = priv->rx_queue[tx_queue->qindex]; | ||
2465 | bdp = tx_queue->dirty_tx; | ||
2466 | skb_dirtytx = tx_queue->skb_dirtytx; | ||
2467 | |||
2468 | while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) { | ||
2469 | unsigned long flags; | ||
2470 | |||
2471 | frags = skb_shinfo(skb)->nr_frags; | ||
2472 | |||
2473 | /* | ||
2474 | * When time stamping, one additional TxBD must be freed. | ||
2475 | * Also, we need to dma_unmap_single() the TxPAL. | ||
2476 | */ | ||
2477 | if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) | ||
2478 | nr_txbds = frags + 2; | ||
2479 | else | ||
2480 | nr_txbds = frags + 1; | ||
2481 | |||
2482 | lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size); | ||
2483 | |||
2484 | lstatus = lbdp->lstatus; | ||
2485 | |||
2486 | /* Only clean completed frames */ | ||
2487 | if ((lstatus & BD_LFLAG(TXBD_READY)) && | ||
2488 | (lstatus & BD_LENGTH_MASK)) | ||
2489 | break; | ||
2490 | |||
2491 | if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { | ||
2492 | next = next_txbd(bdp, base, tx_ring_size); | ||
2493 | buflen = next->length + GMAC_FCB_LEN; | ||
2494 | } else | ||
2495 | buflen = bdp->length; | ||
2496 | |||
2497 | dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr, | ||
2498 | buflen, DMA_TO_DEVICE); | ||
2499 | |||
2500 | if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { | ||
2501 | struct skb_shared_hwtstamps shhwtstamps; | ||
2502 | u64 *ns = (u64 *)(((u32)skb->data + 0x10) & ~0x7); | ||
2503 | memset(&shhwtstamps, 0, sizeof(shhwtstamps)); | ||
2504 | shhwtstamps.hwtstamp = ns_to_ktime(*ns); | ||
2505 | skb_tstamp_tx(skb, &shhwtstamps); | ||
2506 | bdp->lstatus &= BD_LFLAG(TXBD_WRAP); | ||
2507 | bdp = next; | ||
2508 | } | ||
2509 | |||
2510 | bdp->lstatus &= BD_LFLAG(TXBD_WRAP); | ||
2511 | bdp = next_txbd(bdp, base, tx_ring_size); | ||
2512 | |||
2513 | for (i = 0; i < frags; i++) { | ||
2514 | dma_unmap_page(&priv->ofdev->dev, | ||
2515 | bdp->bufPtr, | ||
2516 | bdp->length, | ||
2517 | DMA_TO_DEVICE); | ||
2518 | bdp->lstatus &= BD_LFLAG(TXBD_WRAP); | ||
2519 | bdp = next_txbd(bdp, base, tx_ring_size); | ||
2520 | } | ||
2521 | |||
2522 | /* | ||
2523 | * If there's room in the recycle queue (limit it to rx_ring_size) | ||
2524 | * we add this skb back into the pool, if it's the right size | ||
2525 | */ | ||
2526 | if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size && | ||
2527 | skb_recycle_check(skb, priv->rx_buffer_size + | ||
2528 | RXBUF_ALIGNMENT)) { | ||
2529 | gfar_align_skb(skb); | ||
2530 | skb_queue_head(&priv->rx_recycle, skb); | ||
2531 | } else | ||
2532 | dev_kfree_skb_any(skb); | ||
2533 | |||
2534 | tx_queue->tx_skbuff[skb_dirtytx] = NULL; | ||
2535 | |||
2536 | skb_dirtytx = (skb_dirtytx + 1) & | ||
2537 | TX_RING_MOD_MASK(tx_ring_size); | ||
2538 | |||
2539 | howmany++; | ||
2540 | spin_lock_irqsave(&tx_queue->txlock, flags); | ||
2541 | tx_queue->num_txbdfree += nr_txbds; | ||
2542 | spin_unlock_irqrestore(&tx_queue->txlock, flags); | ||
2543 | } | ||
2544 | |||
2545 | /* If we freed a buffer, we can restart transmission, if necessary */ | ||
2546 | if (__netif_subqueue_stopped(dev, tx_queue->qindex) && tx_queue->num_txbdfree) | ||
2547 | netif_wake_subqueue(dev, tx_queue->qindex); | ||
2548 | |||
2549 | /* Update dirty indicators */ | ||
2550 | tx_queue->skb_dirtytx = skb_dirtytx; | ||
2551 | tx_queue->dirty_tx = bdp; | ||
2552 | |||
2553 | return howmany; | ||
2554 | } | ||
2555 | |||
2556 | static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp) | ||
2557 | { | ||
2558 | unsigned long flags; | ||
2559 | |||
2560 | spin_lock_irqsave(&gfargrp->grplock, flags); | ||
2561 | if (napi_schedule_prep(&gfargrp->napi)) { | ||
2562 | gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED); | ||
2563 | __napi_schedule(&gfargrp->napi); | ||
2564 | } else { | ||
2565 | /* | ||
2566 | * Clear IEVENT, so interrupts aren't called again | ||
2567 | * because of the packets that have already arrived. | ||
2568 | */ | ||
2569 | gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK); | ||
2570 | } | ||
2571 | spin_unlock_irqrestore(&gfargrp->grplock, flags); | ||
2572 | |||
2573 | } | ||
2574 | |||
2575 | /* Interrupt Handler for Transmit complete */ | ||
2576 | static irqreturn_t gfar_transmit(int irq, void *grp_id) | ||
2577 | { | ||
2578 | gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id); | ||
2579 | return IRQ_HANDLED; | ||
2580 | } | ||
2581 | |||
2582 | static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, | ||
2583 | struct sk_buff *skb) | ||
2584 | { | ||
2585 | struct net_device *dev = rx_queue->dev; | ||
2586 | struct gfar_private *priv = netdev_priv(dev); | ||
2587 | dma_addr_t buf; | ||
2588 | |||
2589 | buf = dma_map_single(&priv->ofdev->dev, skb->data, | ||
2590 | priv->rx_buffer_size, DMA_FROM_DEVICE); | ||
2591 | gfar_init_rxbdp(rx_queue, bdp, buf); | ||
2592 | } | ||
2593 | |||
2594 | static struct sk_buff *gfar_alloc_skb(struct net_device *dev) | ||
2595 | { | ||
2596 | struct gfar_private *priv = netdev_priv(dev); | ||
2597 | struct sk_buff *skb = NULL; | ||
2598 | |||
2599 | skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT); | ||
2600 | if (!skb) | ||
2601 | return NULL; | ||
2602 | |||
2603 | gfar_align_skb(skb); | ||
2604 | |||
2605 | return skb; | ||
2606 | } | ||
2607 | |||
2608 | struct sk_buff *gfar_new_skb(struct net_device *dev) | ||
2609 | { | ||
2610 | struct gfar_private *priv = netdev_priv(dev); | ||
2611 | struct sk_buff *skb = NULL; | ||
2612 | |||
2613 | skb = skb_dequeue(&priv->rx_recycle); | ||
2614 | if (!skb) | ||
2615 | skb = gfar_alloc_skb(dev); | ||
2616 | |||
2617 | return skb; | ||
2618 | } | ||
2619 | |||
2620 | static inline void count_errors(unsigned short status, struct net_device *dev) | ||
2621 | { | ||
2622 | struct gfar_private *priv = netdev_priv(dev); | ||
2623 | struct net_device_stats *stats = &dev->stats; | ||
2624 | struct gfar_extra_stats *estats = &priv->extra_stats; | ||
2625 | |||
2626 | /* If the packet was truncated, none of the other errors | ||
2627 | * matter */ | ||
2628 | if (status & RXBD_TRUNCATED) { | ||
2629 | stats->rx_length_errors++; | ||
2630 | |||
2631 | estats->rx_trunc++; | ||
2632 | |||
2633 | return; | ||
2634 | } | ||
2635 | /* Count the errors, if there were any */ | ||
2636 | if (status & (RXBD_LARGE | RXBD_SHORT)) { | ||
2637 | stats->rx_length_errors++; | ||
2638 | |||
2639 | if (status & RXBD_LARGE) | ||
2640 | estats->rx_large++; | ||
2641 | else | ||
2642 | estats->rx_short++; | ||
2643 | } | ||
2644 | if (status & RXBD_NONOCTET) { | ||
2645 | stats->rx_frame_errors++; | ||
2646 | estats->rx_nonoctet++; | ||
2647 | } | ||
2648 | if (status & RXBD_CRCERR) { | ||
2649 | estats->rx_crcerr++; | ||
2650 | stats->rx_crc_errors++; | ||
2651 | } | ||
2652 | if (status & RXBD_OVERRUN) { | ||
2653 | estats->rx_overrun++; | ||
2654 | stats->rx_crc_errors++; | ||
2655 | } | ||
2656 | } | ||
2657 | |||
2658 | irqreturn_t gfar_receive(int irq, void *grp_id) | ||
2659 | { | ||
2660 | gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id); | ||
2661 | return IRQ_HANDLED; | ||
2662 | } | ||
2663 | |||
2664 | static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb) | ||
2665 | { | ||
2666 | /* If valid headers were found and valid checksums | ||
2667 | * were verified, tell the kernel that no further | ||
2668 | * checksumming is necessary; otherwise the stack must verify */ | ||
2669 | if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU)) | ||
2670 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
2671 | else | ||
2672 | skb_checksum_none_assert(skb); | ||
2673 | } | ||
2674 | |||
2675 | |||
2676 | /* gfar_process_frame() -- handle one incoming packet if skb | ||
2677 | * isn't NULL. */ | ||
2678 | static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, | ||
2679 | int amount_pull) | ||
2680 | { | ||
2681 | struct gfar_private *priv = netdev_priv(dev); | ||
2682 | struct rxfcb *fcb = NULL; | ||
2683 | |||
2684 | int ret; | ||
2685 | |||
2686 | /* the FCB, if present, sits at the beginning of the buffer */ | ||
2687 | fcb = (struct rxfcb *)skb->data; | ||
2688 | |||
2689 | /* Remove the FCB from the skb */ | ||
2690 | /* Remove the padded bytes, if there are any */ | ||
2691 | if (amount_pull) { | ||
2692 | skb_record_rx_queue(skb, fcb->rq); | ||
2693 | skb_pull(skb, amount_pull); | ||
2694 | } | ||
2695 | |||
2696 | /* Get receive timestamp from the skb */ | ||
2697 | if (priv->hwts_rx_en) { | ||
2698 | struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); | ||
2699 | u64 *ns = (u64 *) skb->data; | ||
2700 | memset(shhwtstamps, 0, sizeof(*shhwtstamps)); | ||
2701 | shhwtstamps->hwtstamp = ns_to_ktime(*ns); | ||
2702 | } | ||
2703 | |||
2704 | if (priv->padding) | ||
2705 | skb_pull(skb, priv->padding); | ||
2706 | |||
2707 | if (dev->features & NETIF_F_RXCSUM) | ||
2708 | gfar_rx_checksum(skb, fcb); | ||
2709 | |||
2710 | /* Tell the skb what kind of packet this is */ | ||
2711 | skb->protocol = eth_type_trans(skb, dev); | ||
2712 | |||
2713 | /* Set vlan tag */ | ||
2714 | if (fcb->flags & RXFCB_VLN) | ||
2715 | __vlan_hwaccel_put_tag(skb, fcb->vlctl); | ||
2716 | |||
2717 | /* Send the packet up the stack */ | ||
2718 | ret = netif_receive_skb(skb); | ||
2719 | |||
2720 | if (NET_RX_DROP == ret) | ||
2721 | priv->extra_stats.kernel_dropped++; | ||
2722 | |||
2723 | return 0; | ||
2724 | } | ||
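
The pulls above peel the Rx buffer apart in a fixed order; fcb is read before anything is pulled, so its fields stay valid. An illustrative layout with the parser FCB present and Rx timestamping on (the driver is expected to set priv->padding to 8 in that case, in gfar_init_mac(), outside this hunk):

    /* skb->data -> [FCB: 8][timestamp: 8][Ethernet frame ...]
     *
     * skb_pull(skb, amount_pull)    strips the FCB
     * *(u64 *)skb->data             raw nanosecond timestamp
     * skb_pull(skb, priv->padding)  strips the timestamp/padding,
     *                               leaving the Ethernet frame */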
2725 | |||
2726 | /* gfar_clean_rx_ring() -- Processes each frame in the rx ring | ||
2727 | * until the budget/quota has been reached. Returns the number | ||
2728 | * of frames handled | ||
2729 | */ | ||
2730 | int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) | ||
2731 | { | ||
2732 | struct net_device *dev = rx_queue->dev; | ||
2733 | struct rxbd8 *bdp, *base; | ||
2734 | struct sk_buff *skb; | ||
2735 | int pkt_len; | ||
2736 | int amount_pull; | ||
2737 | int howmany = 0; | ||
2738 | struct gfar_private *priv = netdev_priv(dev); | ||
2739 | |||
2740 | /* Get the first full descriptor */ | ||
2741 | bdp = rx_queue->cur_rx; | ||
2742 | base = rx_queue->rx_bd_base; | ||
2743 | |||
2744 | amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0); | ||
2745 | |||
2746 | while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) { | ||
2747 | struct sk_buff *newskb; | ||
2748 | rmb(); | ||
2749 | |||
2750 | /* Add another skb for the future */ | ||
2751 | newskb = gfar_new_skb(dev); | ||
2752 | |||
2753 | skb = rx_queue->rx_skbuff[rx_queue->skb_currx]; | ||
2754 | |||
2755 | dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr, | ||
2756 | priv->rx_buffer_size, DMA_FROM_DEVICE); | ||
2757 | |||
2758 | if (unlikely(!(bdp->status & RXBD_ERR) && | ||
2759 | bdp->length > priv->rx_buffer_size)) | ||
2760 | bdp->status = RXBD_LARGE; | ||
2761 | |||
2762 | /* We drop the frame if we failed to allocate a new buffer */ | ||
2763 | if (unlikely(!newskb || !(bdp->status & RXBD_LAST) || | ||
2764 | bdp->status & RXBD_ERR)) { | ||
2765 | count_errors(bdp->status, dev); | ||
2766 | |||
2767 | if (unlikely(!newskb)) | ||
2768 | newskb = skb; | ||
2769 | else if (skb) | ||
2770 | skb_queue_head(&priv->rx_recycle, skb); | ||
2771 | } else { | ||
2772 | /* Increment the number of packets */ | ||
2773 | rx_queue->stats.rx_packets++; | ||
2774 | howmany++; | ||
2775 | |||
2776 | if (likely(skb)) { | ||
2777 | pkt_len = bdp->length - ETH_FCS_LEN; | ||
2778 | /* Remove the FCS from the packet length */ | ||
2779 | skb_put(skb, pkt_len); | ||
2780 | rx_queue->stats.rx_bytes += pkt_len; | ||
2781 | skb_record_rx_queue(skb, rx_queue->qindex); | ||
2782 | gfar_process_frame(dev, skb, amount_pull); | ||
2783 | |||
2784 | } else { | ||
2785 | netif_warn(priv, rx_err, dev, "Missing skb!\n"); | ||
2786 | rx_queue->stats.rx_dropped++; | ||
2787 | priv->extra_stats.rx_skbmissing++; | ||
2788 | } | ||
2789 | |||
2790 | } | ||
2791 | |||
2792 | rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb; | ||
2793 | |||
2794 | /* Setup the new bdp */ | ||
2795 | gfar_new_rxbdp(rx_queue, bdp, newskb); | ||
2796 | |||
2797 | /* Update to the next pointer */ | ||
2798 | bdp = next_bd(bdp, base, rx_queue->rx_ring_size); | ||
2799 | |||
2800 | /* update to point at the next skb */ | ||
2801 | rx_queue->skb_currx = | ||
2802 | (rx_queue->skb_currx + 1) & | ||
2803 | RX_RING_MOD_MASK(rx_queue->rx_ring_size); | ||
2804 | } | ||
2805 | |||
2806 | /* Update the current rxbd pointer to be the next one */ | ||
2807 | rx_queue->cur_rx = bdp; | ||
2808 | |||
2809 | return howmany; | ||
2810 | } | ||
2811 | |||
2812 | static int gfar_poll(struct napi_struct *napi, int budget) | ||
2813 | { | ||
2814 | struct gfar_priv_grp *gfargrp = container_of(napi, | ||
2815 | struct gfar_priv_grp, napi); | ||
2816 | struct gfar_private *priv = gfargrp->priv; | ||
2817 | struct gfar __iomem *regs = gfargrp->regs; | ||
2818 | struct gfar_priv_tx_q *tx_queue = NULL; | ||
2819 | struct gfar_priv_rx_q *rx_queue = NULL; | ||
2820 | int rx_cleaned = 0, budget_per_queue = 0, rx_cleaned_per_queue = 0; | ||
2821 | int tx_cleaned = 0, i, left_over_budget = budget; | ||
2822 | unsigned long serviced_queues = 0; | ||
2823 | int num_queues = 0; | ||
2824 | |||
2825 | num_queues = gfargrp->num_rx_queues; | ||
2826 | budget_per_queue = budget / num_queues; | ||
2827 | |||
2828 | /* Clear IEVENT, so interrupts aren't called again | ||
2829 | * because of the packets that have already arrived */ | ||
2830 | gfar_write(®s->ievent, IEVENT_RTX_MASK); | ||
2831 | |||
2832 | while (num_queues && left_over_budget) { | ||
2833 | |||
2834 | budget_per_queue = left_over_budget / num_queues; | ||
2835 | left_over_budget = 0; | ||
2836 | |||
2837 | for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) { | ||
2838 | if (test_bit(i, &serviced_queues)) | ||
2839 | continue; | ||
2840 | rx_queue = priv->rx_queue[i]; | ||
2841 | tx_queue = priv->tx_queue[rx_queue->qindex]; | ||
2842 | |||
2843 | tx_cleaned += gfar_clean_tx_ring(tx_queue); | ||
2844 | rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue, | ||
2845 | budget_per_queue); | ||
2846 | rx_cleaned += rx_cleaned_per_queue; | ||
2847 | if (rx_cleaned_per_queue < budget_per_queue) { | ||
2848 | left_over_budget = left_over_budget + | ||
2849 | (budget_per_queue - rx_cleaned_per_queue); | ||
2850 | set_bit(i, &serviced_queues); | ||
2851 | num_queues--; | ||
2852 | } | ||
2853 | } | ||
2854 | } | ||
2855 | |||
2856 | if (tx_cleaned) | ||
2857 | return budget; | ||
2858 | |||
2859 | if (rx_cleaned < budget) { | ||
2860 | napi_complete(napi); | ||
2861 | |||
2862 | /* Clear the halt bit in RSTAT */ | ||
2863 | gfar_write(®s->rstat, gfargrp->rstat); | ||
2864 | |||
2865 | gfar_write(®s->imask, IMASK_DEFAULT); | ||
2866 | |||
2867 | /* If we are coalescing interrupts, update the timer */ | ||
2868 | /* Otherwise, clear it */ | ||
2869 | gfar_configure_coalescing(priv, | ||
2870 | gfargrp->rx_bit_map, gfargrp->tx_bit_map); | ||
2871 | } | ||
2872 | |||
2873 | return rx_cleaned; | ||
2874 | } | ||
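
The while loop redistributes budget that quiet queues did not use to the queues that are still busy. A worked pass, assuming two Rx queues in the group and a budget of 64:

    /* Pass 1: budget_per_queue = 64 / 2 = 32
     *   queue 0 cleans 10 -> left_over_budget += 22, marked serviced
     *   queue 1 cleans 32 -> used its full share, stays unserviced
     * Pass 2: num_queues = 1, budget_per_queue = 22
     *   queue 1 cleans fewer than 22 -> serviced, loop exits */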
2875 | |||
2876 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
2877 | /* | ||
2878 | * Polling 'interrupt' - used by things like netconsole to send skbs | ||
2879 | * without having to re-enable interrupts. It's not called while | ||
2880 | * the interrupt routine is executing. | ||
2881 | */ | ||
2882 | static void gfar_netpoll(struct net_device *dev) | ||
2883 | { | ||
2884 | struct gfar_private *priv = netdev_priv(dev); | ||
2885 | int i = 0; | ||
2886 | |||
2887 | /* If the device has multiple interrupts, run tx/rx */ | ||
2888 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { | ||
2889 | for (i = 0; i < priv->num_grps; i++) { | ||
2890 | disable_irq(priv->gfargrp[i].interruptTransmit); | ||
2891 | disable_irq(priv->gfargrp[i].interruptReceive); | ||
2892 | disable_irq(priv->gfargrp[i].interruptError); | ||
2893 | gfar_interrupt(priv->gfargrp[i].interruptTransmit, | ||
2894 | &priv->gfargrp[i]); | ||
2895 | enable_irq(priv->gfargrp[i].interruptError); | ||
2896 | enable_irq(priv->gfargrp[i].interruptReceive); | ||
2897 | enable_irq(priv->gfargrp[i].interruptTransmit); | ||
2898 | } | ||
2899 | } else { | ||
2900 | for (i = 0; i < priv->num_grps; i++) { | ||
2901 | disable_irq(priv->gfargrp[i].interruptTransmit); | ||
2902 | gfar_interrupt(priv->gfargrp[i].interruptTransmit, | ||
2903 | &priv->gfargrp[i]); | ||
2904 | enable_irq(priv->gfargrp[i].interruptTransmit); | ||
2905 | } | ||
2906 | } | ||
2907 | } | ||
2908 | #endif | ||
2909 | |||
2910 | /* The interrupt handler for devices with one interrupt */ | ||
2911 | static irqreturn_t gfar_interrupt(int irq, void *grp_id) | ||
2912 | { | ||
2913 | struct gfar_priv_grp *gfargrp = grp_id; | ||
2914 | |||
2915 | /* Save ievent for future reference */ | ||
2916 | u32 events = gfar_read(&gfargrp->regs->ievent); | ||
2917 | |||
2918 | /* Check for reception */ | ||
2919 | if (events & IEVENT_RX_MASK) | ||
2920 | gfar_receive(irq, grp_id); | ||
2921 | |||
2922 | /* Check for transmit completion */ | ||
2923 | if (events & IEVENT_TX_MASK) | ||
2924 | gfar_transmit(irq, grp_id); | ||
2925 | |||
2926 | /* Check for errors */ | ||
2927 | if (events & IEVENT_ERR_MASK) | ||
2928 | gfar_error(irq, grp_id); | ||
2929 | |||
2930 | return IRQ_HANDLED; | ||
2931 | } | ||
2932 | |||
2933 | /* Called every time the controller might need to be made | ||
2934 | * aware of new link state. The PHY code conveys this | ||
2935 | * information through variables in the phydev structure, and this | ||
2936 | * function converts those variables into the appropriate | ||
2937 | * register values, and can bring down the device if needed. | ||
2938 | */ | ||
2939 | static void adjust_link(struct net_device *dev) | ||
2940 | { | ||
2941 | struct gfar_private *priv = netdev_priv(dev); | ||
2942 | struct gfar __iomem *regs = priv->gfargrp[0].regs; | ||
2943 | unsigned long flags; | ||
2944 | struct phy_device *phydev = priv->phydev; | ||
2945 | int new_state = 0; | ||
2946 | |||
2947 | local_irq_save(flags); | ||
2948 | lock_tx_qs(priv); | ||
2949 | |||
2950 | if (phydev->link) { | ||
2951 | u32 tempval = gfar_read(®s->maccfg2); | ||
2952 | u32 ecntrl = gfar_read(®s->ecntrl); | ||
2953 | |||
2954 | /* Now we make sure that we can be in full duplex mode. | ||
2955 | * If not, we operate in half-duplex mode. */ | ||
2956 | if (phydev->duplex != priv->oldduplex) { | ||
2957 | new_state = 1; | ||
2958 | if (!(phydev->duplex)) | ||
2959 | tempval &= ~(MACCFG2_FULL_DUPLEX); | ||
2960 | else | ||
2961 | tempval |= MACCFG2_FULL_DUPLEX; | ||
2962 | |||
2963 | priv->oldduplex = phydev->duplex; | ||
2964 | } | ||
2965 | |||
2966 | if (phydev->speed != priv->oldspeed) { | ||
2967 | new_state = 1; | ||
2968 | switch (phydev->speed) { | ||
2969 | case 1000: | ||
2970 | tempval = | ||
2971 | ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII); | ||
2972 | |||
2973 | ecntrl &= ~(ECNTRL_R100); | ||
2974 | break; | ||
2975 | case 100: | ||
2976 | case 10: | ||
2977 | tempval = | ||
2978 | ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII); | ||
2979 | |||
2980 | /* Reduced mode distinguishes | ||
2981 | * between 10 and 100 */ | ||
2982 | if (phydev->speed == SPEED_100) | ||
2983 | ecntrl |= ECNTRL_R100; | ||
2984 | else | ||
2985 | ecntrl &= ~(ECNTRL_R100); | ||
2986 | break; | ||
2987 | default: | ||
2988 | netif_warn(priv, link, dev, | ||
2989 | "Ack! Speed (%d) is not 10/100/1000!\n", | ||
2990 | phydev->speed); | ||
2991 | break; | ||
2992 | } | ||
2993 | |||
2994 | priv->oldspeed = phydev->speed; | ||
2995 | } | ||
2996 | |||
2997 | gfar_write(®s->maccfg2, tempval); | ||
2998 | gfar_write(®s->ecntrl, ecntrl); | ||
2999 | |||
3000 | if (!priv->oldlink) { | ||
3001 | new_state = 1; | ||
3002 | priv->oldlink = 1; | ||
3003 | } | ||
3004 | } else if (priv->oldlink) { | ||
3005 | new_state = 1; | ||
3006 | priv->oldlink = 0; | ||
3007 | priv->oldspeed = 0; | ||
3008 | priv->oldduplex = -1; | ||
3009 | } | ||
3010 | |||
3011 | if (new_state && netif_msg_link(priv)) | ||
3012 | phy_print_status(phydev); | ||
3013 | unlock_tx_qs(priv); | ||
3014 | local_irq_restore(flags); | ||
3015 | } | ||
3016 | |||
3017 | /* Update the hash table based on the current list of multicast | ||
3018 | * addresses we subscribe to. Also, change the promiscuity of | ||
3019 | * the device based on the flags (this function is called | ||
3020 | * whenever dev->flags changes) */ | ||
3021 | static void gfar_set_multi(struct net_device *dev) | ||
3022 | { | ||
3023 | struct netdev_hw_addr *ha; | ||
3024 | struct gfar_private *priv = netdev_priv(dev); | ||
3025 | struct gfar __iomem *regs = priv->gfargrp[0].regs; | ||
3026 | u32 tempval; | ||
3027 | |||
3028 | if (dev->flags & IFF_PROMISC) { | ||
3029 | /* Set RCTRL to PROM */ | ||
3030 | tempval = gfar_read(®s->rctrl); | ||
3031 | tempval |= RCTRL_PROM; | ||
3032 | gfar_write(®s->rctrl, tempval); | ||
3033 | } else { | ||
3034 | /* Set RCTRL to not PROM */ | ||
3035 | tempval = gfar_read(®s->rctrl); | ||
3036 | tempval &= ~(RCTRL_PROM); | ||
3037 | gfar_write(®s->rctrl, tempval); | ||
3038 | } | ||
3039 | |||
3040 | if (dev->flags & IFF_ALLMULTI) { | ||
3041 | /* Set the hash to rx all multicast frames */ | ||
3042 | gfar_write(®s->igaddr0, 0xffffffff); | ||
3043 | gfar_write(®s->igaddr1, 0xffffffff); | ||
3044 | gfar_write(®s->igaddr2, 0xffffffff); | ||
3045 | gfar_write(®s->igaddr3, 0xffffffff); | ||
3046 | gfar_write(®s->igaddr4, 0xffffffff); | ||
3047 | gfar_write(®s->igaddr5, 0xffffffff); | ||
3048 | gfar_write(®s->igaddr6, 0xffffffff); | ||
3049 | gfar_write(®s->igaddr7, 0xffffffff); | ||
3050 | gfar_write(®s->gaddr0, 0xffffffff); | ||
3051 | gfar_write(®s->gaddr1, 0xffffffff); | ||
3052 | gfar_write(®s->gaddr2, 0xffffffff); | ||
3053 | gfar_write(®s->gaddr3, 0xffffffff); | ||
3054 | gfar_write(®s->gaddr4, 0xffffffff); | ||
3055 | gfar_write(®s->gaddr5, 0xffffffff); | ||
3056 | gfar_write(®s->gaddr6, 0xffffffff); | ||
3057 | gfar_write(®s->gaddr7, 0xffffffff); | ||
3058 | } else { | ||
3059 | int em_num; | ||
3060 | int idx; | ||
3061 | |||
3062 | /* zero out the hash */ | ||
3063 | gfar_write(®s->igaddr0, 0x0); | ||
3064 | gfar_write(®s->igaddr1, 0x0); | ||
3065 | gfar_write(®s->igaddr2, 0x0); | ||
3066 | gfar_write(®s->igaddr3, 0x0); | ||
3067 | gfar_write(®s->igaddr4, 0x0); | ||
3068 | gfar_write(®s->igaddr5, 0x0); | ||
3069 | gfar_write(®s->igaddr6, 0x0); | ||
3070 | gfar_write(®s->igaddr7, 0x0); | ||
3071 | gfar_write(®s->gaddr0, 0x0); | ||
3072 | gfar_write(®s->gaddr1, 0x0); | ||
3073 | gfar_write(®s->gaddr2, 0x0); | ||
3074 | gfar_write(®s->gaddr3, 0x0); | ||
3075 | gfar_write(®s->gaddr4, 0x0); | ||
3076 | gfar_write(®s->gaddr5, 0x0); | ||
3077 | gfar_write(®s->gaddr6, 0x0); | ||
3078 | gfar_write(®s->gaddr7, 0x0); | ||
3079 | |||
3080 | /* If we have extended hash tables, we need to | ||
3081 | * clear the exact match registers to prepare for | ||
3082 | * setting them */ | ||
3083 | if (priv->extended_hash) { | ||
3084 | em_num = GFAR_EM_NUM + 1; | ||
3085 | gfar_clear_exact_match(dev); | ||
3086 | idx = 1; | ||
3087 | } else { | ||
3088 | idx = 0; | ||
3089 | em_num = 0; | ||
3090 | } | ||
3091 | |||
3092 | if (netdev_mc_empty(dev)) | ||
3093 | return; | ||
3094 | |||
3095 | /* Parse the list, and set the appropriate bits */ | ||
3096 | netdev_for_each_mc_addr(ha, dev) { | ||
3097 | if (idx < em_num) { | ||
3098 | gfar_set_mac_for_addr(dev, idx, ha->addr); | ||
3099 | idx++; | ||
3100 | } else | ||
3101 | gfar_set_hash_for_addr(dev, ha->addr); | ||
3102 | } | ||
3103 | } | ||
3104 | } | ||
3105 | |||
3106 | |||
3107 | /* Clears each of the exact match registers to zero, so they | ||
3108 | * don't interfere with normal reception */ | ||
3109 | static void gfar_clear_exact_match(struct net_device *dev) | ||
3110 | { | ||
3111 | int idx; | ||
3112 | static const u8 zero_arr[MAC_ADDR_LEN] = {0, 0, 0, 0, 0, 0}; | ||
3113 | |||
3114 | for (idx = 1; idx < GFAR_EM_NUM + 1; idx++) | ||
3115 | gfar_set_mac_for_addr(dev, idx, zero_arr); | ||
3116 | } | ||
3117 | |||
3118 | /* Set the appropriate hash bit for the given addr */ | ||
3119 | /* The algorithm works like so: | ||
3120 | * 1) Take the Destination Address (i.e., the multicast address), and | ||
3121 | * do a CRC on it (little endian), and reverse the bits of the | ||
3122 | * result. | ||
3123 | * 2) Use the 8 most significant bits as a hash into a 256-entry | ||
3124 | * table. The table is controlled through 8 32-bit registers: | ||
3125 | * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is | ||
3126 | * entry 255. This means that the 3 most significant bits of | ||
3127 | * the hash index select which gaddr register to use, and the | ||
3128 | * other 5 bits indicate which bit (assuming an IBM numbering | ||
3129 | * scheme, which for PowerPC (tm) is usually the case) in the | ||
3130 | * register holds the entry. */ | ||
3131 | static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr) | ||
3132 | { | ||
3133 | u32 tempval; | ||
3134 | struct gfar_private *priv = netdev_priv(dev); | ||
3135 | u32 result = ether_crc(MAC_ADDR_LEN, addr); | ||
3136 | int width = priv->hash_width; | ||
3137 | u8 whichbit = (result >> (32 - width)) & 0x1f; | ||
3138 | u8 whichreg = result >> (32 - width + 5); | ||
3139 | u32 value = (1 << (31-whichbit)); | ||
3140 | |||
3141 | tempval = gfar_read(priv->hash_regs[whichreg]); | ||
3142 | tempval |= value; | ||
3143 | gfar_write(priv->hash_regs[whichreg], tempval); | ||
3144 | } | ||
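
A worked example of the index math, assuming the non-extended 256-entry table (width = 8) and a CRC result of 0xD2400000:

    /* top 8 bits of result = 0xD2 = entry 210
     * whichbit = (result >> 24) & 0x1f = 18  (low 5 bits of 0xD2)
     * whichreg = result >> 29          = 6   (top 3 bits)
     * value    = 1 << (31 - 18)              (bit 18, IBM numbering)
     * entry 210 lands in gaddr6, since 6 * 32 + 18 = 210. */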
3145 | |||
3146 | |||
3147 | /* There are multiple MAC address register pairs on some controllers. | ||
3148 | * This function sets the num'th pair to a given address | ||
3149 | */ | ||
3150 | static void gfar_set_mac_for_addr(struct net_device *dev, int num, | ||
3151 | const u8 *addr) | ||
3152 | { | ||
3153 | struct gfar_private *priv = netdev_priv(dev); | ||
3154 | struct gfar __iomem *regs = priv->gfargrp[0].regs; | ||
3155 | int idx; | ||
3156 | char tmpbuf[MAC_ADDR_LEN]; | ||
3157 | u32 tempval; | ||
3158 | u32 __iomem *macptr = ®s->macstnaddr1; | ||
3159 | |||
3160 | macptr += num*2; | ||
3161 | |||
3162 | /* Copy the address into the MAC registers backwards, since | ||
3163 | * the controller expects the bytes in reversed order */ | ||
3164 | for (idx = 0; idx < MAC_ADDR_LEN; idx++) | ||
3165 | tmpbuf[MAC_ADDR_LEN - 1 - idx] = addr[idx]; | ||
3166 | |||
3167 | gfar_write(macptr, *((u32 *) (tmpbuf))); | ||
3168 | |||
3169 | tempval = *((u32 *) (tmpbuf + 4)); | ||
3170 | |||
3171 | gfar_write(macptr+1, tempval); | ||
3172 | } | ||
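
For a concrete, made-up address the reversal works out as below; only the upper half of the second register carries address bytes:

    /* addr   = 00:04:9f:01:02:03
     * tmpbuf = 03 02 01 9f 04 00
     * macstnaddr1 <- 0x0302019f  (big-endian load of tmpbuf[0..3])
     * macstnaddr2 <- 0x0400xxxx  (tmpbuf[4..5]; the low 16 bits come
     *                             from past the 6-byte buffer and are
     *                             ignored by the hardware) */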
3173 | |||
3174 | /* GFAR error interrupt handler */ | ||
3175 | static irqreturn_t gfar_error(int irq, void *grp_id) | ||
3176 | { | ||
3177 | struct gfar_priv_grp *gfargrp = grp_id; | ||
3178 | struct gfar __iomem *regs = gfargrp->regs; | ||
3179 | struct gfar_private *priv = gfargrp->priv; | ||
3180 | struct net_device *dev = priv->ndev; | ||
3181 | |||
3182 | /* Save ievent for future reference */ | ||
3183 | u32 events = gfar_read(®s->ievent); | ||
3184 | |||
3185 | /* Clear IEVENT */ | ||
3186 | gfar_write(®s->ievent, events & IEVENT_ERR_MASK); | ||
3187 | |||
3188 | /* Magic Packet is not an error. */ | ||
3189 | if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) && | ||
3190 | (events & IEVENT_MAG)) | ||
3191 | events &= ~IEVENT_MAG; | ||
3192 | |||
3193 | /* Log the error event for debugging */ | ||
3194 | if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv)) | ||
3195 | netdev_dbg(dev, "error interrupt (ievent=0x%08x imask=0x%08x)\n", | ||
3196 | events, gfar_read(®s->imask)); | ||
3197 | |||
3198 | /* Update the error counters */ | ||
3199 | if (events & IEVENT_TXE) { | ||
3200 | dev->stats.tx_errors++; | ||
3201 | |||
3202 | if (events & IEVENT_LC) | ||
3203 | dev->stats.tx_window_errors++; | ||
3204 | if (events & IEVENT_CRL) | ||
3205 | dev->stats.tx_aborted_errors++; | ||
3206 | if (events & IEVENT_XFUN) { | ||
3207 | unsigned long flags; | ||
3208 | |||
3209 | netif_dbg(priv, tx_err, dev, | ||
3210 | "TX FIFO underrun, packet dropped\n"); | ||
3211 | dev->stats.tx_dropped++; | ||
3212 | priv->extra_stats.tx_underrun++; | ||
3213 | |||
3214 | local_irq_save(flags); | ||
3215 | lock_tx_qs(priv); | ||
3216 | |||
3217 | /* Reactivate the Tx Queues */ | ||
3218 | gfar_write(®s->tstat, gfargrp->tstat); | ||
3219 | |||
3220 | unlock_tx_qs(priv); | ||
3221 | local_irq_restore(flags); | ||
3222 | } | ||
3223 | netif_dbg(priv, tx_err, dev, "Transmit Error\n"); | ||
3224 | } | ||
3225 | if (events & IEVENT_BSY) { | ||
3226 | dev->stats.rx_errors++; | ||
3227 | priv->extra_stats.rx_bsy++; | ||
3228 | |||
3229 | gfar_receive(irq, grp_id); | ||
3230 | |||
3231 | netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n", | ||
3232 | gfar_read(®s->rstat)); | ||
3233 | } | ||
3234 | if (events & IEVENT_BABR) { | ||
3235 | dev->stats.rx_errors++; | ||
3236 | priv->extra_stats.rx_babr++; | ||
3237 | |||
3238 | netif_dbg(priv, rx_err, dev, "babbling RX error\n"); | ||
3239 | } | ||
3240 | if (events & IEVENT_EBERR) { | ||
3241 | priv->extra_stats.eberr++; | ||
3242 | netif_dbg(priv, rx_err, dev, "bus error\n"); | ||
3243 | } | ||
3244 | if (events & IEVENT_RXC) | ||
3245 | netif_dbg(priv, rx_status, dev, "control frame\n"); | ||
3246 | |||
3247 | if (events & IEVENT_BABT) { | ||
3248 | priv->extra_stats.tx_babt++; | ||
3249 | netif_dbg(priv, tx_err, dev, "babbling TX error\n"); | ||
3250 | } | ||
3251 | return IRQ_HANDLED; | ||
3252 | } | ||
3253 | |||
3254 | static struct of_device_id gfar_match[] = | ||
3255 | { | ||
3256 | { | ||
3257 | .type = "network", | ||
3258 | .compatible = "gianfar", | ||
3259 | }, | ||
3260 | { | ||
3261 | .compatible = "fsl,etsec2", | ||
3262 | }, | ||
3263 | {}, | ||
3264 | }; | ||
3265 | MODULE_DEVICE_TABLE(of, gfar_match); | ||
3266 | |||
3267 | /* Structure for a device driver */ | ||
3268 | static struct platform_driver gfar_driver = { | ||
3269 | .driver = { | ||
3270 | .name = "fsl-gianfar", | ||
3271 | .owner = THIS_MODULE, | ||
3272 | .pm = GFAR_PM_OPS, | ||
3273 | .of_match_table = gfar_match, | ||
3274 | }, | ||
3275 | .probe = gfar_probe, | ||
3276 | .remove = gfar_remove, | ||
3277 | }; | ||
3278 | |||
3279 | static int __init gfar_init(void) | ||
3280 | { | ||
3281 | return platform_driver_register(&gfar_driver); | ||
3282 | } | ||
3283 | |||
3284 | static void __exit gfar_exit(void) | ||
3285 | { | ||
3286 | platform_driver_unregister(&gfar_driver); | ||
3287 | } | ||
3288 | |||
3289 | module_init(gfar_init); | ||
3290 | module_exit(gfar_exit); | ||
3291 | |||
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h new file mode 100644 index 000000000000..9aa43773e8e3 --- /dev/null +++ b/drivers/net/ethernet/freescale/gianfar.h | |||
@@ -0,0 +1,1216 @@ | |||
1 | /* | ||
2 | * drivers/net/gianfar.h | ||
3 | * | ||
4 | * Gianfar Ethernet Driver | ||
5 | * Driver for FEC on MPC8540 and TSEC on MPC8540/MPC8560 | ||
6 | * Based on 8260_io/fcc_enet.c | ||
7 | * | ||
8 | * Author: Andy Fleming | ||
9 | * Maintainer: Kumar Gala | ||
10 | * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com> | ||
11 | * | ||
12 | * Copyright 2002-2009, 2011 Freescale Semiconductor, Inc. | ||
13 | * | ||
14 | * This program is free software; you can redistribute it and/or modify it | ||
15 | * under the terms of the GNU General Public License as published by the | ||
16 | * Free Software Foundation; either version 2 of the License, or (at your | ||
17 | * option) any later version. | ||
18 | * | ||
19 | * Still left to do: | ||
20 | * -Add support for module parameters | ||
21 | * -Add patch for ethtool phys id | ||
22 | */ | ||
23 | #ifndef __GIANFAR_H | ||
24 | #define __GIANFAR_H | ||
25 | |||
26 | #include <linux/kernel.h> | ||
27 | #include <linux/sched.h> | ||
28 | #include <linux/string.h> | ||
29 | #include <linux/errno.h> | ||
30 | #include <linux/slab.h> | ||
31 | #include <linux/interrupt.h> | ||
32 | #include <linux/init.h> | ||
33 | #include <linux/delay.h> | ||
34 | #include <linux/netdevice.h> | ||
35 | #include <linux/etherdevice.h> | ||
36 | #include <linux/skbuff.h> | ||
37 | #include <linux/spinlock.h> | ||
38 | #include <linux/mm.h> | ||
39 | #include <linux/mii.h> | ||
40 | #include <linux/phy.h> | ||
41 | |||
42 | #include <asm/io.h> | ||
43 | #include <asm/irq.h> | ||
44 | #include <asm/uaccess.h> | ||
45 | #include <linux/module.h> | ||
46 | #include <linux/crc32.h> | ||
47 | #include <linux/workqueue.h> | ||
48 | #include <linux/ethtool.h> | ||
49 | |||
50 | struct ethtool_flow_spec_container { | ||
51 | struct ethtool_rx_flow_spec fs; | ||
52 | struct list_head list; | ||
53 | }; | ||
54 | |||
55 | struct ethtool_rx_list { | ||
56 | struct list_head list; | ||
57 | unsigned int count; | ||
58 | }; | ||
59 | |||
60 | /* The maximum number of packets to be handled in one call of gfar_poll */ | ||
61 | #define GFAR_DEV_WEIGHT 64 | ||
62 | |||
63 | /* Length for FCB */ | ||
64 | #define GMAC_FCB_LEN 8 | ||
65 | |||
66 | /* Default padding amount */ | ||
67 | #define DEFAULT_PADDING 2 | ||
68 | |||
69 | /* Number of bytes to align the rx bufs to */ | ||
70 | #define RXBUF_ALIGNMENT 64 | ||
71 | |||
72 | /* The number of bytes which compose a unit for the purpose of | ||
73 | * allocating data buffers, i.e., for any given MTU, the data buffer | ||
74 | * will be the next highest multiple of 512 bytes. */ | ||
75 | #define INCREMENTAL_BUFFER_SIZE 512 | ||
76 | |||
77 | |||
78 | #define MAC_ADDR_LEN 6 | ||
79 | |||
80 | #define PHY_INIT_TIMEOUT 100000 | ||
81 | #define GFAR_PHY_CHANGE_TIME 2 | ||
82 | |||
83 | #define DEVICE_NAME "%s: Gianfar Ethernet Controller Version 1.2, " | ||
84 | #define DRV_NAME "gfar-enet" | ||
85 | extern const char gfar_driver_name[]; | ||
86 | extern const char gfar_driver_version[]; | ||
87 | |||
88 | /* MAXIMUM NUMBER OF QUEUES SUPPORTED */ | ||
89 | #define MAX_TX_QS 0x8 | ||
90 | #define MAX_RX_QS 0x8 | ||
91 | |||
92 | /* MAXIMUM NUMBER OF GROUPS SUPPORTED */ | ||
93 | #define MAXGROUPS 0x2 | ||
94 | |||
95 | /* These need to be powers of 2 for this driver */ | ||
96 | #define DEFAULT_TX_RING_SIZE 256 | ||
97 | #define DEFAULT_RX_RING_SIZE 256 | ||
98 | |||
99 | #define GFAR_RX_MAX_RING_SIZE 256 | ||
100 | #define GFAR_TX_MAX_RING_SIZE 256 | ||
101 | |||
102 | #define GFAR_MAX_FIFO_THRESHOLD 511 | ||
103 | #define GFAR_MAX_FIFO_STARVE 511 | ||
104 | #define GFAR_MAX_FIFO_STARVE_OFF 511 | ||
105 | |||
106 | #define DEFAULT_RX_BUFFER_SIZE 1536 | ||
107 | #define TX_RING_MOD_MASK(size) (size-1) | ||
108 | #define RX_RING_MOD_MASK(size) (size-1) | ||
109 | #define JUMBO_BUFFER_SIZE 9728 | ||
110 | #define JUMBO_FRAME_SIZE 9600 | ||
111 | |||
112 | #define DEFAULT_FIFO_TX_THR 0x100 | ||
113 | #define DEFAULT_FIFO_TX_STARVE 0x40 | ||
114 | #define DEFAULT_FIFO_TX_STARVE_OFF 0x80 | ||
115 | #define DEFAULT_BD_STASH 1 | ||
116 | #define DEFAULT_STASH_LENGTH 96 | ||
117 | #define DEFAULT_STASH_INDEX 0 | ||
118 | |||
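/* Since the ring sizes above are powers of two, a ring index can be
 * advanced and wrapped with a single AND via the MOD_MASK macros,
 * e.g. (an illustrative expression, variable names assumed):
 *
 *	next_idx = (cur_idx + 1) & TX_RING_MOD_MASK(tx_ring_size);
 */
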
119 | /* The number of Exact Match registers */ | ||
120 | #define GFAR_EM_NUM 15 | ||
121 | |||
122 | /* Latency of interface clock in nanoseconds */ | ||
123 | /* Interface clock latency, in this case, means the | ||
124 | * time described by a value of 1 in the interrupt | ||
125 | * coalescing registers' time fields. Since those fields | ||
126 | * refer to the time it takes for 64 clocks to pass, the | ||
127 | * latencies are as such: | ||
128 | * GBIT = 125MHz => 8ns/clock => 8*64 ns / tick | ||
129 | * 100 = 25 MHz => 40ns/clock => 40*64 ns / tick | ||
130 | * 10 = 2.5 MHz => 400ns/clock => 400*64 ns / tick | ||
131 | */ | ||
132 | #define GFAR_GBIT_TIME 512 | ||
133 | #define GFAR_100_TIME 2560 | ||
134 | #define GFAR_10_TIME 25600 | ||
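
/* A minimal sketch of turning a microsecond value into coalescing
 * ticks using the gigabit latency above (an illustrative helper that
 * assumes a 1 Gb/s link; it is not defined elsewhere in this patch):
 */
static inline unsigned int gfar_gbit_usecs_to_ticks(unsigned int usecs)
{
	/* 1000 ns per microsecond, GFAR_GBIT_TIME ns per tick */
	return (usecs * 1000) / GFAR_GBIT_TIME;
}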
135 | |||
136 | #define DEFAULT_TX_COALESCE 1 | ||
137 | #define DEFAULT_TXCOUNT 16 | ||
138 | #define DEFAULT_TXTIME 21 | ||
139 | |||
140 | #define DEFAULT_RXTIME 21 | ||
141 | |||
142 | #define DEFAULT_RX_COALESCE 0 | ||
143 | #define DEFAULT_RXCOUNT 0 | ||
144 | |||
145 | #define GFAR_SUPPORTED (SUPPORTED_10baseT_Half \ | ||
146 | | SUPPORTED_10baseT_Full \ | ||
147 | | SUPPORTED_100baseT_Half \ | ||
148 | | SUPPORTED_100baseT_Full \ | ||
149 | | SUPPORTED_Autoneg \ | ||
150 | | SUPPORTED_MII) | ||
151 | |||
152 | /* TBI register addresses */ | ||
153 | #define MII_TBICON 0x11 | ||
154 | |||
155 | /* TBICON register bit fields */ | ||
156 | #define TBICON_CLK_SELECT 0x0020 | ||
157 | |||
158 | /* MAC register bits */ | ||
159 | #define MACCFG1_SOFT_RESET 0x80000000 | ||
160 | #define MACCFG1_RESET_RX_MC 0x00080000 | ||
161 | #define MACCFG1_RESET_TX_MC 0x00040000 | ||
162 | #define MACCFG1_RESET_RX_FUN 0x00020000 | ||
163 | #define MACCFG1_RESET_TX_FUN 0x00010000 | ||
164 | #define MACCFG1_LOOPBACK 0x00000100 | ||
165 | #define MACCFG1_RX_FLOW 0x00000020 | ||
166 | #define MACCFG1_TX_FLOW 0x00000010 | ||
167 | #define MACCFG1_SYNCD_RX_EN 0x00000008 | ||
168 | #define MACCFG1_RX_EN 0x00000004 | ||
169 | #define MACCFG1_SYNCD_TX_EN 0x00000002 | ||
170 | #define MACCFG1_TX_EN 0x00000001 | ||
171 | |||
172 | #define MACCFG2_INIT_SETTINGS 0x00007205 | ||
173 | #define MACCFG2_FULL_DUPLEX 0x00000001 | ||
174 | #define MACCFG2_IF 0x00000300 | ||
175 | #define MACCFG2_MII 0x00000100 | ||
176 | #define MACCFG2_GMII 0x00000200 | ||
177 | #define MACCFG2_HUGEFRAME 0x00000020 | ||
178 | #define MACCFG2_LENGTHCHECK 0x00000010 | ||
179 | #define MACCFG2_MPEN 0x00000008 | ||
180 | |||
181 | #define ECNTRL_FIFM 0x00008000 | ||
182 | #define ECNTRL_INIT_SETTINGS 0x00001000 | ||
183 | #define ECNTRL_TBI_MODE 0x00000020 | ||
184 | #define ECNTRL_REDUCED_MODE 0x00000010 | ||
185 | #define ECNTRL_R100 0x00000008 | ||
186 | #define ECNTRL_REDUCED_MII_MODE 0x00000004 | ||
187 | #define ECNTRL_SGMII_MODE 0x00000002 | ||
188 | |||
189 | #define MRBLR_INIT_SETTINGS DEFAULT_RX_BUFFER_SIZE | ||
190 | |||
191 | #define MINFLR_INIT_SETTINGS 0x00000040 | ||
192 | |||
193 | /* Tqueue control */ | ||
194 | #define TQUEUE_EN0 0x00008000 | ||
195 | #define TQUEUE_EN1 0x00004000 | ||
196 | #define TQUEUE_EN2 0x00002000 | ||
197 | #define TQUEUE_EN3 0x00001000 | ||
198 | #define TQUEUE_EN4 0x00000800 | ||
199 | #define TQUEUE_EN5 0x00000400 | ||
200 | #define TQUEUE_EN6 0x00000200 | ||
201 | #define TQUEUE_EN7 0x00000100 | ||
202 | #define TQUEUE_EN_ALL 0x0000FF00 | ||
203 | |||
204 | #define TR03WT_WT0_MASK 0xFF000000 | ||
205 | #define TR03WT_WT1_MASK 0x00FF0000 | ||
206 | #define TR03WT_WT2_MASK 0x0000FF00 | ||
207 | #define TR03WT_WT3_MASK 0x000000FF | ||
208 | |||
209 | #define TR47WT_WT4_MASK 0xFF000000 | ||
210 | #define TR47WT_WT5_MASK 0x00FF0000 | ||
211 | #define TR47WT_WT6_MASK 0x0000FF00 | ||
212 | #define TR47WT_WT7_MASK 0x000000FF | ||
213 | |||
214 | /* Rqueue control */ | ||
215 | #define RQUEUE_EX0 0x00800000 | ||
216 | #define RQUEUE_EX1 0x00400000 | ||
217 | #define RQUEUE_EX2 0x00200000 | ||
218 | #define RQUEUE_EX3 0x00100000 | ||
219 | #define RQUEUE_EX4 0x00080000 | ||
220 | #define RQUEUE_EX5 0x00040000 | ||
221 | #define RQUEUE_EX6 0x00020000 | ||
222 | #define RQUEUE_EX7 0x00010000 | ||
223 | #define RQUEUE_EX_ALL 0x00FF0000 | ||
224 | |||
225 | #define RQUEUE_EN0 0x00000080 | ||
226 | #define RQUEUE_EN1 0x00000040 | ||
227 | #define RQUEUE_EN2 0x00000020 | ||
228 | #define RQUEUE_EN3 0x00000010 | ||
229 | #define RQUEUE_EN4 0x00000008 | ||
230 | #define RQUEUE_EN5 0x00000004 | ||
231 | #define RQUEUE_EN6 0x00000002 | ||
232 | #define RQUEUE_EN7 0x00000001 | ||
233 | #define RQUEUE_EN_ALL 0x000000FF | ||
234 | |||
235 | /* Init to do tx snooping for buffers and descriptors */ | ||
236 | #define DMACTRL_INIT_SETTINGS 0x000000c3 | ||
237 | #define DMACTRL_GRS 0x00000010 | ||
238 | #define DMACTRL_GTS 0x00000008 | ||
239 | |||
240 | #define TSTAT_CLEAR_THALT_ALL 0xFF000000 | ||
241 | #define TSTAT_CLEAR_THALT 0x80000000 | ||
242 | #define TSTAT_CLEAR_THALT0 0x80000000 | ||
243 | #define TSTAT_CLEAR_THALT1 0x40000000 | ||
244 | #define TSTAT_CLEAR_THALT2 0x20000000 | ||
245 | #define TSTAT_CLEAR_THALT3 0x10000000 | ||
246 | #define TSTAT_CLEAR_THALT4 0x08000000 | ||
247 | #define TSTAT_CLEAR_THALT5 0x04000000 | ||
248 | #define TSTAT_CLEAR_THALT6 0x02000000 | ||
249 | #define TSTAT_CLEAR_THALT7 0x01000000 | ||
250 | |||
251 | /* Interrupt coalescing macros */ | ||
252 | #define IC_ICEN 0x80000000 | ||
253 | #define IC_ICFT_MASK 0x1fe00000 | ||
254 | #define IC_ICFT_SHIFT 21 | ||
255 | #define mk_ic_icft(x) \ | ||
256 | (((unsigned int)x << IC_ICFT_SHIFT)&IC_ICFT_MASK) | ||
257 | #define IC_ICTT_MASK 0x0000ffff | ||
258 | #define mk_ic_ictt(x) (x&IC_ICTT_MASK) | ||
259 | |||
260 | #define mk_ic_value(count, time) (IC_ICEN | \ | ||
261 | mk_ic_icft(count) | \ | ||
262 | mk_ic_ictt(time)) | ||
263 | #define get_icft_value(ic) (((unsigned long)ic & IC_ICFT_MASK) >> \ | ||
264 | IC_ICFT_SHIFT) | ||
265 | #define get_ictt_value(ic) ((unsigned long)ic & IC_ICTT_MASK) | ||
266 | |||
267 | #define DEFAULT_TXIC mk_ic_value(DEFAULT_TXCOUNT, DEFAULT_TXTIME) | ||
268 | #define DEFAULT_RXIC mk_ic_value(DEFAULT_RXCOUNT, DEFAULT_RXTIME) | ||
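
/* Worked example using the defaults above:
 *	DEFAULT_TXIC = IC_ICEN | mk_ic_icft(16) | mk_ic_ictt(21)
 *		     = 0x80000000 | (16 << 21)  | (21 & 0xffff)
 *		     = 0x80000000 | 0x02000000  | 0x00000015
 *		     = 0x82000015
 */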
269 | |||
270 | #define skip_bd(bdp, stride, base, ring_size) ({ \ | ||
271 | typeof(bdp) new_bd = (bdp) + (stride); \ | ||
272 | (new_bd >= (base) + (ring_size)) ? (new_bd - (ring_size)) : new_bd; }) | ||
273 | |||
274 | #define next_bd(bdp, base, ring_size) skip_bd(bdp, 1, base, ring_size) | ||
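
/* Usage sketch: advancing a ring pointer by one descriptor with wrap
 * (an illustrative fragment; the queue fields it touches are declared
 * later in this header, the txq variable name is assumed):
 *
 *	txq->cur_tx = next_bd(txq->cur_tx, txq->tx_bd_base,
 *			      txq->tx_ring_size);
 */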
275 | |||
276 | #define RCTRL_TS_ENABLE 0x01000000 | ||
277 | #define RCTRL_PAL_MASK 0x001f0000 | ||
278 | #define RCTRL_VLEX 0x00002000 | ||
279 | #define RCTRL_FILREN 0x00001000 | ||
280 | #define RCTRL_GHTX 0x00000400 | ||
281 | #define RCTRL_IPCSEN 0x00000200 | ||
282 | #define RCTRL_TUCSEN 0x00000100 | ||
283 | #define RCTRL_PRSDEP_MASK 0x000000c0 | ||
284 | #define RCTRL_PRSDEP_INIT 0x000000c0 | ||
285 | #define RCTRL_PRSFM 0x00000020 | ||
286 | #define RCTRL_PROM 0x00000008 | ||
287 | #define RCTRL_EMEN 0x00000002 | ||
288 | #define RCTRL_REQ_PARSER (RCTRL_VLEX | RCTRL_IPCSEN | \ | ||
289 | RCTRL_TUCSEN | RCTRL_FILREN) | ||
290 | #define RCTRL_CHECKSUMMING (RCTRL_IPCSEN | RCTRL_TUCSEN | \ | ||
291 | RCTRL_PRSDEP_INIT) | ||
292 | #define RCTRL_EXTHASH (RCTRL_GHTX) | ||
293 | #define RCTRL_VLAN (RCTRL_PRSDEP_INIT) | ||
294 | #define RCTRL_PADDING(x) ((x << 16) & RCTRL_PAL_MASK) | ||
295 | |||
296 | |||
297 | #define RSTAT_CLEAR_RHALT 0x00800000 | ||
298 | |||
299 | #define TCTRL_IPCSEN 0x00004000 | ||
300 | #define TCTRL_TUCSEN 0x00002000 | ||
301 | #define TCTRL_VLINS 0x00001000 | ||
302 | #define TCTRL_THDF 0x00000800 | ||
303 | #define TCTRL_RFCPAUSE 0x00000010 | ||
304 | #define TCTRL_TFCPAUSE 0x00000008 | ||
305 | #define TCTRL_TXSCHED_MASK 0x00000006 | ||
306 | #define TCTRL_TXSCHED_INIT 0x00000000 | ||
307 | #define TCTRL_TXSCHED_PRIO 0x00000002 | ||
308 | #define TCTRL_TXSCHED_WRRS 0x00000004 | ||
309 | #define TCTRL_INIT_CSUM (TCTRL_TUCSEN | TCTRL_IPCSEN) | ||
310 | |||
311 | #define IEVENT_INIT_CLEAR 0xffffffff | ||
312 | #define IEVENT_BABR 0x80000000 | ||
313 | #define IEVENT_RXC 0x40000000 | ||
314 | #define IEVENT_BSY 0x20000000 | ||
315 | #define IEVENT_EBERR 0x10000000 | ||
316 | #define IEVENT_MSRO 0x04000000 | ||
317 | #define IEVENT_GTSC 0x02000000 | ||
318 | #define IEVENT_BABT 0x01000000 | ||
319 | #define IEVENT_TXC 0x00800000 | ||
320 | #define IEVENT_TXE 0x00400000 | ||
321 | #define IEVENT_TXB 0x00200000 | ||
322 | #define IEVENT_TXF 0x00100000 | ||
323 | #define IEVENT_LC 0x00040000 | ||
324 | #define IEVENT_CRL 0x00020000 | ||
325 | #define IEVENT_XFUN 0x00010000 | ||
326 | #define IEVENT_RXB0 0x00008000 | ||
327 | #define IEVENT_MAG 0x00000800 | ||
328 | #define IEVENT_GRSC 0x00000100 | ||
329 | #define IEVENT_RXF0 0x00000080 | ||
330 | #define IEVENT_FIR 0x00000008 | ||
331 | #define IEVENT_FIQ 0x00000004 | ||
332 | #define IEVENT_DPE 0x00000002 | ||
333 | #define IEVENT_PERR 0x00000001 | ||
334 | #define IEVENT_RX_MASK (IEVENT_RXB0 | IEVENT_RXF0 | IEVENT_BSY) | ||
335 | #define IEVENT_TX_MASK (IEVENT_TXB | IEVENT_TXF) | ||
336 | #define IEVENT_RTX_MASK (IEVENT_RX_MASK | IEVENT_TX_MASK) | ||
337 | #define IEVENT_ERR_MASK \ | ||
338 | (IEVENT_RXC | IEVENT_BSY | IEVENT_EBERR | IEVENT_MSRO | \ | ||
339 | IEVENT_BABT | IEVENT_TXC | IEVENT_TXE | IEVENT_LC \ | ||
340 | | IEVENT_CRL | IEVENT_XFUN | IEVENT_DPE | IEVENT_PERR \ | ||
341 | | IEVENT_MAG | IEVENT_BABR) | ||
342 | |||
343 | #define IMASK_INIT_CLEAR 0x00000000 | ||
344 | #define IMASK_BABR 0x80000000 | ||
345 | #define IMASK_RXC 0x40000000 | ||
346 | #define IMASK_BSY 0x20000000 | ||
347 | #define IMASK_EBERR 0x10000000 | ||
348 | #define IMASK_MSRO 0x04000000 | ||
349 | #define IMASK_GTSC 0x02000000 | ||
350 | #define IMASK_BABT 0x01000000 | ||
351 | #define IMASK_TXC 0x00800000 | ||
352 | #define IMASK_TXEEN 0x00400000 | ||
353 | #define IMASK_TXBEN 0x00200000 | ||
354 | #define IMASK_TXFEN 0x00100000 | ||
355 | #define IMASK_LC 0x00040000 | ||
356 | #define IMASK_CRL 0x00020000 | ||
357 | #define IMASK_XFUN 0x00010000 | ||
358 | #define IMASK_RXB0 0x00008000 | ||
359 | #define IMASK_MAG 0x00000800 | ||
360 | #define IMASK_GRSC 0x00000100 | ||
361 | #define IMASK_RXFEN0 0x00000080 | ||
362 | #define IMASK_FIR 0x00000008 | ||
363 | #define IMASK_FIQ 0x00000004 | ||
364 | #define IMASK_DPE 0x00000002 | ||
365 | #define IMASK_PERR 0x00000001 | ||
366 | #define IMASK_DEFAULT (IMASK_TXEEN | IMASK_TXFEN | IMASK_TXBEN | \ | ||
367 | IMASK_RXFEN0 | IMASK_BSY | IMASK_EBERR | IMASK_BABR | \ | ||
368 | IMASK_XFUN | IMASK_RXC | IMASK_BABT | IMASK_DPE \ | ||
369 | | IMASK_PERR) | ||
370 | #define IMASK_RTX_DISABLED ((~(IMASK_RXFEN0 | IMASK_TXFEN | IMASK_BSY)) \ | ||
371 | & IMASK_DEFAULT) | ||
372 | |||
373 | /* Fifo management */ | ||
374 | #define FIFO_TX_THR_MASK 0x01ff | ||
375 | #define FIFO_TX_STARVE_MASK 0x01ff | ||
376 | #define FIFO_TX_STARVE_OFF_MASK 0x01ff | ||
377 | |||
378 | /* Attribute fields */ | ||
379 | |||
380 | /* This enables rx snooping for buffers and descriptors */ | ||
381 | #define ATTR_BDSTASH 0x00000800 | ||
382 | |||
383 | #define ATTR_BUFSTASH 0x00004000 | ||
384 | |||
385 | #define ATTR_SNOOPING 0x000000c0 | ||
386 | #define ATTR_INIT_SETTINGS ATTR_SNOOPING | ||
387 | |||
388 | #define ATTRELI_INIT_SETTINGS 0x0 | ||
389 | #define ATTRELI_EL_MASK 0x3fff0000 | ||
390 | #define ATTRELI_EL(x) (x << 16) | ||
391 | #define ATTRELI_EI_MASK 0x00003fff | ||
392 | #define ATTRELI_EI(x) (x) | ||
393 | |||
394 | #define BD_LFLAG(flags) ((flags) << 16) | ||
395 | #define BD_LENGTH_MASK 0x0000ffff | ||
396 | |||
397 | #define FPR_FILER_MASK 0xFFFFFFFF | ||
398 | #define MAX_FILER_IDX 0xFF | ||
399 | |||
400 | /* This default RIR value directly corresponds | ||
401 | * to the 3-bit hash value generated */ | ||
402 | #define DEFAULT_RIR0 0x05397700 | ||
403 | |||
404 | /* RQFCR register bits */ | ||
405 | #define RQFCR_GPI 0x80000000 | ||
406 | #define RQFCR_HASHTBL_Q 0x00000000 | ||
407 | #define RQFCR_HASHTBL_0 0x00020000 | ||
408 | #define RQFCR_HASHTBL_1 0x00040000 | ||
409 | #define RQFCR_HASHTBL_2 0x00060000 | ||
410 | #define RQFCR_HASHTBL_3 0x00080000 | ||
411 | #define RQFCR_HASH 0x00010000 | ||
412 | #define RQFCR_QUEUE 0x0000FC00 | ||
413 | #define RQFCR_CLE 0x00000200 | ||
414 | #define RQFCR_RJE 0x00000100 | ||
415 | #define RQFCR_AND 0x00000080 | ||
416 | #define RQFCR_CMP_EXACT 0x00000000 | ||
417 | #define RQFCR_CMP_MATCH 0x00000020 | ||
418 | #define RQFCR_CMP_NOEXACT 0x00000040 | ||
419 | #define RQFCR_CMP_NOMATCH 0x00000060 | ||
420 | |||
421 | /* RQFCR PID values */ | ||
422 | #define RQFCR_PID_MASK 0x00000000 | ||
423 | #define RQFCR_PID_PARSE 0x00000001 | ||
424 | #define RQFCR_PID_ARB 0x00000002 | ||
425 | #define RQFCR_PID_DAH 0x00000003 | ||
426 | #define RQFCR_PID_DAL 0x00000004 | ||
427 | #define RQFCR_PID_SAH 0x00000005 | ||
428 | #define RQFCR_PID_SAL 0x00000006 | ||
429 | #define RQFCR_PID_ETY 0x00000007 | ||
430 | #define RQFCR_PID_VID 0x00000008 | ||
431 | #define RQFCR_PID_PRI 0x00000009 | ||
432 | #define RQFCR_PID_TOS 0x0000000A | ||
433 | #define RQFCR_PID_L4P 0x0000000B | ||
434 | #define RQFCR_PID_DIA 0x0000000C | ||
435 | #define RQFCR_PID_SIA 0x0000000D | ||
436 | #define RQFCR_PID_DPT 0x0000000E | ||
437 | #define RQFCR_PID_SPT 0x0000000F | ||
438 | |||
439 | /* RQFPR when PID is 0x0001 */ | ||
440 | #define RQFPR_HDR_GE_512 0x00200000 | ||
441 | #define RQFPR_LERR 0x00100000 | ||
442 | #define RQFPR_RAR 0x00080000 | ||
443 | #define RQFPR_RARQ 0x00040000 | ||
444 | #define RQFPR_AR 0x00020000 | ||
445 | #define RQFPR_ARQ 0x00010000 | ||
446 | #define RQFPR_EBC 0x00008000 | ||
447 | #define RQFPR_VLN 0x00004000 | ||
448 | #define RQFPR_CFI 0x00002000 | ||
449 | #define RQFPR_JUM 0x00001000 | ||
450 | #define RQFPR_IPF 0x00000800 | ||
451 | #define RQFPR_FIF 0x00000400 | ||
452 | #define RQFPR_IPV4 0x00000200 | ||
453 | #define RQFPR_IPV6 0x00000100 | ||
454 | #define RQFPR_ICC 0x00000080 | ||
455 | #define RQFPR_ICV 0x00000040 | ||
456 | #define RQFPR_TCP 0x00000020 | ||
457 | #define RQFPR_UDP 0x00000010 | ||
458 | #define RQFPR_TUC 0x00000008 | ||
459 | #define RQFPR_TUV 0x00000004 | ||
460 | #define RQFPR_PER 0x00000002 | ||
461 | #define RQFPR_EER 0x00000001 | ||
462 | |||
463 | /* TxBD status field bits */ | ||
464 | #define TXBD_READY 0x8000 | ||
465 | #define TXBD_PADCRC 0x4000 | ||
466 | #define TXBD_WRAP 0x2000 | ||
467 | #define TXBD_INTERRUPT 0x1000 | ||
468 | #define TXBD_LAST 0x0800 | ||
469 | #define TXBD_CRC 0x0400 | ||
470 | #define TXBD_DEF 0x0200 | ||
471 | #define TXBD_HUGEFRAME 0x0080 | ||
472 | #define TXBD_LATECOLLISION 0x0080 | ||
473 | #define TXBD_RETRYLIMIT 0x0040 | ||
474 | #define TXBD_RETRYCOUNTMASK 0x003c | ||
475 | #define TXBD_UNDERRUN 0x0002 | ||
476 | #define TXBD_TOE 0x0002 | ||
477 | |||
478 | /* Tx FCB param bits */ | ||
479 | #define TXFCB_VLN 0x80 | ||
480 | #define TXFCB_IP 0x40 | ||
481 | #define TXFCB_IP6 0x20 | ||
482 | #define TXFCB_TUP 0x10 | ||
483 | #define TXFCB_UDP 0x08 | ||
484 | #define TXFCB_CIP 0x04 | ||
485 | #define TXFCB_CTU 0x02 | ||
486 | #define TXFCB_NPH 0x01 | ||
487 | #define TXFCB_DEFAULT (TXFCB_IP|TXFCB_TUP|TXFCB_CTU|TXFCB_NPH) | ||
488 | |||
489 | /* RxBD status field bits */ | ||
490 | #define RXBD_EMPTY 0x8000 | ||
491 | #define RXBD_RO1 0x4000 | ||
492 | #define RXBD_WRAP 0x2000 | ||
493 | #define RXBD_INTERRUPT 0x1000 | ||
494 | #define RXBD_LAST 0x0800 | ||
495 | #define RXBD_FIRST 0x0400 | ||
496 | #define RXBD_MISS 0x0100 | ||
497 | #define RXBD_BROADCAST 0x0080 | ||
498 | #define RXBD_MULTICAST 0x0040 | ||
499 | #define RXBD_LARGE 0x0020 | ||
500 | #define RXBD_NONOCTET 0x0010 | ||
501 | #define RXBD_SHORT 0x0008 | ||
502 | #define RXBD_CRCERR 0x0004 | ||
503 | #define RXBD_OVERRUN 0x0002 | ||
504 | #define RXBD_TRUNCATED 0x0001 | ||
505 | #define RXBD_STATS 0x01ff | ||
506 | #define RXBD_ERR (RXBD_LARGE | RXBD_SHORT | RXBD_NONOCTET \ | ||
507 | | RXBD_CRCERR | RXBD_OVERRUN \ | ||
508 | | RXBD_TRUNCATED) | ||
509 | |||
510 | /* Rx FCB status field bits */ | ||
511 | #define RXFCB_VLN 0x8000 | ||
512 | #define RXFCB_IP 0x4000 | ||
513 | #define RXFCB_IP6 0x2000 | ||
514 | #define RXFCB_TUP 0x1000 | ||
515 | #define RXFCB_CIP 0x0800 | ||
516 | #define RXFCB_CTU 0x0400 | ||
517 | #define RXFCB_EIP 0x0200 | ||
518 | #define RXFCB_ETU 0x0100 | ||
519 | #define RXFCB_CSUM_MASK 0x0f00 | ||
520 | #define RXFCB_PERR_MASK 0x000c | ||
521 | #define RXFCB_PERR_BADL3 0x0008 | ||
522 | |||
523 | #define GFAR_INT_NAME_MAX IFNAMSIZ + 4 | ||
524 | |||
525 | struct txbd8 | ||
526 | { | ||
527 | union { | ||
528 | struct { | ||
529 | u16 status; /* Status Fields */ | ||
530 | u16 length; /* Buffer length */ | ||
531 | }; | ||
532 | u32 lstatus; | ||
533 | }; | ||
534 | u32 bufPtr; /* Buffer Pointer */ | ||
535 | }; | ||
536 | |||
537 | struct txfcb { | ||
538 | u8 flags; | ||
539 | u8 ptp; /* Flag to enable tx timestamping */ | ||
540 | u8 l4os; /* Layer 4 Header Offset */ | ||
541 | u8 l3os; /* Layer 3 Header Offset */ | ||
542 | u16 phcs; /* Pseudo-header Checksum */ | ||
543 | u16 vlctl; /* VLAN control word */ | ||
544 | }; | ||
545 | |||
546 | struct rxbd8 | ||
547 | { | ||
548 | union { | ||
549 | struct { | ||
550 | u16 status; /* Status Fields */ | ||
551 | u16 length; /* Buffer Length */ | ||
552 | }; | ||
553 | u32 lstatus; | ||
554 | }; | ||
555 | u32 bufPtr; /* Buffer Pointer */ | ||
556 | }; | ||
557 | |||
558 | struct rxfcb { | ||
559 | u16 flags; | ||
560 | u8 rq; /* Receive Queue index */ | ||
561 | u8 pro; /* Layer 4 Protocol */ | ||
562 | u16 reserved; | ||
563 | u16 vlctl; /* VLAN control word */ | ||
564 | }; | ||
565 | |||
566 | struct gianfar_skb_cb { | ||
567 | int alignamount; | ||
568 | }; | ||
569 | |||
570 | #define GFAR_CB(skb) ((struct gianfar_skb_cb *)((skb)->cb)) | ||
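
/* Usage sketch: the control block can carry the RX alignment shift for
 * an skb, roughly (an illustrative fragment, not code from this patch):
 *
 *	GFAR_CB(skb)->alignamount = RXBUF_ALIGNMENT -
 *		(((unsigned long)skb->data) & (RXBUF_ALIGNMENT - 1));
 */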
571 | |||
572 | struct rmon_mib | ||
573 | { | ||
574 | u32 tr64; /* 0x.680 - Transmit and Receive 64-byte Frame Counter */ | ||
575 | u32 tr127; /* 0x.684 - Transmit and Receive 65-127 byte Frame Counter */ | ||
576 | u32 tr255; /* 0x.688 - Transmit and Receive 128-255 byte Frame Counter */ | ||
577 | u32 tr511; /* 0x.68c - Transmit and Receive 256-511 byte Frame Counter */ | ||
578 | u32 tr1k; /* 0x.690 - Transmit and Receive 512-1023 byte Frame Counter */ | ||
579 | u32 trmax; /* 0x.694 - Transmit and Receive 1024-1518 byte Frame Counter */ | ||
580 | u32 trmgv; /* 0x.698 - Transmit and Receive 1519-1522 byte Good VLAN Frame */ | ||
581 | u32 rbyt; /* 0x.69c - Receive Byte Counter */ | ||
582 | u32 rpkt; /* 0x.6a0 - Receive Packet Counter */ | ||
583 | u32 rfcs; /* 0x.6a4 - Receive FCS Error Counter */ | ||
584 | u32 rmca; /* 0x.6a8 - Receive Multicast Packet Counter */ | ||
585 | u32 rbca; /* 0x.6ac - Receive Broadcast Packet Counter */ | ||
586 | u32 rxcf; /* 0x.6b0 - Receive Control Frame Packet Counter */ | ||
587 | u32 rxpf; /* 0x.6b4 - Receive Pause Frame Packet Counter */ | ||
588 | u32 rxuo; /* 0x.6b8 - Receive Unknown OP Code Counter */ | ||
589 | u32 raln; /* 0x.6bc - Receive Alignment Error Counter */ | ||
590 | u32 rflr; /* 0x.6c0 - Receive Frame Length Error Counter */ | ||
591 | u32 rcde; /* 0x.6c4 - Receive Code Error Counter */ | ||
592 | u32 rcse; /* 0x.6c8 - Receive Carrier Sense Error Counter */ | ||
593 | u32 rund; /* 0x.6cc - Receive Undersize Packet Counter */ | ||
594 | u32 rovr; /* 0x.6d0 - Receive Oversize Packet Counter */ | ||
595 | u32 rfrg; /* 0x.6d4 - Receive Fragments Counter */ | ||
596 | u32 rjbr; /* 0x.6d8 - Receive Jabber Counter */ | ||
597 | u32 rdrp; /* 0x.6dc - Receive Drop Counter */ | ||
598 | u32 tbyt; /* 0x.6e0 - Transmit Byte Counter */ | ||
599 | u32 tpkt; /* 0x.6e4 - Transmit Packet Counter */ | ||
600 | u32 tmca; /* 0x.6e8 - Transmit Multicast Packet Counter */ | ||
601 | u32 tbca; /* 0x.6ec - Transmit Broadcast Packet Counter */ | ||
602 | u32 txpf; /* 0x.6f0 - Transmit Pause Control Frame Counter */ | ||
603 | u32 tdfr; /* 0x.6f4 - Transmit Deferral Packet Counter */ | ||
604 | u32 tedf; /* 0x.6f8 - Transmit Excessive Deferral Packet Counter */ | ||
605 | u32 tscl; /* 0x.6fc - Transmit Single Collision Packet Counter */ | ||
606 | u32 tmcl; /* 0x.700 - Transmit Multiple Collision Packet Counter */ | ||
607 | u32 tlcl; /* 0x.704 - Transmit Late Collision Packet Counter */ | ||
608 | u32 txcl; /* 0x.708 - Transmit Excessive Collision Packet Counter */ | ||
609 | u32 tncl; /* 0x.70c - Transmit Total Collision Counter */ | ||
610 | u8 res1[4]; | ||
611 | u32 tdrp; /* 0x.714 - Transmit Drop Frame Counter */ | ||
612 | u32 tjbr; /* 0x.718 - Transmit Jabber Frame Counter */ | ||
613 | u32 tfcs; /* 0x.71c - Transmit FCS Error Counter */ | ||
614 | u32 txcf; /* 0x.720 - Transmit Control Frame Counter */ | ||
615 | u32 tovr; /* 0x.724 - Transmit Oversize Frame Counter */ | ||
616 | u32 tund; /* 0x.728 - Transmit Undersize Frame Counter */ | ||
617 | u32 tfrg; /* 0x.72c - Transmit Fragments Frame Counter */ | ||
618 | u32 car1; /* 0x.730 - Carry Register One */ | ||
619 | u32 car2; /* 0x.734 - Carry Register Two */ | ||
620 | u32 cam1; /* 0x.738 - Carry Mask Register One */ | ||
621 | u32 cam2; /* 0x.73c - Carry Mask Register Two */ | ||
622 | }; | ||
623 | |||
624 | struct gfar_extra_stats { | ||
625 | u64 kernel_dropped; | ||
626 | u64 rx_large; | ||
627 | u64 rx_short; | ||
628 | u64 rx_nonoctet; | ||
629 | u64 rx_crcerr; | ||
630 | u64 rx_overrun; | ||
631 | u64 rx_bsy; | ||
632 | u64 rx_babr; | ||
633 | u64 rx_trunc; | ||
634 | u64 eberr; | ||
635 | u64 tx_babt; | ||
636 | u64 tx_underrun; | ||
637 | u64 rx_skbmissing; | ||
638 | u64 tx_timeout; | ||
639 | }; | ||
640 | |||
641 | #define GFAR_RMON_LEN ((sizeof(struct rmon_mib) - 16)/sizeof(u32)) | ||
642 | #define GFAR_EXTRA_STATS_LEN (sizeof(struct gfar_extra_stats)/sizeof(u64)) | ||
643 | |||
644 | /* Number of stats in the stats structure (excludes the car and cam regs) */ | ||
645 | #define GFAR_STATS_LEN (GFAR_RMON_LEN + GFAR_EXTRA_STATS_LEN) | ||
646 | |||
647 | #define GFAR_INFOSTR_LEN 32 | ||
648 | |||
649 | struct gfar_stats { | ||
650 | u64 extra[GFAR_EXTRA_STATS_LEN]; | ||
651 | u64 rmon[GFAR_RMON_LEN]; | ||
652 | }; | ||
653 | |||
654 | |||
655 | struct gfar { | ||
656 | u32 tsec_id; /* 0x.000 - Controller ID register */ | ||
657 | u32 tsec_id2; /* 0x.004 - Controller ID2 register */ | ||
658 | u8 res1[8]; | ||
659 | u32 ievent; /* 0x.010 - Interrupt Event Register */ | ||
660 | u32 imask; /* 0x.014 - Interrupt Mask Register */ | ||
661 | u32 edis; /* 0x.018 - Error Disabled Register */ | ||
662 | u32 emapg; /* 0x.01c - Group Error mapping register */ | ||
663 | u32 ecntrl; /* 0x.020 - Ethernet Control Register */ | ||
664 | u32 minflr; /* 0x.024 - Minimum Frame Length Register */ | ||
665 | u32 ptv; /* 0x.028 - Pause Time Value Register */ | ||
666 | u32 dmactrl; /* 0x.02c - DMA Control Register */ | ||
667 | u32 tbipa; /* 0x.030 - TBI PHY Address Register */ | ||
668 | u8 res2[28]; | ||
669 | u32 fifo_rx_pause; /* 0x.050 - FIFO receive pause start threshold | ||
670 | register */ | ||
671 | u32 fifo_rx_pause_shutoff; /* 0x.054 - FIFO receive pause shutoff | ||
672 | register */ | ||
673 | u32 fifo_rx_alarm; /* 0x.058 - FIFO receive alarm start threshold | ||
674 | register */ | ||
675 | u32 fifo_rx_alarm_shutoff; /* 0x.05c - FIFO receive alarm | ||
676 | shutoff register */ | ||
677 | u8 res3[44]; | ||
678 | u32 fifo_tx_thr; /* 0x.08c - FIFO transmit threshold register */ | ||
679 | u8 res4[8]; | ||
680 | u32 fifo_tx_starve; /* 0x.098 - FIFO transmit starve register */ | ||
681 | u32 fifo_tx_starve_shutoff; /* 0x.09c - FIFO transmit starve shutoff register */ | ||
682 | u8 res5[96]; | ||
683 | u32 tctrl; /* 0x.100 - Transmit Control Register */ | ||
684 | u32 tstat; /* 0x.104 - Transmit Status Register */ | ||
685 | u32 dfvlan; /* 0x.108 - Default VLAN Control word */ | ||
686 | u32 tbdlen; /* 0x.10c - Transmit Buffer Descriptor Data Length Register */ | ||
687 | u32 txic; /* 0x.110 - Transmit Interrupt Coalescing Configuration Register */ | ||
688 | u32 tqueue; /* 0x.114 - Transmit queue control register */ | ||
689 | u8 res7[40]; | ||
690 | u32 tr03wt; /* 0x.140 - TxBD Rings 0-3 round-robin weightings */ | ||
691 | u32 tr47wt; /* 0x.144 - TxBD Rings 4-7 round-robin weightings */ | ||
692 | u8 res8[52]; | ||
693 | u32 tbdbph; /* 0x.17c - Tx data buffer pointer high */ | ||
694 | u8 res9a[4]; | ||
695 | u32 tbptr0; /* 0x.184 - TxBD Pointer for ring 0 */ | ||
696 | u8 res9b[4]; | ||
697 | u32 tbptr1; /* 0x.18c - TxBD Pointer for ring 1 */ | ||
698 | u8 res9c[4]; | ||
699 | u32 tbptr2; /* 0x.194 - TxBD Pointer for ring 2 */ | ||
700 | u8 res9d[4]; | ||
701 | u32 tbptr3; /* 0x.19c - TxBD Pointer for ring 3 */ | ||
702 | u8 res9e[4]; | ||
703 | u32 tbptr4; /* 0x.1a4 - TxBD Pointer for ring 4 */ | ||
704 | u8 res9f[4]; | ||
705 | u32 tbptr5; /* 0x.1ac - TxBD Pointer for ring 5 */ | ||
706 | u8 res9g[4]; | ||
707 | u32 tbptr6; /* 0x.1b4 - TxBD Pointer for ring 6 */ | ||
708 | u8 res9h[4]; | ||
709 | u32 tbptr7; /* 0x.1bc - TxBD Pointer for ring 7 */ | ||
710 | u8 res9[64]; | ||
711 | u32 tbaseh; /* 0x.200 - TxBD base address high */ | ||
712 | u32 tbase0; /* 0x.204 - TxBD Base Address of ring 0 */ | ||
713 | u8 res10a[4]; | ||
714 | u32 tbase1; /* 0x.20c - TxBD Base Address of ring 1 */ | ||
715 | u8 res10b[4]; | ||
716 | u32 tbase2; /* 0x.214 - TxBD Base Address of ring 2 */ | ||
717 | u8 res10c[4]; | ||
718 | u32 tbase3; /* 0x.21c - TxBD Base Address of ring 3 */ | ||
719 | u8 res10d[4]; | ||
720 | u32 tbase4; /* 0x.224 - TxBD Base Address of ring 4 */ | ||
721 | u8 res10e[4]; | ||
722 | u32 tbase5; /* 0x.22c - TxBD Base Address of ring 5 */ | ||
723 | u8 res10f[4]; | ||
724 | u32 tbase6; /* 0x.234 - TxBD Base Address of ring 6 */ | ||
725 | u8 res10g[4]; | ||
726 | u32 tbase7; /* 0x.23c - TxBD Base Address of ring 7 */ | ||
727 | u8 res10[192]; | ||
728 | u32 rctrl; /* 0x.300 - Receive Control Register */ | ||
729 | u32 rstat; /* 0x.304 - Receive Status Register */ | ||
730 | u8 res12[8]; | ||
731 | u32 rxic; /* 0x.310 - Receive Interrupt Coalescing Configuration Register */ | ||
732 | u32 rqueue; /* 0x.314 - Receive queue control register */ | ||
733 | u32 rir0; /* 0x.318 - Ring mapping register 0 */ | ||
734 | u32 rir1; /* 0x.31c - Ring mapping register 1 */ | ||
735 | u32 rir2; /* 0x.320 - Ring mapping register 2 */ | ||
736 | u32 rir3; /* 0x.324 - Ring mapping register 3 */ | ||
737 | u8 res13[8]; | ||
738 | u32 rbifx; /* 0x.330 - Receive bit field extract control register */ | ||
739 | u32 rqfar; /* 0x.334 - Receive queue filer table address register */ | ||
740 | u32 rqfcr; /* 0x.338 - Receive queue filer table control register */ | ||
741 | u32 rqfpr; /* 0x.33c - Receive queue filer table property register */ | ||
742 | u32 mrblr; /* 0x.340 - Maximum Receive Buffer Length Register */ | ||
743 | u8 res14[56]; | ||
744 | u32 rbdbph; /* 0x.37c - Rx data buffer pointer high */ | ||
745 | u8 res15a[4]; | ||
746 | u32 rbptr0; /* 0x.384 - RxBD pointer for ring 0 */ | ||
747 | u8 res15b[4]; | ||
748 | u32 rbptr1; /* 0x.38c - RxBD pointer for ring 1 */ | ||
749 | u8 res15c[4]; | ||
750 | u32 rbptr2; /* 0x.394 - RxBD pointer for ring 2 */ | ||
751 | u8 res15d[4]; | ||
752 | u32 rbptr3; /* 0x.39c - RxBD pointer for ring 3 */ | ||
753 | u8 res15e[4]; | ||
754 | u32 rbptr4; /* 0x.3a4 - RxBD pointer for ring 4 */ | ||
755 | u8 res15f[4]; | ||
756 | u32 rbptr5; /* 0x.3ac - RxBD pointer for ring 5 */ | ||
757 | u8 res15g[4]; | ||
758 | u32 rbptr6; /* 0x.3b4 - RxBD pointer for ring 6 */ | ||
759 | u8 res15h[4]; | ||
760 | u32 rbptr7; /* 0x.3bc - RxBD pointer for ring 7 */ | ||
761 | u8 res16[64]; | ||
762 | u32 rbaseh; /* 0x.400 - RxBD base address high */ | ||
763 | u32 rbase0; /* 0x.404 - RxBD base address of ring 0 */ | ||
764 | u8 res17a[4]; | ||
765 | u32 rbase1; /* 0x.40c - RxBD base address of ring 1 */ | ||
766 | u8 res17b[4]; | ||
767 | u32 rbase2; /* 0x.414 - RxBD base address of ring 2 */ | ||
768 | u8 res17c[4]; | ||
769 | u32 rbase3; /* 0x.41c - RxBD base address of ring 3 */ | ||
770 | u8 res17d[4]; | ||
771 | u32 rbase4; /* 0x.424 - RxBD base address of ring 4 */ | ||
772 | u8 res17e[4]; | ||
773 | u32 rbase5; /* 0x.42c - RxBD base address of ring 5 */ | ||
774 | u8 res17f[4]; | ||
775 | u32 rbase6; /* 0x.434 - RxBD base address of ring 6 */ | ||
776 | u8 res17g[4]; | ||
777 | u32 rbase7; /* 0x.43c - RxBD base address of ring 7 */ | ||
778 | u8 res17[192]; | ||
779 | u32 maccfg1; /* 0x.500 - MAC Configuration 1 Register */ | ||
780 | u32 maccfg2; /* 0x.504 - MAC Configuration 2 Register */ | ||
781 | u32 ipgifg; /* 0x.508 - Inter Packet Gap/Inter Frame Gap Register */ | ||
782 | u32 hafdup; /* 0x.50c - Half Duplex Register */ | ||
783 | u32 maxfrm; /* 0x.510 - Maximum Frame Length Register */ | ||
784 | u8 res18[12]; | ||
785 | u8 gfar_mii_regs[24]; /* See gianfar_phy.h */ | ||
786 | u32 ifctrl; /* 0x.538 - Interface control register */ | ||
787 | u32 ifstat; /* 0x.53c - Interface Status Register */ | ||
788 | u32 macstnaddr1; /* 0x.540 - Station Address Part 1 Register */ | ||
789 | u32 macstnaddr2; /* 0x.544 - Station Address Part 2 Register */ | ||
790 | u32 mac01addr1; /* 0x.548 - MAC exact match address 1, part 1 */ | ||
791 | u32 mac01addr2; /* 0x.54c - MAC exact match address 1, part 2 */ | ||
792 | u32 mac02addr1; /* 0x.550 - MAC exact match address 2, part 1 */ | ||
793 | u32 mac02addr2; /* 0x.554 - MAC exact match address 2, part 2 */ | ||
794 | u32 mac03addr1; /* 0x.558 - MAC exact match address 3, part 1 */ | ||
795 | u32 mac03addr2; /* 0x.55c - MAC exact match address 3, part 2 */ | ||
796 | u32 mac04addr1; /* 0x.560 - MAC exact match address 4, part 1 */ | ||
797 | u32 mac04addr2; /* 0x.564 - MAC exact match address 4, part 2 */ | ||
798 | u32 mac05addr1; /* 0x.568 - MAC exact match address 5, part 1 */ | ||
799 | u32 mac05addr2; /* 0x.56c - MAC exact match address 5, part 2 */ | ||
800 | u32 mac06addr1; /* 0x.570 - MAC exact match address 6, part 1 */ | ||
801 | u32 mac06addr2; /* 0x.574 - MAC exact match address 6, part 2 */ | ||
802 | u32 mac07addr1; /* 0x.578 - MAC exact match address 7, part 1 */ | ||
803 | u32 mac07addr2; /* 0x.57c - MAC exact match address 7, part 2 */ | ||
804 | u32 mac08addr1; /* 0x.580 - MAC exact match address 8, part 1 */ | ||
805 | u32 mac08addr2; /* 0x.584 - MAC exact match address 8, part 2 */ | ||
806 | u32 mac09addr1; /* 0x.588 - MAC exact match address 9, part 1 */ | ||
807 | u32 mac09addr2; /* 0x.58c - MAC exact match address 9, part 2 */ | ||
808 | u32 mac10addr1; /* 0x.590 - MAC exact match address 10, part 1*/ | ||
809 | u32 mac10addr2; /* 0x.594 - MAC exact match address 10, part 2*/ | ||
810 | u32 mac11addr1; /* 0x.598 - MAC exact match address 11, part 1*/ | ||
811 | u32 mac11addr2; /* 0x.59c - MAC exact match address 11, part 2*/ | ||
812 | u32 mac12addr1; /* 0x.5a0 - MAC exact match address 12, part 1*/ | ||
813 | u32 mac12addr2; /* 0x.5a4 - MAC exact match address 12, part 2*/ | ||
814 | u32 mac13addr1; /* 0x.5a8 - MAC exact match address 13, part 1*/ | ||
815 | u32 mac13addr2; /* 0x.5ac - MAC exact match address 13, part 2*/ | ||
816 | u32 mac14addr1; /* 0x.5b0 - MAC exact match address 14, part 1*/ | ||
817 | u32 mac14addr2; /* 0x.5b4 - MAC exact match address 14, part 2*/ | ||
818 | u32 mac15addr1; /* 0x.5b8 - MAC exact match address 15, part 1*/ | ||
819 | u32 mac15addr2; /* 0x.5bc - MAC exact match address 15, part 2*/ | ||
820 | u8 res20[192]; | ||
821 | struct rmon_mib rmon; /* 0x.680-0x.73c */ | ||
822 | u32 rrej; /* 0x.740 - Receive filer rejected packet counter */ | ||
823 | u8 res21[188]; | ||
824 | u32 igaddr0; /* 0x.800 - Individual/Group address register 0 */ | ||
825 | u32 igaddr1; /* 0x.804 - Individual/Group address register 1 */ | ||
826 | u32 igaddr2; /* 0x.808 - Individual/Group address register 2 */ | ||
827 | u32 igaddr3; /* 0x.80c - Individual/Group address register 3 */ | ||
828 | u32 igaddr4; /* 0x.810 - Individual/Group address register 4 */ | ||
829 | u32 igaddr5; /* 0x.814 - Individual/Group address register 5 */ | ||
830 | u32 igaddr6; /* 0x.818 - Individual/Group address register 6 */ | ||
831 | u32 igaddr7; /* 0x.81c - Individual/Group address register 7 */ | ||
832 | u8 res22[96]; | ||
833 | u32 gaddr0; /* 0x.880 - Group address register 0 */ | ||
834 | u32 gaddr1; /* 0x.884 - Group address register 1 */ | ||
835 | u32 gaddr2; /* 0x.888 - Group address register 2 */ | ||
836 | u32 gaddr3; /* 0x.88c - Group address register 3 */ | ||
837 | u32 gaddr4; /* 0x.890 - Group address register 4 */ | ||
838 | u32 gaddr5; /* 0x.894 - Group address register 5 */ | ||
839 | u32 gaddr6; /* 0x.898 - Group address register 6 */ | ||
840 | u32 gaddr7; /* 0x.89c - Group address register 7 */ | ||
841 | u8 res23a[352]; | ||
842 | u32 fifocfg; /* 0x.a00 - FIFO interface config register */ | ||
843 | u8 res23b[252]; | ||
844 | u8 res23c[248]; | ||
845 | u32 attr; /* 0x.bf8 - Attributes Register */ | ||
846 | u32 attreli; /* 0x.bfc - Attributes Extract Length and Extract Index Register */ | ||
847 | u8 res24[688]; | ||
848 | u32 isrg0; /* 0x.eb0 - Interrupt steering group 0 register */ | ||
849 | u32 isrg1; /* 0x.eb4 - Interrupt steering group 1 register */ | ||
850 | u32 isrg2; /* 0x.eb8 - Interrupt steering group 2 register */ | ||
851 | u32 isrg3; /* 0x.ebc - Interrupt steering group 3 register */ | ||
852 | u8 res25[16]; | ||
853 | u32 rxic0; /* 0x.ed0 - Ring 0 Rx interrupt coalescing */ | ||
854 | u32 rxic1; /* 0x.ed4 - Ring 1 Rx interrupt coalescing */ | ||
855 | u32 rxic2; /* 0x.ed8 - Ring 2 Rx interrupt coalescing */ | ||
856 | u32 rxic3; /* 0x.edc - Ring 3 Rx interrupt coalescing */ | ||
857 | u32 rxic4; /* 0x.ee0 - Ring 4 Rx interrupt coalescing */ | ||
858 | u32 rxic5; /* 0x.ee4 - Ring 5 Rx interrupt coalescing */ | ||
859 | u32 rxic6; /* 0x.ee8 - Ring 6 Rx interrupt coalescing */ | ||
860 | u32 rxic7; /* 0x.eec - Ring 7 Rx interrupt coalescing */ | ||
861 | u8 res26[32]; | ||
862 | u32 txic0; /* 0x.f10 - Ring 0 Tx interrupt coalescing */ | ||
863 | u32 txic1; /* 0x.f14 - Ring 1 Tx interrupt coalescing */ | ||
864 | u32 txic2; /* 0x.f18 - Ring 2 Tx interrupt coalescing */ | ||
865 | u32 txic3; /* 0x.f1c - Ring 3 Tx interrupt coalescing */ | ||
866 | u32 txic4; /* 0x.f20 - Ring 4 Tx interrupt coalescing */ | ||
867 | u32 txic5; /* 0x.f24 - Ring 5 Tx interrupt coalescing */ | ||
868 | u32 txic6; /* 0x.f28 - Ring 6 Tx interrupt coalescing */ | ||
869 | u32 txic7; /* 0x.f2c - Ring 7 Tx interrupt coalescing */ | ||
870 | u8 res27[208]; | ||
871 | }; | ||
872 | |||
873 | /* Flags related to gianfar device features */ | ||
874 | #define FSL_GIANFAR_DEV_HAS_GIGABIT 0x00000001 | ||
875 | #define FSL_GIANFAR_DEV_HAS_COALESCE 0x00000002 | ||
876 | #define FSL_GIANFAR_DEV_HAS_RMON 0x00000004 | ||
877 | #define FSL_GIANFAR_DEV_HAS_MULTI_INTR 0x00000008 | ||
878 | #define FSL_GIANFAR_DEV_HAS_CSUM 0x00000010 | ||
879 | #define FSL_GIANFAR_DEV_HAS_VLAN 0x00000020 | ||
880 | #define FSL_GIANFAR_DEV_HAS_EXTENDED_HASH 0x00000040 | ||
881 | #define FSL_GIANFAR_DEV_HAS_PADDING 0x00000080 | ||
882 | #define FSL_GIANFAR_DEV_HAS_MAGIC_PACKET 0x00000100 | ||
883 | #define FSL_GIANFAR_DEV_HAS_BD_STASHING 0x00000200 | ||
884 | #define FSL_GIANFAR_DEV_HAS_BUF_STASHING 0x00000400 | ||
885 | #define FSL_GIANFAR_DEV_HAS_TIMER 0x00000800 | ||
886 | |||
887 | #if (MAXGROUPS == 2) | ||
888 | #define DEFAULT_MAPPING 0xAA | ||
889 | #else | ||
890 | #define DEFAULT_MAPPING 0xFF | ||
891 | #endif | ||
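/* Note: 0xAA (binary 10101010) interleaves the eight queues between
 * the two groups bit by bit, while 0xFF steers every queue to the
 * single group.
 */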
892 | |||
893 | #define ISRG_SHIFT_TX 0x10 | ||
894 | #define ISRG_SHIFT_RX 0x18 | ||
895 | |||
896 | /* The same driver can operate in two modes */ | ||
897 | /* SQ_SG_MODE: Single Queue Single Group Mode | ||
898 | * (Backward compatible mode) | ||
899 | * MQ_MG_MODE: Multi Queue Multi Group mode | ||
900 | */ | ||
901 | enum { | ||
902 | SQ_SG_MODE = 0, | ||
903 | MQ_MG_MODE | ||
904 | }; | ||
905 | |||
906 | /* | ||
907 | * Per TX queue stats | ||
908 | */ | ||
909 | struct tx_q_stats { | ||
910 | unsigned long tx_packets; | ||
911 | unsigned long tx_bytes; | ||
912 | }; | ||
913 | |||
914 | /** | ||
915 | * struct gfar_priv_tx_q - per tx queue structure | ||
916 | * @txlock: per queue tx spin lock | ||
917 | * @tx_skbuff: skb pointers | ||
918 | * @skb_curtx: the next to-be-used skb pointer | ||
919 | * @skb_dirtytx: the last used skb pointer | ||
920 | * @stats: bytes/packets stats | ||
921 | * @qindex: index of this queue | ||
922 | * @dev: back pointer to the dev structure | ||
923 | * @grp: back pointer to the group to which this queue belongs | ||
924 | * @tx_bd_base: First tx buffer descriptor | ||
925 | * @cur_tx: Next free ring entry | ||
926 | * @dirty_tx: First buffer in line to be transmitted | ||
927 | * @tx_ring_size: Tx ring size | ||
928 | * @num_txbdfree: number of free TxBds | ||
929 | * @txcoalescing: enable/disable tx coalescing | ||
930 | * @txic: transmit interrupt coalescing value | ||
931 | * @txcount: coalescing value if based on tx frame count | ||
932 | * @txtime: coalescing value if based on time | ||
933 | */ | ||
934 | struct gfar_priv_tx_q { | ||
935 | spinlock_t txlock __attribute__ ((aligned (SMP_CACHE_BYTES))); | ||
936 | struct sk_buff ** tx_skbuff; | ||
937 | /* Buffer descriptor pointers */ | ||
938 | dma_addr_t tx_bd_dma_base; | ||
939 | struct txbd8 *tx_bd_base; | ||
940 | struct txbd8 *cur_tx; | ||
941 | struct txbd8 *dirty_tx; | ||
942 | struct tx_q_stats stats; | ||
943 | struct net_device *dev; | ||
944 | struct gfar_priv_grp *grp; | ||
945 | u16 skb_curtx; | ||
946 | u16 skb_dirtytx; | ||
947 | u16 qindex; | ||
948 | unsigned int tx_ring_size; | ||
949 | unsigned int num_txbdfree; | ||
950 | /* Configuration info for the coalescing features */ | ||
951 | unsigned char txcoalescing; | ||
952 | unsigned long txic; | ||
953 | unsigned short txcount; | ||
954 | unsigned short txtime; | ||
955 | }; | ||
956 | |||
957 | /* | ||
958 | * Per RX queue stats | ||
959 | */ | ||
960 | struct rx_q_stats { | ||
961 | unsigned long rx_packets; | ||
962 | unsigned long rx_bytes; | ||
963 | unsigned long rx_dropped; | ||
964 | }; | ||
965 | |||
966 | /** | ||
967 | * struct gfar_priv_rx_q - per rx queue structure | ||
968 | * @rxlock: per queue rx spin lock | ||
969 | * @rx_skbuff: skb pointers | ||
970 | * @skb_currx: currently used skb pointer | ||
971 | * @rx_bd_base: First rx buffer descriptor | ||
972 | * @cur_rx: Next free rx ring entry | ||
973 | * @qindex: index of this queue | ||
974 | * @dev: back pointer to the dev structure | ||
975 | * @rx_ring_size: Rx ring size | ||
976 | * @rxcoalescing: enable/disable rx coalescing | ||
977 | * @rxic: receive interrupt coalescing value | ||
978 | */ | ||
979 | |||
980 | struct gfar_priv_rx_q { | ||
981 | spinlock_t rxlock __attribute__ ((aligned (SMP_CACHE_BYTES))); | ||
982 | struct sk_buff ** rx_skbuff; | ||
983 | dma_addr_t rx_bd_dma_base; | ||
984 | struct rxbd8 *rx_bd_base; | ||
985 | struct rxbd8 *cur_rx; | ||
986 | struct net_device *dev; | ||
987 | struct gfar_priv_grp *grp; | ||
988 | struct rx_q_stats stats; | ||
989 | u16 skb_currx; | ||
990 | u16 qindex; | ||
991 | unsigned int rx_ring_size; | ||
992 | /* RX Coalescing values */ | ||
993 | unsigned char rxcoalescing; | ||
994 | unsigned long rxic; | ||
995 | }; | ||
996 | |||
997 | /** | ||
998 | * struct gfar_priv_grp - per group structure | ||
999 | * @napi: the napi poll function | ||
1000 | * @priv: back pointer to the priv structure | ||
1001 | * @regs: the ioremapped register space for this group | ||
1002 | * @grp_id: group id for this group | ||
1003 | * @interruptTransmit: The TX interrupt number for this group | ||
1004 | * @interruptReceive: The RX interrupt number for this group | ||
1005 | * @interruptError: The ERROR interrupt number for this group | ||
1006 | * @int_name_tx: tx interrupt name for this group | ||
1007 | * @int_name_rx: rx interrupt name for this group | ||
1008 | * @int_name_er: er interrupt name for this group | ||
1009 | */ | ||
1010 | |||
1011 | struct gfar_priv_grp { | ||
1012 | spinlock_t grplock __attribute__ ((aligned (SMP_CACHE_BYTES))); | ||
1013 | struct napi_struct napi; | ||
1014 | struct gfar_private *priv; | ||
1015 | struct gfar __iomem *regs; | ||
1016 | unsigned int grp_id; | ||
1017 | unsigned long rx_bit_map; | ||
1018 | unsigned long tx_bit_map; | ||
1019 | unsigned long num_tx_queues; | ||
1020 | unsigned long num_rx_queues; | ||
1021 | unsigned int rstat; | ||
1022 | unsigned int tstat; | ||
1023 | unsigned int imask; | ||
1024 | unsigned int ievent; | ||
1025 | unsigned int interruptTransmit; | ||
1026 | unsigned int interruptReceive; | ||
1027 | unsigned int interruptError; | ||
1028 | |||
1029 | char int_name_tx[GFAR_INT_NAME_MAX]; | ||
1030 | char int_name_rx[GFAR_INT_NAME_MAX]; | ||
1031 | char int_name_er[GFAR_INT_NAME_MAX]; | ||
1032 | }; | ||
1033 | |||
1034 | enum gfar_errata { | ||
1035 | GFAR_ERRATA_74 = 0x01, | ||
1036 | GFAR_ERRATA_76 = 0x02, | ||
1037 | GFAR_ERRATA_A002 = 0x04, | ||
1038 | GFAR_ERRATA_12 = 0x08, /* a.k.a errata eTSEC49 */ | ||
1039 | }; | ||
1040 | |||
1041 | /* Struct stolen almost completely (and shamelessly) from the FCC enet source | ||
1042 | * (Ok, that's not so true anymore, but there is a family resemblance) | ||
1043 | * The GFAR buffer descriptors track the ring buffers. The rx_bd_base | ||
1044 | * and tx_bd_base always point to the first descriptor of their rings. | ||
1045 | * The dirty_tx tracks the current buffer that is being sent by the | ||
1046 | * controller. The cur_tx and dirty_tx are equal under both completely | ||
1047 | * empty and completely full conditions. The empty/ready indicator in | ||
1048 | * the buffer descriptor determines the actual condition. | ||
1049 | */ | ||
1050 | struct gfar_private { | ||
1051 | |||
1052 | /* Indicates how many tx, rx queues are enabled */ | ||
1053 | unsigned int num_tx_queues; | ||
1054 | unsigned int num_rx_queues; | ||
1055 | unsigned int num_grps; | ||
1056 | unsigned int mode; | ||
1057 | |||
1058 | /* The total tx and rx ring size for the enabled queues */ | ||
1059 | unsigned int total_tx_ring_size; | ||
1060 | unsigned int total_rx_ring_size; | ||
1061 | |||
1062 | struct device_node *node; | ||
1063 | struct net_device *ndev; | ||
1064 | struct platform_device *ofdev; | ||
1065 | enum gfar_errata errata; | ||
1066 | |||
1067 | struct gfar_priv_grp gfargrp[MAXGROUPS]; | ||
1068 | struct gfar_priv_tx_q *tx_queue[MAX_TX_QS]; | ||
1069 | struct gfar_priv_rx_q *rx_queue[MAX_RX_QS]; | ||
1070 | |||
1071 | /* RX per device parameters */ | ||
1072 | unsigned int rx_buffer_size; | ||
1073 | unsigned int rx_stash_size; | ||
1074 | unsigned int rx_stash_index; | ||
1075 | |||
1076 | u32 cur_filer_idx; | ||
1077 | |||
1078 | struct sk_buff_head rx_recycle; | ||
1079 | |||
1080 | /* RX queue filer rule set*/ | ||
1081 | struct ethtool_rx_list rx_list; | ||
1082 | struct mutex rx_queue_access; | ||
1083 | |||
1084 | /* Hash registers and their width */ | ||
1085 | u32 __iomem *hash_regs[16]; | ||
1086 | int hash_width; | ||
1087 | |||
1088 | /* global parameters */ | ||
1089 | unsigned int fifo_threshold; | ||
1090 | unsigned int fifo_starve; | ||
1091 | unsigned int fifo_starve_off; | ||
1092 | |||
1093 | /* Bitfield update lock */ | ||
1094 | spinlock_t bflock; | ||
1095 | |||
1096 | phy_interface_t interface; | ||
1097 | struct device_node *phy_node; | ||
1098 | struct device_node *tbi_node; | ||
1099 | u32 device_flags; | ||
1100 | unsigned char | ||
1101 | extended_hash:1, | ||
1102 | bd_stash_en:1, | ||
1103 | rx_filer_enable:1, | ||
1104 | wol_en:1; /* Wake-on-LAN enabled */ | ||
1105 | unsigned short padding; | ||
1106 | |||
1107 | /* PHY stuff */ | ||
1108 | struct phy_device *phydev; | ||
1109 | struct mii_bus *mii_bus; | ||
1110 | int oldspeed; | ||
1111 | int oldduplex; | ||
1112 | int oldlink; | ||
1113 | |||
1114 | uint32_t msg_enable; | ||
1115 | |||
1116 | struct work_struct reset_task; | ||
1117 | |||
1118 | /* Network Statistics */ | ||
1119 | struct gfar_extra_stats extra_stats; | ||
1120 | |||
1121 | /* HW time stamping enabled flag */ | ||
1122 | int hwts_rx_en; | ||
1123 | int hwts_tx_en; | ||
1124 | |||
1125 | /* Filer table */ | ||
1126 | unsigned int ftp_rqfpr[MAX_FILER_IDX + 1]; | ||
1127 | unsigned int ftp_rqfcr[MAX_FILER_IDX + 1]; | ||
1128 | }; | ||
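
/* A sketch of the empty-vs-full disambiguation described above the
 * struct: when cur_tx meets dirty_tx, the descriptor's ready bit tells
 * the two states apart (illustrative fragment, variable names assumed):
 *
 *	if (txq->cur_tx == txq->dirty_tx)
 *		ring_full = txq->dirty_tx->status & TXBD_READY;
 */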
1129 | |||
1130 | |||
1131 | static inline int gfar_has_errata(struct gfar_private *priv, | ||
1132 | enum gfar_errata err) | ||
1133 | { | ||
1134 | return priv->errata & err; | ||
1135 | } | ||
1136 | |||
1137 | static inline u32 gfar_read(volatile unsigned __iomem *addr) | ||
1138 | { | ||
1139 | u32 val; | ||
1140 | val = in_be32(addr); | ||
1141 | return val; | ||
1142 | } | ||
1143 | |||
1144 | static inline void gfar_write(volatile unsigned __iomem *addr, u32 val) | ||
1145 | { | ||
1146 | out_be32(addr, val); | ||
1147 | } | ||
1148 | |||
1149 | static inline void gfar_write_filer(struct gfar_private *priv, | ||
1150 | unsigned int far, unsigned int fcr, unsigned int fpr) | ||
1151 | { | ||
1152 | struct gfar __iomem *regs = priv->gfargrp[0].regs; | ||
1153 | |||
1154 | gfar_write(®s->rqfar, far); | ||
1155 | gfar_write(®s->rqfcr, fcr); | ||
1156 | gfar_write(®s->rqfpr, fpr); | ||
1157 | } | ||
1158 | |||
1159 | static inline void gfar_read_filer(struct gfar_private *priv, | ||
1160 | unsigned int far, unsigned int *fcr, unsigned int *fpr) | ||
1161 | { | ||
1162 | struct gfar __iomem *regs = priv->gfargrp[0].regs; | ||
1163 | |||
1164 | gfar_write(®s->rqfar, far); | ||
1165 | *fcr = gfar_read(®s->rqfcr); | ||
1166 | *fpr = gfar_read(®s->rqfpr); | ||
1167 | } | ||
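
/* Usage sketch (an illustrative rule, not the driver's default table):
 * program filer entry 0 as a match-anything entry by comparing against
 * the all-ones property mask:
 *
 *	gfar_write_filer(priv, 0, RQFCR_CMP_MATCH, FPR_FILER_MASK);
 */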
1168 | |||
1169 | extern void lock_rx_qs(struct gfar_private *priv); | ||
1170 | extern void lock_tx_qs(struct gfar_private *priv); | ||
1171 | extern void unlock_rx_qs(struct gfar_private *priv); | ||
1172 | extern void unlock_tx_qs(struct gfar_private *priv); | ||
1173 | extern irqreturn_t gfar_receive(int irq, void *dev_id); | ||
1174 | extern int startup_gfar(struct net_device *dev); | ||
1175 | extern void stop_gfar(struct net_device *dev); | ||
1176 | extern void gfar_halt(struct net_device *dev); | ||
1177 | extern void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev, | ||
1178 | int enable, u32 regnum, u32 read); | ||
1179 | extern void gfar_configure_coalescing(struct gfar_private *priv, | ||
1180 | unsigned long tx_mask, unsigned long rx_mask); | ||
1181 | void gfar_init_sysfs(struct net_device *dev); | ||
1182 | int gfar_set_features(struct net_device *dev, u32 features); | ||
1183 | extern void gfar_check_rx_parser_mode(struct gfar_private *priv); | ||
1184 | extern void gfar_vlan_mode(struct net_device *dev, u32 features); | ||
1185 | |||
1186 | extern const struct ethtool_ops gfar_ethtool_ops; | ||
1187 | |||
1188 | #define MAX_FILER_CACHE_IDX (2*(MAX_FILER_IDX)) | ||
1189 | |||
1190 | #define RQFCR_PID_PRI_MASK 0xFFFFFFF8 | ||
1191 | #define RQFCR_PID_L4P_MASK 0xFFFFFF00 | ||
1192 | #define RQFCR_PID_VID_MASK 0xFFFFF000 | ||
1193 | #define RQFCR_PID_PORT_MASK 0xFFFF0000 | ||
1194 | #define RQFCR_PID_MAC_MASK 0xFF000000 | ||
1195 | |||
1196 | struct gfar_mask_entry { | ||
1197 | unsigned int mask; /* The mask value which is valid from start to end */ | ||
1198 | unsigned int start; | ||
1199 | unsigned int end; | ||
1200 | unsigned int block; /* Same block values indicate dependent entries */ | ||
1201 | }; | ||
1202 | |||
1203 | /* Represents a receive filer table entry */ | ||
1204 | struct gfar_filer_entry { | ||
1205 | u32 ctrl; | ||
1206 | u32 prop; | ||
1207 | }; | ||
1208 | |||
1209 | |||
1210 | /* The 20 additional entries are a shadow for one extra element */ | ||
1211 | struct filer_table { | ||
1212 | u32 index; | ||
1213 | struct gfar_filer_entry fe[MAX_FILER_CACHE_IDX + 20]; | ||
1214 | }; | ||
1215 | |||
1216 | #endif /* __GIANFAR_H */ | ||
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c new file mode 100644 index 000000000000..6e350692d118 --- /dev/null +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c | |||
@@ -0,0 +1,1747 @@ | |||
1 | /* | ||
2 | * drivers/net/gianfar_ethtool.c | ||
3 | * | ||
4 | * Gianfar Ethernet Driver | ||
5 | * Ethtool support for Gianfar Enet | ||
6 | * Based on e1000 ethtool support | ||
7 | * | ||
8 | * Author: Andy Fleming | ||
9 | * Maintainer: Kumar Gala | ||
10 | * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com> | ||
11 | * | ||
12 | * Copyright 2003-2006, 2008-2009, 2011 Freescale Semiconductor, Inc. | ||
13 | * | ||
14 | * This software may be used and distributed according to | ||
15 | * the terms of the GNU Public License, Version 2, incorporated herein | ||
16 | * by reference. | ||
17 | */ | ||
18 | |||
19 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
20 | |||
21 | #include <linux/kernel.h> | ||
22 | #include <linux/string.h> | ||
23 | #include <linux/errno.h> | ||
24 | #include <linux/interrupt.h> | ||
25 | #include <linux/init.h> | ||
26 | #include <linux/delay.h> | ||
27 | #include <linux/netdevice.h> | ||
28 | #include <linux/etherdevice.h> | ||
29 | #include <linux/skbuff.h> | ||
30 | #include <linux/spinlock.h> | ||
31 | #include <linux/mm.h> | ||
32 | |||
33 | #include <asm/io.h> | ||
34 | #include <asm/irq.h> | ||
35 | #include <asm/uaccess.h> | ||
36 | #include <linux/module.h> | ||
37 | #include <linux/crc32.h> | ||
38 | #include <asm/types.h> | ||
39 | #include <linux/ethtool.h> | ||
40 | #include <linux/mii.h> | ||
41 | #include <linux/phy.h> | ||
42 | #include <linux/sort.h> | ||
43 | #include <linux/if_vlan.h> | ||
44 | |||
45 | #include "gianfar.h" | ||
46 | |||
47 | extern void gfar_start(struct net_device *dev); | ||
48 | extern int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit); | ||
49 | |||
50 | #define GFAR_MAX_COAL_USECS 0xffff | ||
51 | #define GFAR_MAX_COAL_FRAMES 0xff | ||
52 | static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy, | ||
53 | u64 * buf); | ||
54 | static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf); | ||
55 | static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals); | ||
56 | static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals); | ||
57 | static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rvals); | ||
58 | static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals); | ||
59 | static void gfar_gdrvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo); | ||
60 | |||
61 | static char stat_gstrings[][ETH_GSTRING_LEN] = { | ||
62 | "rx-dropped-by-kernel", | ||
63 | "rx-large-frame-errors", | ||
64 | "rx-short-frame-errors", | ||
65 | "rx-non-octet-errors", | ||
66 | "rx-crc-errors", | ||
67 | "rx-overrun-errors", | ||
68 | "rx-busy-errors", | ||
69 | "rx-babbling-errors", | ||
70 | "rx-truncated-frames", | ||
71 | "ethernet-bus-error", | ||
72 | "tx-babbling-errors", | ||
73 | "tx-underrun-errors", | ||
74 | "rx-skb-missing-errors", | ||
75 | "tx-timeout-errors", | ||
76 | "tx-rx-64-frames", | ||
77 | "tx-rx-65-127-frames", | ||
78 | "tx-rx-128-255-frames", | ||
79 | "tx-rx-256-511-frames", | ||
80 | "tx-rx-512-1023-frames", | ||
81 | "tx-rx-1024-1518-frames", | ||
82 | "tx-rx-1519-1522-good-vlan", | ||
83 | "rx-bytes", | ||
84 | "rx-packets", | ||
85 | "rx-fcs-errors", | ||
86 | "receive-multicast-packet", | ||
87 | "receive-broadcast-packet", | ||
88 | "rx-control-frame-packets", | ||
89 | "rx-pause-frame-packets", | ||
90 | "rx-unknown-op-code", | ||
91 | "rx-alignment-error", | ||
92 | "rx-frame-length-error", | ||
93 | "rx-code-error", | ||
94 | "rx-carrier-sense-error", | ||
95 | "rx-undersize-packets", | ||
96 | "rx-oversize-packets", | ||
97 | "rx-fragmented-frames", | ||
98 | "rx-jabber-frames", | ||
99 | "rx-dropped-frames", | ||
100 | "tx-byte-counter", | ||
101 | "tx-packets", | ||
102 | "tx-multicast-packets", | ||
103 | "tx-broadcast-packets", | ||
104 | "tx-pause-control-frames", | ||
105 | "tx-deferral-packets", | ||
106 | "tx-excessive-deferral-packets", | ||
107 | "tx-single-collision-packets", | ||
108 | "tx-multiple-collision-packets", | ||
109 | "tx-late-collision-packets", | ||
110 | "tx-excessive-collision-packets", | ||
111 | "tx-total-collision", | ||
112 | "reserved", | ||
113 | "tx-dropped-frames", | ||
114 | "tx-jabber-frames", | ||
115 | "tx-fcs-errors", | ||
116 | "tx-control-frames", | ||
117 | "tx-oversize-frames", | ||
118 | "tx-undersize-frames", | ||
119 | "tx-fragmented-frames", | ||
120 | }; | ||
121 | |||
122 | /* Fill in a buffer with the strings which correspond to the | ||
123 | * stats */ | ||
124 | static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf) | ||
125 | { | ||
126 | struct gfar_private *priv = netdev_priv(dev); | ||
127 | |||
128 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) | ||
129 | memcpy(buf, stat_gstrings, GFAR_STATS_LEN * ETH_GSTRING_LEN); | ||
130 | else | ||
131 | memcpy(buf, stat_gstrings, | ||
132 | GFAR_EXTRA_STATS_LEN * ETH_GSTRING_LEN); | ||
133 | } | ||
134 | |||
135 | /* Fill in an array of 64-bit statistics from various sources. | ||
136 | * This array will be appended to the end of the ethtool_stats | ||
137 | * structure, and returned to user space | ||
138 | */ | ||
139 | static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy, u64 * buf) | ||
140 | { | ||
141 | int i; | ||
142 | struct gfar_private *priv = netdev_priv(dev); | ||
143 | struct gfar __iomem *regs = priv->gfargrp[0].regs; | ||
144 | u64 *extra = (u64 *)&priv->extra_stats; | ||
145 | |||
146 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) { | ||
147 | u32 __iomem *rmon = (u32 __iomem *) ®s->rmon; | ||
148 | struct gfar_stats *stats = (struct gfar_stats *) buf; | ||
149 | |||
150 | for (i = 0; i < GFAR_RMON_LEN; i++) | ||
151 | stats->rmon[i] = (u64) gfar_read(&rmon[i]); | ||
152 | |||
153 | for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++) | ||
154 | stats->extra[i] = extra[i]; | ||
155 | } else | ||
156 | for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++) | ||
157 | buf[i] = extra[i]; | ||
158 | } | ||
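| /* Example (illustrative; interface name eth0 is assumed): user space | ||
| * reads these counters with the standard ethtool CLI, which pairs them | ||
| * with the names from stat_gstrings above: | ||
| * | ||
| *   $ ethtool -S eth0 | ||
| *   NIC statistics: | ||
| *        rx-dropped-by-kernel: 0 | ||
| *        rx-large-frame-errors: 0 | ||
| *        ... | ||
| */ | ||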
159 | |||
160 | static int gfar_sset_count(struct net_device *dev, int sset) | ||
161 | { | ||
162 | struct gfar_private *priv = netdev_priv(dev); | ||
163 | |||
164 | switch (sset) { | ||
165 | case ETH_SS_STATS: | ||
166 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) | ||
167 | return GFAR_STATS_LEN; | ||
168 | else | ||
169 | return GFAR_EXTRA_STATS_LEN; | ||
170 | default: | ||
171 | return -EOPNOTSUPP; | ||
172 | } | ||
173 | } | ||
174 | |||
175 | /* Fills in the drvinfo structure with some basic info */ | ||
176 | static void gfar_gdrvinfo(struct net_device *dev, struct | ||
177 | ethtool_drvinfo *drvinfo) | ||
178 | { | ||
179 | strncpy(drvinfo->driver, DRV_NAME, GFAR_INFOSTR_LEN); | ||
180 | strncpy(drvinfo->version, gfar_driver_version, GFAR_INFOSTR_LEN); | ||
181 | strncpy(drvinfo->fw_version, "N/A", GFAR_INFOSTR_LEN); | ||
182 | strncpy(drvinfo->bus_info, "N/A", GFAR_INFOSTR_LEN); | ||
183 | drvinfo->regdump_len = 0; | ||
184 | drvinfo->eedump_len = 0; | ||
185 | } | ||
186 | |||
187 | |||
188 | static int gfar_ssettings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
189 | { | ||
190 | struct gfar_private *priv = netdev_priv(dev); | ||
191 | struct phy_device *phydev = priv->phydev; | ||
192 | |||
193 | if (NULL == phydev) | ||
194 | return -ENODEV; | ||
195 | |||
196 | return phy_ethtool_sset(phydev, cmd); | ||
197 | } | ||
198 | |||
199 | |||
200 | /* Return the current settings in the ethtool_cmd structure */ | ||
201 | static int gfar_gsettings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
202 | { | ||
203 | struct gfar_private *priv = netdev_priv(dev); | ||
204 | struct phy_device *phydev = priv->phydev; | ||
205 | struct gfar_priv_rx_q *rx_queue = NULL; | ||
206 | struct gfar_priv_tx_q *tx_queue = NULL; | ||
207 | |||
208 | if (NULL == phydev) | ||
209 | return -ENODEV; | ||
210 | tx_queue = priv->tx_queue[0]; | ||
211 | rx_queue = priv->rx_queue[0]; | ||
212 | |||
213 | /* eTSEC 1.7 and older versions have only one pair of txic and | ||
214 | * rxic registers, although they support multiple queues */ | ||
215 | cmd->maxtxpkt = get_icft_value(tx_queue->txic); | ||
216 | cmd->maxrxpkt = get_icft_value(rx_queue->rxic); | ||
217 | |||
218 | return phy_ethtool_gset(phydev, cmd); | ||
219 | } | ||
220 | |||
221 | /* Return the length of the register structure */ | ||
222 | static int gfar_reglen(struct net_device *dev) | ||
223 | { | ||
224 | return sizeof (struct gfar); | ||
225 | } | ||
226 | |||
227 | /* Return a dump of the GFAR register space */ | ||
228 | static void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *regbuf) | ||
229 | { | ||
230 | int i; | ||
231 | struct gfar_private *priv = netdev_priv(dev); | ||
232 | u32 __iomem *theregs = (u32 __iomem *) priv->gfargrp[0].regs; | ||
233 | u32 *buf = (u32 *) regbuf; | ||
234 | |||
235 | for (i = 0; i < sizeof (struct gfar) / sizeof (u32); i++) | ||
236 | buf[i] = gfar_read(&theregs[i]); | ||
237 | } | ||
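| /* Example (illustrative; interface name eth0 is assumed): the raw | ||
| * register dump produced here is fetched from user space with | ||
| * | ||
| *   $ ethtool -d eth0 raw on > gfar-regs.bin | ||
| */ | ||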
238 | |||
239 | /* Convert microseconds to ethernet clock ticks; the tick period | ||
240 | * depends on what speed the controller is running at */ | ||
241 | static unsigned int gfar_usecs2ticks(struct gfar_private *priv, unsigned int usecs) | ||
242 | { | ||
243 | unsigned int count; | ||
244 | |||
245 | /* The timer is different, depending on the interface speed */ | ||
246 | switch (priv->phydev->speed) { | ||
247 | case SPEED_1000: | ||
248 | count = GFAR_GBIT_TIME; | ||
249 | break; | ||
250 | case SPEED_100: | ||
251 | count = GFAR_100_TIME; | ||
252 | break; | ||
253 | case SPEED_10: | ||
254 | default: | ||
255 | count = GFAR_10_TIME; | ||
256 | break; | ||
257 | } | ||
258 | |||
259 | /* Make sure we return a number greater than 0 | ||
260 | * if usecs > 0 */ | ||
261 | return (usecs * 1000 + count - 1) / count; | ||
262 | } | ||
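| /* Worked example (illustrative; the GFAR_*_TIME tick periods come | ||
| * from gianfar.h and are assumed to be in nanoseconds): at gigabit | ||
| * speed with count = 512 ns per tick, a request of 100 us rounds up | ||
| * to (100 * 1000 + 511) / 512 = 196 ticks, and any usecs > 0 yields | ||
| * at least one tick. | ||
| */ | ||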
263 | |||
264 | /* Convert ethernet clock ticks to microseconds */ | ||
265 | static unsigned int gfar_ticks2usecs(struct gfar_private *priv, unsigned int ticks) | ||
266 | { | ||
267 | unsigned int count; | ||
268 | |||
269 | /* The timer is different, depending on the interface speed */ | ||
270 | switch (priv->phydev->speed) { | ||
271 | case SPEED_1000: | ||
272 | count = GFAR_GBIT_TIME; | ||
273 | break; | ||
274 | case SPEED_100: | ||
275 | count = GFAR_100_TIME; | ||
276 | break; | ||
277 | case SPEED_10: | ||
278 | default: | ||
279 | count = GFAR_10_TIME; | ||
280 | break; | ||
281 | } | ||
282 | |||
283 | /* Make sure we return a number greater than 0 | ||
284 | * if ticks > 0 */ | ||
285 | return (ticks * count) / 1000; | ||
286 | } | ||
287 | |||
288 | /* Get the coalescing parameters, and put them in the cvals | ||
289 | * structure. */ | ||
290 | static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals) | ||
291 | { | ||
292 | struct gfar_private *priv = netdev_priv(dev); | ||
293 | struct gfar_priv_rx_q *rx_queue = NULL; | ||
294 | struct gfar_priv_tx_q *tx_queue = NULL; | ||
295 | unsigned long rxtime; | ||
296 | unsigned long rxcount; | ||
297 | unsigned long txtime; | ||
298 | unsigned long txcount; | ||
299 | |||
300 | if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE)) | ||
301 | return -EOPNOTSUPP; | ||
302 | |||
303 | if (NULL == priv->phydev) | ||
304 | return -ENODEV; | ||
305 | |||
306 | rx_queue = priv->rx_queue[0]; | ||
307 | tx_queue = priv->tx_queue[0]; | ||
308 | |||
309 | rxtime = get_ictt_value(rx_queue->rxic); | ||
310 | rxcount = get_icft_value(rx_queue->rxic); | ||
311 | txtime = get_ictt_value(tx_queue->txic); | ||
312 | txcount = get_icft_value(tx_queue->txic); | ||
313 | cvals->rx_coalesce_usecs = gfar_ticks2usecs(priv, rxtime); | ||
314 | cvals->rx_max_coalesced_frames = rxcount; | ||
315 | |||
316 | cvals->tx_coalesce_usecs = gfar_ticks2usecs(priv, txtime); | ||
317 | cvals->tx_max_coalesced_frames = txcount; | ||
318 | |||
319 | cvals->use_adaptive_rx_coalesce = 0; | ||
320 | cvals->use_adaptive_tx_coalesce = 0; | ||
321 | |||
322 | cvals->pkt_rate_low = 0; | ||
323 | cvals->rx_coalesce_usecs_low = 0; | ||
324 | cvals->rx_max_coalesced_frames_low = 0; | ||
325 | cvals->tx_coalesce_usecs_low = 0; | ||
326 | cvals->tx_max_coalesced_frames_low = 0; | ||
327 | |||
328 | /* When the packet rate is below pkt_rate_high but above | ||
329 | * pkt_rate_low (both measured in packets per second) the | ||
330 | * normal {rx,tx}_* coalescing parameters are used. | ||
331 | */ | ||
332 | |||
333 | /* When the packet rate (measured in packets per second) | ||
334 | * is above pkt_rate_high, the {rx,tx}_*_high parameters are | ||
335 | * used. | ||
336 | */ | ||
337 | cvals->pkt_rate_high = 0; | ||
338 | cvals->rx_coalesce_usecs_high = 0; | ||
339 | cvals->rx_max_coalesced_frames_high = 0; | ||
340 | cvals->tx_coalesce_usecs_high = 0; | ||
341 | cvals->tx_max_coalesced_frames_high = 0; | ||
342 | |||
343 | /* How often to do adaptive coalescing packet rate sampling, | ||
344 | * measured in seconds. Must not be zero. | ||
345 | */ | ||
346 | cvals->rate_sample_interval = 0; | ||
347 | |||
348 | return 0; | ||
349 | } | ||
350 | |||
351 | /* Change the coalescing values. | ||
352 | * Both cvals->*_usecs and cvals->*_frames have to be > 0 | ||
353 | * in order for coalescing to be active | ||
354 | */ | ||
355 | static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals) | ||
356 | { | ||
357 | struct gfar_private *priv = netdev_priv(dev); | ||
358 | int i = 0; | ||
359 | |||
360 | if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE)) | ||
361 | return -EOPNOTSUPP; | ||
362 | |||
363 | /* Set up rx coalescing */ | ||
364 | /* As of now, we will enable/disable coalescing for all | ||
365 | * queues together in case of eTSEC2; this will be modified | ||
366 | * along with the ethtool interface */ | ||
367 | if ((cvals->rx_coalesce_usecs == 0) || | ||
368 | (cvals->rx_max_coalesced_frames == 0)) { | ||
369 | for (i = 0; i < priv->num_rx_queues; i++) | ||
370 | priv->rx_queue[i]->rxcoalescing = 0; | ||
371 | } else { | ||
372 | for (i = 0; i < priv->num_rx_queues; i++) | ||
373 | priv->rx_queue[i]->rxcoalescing = 1; | ||
374 | } | ||
375 | |||
376 | if (NULL == priv->phydev) | ||
377 | return -ENODEV; | ||
378 | |||
379 | /* Check the bounds of the values */ | ||
380 | if (cvals->rx_coalesce_usecs > GFAR_MAX_COAL_USECS) { | ||
381 | pr_info("Coalescing is limited to %d microseconds\n", | ||
382 | GFAR_MAX_COAL_USECS); | ||
383 | return -EINVAL; | ||
384 | } | ||
385 | |||
386 | if (cvals->rx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) { | ||
387 | pr_info("Coalescing is limited to %d frames\n", | ||
388 | GFAR_MAX_COAL_FRAMES); | ||
389 | return -EINVAL; | ||
390 | } | ||
391 | |||
392 | for (i = 0; i < priv->num_rx_queues; i++) { | ||
393 | priv->rx_queue[i]->rxic = mk_ic_value( | ||
394 | cvals->rx_max_coalesced_frames, | ||
395 | gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs)); | ||
396 | } | ||
397 | |||
398 | /* Set up tx coalescing */ | ||
399 | if ((cvals->tx_coalesce_usecs == 0) || | ||
400 | (cvals->tx_max_coalesced_frames == 0)) { | ||
401 | for (i = 0; i < priv->num_tx_queues; i++) | ||
402 | priv->tx_queue[i]->txcoalescing = 0; | ||
403 | } else { | ||
404 | for (i = 0; i < priv->num_tx_queues; i++) | ||
405 | priv->tx_queue[i]->txcoalescing = 1; | ||
406 | } | ||
407 | |||
408 | /* Check the bounds of the values */ | ||
409 | if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) { | ||
410 | pr_info("Coalescing is limited to %d microseconds\n", | ||
411 | GFAR_MAX_COAL_USECS); | ||
412 | return -EINVAL; | ||
413 | } | ||
414 | |||
415 | if (cvals->tx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) { | ||
416 | pr_info("Coalescing is limited to %d frames\n", | ||
417 | GFAR_MAX_COAL_FRAMES); | ||
418 | return -EINVAL; | ||
419 | } | ||
420 | |||
421 | for (i = 0; i < priv->num_tx_queues; i++) { | ||
422 | priv->tx_queue[i]->txic = mk_ic_value( | ||
423 | cvals->tx_max_coalesced_frames, | ||
424 | gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs)); | ||
425 | } | ||
426 | |||
427 | gfar_configure_coalescing(priv, 0xFF, 0xFF); | ||
428 | |||
429 | return 0; | ||
430 | } | ||
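| /* Example (illustrative; interface name eth0 is assumed): both a time | ||
| * and a frame threshold must be non-zero for coalescing to stay | ||
| * enabled, so a typical ethtool invocation sets both: | ||
| * | ||
| *   $ ethtool -C eth0 rx-usecs 30 rx-frames 16 tx-usecs 30 tx-frames 16 | ||
| */ | ||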
431 | |||
432 | /* Fills in rvals with the current ring parameters. Currently, | ||
433 | * rx, rx_mini, and rx_jumbo rings are the same size, as mini and | ||
434 | * jumbo are ignored by the driver */ | ||
435 | static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rvals) | ||
436 | { | ||
437 | struct gfar_private *priv = netdev_priv(dev); | ||
438 | struct gfar_priv_tx_q *tx_queue = NULL; | ||
439 | struct gfar_priv_rx_q *rx_queue = NULL; | ||
440 | |||
441 | tx_queue = priv->tx_queue[0]; | ||
442 | rx_queue = priv->rx_queue[0]; | ||
443 | |||
444 | rvals->rx_max_pending = GFAR_RX_MAX_RING_SIZE; | ||
445 | rvals->rx_mini_max_pending = GFAR_RX_MAX_RING_SIZE; | ||
446 | rvals->rx_jumbo_max_pending = GFAR_RX_MAX_RING_SIZE; | ||
447 | rvals->tx_max_pending = GFAR_TX_MAX_RING_SIZE; | ||
448 | |||
449 | /* Values changeable by the user. The valid values are | ||
450 | * in the range 1 to the "*_max_pending" counterpart above. | ||
451 | */ | ||
452 | rvals->rx_pending = rx_queue->rx_ring_size; | ||
453 | rvals->rx_mini_pending = rx_queue->rx_ring_size; | ||
454 | rvals->rx_jumbo_pending = rx_queue->rx_ring_size; | ||
455 | rvals->tx_pending = tx_queue->tx_ring_size; | ||
456 | } | ||
457 | |||
458 | /* Change the current ring parameters, stopping the controller if | ||
459 | * necessary so that we don't mess things up while we're in | ||
460 | * motion. We wait for the ring to be clean before reallocating | ||
461 | * the rings. */ | ||
462 | static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals) | ||
463 | { | ||
464 | struct gfar_private *priv = netdev_priv(dev); | ||
465 | int err = 0, i = 0; | ||
466 | |||
467 | if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE) | ||
468 | return -EINVAL; | ||
469 | |||
470 | if (!is_power_of_2(rvals->rx_pending)) { | ||
471 | netdev_err(dev, "Ring sizes must be a power of 2\n"); | ||
472 | return -EINVAL; | ||
473 | } | ||
474 | |||
475 | if (rvals->tx_pending > GFAR_TX_MAX_RING_SIZE) | ||
476 | return -EINVAL; | ||
477 | |||
478 | if (!is_power_of_2(rvals->tx_pending)) { | ||
479 | netdev_err(dev, "Ring sizes must be a power of 2\n"); | ||
480 | return -EINVAL; | ||
481 | } | ||
482 | |||
483 | |||
484 | if (dev->flags & IFF_UP) { | ||
485 | unsigned long flags; | ||
486 | |||
487 | /* Halt TX and RX, and process the frames which | ||
488 | * have already been received */ | ||
489 | local_irq_save(flags); | ||
490 | lock_tx_qs(priv); | ||
491 | lock_rx_qs(priv); | ||
492 | |||
493 | gfar_halt(dev); | ||
494 | |||
495 | unlock_rx_qs(priv); | ||
496 | unlock_tx_qs(priv); | ||
497 | local_irq_restore(flags); | ||
498 | |||
499 | for (i = 0; i < priv->num_rx_queues; i++) | ||
500 | gfar_clean_rx_ring(priv->rx_queue[i], | ||
501 | priv->rx_queue[i]->rx_ring_size); | ||
502 | |||
503 | /* Now we take down the rings to rebuild them */ | ||
504 | stop_gfar(dev); | ||
505 | } | ||
506 | |||
507 | /* Change the size */ | ||
508 | for (i = 0; i < priv->num_rx_queues; i++) { | ||
509 | priv->rx_queue[i]->rx_ring_size = rvals->rx_pending; | ||
510 | priv->tx_queue[i]->tx_ring_size = rvals->tx_pending; | ||
511 | priv->tx_queue[i]->num_txbdfree = priv->tx_queue[i]->tx_ring_size; | ||
512 | } | ||
513 | |||
514 | /* Rebuild the rings with the new size */ | ||
515 | if (dev->flags & IFF_UP) { | ||
516 | err = startup_gfar(dev); | ||
517 | netif_tx_wake_all_queues(dev); | ||
518 | } | ||
519 | return err; | ||
520 | } | ||
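| /* Example (illustrative; interface name eth0 is assumed): ring sizes | ||
| * must be powers of 2 and are capped at GFAR_{RX,TX}_MAX_RING_SIZE: | ||
| * | ||
| *   $ ethtool -G eth0 rx 512 tx 512 | ||
| */ | ||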
521 | |||
522 | int gfar_set_features(struct net_device *dev, u32 features) | ||
523 | { | ||
524 | struct gfar_private *priv = netdev_priv(dev); | ||
525 | unsigned long flags; | ||
526 | int err = 0, i = 0; | ||
527 | u32 changed = dev->features ^ features; | ||
528 | |||
529 | if (changed & (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX)) | ||
530 | gfar_vlan_mode(dev, features); | ||
531 | |||
532 | if (!(changed & NETIF_F_RXCSUM)) | ||
533 | return 0; | ||
534 | |||
535 | if (dev->flags & IFF_UP) { | ||
536 | /* Halt TX and RX, and process the frames which | ||
537 | * have already been received */ | ||
538 | local_irq_save(flags); | ||
539 | lock_tx_qs(priv); | ||
540 | lock_rx_qs(priv); | ||
541 | |||
542 | gfar_halt(dev); | ||
543 | |||
544 | unlock_tx_qs(priv); | ||
545 | unlock_rx_qs(priv); | ||
546 | local_irq_restore(flags); | ||
547 | |||
548 | for (i = 0; i < priv->num_rx_queues; i++) | ||
549 | gfar_clean_rx_ring(priv->rx_queue[i], | ||
550 | priv->rx_queue[i]->rx_ring_size); | ||
551 | |||
552 | /* Now we take down the rings to rebuild them */ | ||
553 | stop_gfar(dev); | ||
554 | |||
555 | dev->features = features; | ||
556 | |||
557 | err = startup_gfar(dev); | ||
558 | netif_tx_wake_all_queues(dev); | ||
559 | } | ||
560 | return err; | ||
561 | } | ||
562 | |||
563 | static uint32_t gfar_get_msglevel(struct net_device *dev) | ||
564 | { | ||
565 | struct gfar_private *priv = netdev_priv(dev); | ||
566 | return priv->msg_enable; | ||
567 | } | ||
568 | |||
569 | static void gfar_set_msglevel(struct net_device *dev, uint32_t data) | ||
570 | { | ||
571 | struct gfar_private *priv = netdev_priv(dev); | ||
572 | priv->msg_enable = data; | ||
573 | } | ||
574 | |||
575 | #ifdef CONFIG_PM | ||
576 | static void gfar_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | ||
577 | { | ||
578 | struct gfar_private *priv = netdev_priv(dev); | ||
579 | |||
580 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) { | ||
581 | wol->supported = WAKE_MAGIC; | ||
582 | wol->wolopts = priv->wol_en ? WAKE_MAGIC : 0; | ||
583 | } else { | ||
584 | wol->supported = wol->wolopts = 0; | ||
585 | } | ||
586 | } | ||
587 | |||
588 | static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | ||
589 | { | ||
590 | struct gfar_private *priv = netdev_priv(dev); | ||
591 | unsigned long flags; | ||
592 | |||
593 | if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) && | ||
594 | wol->wolopts != 0) | ||
595 | return -EINVAL; | ||
596 | |||
597 | if (wol->wolopts & ~WAKE_MAGIC) | ||
598 | return -EINVAL; | ||
599 | |||
600 | device_set_wakeup_enable(&dev->dev, wol->wolopts & WAKE_MAGIC); | ||
601 | |||
602 | spin_lock_irqsave(&priv->bflock, flags); | ||
603 | priv->wol_en = !!device_may_wakeup(&dev->dev); | ||
604 | spin_unlock_irqrestore(&priv->bflock, flags); | ||
605 | |||
606 | return 0; | ||
607 | } | ||
608 | #endif | ||
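| /* Example (illustrative; interface name eth0 is assumed): only | ||
| * magic-packet wake-up is supported, which the standard ethtool CLI | ||
| * enables with | ||
| * | ||
| *   $ ethtool -s eth0 wol g | ||
| */ | ||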
609 | |||
610 | static void ethflow_to_filer_rules(struct gfar_private *priv, u64 ethflow) | ||
611 | { | ||
612 | u32 fcr = 0x0, fpr = FPR_FILER_MASK; | ||
613 | |||
614 | if (ethflow & RXH_L2DA) { | ||
615 | fcr = RQFCR_PID_DAH | RQFCR_CMP_NOMATCH | | ||
616 | RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0; | ||
617 | priv->ftp_rqfpr[priv->cur_filer_idx] = fpr; | ||
618 | priv->ftp_rqfcr[priv->cur_filer_idx] = fcr; | ||
619 | gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); | ||
620 | priv->cur_filer_idx = priv->cur_filer_idx - 1; | ||
621 | |||
622 | fcr = RQFCR_PID_DAL | RQFCR_AND | RQFCR_CMP_NOMATCH | | ||
623 | RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0; | ||
624 | priv->ftp_rqfpr[priv->cur_filer_idx] = fpr; | ||
625 | priv->ftp_rqfcr[priv->cur_filer_idx] = fcr; | ||
626 | gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); | ||
627 | priv->cur_filer_idx = priv->cur_filer_idx - 1; | ||
628 | } | ||
629 | |||
630 | if (ethflow & RXH_VLAN) { | ||
631 | fcr = RQFCR_PID_VID | RQFCR_CMP_NOMATCH | RQFCR_HASH | | ||
632 | RQFCR_AND | RQFCR_HASHTBL_0; | ||
633 | gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); | ||
634 | priv->ftp_rqfpr[priv->cur_filer_idx] = fpr; | ||
635 | priv->ftp_rqfcr[priv->cur_filer_idx] = fcr; | ||
636 | priv->cur_filer_idx = priv->cur_filer_idx - 1; | ||
637 | } | ||
638 | |||
639 | if (ethflow & RXH_IP_SRC) { | ||
640 | fcr = RQFCR_PID_SIA | RQFCR_CMP_NOMATCH | RQFCR_HASH | | ||
641 | RQFCR_AND | RQFCR_HASHTBL_0; | ||
642 | priv->ftp_rqfpr[priv->cur_filer_idx] = fpr; | ||
643 | priv->ftp_rqfcr[priv->cur_filer_idx] = fcr; | ||
644 | gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); | ||
645 | priv->cur_filer_idx = priv->cur_filer_idx - 1; | ||
646 | } | ||
647 | |||
648 | if (ethflow & (RXH_IP_DST)) { | ||
649 | fcr = RQFCR_PID_DIA | RQFCR_CMP_NOMATCH | RQFCR_HASH | | ||
650 | RQFCR_AND | RQFCR_HASHTBL_0; | ||
651 | priv->ftp_rqfpr[priv->cur_filer_idx] = fpr; | ||
652 | priv->ftp_rqfcr[priv->cur_filer_idx] = fcr; | ||
653 | gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); | ||
654 | priv->cur_filer_idx = priv->cur_filer_idx - 1; | ||
655 | } | ||
656 | |||
657 | if (ethflow & RXH_L3_PROTO) { | ||
658 | fcr = RQFCR_PID_L4P | RQFCR_CMP_NOMATCH | RQFCR_HASH | | ||
659 | RQFCR_AND | RQFCR_HASHTBL_0; | ||
660 | priv->ftp_rqfpr[priv->cur_filer_idx] = fpr; | ||
661 | priv->ftp_rqfcr[priv->cur_filer_idx] = fcr; | ||
662 | gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); | ||
663 | priv->cur_filer_idx = priv->cur_filer_idx - 1; | ||
664 | } | ||
665 | |||
666 | if (ethflow & RXH_L4_B_0_1) { | ||
667 | fcr = RQFCR_PID_SPT | RQFCR_CMP_NOMATCH | RQFCR_HASH | | ||
668 | RQFCR_AND | RQFCR_HASHTBL_0; | ||
669 | priv->ftp_rqfpr[priv->cur_filer_idx] = fpr; | ||
670 | priv->ftp_rqfcr[priv->cur_filer_idx] = fcr; | ||
671 | gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); | ||
672 | priv->cur_filer_idx = priv->cur_filer_idx - 1; | ||
673 | } | ||
674 | |||
675 | if (ethflow & RXH_L4_B_2_3) { | ||
676 | fcr = RQFCR_PID_DPT | RQFCR_CMP_NOMATCH | RQFCR_HASH | | ||
677 | RQFCR_AND | RQFCR_HASHTBL_0; | ||
678 | priv->ftp_rqfpr[priv->cur_filer_idx] = fpr; | ||
679 | priv->ftp_rqfcr[priv->cur_filer_idx] = fcr; | ||
680 | gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); | ||
681 | priv->cur_filer_idx = priv->cur_filer_idx - 1; | ||
682 | } | ||
683 | } | ||
684 | |||
685 | static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u64 class) | ||
686 | { | ||
687 | unsigned int last_rule_idx = priv->cur_filer_idx; | ||
688 | unsigned int cmp_rqfpr; | ||
689 | unsigned int local_rqfpr[MAX_FILER_IDX + 1]; | ||
690 | unsigned int local_rqfcr[MAX_FILER_IDX + 1]; | ||
691 | int i = 0x0, k = 0x0; | ||
692 | int j = MAX_FILER_IDX, l = 0x0; | ||
693 | |||
694 | switch (class) { | ||
695 | case TCP_V4_FLOW: | ||
696 | cmp_rqfpr = RQFPR_IPV4 | RQFPR_TCP; | ||
697 | break; | ||
698 | case UDP_V4_FLOW: | ||
699 | cmp_rqfpr = RQFPR_IPV4 | RQFPR_UDP; | ||
700 | break; | ||
701 | case TCP_V6_FLOW: | ||
702 | cmp_rqfpr = RQFPR_IPV6 | RQFPR_TCP; | ||
703 | break; | ||
704 | case UDP_V6_FLOW: | ||
705 | cmp_rqfpr = RQFPR_IPV6 | RQFPR_UDP; | ||
706 | break; | ||
707 | default: | ||
708 | pr_err("Right now this class is not supported\n"); | ||
709 | return 0; | ||
710 | } | ||
711 | |||
712 | for (i = 0; i < MAX_FILER_IDX + 1; i++) { | ||
713 | local_rqfpr[j] = priv->ftp_rqfpr[i]; | ||
714 | local_rqfcr[j] = priv->ftp_rqfcr[i]; | ||
715 | j--; | ||
716 | if ((priv->ftp_rqfcr[i] == (RQFCR_PID_PARSE | | ||
717 | RQFCR_CLE | RQFCR_AND)) && | ||
718 | (priv->ftp_rqfpr[i] == cmp_rqfpr)) | ||
719 | break; | ||
720 | } | ||
721 | |||
722 | if (i == MAX_FILER_IDX + 1) { | ||
723 | pr_err("No parse rule found, can't create hash rules\n"); | ||
724 | return 0; | ||
725 | } | ||
726 | |||
727 | /* If a match was found, it marks the start of a cluster rule; if it | ||
728 | * was already programmed, we need to overwrite these rules | ||
729 | */ | ||
730 | for (l = i+1; l < MAX_FILER_IDX; l++) { | ||
731 | if ((priv->ftp_rqfcr[l] & RQFCR_CLE) && | ||
732 | !(priv->ftp_rqfcr[l] & RQFCR_AND)) { | ||
733 | priv->ftp_rqfcr[l] = RQFCR_CLE | RQFCR_CMP_EXACT | | ||
734 | RQFCR_HASHTBL_0 | RQFCR_PID_MASK; | ||
735 | priv->ftp_rqfpr[l] = FPR_FILER_MASK; | ||
736 | gfar_write_filer(priv, l, priv->ftp_rqfcr[l], | ||
737 | priv->ftp_rqfpr[l]); | ||
738 | break; | ||
739 | } | ||
740 | |||
741 | if (!(priv->ftp_rqfcr[l] & RQFCR_CLE) && | ||
742 | (priv->ftp_rqfcr[l] & RQFCR_AND)) | ||
743 | continue; | ||
744 | else { | ||
745 | local_rqfpr[j] = priv->ftp_rqfpr[l]; | ||
746 | local_rqfcr[j] = priv->ftp_rqfcr[l]; | ||
747 | j--; | ||
748 | } | ||
749 | } | ||
750 | |||
751 | priv->cur_filer_idx = l - 1; | ||
752 | last_rule_idx = l; | ||
753 | |||
754 | /* hash rules */ | ||
755 | ethflow_to_filer_rules(priv, ethflow); | ||
756 | |||
757 | /* Write back the popped out rules again */ | ||
758 | for (k = j+1; k < MAX_FILER_IDX; k++) { | ||
759 | priv->ftp_rqfpr[priv->cur_filer_idx] = local_rqfpr[k]; | ||
760 | priv->ftp_rqfcr[priv->cur_filer_idx] = local_rqfcr[k]; | ||
761 | gfar_write_filer(priv, priv->cur_filer_idx, | ||
762 | local_rqfcr[k], local_rqfpr[k]); | ||
763 | if (!priv->cur_filer_idx) | ||
764 | break; | ||
765 | priv->cur_filer_idx = priv->cur_filer_idx - 1; | ||
766 | } | ||
767 | |||
768 | return 1; | ||
769 | } | ||
770 | |||
771 | static int gfar_set_hash_opts(struct gfar_private *priv, struct ethtool_rxnfc *cmd) | ||
772 | { | ||
773 | /* write the filer rules here */ | ||
774 | if (!gfar_ethflow_to_filer_table(priv, cmd->data, cmd->flow_type)) | ||
775 | return -EINVAL; | ||
776 | |||
777 | return 0; | ||
778 | } | ||
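| /* Example (illustrative; interface name eth0 is assumed): the hash | ||
| * options above are driven from user space via ethtool's rx-flow-hash | ||
| * command, e.g. to hash TCP/IPv4 flows on src/dst IP and src/dst port: | ||
| * | ||
| *   $ ethtool -N eth0 rx-flow-hash tcp4 sdfn | ||
| */ | ||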
779 | |||
780 | static int gfar_check_filer_hardware(struct gfar_private *priv) | ||
781 | { | ||
782 | struct gfar __iomem *regs = NULL; | ||
783 | u32 i; | ||
784 | |||
785 | regs = priv->gfargrp[0].regs; | ||
786 | |||
787 | /* Check if we are in FIFO mode */ | ||
788 | i = gfar_read(®s->ecntrl); | ||
789 | i &= ECNTRL_FIFM; | ||
790 | if (i == ECNTRL_FIFM) { | ||
791 | netdev_notice(priv->ndev, "Interface in FIFO mode\n"); | ||
792 | i = gfar_read(®s->rctrl); | ||
793 | i &= RCTRL_PRSDEP_MASK | RCTRL_PRSFM; | ||
794 | if (i == (RCTRL_PRSDEP_MASK | RCTRL_PRSFM)) { | ||
795 | netdev_info(priv->ndev, | ||
796 | "Receive Queue Filtering enabled\n"); | ||
797 | } else { | ||
798 | netdev_warn(priv->ndev, | ||
799 | "Receive Queue Filtering disabled\n"); | ||
800 | return -EOPNOTSUPP; | ||
801 | } | ||
802 | } | ||
803 | /* Or in standard mode */ | ||
804 | else { | ||
805 | i = gfar_read(®s->rctrl); | ||
806 | i &= RCTRL_PRSDEP_MASK; | ||
807 | if (i == RCTRL_PRSDEP_MASK) { | ||
808 | netdev_info(priv->ndev, | ||
809 | "Receive Queue Filtering enabled\n"); | ||
810 | } else { | ||
811 | netdev_warn(priv->ndev, | ||
812 | "Receive Queue Filtering disabled\n"); | ||
813 | return -EOPNOTSUPP; | ||
814 | } | ||
815 | } | ||
816 | |||
817 | /* Sets the properties of the arbitrary filer rule | ||
818 | * to the first 4 Layer 4 bytes */ | ||
819 | gfar_write(&regs->rbifx, 0xC0C1C2C3); | ||
820 | return 0; | ||
821 | } | ||
822 | |||
823 | static int gfar_comp_asc(const void *a, const void *b) | ||
824 | { | ||
825 | return memcmp(a, b, 4); | ||
826 | } | ||
827 | |||
828 | static int gfar_comp_desc(const void *a, const void *b) | ||
829 | { | ||
830 | return -memcmp(a, b, 4); | ||
831 | } | ||
832 | |||
833 | static void gfar_swap(void *a, void *b, int size) | ||
834 | { | ||
835 | u32 *_a = a; | ||
836 | u32 *_b = b; | ||
837 | |||
838 | swap(_a[0], _b[0]); | ||
839 | swap(_a[1], _b[1]); | ||
840 | swap(_a[2], _b[2]); | ||
841 | swap(_a[3], _b[3]); | ||
842 | } | ||
843 | |||
844 | /* Write a mask to filer cache */ | ||
845 | static void gfar_set_mask(u32 mask, struct filer_table *tab) | ||
846 | { | ||
847 | tab->fe[tab->index].ctrl = RQFCR_AND | RQFCR_PID_MASK | RQFCR_CMP_EXACT; | ||
848 | tab->fe[tab->index].prop = mask; | ||
849 | tab->index++; | ||
850 | } | ||
851 | |||
852 | /* Sets parse bits (e.g. IP or TCP) */ | ||
853 | static void gfar_set_parse_bits(u32 value, u32 mask, struct filer_table *tab) | ||
854 | { | ||
855 | gfar_set_mask(mask, tab); | ||
856 | tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | ||
857 | | RQFCR_AND; | ||
858 | tab->fe[tab->index].prop = value; | ||
859 | tab->index++; | ||
860 | } | ||
861 | |||
862 | static void gfar_set_general_attribute(u32 value, u32 mask, u32 flag, | ||
863 | struct filer_table *tab) | ||
864 | { | ||
865 | gfar_set_mask(mask, tab); | ||
866 | tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_AND | flag; | ||
867 | tab->fe[tab->index].prop = value; | ||
868 | tab->index++; | ||
869 | } | ||
870 | |||
871 | /* | ||
872 | * For setting a tuple of value and mask of type flag | ||
873 | * Example: | ||
874 | * IP-Src = 10.0.0.0/255.0.0.0 | ||
875 | * value: 0x0A000000 mask: 0xFF000000 flag: RQFCR_PID_SIA | ||
876 | * | ||
877 | * Ethtool gives us a value=0 and mask=~0 for a don't-care tuple; | ||
878 | * for a don't-care mask it gives us a 0. | ||
879 | * | ||
880 | * The don't-care check and the mask adjustment for mask=0 are done for | ||
881 | * VLAN and MAC at an upper level (the needed information is missing here). | ||
882 | * Those entries can be discarded when they are value=0 and mask=0. | ||
883 | * | ||
884 | * Further, all masks are one-padded for better hardware efficiency. | ||
885 | */ | ||
886 | static void gfar_set_attribute(u32 value, u32 mask, u32 flag, | ||
887 | struct filer_table *tab) | ||
888 | { | ||
889 | switch (flag) { | ||
890 | /* 3bit */ | ||
891 | case RQFCR_PID_PRI: | ||
892 | if (!(value | mask)) | ||
893 | return; | ||
894 | mask |= RQFCR_PID_PRI_MASK; | ||
895 | break; | ||
896 | /* 8bit */ | ||
897 | case RQFCR_PID_L4P: | ||
898 | case RQFCR_PID_TOS: | ||
899 | if (!~(mask | RQFCR_PID_L4P_MASK)) | ||
900 | return; | ||
901 | if (!mask) | ||
902 | mask = ~0; | ||
903 | else | ||
904 | mask |= RQFCR_PID_L4P_MASK; | ||
905 | break; | ||
906 | /* 12bit */ | ||
907 | case RQFCR_PID_VID: | ||
908 | if (!(value | mask)) | ||
909 | return; | ||
910 | mask |= RQFCR_PID_VID_MASK; | ||
911 | break; | ||
912 | /* 16bit */ | ||
913 | case RQFCR_PID_DPT: | ||
914 | case RQFCR_PID_SPT: | ||
915 | case RQFCR_PID_ETY: | ||
916 | if (!~(mask | RQFCR_PID_PORT_MASK)) | ||
917 | return; | ||
918 | if (!mask) | ||
919 | mask = ~0; | ||
920 | else | ||
921 | mask |= RQFCR_PID_PORT_MASK; | ||
922 | break; | ||
923 | /* 24bit */ | ||
924 | case RQFCR_PID_DAH: | ||
925 | case RQFCR_PID_DAL: | ||
926 | case RQFCR_PID_SAH: | ||
927 | case RQFCR_PID_SAL: | ||
928 | if (!(value | mask)) | ||
929 | return; | ||
930 | mask |= RQFCR_PID_MAC_MASK; | ||
931 | break; | ||
932 | /* for all real 32bit masks */ | ||
933 | default: | ||
934 | if (!~mask) | ||
935 | return; | ||
936 | if (!mask) | ||
937 | mask = ~0; | ||
938 | break; | ||
939 | } | ||
940 | gfar_set_general_attribute(value, mask, flag, tab); | ||
941 | } | ||
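| /* Example (illustrative): a 12-bit VLAN-ID match such as value=0x123, | ||
| * mask=0xFFF is widened with RQFCR_PID_VID_MASK; the padded-in bits | ||
| * are zero on both sides of the comparison, so the match result is | ||
| * unchanged, while full-width masks are more likely to be identical | ||
| * and can then be merged by gfar_optimize_filer_masks() below. | ||
| */ | ||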
942 | |||
943 | /* Translates value and mask for UDP, TCP or SCTP */ | ||
944 | static void gfar_set_basic_ip(struct ethtool_tcpip4_spec *value, | ||
945 | struct ethtool_tcpip4_spec *mask, struct filer_table *tab) | ||
946 | { | ||
947 | gfar_set_attribute(value->ip4src, mask->ip4src, RQFCR_PID_SIA, tab); | ||
948 | gfar_set_attribute(value->ip4dst, mask->ip4dst, RQFCR_PID_DIA, tab); | ||
949 | gfar_set_attribute(value->pdst, mask->pdst, RQFCR_PID_DPT, tab); | ||
950 | gfar_set_attribute(value->psrc, mask->psrc, RQFCR_PID_SPT, tab); | ||
951 | gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab); | ||
952 | } | ||
953 | |||
954 | /* Translates value and mask for RAW-IP4 */ | ||
955 | static void gfar_set_user_ip(struct ethtool_usrip4_spec *value, | ||
956 | struct ethtool_usrip4_spec *mask, struct filer_table *tab) | ||
957 | { | ||
958 | gfar_set_attribute(value->ip4src, mask->ip4src, RQFCR_PID_SIA, tab); | ||
959 | gfar_set_attribute(value->ip4dst, mask->ip4dst, RQFCR_PID_DIA, tab); | ||
960 | gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab); | ||
961 | gfar_set_attribute(value->proto, mask->proto, RQFCR_PID_L4P, tab); | ||
962 | gfar_set_attribute(value->l4_4_bytes, mask->l4_4_bytes, RQFCR_PID_ARB, | ||
963 | tab); | ||
964 | |||
965 | } | ||
966 | |||
967 | /* Translates value and mask for ETHER spec */ | ||
968 | static void gfar_set_ether(struct ethhdr *value, struct ethhdr *mask, | ||
969 | struct filer_table *tab) | ||
970 | { | ||
971 | u32 upper_temp_mask = 0; | ||
972 | u32 lower_temp_mask = 0; | ||
973 | /* Source address */ | ||
974 | if (!is_broadcast_ether_addr(mask->h_source)) { | ||
975 | |||
976 | if (is_zero_ether_addr(mask->h_source)) { | ||
977 | upper_temp_mask = 0xFFFFFFFF; | ||
978 | lower_temp_mask = 0xFFFFFFFF; | ||
979 | } else { | ||
980 | upper_temp_mask = mask->h_source[0] << 16 | ||
981 | | mask->h_source[1] << 8 | ||
982 | | mask->h_source[2]; | ||
983 | lower_temp_mask = mask->h_source[3] << 16 | ||
984 | | mask->h_source[4] << 8 | ||
985 | | mask->h_source[5]; | ||
986 | } | ||
987 | /* Upper 24bit */ | ||
988 | gfar_set_attribute( | ||
989 | value->h_source[0] << 16 | value->h_source[1] | ||
990 | << 8 | value->h_source[2], | ||
991 | upper_temp_mask, RQFCR_PID_SAH, tab); | ||
992 | /* And the same for the lower part */ | ||
993 | gfar_set_attribute( | ||
994 | value->h_source[3] << 16 | value->h_source[4] | ||
995 | << 8 | value->h_source[5], | ||
996 | lower_temp_mask, RQFCR_PID_SAL, tab); | ||
997 | } | ||
998 | /* Destination address */ | ||
999 | if (!is_broadcast_ether_addr(mask->h_dest)) { | ||
1000 | |||
1001 | /* Special case: destination is the limited broadcast address */ | ||
1002 | if ((is_broadcast_ether_addr(value->h_dest) | ||
1003 | && is_zero_ether_addr(mask->h_dest))) { | ||
1004 | gfar_set_parse_bits(RQFPR_EBC, RQFPR_EBC, tab); | ||
1005 | } else { | ||
1006 | |||
1007 | if (is_zero_ether_addr(mask->h_dest)) { | ||
1008 | upper_temp_mask = 0xFFFFFFFF; | ||
1009 | lower_temp_mask = 0xFFFFFFFF; | ||
1010 | } else { | ||
1011 | upper_temp_mask = mask->h_dest[0] << 16 | ||
1012 | | mask->h_dest[1] << 8 | ||
1013 | | mask->h_dest[2]; | ||
1014 | lower_temp_mask = mask->h_dest[3] << 16 | ||
1015 | | mask->h_dest[4] << 8 | ||
1016 | | mask->h_dest[5]; | ||
1017 | } | ||
1018 | |||
1019 | /* Upper 24bit */ | ||
1020 | gfar_set_attribute( | ||
1021 | value->h_dest[0] << 16 | ||
1022 | | value->h_dest[1] << 8 | ||
1023 | | value->h_dest[2], | ||
1024 | upper_temp_mask, RQFCR_PID_DAH, tab); | ||
1025 | /* And the same for the lower part */ | ||
1026 | gfar_set_attribute( | ||
1027 | value->h_dest[3] << 16 | ||
1028 | | value->h_dest[4] << 8 | ||
1029 | | value->h_dest[5], | ||
1030 | lower_temp_mask, RQFCR_PID_DAL, tab); | ||
1031 | } | ||
1032 | } | ||
1033 | |||
1034 | gfar_set_attribute(value->h_proto, mask->h_proto, RQFCR_PID_ETY, tab); | ||
1035 | |||
1036 | } | ||
1037 | |||
1038 | /* Convert a rule to binary filter format of gianfar */ | ||
1039 | static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule, | ||
1040 | struct filer_table *tab) | ||
1041 | { | ||
1042 | u32 vlan = 0, vlan_mask = 0; | ||
1043 | u32 id = 0, id_mask = 0; | ||
1044 | u32 cfi = 0, cfi_mask = 0; | ||
1045 | u32 prio = 0, prio_mask = 0; | ||
1046 | |||
1047 | u32 old_index = tab->index; | ||
1048 | |||
1049 | /* Check if vlan is wanted */ | ||
1050 | if ((rule->flow_type & FLOW_EXT) && (rule->m_ext.vlan_tci != 0xFFFF)) { | ||
1051 | if (!rule->m_ext.vlan_tci) | ||
1052 | rule->m_ext.vlan_tci = 0xFFFF; | ||
1053 | |||
1054 | vlan = RQFPR_VLN; | ||
1055 | vlan_mask = RQFPR_VLN; | ||
1056 | |||
1057 | /* Separate the fields */ | ||
1058 | id = rule->h_ext.vlan_tci & VLAN_VID_MASK; | ||
1059 | id_mask = rule->m_ext.vlan_tci & VLAN_VID_MASK; | ||
1060 | cfi = rule->h_ext.vlan_tci & VLAN_CFI_MASK; | ||
1061 | cfi_mask = rule->m_ext.vlan_tci & VLAN_CFI_MASK; | ||
1062 | prio = (rule->h_ext.vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; | ||
1063 | prio_mask = (rule->m_ext.vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; | ||
1064 | |||
1065 | if (cfi == VLAN_TAG_PRESENT && cfi_mask == VLAN_TAG_PRESENT) { | ||
1066 | vlan |= RQFPR_CFI; | ||
1067 | vlan_mask |= RQFPR_CFI; | ||
1068 | } else if (cfi != VLAN_TAG_PRESENT && cfi_mask == VLAN_TAG_PRESENT) { | ||
1069 | vlan_mask |= RQFPR_CFI; | ||
1070 | } | ||
1071 | } | ||
1072 | |||
1073 | switch (rule->flow_type & ~FLOW_EXT) { | ||
1074 | case TCP_V4_FLOW: | ||
1075 | gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_TCP | vlan, | ||
1076 | RQFPR_IPV4 | RQFPR_TCP | vlan_mask, tab); | ||
1077 | gfar_set_basic_ip(&rule->h_u.tcp_ip4_spec, | ||
1078 | &rule->m_u.tcp_ip4_spec, tab); | ||
1079 | break; | ||
1080 | case UDP_V4_FLOW: | ||
1081 | gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_UDP | vlan, | ||
1082 | RQFPR_IPV4 | RQFPR_UDP | vlan_mask, tab); | ||
1083 | gfar_set_basic_ip(&rule->h_u.udp_ip4_spec, | ||
1084 | &rule->m_u.udp_ip4_spec, tab); | ||
1085 | break; | ||
1086 | case SCTP_V4_FLOW: | ||
1087 | gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask, | ||
1088 | tab); | ||
1089 | gfar_set_attribute(132, 0, RQFCR_PID_L4P, tab); | ||
1090 | gfar_set_basic_ip((struct ethtool_tcpip4_spec *) &rule->h_u, | ||
1091 | (struct ethtool_tcpip4_spec *) &rule->m_u, tab); | ||
1092 | break; | ||
1093 | case IP_USER_FLOW: | ||
1094 | gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask, | ||
1095 | tab); | ||
1096 | gfar_set_user_ip((struct ethtool_usrip4_spec *) &rule->h_u, | ||
1097 | (struct ethtool_usrip4_spec *) &rule->m_u, tab); | ||
1098 | break; | ||
1099 | case ETHER_FLOW: | ||
1100 | if (vlan) | ||
1101 | gfar_set_parse_bits(vlan, vlan_mask, tab); | ||
1102 | gfar_set_ether((struct ethhdr *) &rule->h_u, | ||
1103 | (struct ethhdr *) &rule->m_u, tab); | ||
1104 | break; | ||
1105 | default: | ||
1106 | return -1; | ||
1107 | } | ||
1108 | |||
1109 | /* Set the vlan attributes in the end */ | ||
1110 | if (vlan) { | ||
1111 | gfar_set_attribute(id, id_mask, RQFCR_PID_VID, tab); | ||
1112 | gfar_set_attribute(prio, prio_mask, RQFCR_PID_PRI, tab); | ||
1113 | } | ||
1114 | |||
1115 | /* If there has been nothing written till now, it must be a default */ | ||
1116 | if (tab->index == old_index) { | ||
1117 | gfar_set_mask(0xFFFFFFFF, tab); | ||
1118 | tab->fe[tab->index].ctrl = 0x20; | ||
1119 | tab->fe[tab->index].prop = 0x0; | ||
1120 | tab->index++; | ||
1121 | } | ||
1122 | |||
1123 | /* Remove last AND */ | ||
1124 | tab->fe[tab->index - 1].ctrl &= (~RQFCR_AND); | ||
1125 | |||
1126 | /* Specify which queue to use or to drop */ | ||
1127 | if (rule->ring_cookie == RX_CLS_FLOW_DISC) | ||
1128 | tab->fe[tab->index - 1].ctrl |= RQFCR_RJE; | ||
1129 | else | ||
1130 | tab->fe[tab->index - 1].ctrl |= (rule->ring_cookie << 10); | ||
1131 | |||
1132 | /* Only big enough entries can be clustered */ | ||
1133 | if (tab->index > (old_index + 2)) { | ||
1134 | tab->fe[old_index + 1].ctrl |= RQFCR_CLE; | ||
1135 | tab->fe[tab->index - 1].ctrl |= RQFCR_CLE; | ||
1136 | } | ||
1137 | |||
1138 | /* In rare cases the cache can be full while there is free space in hw */ | ||
1139 | if (tab->index > MAX_FILER_CACHE_IDX - 1) | ||
1140 | return -EBUSY; | ||
1141 | |||
1142 | return 0; | ||
1143 | } | ||
1144 | |||
1145 | /* Copy "size" filer entries */ | ||
1146 | static void gfar_copy_filer_entries(struct gfar_filer_entry dst[], | ||
1147 | struct gfar_filer_entry src[], s32 size) | ||
1148 | { | ||
1149 | while (size > 0) { | ||
1150 | size--; | ||
1151 | dst[size].ctrl = src[size].ctrl; | ||
1152 | dst[size].prop = src[size].prop; | ||
1153 | } | ||
1154 | } | ||
1155 | |||
1156 | /* Delete the contents of the filer table between begin and end | ||
1157 | * and collapse the remaining entries */ | ||
1158 | static int gfar_trim_filer_entries(u32 begin, u32 end, struct filer_table *tab) | ||
1159 | { | ||
1160 | int length; | ||
1161 | if (end > MAX_FILER_CACHE_IDX || end < begin) | ||
1162 | return -EINVAL; | ||
1163 | |||
1164 | end++; | ||
1165 | length = end - begin; | ||
1166 | |||
1167 | /* Copy */ | ||
1168 | while (end < tab->index) { | ||
1169 | tab->fe[begin].ctrl = tab->fe[end].ctrl; | ||
1170 | tab->fe[begin++].prop = tab->fe[end++].prop; | ||
1171 | |||
1172 | } | ||
1173 | /* Fill up with don't cares */ | ||
1174 | while (begin < tab->index) { | ||
1175 | tab->fe[begin].ctrl = 0x60; | ||
1176 | tab->fe[begin].prop = 0xFFFFFFFF; | ||
1177 | begin++; | ||
1178 | } | ||
1179 | |||
1180 | tab->index -= length; | ||
1181 | return 0; | ||
1182 | } | ||
1183 | |||
1184 | /* Make space on the wanted location */ | ||
1185 | static int gfar_expand_filer_entries(u32 begin, u32 length, | ||
1186 | struct filer_table *tab) | ||
1187 | { | ||
1188 | if (length == 0 || length + tab->index > MAX_FILER_CACHE_IDX || | ||
1189 | begin > MAX_FILER_CACHE_IDX) | ||
1190 | return -EINVAL; | ||
1191 | |||
1192 | gfar_copy_filer_entries(&(tab->fe[begin + length]), &(tab->fe[begin]), | ||
1193 | tab->index - length + 1); | ||
1194 | |||
1195 | tab->index += length; | ||
1196 | return 0; | ||
1197 | } | ||
1198 | |||
1199 | static int gfar_get_next_cluster_start(int start, struct filer_table *tab) | ||
1200 | { | ||
1201 | for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1); start++) { | ||
1202 | if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) | ||
1203 | == (RQFCR_AND | RQFCR_CLE)) | ||
1204 | return start; | ||
1205 | } | ||
1206 | return -1; | ||
1207 | } | ||
1208 | |||
1209 | static int gfar_get_next_cluster_end(int start, struct filer_table *tab) | ||
1210 | { | ||
1211 | for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1); start++) { | ||
1212 | if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) | ||
1213 | == (RQFCR_CLE)) | ||
1214 | return start; | ||
1215 | } | ||
1216 | return -1; | ||
1217 | } | ||
1218 | |||
1219 | /* | ||
1220 | * Uses the hardware's clustering option to reduce | ||
1221 | * the number of filer table entries | ||
1222 | */ | ||
1223 | static void gfar_cluster_filer(struct filer_table *tab) | ||
1224 | { | ||
1225 | s32 i = -1, j, iend, jend; | ||
1226 | |||
1227 | while ((i = gfar_get_next_cluster_start(++i, tab)) != -1) { | ||
1228 | j = i; | ||
1229 | while ((j = gfar_get_next_cluster_start(++j, tab)) != -1) { | ||
1230 | /* | ||
1231 | * The cluster entry itself and the previous one | ||
1232 | * (a mask) must be identical! | ||
1233 | */ | ||
1234 | if (tab->fe[i].ctrl != tab->fe[j].ctrl) | ||
1235 | break; | ||
1236 | if (tab->fe[i].prop != tab->fe[j].prop) | ||
1237 | break; | ||
1238 | if (tab->fe[i - 1].ctrl != tab->fe[j - 1].ctrl) | ||
1239 | break; | ||
1240 | if (tab->fe[i - 1].prop != tab->fe[j - 1].prop) | ||
1241 | break; | ||
1242 | iend = gfar_get_next_cluster_end(i, tab); | ||
1243 | jend = gfar_get_next_cluster_end(j, tab); | ||
1244 | if (jend == -1 || iend == -1) | ||
1245 | break; | ||
1246 | /* | ||
1247 | * First we make some free space where our cluster | ||
1248 | * element should be. Then we copy it there and finally | ||
1249 | * delete it from its old location. | ||
1250 | */ | ||
1251 | |||
1252 | if (gfar_expand_filer_entries(iend, (jend - j), tab) | ||
1253 | == -EINVAL) | ||
1254 | break; | ||
1255 | |||
1256 | gfar_copy_filer_entries(&(tab->fe[iend + 1]), | ||
1257 | &(tab->fe[jend + 1]), jend - j); | ||
1258 | |||
1259 | if (gfar_trim_filer_entries(jend - 1, | ||
1260 | jend + (jend - j), tab) == -EINVAL) | ||
1261 | return; | ||
1262 | |||
1263 | /* Mask out cluster bit */ | ||
1264 | tab->fe[iend].ctrl &= ~(RQFCR_CLE); | ||
1265 | } | ||
1266 | } | ||
1267 | } | ||
1268 | |||
1269 | /* Swaps the masked bits of a1<>a2 and b1<>b2 */ | ||
1270 | static void gfar_swap_bits(struct gfar_filer_entry *a1, | ||
1271 | struct gfar_filer_entry *a2, struct gfar_filer_entry *b1, | ||
1272 | struct gfar_filer_entry *b2, u32 mask) | ||
1273 | { | ||
1274 | u32 temp[4]; | ||
1275 | temp[0] = a1->ctrl & mask; | ||
1276 | temp[1] = a2->ctrl & mask; | ||
1277 | temp[2] = b1->ctrl & mask; | ||
1278 | temp[3] = b2->ctrl & mask; | ||
1279 | |||
1280 | a1->ctrl &= ~mask; | ||
1281 | a2->ctrl &= ~mask; | ||
1282 | b1->ctrl &= ~mask; | ||
1283 | b2->ctrl &= ~mask; | ||
1284 | |||
1285 | a1->ctrl |= temp[1]; | ||
1286 | a2->ctrl |= temp[0]; | ||
1287 | b1->ctrl |= temp[3]; | ||
1288 | b2->ctrl |= temp[2]; | ||
1289 | } | ||
1290 | |||
1291 | /* | ||
1292 | * Generate a list in mask_table of mask values, each with its start | ||
1293 | * and end of validity and a block index marking parts that belong | ||
1294 | * together (glued by ANDs) | ||
1295 | */ | ||
1296 | static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table, | ||
1297 | struct filer_table *tab) | ||
1298 | { | ||
1299 | u32 i, and_index = 0, block_index = 1; | ||
1300 | |||
1301 | for (i = 0; i < tab->index; i++) { | ||
1302 | |||
1303 | /* LSByte of control = 0 sets a mask */ | ||
1304 | if (!(tab->fe[i].ctrl & 0xF)) { | ||
1305 | mask_table[and_index].mask = tab->fe[i].prop; | ||
1306 | mask_table[and_index].start = i; | ||
1307 | mask_table[and_index].block = block_index; | ||
1308 | if (and_index >= 1) | ||
1309 | mask_table[and_index - 1].end = i - 1; | ||
1310 | and_index++; | ||
1311 | } | ||
1312 | /* Cluster starts and ends get separate blocks because they must | ||
1313 | * hold their position */ | ||
1314 | if (tab->fe[i].ctrl & RQFCR_CLE) | ||
1315 | block_index++; | ||
1316 | /* An unset AND bit indicates the end of a dependent block */ | ||
1317 | if (!(tab->fe[i].ctrl & RQFCR_AND)) | ||
1318 | block_index++; | ||
1319 | |||
1320 | } | ||
1321 | |||
1322 | mask_table[and_index - 1].end = i - 1; | ||
1323 | |||
1324 | return and_index; | ||
1325 | } | ||
1326 | |||
1327 | /* | ||
1328 | * Sorts the entries of mask_table by the values of the masks. | ||
1329 | * Important: The 0xFF80 flags of the first and last entry of a | ||
1330 | * block must hold their position (which queue, CLusterEnable, ReJEct, | ||
1331 | * AND) | ||
1332 | */ | ||
1333 | static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table, | ||
1334 | struct filer_table *temp_table, u32 and_index) | ||
1335 | { | ||
1336 | /* Pointer to compare function (_asc or _desc) */ | ||
1337 | int (*gfar_comp)(const void *, const void *); | ||
1338 | |||
1339 | u32 i, size = 0, start = 0, prev = 1; | ||
1340 | u32 old_first, old_last, new_first, new_last; | ||
1341 | |||
1342 | gfar_comp = &gfar_comp_desc; | ||
1343 | |||
1344 | for (i = 0; i < and_index; i++) { | ||
1345 | |||
1346 | if (prev != mask_table[i].block) { | ||
1347 | old_first = mask_table[start].start + 1; | ||
1348 | old_last = mask_table[i - 1].end; | ||
1349 | sort(mask_table + start, size, | ||
1350 | sizeof(struct gfar_mask_entry), | ||
1351 | gfar_comp, &gfar_swap); | ||
1352 | |||
1353 | /* Toggle the sort order for every block. This makes duplicate | ||
1354 | * masks more likely to be adjacent, so more can be dropped below */ | ||
1355 | if (gfar_comp == gfar_comp_desc) | ||
1356 | gfar_comp = &gfar_comp_asc; | ||
1357 | else | ||
1358 | gfar_comp = &gfar_comp_desc; | ||
1359 | |||
1360 | new_first = mask_table[start].start + 1; | ||
1361 | new_last = mask_table[i - 1].end; | ||
1362 | |||
1363 | gfar_swap_bits(&temp_table->fe[new_first], | ||
1364 | &temp_table->fe[old_first], | ||
1365 | &temp_table->fe[new_last], | ||
1366 | &temp_table->fe[old_last], | ||
1367 | RQFCR_QUEUE | RQFCR_CLE | | ||
1368 | RQFCR_RJE | RQFCR_AND | ||
1369 | ); | ||
1370 | |||
1371 | start = i; | ||
1372 | size = 0; | ||
1373 | } | ||
1374 | size++; | ||
1375 | prev = mask_table[i].block; | ||
1376 | } | ||
1377 | |||
1378 | } | ||
1379 | |||
1380 | /* | ||
1381 | * Reduces the number of masks needed in the filer table to save entries. | ||
1382 | * This is done by sorting the masks of a dependent block. A dependent | ||
1383 | * block is identified by gluing ANDs or CLE. The sorting order toggles | ||
1384 | * after every block. Of course entries in the scope of a mask must change | ||
1385 | * their location with it. | ||
1386 | */ | ||
1387 | static int gfar_optimize_filer_masks(struct filer_table *tab) | ||
1388 | { | ||
1389 | struct filer_table *temp_table; | ||
1390 | struct gfar_mask_entry *mask_table; | ||
1391 | |||
1392 | u32 and_index = 0, previous_mask = 0, i = 0, j = 0, size = 0; | ||
1393 | s32 ret = 0; | ||
1394 | |||
1395 | /* We need a copy of the filer table because | ||
1396 | * we want to change its order */ | ||
1397 | temp_table = kmalloc(sizeof(*temp_table), GFP_KERNEL); | ||
1398 | if (temp_table == NULL) | ||
1399 | return -ENOMEM; | ||
1400 | memcpy(temp_table, tab, sizeof(*temp_table)); | ||
1401 | |||
1402 | mask_table = kcalloc(MAX_FILER_CACHE_IDX / 2 + 1, | ||
1403 | sizeof(struct gfar_mask_entry), GFP_KERNEL); | ||
1404 | |||
1405 | if (mask_table == NULL) { | ||
1406 | ret = -ENOMEM; | ||
1407 | goto end; | ||
1408 | } | ||
1409 | |||
1410 | and_index = gfar_generate_mask_table(mask_table, tab); | ||
1411 | |||
1412 | gfar_sort_mask_table(mask_table, temp_table, and_index); | ||
1413 | |||
1414 | /* Now we can copy the data from our duplicated filer table to | ||
1415 | * the real one in the order the mask table says */ | ||
1416 | for (i = 0; i < and_index; i++) { | ||
1417 | size = mask_table[i].end - mask_table[i].start + 1; | ||
1418 | gfar_copy_filer_entries(&(tab->fe[j]), | ||
1419 | &(temp_table->fe[mask_table[i].start]), size); | ||
1420 | j += size; | ||
1421 | } | ||
1422 | |||
1423 | /* And finally we just have to check for duplicated masks and drop | ||
1424 | * the duplicates */ | ||
1425 | for (i = 0; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) { | ||
1426 | if (tab->fe[i].ctrl == 0x80) { | ||
1427 | previous_mask = i++; | ||
1428 | break; | ||
1429 | } | ||
1430 | } | ||
1431 | for (; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) { | ||
1432 | if (tab->fe[i].ctrl == 0x80) { | ||
1433 | if (tab->fe[i].prop == tab->fe[previous_mask].prop) { | ||
1434 | /* Two identical ones found! | ||
1435 | * So drop the second one! */ | ||
1436 | gfar_trim_filer_entries(i, i, tab); | ||
1437 | } else | ||
1438 | /* Not identical! */ | ||
1439 | previous_mask = i; | ||
1440 | } | ||
1441 | } | ||
1442 | |||
1443 | kfree(mask_table); | ||
1444 | end: kfree(temp_table); | ||
1445 | return ret; | ||
1446 | } | ||
1447 | |||
1448 | /* Write the bit-pattern from software's buffer to hardware registers */ | ||
1449 | static int gfar_write_filer_table(struct gfar_private *priv, | ||
1450 | struct filer_table *tab) | ||
1451 | { | ||
1452 | u32 i = 0; | ||
1453 | if (tab->index > MAX_FILER_IDX - 1) | ||
1454 | return -EBUSY; | ||
1455 | |||
1456 | /* Prevent an inconsistent filer table from being processed */ | ||
1457 | lock_rx_qs(priv); | ||
1458 | |||
1459 | /* Fill regular entries */ | ||
1460 | for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].prop); i++) | ||
1461 | gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop); | ||
1462 | /* Fill the rest with fall-throughs */ | ||
1463 | for (; i < MAX_FILER_IDX - 1; i++) | ||
1464 | gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF); | ||
1465 | /* Last entry must be default accept | ||
1466 | * because that's what people expect */ | ||
1467 | gfar_write_filer(priv, i, 0x20, 0x0); | ||
1468 | |||
1469 | unlock_rx_qs(priv); | ||
1470 | |||
1471 | return 0; | ||
1472 | } | ||
1473 | |||
1474 | static int gfar_check_capability(struct ethtool_rx_flow_spec *flow, | ||
1475 | struct gfar_private *priv) | ||
1476 | { | ||
1477 | |||
1478 | if (flow->flow_type & FLOW_EXT) { | ||
1479 | if (~flow->m_ext.data[0] || ~flow->m_ext.data[1]) | ||
1480 | netdev_warn(priv->ndev, | ||
1481 | "User-specific data not supported!\n"); | ||
1482 | if (~flow->m_ext.vlan_etype) | ||
1483 | netdev_warn(priv->ndev, | ||
1484 | "VLAN-etype not supported!\n"); | ||
1485 | } | ||
1486 | if (flow->flow_type == IP_USER_FLOW) | ||
1487 | if (flow->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4) | ||
1488 | netdev_warn(priv->ndev, | ||
1489 | "IP-Version differing from IPv4 not supported!\n"); | ||
1490 | |||
1491 | return 0; | ||
1492 | } | ||
1493 | |||
1494 | static int gfar_process_filer_changes(struct gfar_private *priv) | ||
1495 | { | ||
1496 | struct ethtool_flow_spec_container *j; | ||
1497 | struct filer_table *tab; | ||
1498 | s32 i = 0; | ||
1499 | s32 ret = 0; | ||
1500 | |||
1501 | /* Zero-allocated, so index is set to zero, too! */ | ||
1502 | tab = kzalloc(sizeof(*tab), GFP_KERNEL); | ||
1503 | if (tab == NULL) | ||
1504 | return -ENOMEM; | ||
1505 | |||
1506 | /* Now convert the existing filer data from flow_spec into | ||
1507 | * the filer table's binary format */ | ||
1508 | list_for_each_entry(j, &priv->rx_list.list, list) { | ||
1509 | ret = gfar_convert_to_filer(&j->fs, tab); | ||
1510 | if (ret == -EBUSY) { | ||
1511 | netdev_err(priv->ndev, "Rule not added: No free space!\n"); | ||
1512 | goto end; | ||
1513 | } | ||
1514 | if (ret == -1) { | ||
1515 | netdev_err(priv->ndev, "Rule not added: Unsupported Flow-type!\n"); | ||
1516 | goto end; | ||
1517 | } | ||
1518 | } | ||
1519 | |||
1520 | i = tab->index; | ||
1521 | |||
1522 | /* Optimizations to save entries */ | ||
1523 | gfar_cluster_filer(tab); | ||
1524 | gfar_optimize_filer_masks(tab); | ||
1525 | |||
1526 | pr_debug("\n\tSummary:\n" | ||
1527 | "\tData on hardware: %d\n" | ||
1528 | "\tCompression rate: %d%%\n", | ||
1529 | tab->index, 100 - (100 * tab->index) / i); | ||
1530 | |||
1531 | /* Write everything to hardware */ | ||
1532 | ret = gfar_write_filer_table(priv, tab); | ||
1533 | if (ret == -EBUSY) { | ||
1534 | netdev_err(priv->ndev, "Rule not added: No free space!\n"); | ||
1535 | goto end; | ||
1536 | } | ||
1537 | |||
1538 | end: kfree(tab); | ||
1539 | return ret; | ||
1540 | } | ||
1541 | |||
1542 | static void gfar_invert_masks(struct ethtool_rx_flow_spec *flow) | ||
1543 | { | ||
1544 | u32 i = 0; | ||
1545 | |||
1546 | for (i = 0; i < sizeof(flow->m_u); i++) | ||
1547 | flow->m_u.hdata[i] ^= 0xFF; | ||
1548 | |||
1549 | flow->m_ext.vlan_etype ^= 0xFFFF; | ||
1550 | flow->m_ext.vlan_tci ^= 0xFFFF; | ||
1551 | flow->m_ext.data[0] ^= ~0; | ||
1552 | flow->m_ext.data[1] ^= ~0; | ||
1553 | } | ||
1554 | |||
1555 | static int gfar_add_cls(struct gfar_private *priv, | ||
1556 | struct ethtool_rx_flow_spec *flow) | ||
1557 | { | ||
1558 | struct ethtool_flow_spec_container *temp, *comp; | ||
1559 | int ret = 0; | ||
1560 | |||
1561 | temp = kmalloc(sizeof(*temp), GFP_KERNEL); | ||
1562 | if (temp == NULL) | ||
1563 | return -ENOMEM; | ||
1564 | memcpy(&temp->fs, flow, sizeof(temp->fs)); | ||
1565 | |||
1566 | gfar_invert_masks(&temp->fs); | ||
1567 | ret = gfar_check_capability(&temp->fs, priv); | ||
1568 | if (ret) | ||
1569 | goto clean_mem; | ||
1570 | /* Link in the new element at the right @location */ | ||
1571 | if (list_empty(&priv->rx_list.list)) { | ||
1572 | ret = gfar_check_filer_hardware(priv); | ||
1573 | if (ret != 0) | ||
1574 | goto clean_mem; | ||
1575 | list_add(&temp->list, &priv->rx_list.list); | ||
1576 | goto process; | ||
1577 | } else { | ||
1578 | |||
1579 | list_for_each_entry(comp, &priv->rx_list.list, list) { | ||
1580 | if (comp->fs.location > flow->location) { | ||
1581 | list_add_tail(&temp->list, &comp->list); | ||
1582 | goto process; | ||
1583 | } | ||
1584 | if (comp->fs.location == flow->location) { | ||
1585 | netdev_err(priv->ndev, | ||
1586 | "Rule not added: ID %d not free!\n", | ||
1587 | flow->location); | ||
1588 | ret = -EBUSY; | ||
1589 | goto clean_mem; | ||
1590 | } | ||
1591 | } | ||
1592 | list_add_tail(&temp->list, &priv->rx_list.list); | ||
1593 | } | ||
1594 | |||
1595 | process: | ||
1596 | ret = gfar_process_filer_changes(priv); | ||
1597 | if (ret) | ||
1598 | goto clean_list; | ||
1599 | priv->rx_list.count++; | ||
1600 | return ret; | ||
1601 | |||
1602 | clean_list: | ||
1603 | list_del(&temp->list); | ||
1604 | clean_mem: | ||
1605 | kfree(temp); | ||
1606 | return ret; | ||
1607 | } | ||
1608 | |||
1609 | static int gfar_del_cls(struct gfar_private *priv, u32 loc) | ||
1610 | { | ||
1611 | struct ethtool_flow_spec_container *comp; | ||
1612 | u32 ret = -EINVAL; | ||
1613 | |||
1614 | if (list_empty(&priv->rx_list.list)) | ||
1615 | return ret; | ||
1616 | |||
1617 | list_for_each_entry(comp, &priv->rx_list.list, list) { | ||
1618 | if (comp->fs.location == loc) { | ||
1619 | list_del(&comp->list); | ||
1620 | kfree(comp); | ||
1621 | priv->rx_list.count--; | ||
1622 | gfar_process_filer_changes(priv); | ||
1623 | ret = 0; | ||
1624 | break; | ||
1625 | } | ||
1626 | } | ||
1627 | |||
1628 | return ret; | ||
1629 | |||
1630 | } | ||
1631 | |||
1632 | static int gfar_get_cls(struct gfar_private *priv, struct ethtool_rxnfc *cmd) | ||
1633 | { | ||
1634 | struct ethtool_flow_spec_container *comp; | ||
1635 | u32 ret = -EINVAL; | ||
1636 | |||
1637 | list_for_each_entry(comp, &priv->rx_list.list, list) { | ||
1638 | if (comp->fs.location == cmd->fs.location) { | ||
1639 | memcpy(&cmd->fs, &comp->fs, sizeof(cmd->fs)); | ||
1640 | gfar_invert_masks(&cmd->fs); | ||
1641 | ret = 0; | ||
1642 | break; | ||
1643 | } | ||
1644 | } | ||
1645 | |||
1646 | return ret; | ||
1647 | } | ||
1648 | |||
1649 | static int gfar_get_cls_all(struct gfar_private *priv, | ||
1650 | struct ethtool_rxnfc *cmd, u32 *rule_locs) | ||
1651 | { | ||
1652 | struct ethtool_flow_spec_container *comp; | ||
1653 | u32 i = 0; | ||
1654 | |||
1655 | list_for_each_entry(comp, &priv->rx_list.list, list) { | ||
1656 | if (i < cmd->rule_cnt) { | ||
1657 | rule_locs[i] = comp->fs.location; | ||
1658 | i++; | ||
1659 | } | ||
1660 | } | ||
1661 | |||
1662 | cmd->data = MAX_FILER_IDX; | ||
1663 | |||
1664 | return 0; | ||
1665 | } | ||
1666 | |||
1667 | static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd) | ||
1668 | { | ||
1669 | struct gfar_private *priv = netdev_priv(dev); | ||
1670 | int ret = 0; | ||
1671 | |||
1672 | mutex_lock(&priv->rx_queue_access); | ||
1673 | |||
1674 | switch (cmd->cmd) { | ||
1675 | case ETHTOOL_SRXFH: | ||
1676 | ret = gfar_set_hash_opts(priv, cmd); | ||
1677 | break; | ||
1678 | case ETHTOOL_SRXCLSRLINS: | ||
1679 | if (cmd->fs.ring_cookie != RX_CLS_FLOW_DISC && | ||
1680 | cmd->fs.ring_cookie >= priv->num_rx_queues) { | ||
1681 | ret = -EINVAL; | ||
1682 | break; | ||
1683 | } | ||
1684 | ret = gfar_add_cls(priv, &cmd->fs); | ||
1685 | break; | ||
1686 | case ETHTOOL_SRXCLSRLDEL: | ||
1687 | ret = gfar_del_cls(priv, cmd->fs.location); | ||
1688 | break; | ||
1689 | default: | ||
1690 | ret = -EINVAL; | ||
1691 | } | ||
1692 | |||
1693 | mutex_unlock(&priv->rx_queue_access); | ||
1694 | |||
1695 | return ret; | ||
1696 | } | ||
1697 | |||
1698 | static int gfar_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd, | ||
1699 | void *rule_locs) | ||
1700 | { | ||
1701 | struct gfar_private *priv = netdev_priv(dev); | ||
1702 | int ret = 0; | ||
1703 | |||
1704 | switch (cmd->cmd) { | ||
1705 | case ETHTOOL_GRXRINGS: | ||
1706 | cmd->data = priv->num_rx_queues; | ||
1707 | break; | ||
1708 | case ETHTOOL_GRXCLSRLCNT: | ||
1709 | cmd->rule_cnt = priv->rx_list.count; | ||
1710 | break; | ||
1711 | case ETHTOOL_GRXCLSRULE: | ||
1712 | ret = gfar_get_cls(priv, cmd); | ||
1713 | break; | ||
1714 | case ETHTOOL_GRXCLSRLALL: | ||
1715 | ret = gfar_get_cls_all(priv, cmd, (u32 *) rule_locs); | ||
1716 | break; | ||
1717 | default: | ||
1718 | ret = -EINVAL; | ||
1719 | break; | ||
1720 | } | ||
1721 | |||
1722 | return ret; | ||
1723 | } | ||
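Both entry points are reached from the ethtool core, which userspace drives through the SIOCETHTOOL ioctl with a struct ethtool_rxnfc payload. A minimal userspace sketch (the interface name eth0 is an assumption; substitute the actual netdev) that queries the RX ring count handled by ETHTOOL_GRXRINGS above:

	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>

	int main(void)
	{
		struct ethtool_rxnfc nfc = { .cmd = ETHTOOL_GRXRINGS };
		struct ifreq ifr;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		if (fd < 0)
			return 1;
		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&nfc;
		if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
			printf("rx rings: %llu\n",
			       (unsigned long long)nfc.data);
		return 0;
	}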
1724 | |||
1725 | const struct ethtool_ops gfar_ethtool_ops = { | ||
1726 | .get_settings = gfar_gsettings, | ||
1727 | .set_settings = gfar_ssettings, | ||
1728 | .get_drvinfo = gfar_gdrvinfo, | ||
1729 | .get_regs_len = gfar_reglen, | ||
1730 | .get_regs = gfar_get_regs, | ||
1731 | .get_link = ethtool_op_get_link, | ||
1732 | .get_coalesce = gfar_gcoalesce, | ||
1733 | .set_coalesce = gfar_scoalesce, | ||
1734 | .get_ringparam = gfar_gringparam, | ||
1735 | .set_ringparam = gfar_sringparam, | ||
1736 | .get_strings = gfar_gstrings, | ||
1737 | .get_sset_count = gfar_sset_count, | ||
1738 | .get_ethtool_stats = gfar_fill_stats, | ||
1739 | .get_msglevel = gfar_get_msglevel, | ||
1740 | .set_msglevel = gfar_set_msglevel, | ||
1741 | #ifdef CONFIG_PM | ||
1742 | .get_wol = gfar_get_wol, | ||
1743 | .set_wol = gfar_set_wol, | ||
1744 | #endif | ||
1745 | .set_rxnfc = gfar_set_nfc, | ||
1746 | .get_rxnfc = gfar_get_nfc, | ||
1747 | }; | ||
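The table is wired up elsewhere in the driver when the net_device is set up; in kernels of this vintage that is done with the SET_ETHTOOL_OPS() helper. A one-line sketch (the surrounding probe code owning @dev is assumed, not shown here):

	/* equivalent to: dev->ethtool_ops = &gfar_ethtool_ops; */
	SET_ETHTOOL_OPS(dev, &gfar_ethtool_ops);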
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c new file mode 100644 index 000000000000..f67b8aebc89c --- /dev/null +++ b/drivers/net/ethernet/freescale/gianfar_ptp.c | |||
@@ -0,0 +1,583 @@ | |||
1 | /* | ||
2 | * PTP 1588 clock using the eTSEC | ||
3 | * | ||
4 | * Copyright (C) 2010 OMICRON electronics GmbH | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
19 | */ | ||
20 | #include <linux/device.h> | ||
21 | #include <linux/hrtimer.h> | ||
22 | #include <linux/init.h> | ||
23 | #include <linux/interrupt.h> | ||
24 | #include <linux/kernel.h> | ||
25 | #include <linux/module.h> | ||
26 | #include <linux/of.h> | ||
27 | #include <linux/of_platform.h> | ||
28 | #include <linux/timex.h> | ||
29 | #include <linux/io.h> | ||
30 | |||
31 | #include <linux/ptp_clock_kernel.h> | ||
32 | |||
33 | #include "gianfar.h" | ||
34 | |||
35 | /* | ||
36 | * gianfar ptp registers | ||
37 | * Generated by regen.tcl on Thu May 13 01:38:57 PM CEST 2010 | ||
38 | */ | ||
39 | struct gianfar_ptp_registers { | ||
40 | u32 tmr_ctrl; /* Timer control register */ | ||
41 | u32 tmr_tevent; /* Timestamp event register */ | ||
42 | u32 tmr_temask; /* Timer event mask register */ | ||
43 | u32 tmr_pevent; /* Timestamp PTP event register */ | ||
44 | u32 tmr_pemask; /* Timestamp PTP event mask register */ | ||
45 | u32 tmr_stat; /* Timestamp status register */ | ||
46 | u32 tmr_cnt_h; /* Timer counter high register */ | ||
47 | u32 tmr_cnt_l; /* Timer counter low register */ | ||
48 | u32 tmr_add; /* Timer drift compensation addend register */ | ||
49 | u32 tmr_acc; /* Timer accumulator register */ | ||
50 | u32 tmr_prsc; /* Timer prescale */ | ||
51 | u8 res1[4]; | ||
52 | u32 tmroff_h; /* Timer offset high */ | ||
53 | u32 tmroff_l; /* Timer offset low */ | ||
54 | u8 res2[8]; | ||
55 | u32 tmr_alarm1_h; /* Timer alarm 1 high register */ | ||
56 | u32 tmr_alarm1_l; /* Timer alarm 1 low register */ | ||
57 | u32 tmr_alarm2_h; /* Timer alarm 2 high register */ | ||
58 | u32 tmr_alarm2_l; /* Timer alarm 2 low register */ | ||
59 | u8 res3[48]; | ||
60 | u32 tmr_fiper1; /* Timer fixed period interval */ | ||
61 | u32 tmr_fiper2; /* Timer fixed period interval */ | ||
62 | u32 tmr_fiper3; /* Timer fixed period interval */ | ||
63 | u8 res4[20]; | ||
64 | u32 tmr_etts1_h; /* Timestamp of general purpose external trigger */ | ||
65 | u32 tmr_etts1_l; /* Timestamp of general purpose external trigger */ | ||
66 | u32 tmr_etts2_h; /* Timestamp of general purpose external trigger */ | ||
67 | u32 tmr_etts2_l; /* Timestamp of general purpose external trigger */ | ||
68 | }; | ||
69 | |||
70 | /* Bit definitions for the TMR_CTRL register */ | ||
71 | #define ALM1P (1<<31) /* Alarm1 output polarity */ | ||
72 | #define ALM2P (1<<30) /* Alarm2 output polarity */ | ||
73 | #define FS (1<<28) /* FIPER start indication */ | ||
74 | #define PP1L (1<<27) /* Fiper1 pulse loopback mode enabled. */ | ||
75 | #define PP2L (1<<26) /* Fiper2 pulse loopback mode enabled. */ | ||
76 | #define TCLK_PERIOD_SHIFT (16) /* 1588 timer reference clock period. */ | ||
77 | #define TCLK_PERIOD_MASK (0x3ff) | ||
78 | #define RTPE (1<<15) /* Record Tx Timestamp to PAL Enable. */ | ||
79 | #define FRD (1<<14) /* FIPER Realignment Disable */ | ||
80 | #define ESFDP (1<<11) /* External Tx/Rx SFD Polarity. */ | ||
81 | #define ESFDE (1<<10) /* External Tx/Rx SFD Enable. */ | ||
82 | #define ETEP2 (1<<9) /* External trigger 2 edge polarity */ | ||
83 | #define ETEP1 (1<<8) /* External trigger 1 edge polarity */ | ||
84 | #define COPH (1<<7) /* Generated clock output phase. */ | ||
85 | #define CIPH (1<<6) /* External oscillator input clock phase */ | ||
86 | #define TMSR (1<<5) /* Timer soft reset. */ | ||
87 | #define BYP (1<<3) /* Bypass drift compensated clock */ | ||
88 | #define TE (1<<2) /* 1588 timer enable. */ | ||
89 | #define CKSEL_SHIFT (0) /* 1588 Timer reference clock source */ | ||
90 | #define CKSEL_MASK (0x3) | ||
91 | |||
92 | /* Bit definitions for the TMR_TEVENT register */ | ||
93 | #define ETS2 (1<<25) /* External trigger 2 timestamp sampled */ | ||
94 | #define ETS1 (1<<24) /* External trigger 1 timestamp sampled */ | ||
95 | #define ALM2 (1<<17) /* Current time = alarm time register 2 */ | ||
96 | #define ALM1 (1<<16) /* Current time = alarm time register 1 */ | ||
97 | #define PP1 (1<<7) /* periodic pulse generated on FIPER1 */ | ||
98 | #define PP2 (1<<6) /* periodic pulse generated on FIPER2 */ | ||
99 | #define PP3 (1<<5) /* periodic pulse generated on FIPER3 */ | ||
100 | |||
101 | /* Bit definitions for the TMR_TEMASK register */ | ||
102 | #define ETS2EN (1<<25) /* External trigger 2 timestamp enable */ | ||
103 | #define ETS1EN (1<<24) /* External trigger 1 timestamp enable */ | ||
104 | #define ALM2EN (1<<17) /* Timer ALM2 event enable */ | ||
105 | #define ALM1EN (1<<16) /* Timer ALM1 event enable */ | ||
106 | #define PP1EN (1<<7) /* Periodic pulse event 1 enable */ | ||
107 | #define PP2EN (1<<6) /* Periodic pulse event 2 enable */ | ||
108 | |||
109 | /* Bit definitions for the TMR_PEVENT register */ | ||
110 | #define TXP2 (1&lt;&lt;9) /* PTP transmitted timestamp in TXTS2 */ | ||
111 | #define TXP1 (1<<8) /* PTP transmitted timestamp in TXTS1 */ | ||
112 | #define RXP (1<<0) /* PTP frame has been received */ | ||
113 | |||
114 | /* Bit definitions for the TMR_PEMASK register */ | ||
115 | #define TXP2EN (1<<9) /* Transmit PTP packet event 2 enable */ | ||
116 | #define TXP1EN (1<<8) /* Transmit PTP packet event 1 enable */ | ||
117 | #define RXPEN (1<<0) /* Receive PTP packet event enable */ | ||
118 | |||
119 | /* Bit definitions for the TMR_STAT register */ | ||
120 | #define STAT_VEC_SHIFT (0) /* Timer general purpose status vector */ | ||
121 | #define STAT_VEC_MASK (0x3f) | ||
122 | |||
123 | /* Bit definitions for the TMR_PRSC register */ | ||
124 | #define PRSC_OCK_SHIFT (0) /* Output clock division/prescale factor. */ | ||
125 | #define PRSC_OCK_MASK (0xffff) | ||
126 | |||
127 | |||
128 | #define DRIVER "gianfar_ptp" | ||
129 | #define DEFAULT_CKSEL 1 | ||
130 | #define N_ALARM 1 /* first alarm is used internally to reset fipers */ | ||
131 | #define N_EXT_TS 2 | ||
132 | #define REG_SIZE sizeof(struct gianfar_ptp_registers) | ||
133 | |||
134 | struct etsects { | ||
135 | struct gianfar_ptp_registers *regs; | ||
136 | spinlock_t lock; /* protects regs */ | ||
137 | struct ptp_clock *clock; | ||
138 | struct ptp_clock_info caps; | ||
139 | struct resource *rsrc; | ||
140 | int irq; | ||
141 | u64 alarm_interval; /* for periodic alarm */ | ||
142 | u64 alarm_value; | ||
143 | u32 tclk_period; /* nanoseconds */ | ||
144 | u32 tmr_prsc; | ||
145 | u32 tmr_add; | ||
146 | u32 cksel; | ||
147 | u32 tmr_fiper1; | ||
148 | u32 tmr_fiper2; | ||
149 | }; | ||
150 | |||
151 | /* | ||
152 | * Register access functions | ||
153 | */ | ||
154 | |||
155 | /* Caller must hold etsects->lock. */ | ||
156 | static u64 tmr_cnt_read(struct etsects *etsects) | ||
157 | { | ||
158 | u64 ns; | ||
159 | u32 lo, hi; | ||
160 | |||
161 | lo = gfar_read(&etsects->regs->tmr_cnt_l); | ||
162 | hi = gfar_read(&etsects->regs->tmr_cnt_h); | ||
163 | ns = ((u64) hi) << 32; | ||
164 | ns |= lo; | ||
165 | return ns; | ||
166 | } | ||
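tmr_cnt_read() reads the low half first and then the high half; the caller serializes access via etsects->lock, but on a free-running counter whose halves are not latched together the high word could still roll over between the two reads. A generic retry sketch for that case (illustrative only, not needed by this driver):

	static u64 split_counter_read(u32 __iomem *hi_reg, u32 __iomem *lo_reg)
	{
		u32 hi, lo, hi2;

		do {
			hi = in_be32(hi_reg);
			lo = in_be32(lo_reg);
			hi2 = in_be32(hi_reg);	/* re-read: detect rollover */
		} while (hi != hi2);

		return ((u64)hi << 32) | lo;
	}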
167 | |||
168 | /* Caller must hold etsects->lock. */ | ||
169 | static void tmr_cnt_write(struct etsects *etsects, u64 ns) | ||
170 | { | ||
171 | u32 hi = ns >> 32; | ||
172 | u32 lo = ns & 0xffffffff; | ||
173 | |||
174 | gfar_write(&etsects->regs->tmr_cnt_l, lo); | ||
175 | gfar_write(&etsects->regs->tmr_cnt_h, hi); | ||
176 | } | ||
177 | |||
178 | /* Caller must hold etsects->lock. */ | ||
179 | static void set_alarm(struct etsects *etsects) | ||
180 | { | ||
181 | u64 ns; | ||
182 | u32 lo, hi; | ||
183 | |||
184 | ns = tmr_cnt_read(etsects) + 1500000000ULL; | ||
185 | ns = div_u64(ns, 1000000000UL) * 1000000000ULL; | ||
186 | ns -= etsects->tclk_period; | ||
187 | hi = ns >> 32; | ||
188 | lo = ns & 0xffffffff; | ||
189 | gfar_write(&etsects->regs->tmr_alarm1_l, lo); | ||
190 | gfar_write(&etsects->regs->tmr_alarm1_h, hi); | ||
191 | } | ||
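Worked example of the alarm arithmetic above (numbers are illustrative): if the counter reads 41.7e9 ns, adding 1.5e9 gives 43.2e9; the div_u64() truncation to whole seconds yields 43.0e9; subtracting one tclk_period then arms alarm 1 on the final tick before that second boundary, so the fixed-period (FIPER) pulses restarted by set_fipers() line up exactly with whole seconds.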
192 | |||
193 | /* Caller must hold etsects->lock. */ | ||
194 | static void set_fipers(struct etsects *etsects) | ||
195 | { | ||
196 | set_alarm(etsects); | ||
197 | gfar_write(&etsects->regs->tmr_fiper1, etsects->tmr_fiper1); | ||
198 | gfar_write(&etsects->regs->tmr_fiper2, etsects->tmr_fiper2); | ||
199 | } | ||
200 | |||
201 | /* | ||
202 | * Interrupt service routine | ||
203 | */ | ||
204 | |||
205 | static irqreturn_t isr(int irq, void *priv) | ||
206 | { | ||
207 | struct etsects *etsects = priv; | ||
208 | struct ptp_clock_event event; | ||
209 | u64 ns; | ||
210 | u32 ack = 0, lo, hi, mask, val; | ||
211 | |||
212 | val = gfar_read(&etsects->regs->tmr_tevent); | ||
213 | |||
214 | if (val & ETS1) { | ||
215 | ack |= ETS1; | ||
216 | hi = gfar_read(&etsects->regs->tmr_etts1_h); | ||
217 | lo = gfar_read(&etsects->regs->tmr_etts1_l); | ||
218 | event.type = PTP_CLOCK_EXTTS; | ||
219 | event.index = 0; | ||
220 | event.timestamp = ((u64) hi) << 32; | ||
221 | event.timestamp |= lo; | ||
222 | ptp_clock_event(etsects->clock, &event); | ||
223 | } | ||
224 | |||
225 | if (val & ETS2) { | ||
226 | ack |= ETS2; | ||
227 | hi = gfar_read(&etsects->regs->tmr_etts2_h); | ||
228 | lo = gfar_read(&etsects->regs->tmr_etts2_l); | ||
229 | event.type = PTP_CLOCK_EXTTS; | ||
230 | event.index = 1; | ||
231 | event.timestamp = ((u64) hi) << 32; | ||
232 | event.timestamp |= lo; | ||
233 | ptp_clock_event(etsects->clock, &event); | ||
234 | } | ||
235 | |||
236 | if (val & ALM2) { | ||
237 | ack |= ALM2; | ||
238 | if (etsects->alarm_value) { | ||
239 | event.type = PTP_CLOCK_ALARM; | ||
240 | event.index = 0; | ||
241 | event.timestamp = etsects->alarm_value; | ||
242 | ptp_clock_event(etsects->clock, &event); | ||
243 | } | ||
244 | if (etsects->alarm_interval) { | ||
245 | ns = etsects->alarm_value + etsects->alarm_interval; | ||
246 | hi = ns >> 32; | ||
247 | lo = ns & 0xffffffff; | ||
248 | spin_lock(&etsects->lock); | ||
249 | gfar_write(&etsects->regs->tmr_alarm2_l, lo); | ||
250 | gfar_write(&etsects->regs->tmr_alarm2_h, hi); | ||
251 | spin_unlock(&etsects->lock); | ||
252 | etsects->alarm_value = ns; | ||
253 | } else { | ||
254 | gfar_write(&etsects->regs->tmr_tevent, ALM2); | ||
255 | spin_lock(&etsects->lock); | ||
256 | mask = gfar_read(&etsects->regs->tmr_temask); | ||
257 | mask &= ~ALM2EN; | ||
258 | gfar_write(&etsects->regs->tmr_temask, mask); | ||
259 | spin_unlock(&etsects->lock); | ||
260 | etsects->alarm_value = 0; | ||
261 | etsects->alarm_interval = 0; | ||
262 | } | ||
263 | } | ||
264 | |||
265 | if (val & PP1) { | ||
266 | ack |= PP1; | ||
267 | event.type = PTP_CLOCK_PPS; | ||
268 | ptp_clock_event(etsects->clock, &event); | ||
269 | } | ||
270 | |||
271 | if (ack) { | ||
272 | gfar_write(&etsects->regs->tmr_tevent, ack); | ||
273 | return IRQ_HANDLED; | ||
274 | } else | ||
275 | return IRQ_NONE; | ||
276 | } | ||
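Note the acknowledge pattern: the handler accumulates every event bit it serviced into ack and writes them back to tmr_tevent in a single store. On a write-one-to-clear event register this clears exactly the handled events while leaving any bit raised after the initial read still pending, so it will trigger a fresh interrupt rather than being lost.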
277 | |||
278 | /* | ||
279 | * PTP clock operations | ||
280 | */ | ||
281 | |||
282 | static int ptp_gianfar_adjfreq(struct ptp_clock_info *ptp, s32 ppb) | ||
283 | { | ||
284 | u64 adj; | ||
285 | u32 diff, tmr_add; | ||
286 | int neg_adj = 0; | ||
287 | struct etsects *etsects = container_of(ptp, struct etsects, caps); | ||
288 | |||
289 | if (ppb < 0) { | ||
290 | neg_adj = 1; | ||
291 | ppb = -ppb; | ||
292 | } | ||
293 | tmr_add = etsects->tmr_add; | ||
294 | adj = tmr_add; | ||
295 | adj *= ppb; | ||
296 | diff = div_u64(adj, 1000000000ULL); | ||
297 | |||
298 | tmr_add = neg_adj ? tmr_add - diff : tmr_add + diff; | ||
299 | |||
300 | gfar_write(&etsects->regs->tmr_add, tmr_add); | ||
301 | |||
302 | return 0; | ||
303 | } | ||
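The addend scaling above computes diff = tmr_add * |ppb| / 10^9, i.e. the requested parts-per-billion of the nominal addend. Worked example with illustrative values: tmr_add = 0x80000000 and ppb = 100000 (100 ppm) give diff = 2147483648 * 100000 / 1000000000 ≈ 214748 (0x346dc), so the addend is nudged by 0.01% in the requested direction.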
304 | |||
305 | static int ptp_gianfar_adjtime(struct ptp_clock_info *ptp, s64 delta) | ||
306 | { | ||
307 | s64 now; | ||
308 | unsigned long flags; | ||
309 | struct etsects *etsects = container_of(ptp, struct etsects, caps); | ||
310 | |||
311 | spin_lock_irqsave(&etsects->lock, flags); | ||
312 | |||
313 | now = tmr_cnt_read(etsects); | ||
314 | now += delta; | ||
315 | tmr_cnt_write(etsects, now); | ||
316 | |||
317 | spin_unlock_irqrestore(&etsects->lock, flags); | ||
318 | |||
319 | set_fipers(etsects); | ||
320 | |||
321 | return 0; | ||
322 | } | ||
323 | |||
324 | static int ptp_gianfar_gettime(struct ptp_clock_info *ptp, struct timespec *ts) | ||
325 | { | ||
326 | u64 ns; | ||
327 | u32 remainder; | ||
328 | unsigned long flags; | ||
329 | struct etsects *etsects = container_of(ptp, struct etsects, caps); | ||
330 | |||
331 | spin_lock_irqsave(&etsects->lock, flags); | ||
332 | |||
333 | ns = tmr_cnt_read(etsects); | ||
334 | |||
335 | spin_unlock_irqrestore(&etsects->lock, flags); | ||
336 | |||
337 | ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder); | ||
338 | ts->tv_nsec = remainder; | ||
339 | return 0; | ||
340 | } | ||
341 | |||
342 | static int ptp_gianfar_settime(struct ptp_clock_info *ptp, | ||
343 | const struct timespec *ts) | ||
344 | { | ||
345 | u64 ns; | ||
346 | unsigned long flags; | ||
347 | struct etsects *etsects = container_of(ptp, struct etsects, caps); | ||
348 | |||
349 | ns = ts->tv_sec * 1000000000ULL; | ||
350 | ns += ts->tv_nsec; | ||
351 | |||
352 | spin_lock_irqsave(&etsects->lock, flags); | ||
353 | |||
354 | tmr_cnt_write(etsects, ns); | ||
355 | set_fipers(etsects); | ||
356 | |||
357 | spin_unlock_irqrestore(&etsects->lock, flags); | ||
358 | |||
359 | return 0; | ||
360 | } | ||
361 | |||
362 | static int ptp_gianfar_enable(struct ptp_clock_info *ptp, | ||
363 | struct ptp_clock_request *rq, int on) | ||
364 | { | ||
365 | struct etsects *etsects = container_of(ptp, struct etsects, caps); | ||
366 | unsigned long flags; | ||
367 | u32 bit, mask; | ||
368 | |||
369 | switch (rq->type) { | ||
370 | case PTP_CLK_REQ_EXTTS: | ||
371 | switch (rq->extts.index) { | ||
372 | case 0: | ||
373 | bit = ETS1EN; | ||
374 | break; | ||
375 | case 1: | ||
376 | bit = ETS2EN; | ||
377 | break; | ||
378 | default: | ||
379 | return -EINVAL; | ||
380 | } | ||
381 | spin_lock_irqsave(&etsects->lock, flags); | ||
382 | mask = gfar_read(&etsects->regs->tmr_temask); | ||
383 | if (on) | ||
384 | mask |= bit; | ||
385 | else | ||
386 | mask &= ~bit; | ||
387 | gfar_write(&etsects->regs->tmr_temask, mask); | ||
388 | spin_unlock_irqrestore(&etsects->lock, flags); | ||
389 | return 0; | ||
390 | |||
391 | case PTP_CLK_REQ_PPS: | ||
392 | spin_lock_irqsave(&etsects->lock, flags); | ||
393 | mask = gfar_read(&etsects->regs->tmr_temask); | ||
394 | if (on) | ||
395 | mask |= PP1EN; | ||
396 | else | ||
397 | mask &= ~PP1EN; | ||
398 | gfar_write(&etsects->regs->tmr_temask, mask); | ||
399 | spin_unlock_irqrestore(&etsects->lock, flags); | ||
400 | return 0; | ||
401 | |||
402 | default: | ||
403 | break; | ||
404 | } | ||
405 | |||
406 | return -EOPNOTSUPP; | ||
407 | } | ||
408 | |||
409 | static struct ptp_clock_info ptp_gianfar_caps = { | ||
410 | .owner = THIS_MODULE, | ||
411 | .name = "gianfar clock", | ||
412 | .max_adj = 512000, | ||
413 | .n_alarm = N_ALARM, | ||
414 | .n_ext_ts = N_EXT_TS, | ||
415 | .n_per_out = 0, | ||
416 | .pps = 1, | ||
417 | .adjfreq = ptp_gianfar_adjfreq, | ||
418 | .adjtime = ptp_gianfar_adjtime, | ||
419 | .gettime = ptp_gianfar_gettime, | ||
420 | .settime = ptp_gianfar_settime, | ||
421 | .enable = ptp_gianfar_enable, | ||
422 | }; | ||
423 | |||
424 | /* OF device tree */ | ||
425 | |||
426 | static int get_of_u32(struct device_node *node, char *str, u32 *val) | ||
427 | { | ||
428 | int plen; | ||
429 | const u32 *prop = of_get_property(node, str, &plen); | ||
430 | |||
431 | if (!prop || plen != sizeof(*prop)) | ||
432 | return -1; | ||
433 | *val = *prop; | ||
434 | return 0; | ||
435 | } | ||
436 | |||
437 | static int gianfar_ptp_probe(struct platform_device *dev) | ||
438 | { | ||
439 | struct device_node *node = dev->dev.of_node; | ||
440 | struct etsects *etsects; | ||
441 | struct timespec now; | ||
442 | int err = -ENOMEM; | ||
443 | u32 tmr_ctrl; | ||
444 | unsigned long flags; | ||
445 | |||
446 | etsects = kzalloc(sizeof(*etsects), GFP_KERNEL); | ||
447 | if (!etsects) | ||
448 | goto no_memory; | ||
449 | |||
450 | err = -ENODEV; | ||
451 | |||
452 | etsects->caps = ptp_gianfar_caps; | ||
453 | etsects->cksel = DEFAULT_CKSEL; | ||
454 | |||
455 | if (get_of_u32(node, "fsl,tclk-period", &etsects->tclk_period) || | ||
456 | get_of_u32(node, "fsl,tmr-prsc", &etsects->tmr_prsc) || | ||
457 | get_of_u32(node, "fsl,tmr-add", &etsects->tmr_add) || | ||
458 | get_of_u32(node, "fsl,tmr-fiper1", &etsects->tmr_fiper1) || | ||
459 | get_of_u32(node, "fsl,tmr-fiper2", &etsects->tmr_fiper2) || | ||
460 | get_of_u32(node, "fsl,max-adj", &etsects->caps.max_adj)) { | ||
461 | pr_err("device tree node missing required elements\n"); | ||
462 | goto no_node; | ||
463 | } | ||
464 | |||
465 | etsects->irq = platform_get_irq(dev, 0); | ||
466 | |||
467 | if (etsects->irq == NO_IRQ) { | ||
468 | pr_err("irq not in device tree\n"); | ||
469 | goto no_node; | ||
470 | } | ||
471 | if (request_irq(etsects->irq, isr, 0, DRIVER, etsects)) { | ||
472 | pr_err("request_irq failed\n"); | ||
473 | goto no_node; | ||
474 | } | ||
475 | |||
476 | etsects->rsrc = platform_get_resource(dev, IORESOURCE_MEM, 0); | ||
477 | if (!etsects->rsrc) { | ||
478 | pr_err("no resource\n"); | ||
479 | goto no_resource; | ||
480 | } | ||
481 | if (request_resource(&ioport_resource, etsects->rsrc)) { | ||
482 | pr_err("resource busy\n"); | ||
483 | goto no_resource; | ||
484 | } | ||
485 | |||
486 | spin_lock_init(&etsects->lock); | ||
487 | |||
488 | etsects->regs = ioremap(etsects->rsrc->start, | ||
489 | resource_size(etsects->rsrc)); | ||
490 | if (!etsects->regs) { | ||
491 | pr_err("ioremap ptp registers failed\n"); | ||
492 | goto no_ioremap; | ||
493 | } | ||
494 | getnstimeofday(&now); | ||
495 | ptp_gianfar_settime(&etsects->caps, &now); | ||
496 | |||
497 | tmr_ctrl = | ||
498 | (etsects->tclk_period & TCLK_PERIOD_MASK) << TCLK_PERIOD_SHIFT | | ||
499 | (etsects->cksel & CKSEL_MASK) << CKSEL_SHIFT; | ||
500 | |||
501 | spin_lock_irqsave(&etsects->lock, flags); | ||
502 | |||
503 | gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl); | ||
504 | gfar_write(&etsects->regs->tmr_add, etsects->tmr_add); | ||
505 | gfar_write(&etsects->regs->tmr_prsc, etsects->tmr_prsc); | ||
506 | gfar_write(&etsects->regs->tmr_fiper1, etsects->tmr_fiper1); | ||
507 | gfar_write(&etsects->regs->tmr_fiper2, etsects->tmr_fiper2); | ||
508 | set_alarm(etsects); | ||
509 | gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl|FS|RTPE|TE|FRD); | ||
510 | |||
511 | spin_unlock_irqrestore(&etsects->lock, flags); | ||
512 | |||
513 | etsects->clock = ptp_clock_register(&etsects->caps); | ||
514 | if (IS_ERR(etsects->clock)) { | ||
515 | err = PTR_ERR(etsects->clock); | ||
516 | goto no_clock; | ||
517 | } | ||
518 | |||
519 | dev_set_drvdata(&dev->dev, etsects); | ||
520 | |||
521 | return 0; | ||
522 | |||
523 | no_clock: iounmap(etsects->regs); | ||
524 | no_ioremap: | ||
525 | release_resource(etsects->rsrc); | ||
526 | no_resource: | ||
527 | free_irq(etsects->irq, etsects); | ||
528 | no_node: | ||
529 | kfree(etsects); | ||
530 | no_memory: | ||
531 | return err; | ||
532 | } | ||
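Worked example of the tmr_ctrl composition in the probe path (device-tree values are illustrative): with fsl,tclk-period = 10 and the default cksel of 1, tmr_ctrl = (10 & 0x3ff) << 16 | (1 & 0x3) << 0 = 0x000a0001. The second gfar_write() then ORs in FS|RTPE|TE|FRD, which starts the 1588 timer with FIPER pulses enabled.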
533 | |||
534 | static int gianfar_ptp_remove(struct platform_device *dev) | ||
535 | { | ||
536 | struct etsects *etsects = dev_get_drvdata(&dev->dev); | ||
537 | |||
538 | gfar_write(&etsects->regs->tmr_temask, 0); | ||
539 | gfar_write(&etsects->regs->tmr_ctrl, 0); | ||
540 | |||
541 | ptp_clock_unregister(etsects->clock); | ||
542 | iounmap(etsects->regs); | ||
543 | release_resource(etsects->rsrc); | ||
544 | free_irq(etsects->irq, etsects); | ||
545 | kfree(etsects); | ||
546 | |||
547 | return 0; | ||
548 | } | ||
549 | |||
550 | static struct of_device_id match_table[] = { | ||
551 | { .compatible = "fsl,etsec-ptp" }, | ||
552 | {}, | ||
553 | }; | ||
554 | |||
555 | static struct platform_driver gianfar_ptp_driver = { | ||
556 | .driver = { | ||
557 | .name = "gianfar_ptp", | ||
558 | .of_match_table = match_table, | ||
559 | .owner = THIS_MODULE, | ||
560 | }, | ||
561 | .probe = gianfar_ptp_probe, | ||
562 | .remove = gianfar_ptp_remove, | ||
563 | }; | ||
564 | |||
565 | /* module operations */ | ||
566 | |||
567 | static int __init ptp_gianfar_init(void) | ||
568 | { | ||
569 | return platform_driver_register(&gianfar_ptp_driver); | ||
570 | } | ||
571 | |||
572 | module_init(ptp_gianfar_init); | ||
573 | |||
574 | static void __exit ptp_gianfar_exit(void) | ||
575 | { | ||
576 | platform_driver_unregister(&gianfar_ptp_driver); | ||
577 | } | ||
578 | |||
579 | module_exit(ptp_gianfar_exit); | ||
580 | |||
581 | MODULE_AUTHOR("Richard Cochran <richard.cochran@omicron.at>"); | ||
582 | MODULE_DESCRIPTION("PTP clock using the eTSEC"); | ||
583 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/net/ethernet/freescale/gianfar_sysfs.c b/drivers/net/ethernet/freescale/gianfar_sysfs.c new file mode 100644 index 000000000000..64f4094ac7f1 --- /dev/null +++ b/drivers/net/ethernet/freescale/gianfar_sysfs.c | |||
@@ -0,0 +1,341 @@ | |||
1 | /* | ||
2 | * drivers/net/gianfar_sysfs.c | ||
3 | * | ||
4 | * Gianfar Ethernet Driver | ||
5 | * This driver is designed for the non-CPM ethernet controllers | ||
6 | * on the 85xx and 83xx family of integrated processors | ||
7 | * Based on 8260_io/fcc_enet.c | ||
8 | * | ||
9 | * Author: Andy Fleming | ||
10 | * Maintainer: Kumar Gala (galak@kernel.crashing.org) | ||
11 | * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com> | ||
12 | * | ||
13 | * Copyright 2002-2009 Freescale Semiconductor, Inc. | ||
14 | * | ||
15 | * This program is free software; you can redistribute it and/or modify it | ||
16 | * under the terms of the GNU General Public License as published by the | ||
17 | * Free Software Foundation; either version 2 of the License, or (at your | ||
18 | * option) any later version. | ||
19 | * | ||
20 | * Sysfs file creation and management | ||
21 | */ | ||
22 | |||
23 | #include <linux/kernel.h> | ||
24 | #include <linux/string.h> | ||
25 | #include <linux/errno.h> | ||
26 | #include <linux/unistd.h> | ||
27 | #include <linux/init.h> | ||
28 | #include <linux/delay.h> | ||
29 | #include <linux/etherdevice.h> | ||
30 | #include <linux/spinlock.h> | ||
31 | #include <linux/mm.h> | ||
32 | #include <linux/device.h> | ||
33 | |||
34 | #include <asm/uaccess.h> | ||
35 | #include <linux/module.h> | ||
36 | |||
37 | #include "gianfar.h" | ||
38 | |||
39 | static ssize_t gfar_show_bd_stash(struct device *dev, | ||
40 | struct device_attribute *attr, char *buf) | ||
41 | { | ||
42 | struct gfar_private *priv = netdev_priv(to_net_dev(dev)); | ||
43 | |||
44 | return sprintf(buf, "%s\n", priv->bd_stash_en ? "on" : "off"); | ||
45 | } | ||
46 | |||
47 | static ssize_t gfar_set_bd_stash(struct device *dev, | ||
48 | struct device_attribute *attr, | ||
49 | const char *buf, size_t count) | ||
50 | { | ||
51 | struct gfar_private *priv = netdev_priv(to_net_dev(dev)); | ||
52 | struct gfar __iomem *regs = priv->gfargrp[0].regs; | ||
53 | int new_setting = 0; | ||
54 | u32 temp; | ||
55 | unsigned long flags; | ||
56 | |||
57 | if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BD_STASHING)) | ||
58 | return count; | ||
59 | |||
60 | |||
61 | /* Find out the new setting */ | ||
62 | if (!strncmp("on", buf, count - 1) || !strncmp("1", buf, count - 1)) | ||
63 | new_setting = 1; | ||
64 | else if (!strncmp("off", buf, count - 1) || | ||
65 | !strncmp("0", buf, count - 1)) | ||
66 | new_setting = 0; | ||
67 | else | ||
68 | return count; | ||
69 | |||
70 | |||
71 | local_irq_save(flags); | ||
72 | lock_rx_qs(priv); | ||
73 | |||
74 | /* Set the new stashing value */ | ||
75 | priv->bd_stash_en = new_setting; | ||
76 | |||
77 | temp = gfar_read(®s->attr); | ||
78 | |||
79 | if (new_setting) | ||
80 | temp |= ATTR_BDSTASH; | ||
81 | else | ||
82 | temp &= ~(ATTR_BDSTASH); | ||
83 | |||
84 | gfar_write(®s->attr, temp); | ||
85 | |||
86 | unlock_rx_qs(priv); | ||
87 | local_irq_restore(flags); | ||
88 | |||
89 | return count; | ||
90 | } | ||
91 | |||
92 | static DEVICE_ATTR(bd_stash, 0644, gfar_show_bd_stash, gfar_set_bd_stash); | ||
93 | |||
94 | static ssize_t gfar_show_rx_stash_size(struct device *dev, | ||
95 | struct device_attribute *attr, char *buf) | ||
96 | { | ||
97 | struct gfar_private *priv = netdev_priv(to_net_dev(dev)); | ||
98 | |||
99 | return sprintf(buf, "%d\n", priv->rx_stash_size); | ||
100 | } | ||
101 | |||
102 | static ssize_t gfar_set_rx_stash_size(struct device *dev, | ||
103 | struct device_attribute *attr, | ||
104 | const char *buf, size_t count) | ||
105 | { | ||
106 | struct gfar_private *priv = netdev_priv(to_net_dev(dev)); | ||
107 | struct gfar __iomem *regs = priv->gfargrp[0].regs; | ||
108 | unsigned int length = simple_strtoul(buf, NULL, 0); | ||
109 | u32 temp; | ||
110 | unsigned long flags; | ||
111 | |||
112 | if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BUF_STASHING)) | ||
113 | return count; | ||
114 | |||
115 | local_irq_save(flags); | ||
116 | lock_rx_qs(priv); | ||
117 | |||
118 | if (length > priv->rx_buffer_size) | ||
119 | goto out; | ||
120 | |||
121 | if (length == priv->rx_stash_size) | ||
122 | goto out; | ||
123 | |||
124 | priv->rx_stash_size = length; | ||
125 | |||
126 | temp = gfar_read(®s->attreli); | ||
127 | temp &= ~ATTRELI_EL_MASK; | ||
128 | temp |= ATTRELI_EL(length); | ||
129 | gfar_write(®s->attreli, temp); | ||
130 | |||
131 | /* Turn stashing on/off as appropriate */ | ||
132 | temp = gfar_read(®s->attr); | ||
133 | |||
134 | if (length) | ||
135 | temp |= ATTR_BUFSTASH; | ||
136 | else | ||
137 | temp &= ~(ATTR_BUFSTASH); | ||
138 | |||
139 | gfar_write(®s->attr, temp); | ||
140 | |||
141 | out: | ||
142 | unlock_rx_qs(priv); | ||
143 | local_irq_restore(flags); | ||
144 | |||
145 | return count; | ||
146 | } | ||
147 | |||
148 | static DEVICE_ATTR(rx_stash_size, 0644, gfar_show_rx_stash_size, | ||
149 | gfar_set_rx_stash_size); | ||
150 | |||
151 | /* Stashing will only be enabled when rx_stash_size != 0 */ | ||
152 | static ssize_t gfar_show_rx_stash_index(struct device *dev, | ||
153 | struct device_attribute *attr, | ||
154 | char *buf) | ||
155 | { | ||
156 | struct gfar_private *priv = netdev_priv(to_net_dev(dev)); | ||
157 | |||
158 | return sprintf(buf, "%d\n", priv->rx_stash_index); | ||
159 | } | ||
160 | |||
161 | static ssize_t gfar_set_rx_stash_index(struct device *dev, | ||
162 | struct device_attribute *attr, | ||
163 | const char *buf, size_t count) | ||
164 | { | ||
165 | struct gfar_private *priv = netdev_priv(to_net_dev(dev)); | ||
166 | struct gfar __iomem *regs = priv->gfargrp[0].regs; | ||
167 | unsigned short index = simple_strtoul(buf, NULL, 0); | ||
168 | u32 temp; | ||
169 | unsigned long flags; | ||
170 | |||
171 | if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BUF_STASHING)) | ||
172 | return count; | ||
173 | |||
174 | local_irq_save(flags); | ||
175 | lock_rx_qs(priv); | ||
176 | |||
177 | if (index > priv->rx_stash_size) | ||
178 | goto out; | ||
179 | |||
180 | if (index == priv->rx_stash_index) | ||
181 | goto out; | ||
182 | |||
183 | priv->rx_stash_index = index; | ||
184 | |||
185 | temp = gfar_read(®s->attreli); | ||
186 | temp &= ~ATTRELI_EI_MASK; | ||
187 | temp |= ATTRELI_EI(index); | ||
188 | gfar_write(®s->attreli, temp); | ||
189 | |||
190 | out: | ||
191 | unlock_rx_qs(priv); | ||
192 | local_irq_restore(flags); | ||
193 | |||
194 | return count; | ||
195 | } | ||
196 | |||
197 | static DEVICE_ATTR(rx_stash_index, 0644, gfar_show_rx_stash_index, | ||
198 | gfar_set_rx_stash_index); | ||
199 | |||
200 | static ssize_t gfar_show_fifo_threshold(struct device *dev, | ||
201 | struct device_attribute *attr, | ||
202 | char *buf) | ||
203 | { | ||
204 | struct gfar_private *priv = netdev_priv(to_net_dev(dev)); | ||
205 | |||
206 | return sprintf(buf, "%d\n", priv->fifo_threshold); | ||
207 | } | ||
208 | |||
209 | static ssize_t gfar_set_fifo_threshold(struct device *dev, | ||
210 | struct device_attribute *attr, | ||
211 | const char *buf, size_t count) | ||
212 | { | ||
213 | struct gfar_private *priv = netdev_priv(to_net_dev(dev)); | ||
214 | struct gfar __iomem *regs = priv->gfargrp[0].regs; | ||
215 | unsigned int length = simple_strtoul(buf, NULL, 0); | ||
216 | u32 temp; | ||
217 | unsigned long flags; | ||
218 | |||
219 | if (length > GFAR_MAX_FIFO_THRESHOLD) | ||
220 | return count; | ||
221 | |||
222 | local_irq_save(flags); | ||
223 | lock_tx_qs(priv); | ||
224 | |||
225 | priv->fifo_threshold = length; | ||
226 | |||
227 | temp = gfar_read(®s->fifo_tx_thr); | ||
228 | temp &= ~FIFO_TX_THR_MASK; | ||
229 | temp |= length; | ||
230 | gfar_write(®s->fifo_tx_thr, temp); | ||
231 | |||
232 | unlock_tx_qs(priv); | ||
233 | local_irq_restore(flags); | ||
234 | |||
235 | return count; | ||
236 | } | ||
237 | |||
238 | static DEVICE_ATTR(fifo_threshold, 0644, gfar_show_fifo_threshold, | ||
239 | gfar_set_fifo_threshold); | ||
240 | |||
241 | static ssize_t gfar_show_fifo_starve(struct device *dev, | ||
242 | struct device_attribute *attr, char *buf) | ||
243 | { | ||
244 | struct gfar_private *priv = netdev_priv(to_net_dev(dev)); | ||
245 | |||
246 | return sprintf(buf, "%d\n", priv->fifo_starve); | ||
247 | } | ||
248 | |||
249 | static ssize_t gfar_set_fifo_starve(struct device *dev, | ||
250 | struct device_attribute *attr, | ||
251 | const char *buf, size_t count) | ||
252 | { | ||
253 | struct gfar_private *priv = netdev_priv(to_net_dev(dev)); | ||
254 | struct gfar __iomem *regs = priv->gfargrp[0].regs; | ||
255 | unsigned int num = simple_strtoul(buf, NULL, 0); | ||
256 | u32 temp; | ||
257 | unsigned long flags; | ||
258 | |||
259 | if (num > GFAR_MAX_FIFO_STARVE) | ||
260 | return count; | ||
261 | |||
262 | local_irq_save(flags); | ||
263 | lock_tx_qs(priv); | ||
264 | |||
265 | priv->fifo_starve = num; | ||
266 | |||
267 | temp = gfar_read(®s->fifo_tx_starve); | ||
268 | temp &= ~FIFO_TX_STARVE_MASK; | ||
269 | temp |= num; | ||
270 | gfar_write(®s->fifo_tx_starve, temp); | ||
271 | |||
272 | unlock_tx_qs(priv); | ||
273 | local_irq_restore(flags); | ||
274 | |||
275 | return count; | ||
276 | } | ||
277 | |||
278 | static DEVICE_ATTR(fifo_starve, 0644, gfar_show_fifo_starve, | ||
279 | gfar_set_fifo_starve); | ||
280 | |||
281 | static ssize_t gfar_show_fifo_starve_off(struct device *dev, | ||
282 | struct device_attribute *attr, | ||
283 | char *buf) | ||
284 | { | ||
285 | struct gfar_private *priv = netdev_priv(to_net_dev(dev)); | ||
286 | |||
287 | return sprintf(buf, "%d\n", priv->fifo_starve_off); | ||
288 | } | ||
289 | |||
290 | static ssize_t gfar_set_fifo_starve_off(struct device *dev, | ||
291 | struct device_attribute *attr, | ||
292 | const char *buf, size_t count) | ||
293 | { | ||
294 | struct gfar_private *priv = netdev_priv(to_net_dev(dev)); | ||
295 | struct gfar __iomem *regs = priv->gfargrp[0].regs; | ||
296 | unsigned int num = simple_strtoul(buf, NULL, 0); | ||
297 | u32 temp; | ||
298 | unsigned long flags; | ||
299 | |||
300 | if (num > GFAR_MAX_FIFO_STARVE_OFF) | ||
301 | return count; | ||
302 | |||
303 | local_irq_save(flags); | ||
304 | lock_tx_qs(priv); | ||
305 | |||
306 | priv->fifo_starve_off = num; | ||
307 | |||
308 | temp = gfar_read(®s->fifo_tx_starve_shutoff); | ||
309 | temp &= ~FIFO_TX_STARVE_OFF_MASK; | ||
310 | temp |= num; | ||
311 | gfar_write(®s->fifo_tx_starve_shutoff, temp); | ||
312 | |||
313 | unlock_tx_qs(priv); | ||
314 | local_irq_restore(flags); | ||
315 | |||
316 | return count; | ||
317 | } | ||
318 | |||
319 | static DEVICE_ATTR(fifo_starve_off, 0644, gfar_show_fifo_starve_off, | ||
320 | gfar_set_fifo_starve_off); | ||
321 | |||
322 | void gfar_init_sysfs(struct net_device *dev) | ||
323 | { | ||
324 | struct gfar_private *priv = netdev_priv(dev); | ||
325 | int rc; | ||
326 | |||
327 | /* Initialize the default values */ | ||
328 | priv->fifo_threshold = DEFAULT_FIFO_TX_THR; | ||
329 | priv->fifo_starve = DEFAULT_FIFO_TX_STARVE; | ||
330 | priv->fifo_starve_off = DEFAULT_FIFO_TX_STARVE_OFF; | ||
331 | |||
332 | /* Create our sysfs files */ | ||
333 | rc = device_create_file(&dev->dev, &dev_attr_bd_stash); | ||
334 | rc |= device_create_file(&dev->dev, &dev_attr_rx_stash_size); | ||
335 | rc |= device_create_file(&dev->dev, &dev_attr_rx_stash_index); | ||
336 | rc |= device_create_file(&dev->dev, &dev_attr_fifo_threshold); | ||
337 | rc |= device_create_file(&dev->dev, &dev_attr_fifo_starve); | ||
338 | rc |= device_create_file(&dev->dev, &dev_attr_fifo_starve_off); | ||
339 | if (rc) | ||
340 | dev_err(&dev->dev, "Error creating gianfar sysfs files.\n"); | ||
341 | } | ||
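Because the attributes are created on the net_device's embedded struct device, they appear under /sys/class/net/<interface>/ at runtime. A minimal userspace sketch that enables BD stashing (the interface name eth0 is an assumption; substitute the actual eTSEC netdev):

	#include <stdio.h>

	int main(void)
	{
		/* path assumes an interface named eth0 */
		FILE *f = fopen("/sys/class/net/eth0/bd_stash", "w");

		if (!f)
			return 1;
		fputs("on\n", f);	/* gfar_set_bd_stash() accepts on/off/1/0 */
		return fclose(f) ? 1 : 0;
	}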
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c new file mode 100644 index 000000000000..42f8e31b0bbb --- /dev/null +++ b/drivers/net/ethernet/freescale/ucc_geth.c | |||
@@ -0,0 +1,4026 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2006-2009 Freescale Semiconductor, Inc. All rights reserved. | ||
3 | * | ||
4 | * Author: Shlomi Gridish <gridish@freescale.com> | ||
5 | * Li Yang <leoli@freescale.com> | ||
6 | * | ||
7 | * Description: | ||
8 | * QE UCC Gigabit Ethernet Driver | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify it | ||
11 | * under the terms of the GNU General Public License as published by the | ||
12 | * Free Software Foundation; either version 2 of the License, or (at your | ||
13 | * option) any later version. | ||
14 | */ | ||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/errno.h> | ||
18 | #include <linux/slab.h> | ||
19 | #include <linux/stddef.h> | ||
20 | #include <linux/interrupt.h> | ||
21 | #include <linux/netdevice.h> | ||
22 | #include <linux/etherdevice.h> | ||
23 | #include <linux/skbuff.h> | ||
24 | #include <linux/spinlock.h> | ||
25 | #include <linux/mm.h> | ||
26 | #include <linux/dma-mapping.h> | ||
27 | #include <linux/mii.h> | ||
28 | #include <linux/phy.h> | ||
29 | #include <linux/workqueue.h> | ||
30 | #include <linux/of_mdio.h> | ||
31 | #include <linux/of_net.h> | ||
32 | #include <linux/of_platform.h> | ||
33 | |||
34 | #include <asm/uaccess.h> | ||
35 | #include <asm/irq.h> | ||
36 | #include <asm/io.h> | ||
37 | #include <asm/immap_qe.h> | ||
38 | #include <asm/qe.h> | ||
39 | #include <asm/ucc.h> | ||
40 | #include <asm/ucc_fast.h> | ||
41 | #include <asm/machdep.h> | ||
42 | |||
43 | #include "ucc_geth.h" | ||
44 | #include "fsl_pq_mdio.h" | ||
45 | |||
46 | #undef DEBUG | ||
47 | |||
48 | #define ugeth_printk(level, format, arg...) \ | ||
49 | printk(level format "\n", ## arg) | ||
50 | |||
51 | #define ugeth_dbg(format, arg...) \ | ||
52 | ugeth_printk(KERN_DEBUG , format , ## arg) | ||
53 | #define ugeth_err(format, arg...) \ | ||
54 | ugeth_printk(KERN_ERR , format , ## arg) | ||
55 | #define ugeth_info(format, arg...) \ | ||
56 | ugeth_printk(KERN_INFO , format , ## arg) | ||
57 | #define ugeth_warn(format, arg...) \ | ||
58 | ugeth_printk(KERN_WARNING , format , ## arg) | ||
59 | |||
60 | #ifdef UGETH_VERBOSE_DEBUG | ||
61 | #define ugeth_vdbg ugeth_dbg | ||
62 | #else | ||
63 | #define ugeth_vdbg(fmt, args...) do { } while (0) | ||
64 | #endif /* UGETH_VERBOSE_DEBUG */ | ||
65 | #define UGETH_MSG_DEFAULT ((NETIF_MSG_IFUP << 1) - 1) | ||
66 | |||
67 | |||
68 | static DEFINE_SPINLOCK(ugeth_lock); | ||
69 | |||
70 | static struct { | ||
71 | u32 msg_enable; | ||
72 | } debug = { -1 }; | ||
73 | |||
74 | module_param_named(debug, debug.msg_enable, int, 0); | ||
75 | MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 0xffff=all)"); | ||
76 | |||
77 | static struct ucc_geth_info ugeth_primary_info = { | ||
78 | .uf_info = { | ||
79 | .bd_mem_part = MEM_PART_SYSTEM, | ||
80 | .rtsm = UCC_FAST_SEND_IDLES_BETWEEN_FRAMES, | ||
81 | .max_rx_buf_length = 1536, | ||
82 | /* adjusted at startup if max-speed 1000 */ | ||
83 | .urfs = UCC_GETH_URFS_INIT, | ||
84 | .urfet = UCC_GETH_URFET_INIT, | ||
85 | .urfset = UCC_GETH_URFSET_INIT, | ||
86 | .utfs = UCC_GETH_UTFS_INIT, | ||
87 | .utfet = UCC_GETH_UTFET_INIT, | ||
88 | .utftt = UCC_GETH_UTFTT_INIT, | ||
89 | .ufpt = 256, | ||
90 | .mode = UCC_FAST_PROTOCOL_MODE_ETHERNET, | ||
91 | .ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL, | ||
92 | .tenc = UCC_FAST_TX_ENCODING_NRZ, | ||
93 | .renc = UCC_FAST_RX_ENCODING_NRZ, | ||
94 | .tcrc = UCC_FAST_16_BIT_CRC, | ||
95 | .synl = UCC_FAST_SYNC_LEN_NOT_USED, | ||
96 | }, | ||
97 | .numQueuesTx = 1, | ||
98 | .numQueuesRx = 1, | ||
99 | .extendedFilteringChainPointer = ((uint32_t) NULL), | ||
100 | .typeorlen = 3072 /*1536 */ , | ||
101 | .nonBackToBackIfgPart1 = 0x40, | ||
102 | .nonBackToBackIfgPart2 = 0x60, | ||
103 | .miminumInterFrameGapEnforcement = 0x50, | ||
104 | .backToBackInterFrameGap = 0x60, | ||
105 | .mblinterval = 128, | ||
106 | .nortsrbytetime = 5, | ||
107 | .fracsiz = 1, | ||
108 | .strictpriorityq = 0xff, | ||
109 | .altBebTruncation = 0xa, | ||
110 | .excessDefer = 1, | ||
111 | .maxRetransmission = 0xf, | ||
112 | .collisionWindow = 0x37, | ||
113 | .receiveFlowControl = 1, | ||
114 | .transmitFlowControl = 1, | ||
115 | .maxGroupAddrInHash = 4, | ||
116 | .maxIndAddrInHash = 4, | ||
117 | .prel = 7, | ||
118 | .maxFrameLength = 1518, | ||
119 | .minFrameLength = 64, | ||
120 | .maxD1Length = 1520, | ||
121 | .maxD2Length = 1520, | ||
122 | .vlantype = 0x8100, | ||
123 | .ecamptr = ((uint32_t) NULL), | ||
124 | .eventRegMask = UCCE_OTHER, | ||
125 | .pausePeriod = 0xf000, | ||
126 | .interruptcoalescingmaxvalue = {1, 1, 1, 1, 1, 1, 1, 1}, | ||
127 | .bdRingLenTx = { | ||
128 | TX_BD_RING_LEN, | ||
129 | TX_BD_RING_LEN, | ||
130 | TX_BD_RING_LEN, | ||
131 | TX_BD_RING_LEN, | ||
132 | TX_BD_RING_LEN, | ||
133 | TX_BD_RING_LEN, | ||
134 | TX_BD_RING_LEN, | ||
135 | TX_BD_RING_LEN}, | ||
136 | |||
137 | .bdRingLenRx = { | ||
138 | RX_BD_RING_LEN, | ||
139 | RX_BD_RING_LEN, | ||
140 | RX_BD_RING_LEN, | ||
141 | RX_BD_RING_LEN, | ||
142 | RX_BD_RING_LEN, | ||
143 | RX_BD_RING_LEN, | ||
144 | RX_BD_RING_LEN, | ||
145 | RX_BD_RING_LEN}, | ||
146 | |||
147 | .numStationAddresses = UCC_GETH_NUM_OF_STATION_ADDRESSES_1, | ||
148 | .largestexternallookupkeysize = | ||
149 | QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE, | ||
150 | .statisticsMode = UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE | | ||
151 | UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX | | ||
152 | UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX, | ||
153 | .vlanOperationTagged = UCC_GETH_VLAN_OPERATION_TAGGED_NOP, | ||
154 | .vlanOperationNonTagged = UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP, | ||
155 | .rxQoSMode = UCC_GETH_QOS_MODE_DEFAULT, | ||
156 | .aufc = UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_NONE, | ||
157 | .padAndCrc = MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC, | ||
158 | .numThreadsTx = UCC_GETH_NUM_OF_THREADS_1, | ||
159 | .numThreadsRx = UCC_GETH_NUM_OF_THREADS_1, | ||
160 | .riscTx = QE_RISC_ALLOCATION_RISC1_AND_RISC2, | ||
161 | .riscRx = QE_RISC_ALLOCATION_RISC1_AND_RISC2, | ||
162 | }; | ||
163 | |||
164 | static struct ucc_geth_info ugeth_info[8]; | ||
165 | |||
166 | #ifdef DEBUG | ||
167 | static void mem_disp(u8 *addr, int size) | ||
168 | { | ||
169 | u8 *i; | ||
170 | int size16Align = (size >> 4) << 4; | ||
171 | int size4Align = (size >> 2) << 2; | ||
172 | int notAlign = 0; | ||
173 | if (size % 16) | ||
174 | notAlign = 1; | ||
175 | |||
176 | for (i = addr; (u32) i < (u32) addr + size16Align; i += 16) | ||
177 | printk("0x%08x: %08x %08x %08x %08x\r\n", | ||
178 | (u32) i, | ||
179 | *((u32 *) (i)), | ||
180 | *((u32 *) (i + 4)), | ||
181 | *((u32 *) (i + 8)), *((u32 *) (i + 12))); | ||
182 | if (notAlign == 1) | ||
183 | printk("0x%08x: ", (u32) i); | ||
184 | for (; (u32) i < (u32) addr + size4Align; i += 4) | ||
185 | printk("%08x ", *((u32 *) (i))); | ||
186 | for (; (u32) i < (u32) addr + size; i++) | ||
187 | printk("%02x", *((u8 *) (i))); | ||
188 | if (notAlign == 1) | ||
189 | printk("\r\n"); | ||
190 | } | ||
191 | #endif /* DEBUG */ | ||
192 | |||
193 | static struct list_head *dequeue(struct list_head *lh) | ||
194 | { | ||
195 | unsigned long flags; | ||
196 | |||
197 | spin_lock_irqsave(&ugeth_lock, flags); | ||
198 | if (!list_empty(lh)) { | ||
199 | struct list_head *node = lh->next; | ||
200 | list_del(node); | ||
201 | spin_unlock_irqrestore(&ugeth_lock, flags); | ||
202 | return node; | ||
203 | } else { | ||
204 | spin_unlock_irqrestore(&ugeth_lock, flags); | ||
205 | return NULL; | ||
206 | } | ||
207 | } | ||
208 | |||
209 | static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth, | ||
210 | u8 __iomem *bd) | ||
211 | { | ||
212 | struct sk_buff *skb = NULL; | ||
213 | |||
214 | skb = __skb_dequeue(&ugeth->rx_recycle); | ||
215 | if (!skb) | ||
216 | skb = dev_alloc_skb(ugeth->ug_info->uf_info.max_rx_buf_length + | ||
217 | UCC_GETH_RX_DATA_BUF_ALIGNMENT); | ||
218 | if (skb == NULL) | ||
219 | return NULL; | ||
220 | |||
221 | /* We need the data buffer to be aligned properly. We will reserve | ||
222 | * as many bytes as needed to align the data properly | ||
223 | */ | ||
224 | skb_reserve(skb, | ||
225 | UCC_GETH_RX_DATA_BUF_ALIGNMENT - | ||
226 | (((unsigned)skb->data) & (UCC_GETH_RX_DATA_BUF_ALIGNMENT - | ||
227 | 1))); | ||
228 | |||
229 | skb->dev = ugeth->ndev; | ||
230 | |||
231 | out_be32(&((struct qe_bd __iomem *)bd)->buf, | ||
232 | dma_map_single(ugeth->dev, | ||
233 | skb->data, | ||
234 | ugeth->ug_info->uf_info.max_rx_buf_length + | ||
235 | UCC_GETH_RX_DATA_BUF_ALIGNMENT, | ||
236 | DMA_FROM_DEVICE)); | ||
237 | |||
238 | out_be32((u32 __iomem *)bd, | ||
239 | (R_E | R_I | (in_be32((u32 __iomem*)bd) & R_W))); | ||
240 | |||
241 | return skb; | ||
242 | } | ||
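A worked example of the skb_reserve() alignment math above (addresses illustrative, assuming an alignment of 64): if skb->data ends in ...0x2c, the reserve is 64 - (0x2c & 63) = 64 - 44 = 20 bytes, which lands the buffer on the next 64-byte boundary. When data is already aligned the formula reserves a full 64 bytes rather than zero; that is harmless here because the allocation above includes a full extra alignment's worth of headroom.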
243 | |||
244 | static int rx_bd_buffer_set(struct ucc_geth_private *ugeth, u8 rxQ) | ||
245 | { | ||
246 | u8 __iomem *bd; | ||
247 | u32 bd_status; | ||
248 | struct sk_buff *skb; | ||
249 | int i; | ||
250 | |||
251 | bd = ugeth->p_rx_bd_ring[rxQ]; | ||
252 | i = 0; | ||
253 | |||
254 | do { | ||
255 | bd_status = in_be32((u32 __iomem *)bd); | ||
256 | skb = get_new_skb(ugeth, bd); | ||
257 | |||
258 | if (!skb) /* If we cannot allocate a data buffer, | ||
259 | abort; cleanup happens elsewhere */ | ||
260 | return -ENOMEM; | ||
261 | |||
262 | ugeth->rx_skbuff[rxQ][i] = skb; | ||
263 | |||
264 | /* advance the BD pointer */ | ||
265 | bd += sizeof(struct qe_bd); | ||
266 | i++; | ||
267 | } while (!(bd_status & R_W)); | ||
268 | |||
269 | return 0; | ||
270 | } | ||
271 | |||
272 | static int fill_init_enet_entries(struct ucc_geth_private *ugeth, | ||
273 | u32 *p_start, | ||
274 | u8 num_entries, | ||
275 | u32 thread_size, | ||
276 | u32 thread_alignment, | ||
277 | unsigned int risc, | ||
278 | int skip_page_for_first_entry) | ||
279 | { | ||
280 | u32 init_enet_offset; | ||
281 | u8 i; | ||
282 | int snum; | ||
283 | |||
284 | for (i = 0; i < num_entries; i++) { | ||
285 | if ((snum = qe_get_snum()) < 0) { | ||
286 | if (netif_msg_ifup(ugeth)) | ||
287 | ugeth_err("fill_init_enet_entries: Cannot get SNUM."); | ||
288 | return snum; | ||
289 | } | ||
290 | if ((i == 0) && skip_page_for_first_entry) | ||
291 | /* First entry of Rx does not have page */ | ||
292 | init_enet_offset = 0; | ||
293 | else { | ||
294 | init_enet_offset = | ||
295 | qe_muram_alloc(thread_size, thread_alignment); | ||
296 | if (IS_ERR_VALUE(init_enet_offset)) { | ||
297 | if (netif_msg_ifup(ugeth)) | ||
298 | ugeth_err("fill_init_enet_entries: Cannot allocate DPRAM memory."); | ||
299 | qe_put_snum((u8) snum); | ||
300 | return -ENOMEM; | ||
301 | } | ||
302 | } | ||
303 | *(p_start++) = | ||
304 | ((u8) snum << ENET_INIT_PARAM_SNUM_SHIFT) | init_enet_offset | ||
305 | | risc; | ||
306 | } | ||
307 | |||
308 | return 0; | ||
309 | } | ||
310 | |||
311 | static int return_init_enet_entries(struct ucc_geth_private *ugeth, | ||
312 | u32 *p_start, | ||
313 | u8 num_entries, | ||
314 | unsigned int risc, | ||
315 | int skip_page_for_first_entry) | ||
316 | { | ||
317 | u32 init_enet_offset; | ||
318 | u8 i; | ||
319 | int snum; | ||
320 | |||
321 | for (i = 0; i < num_entries; i++) { | ||
322 | u32 val = *p_start; | ||
323 | |||
324 | /* Check that this entry was actually valid -- | ||
325 | needed in case an allocation failed */ | ||
326 | if ((val & ENET_INIT_PARAM_RISC_MASK) == risc) { | ||
327 | snum = | ||
328 | (u32) (val & ENET_INIT_PARAM_SNUM_MASK) >> | ||
329 | ENET_INIT_PARAM_SNUM_SHIFT; | ||
330 | qe_put_snum((u8) snum); | ||
331 | if (!((i == 0) && skip_page_for_first_entry)) { | ||
332 | /* First entry of Rx does not have page */ | ||
333 | init_enet_offset = | ||
334 | (val & ENET_INIT_PARAM_PTR_MASK); | ||
335 | qe_muram_free(init_enet_offset); | ||
336 | } | ||
337 | *p_start++ = 0; | ||
338 | } | ||
339 | } | ||
340 | |||
341 | return 0; | ||
342 | } | ||
343 | |||
344 | #ifdef DEBUG | ||
345 | static int dump_init_enet_entries(struct ucc_geth_private *ugeth, | ||
346 | u32 __iomem *p_start, | ||
347 | u8 num_entries, | ||
348 | u32 thread_size, | ||
349 | unsigned int risc, | ||
350 | int skip_page_for_first_entry) | ||
351 | { | ||
352 | u32 init_enet_offset; | ||
353 | u8 i; | ||
354 | int snum; | ||
355 | |||
356 | for (i = 0; i < num_entries; i++) { | ||
357 | u32 val = in_be32(p_start); | ||
358 | |||
359 | /* Check that this entry was actually valid -- | ||
360 | needed in case an allocation failed */ | ||
361 | if ((val & ENET_INIT_PARAM_RISC_MASK) == risc) { | ||
362 | snum = | ||
363 | (u32) (val & ENET_INIT_PARAM_SNUM_MASK) >> | ||
364 | ENET_INIT_PARAM_SNUM_SHIFT; | ||
365 | qe_put_snum((u8) snum); | ||
366 | if (!((i == 0) && skip_page_for_first_entry)) { | ||
367 | /* First entry of Rx does not have page */ | ||
368 | init_enet_offset = | ||
369 | (in_be32(p_start) & | ||
370 | ENET_INIT_PARAM_PTR_MASK); | ||
371 | ugeth_info("Init enet entry %d:", i); | ||
372 | ugeth_info("Base address: 0x%08x", | ||
373 | (u32) | ||
374 | qe_muram_addr(init_enet_offset)); | ||
375 | mem_disp(qe_muram_addr(init_enet_offset), | ||
376 | thread_size); | ||
377 | } | ||
378 | p_start++; | ||
379 | } | ||
380 | } | ||
381 | |||
382 | return 0; | ||
383 | } | ||
384 | #endif | ||
385 | |||
386 | static void put_enet_addr_container(struct enet_addr_container *enet_addr_cont) | ||
387 | { | ||
388 | kfree(enet_addr_cont); | ||
389 | } | ||
390 | |||
391 | static void set_mac_addr(__be16 __iomem *reg, u8 *mac) | ||
392 | { | ||
393 | out_be16(®[0], ((u16)mac[5] << 8) | mac[4]); | ||
394 | out_be16(®[1], ((u16)mac[3] << 8) | mac[2]); | ||
395 | out_be16(®[2], ((u16)mac[1] << 8) | mac[0]); | ||
396 | } | ||
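Worked example of the byte order in set_mac_addr(): for the (illustrative) MAC address 00:04:9f:01:02:03, mac[0]..mac[5] = 00 04 9f 01 02 03, so the three 16-bit stores are reg[0] = 0x0302, reg[1] = 0x019f and reg[2] = 0x0400, i.e. the address stored back to front. As the comment in hw_add_addr_in_hash() below explains, the bytes are reversed because Ethernet addresses are defined in little-endian transmission order while the registers are big-endian.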
397 | |||
398 | static int hw_clear_addr_in_paddr(struct ucc_geth_private *ugeth, u8 paddr_num) | ||
399 | { | ||
400 | struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; | ||
401 | |||
402 | if (paddr_num >= NUM_OF_PADDRS) { | ||
403 | ugeth_warn("%s: Illegal paddr_num.", __func__); | ||
404 | return -EINVAL; | ||
405 | } | ||
406 | |||
407 | p_82xx_addr_filt = | ||
408 | (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->p_rx_glbl_pram-> | ||
409 | addressfiltering; | ||
410 | |||
411 | /* Writing address ff.ff.ff.ff.ff.ff disables address | ||
412 | recognition for this register */ | ||
413 | out_be16(&p_82xx_addr_filt->paddr[paddr_num].h, 0xffff); | ||
414 | out_be16(&p_82xx_addr_filt->paddr[paddr_num].m, 0xffff); | ||
415 | out_be16(&p_82xx_addr_filt->paddr[paddr_num].l, 0xffff); | ||
416 | |||
417 | return 0; | ||
418 | } | ||
419 | |||
420 | static void hw_add_addr_in_hash(struct ucc_geth_private *ugeth, | ||
421 | u8 *p_enet_addr) | ||
422 | { | ||
423 | struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; | ||
424 | u32 cecr_subblock; | ||
425 | |||
426 | p_82xx_addr_filt = | ||
427 | (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->p_rx_glbl_pram-> | ||
428 | addressfiltering; | ||
429 | |||
430 | cecr_subblock = | ||
431 | ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); | ||
432 | |||
433 | /* Ethernet frames are defined in Little Endian mode; | ||
434 | * therefore, to insert the address into the hash | ||
435 | * (Big Endian mode), we reverse the bytes. */ | ||
436 | |||
437 | set_mac_addr(&p_82xx_addr_filt->taddr.h, p_enet_addr); | ||
438 | |||
439 | qe_issue_cmd(QE_SET_GROUP_ADDRESS, cecr_subblock, | ||
440 | QE_CR_PROTOCOL_ETHERNET, 0); | ||
441 | } | ||
442 | |||
443 | static inline int compare_addr(u8 **addr1, u8 **addr2) | ||
444 | { | ||
445 | return memcmp(addr1, addr2, ENET_NUM_OCTETS_PER_ADDRESS); | ||
446 | } | ||
447 | |||
448 | #ifdef DEBUG | ||
449 | static void get_statistics(struct ucc_geth_private *ugeth, | ||
450 | struct ucc_geth_tx_firmware_statistics * | ||
451 | tx_firmware_statistics, | ||
452 | struct ucc_geth_rx_firmware_statistics * | ||
453 | rx_firmware_statistics, | ||
454 | struct ucc_geth_hardware_statistics *hardware_statistics) | ||
455 | { | ||
456 | struct ucc_fast __iomem *uf_regs; | ||
457 | struct ucc_geth __iomem *ug_regs; | ||
458 | struct ucc_geth_tx_firmware_statistics_pram *p_tx_fw_statistics_pram; | ||
459 | struct ucc_geth_rx_firmware_statistics_pram *p_rx_fw_statistics_pram; | ||
460 | |||
461 | ug_regs = ugeth->ug_regs; | ||
462 | uf_regs = (struct ucc_fast __iomem *) ug_regs; | ||
463 | p_tx_fw_statistics_pram = ugeth->p_tx_fw_statistics_pram; | ||
464 | p_rx_fw_statistics_pram = ugeth->p_rx_fw_statistics_pram; | ||
465 | |||
466 | /* Tx firmware only if user handed pointer and driver actually | ||
467 | gathers Tx firmware statistics */ | ||
468 | if (tx_firmware_statistics && p_tx_fw_statistics_pram) { | ||
469 | tx_firmware_statistics->sicoltx = | ||
470 | in_be32(&p_tx_fw_statistics_pram->sicoltx); | ||
471 | tx_firmware_statistics->mulcoltx = | ||
472 | in_be32(&p_tx_fw_statistics_pram->mulcoltx); | ||
473 | tx_firmware_statistics->latecoltxfr = | ||
474 | in_be32(&p_tx_fw_statistics_pram->latecoltxfr); | ||
475 | tx_firmware_statistics->frabortduecol = | ||
476 | in_be32(&p_tx_fw_statistics_pram->frabortduecol); | ||
477 | tx_firmware_statistics->frlostinmactxer = | ||
478 | in_be32(&p_tx_fw_statistics_pram->frlostinmactxer); | ||
479 | tx_firmware_statistics->carriersenseertx = | ||
480 | in_be32(&p_tx_fw_statistics_pram->carriersenseertx); | ||
481 | tx_firmware_statistics->frtxok = | ||
482 | in_be32(&p_tx_fw_statistics_pram->frtxok); | ||
483 | tx_firmware_statistics->txfrexcessivedefer = | ||
484 | in_be32(&p_tx_fw_statistics_pram->txfrexcessivedefer); | ||
485 | tx_firmware_statistics->txpkts256 = | ||
486 | in_be32(&p_tx_fw_statistics_pram->txpkts256); | ||
487 | tx_firmware_statistics->txpkts512 = | ||
488 | in_be32(&p_tx_fw_statistics_pram->txpkts512); | ||
489 | tx_firmware_statistics->txpkts1024 = | ||
490 | in_be32(&p_tx_fw_statistics_pram->txpkts1024); | ||
491 | tx_firmware_statistics->txpktsjumbo = | ||
492 | in_be32(&p_tx_fw_statistics_pram->txpktsjumbo); | ||
493 | } | ||
494 | |||
495 | /* Rx firmware only if user handed pointer and driver actually | ||
496 | * gathers Rx firmware statistics */ | ||
497 | if (rx_firmware_statistics && p_rx_fw_statistics_pram) { | ||
498 | int i; | ||
499 | rx_firmware_statistics->frrxfcser = | ||
500 | in_be32(&p_rx_fw_statistics_pram->frrxfcser); | ||
501 | rx_firmware_statistics->fraligner = | ||
502 | in_be32(&p_rx_fw_statistics_pram->fraligner); | ||
503 | rx_firmware_statistics->inrangelenrxer = | ||
504 | in_be32(&p_rx_fw_statistics_pram->inrangelenrxer); | ||
505 | rx_firmware_statistics->outrangelenrxer = | ||
506 | in_be32(&p_rx_fw_statistics_pram->outrangelenrxer); | ||
507 | rx_firmware_statistics->frtoolong = | ||
508 | in_be32(&p_rx_fw_statistics_pram->frtoolong); | ||
509 | rx_firmware_statistics->runt = | ||
510 | in_be32(&p_rx_fw_statistics_pram->runt); | ||
511 | rx_firmware_statistics->verylongevent = | ||
512 | in_be32(&p_rx_fw_statistics_pram->verylongevent); | ||
513 | rx_firmware_statistics->symbolerror = | ||
514 | in_be32(&p_rx_fw_statistics_pram->symbolerror); | ||
515 | rx_firmware_statistics->dropbsy = | ||
516 | in_be32(&p_rx_fw_statistics_pram->dropbsy); | ||
517 | for (i = 0; i < 0x8; i++) | ||
518 | rx_firmware_statistics->res0[i] = | ||
519 | p_rx_fw_statistics_pram->res0[i]; | ||
520 | rx_firmware_statistics->mismatchdrop = | ||
521 | in_be32(&p_rx_fw_statistics_pram->mismatchdrop); | ||
522 | rx_firmware_statistics->underpkts = | ||
523 | in_be32(&p_rx_fw_statistics_pram->underpkts); | ||
524 | rx_firmware_statistics->pkts256 = | ||
525 | in_be32(&p_rx_fw_statistics_pram->pkts256); | ||
526 | rx_firmware_statistics->pkts512 = | ||
527 | in_be32(&p_rx_fw_statistics_pram->pkts512); | ||
528 | rx_firmware_statistics->pkts1024 = | ||
529 | in_be32(&p_rx_fw_statistics_pram->pkts1024); | ||
530 | rx_firmware_statistics->pktsjumbo = | ||
531 | in_be32(&p_rx_fw_statistics_pram->pktsjumbo); | ||
532 | rx_firmware_statistics->frlossinmacer = | ||
533 | in_be32(&p_rx_fw_statistics_pram->frlossinmacer); | ||
534 | rx_firmware_statistics->pausefr = | ||
535 | in_be32(&p_rx_fw_statistics_pram->pausefr); | ||
536 | for (i = 0; i < 0x4; i++) | ||
537 | rx_firmware_statistics->res1[i] = | ||
538 | p_rx_fw_statistics_pram->res1[i]; | ||
539 | rx_firmware_statistics->removevlan = | ||
540 | in_be32(&p_rx_fw_statistics_pram->removevlan); | ||
541 | rx_firmware_statistics->replacevlan = | ||
542 | in_be32(&p_rx_fw_statistics_pram->replacevlan); | ||
543 | rx_firmware_statistics->insertvlan = | ||
544 | in_be32(&p_rx_fw_statistics_pram->insertvlan); | ||
545 | } | ||
546 | |||
547 | /* Hardware only if user handed pointer and driver actually | ||
548 | gathers hardware statistics */ | ||
549 | if (hardware_statistics && | ||
550 | (in_be32(&uf_regs->upsmr) & UCC_GETH_UPSMR_HSE)) { | ||
551 | hardware_statistics->tx64 = in_be32(&ug_regs->tx64); | ||
552 | hardware_statistics->tx127 = in_be32(&ug_regs->tx127); | ||
553 | hardware_statistics->tx255 = in_be32(&ug_regs->tx255); | ||
554 | hardware_statistics->rx64 = in_be32(&ug_regs->rx64); | ||
555 | hardware_statistics->rx127 = in_be32(&ug_regs->rx127); | ||
556 | hardware_statistics->rx255 = in_be32(&ug_regs->rx255); | ||
557 | hardware_statistics->txok = in_be32(&ug_regs->txok); | ||
558 | hardware_statistics->txcf = in_be16(&ug_regs->txcf); | ||
559 | hardware_statistics->tmca = in_be32(&ug_regs->tmca); | ||
560 | hardware_statistics->tbca = in_be32(&ug_regs->tbca); | ||
561 | hardware_statistics->rxfok = in_be32(&ug_regs->rxfok); | ||
562 | hardware_statistics->rxbok = in_be32(&ug_regs->rxbok); | ||
563 | hardware_statistics->rbyt = in_be32(&ug_regs->rbyt); | ||
564 | hardware_statistics->rmca = in_be32(&ug_regs->rmca); | ||
565 | hardware_statistics->rbca = in_be32(&ug_regs->rbca); | ||
566 | } | ||
567 | } | ||
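Every branch above applies the same guard before copying a statistics group: the caller must have supplied a destination buffer, and the corresponding source (Tx/Rx firmware pram, or the hardware counters gated by UCC_GETH_UPSMR_HSE) must actually be maintained. A minimal sketch of that shape, with hypothetical names and a single counter standing in for the full structures:

struct stats_snapshot { unsigned int pkts; };

/* Hypothetical reduction of the guard used above: copy only when the
 * caller asked for the data and the source is really being gathered. */
static void snapshot_stats(struct stats_snapshot *dest,
			   const volatile unsigned int *src,
			   int source_enabled)
{
	if (!dest || !source_enabled)
		return;		/* nothing requested, or nothing gathered */
	dest->pkts = *src;	/* in_be32() against pram/registers here */
}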
568 | |||
569 | static void dump_bds(struct ucc_geth_private *ugeth) | ||
570 | { | ||
571 | int i; | ||
572 | int length; | ||
573 | |||
574 | for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) { | ||
575 | if (ugeth->p_tx_bd_ring[i]) { | ||
576 | length = | ||
577 | (ugeth->ug_info->bdRingLenTx[i] * | ||
578 | sizeof(struct qe_bd)); | ||
579 | ugeth_info("TX BDs[%d]", i); | ||
580 | mem_disp(ugeth->p_tx_bd_ring[i], length); | ||
581 | } | ||
582 | } | ||
583 | for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) { | ||
584 | if (ugeth->p_rx_bd_ring[i]) { | ||
585 | length = | ||
586 | (ugeth->ug_info->bdRingLenRx[i] * | ||
587 | sizeof(struct qe_bd)); | ||
588 | ugeth_info("RX BDs[%d]", i); | ||
589 | mem_disp(ugeth->p_rx_bd_ring[i], length); | ||
590 | } | ||
591 | } | ||
592 | } | ||
593 | |||
594 | static void dump_regs(struct ucc_geth_private *ugeth) | ||
595 | { | ||
596 | int i; | ||
597 | |||
598 | ugeth_info("UCC%d Geth registers:", ugeth->ug_info->uf_info.ucc_num + 1); | ||
599 | ugeth_info("Base address: 0x%08x", (u32) ugeth->ug_regs); | ||
600 | |||
601 | ugeth_info("maccfg1 : addr - 0x%08x, val - 0x%08x", | ||
602 | (u32) & ugeth->ug_regs->maccfg1, | ||
603 | in_be32(&ugeth->ug_regs->maccfg1)); | ||
604 | ugeth_info("maccfg2 : addr - 0x%08x, val - 0x%08x", | ||
605 | (u32) & ugeth->ug_regs->maccfg2, | ||
606 | in_be32(&ugeth->ug_regs->maccfg2)); | ||
607 | ugeth_info("ipgifg : addr - 0x%08x, val - 0x%08x", | ||
608 | (u32) & ugeth->ug_regs->ipgifg, | ||
609 | in_be32(&ugeth->ug_regs->ipgifg)); | ||
610 | ugeth_info("hafdup : addr - 0x%08x, val - 0x%08x", | ||
611 | (u32) & ugeth->ug_regs->hafdup, | ||
612 | in_be32(&ugeth->ug_regs->hafdup)); | ||
613 | ugeth_info("ifctl : addr - 0x%08x, val - 0x%08x", | ||
614 | (u32) & ugeth->ug_regs->ifctl, | ||
615 | in_be32(&ugeth->ug_regs->ifctl)); | ||
616 | ugeth_info("ifstat : addr - 0x%08x, val - 0x%08x", | ||
617 | (u32) & ugeth->ug_regs->ifstat, | ||
618 | in_be32(&ugeth->ug_regs->ifstat)); | ||
619 | ugeth_info("macstnaddr1: addr - 0x%08x, val - 0x%08x", | ||
620 | (u32) & ugeth->ug_regs->macstnaddr1, | ||
621 | in_be32(&ugeth->ug_regs->macstnaddr1)); | ||
622 | ugeth_info("macstnaddr2: addr - 0x%08x, val - 0x%08x", | ||
623 | (u32) & ugeth->ug_regs->macstnaddr2, | ||
624 | in_be32(&ugeth->ug_regs->macstnaddr2)); | ||
625 | ugeth_info("uempr : addr - 0x%08x, val - 0x%08x", | ||
626 | (u32) & ugeth->ug_regs->uempr, | ||
627 | in_be32(&ugeth->ug_regs->uempr)); | ||
628 | ugeth_info("utbipar : addr - 0x%08x, val - 0x%08x", | ||
629 | (u32) & ugeth->ug_regs->utbipar, | ||
630 | in_be32(&ugeth->ug_regs->utbipar)); | ||
631 | ugeth_info("uescr : addr - 0x%08x, val - 0x%04x", | ||
632 | (u32) & ugeth->ug_regs->uescr, | ||
633 | in_be16(&ugeth->ug_regs->uescr)); | ||
634 | ugeth_info("tx64 : addr - 0x%08x, val - 0x%08x", | ||
635 | (u32) & ugeth->ug_regs->tx64, | ||
636 | in_be32(&ugeth->ug_regs->tx64)); | ||
637 | ugeth_info("tx127 : addr - 0x%08x, val - 0x%08x", | ||
638 | (u32) & ugeth->ug_regs->tx127, | ||
639 | in_be32(&ugeth->ug_regs->tx127)); | ||
640 | ugeth_info("tx255 : addr - 0x%08x, val - 0x%08x", | ||
641 | (u32) & ugeth->ug_regs->tx255, | ||
642 | in_be32(&ugeth->ug_regs->tx255)); | ||
643 | ugeth_info("rx64 : addr - 0x%08x, val - 0x%08x", | ||
644 | (u32) & ugeth->ug_regs->rx64, | ||
645 | in_be32(&ugeth->ug_regs->rx64)); | ||
646 | ugeth_info("rx127 : addr - 0x%08x, val - 0x%08x", | ||
647 | (u32) & ugeth->ug_regs->rx127, | ||
648 | in_be32(&ugeth->ug_regs->rx127)); | ||
649 | ugeth_info("rx255 : addr - 0x%08x, val - 0x%08x", | ||
650 | (u32) & ugeth->ug_regs->rx255, | ||
651 | in_be32(&ugeth->ug_regs->rx255)); | ||
652 | ugeth_info("txok : addr - 0x%08x, val - 0x%08x", | ||
653 | (u32) & ugeth->ug_regs->txok, | ||
654 | in_be32(&ugeth->ug_regs->txok)); | ||
655 | ugeth_info("txcf : addr - 0x%08x, val - 0x%04x", | ||
656 | (u32) & ugeth->ug_regs->txcf, | ||
657 | in_be16(&ugeth->ug_regs->txcf)); | ||
658 | ugeth_info("tmca : addr - 0x%08x, val - 0x%08x", | ||
659 | (u32) & ugeth->ug_regs->tmca, | ||
660 | in_be32(&ugeth->ug_regs->tmca)); | ||
661 | ugeth_info("tbca : addr - 0x%08x, val - 0x%08x", | ||
662 | (u32) & ugeth->ug_regs->tbca, | ||
663 | in_be32(&ugeth->ug_regs->tbca)); | ||
664 | ugeth_info("rxfok : addr - 0x%08x, val - 0x%08x", | ||
665 | (u32) & ugeth->ug_regs->rxfok, | ||
666 | in_be32(&ugeth->ug_regs->rxfok)); | ||
667 | ugeth_info("rxbok : addr - 0x%08x, val - 0x%08x", | ||
668 | (u32) & ugeth->ug_regs->rxbok, | ||
669 | in_be32(&ugeth->ug_regs->rxbok)); | ||
670 | ugeth_info("rbyt : addr - 0x%08x, val - 0x%08x", | ||
671 | (u32) & ugeth->ug_regs->rbyt, | ||
672 | in_be32(&ugeth->ug_regs->rbyt)); | ||
673 | ugeth_info("rmca : addr - 0x%08x, val - 0x%08x", | ||
674 | (u32) & ugeth->ug_regs->rmca, | ||
675 | in_be32(&ugeth->ug_regs->rmca)); | ||
676 | ugeth_info("rbca : addr - 0x%08x, val - 0x%08x", | ||
677 | (u32) & ugeth->ug_regs->rbca, | ||
678 | in_be32(&ugeth->ug_regs->rbca)); | ||
679 | ugeth_info("scar : addr - 0x%08x, val - 0x%08x", | ||
680 | (u32) & ugeth->ug_regs->scar, | ||
681 | in_be32(&ugeth->ug_regs->scar)); | ||
682 | ugeth_info("scam : addr - 0x%08x, val - 0x%08x", | ||
683 | (u32) & ugeth->ug_regs->scam, | ||
684 | in_be32(&ugeth->ug_regs->scam)); | ||
685 | |||
686 | if (ugeth->p_thread_data_tx) { | ||
687 | int numThreadsTxNumerical; | ||
688 | switch (ugeth->ug_info->numThreadsTx) { | ||
689 | case UCC_GETH_NUM_OF_THREADS_1: | ||
690 | numThreadsTxNumerical = 1; | ||
691 | break; | ||
692 | case UCC_GETH_NUM_OF_THREADS_2: | ||
693 | numThreadsTxNumerical = 2; | ||
694 | break; | ||
695 | case UCC_GETH_NUM_OF_THREADS_4: | ||
696 | numThreadsTxNumerical = 4; | ||
697 | break; | ||
698 | case UCC_GETH_NUM_OF_THREADS_6: | ||
699 | numThreadsTxNumerical = 6; | ||
700 | break; | ||
701 | case UCC_GETH_NUM_OF_THREADS_8: | ||
702 | numThreadsTxNumerical = 8; | ||
703 | break; | ||
704 | default: | ||
705 | numThreadsTxNumerical = 0; | ||
706 | break; | ||
707 | } | ||
708 | |||
709 | ugeth_info("Thread data TXs:"); | ||
710 | ugeth_info("Base address: 0x%08x", | ||
711 | (u32) ugeth->p_thread_data_tx); | ||
712 | for (i = 0; i < numThreadsTxNumerical; i++) { | ||
713 | ugeth_info("Thread data TX[%d]:", i); | ||
714 | ugeth_info("Base address: 0x%08x", | ||
715 | (u32) & ugeth->p_thread_data_tx[i]); | ||
716 | mem_disp((u8 *) & ugeth->p_thread_data_tx[i], | ||
717 | sizeof(struct ucc_geth_thread_data_tx)); | ||
718 | } | ||
719 | } | ||
720 | if (ugeth->p_thread_data_rx) { | ||
721 | int numThreadsRxNumerical; | ||
722 | switch (ugeth->ug_info->numThreadsRx) { | ||
723 | case UCC_GETH_NUM_OF_THREADS_1: | ||
724 | numThreadsRxNumerical = 1; | ||
725 | break; | ||
726 | case UCC_GETH_NUM_OF_THREADS_2: | ||
727 | numThreadsRxNumerical = 2; | ||
728 | break; | ||
729 | case UCC_GETH_NUM_OF_THREADS_4: | ||
730 | numThreadsRxNumerical = 4; | ||
731 | break; | ||
732 | case UCC_GETH_NUM_OF_THREADS_6: | ||
733 | numThreadsRxNumerical = 6; | ||
734 | break; | ||
735 | case UCC_GETH_NUM_OF_THREADS_8: | ||
736 | numThreadsRxNumerical = 8; | ||
737 | break; | ||
738 | default: | ||
739 | numThreadsRxNumerical = 0; | ||
740 | break; | ||
741 | } | ||
742 | |||
743 | ugeth_info("Thread data RX:"); | ||
744 | ugeth_info("Base address: 0x%08x", | ||
745 | (u32) ugeth->p_thread_data_rx); | ||
746 | for (i = 0; i < numThreadsRxNumerical; i++) { | ||
747 | ugeth_info("Thread data RX[%d]:", i); | ||
748 | ugeth_info("Base address: 0x%08x", | ||
749 | (u32) & ugeth->p_thread_data_rx[i]); | ||
750 | mem_disp((u8 *) & ugeth->p_thread_data_rx[i], | ||
751 | sizeof(struct ucc_geth_thread_data_rx)); | ||
752 | } | ||
753 | } | ||
754 | if (ugeth->p_exf_glbl_param) { | ||
755 | ugeth_info("EXF global param:"); | ||
756 | ugeth_info("Base address: 0x%08x", | ||
757 | (u32) ugeth->p_exf_glbl_param); | ||
758 | mem_disp((u8 *) ugeth->p_exf_glbl_param, | ||
759 | sizeof(*ugeth->p_exf_glbl_param)); | ||
760 | } | ||
761 | if (ugeth->p_tx_glbl_pram) { | ||
762 | ugeth_info("TX global param:"); | ||
763 | ugeth_info("Base address: 0x%08x", (u32) ugeth->p_tx_glbl_pram); | ||
764 | ugeth_info("temoder : addr - 0x%08x, val - 0x%04x", | ||
765 | (u32) & ugeth->p_tx_glbl_pram->temoder, | ||
766 | in_be16(&ugeth->p_tx_glbl_pram->temoder)); | ||
767 | ugeth_info("sqptr : addr - 0x%08x, val - 0x%08x", | ||
768 | (u32) & ugeth->p_tx_glbl_pram->sqptr, | ||
769 | in_be32(&ugeth->p_tx_glbl_pram->sqptr)); | ||
770 | ugeth_info("schedulerbasepointer: addr - 0x%08x, val - 0x%08x", | ||
771 | (u32) & ugeth->p_tx_glbl_pram->schedulerbasepointer, | ||
772 | in_be32(&ugeth->p_tx_glbl_pram-> | ||
773 | schedulerbasepointer)); | ||
774 | ugeth_info("txrmonbaseptr: addr - 0x%08x, val - 0x%08x", | ||
775 | (u32) & ugeth->p_tx_glbl_pram->txrmonbaseptr, | ||
776 | in_be32(&ugeth->p_tx_glbl_pram->txrmonbaseptr)); | ||
777 | ugeth_info("tstate : addr - 0x%08x, val - 0x%08x", | ||
778 | (u32) & ugeth->p_tx_glbl_pram->tstate, | ||
779 | in_be32(&ugeth->p_tx_glbl_pram->tstate)); | ||
780 | ugeth_info("iphoffset[0] : addr - 0x%08x, val - 0x%02x", | ||
781 | (u32) & ugeth->p_tx_glbl_pram->iphoffset[0], | ||
782 | ugeth->p_tx_glbl_pram->iphoffset[0]); | ||
783 | ugeth_info("iphoffset[1] : addr - 0x%08x, val - 0x%02x", | ||
784 | (u32) & ugeth->p_tx_glbl_pram->iphoffset[1], | ||
785 | ugeth->p_tx_glbl_pram->iphoffset[1]); | ||
786 | ugeth_info("iphoffset[2] : addr - 0x%08x, val - 0x%02x", | ||
787 | (u32) & ugeth->p_tx_glbl_pram->iphoffset[2], | ||
788 | ugeth->p_tx_glbl_pram->iphoffset[2]); | ||
789 | ugeth_info("iphoffset[3] : addr - 0x%08x, val - 0x%02x", | ||
790 | (u32) & ugeth->p_tx_glbl_pram->iphoffset[3], | ||
791 | ugeth->p_tx_glbl_pram->iphoffset[3]); | ||
792 | ugeth_info("iphoffset[4] : addr - 0x%08x, val - 0x%02x", | ||
793 | (u32) & ugeth->p_tx_glbl_pram->iphoffset[4], | ||
794 | ugeth->p_tx_glbl_pram->iphoffset[4]); | ||
795 | ugeth_info("iphoffset[5] : addr - 0x%08x, val - 0x%02x", | ||
796 | (u32) & ugeth->p_tx_glbl_pram->iphoffset[5], | ||
797 | ugeth->p_tx_glbl_pram->iphoffset[5]); | ||
798 | ugeth_info("iphoffset[6] : addr - 0x%08x, val - 0x%02x", | ||
799 | (u32) & ugeth->p_tx_glbl_pram->iphoffset[6], | ||
800 | ugeth->p_tx_glbl_pram->iphoffset[6]); | ||
801 | ugeth_info("iphoffset[7] : addr - 0x%08x, val - 0x%02x", | ||
802 | (u32) & ugeth->p_tx_glbl_pram->iphoffset[7], | ||
803 | ugeth->p_tx_glbl_pram->iphoffset[7]); | ||
804 | ugeth_info("vtagtable[0] : addr - 0x%08x, val - 0x%08x", | ||
805 | (u32) & ugeth->p_tx_glbl_pram->vtagtable[0], | ||
806 | in_be32(&ugeth->p_tx_glbl_pram->vtagtable[0])); | ||
807 | ugeth_info("vtagtable[1] : addr - 0x%08x, val - 0x%08x", | ||
808 | (u32) & ugeth->p_tx_glbl_pram->vtagtable[1], | ||
809 | in_be32(&ugeth->p_tx_glbl_pram->vtagtable[1])); | ||
810 | ugeth_info("vtagtable[2] : addr - 0x%08x, val - 0x%08x", | ||
811 | (u32) & ugeth->p_tx_glbl_pram->vtagtable[2], | ||
812 | in_be32(&ugeth->p_tx_glbl_pram->vtagtable[2])); | ||
813 | ugeth_info("vtagtable[3] : addr - 0x%08x, val - 0x%08x", | ||
814 | (u32) & ugeth->p_tx_glbl_pram->vtagtable[3], | ||
815 | in_be32(&ugeth->p_tx_glbl_pram->vtagtable[3])); | ||
816 | ugeth_info("vtagtable[4] : addr - 0x%08x, val - 0x%08x", | ||
817 | (u32) & ugeth->p_tx_glbl_pram->vtagtable[4], | ||
818 | in_be32(&ugeth->p_tx_glbl_pram->vtagtable[4])); | ||
819 | ugeth_info("vtagtable[5] : addr - 0x%08x, val - 0x%08x", | ||
820 | (u32) & ugeth->p_tx_glbl_pram->vtagtable[5], | ||
821 | in_be32(&ugeth->p_tx_glbl_pram->vtagtable[5])); | ||
822 | ugeth_info("vtagtable[6] : addr - 0x%08x, val - 0x%08x", | ||
823 | (u32) & ugeth->p_tx_glbl_pram->vtagtable[6], | ||
824 | in_be32(&ugeth->p_tx_glbl_pram->vtagtable[6])); | ||
825 | ugeth_info("vtagtable[7] : addr - 0x%08x, val - 0x%08x", | ||
826 | (u32) & ugeth->p_tx_glbl_pram->vtagtable[7], | ||
827 | in_be32(&ugeth->p_tx_glbl_pram->vtagtable[7])); | ||
828 | ugeth_info("tqptr : addr - 0x%08x, val - 0x%08x", | ||
829 | (u32) & ugeth->p_tx_glbl_pram->tqptr, | ||
830 | in_be32(&ugeth->p_tx_glbl_pram->tqptr)); | ||
831 | } | ||
832 | if (ugeth->p_rx_glbl_pram) { | ||
833 | ugeth_info("RX global param:"); | ||
834 | ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_glbl_pram); | ||
835 | ugeth_info("remoder : addr - 0x%08x, val - 0x%08x", | ||
836 | (u32) & ugeth->p_rx_glbl_pram->remoder, | ||
837 | in_be32(&ugeth->p_rx_glbl_pram->remoder)); | ||
838 | ugeth_info("rqptr : addr - 0x%08x, val - 0x%08x", | ||
839 | (u32) & ugeth->p_rx_glbl_pram->rqptr, | ||
840 | in_be32(&ugeth->p_rx_glbl_pram->rqptr)); | ||
841 | ugeth_info("typeorlen : addr - 0x%08x, val - 0x%04x", | ||
842 | (u32) & ugeth->p_rx_glbl_pram->typeorlen, | ||
843 | in_be16(&ugeth->p_rx_glbl_pram->typeorlen)); | ||
844 | ugeth_info("rxgstpack : addr - 0x%08x, val - 0x%02x", | ||
845 | (u32) & ugeth->p_rx_glbl_pram->rxgstpack, | ||
846 | ugeth->p_rx_glbl_pram->rxgstpack); | ||
847 | ugeth_info("rxrmonbaseptr : addr - 0x%08x, val - 0x%08x", | ||
848 | (u32) & ugeth->p_rx_glbl_pram->rxrmonbaseptr, | ||
849 | in_be32(&ugeth->p_rx_glbl_pram->rxrmonbaseptr)); | ||
850 | ugeth_info("intcoalescingptr: addr - 0x%08x, val - 0x%08x", | ||
851 | (u32) & ugeth->p_rx_glbl_pram->intcoalescingptr, | ||
852 | in_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr)); | ||
853 | ugeth_info("rstate : addr - 0x%08x, val - 0x%02x", | ||
854 | (u32) & ugeth->p_rx_glbl_pram->rstate, | ||
855 | ugeth->p_rx_glbl_pram->rstate); | ||
856 | ugeth_info("mrblr : addr - 0x%08x, val - 0x%04x", | ||
857 | (u32) & ugeth->p_rx_glbl_pram->mrblr, | ||
858 | in_be16(&ugeth->p_rx_glbl_pram->mrblr)); | ||
859 | ugeth_info("rbdqptr : addr - 0x%08x, val - 0x%08x", | ||
860 | (u32) & ugeth->p_rx_glbl_pram->rbdqptr, | ||
861 | in_be32(&ugeth->p_rx_glbl_pram->rbdqptr)); | ||
862 | ugeth_info("mflr : addr - 0x%08x, val - 0x%04x", | ||
863 | (u32) & ugeth->p_rx_glbl_pram->mflr, | ||
864 | in_be16(&ugeth->p_rx_glbl_pram->mflr)); | ||
865 | ugeth_info("minflr : addr - 0x%08x, val - 0x%04x", | ||
866 | (u32) & ugeth->p_rx_glbl_pram->minflr, | ||
867 | in_be16(&ugeth->p_rx_glbl_pram->minflr)); | ||
868 | ugeth_info("maxd1 : addr - 0x%08x, val - 0x%04x", | ||
869 | (u32) & ugeth->p_rx_glbl_pram->maxd1, | ||
870 | in_be16(&ugeth->p_rx_glbl_pram->maxd1)); | ||
871 | ugeth_info("maxd2 : addr - 0x%08x, val - 0x%04x", | ||
872 | (u32) & ugeth->p_rx_glbl_pram->maxd2, | ||
873 | in_be16(&ugeth->p_rx_glbl_pram->maxd2)); | ||
874 | ugeth_info("ecamptr : addr - 0x%08x, val - 0x%08x", | ||
875 | (u32) & ugeth->p_rx_glbl_pram->ecamptr, | ||
876 | in_be32(&ugeth->p_rx_glbl_pram->ecamptr)); | ||
877 | ugeth_info("l2qt : addr - 0x%08x, val - 0x%08x", | ||
878 | (u32) & ugeth->p_rx_glbl_pram->l2qt, | ||
879 | in_be32(&ugeth->p_rx_glbl_pram->l2qt)); | ||
880 | ugeth_info("l3qt[0] : addr - 0x%08x, val - 0x%08x", | ||
881 | (u32) & ugeth->p_rx_glbl_pram->l3qt[0], | ||
882 | in_be32(&ugeth->p_rx_glbl_pram->l3qt[0])); | ||
883 | ugeth_info("l3qt[1] : addr - 0x%08x, val - 0x%08x", | ||
884 | (u32) & ugeth->p_rx_glbl_pram->l3qt[1], | ||
885 | in_be32(&ugeth->p_rx_glbl_pram->l3qt[1])); | ||
886 | ugeth_info("l3qt[2] : addr - 0x%08x, val - 0x%08x", | ||
887 | (u32) & ugeth->p_rx_glbl_pram->l3qt[2], | ||
888 | in_be32(&ugeth->p_rx_glbl_pram->l3qt[2])); | ||
889 | ugeth_info("l3qt[3] : addr - 0x%08x, val - 0x%08x", | ||
890 | (u32) & ugeth->p_rx_glbl_pram->l3qt[3], | ||
891 | in_be32(&ugeth->p_rx_glbl_pram->l3qt[3])); | ||
892 | ugeth_info("l3qt[4] : addr - 0x%08x, val - 0x%08x", | ||
893 | (u32) & ugeth->p_rx_glbl_pram->l3qt[4], | ||
894 | in_be32(&ugeth->p_rx_glbl_pram->l3qt[4])); | ||
895 | ugeth_info("l3qt[5] : addr - 0x%08x, val - 0x%08x", | ||
896 | (u32) & ugeth->p_rx_glbl_pram->l3qt[5], | ||
897 | in_be32(&ugeth->p_rx_glbl_pram->l3qt[5])); | ||
898 | ugeth_info("l3qt[6] : addr - 0x%08x, val - 0x%08x", | ||
899 | (u32) & ugeth->p_rx_glbl_pram->l3qt[6], | ||
900 | in_be32(&ugeth->p_rx_glbl_pram->l3qt[6])); | ||
901 | ugeth_info("l3qt[7] : addr - 0x%08x, val - 0x%08x", | ||
902 | (u32) & ugeth->p_rx_glbl_pram->l3qt[7], | ||
903 | in_be32(&ugeth->p_rx_glbl_pram->l3qt[7])); | ||
904 | ugeth_info("vlantype : addr - 0x%08x, val - 0x%04x", | ||
905 | (u32) & ugeth->p_rx_glbl_pram->vlantype, | ||
906 | in_be16(&ugeth->p_rx_glbl_pram->vlantype)); | ||
907 | ugeth_info("vlantci : addr - 0x%08x, val - 0x%04x", | ||
908 | (u32) & ugeth->p_rx_glbl_pram->vlantci, | ||
909 | in_be16(&ugeth->p_rx_glbl_pram->vlantci)); | ||
910 | for (i = 0; i < 64; i++) | ||
911 | ugeth_info | ||
912 | ("addressfiltering[%d]: addr - 0x%08x, val - 0x%02x", | ||
913 | i, | ||
914 | (u32) & ugeth->p_rx_glbl_pram->addressfiltering[i], | ||
915 | ugeth->p_rx_glbl_pram->addressfiltering[i]); | ||
916 | ugeth_info("exfGlobalParam : addr - 0x%08x, val - 0x%08x", | ||
917 | (u32) & ugeth->p_rx_glbl_pram->exfGlobalParam, | ||
918 | in_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam)); | ||
919 | } | ||
920 | if (ugeth->p_send_q_mem_reg) { | ||
921 | ugeth_info("Send Q memory registers:"); | ||
922 | ugeth_info("Base address: 0x%08x", | ||
923 | (u32) ugeth->p_send_q_mem_reg); | ||
924 | for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) { | ||
925 | ugeth_info("SQQD[%d]:", i); | ||
926 | ugeth_info("Base address: 0x%08x", | ||
927 | (u32) & ugeth->p_send_q_mem_reg->sqqd[i]); | ||
928 | mem_disp((u8 *) & ugeth->p_send_q_mem_reg->sqqd[i], | ||
929 | sizeof(struct ucc_geth_send_queue_qd)); | ||
930 | } | ||
931 | } | ||
932 | if (ugeth->p_scheduler) { | ||
933 | ugeth_info("Scheduler:"); | ||
934 | ugeth_info("Base address: 0x%08x", (u32) ugeth->p_scheduler); | ||
935 | mem_disp((u8 *) ugeth->p_scheduler, | ||
936 | sizeof(*ugeth->p_scheduler)); | ||
937 | } | ||
938 | if (ugeth->p_tx_fw_statistics_pram) { | ||
939 | ugeth_info("TX FW statistics pram:"); | ||
940 | ugeth_info("Base address: 0x%08x", | ||
941 | (u32) ugeth->p_tx_fw_statistics_pram); | ||
942 | mem_disp((u8 *) ugeth->p_tx_fw_statistics_pram, | ||
943 | sizeof(*ugeth->p_tx_fw_statistics_pram)); | ||
944 | } | ||
945 | if (ugeth->p_rx_fw_statistics_pram) { | ||
946 | ugeth_info("RX FW statistics pram:"); | ||
947 | ugeth_info("Base address: 0x%08x", | ||
948 | (u32) ugeth->p_rx_fw_statistics_pram); | ||
949 | mem_disp((u8 *) ugeth->p_rx_fw_statistics_pram, | ||
950 | sizeof(*ugeth->p_rx_fw_statistics_pram)); | ||
951 | } | ||
952 | if (ugeth->p_rx_irq_coalescing_tbl) { | ||
953 | ugeth_info("RX IRQ coalescing tables:"); | ||
954 | ugeth_info("Base address: 0x%08x", | ||
955 | (u32) ugeth->p_rx_irq_coalescing_tbl); | ||
956 | for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) { | ||
957 | ugeth_info("RX IRQ coalescing table entry[%d]:", i); | ||
958 | ugeth_info("Base address: 0x%08x", | ||
959 | (u32) & ugeth->p_rx_irq_coalescing_tbl-> | ||
960 | coalescingentry[i]); | ||
961 | ugeth_info | ||
962 | ("interruptcoalescingmaxvalue: addr - 0x%08x, val - 0x%08x", | ||
963 | (u32) & ugeth->p_rx_irq_coalescing_tbl-> | ||
964 | coalescingentry[i].interruptcoalescingmaxvalue, | ||
965 | in_be32(&ugeth->p_rx_irq_coalescing_tbl-> | ||
966 | coalescingentry[i]. | ||
967 | interruptcoalescingmaxvalue)); | ||
968 | ugeth_info | ||
969 | ("interruptcoalescingcounter : addr - 0x%08x, val - 0x%08x", | ||
970 | (u32) & ugeth->p_rx_irq_coalescing_tbl-> | ||
971 | coalescingentry[i].interruptcoalescingcounter, | ||
972 | in_be32(&ugeth->p_rx_irq_coalescing_tbl-> | ||
973 | coalescingentry[i]. | ||
974 | interruptcoalescingcounter)); | ||
975 | } | ||
976 | } | ||
977 | if (ugeth->p_rx_bd_qs_tbl) { | ||
978 | ugeth_info("RX BD QS tables:"); | ||
979 | ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_bd_qs_tbl); | ||
980 | for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) { | ||
981 | ugeth_info("RX BD QS table[%d]:", i); | ||
982 | ugeth_info("Base address: 0x%08x", | ||
983 | (u32) & ugeth->p_rx_bd_qs_tbl[i]); | ||
984 | ugeth_info | ||
985 | ("bdbaseptr : addr - 0x%08x, val - 0x%08x", | ||
986 | (u32) & ugeth->p_rx_bd_qs_tbl[i].bdbaseptr, | ||
987 | in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdbaseptr)); | ||
988 | ugeth_info | ||
989 | ("bdptr : addr - 0x%08x, val - 0x%08x", | ||
990 | (u32) & ugeth->p_rx_bd_qs_tbl[i].bdptr, | ||
991 | in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdptr)); | ||
992 | ugeth_info | ||
993 | ("externalbdbaseptr: addr - 0x%08x, val - 0x%08x", | ||
994 | (u32) & ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr, | ||
995 | in_be32(&ugeth->p_rx_bd_qs_tbl[i]. | ||
996 | externalbdbaseptr)); | ||
997 | ugeth_info | ||
998 | ("externalbdptr : addr - 0x%08x, val - 0x%08x", | ||
999 | (u32) & ugeth->p_rx_bd_qs_tbl[i].externalbdptr, | ||
1000 | in_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdptr)); | ||
1001 | ugeth_info("ucode RX Prefetched BDs:"); | ||
1002 | ugeth_info("Base address: 0x%08x", | ||
1003 | (u32) | ||
1004 | qe_muram_addr(in_be32 | ||
1005 | (&ugeth->p_rx_bd_qs_tbl[i]. | ||
1006 | bdbaseptr))); | ||
1007 | mem_disp((u8 *) | ||
1008 | qe_muram_addr(in_be32 | ||
1009 | (&ugeth->p_rx_bd_qs_tbl[i]. | ||
1010 | bdbaseptr)), | ||
1011 | sizeof(struct ucc_geth_rx_prefetched_bds)); | ||
1012 | } | ||
1013 | } | ||
1014 | if (ugeth->p_init_enet_param_shadow) { | ||
1015 | int size; | ||
1016 | ugeth_info("Init enet param shadow:"); | ||
1017 | ugeth_info("Base address: 0x%08x", | ||
1018 | (u32) ugeth->p_init_enet_param_shadow); | ||
1019 | mem_disp((u8 *) ugeth->p_init_enet_param_shadow, | ||
1020 | sizeof(*ugeth->p_init_enet_param_shadow)); | ||
1021 | |||
1022 | size = sizeof(struct ucc_geth_thread_rx_pram); | ||
1023 | if (ugeth->ug_info->rxExtendedFiltering) { | ||
1024 | size += | ||
1025 | THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING; | ||
1026 | if (ugeth->ug_info->largestexternallookupkeysize == | ||
1027 | QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES) | ||
1028 | size += | ||
1029 | THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8; | ||
1030 | if (ugeth->ug_info->largestexternallookupkeysize == | ||
1031 | QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES) | ||
1032 | size += | ||
1033 | THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16; | ||
1034 | } | ||
1035 | |||
1036 | dump_init_enet_entries(ugeth, | ||
1037 | &(ugeth->p_init_enet_param_shadow-> | ||
1038 | txthread[0]), | ||
1039 | ENET_INIT_PARAM_MAX_ENTRIES_TX, | ||
1040 | sizeof(struct ucc_geth_thread_tx_pram), | ||
1041 | ugeth->ug_info->riscTx, 0); | ||
1042 | dump_init_enet_entries(ugeth, | ||
1043 | &(ugeth->p_init_enet_param_shadow-> | ||
1044 | rxthread[0]), | ||
1045 | ENET_INIT_PARAM_MAX_ENTRIES_RX, size, | ||
1046 | ugeth->ug_info->riscRx, 1); | ||
1047 | } | ||
1048 | } | ||
1049 | #endif /* DEBUG */ | ||
1050 | |||
1051 | static void init_default_reg_vals(u32 __iomem *upsmr_register, | ||
1052 | u32 __iomem *maccfg1_register, | ||
1053 | u32 __iomem *maccfg2_register) | ||
1054 | { | ||
1055 | out_be32(upsmr_register, UCC_GETH_UPSMR_INIT); | ||
1056 | out_be32(maccfg1_register, UCC_GETH_MACCFG1_INIT); | ||
1057 | out_be32(maccfg2_register, UCC_GETH_MACCFG2_INIT); | ||
1058 | } | ||
1059 | |||
1060 | static int init_half_duplex_params(int alt_beb, | ||
1061 | int back_pressure_no_backoff, | ||
1062 | int no_backoff, | ||
1063 | int excess_defer, | ||
1064 | u8 alt_beb_truncation, | ||
1065 | u8 max_retransmissions, | ||
1066 | u8 collision_window, | ||
1067 | u32 __iomem *hafdup_register) | ||
1068 | { | ||
1069 | u32 value = 0; | ||
1070 | |||
1071 | if ((alt_beb_truncation > HALFDUP_ALT_BEB_TRUNCATION_MAX) || | ||
1072 | (max_retransmissions > HALFDUP_MAX_RETRANSMISSION_MAX) || | ||
1073 | (collision_window > HALFDUP_COLLISION_WINDOW_MAX)) | ||
1074 | return -EINVAL; | ||
1075 | |||
1076 | value = (u32) (alt_beb_truncation << HALFDUP_ALT_BEB_TRUNCATION_SHIFT); | ||
1077 | |||
1078 | if (alt_beb) | ||
1079 | value |= HALFDUP_ALT_BEB; | ||
1080 | if (back_pressure_no_backoff) | ||
1081 | value |= HALFDUP_BACK_PRESSURE_NO_BACKOFF; | ||
1082 | if (no_backoff) | ||
1083 | value |= HALFDUP_NO_BACKOFF; | ||
1084 | if (excess_defer) | ||
1085 | value |= HALFDUP_EXCESSIVE_DEFER; | ||
1086 | |||
1087 | value |= (max_retransmissions << HALFDUP_MAX_RETRANSMISSION_SHIFT); | ||
1088 | |||
1089 | value |= collision_window; | ||
1090 | |||
1091 | out_be32(hafdup_register, value); | ||
1092 | return 0; | ||
1093 | } | ||
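The function validates each field against its hardware maximum and then ORs the shifted fields into one register value. Below is a self-contained sketch of that validate-then-pack pattern; the shift positions and limits are illustrative stand-ins, the real ones are the HALFDUP_* constants in ucc_geth.h:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins only; the driver uses HALFDUP_*_SHIFT/_MAX. */
#define TRUNCATION_SHIFT	24
#define TRUNCATION_MAX		0x0F
#define RETRANSMISSION_SHIFT	8
#define RETRANSMISSION_MAX	0x0F
#define COLLISION_WINDOW_MAX	0x3F

static int pack_hafdup(uint8_t trunc, uint8_t retrans, uint8_t window,
		       uint32_t *out)
{
	if (trunc > TRUNCATION_MAX || retrans > RETRANSMISSION_MAX ||
	    window > COLLISION_WINDOW_MAX)
		return -1;	/* the driver returns -EINVAL here */

	*out = ((uint32_t)trunc << TRUNCATION_SHIFT) |
	       ((uint32_t)retrans << RETRANSMISSION_SHIFT) |
	       window;		/* lowest field needs no shift */
	return 0;
}

int main(void)
{
	uint32_t v;

	if (pack_hafdup(0xA, 0xF, 0x37, &v) == 0)
		printf("hafdup = 0x%08X\n", v);	/* 0x0A000F37 */
	return 0;
}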
1094 | |||
1095 | static int init_inter_frame_gap_params(u8 non_btb_cs_ipg, | ||
1096 | u8 non_btb_ipg, | ||
1097 | u8 min_ifg, | ||
1098 | u8 btb_ipg, | ||
1099 | u32 __iomem *ipgifg_register) | ||
1100 | { | ||
1101 | u32 value = 0; | ||
1102 | |||
1103 | /* Non-back-to-back IPG part 1 must be <= non-back-to-back | ||
1104 | * IPG part 2 */ | ||
1105 | if (non_btb_cs_ipg > non_btb_ipg) | ||
1106 | return -EINVAL; | ||
1107 | |||
1108 | if ((non_btb_cs_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART1_MAX) || | ||
1109 | (non_btb_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART2_MAX) || | ||
1110 | /*(min_ifg > IPGIFG_MINIMUM_IFG_ENFORCEMENT_MAX) || */ | ||
1111 | (btb_ipg > IPGIFG_BACK_TO_BACK_IFG_MAX)) | ||
1112 | return -EINVAL; | ||
1113 | |||
1114 | value |= | ||
1115 | ((non_btb_cs_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART1_SHIFT) & | ||
1116 | IPGIFG_NBTB_CS_IPG_MASK); | ||
1117 | value |= | ||
1118 | ((non_btb_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART2_SHIFT) & | ||
1119 | IPGIFG_NBTB_IPG_MASK); | ||
1120 | value |= | ||
1121 | ((min_ifg << IPGIFG_MINIMUM_IFG_ENFORCEMENT_SHIFT) & | ||
1122 | IPGIFG_MIN_IFG_MASK); | ||
1123 | value |= (btb_ipg & IPGIFG_BTB_IPG_MASK); | ||
1124 | |||
1125 | out_be32(ipgifg_register, value); | ||
1126 | return 0; | ||
1127 | } | ||
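Note that each field above is masked after shifting, so even a value that slipped past the range checks cannot spill into a neighbouring field. A tiny illustration with a made-up 4-bit field (the real shift/mask pairs are the IPGIFG_* constants):

#include <stdint.h>
#include <stdio.h>

#define FIELD_SHIFT	8		/* made-up 4-bit field, bits 8..11 */
#define FIELD_MASK	0x00000F00u

int main(void)
{
	uint32_t unmasked = (uint32_t)0x1F << FIELD_SHIFT;
	uint32_t masked   = ((uint32_t)0x1F << FIELD_SHIFT) & FIELD_MASK;

	/* prints 0x00001F00 vs 0x00000F00: the overflow bit is clipped */
	printf("0x%08X vs 0x%08X\n", unmasked, masked);
	return 0;
}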
1128 | |||
1129 | int init_flow_control_params(u32 automatic_flow_control_mode, | ||
1130 | int rx_flow_control_enable, | ||
1131 | int tx_flow_control_enable, | ||
1132 | u16 pause_period, | ||
1133 | u16 extension_field, | ||
1134 | u32 __iomem *upsmr_register, | ||
1135 | u32 __iomem *uempr_register, | ||
1136 | u32 __iomem *maccfg1_register) | ||
1137 | { | ||
1138 | u32 value = 0; | ||
1139 | |||
1140 | /* Set UEMPR register */ | ||
1141 | value = (u32) pause_period << UEMPR_PAUSE_TIME_VALUE_SHIFT; | ||
1142 | value |= (u32) extension_field << UEMPR_EXTENDED_PAUSE_TIME_VALUE_SHIFT; | ||
1143 | out_be32(uempr_register, value); | ||
1144 | |||
1145 | /* Set UPSMR register */ | ||
1146 | setbits32(upsmr_register, automatic_flow_control_mode); | ||
1147 | |||
1148 | value = in_be32(maccfg1_register); | ||
1149 | if (rx_flow_control_enable) | ||
1150 | value |= MACCFG1_FLOW_RX; | ||
1151 | if (tx_flow_control_enable) | ||
1152 | value |= MACCFG1_FLOW_TX; | ||
1153 | out_be32(maccfg1_register, value); | ||
1154 | |||
1155 | return 0; | ||
1156 | } | ||
1157 | |||
1158 | static int init_hw_statistics_gathering_mode(int enable_hardware_statistics, | ||
1159 | int auto_zero_hardware_statistics, | ||
1160 | u32 __iomem *upsmr_register, | ||
1161 | u16 __iomem *uescr_register) | ||
1162 | { | ||
1163 | u16 uescr_value = 0; | ||
1164 | |||
1165 | /* Enable hardware statistics gathering if requested */ | ||
1166 | if (enable_hardware_statistics) | ||
1167 | setbits32(upsmr_register, UCC_GETH_UPSMR_HSE); | ||
1168 | |||
1169 | /* Clear hardware statistics counters */ | ||
1170 | uescr_value = in_be16(uescr_register); | ||
1171 | uescr_value |= UESCR_CLRCNT; | ||
1172 | /* Automatically zero hardware statistics counters on read, | ||
1173 | if requested */ | ||
1174 | if (auto_zero_hardware_statistics) | ||
1175 | uescr_value |= UESCR_AUTOZ; | ||
1176 | out_be16(uescr_register, uescr_value); | ||
1177 | |||
1178 | return 0; | ||
1179 | } | ||
1180 | |||
1181 | static int init_firmware_statistics_gathering_mode(int | ||
1182 | enable_tx_firmware_statistics, | ||
1183 | int enable_rx_firmware_statistics, | ||
1184 | u32 __iomem *tx_rmon_base_ptr, | ||
1185 | u32 tx_firmware_statistics_structure_address, | ||
1186 | u32 __iomem *rx_rmon_base_ptr, | ||
1187 | u32 rx_firmware_statistics_structure_address, | ||
1188 | u16 __iomem *temoder_register, | ||
1189 | u32 __iomem *remoder_register) | ||
1190 | { | ||
1191 | /* Note: this function does not NULL-check | ||
1192 | * the pointer parameters it receives */ | ||
1193 | |||
1194 | if (enable_tx_firmware_statistics) { | ||
1195 | out_be32(tx_rmon_base_ptr, | ||
1196 | tx_firmware_statistics_structure_address); | ||
1197 | setbits16(temoder_register, TEMODER_TX_RMON_STATISTICS_ENABLE); | ||
1198 | } | ||
1199 | |||
1200 | if (enable_rx_firmware_statistics) { | ||
1201 | out_be32(rx_rmon_base_ptr, | ||
1202 | rx_firmware_statistics_structure_address); | ||
1203 | setbits32(remoder_register, REMODER_RX_RMON_STATISTICS_ENABLE); | ||
1204 | } | ||
1205 | |||
1206 | return 0; | ||
1207 | } | ||
1208 | |||
1209 | static int init_mac_station_addr_regs(u8 address_byte_0, | ||
1210 | u8 address_byte_1, | ||
1211 | u8 address_byte_2, | ||
1212 | u8 address_byte_3, | ||
1213 | u8 address_byte_4, | ||
1214 | u8 address_byte_5, | ||
1215 | u32 __iomem *macstnaddr1_register, | ||
1216 | u32 __iomem *macstnaddr2_register) | ||
1217 | { | ||
1218 | u32 value = 0; | ||
1219 | |||
1220 | /* Example: for the station address 0x12345678ABCD, */ | ||
1221 | /* 0x12 is byte 0, 0x34 is byte 1, and so on, up to 0xCD as byte 5 */ | ||
1222 | |||
1223 | /* MACSTNADDR1 Register: */ | ||
1224 | |||
1225 | /* 0 7 8 15 */ | ||
1226 | /* station address byte 5 station address byte 4 */ | ||
1227 | /* 16 23 24 31 */ | ||
1228 | /* station address byte 3 station address byte 2 */ | ||
1229 | value |= (u32) ((address_byte_2 << 0) & 0x000000FF); | ||
1230 | value |= (u32) ((address_byte_3 << 8) & 0x0000FF00); | ||
1231 | value |= (u32) ((address_byte_4 << 16) & 0x00FF0000); | ||
1232 | value |= (u32) ((address_byte_5 << 24) & 0xFF000000); | ||
1233 | |||
1234 | out_be32(macstnaddr1_register, value); | ||
1235 | |||
1236 | /* MACSTNADDR2 Register: */ | ||
1237 | |||
1238 | /* 0 7 8 15 */ | ||
1239 | /* station address byte 1 station address byte 0 */ | ||
1240 | /* 16 23 24 31 */ | ||
1241 | /* reserved reserved */ | ||
1242 | value = 0; | ||
1243 | value |= (u32) ((address_byte_0 << 16) & 0x00FF0000); | ||
1244 | value |= (u32) ((address_byte_1 << 24) & 0xFF000000); | ||
1245 | |||
1246 | out_be32(macstnaddr2_register, value); | ||
1247 | |||
1248 | return 0; | ||
1249 | } | ||
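To make the byte-lane mapping concrete, here is a stand-alone sketch (plain user-space C, with the register writes replaced by prints) that packs the comment's example address 12:34:56:78:AB:CD exactly as the function above does:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Station address 12:34:56:78:AB:CD; byte 0 = 0x12 ... byte 5 = 0xCD */
	uint8_t b[6] = { 0x12, 0x34, 0x56, 0x78, 0xAB, 0xCD };
	uint32_t addr1 = 0, addr2 = 0;

	/* MACSTNADDR1 holds bytes 5..2, byte 5 in the top lane */
	addr1 |= (uint32_t)b[2] << 0;
	addr1 |= (uint32_t)b[3] << 8;
	addr1 |= (uint32_t)b[4] << 16;
	addr1 |= (uint32_t)b[5] << 24;

	/* MACSTNADDR2 holds bytes 1..0 in its upper half; rest reserved */
	addr2 |= (uint32_t)b[0] << 16;
	addr2 |= (uint32_t)b[1] << 24;

	printf("MACSTNADDR1 = 0x%08X\n", addr1);	/* 0xCDAB7856 */
	printf("MACSTNADDR2 = 0x%08X\n", addr2);	/* 0x34120000 */
	return 0;
}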
1250 | |||
1251 | static int init_check_frame_length_mode(int length_check, | ||
1252 | u32 __iomem *maccfg2_register) | ||
1253 | { | ||
1254 | u32 value = 0; | ||
1255 | |||
1256 | value = in_be32(maccfg2_register); | ||
1257 | |||
1258 | if (length_check) | ||
1259 | value |= MACCFG2_LC; | ||
1260 | else | ||
1261 | value &= ~MACCFG2_LC; | ||
1262 | |||
1263 | out_be32(maccfg2_register, value); | ||
1264 | return 0; | ||
1265 | } | ||
1266 | |||
1267 | static int init_preamble_length(u8 preamble_length, | ||
1268 | u32 __iomem *maccfg2_register) | ||
1269 | { | ||
1270 | if ((preamble_length < 3) || (preamble_length > 7)) | ||
1271 | return -EINVAL; | ||
1272 | |||
1273 | clrsetbits_be32(maccfg2_register, MACCFG2_PREL_MASK, | ||
1274 | preamble_length << MACCFG2_PREL_SHIFT); | ||
1275 | |||
1276 | return 0; | ||
1277 | } | ||
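clrsetbits_be32() is a read-modify-write helper: it clears the masked field and merges the new bits in a single call. Its effect, minus the big-endian MMIO accessors, reduces to the expression below; the 3-bit field position is illustrative, the real MACCFG2_PREL_MASK/_SHIFT values live in ucc_geth.h:

#include <stdint.h>
#include <stdio.h>

/* Open-coded equivalent of clrsetbits_be32(reg, clr, set):
 * read, clear the field, merge the new bits, write back. */
static uint32_t clrsetbits(uint32_t reg, uint32_t clr, uint32_t set)
{
	return (reg & ~clr) | set;
}

int main(void)
{
	/* illustrative 3-bit preamble-length field at bits 12..14 */
	const uint32_t MASK = 0x7u << 12;
	uint32_t maccfg2 = 0x0000F10C;

	maccfg2 = clrsetbits(maccfg2, MASK, 5u << 12);
	printf("maccfg2 = 0x%08X\n", maccfg2);	/* 0x0000D10C: field = 5 */
	return 0;
}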
1278 | |||
1279 | static int init_rx_parameters(int reject_broadcast, | ||
1280 | int receive_short_frames, | ||
1281 | int promiscuous, u32 __iomem *upsmr_register) | ||
1282 | { | ||
1283 | u32 value = 0; | ||
1284 | |||
1285 | value = in_be32(upsmr_register); | ||
1286 | |||
1287 | if (reject_broadcast) | ||
1288 | value |= UCC_GETH_UPSMR_BRO; | ||
1289 | else | ||
1290 | value &= ~UCC_GETH_UPSMR_BRO; | ||
1291 | |||
1292 | if (receive_short_frames) | ||
1293 | value |= UCC_GETH_UPSMR_RSH; | ||
1294 | else | ||
1295 | value &= ~UCC_GETH_UPSMR_RSH; | ||
1296 | |||
1297 | if (promiscuous) | ||
1298 | value |= UCC_GETH_UPSMR_PRO; | ||
1299 | else | ||
1300 | value &= ~UCC_GETH_UPSMR_PRO; | ||
1301 | |||
1302 | out_be32(upsmr_register, value); | ||
1303 | |||
1304 | return 0; | ||
1305 | } | ||
1306 | |||
1307 | static int init_max_rx_buff_len(u16 max_rx_buf_len, | ||
1308 | u16 __iomem *mrblr_register) | ||
1309 | { | ||
1310 | /* max_rx_buf_len value must be a multiple of 128 */ | ||
1311 | if ((max_rx_buf_len == 0) || | ||
1312 | (max_rx_buf_len % UCC_GETH_MRBLR_ALIGNMENT)) | ||
1313 | return -EINVAL; | ||
1314 | |||
1315 | out_be16(mrblr_register, max_rx_buf_len); | ||
1316 | return 0; | ||
1317 | } | ||
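Since the required alignment (128, per the comment and UCC_GETH_MRBLR_ALIGNMENT) is a power of two, the modulo test above is equivalent to masking the low bits. A stand-alone check of the same validation:

#include <assert.h>
#include <stdint.h>

#define MRBLR_ALIGNMENT 128u	/* per the comment above; a power of two */

static int mrblr_valid(uint16_t len)
{
	/* for power-of-two A, (len % A) == (len & (A - 1)) */
	return len != 0 && (len & (MRBLR_ALIGNMENT - 1)) == 0;
}

int main(void)
{
	assert(mrblr_valid(1536));	/* 12 * 128: accepted */
	assert(!mrblr_valid(1500));	/* not a multiple of 128: rejected */
	assert(!mrblr_valid(0));	/* zero: rejected */
	return 0;
}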
1318 | |||
1319 | static int init_min_frame_len(u16 min_frame_length, | ||
1320 | u16 __iomem *minflr_register, | ||
1321 | u16 __iomem *mrblr_register) | ||
1322 | { | ||
1323 | u16 mrblr_value = 0; | ||
1324 | |||
1325 | mrblr_value = in_be16(mrblr_register); | ||
1326 | if (min_frame_length >= (mrblr_value - 4)) | ||
1327 | return -EINVAL; | ||
1328 | |||
1329 | out_be16(minflr_register, min_frame_length); | ||
1330 | return 0; | ||
1331 | } | ||
1332 | |||
1333 | static int adjust_enet_interface(struct ucc_geth_private *ugeth) | ||
1334 | { | ||
1335 | struct ucc_geth_info *ug_info; | ||
1336 | struct ucc_geth __iomem *ug_regs; | ||
1337 | struct ucc_fast __iomem *uf_regs; | ||
1338 | int ret_val; | ||
1339 | u32 upsmr, maccfg2; | ||
1340 | u16 value; | ||
1341 | |||
1342 | ugeth_vdbg("%s: IN", __func__); | ||
1343 | |||
1344 | ug_info = ugeth->ug_info; | ||
1345 | ug_regs = ugeth->ug_regs; | ||
1346 | uf_regs = ugeth->uccf->uf_regs; | ||
1347 | |||
1348 | /* Set MACCFG2 */ | ||
1349 | maccfg2 = in_be32(&ug_regs->maccfg2); | ||
1350 | maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK; | ||
1351 | if ((ugeth->max_speed == SPEED_10) || | ||
1352 | (ugeth->max_speed == SPEED_100)) | ||
1353 | maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE; | ||
1354 | else if (ugeth->max_speed == SPEED_1000) | ||
1355 | maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE; | ||
1356 | maccfg2 |= ug_info->padAndCrc; | ||
1357 | out_be32(&ug_regs->maccfg2, maccfg2); | ||
1358 | |||
1359 | /* Set UPSMR */ | ||
1360 | upsmr = in_be32(&uf_regs->upsmr); | ||
1361 | upsmr &= ~(UCC_GETH_UPSMR_RPM | UCC_GETH_UPSMR_R10M | | ||
1362 | UCC_GETH_UPSMR_TBIM | UCC_GETH_UPSMR_RMM); | ||
1363 | if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) || | ||
1364 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) || | ||
1365 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) || | ||
1366 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) || | ||
1367 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) || | ||
1368 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { | ||
1369 | if (ugeth->phy_interface != PHY_INTERFACE_MODE_RMII) | ||
1370 | upsmr |= UCC_GETH_UPSMR_RPM; | ||
1371 | switch (ugeth->max_speed) { | ||
1372 | case SPEED_10: | ||
1373 | upsmr |= UCC_GETH_UPSMR_R10M; | ||
1374 | /* FALLTHROUGH */ | ||
1375 | case SPEED_100: | ||
1376 | if (ugeth->phy_interface != PHY_INTERFACE_MODE_RTBI) | ||
1377 | upsmr |= UCC_GETH_UPSMR_RMM; | ||
1378 | } | ||
1379 | } | ||
1380 | if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) || | ||
1381 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { | ||
1382 | upsmr |= UCC_GETH_UPSMR_TBIM; | ||
1383 | } | ||
1384 | if (ugeth->phy_interface == PHY_INTERFACE_MODE_SGMII) | ||
1385 | upsmr |= UCC_GETH_UPSMR_SGMM; | ||
1386 | |||
1387 | out_be32(&uf_regs->upsmr, upsmr); | ||
1388 | |||
1389 | /* Disable autonegotiation in TBI mode, because by default it | ||
1390 | * comes up in autonegotiation mode. */ | ||
1391 | /* Note that this depends on proper setting in utbipar register. */ | ||
1392 | if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) || | ||
1393 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { | ||
1394 | struct ucc_geth_info *ug_info = ugeth->ug_info; | ||
1395 | struct phy_device *tbiphy; | ||
1396 | |||
1397 | if (!ug_info->tbi_node) | ||
1398 | ugeth_warn("TBI mode requires that the device " | ||
1399 | "tree specify a tbi-handle\n"); | ||
1400 | |||
1401 | tbiphy = of_phy_find_device(ug_info->tbi_node); | ||
1402 | if (!tbiphy) { | ||
1403 | ugeth_warn("Could not get TBI device\n"); | ||
1404 | return -ENODEV; | ||
1405 | } | ||
1406 | value = phy_read(tbiphy, ENET_TBI_MII_CR); | ||
1407 | value &= ~0x1000; /* Turn off autonegotiation */ | ||
1408 | phy_write(tbiphy, ENET_TBI_MII_CR, value); | ||
1409 | } | ||
1410 | init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2); | ||
1411 | |||
1412 | ret_val = init_preamble_length(ug_info->prel, &ug_regs->maccfg2); | ||
1413 | if (ret_val != 0) { | ||
1414 | if (netif_msg_probe(ugeth)) | ||
1415 | ugeth_err("%s: Preamble length must be between 3 and 7 inclusive.", | ||
1416 | __func__); | ||
1417 | return ret_val; | ||
1418 | } | ||
1419 | |||
1420 | return 0; | ||
1421 | } | ||
1422 | |||
1423 | static int ugeth_graceful_stop_tx(struct ucc_geth_private *ugeth) | ||
1424 | { | ||
1425 | struct ucc_fast_private *uccf; | ||
1426 | u32 cecr_subblock; | ||
1427 | u32 temp; | ||
1428 | int i = 10; | ||
1429 | |||
1430 | uccf = ugeth->uccf; | ||
1431 | |||
1432 | /* Mask GRACEFUL STOP TX interrupt bit and clear it */ | ||
1433 | clrbits32(uccf->p_uccm, UCC_GETH_UCCE_GRA); | ||
1434 | out_be32(uccf->p_ucce, UCC_GETH_UCCE_GRA); /* clear by writing 1 */ | ||
1435 | |||
1436 | /* Issue host command */ | ||
1437 | cecr_subblock = | ||
1438 | ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); | ||
1439 | qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock, | ||
1440 | QE_CR_PROTOCOL_ETHERNET, 0); | ||
1441 | |||
1442 | /* Wait for command to complete */ | ||
1443 | do { | ||
1444 | msleep(10); | ||
1445 | temp = in_be32(uccf->p_ucce); | ||
1446 | } while (!(temp & UCC_GETH_UCCE_GRA) && --i); | ||
1447 | |||
1448 | uccf->stopped_tx = 1; | ||
1449 | |||
1450 | return 0; | ||
1451 | } | ||
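The loop above polls UCCE up to ten times at 10 ms intervals, but observe that the function returns 0 even if the GRA event never arrived. A user-space sketch of the same bounded-poll shape that does report the timeout; sleep_ms() and event_set() are stand-ins for msleep() and the UCCE read:

#include <stdio.h>
#include <time.h>

static void sleep_ms(int ms)
{
	struct timespec ts = { ms / 1000, (long)(ms % 1000) * 1000000L };

	nanosleep(&ts, NULL);
}

/* event_set() stands in for "read UCCE and test the GRA bit" */
static int poll_event(int (*event_set)(void), int tries, int interval_ms)
{
	while (tries--) {
		sleep_ms(interval_ms);	/* msleep() in the driver */
		if (event_set())
			return 0;	/* event observed in time */
	}
	return -1;			/* would be -ETIMEDOUT */
}

static int fires_on_third_call(void)
{
	static int calls;

	return ++calls >= 3;
}

int main(void)
{
	printf("result: %d\n", poll_event(fires_on_third_call, 10, 10));
	return 0;
}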
1452 | |||
1453 | static int ugeth_graceful_stop_rx(struct ucc_geth_private *ugeth) | ||
1454 | { | ||
1455 | struct ucc_fast_private *uccf; | ||
1456 | u32 cecr_subblock; | ||
1457 | u8 temp; | ||
1458 | int i = 10; | ||
1459 | |||
1460 | uccf = ugeth->uccf; | ||
1461 | |||
1462 | /* Clear acknowledge bit */ | ||
1463 | temp = in_8(&ugeth->p_rx_glbl_pram->rxgstpack); | ||
1464 | temp &= ~GRACEFUL_STOP_ACKNOWLEDGE_RX; | ||
1465 | out_8(&ugeth->p_rx_glbl_pram->rxgstpack, temp); | ||
1466 | |||
1467 | /* Keep issuing command and checking acknowledge bit until | ||
1468 | it is asserted, according to spec */ | ||
1469 | do { | ||
1470 | /* Issue host command */ | ||
1471 | cecr_subblock = | ||
1472 | ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info. | ||
1473 | ucc_num); | ||
1474 | qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock, | ||
1475 | QE_CR_PROTOCOL_ETHERNET, 0); | ||
1476 | msleep(10); | ||
1477 | temp = in_8(&ugeth->p_rx_glbl_pram->rxgstpack); | ||
1478 | } while (!(temp & GRACEFUL_STOP_ACKNOWLEDGE_RX) && --i); | ||
1479 | |||
1480 | uccf->stopped_rx = 1; | ||
1481 | |||
1482 | return 0; | ||
1483 | } | ||
1484 | |||
1485 | static int ugeth_restart_tx(struct ucc_geth_private *ugeth) | ||
1486 | { | ||
1487 | struct ucc_fast_private *uccf; | ||
1488 | u32 cecr_subblock; | ||
1489 | |||
1490 | uccf = ugeth->uccf; | ||
1491 | |||
1492 | cecr_subblock = | ||
1493 | ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); | ||
1494 | qe_issue_cmd(QE_RESTART_TX, cecr_subblock, QE_CR_PROTOCOL_ETHERNET, 0); | ||
1495 | uccf->stopped_tx = 0; | ||
1496 | |||
1497 | return 0; | ||
1498 | } | ||
1499 | |||
1500 | static int ugeth_restart_rx(struct ucc_geth_private *ugeth) | ||
1501 | { | ||
1502 | struct ucc_fast_private *uccf; | ||
1503 | u32 cecr_subblock; | ||
1504 | |||
1505 | uccf = ugeth->uccf; | ||
1506 | |||
1507 | cecr_subblock = | ||
1508 | ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); | ||
1509 | qe_issue_cmd(QE_RESTART_RX, cecr_subblock, QE_CR_PROTOCOL_ETHERNET, | ||
1510 | 0); | ||
1511 | uccf->stopped_rx = 0; | ||
1512 | |||
1513 | return 0; | ||
1514 | } | ||
1515 | |||
1516 | static int ugeth_enable(struct ucc_geth_private *ugeth, enum comm_dir mode) | ||
1517 | { | ||
1518 | struct ucc_fast_private *uccf; | ||
1519 | int enabled_tx, enabled_rx; | ||
1520 | |||
1521 | uccf = ugeth->uccf; | ||
1522 | |||
1523 | /* check if the UCC number is in range. */ | ||
1524 | if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) { | ||
1525 | if (netif_msg_probe(ugeth)) | ||
1526 | ugeth_err("%s: ucc_num out of range.", __func__); | ||
1527 | return -EINVAL; | ||
1528 | } | ||
1529 | |||
1530 | enabled_tx = uccf->enabled_tx; | ||
1531 | enabled_rx = uccf->enabled_rx; | ||
1532 | |||
1533 | /* Get Tx and Rx going again, in case this channel was actively | ||
1534 | disabled. */ | ||
1535 | if ((mode & COMM_DIR_TX) && (!enabled_tx) && uccf->stopped_tx) | ||
1536 | ugeth_restart_tx(ugeth); | ||
1537 | if ((mode & COMM_DIR_RX) && (!enabled_rx) && uccf->stopped_rx) | ||
1538 | ugeth_restart_rx(ugeth); | ||
1539 | |||
1540 | ucc_fast_enable(uccf, mode); /* OK to do even if not disabled */ | ||
1541 | |||
1542 | return 0; | ||
1543 | |||
1544 | } | ||
1545 | |||
1546 | static int ugeth_disable(struct ucc_geth_private *ugeth, enum comm_dir mode) | ||
1547 | { | ||
1548 | struct ucc_fast_private *uccf; | ||
1549 | |||
1550 | uccf = ugeth->uccf; | ||
1551 | |||
1552 | /* check if the UCC number is in range. */ | ||
1553 | if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) { | ||
1554 | if (netif_msg_probe(ugeth)) | ||
1555 | ugeth_err("%s: ucc_num out of range.", __func__); | ||
1556 | return -EINVAL; | ||
1557 | } | ||
1558 | |||
1559 | /* Stop any transmissions */ | ||
1560 | if ((mode & COMM_DIR_TX) && uccf->enabled_tx && !uccf->stopped_tx) | ||
1561 | ugeth_graceful_stop_tx(ugeth); | ||
1562 | |||
1563 | /* Stop any receptions */ | ||
1564 | if ((mode & COMM_DIR_RX) && uccf->enabled_rx && !uccf->stopped_rx) | ||
1565 | ugeth_graceful_stop_rx(ugeth); | ||
1566 | |||
1567 | ucc_fast_disable(ugeth->uccf, mode); /* OK to do even if not enabled */ | ||
1568 | |||
1569 | return 0; | ||
1570 | } | ||
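Together, ugeth_disable() and ugeth_enable() bracket any reconfiguration that needs the controller idle; adjust_link() below uses exactly this shape. As a fragment:

	/* pause both directions, touch the MAC registers, restart */
	ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
	/* ... rewrite maccfg2 / upsmr here ... */
	ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);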
1571 | |||
1572 | static void ugeth_quiesce(struct ucc_geth_private *ugeth) | ||
1573 | { | ||
1574 | /* Prevent any further xmits, plus detach the device. */ | ||
1575 | netif_device_detach(ugeth->ndev); | ||
1576 | |||
1577 | /* Wait for any current xmits to finish. */ | ||
1578 | netif_tx_disable(ugeth->ndev); | ||
1579 | |||
1580 | /* Disable the interrupt to avoid NAPI rescheduling. */ | ||
1581 | disable_irq(ugeth->ug_info->uf_info.irq); | ||
1582 | |||
1583 | /* Stop NAPI, and possibly wait for its completion. */ | ||
1584 | napi_disable(&ugeth->napi); | ||
1585 | } | ||
1586 | |||
1587 | static void ugeth_activate(struct ucc_geth_private *ugeth) | ||
1588 | { | ||
1589 | napi_enable(&ugeth->napi); | ||
1590 | enable_irq(ugeth->ug_info->uf_info.irq); | ||
1591 | netif_device_attach(ugeth->ndev); | ||
1592 | } | ||
1593 | |||
1594 | /* Called every time the controller might need to be made | ||
1595 | * aware of new link state. The PHY code conveys this | ||
1596 | * information through variables in the ugeth structure, and this | ||
1597 | * function converts those variables into the appropriate | ||
1598 | * register values, and can bring down the device if needed. | ||
1599 | */ | ||
1600 | |||
1601 | static void adjust_link(struct net_device *dev) | ||
1602 | { | ||
1603 | struct ucc_geth_private *ugeth = netdev_priv(dev); | ||
1604 | struct ucc_geth __iomem *ug_regs; | ||
1605 | struct ucc_fast __iomem *uf_regs; | ||
1606 | struct phy_device *phydev = ugeth->phydev; | ||
1607 | int new_state = 0; | ||
1608 | |||
1609 | ug_regs = ugeth->ug_regs; | ||
1610 | uf_regs = ugeth->uccf->uf_regs; | ||
1611 | |||
1612 | if (phydev->link) { | ||
1613 | u32 tempval = in_be32(&ug_regs->maccfg2); | ||
1614 | u32 upsmr = in_be32(&uf_regs->upsmr); | ||
1615 | /* Now we make sure that we can be in full duplex mode. | ||
1616 | * If not, we operate in half-duplex mode. */ | ||
1617 | if (phydev->duplex != ugeth->oldduplex) { | ||
1618 | new_state = 1; | ||
1619 | if (!(phydev->duplex)) | ||
1620 | tempval &= ~(MACCFG2_FDX); | ||
1621 | else | ||
1622 | tempval |= MACCFG2_FDX; | ||
1623 | ugeth->oldduplex = phydev->duplex; | ||
1624 | } | ||
1625 | |||
1626 | if (phydev->speed != ugeth->oldspeed) { | ||
1627 | new_state = 1; | ||
1628 | switch (phydev->speed) { | ||
1629 | case SPEED_1000: | ||
1630 | tempval = ((tempval & | ||
1631 | ~(MACCFG2_INTERFACE_MODE_MASK)) | | ||
1632 | MACCFG2_INTERFACE_MODE_BYTE); | ||
1633 | break; | ||
1634 | case SPEED_100: | ||
1635 | case SPEED_10: | ||
1636 | tempval = ((tempval & | ||
1637 | ~(MACCFG2_INTERFACE_MODE_MASK)) | | ||
1638 | MACCFG2_INTERFACE_MODE_NIBBLE); | ||
1639 | /* if reduced mode, re-set UPSMR.R10M */ | ||
1640 | if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) || | ||
1641 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) || | ||
1642 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) || | ||
1643 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) || | ||
1644 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) || | ||
1645 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { | ||
1646 | if (phydev->speed == SPEED_10) | ||
1647 | upsmr |= UCC_GETH_UPSMR_R10M; | ||
1648 | else | ||
1649 | upsmr &= ~UCC_GETH_UPSMR_R10M; | ||
1650 | } | ||
1651 | break; | ||
1652 | default: | ||
1653 | if (netif_msg_link(ugeth)) | ||
1654 | ugeth_warn( | ||
1655 | "%s: Ack! Speed (%d) is not 10/100/1000!", | ||
1656 | dev->name, phydev->speed); | ||
1657 | break; | ||
1658 | } | ||
1659 | ugeth->oldspeed = phydev->speed; | ||
1660 | } | ||
1661 | |||
1662 | if (!ugeth->oldlink) { | ||
1663 | new_state = 1; | ||
1664 | ugeth->oldlink = 1; | ||
1665 | } | ||
1666 | |||
1667 | if (new_state) { | ||
1668 | /* | ||
1669 | * To change the MAC configuration we need to disable | ||
1670 | * the controller. To do so, we have to either grab | ||
1671 | * ugeth->lock, which is a bad idea since 'graceful | ||
1672 | * stop' commands might take quite a while, or we can | ||
1673 | * quiesce driver's activity. | ||
1674 | */ | ||
1675 | ugeth_quiesce(ugeth); | ||
1676 | ugeth_disable(ugeth, COMM_DIR_RX_AND_TX); | ||
1677 | |||
1678 | out_be32(&ug_regs->maccfg2, tempval); | ||
1679 | out_be32(&uf_regs->upsmr, upsmr); | ||
1680 | |||
1681 | ugeth_enable(ugeth, COMM_DIR_RX_AND_TX); | ||
1682 | ugeth_activate(ugeth); | ||
1683 | } | ||
1684 | } else if (ugeth->oldlink) { | ||
1685 | new_state = 1; | ||
1686 | ugeth->oldlink = 0; | ||
1687 | ugeth->oldspeed = 0; | ||
1688 | ugeth->oldduplex = -1; | ||
1689 | } | ||
1690 | |||
1691 | if (new_state && netif_msg_link(ugeth)) | ||
1692 | phy_print_status(phydev); | ||
1693 | } | ||
1694 | |||
1695 | /* Initialize TBI PHY interface for communicating with the | ||
1696 | * Lynx SERDES PHY on the chip. We communicate with this PHY | ||
1697 | * through the MDIO bus on each controller, treating it as a | ||
1698 | * "normal" PHY at the address found in the UTBIPAR register. We assume | ||
1699 | * that the UTBIPAR register is valid. Either the MDIO bus code will set | ||
1700 | * it to a value that doesn't conflict with other PHYs on the bus, or the | ||
1701 | * value doesn't matter, as there are no other PHYs on the bus. | ||
1702 | */ | ||
1703 | static void uec_configure_serdes(struct net_device *dev) | ||
1704 | { | ||
1705 | struct ucc_geth_private *ugeth = netdev_priv(dev); | ||
1706 | struct ucc_geth_info *ug_info = ugeth->ug_info; | ||
1707 | struct phy_device *tbiphy; | ||
1708 | |||
1709 | if (!ug_info->tbi_node) { | ||
1710 | dev_warn(&dev->dev, "SGMII mode requires that the device " | ||
1711 | "tree specify a tbi-handle\n"); | ||
1712 | return; | ||
1713 | } | ||
1714 | |||
1715 | tbiphy = of_phy_find_device(ug_info->tbi_node); | ||
1716 | if (!tbiphy) { | ||
1717 | dev_err(&dev->dev, "error: Could not get TBI device\n"); | ||
1718 | return; | ||
1719 | } | ||
1720 | |||
1721 | /* | ||
1722 | * If the link is already up, we must already be ok, and don't need to | ||
1723 | * configure and reset the TBI<->SerDes link. Maybe U-Boot configured | ||
1724 | * everything for us? Resetting it takes the link down and requires | ||
1725 | * several seconds for it to come back. | ||
1726 | */ | ||
1727 | if (phy_read(tbiphy, ENET_TBI_MII_SR) & TBISR_LSTATUS) | ||
1728 | return; | ||
1729 | |||
1730 | /* Single clock mode, MII mode off (for SerDes communication) */ | ||
1731 | phy_write(tbiphy, ENET_TBI_MII_ANA, TBIANA_SETTINGS); | ||
1732 | |||
1733 | phy_write(tbiphy, ENET_TBI_MII_TBICON, TBICON_CLK_SELECT); | ||
1734 | |||
1735 | phy_write(tbiphy, ENET_TBI_MII_CR, TBICR_SETTINGS); | ||
1736 | } | ||
1737 | |||
1738 | /* Configure the PHY for dev. | ||
1739 | * Returns 0 on success, a negative errno (-ENODEV) on failure. | ||
1740 | */ | ||
1741 | static int init_phy(struct net_device *dev) | ||
1742 | { | ||
1743 | struct ucc_geth_private *priv = netdev_priv(dev); | ||
1744 | struct ucc_geth_info *ug_info = priv->ug_info; | ||
1745 | struct phy_device *phydev; | ||
1746 | |||
1747 | priv->oldlink = 0; | ||
1748 | priv->oldspeed = 0; | ||
1749 | priv->oldduplex = -1; | ||
1750 | |||
1751 | phydev = of_phy_connect(dev, ug_info->phy_node, &adjust_link, 0, | ||
1752 | priv->phy_interface); | ||
1753 | if (!phydev) | ||
1754 | phydev = of_phy_connect_fixed_link(dev, &adjust_link, | ||
1755 | priv->phy_interface); | ||
1756 | if (!phydev) { | ||
1757 | dev_err(&dev->dev, "Could not attach to PHY\n"); | ||
1758 | return -ENODEV; | ||
1759 | } | ||
1760 | |||
1761 | if (priv->phy_interface == PHY_INTERFACE_MODE_SGMII) | ||
1762 | uec_configure_serdes(dev); | ||
1763 | |||
1764 | phydev->supported &= (SUPPORTED_MII | | ||
1765 | SUPPORTED_Autoneg | | ||
1766 | SUPPORTED_10baseT_Half | | ||
1767 | SUPPORTED_10baseT_Full | | ||
1768 | SUPPORTED_100baseT_Half | | ||
1769 | SUPPORTED_100baseT_Full); | ||
1770 | |||
1771 | if (priv->max_speed == SPEED_1000) | ||
1772 | phydev->supported |= SUPPORTED_1000baseT_Full; | ||
1773 | |||
1774 | phydev->advertising = phydev->supported; | ||
1775 | |||
1776 | priv->phydev = phydev; | ||
1777 | |||
1778 | return 0; | ||
1779 | } | ||
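phydev->supported is an ethtool SUPPORTED_* mask, and it is easy to mix ADVERTISED_* flags into it here; that mix happens to behave correctly because ethtool defines the two namespaces with numerically identical bits, but the SUPPORTED_* spellings are the correct ones. A stand-alone check of that aliasing, with the bit values copied from include/linux/ethtool.h of this era so it compiles outside the kernel tree:

#include <stdio.h>

/* Bit values as in include/linux/ethtool.h of this era. */
#define SUPPORTED_10baseT_Half		(1 << 0)
#define ADVERTISED_10baseT_Half		(1 << 0)
#define SUPPORTED_100baseT_Full		(1 << 3)
#define ADVERTISED_100baseT_Full	(1 << 3)
#define SUPPORTED_1000baseT_Full	(1 << 5)
#define ADVERTISED_1000baseT_Full	(1 << 5)

int main(void)
{
	/* prints "1 1 1": the two flag namespaces are numerically aliased */
	printf("%d %d %d\n",
	       SUPPORTED_10baseT_Half == ADVERTISED_10baseT_Half,
	       SUPPORTED_100baseT_Full == ADVERTISED_100baseT_Full,
	       SUPPORTED_1000baseT_Full == ADVERTISED_1000baseT_Full);
	return 0;
}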
1780 | |||
1781 | static void ugeth_dump_regs(struct ucc_geth_private *ugeth) | ||
1782 | { | ||
1783 | #ifdef DEBUG | ||
1784 | ucc_fast_dump_regs(ugeth->uccf); | ||
1785 | dump_regs(ugeth); | ||
1786 | dump_bds(ugeth); | ||
1787 | #endif | ||
1788 | } | ||
1789 | |||
1790 | static int ugeth_82xx_filtering_clear_all_addr_in_hash(struct ucc_geth_private * | ||
1791 | ugeth, | ||
1792 | enum enet_addr_type | ||
1793 | enet_addr_type) | ||
1794 | { | ||
1795 | struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; | ||
1796 | struct ucc_fast_private *uccf; | ||
1797 | enum comm_dir comm_dir; | ||
1798 | struct list_head *p_lh; | ||
1799 | u16 i, num; | ||
1800 | u32 __iomem *addr_h; | ||
1801 | u32 __iomem *addr_l; | ||
1802 | u8 *p_counter; | ||
1803 | |||
1804 | uccf = ugeth->uccf; | ||
1805 | |||
1806 | p_82xx_addr_filt = | ||
1807 | (struct ucc_geth_82xx_address_filtering_pram __iomem *) | ||
1808 | ugeth->p_rx_glbl_pram->addressfiltering; | ||
1809 | |||
1810 | if (enet_addr_type == ENET_ADDR_TYPE_GROUP) { | ||
1811 | addr_h = &(p_82xx_addr_filt->gaddr_h); | ||
1812 | addr_l = &(p_82xx_addr_filt->gaddr_l); | ||
1813 | p_lh = &ugeth->group_hash_q; | ||
1814 | p_counter = &(ugeth->numGroupAddrInHash); | ||
1815 | } else if (enet_addr_type == ENET_ADDR_TYPE_INDIVIDUAL) { | ||
1816 | addr_h = &(p_82xx_addr_filt->iaddr_h); | ||
1817 | addr_l = &(p_82xx_addr_filt->iaddr_l); | ||
1818 | p_lh = &ugeth->ind_hash_q; | ||
1819 | p_counter = &(ugeth->numIndAddrInHash); | ||
1820 | } else | ||
1821 | return -EINVAL; | ||
1822 | |||
1823 | comm_dir = 0; | ||
1824 | if (uccf->enabled_tx) | ||
1825 | comm_dir |= COMM_DIR_TX; | ||
1826 | if (uccf->enabled_rx) | ||
1827 | comm_dir |= COMM_DIR_RX; | ||
1828 | if (comm_dir) | ||
1829 | ugeth_disable(ugeth, comm_dir); | ||
1830 | |||
1831 | /* Clear the hash table. */ | ||
1832 | out_be32(addr_h, 0x00000000); | ||
1833 | out_be32(addr_l, 0x00000000); | ||
1834 | |||
1835 | if (!p_lh) | ||
1836 | return 0; | ||
1837 | |||
1838 | num = *p_counter; | ||
1839 | |||
1840 | /* Delete all remaining CQ elements */ | ||
1841 | for (i = 0; i < num; i++) | ||
1842 | put_enet_addr_container(ENET_ADDR_CONT_ENTRY(dequeue(p_lh))); | ||
1843 | |||
1844 | *p_counter = 0; | ||
1845 | |||
1846 | if (comm_dir) | ||
1847 | ugeth_enable(ugeth, comm_dir); | ||
1848 | |||
1849 | return 0; | ||
1850 | } | ||
1851 | |||
1852 | static int ugeth_82xx_filtering_clear_addr_in_paddr(struct ucc_geth_private *ugeth, | ||
1853 | u8 paddr_num) | ||
1854 | { | ||
1855 | ugeth->indAddrRegUsed[paddr_num] = 0; /* mark this paddr as not used */ | ||
1856 | return hw_clear_addr_in_paddr(ugeth, paddr_num);/* clear in hardware */ | ||
1857 | } | ||
1858 | |||
1859 | static void ucc_geth_memclean(struct ucc_geth_private *ugeth) | ||
1860 | { | ||
1861 | u16 i, j; | ||
1862 | u8 __iomem *bd; | ||
1863 | |||
1864 | if (!ugeth) | ||
1865 | return; | ||
1866 | |||
1867 | if (ugeth->uccf) { | ||
1868 | ucc_fast_free(ugeth->uccf); | ||
1869 | ugeth->uccf = NULL; | ||
1870 | } | ||
1871 | |||
1872 | if (ugeth->p_thread_data_tx) { | ||
1873 | qe_muram_free(ugeth->thread_dat_tx_offset); | ||
1874 | ugeth->p_thread_data_tx = NULL; | ||
1875 | } | ||
1876 | if (ugeth->p_thread_data_rx) { | ||
1877 | qe_muram_free(ugeth->thread_dat_rx_offset); | ||
1878 | ugeth->p_thread_data_rx = NULL; | ||
1879 | } | ||
1880 | if (ugeth->p_exf_glbl_param) { | ||
1881 | qe_muram_free(ugeth->exf_glbl_param_offset); | ||
1882 | ugeth->p_exf_glbl_param = NULL; | ||
1883 | } | ||
1884 | if (ugeth->p_rx_glbl_pram) { | ||
1885 | qe_muram_free(ugeth->rx_glbl_pram_offset); | ||
1886 | ugeth->p_rx_glbl_pram = NULL; | ||
1887 | } | ||
1888 | if (ugeth->p_tx_glbl_pram) { | ||
1889 | qe_muram_free(ugeth->tx_glbl_pram_offset); | ||
1890 | ugeth->p_tx_glbl_pram = NULL; | ||
1891 | } | ||
1892 | if (ugeth->p_send_q_mem_reg) { | ||
1893 | qe_muram_free(ugeth->send_q_mem_reg_offset); | ||
1894 | ugeth->p_send_q_mem_reg = NULL; | ||
1895 | } | ||
1896 | if (ugeth->p_scheduler) { | ||
1897 | qe_muram_free(ugeth->scheduler_offset); | ||
1898 | ugeth->p_scheduler = NULL; | ||
1899 | } | ||
1900 | if (ugeth->p_tx_fw_statistics_pram) { | ||
1901 | qe_muram_free(ugeth->tx_fw_statistics_pram_offset); | ||
1902 | ugeth->p_tx_fw_statistics_pram = NULL; | ||
1903 | } | ||
1904 | if (ugeth->p_rx_fw_statistics_pram) { | ||
1905 | qe_muram_free(ugeth->rx_fw_statistics_pram_offset); | ||
1906 | ugeth->p_rx_fw_statistics_pram = NULL; | ||
1907 | } | ||
1908 | if (ugeth->p_rx_irq_coalescing_tbl) { | ||
1909 | qe_muram_free(ugeth->rx_irq_coalescing_tbl_offset); | ||
1910 | ugeth->p_rx_irq_coalescing_tbl = NULL; | ||
1911 | } | ||
1912 | if (ugeth->p_rx_bd_qs_tbl) { | ||
1913 | qe_muram_free(ugeth->rx_bd_qs_tbl_offset); | ||
1914 | ugeth->p_rx_bd_qs_tbl = NULL; | ||
1915 | } | ||
1916 | if (ugeth->p_init_enet_param_shadow) { | ||
1917 | return_init_enet_entries(ugeth, | ||
1918 | &(ugeth->p_init_enet_param_shadow-> | ||
1919 | rxthread[0]), | ||
1920 | ENET_INIT_PARAM_MAX_ENTRIES_RX, | ||
1921 | ugeth->ug_info->riscRx, 1); | ||
1922 | return_init_enet_entries(ugeth, | ||
1923 | &(ugeth->p_init_enet_param_shadow-> | ||
1924 | txthread[0]), | ||
1925 | ENET_INIT_PARAM_MAX_ENTRIES_TX, | ||
1926 | ugeth->ug_info->riscTx, 0); | ||
1927 | kfree(ugeth->p_init_enet_param_shadow); | ||
1928 | ugeth->p_init_enet_param_shadow = NULL; | ||
1929 | } | ||
1930 | for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) { | ||
1931 | bd = ugeth->p_tx_bd_ring[i]; | ||
1932 | if (!bd) | ||
1933 | continue; | ||
1934 | for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) { | ||
1935 | if (ugeth->tx_skbuff[i][j]) { | ||
1936 | dma_unmap_single(ugeth->dev, | ||
1937 | in_be32(&((struct qe_bd __iomem *)bd)->buf), | ||
1938 | (in_be32((u32 __iomem *)bd) & | ||
1939 | BD_LENGTH_MASK), | ||
1940 | DMA_TO_DEVICE); | ||
1941 | dev_kfree_skb_any(ugeth->tx_skbuff[i][j]); | ||
1942 | ugeth->tx_skbuff[i][j] = NULL; | ||
1943 | } | ||
1944 | } | ||
1945 | |||
1946 | kfree(ugeth->tx_skbuff[i]); | ||
1947 | |||
1948 | if (ugeth->p_tx_bd_ring[i]) { | ||
1949 | if (ugeth->ug_info->uf_info.bd_mem_part == | ||
1950 | MEM_PART_SYSTEM) | ||
1951 | kfree((void *)ugeth->tx_bd_ring_offset[i]); | ||
1952 | else if (ugeth->ug_info->uf_info.bd_mem_part == | ||
1953 | MEM_PART_MURAM) | ||
1954 | qe_muram_free(ugeth->tx_bd_ring_offset[i]); | ||
1955 | ugeth->p_tx_bd_ring[i] = NULL; | ||
1956 | } | ||
1957 | } | ||
1958 | for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) { | ||
1959 | if (ugeth->p_rx_bd_ring[i]) { | ||
1960 | /* Return existing data buffers in ring */ | ||
1961 | bd = ugeth->p_rx_bd_ring[i]; | ||
1962 | for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) { | ||
1963 | if (ugeth->rx_skbuff[i][j]) { | ||
1964 | dma_unmap_single(ugeth->dev, | ||
1965 | in_be32(&((struct qe_bd __iomem *)bd)->buf), | ||
1966 | ugeth->ug_info-> | ||
1967 | uf_info.max_rx_buf_length + | ||
1968 | UCC_GETH_RX_DATA_BUF_ALIGNMENT, | ||
1969 | DMA_FROM_DEVICE); | ||
1970 | dev_kfree_skb_any( | ||
1971 | ugeth->rx_skbuff[i][j]); | ||
1972 | ugeth->rx_skbuff[i][j] = NULL; | ||
1973 | } | ||
1974 | bd += sizeof(struct qe_bd); | ||
1975 | } | ||
1976 | |||
1977 | kfree(ugeth->rx_skbuff[i]); | ||
1978 | |||
1979 | if (ugeth->ug_info->uf_info.bd_mem_part == | ||
1980 | MEM_PART_SYSTEM) | ||
1981 | kfree((void *)ugeth->rx_bd_ring_offset[i]); | ||
1982 | else if (ugeth->ug_info->uf_info.bd_mem_part == | ||
1983 | MEM_PART_MURAM) | ||
1984 | qe_muram_free(ugeth->rx_bd_ring_offset[i]); | ||
1985 | ugeth->p_rx_bd_ring[i] = NULL; | ||
1986 | } | ||
1987 | } | ||
1988 | while (!list_empty(&ugeth->group_hash_q)) | ||
1989 | put_enet_addr_container(ENET_ADDR_CONT_ENTRY | ||
1990 | (dequeue(&ugeth->group_hash_q))); | ||
1991 | while (!list_empty(&ugeth->ind_hash_q)) | ||
1992 | put_enet_addr_container(ENET_ADDR_CONT_ENTRY | ||
1993 | (dequeue(&ugeth->ind_hash_q))); | ||
1994 | if (ugeth->ug_regs) { | ||
1995 | iounmap(ugeth->ug_regs); | ||
1996 | ugeth->ug_regs = NULL; | ||
1997 | } | ||
1998 | |||
1999 | skb_queue_purge(&ugeth->rx_recycle); | ||
2000 | } | ||
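ucc_geth_memclean() above releases every MURAM and system-memory allocation made during startup and clears each pointer afterwards, so a repeated call is a harmless no-op. A minimal userspace sketch of that free-and-NULL idempotency pattern, with plain malloc/free standing in for the driver's qe_muram_alloc/qe_muram_free:

#include <stdlib.h>

struct ctx {
	void *tx_pram;	/* stands in for ugeth->p_tx_glbl_pram */
	void *rx_pram;	/* stands in for ugeth->p_rx_glbl_pram */
};

static void ctx_clean(struct ctx *c)
{
	if (c->tx_pram) {
		free(c->tx_pram);	/* qe_muram_free() in the driver */
		c->tx_pram = NULL;	/* clearing makes a second call harmless */
	}
	if (c->rx_pram) {
		free(c->rx_pram);
		c->rx_pram = NULL;
	}
}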
2001 | |||
2002 | static void ucc_geth_set_multi(struct net_device *dev) | ||
2003 | { | ||
2004 | struct ucc_geth_private *ugeth; | ||
2005 | struct netdev_hw_addr *ha; | ||
2006 | struct ucc_fast __iomem *uf_regs; | ||
2007 | struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; | ||
2008 | |||
2009 | ugeth = netdev_priv(dev); | ||
2010 | |||
2011 | uf_regs = ugeth->uccf->uf_regs; | ||
2012 | |||
2013 | if (dev->flags & IFF_PROMISC) { | ||
2014 | setbits32(&uf_regs->upsmr, UCC_GETH_UPSMR_PRO); | ||
2015 | } else { | ||
2016 | clrbits32(&uf_regs->upsmr, UCC_GETH_UPSMR_PRO); | ||
2017 | |||
2018 | p_82xx_addr_filt = | ||
2019 | (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth-> | ||
2020 | p_rx_glbl_pram->addressfiltering; | ||
2021 | |||
2022 | if (dev->flags & IFF_ALLMULTI) { | ||
2023 | /* Catch all multicast addresses, so set the | ||
2024 | * filter to all 1's. | ||
2025 | */ | ||
2026 | out_be32(&p_82xx_addr_filt->gaddr_h, 0xffffffff); | ||
2027 | out_be32(&p_82xx_addr_filt->gaddr_l, 0xffffffff); | ||
2028 | } else { | ||
2029 | /* Clear filter and add the addresses in the list. | ||
2030 | */ | ||
2031 | out_be32(&p_82xx_addr_filt->gaddr_h, 0x0); | ||
2032 | out_be32(&p_82xx_addr_filt->gaddr_l, 0x0); | ||
2033 | |||
2034 | netdev_for_each_mc_addr(ha, dev) { | ||
2035 | /* Ask CPM to run CRC and set bit in | ||
2036 | * filter mask. | ||
2037 | */ | ||
2038 | hw_add_addr_in_hash(ugeth, ha->addr); | ||
2039 | } | ||
2040 | } | ||
2041 | } | ||
2042 | } | ||
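Outside promiscuous mode the group filter is a 64-bit hash split across the gaddr_h/gaddr_l register pair: IFF_ALLMULTI writes all ones, otherwise both words are cleared and hw_add_addr_in_hash() asks the CPM to CRC each address into a bit position. A hedged sketch of the register-pair bookkeeping; the bit ordering below is illustrative only, since the real index comes from the CPM's CRC:

#include <stdint.h>

struct gaddr {
	uint32_t hi;	/* gaddr_h */
	uint32_t lo;	/* gaddr_l */
};

/* idx in 0..63; the low-half -> gaddr_l mapping is an assumption made
 * purely for illustration. */
static void gaddr_set_bit(struct gaddr *g, unsigned int idx)
{
	if (idx < 32)
		g->lo |= 1u << idx;
	else
		g->hi |= 1u << (idx - 32);
}

static void gaddr_accept_all(struct gaddr *g)
{
	g->hi = 0xffffffff;	/* mirrors the IFF_ALLMULTI branch above */
	g->lo = 0xffffffff;
}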
2043 | |||
2044 | static void ucc_geth_stop(struct ucc_geth_private *ugeth) | ||
2045 | { | ||
2046 | struct ucc_geth __iomem *ug_regs = ugeth->ug_regs; | ||
2047 | struct phy_device *phydev = ugeth->phydev; | ||
2048 | |||
2049 | ugeth_vdbg("%s: IN", __func__); | ||
2050 | |||
2051 | /* | ||
2052 | * Tell the kernel the link is down. | ||
2053 | * Must be done before disabling the controller | ||
2054 | * or deadlock may happen. | ||
2055 | */ | ||
2056 | phy_stop(phydev); | ||
2057 | |||
2058 | /* Disable the controller */ | ||
2059 | ugeth_disable(ugeth, COMM_DIR_RX_AND_TX); | ||
2060 | |||
2061 | /* Mask all interrupts */ | ||
2062 | out_be32(ugeth->uccf->p_uccm, 0x00000000); | ||
2063 | |||
2064 | /* Clear all interrupts */ | ||
2065 | out_be32(ugeth->uccf->p_ucce, 0xffffffff); | ||
2066 | |||
2067 | /* Disable Rx and Tx */ | ||
2068 | clrbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX); | ||
2069 | |||
2070 | ucc_geth_memclean(ugeth); | ||
2071 | } | ||
2072 | |||
2073 | static int ucc_struct_init(struct ucc_geth_private *ugeth) | ||
2074 | { | ||
2075 | struct ucc_geth_info *ug_info; | ||
2076 | struct ucc_fast_info *uf_info; | ||
2077 | int i; | ||
2078 | |||
2079 | ug_info = ugeth->ug_info; | ||
2080 | uf_info = &ug_info->uf_info; | ||
2081 | |||
2082 | if (!((uf_info->bd_mem_part == MEM_PART_SYSTEM) || | ||
2083 | (uf_info->bd_mem_part == MEM_PART_MURAM))) { | ||
2084 | if (netif_msg_probe(ugeth)) | ||
2085 | ugeth_err("%s: Bad memory partition value.", | ||
2086 | __func__); | ||
2087 | return -EINVAL; | ||
2088 | } | ||
2089 | |||
2090 | /* Rx BD lengths */ | ||
2091 | for (i = 0; i < ug_info->numQueuesRx; i++) { | ||
2092 | if ((ug_info->bdRingLenRx[i] < UCC_GETH_RX_BD_RING_SIZE_MIN) || | ||
2093 | (ug_info->bdRingLenRx[i] % | ||
2094 | UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT)) { | ||
2095 | if (netif_msg_probe(ugeth)) | ||
2096 | ugeth_err | ||
2097 | ("%s: Rx BD ring length must be multiple of 4, no smaller than 8.", | ||
2098 | __func__); | ||
2099 | return -EINVAL; | ||
2100 | } | ||
2101 | } | ||
2102 | |||
2103 | /* Tx BD lengths */ | ||
2104 | for (i = 0; i < ug_info->numQueuesTx; i++) { | ||
2105 | if (ug_info->bdRingLenTx[i] < UCC_GETH_TX_BD_RING_SIZE_MIN) { | ||
2106 | if (netif_msg_probe(ugeth)) | ||
2107 | ugeth_err | ||
2108 | ("%s: Tx BD ring length must be no smaller than 2.", | ||
2109 | __func__); | ||
2110 | return -EINVAL; | ||
2111 | } | ||
2112 | } | ||
2113 | |||
2114 | /* mrblr */ | ||
2115 | if ((uf_info->max_rx_buf_length == 0) || | ||
2116 | (uf_info->max_rx_buf_length % UCC_GETH_MRBLR_ALIGNMENT)) { | ||
2117 | if (netif_msg_probe(ugeth)) | ||
2118 | ugeth_err | ||
2119 | ("%s: max_rx_buf_length must be non-zero multiple of 128.", | ||
2120 | __func__); | ||
2121 | return -EINVAL; | ||
2122 | } | ||
2123 | |||
2124 | /* num Tx queues */ | ||
2125 | if (ug_info->numQueuesTx > NUM_TX_QUEUES) { | ||
2126 | if (netif_msg_probe(ugeth)) | ||
2127 | ugeth_err("%s: number of tx queues too large.", __func__); | ||
2128 | return -EINVAL; | ||
2129 | } | ||
2130 | |||
2131 | /* num Rx queues */ | ||
2132 | if (ug_info->numQueuesRx > NUM_RX_QUEUES) { | ||
2133 | if (netif_msg_probe(ugeth)) | ||
2134 | ugeth_err("%s: number of rx queues too large.", __func__); | ||
2135 | return -EINVAL; | ||
2136 | } | ||
2137 | |||
2138 | /* l2qt */ | ||
2139 | for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++) { | ||
2140 | if (ug_info->l2qt[i] >= ug_info->numQueuesRx) { | ||
2141 | if (netif_msg_probe(ugeth)) | ||
2142 | ugeth_err | ||
2143 | ("%s: VLAN priority table entry must not be" | ||
2144 | " larger than number of Rx queues.", | ||
2145 | __func__); | ||
2146 | return -EINVAL; | ||
2147 | } | ||
2148 | } | ||
2149 | |||
2150 | /* l3qt */ | ||
2151 | for (i = 0; i < UCC_GETH_IP_PRIORITY_MAX; i++) { | ||
2152 | if (ug_info->l3qt[i] >= ug_info->numQueuesRx) { | ||
2153 | if (netif_msg_probe(ugeth)) | ||
2154 | ugeth_err | ||
2155 | ("%s: IP priority table entry must not be" | ||
2156 | " larger than number of Rx queues.", | ||
2157 | __func__); | ||
2158 | return -EINVAL; | ||
2159 | } | ||
2160 | } | ||
2161 | |||
2162 | if (ug_info->cam && !ug_info->ecamptr) { | ||
2163 | if (netif_msg_probe(ugeth)) | ||
2164 | ugeth_err("%s: If cam mode is chosen, must supply cam ptr.", | ||
2165 | __func__); | ||
2166 | return -EINVAL; | ||
2167 | } | ||
2168 | |||
2169 | if ((ug_info->numStationAddresses != | ||
2170 | UCC_GETH_NUM_OF_STATION_ADDRESSES_1) && | ||
2171 | ug_info->rxExtendedFiltering) { | ||
2172 | if (netif_msg_probe(ugeth)) | ||
2173 | ugeth_err("%s: Number of station addresses greater than 1 " | ||
2174 | "not allowed in extended parsing mode.", | ||
2175 | __func__); | ||
2176 | return -EINVAL; | ||
2177 | } | ||
2178 | |||
2179 | /* Generate uccm_mask for receive */ | ||
2180 | uf_info->uccm_mask = ug_info->eventRegMask & UCCE_OTHER;/* Errors */ | ||
2181 | for (i = 0; i < ug_info->numQueuesRx; i++) | ||
2182 | uf_info->uccm_mask |= (UCC_GETH_UCCE_RXF0 << i); | ||
2183 | |||
2184 | for (i = 0; i < ug_info->numQueuesTx; i++) | ||
2185 | uf_info->uccm_mask |= (UCC_GETH_UCCE_TXB0 << i); | ||
2186 | /* Initialize the general fast UCC block. */ | ||
2187 | if (ucc_fast_init(uf_info, &ugeth->uccf)) { | ||
2188 | if (netif_msg_probe(ugeth)) | ||
2189 | ugeth_err("%s: Failed to init uccf.", __func__); | ||
2190 | return -ENOMEM; | ||
2191 | } | ||
2192 | |||
2193 | /* read the number of risc engines, update the riscTx and riscRx | ||
2194 | * if there are 4 riscs in QE | ||
2195 | */ | ||
2196 | if (qe_get_num_of_risc() == 4) { | ||
2197 | ug_info->riscTx = QE_RISC_ALLOCATION_FOUR_RISCS; | ||
2198 | ug_info->riscRx = QE_RISC_ALLOCATION_FOUR_RISCS; | ||
2199 | } | ||
2200 | |||
2201 | ugeth->ug_regs = ioremap(uf_info->regs, sizeof(*ugeth->ug_regs)); | ||
2202 | if (!ugeth->ug_regs) { | ||
2203 | if (netif_msg_probe(ugeth)) | ||
2204 | ugeth_err("%s: Failed to ioremap regs.", __func__); | ||
2205 | return -ENOMEM; | ||
2206 | } | ||
2207 | |||
2208 | skb_queue_head_init(&ugeth->rx_recycle); | ||
2209 | |||
2210 | return 0; | ||
2211 | } | ||
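ucc_struct_init() is almost entirely parameter validation plus two resource grabs (ucc_fast_init() and the register ioremap()); nearly every check reduces to "meets the hardware minimum and is a whole multiple of the hardware step". A standalone sketch of that check, with the constants named in the code above as examples:

#include <stdbool.h>
#include <stdint.h>

/* Accept val only if it meets the minimum and is a whole multiple of step
 * (step must be non-zero), e.g. Rx BD ring length (min 8, step 4) or
 * max_rx_buf_length (min 1, step 128). */
static bool valid_hw_multiple(uint32_t val, uint32_t min, uint32_t step)
{
	return val >= min && (val % step) == 0;
}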
2212 | |||
2213 | static int ucc_geth_startup(struct ucc_geth_private *ugeth) | ||
2214 | { | ||
2215 | struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; | ||
2216 | struct ucc_geth_init_pram __iomem *p_init_enet_pram; | ||
2217 | struct ucc_fast_private *uccf; | ||
2218 | struct ucc_geth_info *ug_info; | ||
2219 | struct ucc_fast_info *uf_info; | ||
2220 | struct ucc_fast __iomem *uf_regs; | ||
2221 | struct ucc_geth __iomem *ug_regs; | ||
2222 | int ret_val = -EINVAL; | ||
2223 | u32 remoder = UCC_GETH_REMODER_INIT; | ||
2224 | u32 init_enet_pram_offset, cecr_subblock, command; | ||
2225 | u32 ifstat, i, j, size, l2qt, l3qt, length; | ||
2226 | u16 temoder = UCC_GETH_TEMODER_INIT; | ||
2227 | u16 test; | ||
2228 | u8 function_code = 0; | ||
2229 | u8 __iomem *bd; | ||
2230 | u8 __iomem *endOfRing; | ||
2231 | u8 numThreadsRxNumerical, numThreadsTxNumerical; | ||
2232 | |||
2233 | ugeth_vdbg("%s: IN", __func__); | ||
2234 | uccf = ugeth->uccf; | ||
2235 | ug_info = ugeth->ug_info; | ||
2236 | uf_info = &ug_info->uf_info; | ||
2237 | uf_regs = uccf->uf_regs; | ||
2238 | ug_regs = ugeth->ug_regs; | ||
2239 | |||
2240 | switch (ug_info->numThreadsRx) { | ||
2241 | case UCC_GETH_NUM_OF_THREADS_1: | ||
2242 | numThreadsRxNumerical = 1; | ||
2243 | break; | ||
2244 | case UCC_GETH_NUM_OF_THREADS_2: | ||
2245 | numThreadsRxNumerical = 2; | ||
2246 | break; | ||
2247 | case UCC_GETH_NUM_OF_THREADS_4: | ||
2248 | numThreadsRxNumerical = 4; | ||
2249 | break; | ||
2250 | case UCC_GETH_NUM_OF_THREADS_6: | ||
2251 | numThreadsRxNumerical = 6; | ||
2252 | break; | ||
2253 | case UCC_GETH_NUM_OF_THREADS_8: | ||
2254 | numThreadsRxNumerical = 8; | ||
2255 | break; | ||
2256 | default: | ||
2257 | if (netif_msg_ifup(ugeth)) | ||
2258 | ugeth_err("%s: Bad number of Rx threads value.", | ||
2259 | __func__); | ||
2260 | return -EINVAL; | ||
2261 | break; | ||
2262 | } | ||
2263 | |||
2264 | switch (ug_info->numThreadsTx) { | ||
2265 | case UCC_GETH_NUM_OF_THREADS_1: | ||
2266 | numThreadsTxNumerical = 1; | ||
2267 | break; | ||
2268 | case UCC_GETH_NUM_OF_THREADS_2: | ||
2269 | numThreadsTxNumerical = 2; | ||
2270 | break; | ||
2271 | case UCC_GETH_NUM_OF_THREADS_4: | ||
2272 | numThreadsTxNumerical = 4; | ||
2273 | break; | ||
2274 | case UCC_GETH_NUM_OF_THREADS_6: | ||
2275 | numThreadsTxNumerical = 6; | ||
2276 | break; | ||
2277 | case UCC_GETH_NUM_OF_THREADS_8: | ||
2278 | numThreadsTxNumerical = 8; | ||
2279 | break; | ||
2280 | default: | ||
2281 | if (netif_msg_ifup(ugeth)) | ||
2282 | ugeth_err("%s: Bad number of Tx threads value.", | ||
2283 | __func__); | ||
2284 | return -EINVAL; | ||
2285 | break; | ||
2286 | } | ||
2287 | |||
2288 | /* Calculate rx_extended_features */ | ||
2289 | ugeth->rx_non_dynamic_extended_features = ug_info->ipCheckSumCheck || | ||
2290 | ug_info->ipAddressAlignment || | ||
2291 | (ug_info->numStationAddresses != | ||
2292 | UCC_GETH_NUM_OF_STATION_ADDRESSES_1); | ||
2293 | |||
2294 | ugeth->rx_extended_features = ugeth->rx_non_dynamic_extended_features || | ||
2295 | (ug_info->vlanOperationTagged != UCC_GETH_VLAN_OPERATION_TAGGED_NOP) || | ||
2296 | (ug_info->vlanOperationNonTagged != | ||
2297 | UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP); | ||
2298 | |||
2299 | init_default_reg_vals(&uf_regs->upsmr, | ||
2300 | &ug_regs->maccfg1, &ug_regs->maccfg2); | ||
2301 | |||
2302 | /* Set UPSMR */ | ||
2303 | /* For more details see the hardware spec. */ | ||
2304 | init_rx_parameters(ug_info->bro, | ||
2305 | ug_info->rsh, ug_info->pro, &uf_regs->upsmr); | ||
2306 | |||
2307 | /* We're going to ignore other registers for now, */ | ||
2308 | /* except as needed to get up and running */ | ||
2309 | |||
2310 | /* Set MACCFG1 */ | ||
2311 | /* For more details see the hardware spec. */ | ||
2312 | init_flow_control_params(ug_info->aufc, | ||
2313 | ug_info->receiveFlowControl, | ||
2314 | ug_info->transmitFlowControl, | ||
2315 | ug_info->pausePeriod, | ||
2316 | ug_info->extensionField, | ||
2317 | &uf_regs->upsmr, | ||
2318 | &ug_regs->uempr, &ug_regs->maccfg1); | ||
2319 | |||
2320 | setbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX); | ||
2321 | |||
2322 | /* Set IPGIFG */ | ||
2323 | /* For more details see the hardware spec. */ | ||
2324 | ret_val = init_inter_frame_gap_params(ug_info->nonBackToBackIfgPart1, | ||
2325 | ug_info->nonBackToBackIfgPart2, | ||
2326 | ug_info-> | ||
2327 | miminumInterFrameGapEnforcement, | ||
2328 | ug_info->backToBackInterFrameGap, | ||
2329 | &ug_regs->ipgifg); | ||
2330 | if (ret_val != 0) { | ||
2331 | if (netif_msg_ifup(ugeth)) | ||
2332 | ugeth_err("%s: IPGIFG initialization parameter too large.", | ||
2333 | __func__); | ||
2334 | return ret_val; | ||
2335 | } | ||
2336 | |||
2337 | /* Set HAFDUP */ | ||
2338 | /* For more details see the hardware spec. */ | ||
2339 | ret_val = init_half_duplex_params(ug_info->altBeb, | ||
2340 | ug_info->backPressureNoBackoff, | ||
2341 | ug_info->noBackoff, | ||
2342 | ug_info->excessDefer, | ||
2343 | ug_info->altBebTruncation, | ||
2344 | ug_info->maxRetransmission, | ||
2345 | ug_info->collisionWindow, | ||
2346 | &ug_regs->hafdup); | ||
2347 | if (ret_val != 0) { | ||
2348 | if (netif_msg_ifup(ugeth)) | ||
2349 | ugeth_err("%s: Half Duplex initialization parameter too large.", | ||
2350 | __func__); | ||
2351 | return ret_val; | ||
2352 | } | ||
2353 | |||
2354 | /* Set IFSTAT */ | ||
2355 | /* For more details see the hardware spec. */ | ||
2356 | /* Read only - resets upon read */ | ||
2357 | ifstat = in_be32(&ug_regs->ifstat); | ||
2358 | |||
2359 | /* Clear UEMPR */ | ||
2360 | /* For more details see the hardware spec. */ | ||
2361 | out_be32(&ug_regs->uempr, 0); | ||
2362 | |||
2363 | /* Set UESCR */ | ||
2364 | /* For more details see the hardware spec. */ | ||
2365 | init_hw_statistics_gathering_mode((ug_info->statisticsMode & | ||
2366 | UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE), | ||
2367 | 0, &uf_regs->upsmr, &ug_regs->uescr); | ||
2368 | |||
2369 | /* Allocate Tx bds */ | ||
2370 | for (j = 0; j < ug_info->numQueuesTx; j++) { | ||
2371 | /* Allocate in multiples of | ||
2372 | UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT, | ||
2373 | according to spec */ | ||
2374 | length = ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)) | ||
2375 | / UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) | ||
2376 | * UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT; | ||
2377 | if ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)) % | ||
2378 | UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) | ||
2379 | length += UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT; | ||
2380 | if (uf_info->bd_mem_part == MEM_PART_SYSTEM) { | ||
2381 | u32 align = 4; | ||
2382 | if (UCC_GETH_TX_BD_RING_ALIGNMENT > 4) | ||
2383 | align = UCC_GETH_TX_BD_RING_ALIGNMENT; | ||
2384 | ugeth->tx_bd_ring_offset[j] = | ||
2385 | (u32) kmalloc((u32) (length + align), GFP_KERNEL); | ||
2386 | |||
2387 | if (ugeth->tx_bd_ring_offset[j] != 0) | ||
2388 | ugeth->p_tx_bd_ring[j] = | ||
2389 | (u8 __iomem *)((ugeth->tx_bd_ring_offset[j] + | ||
2390 | align) & ~(align - 1)); | ||
2391 | } else if (uf_info->bd_mem_part == MEM_PART_MURAM) { | ||
2392 | ugeth->tx_bd_ring_offset[j] = | ||
2393 | qe_muram_alloc(length, | ||
2394 | UCC_GETH_TX_BD_RING_ALIGNMENT); | ||
2395 | if (!IS_ERR_VALUE(ugeth->tx_bd_ring_offset[j])) | ||
2396 | ugeth->p_tx_bd_ring[j] = | ||
2397 | (u8 __iomem *) qe_muram_addr(ugeth-> | ||
2398 | tx_bd_ring_offset[j]); | ||
2399 | } | ||
2400 | if (!ugeth->p_tx_bd_ring[j]) { | ||
2401 | if (netif_msg_ifup(ugeth)) | ||
2402 | ugeth_err | ||
2403 | ("%s: Can not allocate memory for Tx bd rings.", | ||
2404 | __func__); | ||
2405 | return -ENOMEM; | ||
2406 | } | ||
2407 | /* Zero unused end of bd ring, according to spec */ | ||
2408 | memset_io((void __iomem *)(ugeth->p_tx_bd_ring[j] + | ||
2409 | ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)), 0, | ||
2410 | length - ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)); | ||
2411 | } | ||
2412 | |||
2413 | /* Allocate Rx bds */ | ||
2414 | for (j = 0; j < ug_info->numQueuesRx; j++) { | ||
2415 | length = ug_info->bdRingLenRx[j] * sizeof(struct qe_bd); | ||
2416 | if (uf_info->bd_mem_part == MEM_PART_SYSTEM) { | ||
2417 | u32 align = 4; | ||
2418 | if (UCC_GETH_RX_BD_RING_ALIGNMENT > 4) | ||
2419 | align = UCC_GETH_RX_BD_RING_ALIGNMENT; | ||
2420 | ugeth->rx_bd_ring_offset[j] = | ||
2421 | (u32) kmalloc((u32) (length + align), GFP_KERNEL); | ||
2422 | if (ugeth->rx_bd_ring_offset[j] != 0) | ||
2423 | ugeth->p_rx_bd_ring[j] = | ||
2424 | (u8 __iomem *)((ugeth->rx_bd_ring_offset[j] + | ||
2425 | align) & ~(align - 1)); | ||
2426 | } else if (uf_info->bd_mem_part == MEM_PART_MURAM) { | ||
2427 | ugeth->rx_bd_ring_offset[j] = | ||
2428 | qe_muram_alloc(length, | ||
2429 | UCC_GETH_RX_BD_RING_ALIGNMENT); | ||
2430 | if (!IS_ERR_VALUE(ugeth->rx_bd_ring_offset[j])) | ||
2431 | ugeth->p_rx_bd_ring[j] = | ||
2432 | (u8 __iomem *) qe_muram_addr(ugeth-> | ||
2433 | rx_bd_ring_offset[j]); | ||
2434 | } | ||
2435 | if (!ugeth->p_rx_bd_ring[j]) { | ||
2436 | if (netif_msg_ifup(ugeth)) | ||
2437 | ugeth_err | ||
2438 | ("%s: Can not allocate memory for Rx bd rings.", | ||
2439 | __func__); | ||
2440 | return -ENOMEM; | ||
2441 | } | ||
2442 | } | ||
2443 | |||
2444 | /* Init Tx bds */ | ||
2445 | for (j = 0; j < ug_info->numQueuesTx; j++) { | ||
2446 | /* Setup the skbuff rings */ | ||
2447 | ugeth->tx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) * | ||
2448 | ugeth->ug_info->bdRingLenTx[j], | ||
2449 | GFP_KERNEL); | ||
2450 | |||
2451 | if (ugeth->tx_skbuff[j] == NULL) { | ||
2452 | if (netif_msg_ifup(ugeth)) | ||
2453 | ugeth_err("%s: Could not allocate tx_skbuff", | ||
2454 | __func__); | ||
2455 | return -ENOMEM; | ||
2456 | } | ||
2457 | |||
2458 | for (i = 0; i < ugeth->ug_info->bdRingLenTx[j]; i++) | ||
2459 | ugeth->tx_skbuff[j][i] = NULL; | ||
2460 | |||
2461 | ugeth->skb_curtx[j] = ugeth->skb_dirtytx[j] = 0; | ||
2462 | bd = ugeth->confBd[j] = ugeth->txBd[j] = ugeth->p_tx_bd_ring[j]; | ||
2463 | for (i = 0; i < ug_info->bdRingLenTx[j]; i++) { | ||
2464 | /* clear bd buffer */ | ||
2465 | out_be32(&((struct qe_bd __iomem *)bd)->buf, 0); | ||
2466 | /* set bd status and length */ | ||
2467 | out_be32((u32 __iomem *)bd, 0); | ||
2468 | bd += sizeof(struct qe_bd); | ||
2469 | } | ||
2470 | bd -= sizeof(struct qe_bd); | ||
2471 | /* set bd status and length */ | ||
2472 | out_be32((u32 __iomem *)bd, T_W); /* for last BD set Wrap bit */ | ||
2473 | } | ||
2474 | |||
2475 | /* Init Rx bds */ | ||
2476 | for (j = 0; j < ug_info->numQueuesRx; j++) { | ||
2477 | /* Setup the skbuff rings */ | ||
2478 | ugeth->rx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) * | ||
2479 | ugeth->ug_info->bdRingLenRx[j], | ||
2480 | GFP_KERNEL); | ||
2481 | |||
2482 | if (ugeth->rx_skbuff[j] == NULL) { | ||
2483 | if (netif_msg_ifup(ugeth)) | ||
2484 | ugeth_err("%s: Could not allocate rx_skbuff", | ||
2485 | __func__); | ||
2486 | return -ENOMEM; | ||
2487 | } | ||
2488 | |||
2489 | for (i = 0; i < ugeth->ug_info->bdRingLenRx[j]; i++) | ||
2490 | ugeth->rx_skbuff[j][i] = NULL; | ||
2491 | |||
2492 | ugeth->skb_currx[j] = 0; | ||
2493 | bd = ugeth->rxBd[j] = ugeth->p_rx_bd_ring[j]; | ||
2494 | for (i = 0; i < ug_info->bdRingLenRx[j]; i++) { | ||
2495 | /* set bd status and length */ | ||
2496 | out_be32((u32 __iomem *)bd, R_I); | ||
2497 | /* clear bd buffer */ | ||
2498 | out_be32(&((struct qe_bd __iomem *)bd)->buf, 0); | ||
2499 | bd += sizeof(struct qe_bd); | ||
2500 | } | ||
2501 | bd -= sizeof(struct qe_bd); | ||
2502 | /* set bd status and length */ | ||
2503 | out_be32((u32 __iomem *)bd, R_W); /* for last BD set Wrap bit */ | ||
2504 | } | ||
2505 | |||
2506 | /* | ||
2507 | * Global PRAM | ||
2508 | */ | ||
2509 | /* Tx global PRAM */ | ||
2510 | /* Allocate global tx parameter RAM page */ | ||
2511 | ugeth->tx_glbl_pram_offset = | ||
2512 | qe_muram_alloc(sizeof(struct ucc_geth_tx_global_pram), | ||
2513 | UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT); | ||
2514 | if (IS_ERR_VALUE(ugeth->tx_glbl_pram_offset)) { | ||
2515 | if (netif_msg_ifup(ugeth)) | ||
2516 | ugeth_err | ||
2517 | ("%s: Can not allocate DPRAM memory for p_tx_glbl_pram.", | ||
2518 | __func__); | ||
2519 | return -ENOMEM; | ||
2520 | } | ||
2521 | ugeth->p_tx_glbl_pram = | ||
2522 | (struct ucc_geth_tx_global_pram __iomem *) qe_muram_addr(ugeth-> | ||
2523 | tx_glbl_pram_offset); | ||
2524 | /* Zero out p_tx_glbl_pram */ | ||
2525 | memset_io((void __iomem *)ugeth->p_tx_glbl_pram, 0, sizeof(struct ucc_geth_tx_global_pram)); | ||
2526 | |||
2527 | /* Fill global PRAM */ | ||
2528 | |||
2529 | /* TQPTR */ | ||
2530 | /* Size varies with number of Tx threads */ | ||
2531 | ugeth->thread_dat_tx_offset = | ||
2532 | qe_muram_alloc(numThreadsTxNumerical * | ||
2533 | sizeof(struct ucc_geth_thread_data_tx) + | ||
2534 | 32 * (numThreadsTxNumerical == 1), | ||
2535 | UCC_GETH_THREAD_DATA_ALIGNMENT); | ||
2536 | if (IS_ERR_VALUE(ugeth->thread_dat_tx_offset)) { | ||
2537 | if (netif_msg_ifup(ugeth)) | ||
2538 | ugeth_err | ||
2539 | ("%s: Can not allocate DPRAM memory for p_thread_data_tx.", | ||
2540 | __func__); | ||
2541 | return -ENOMEM; | ||
2542 | } | ||
2543 | |||
2544 | ugeth->p_thread_data_tx = | ||
2545 | (struct ucc_geth_thread_data_tx __iomem *) qe_muram_addr(ugeth-> | ||
2546 | thread_dat_tx_offset); | ||
2547 | out_be32(&ugeth->p_tx_glbl_pram->tqptr, ugeth->thread_dat_tx_offset); | ||
2548 | |||
2549 | /* vtagtable */ | ||
2550 | for (i = 0; i < UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX; i++) | ||
2551 | out_be32(&ugeth->p_tx_glbl_pram->vtagtable[i], | ||
2552 | ug_info->vtagtable[i]); | ||
2553 | |||
2554 | /* iphoffset */ | ||
2555 | for (i = 0; i < TX_IP_OFFSET_ENTRY_MAX; i++) | ||
2556 | out_8(&ugeth->p_tx_glbl_pram->iphoffset[i], | ||
2557 | ug_info->iphoffset[i]); | ||
2558 | |||
2559 | /* SQPTR */ | ||
2560 | /* Size varies with number of Tx queues */ | ||
2561 | ugeth->send_q_mem_reg_offset = | ||
2562 | qe_muram_alloc(ug_info->numQueuesTx * | ||
2563 | sizeof(struct ucc_geth_send_queue_qd), | ||
2564 | UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT); | ||
2565 | if (IS_ERR_VALUE(ugeth->send_q_mem_reg_offset)) { | ||
2566 | if (netif_msg_ifup(ugeth)) | ||
2567 | ugeth_err | ||
2568 | ("%s: Can not allocate DPRAM memory for p_send_q_mem_reg.", | ||
2569 | __func__); | ||
2570 | return -ENOMEM; | ||
2571 | } | ||
2572 | |||
2573 | ugeth->p_send_q_mem_reg = | ||
2574 | (struct ucc_geth_send_queue_mem_region __iomem *) qe_muram_addr(ugeth-> | ||
2575 | send_q_mem_reg_offset); | ||
2576 | out_be32(&ugeth->p_tx_glbl_pram->sqptr, ugeth->send_q_mem_reg_offset); | ||
2577 | |||
2578 | /* Setup the table */ | ||
2579 | /* Assume BD rings are already established */ | ||
2580 | for (i = 0; i < ug_info->numQueuesTx; i++) { | ||
2581 | endOfRing = | ||
2582 | ugeth->p_tx_bd_ring[i] + (ug_info->bdRingLenTx[i] - | ||
2583 | 1) * sizeof(struct qe_bd); | ||
2584 | if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) { | ||
2585 | out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base, | ||
2586 | (u32) virt_to_phys(ugeth->p_tx_bd_ring[i])); | ||
2587 | out_be32(&ugeth->p_send_q_mem_reg->sqqd[i]. | ||
2588 | last_bd_completed_address, | ||
2589 | (u32) virt_to_phys(endOfRing)); | ||
2590 | } else if (ugeth->ug_info->uf_info.bd_mem_part == | ||
2591 | MEM_PART_MURAM) { | ||
2592 | out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base, | ||
2593 | (u32) immrbar_virt_to_phys(ugeth-> | ||
2594 | p_tx_bd_ring[i])); | ||
2595 | out_be32(&ugeth->p_send_q_mem_reg->sqqd[i]. | ||
2596 | last_bd_completed_address, | ||
2597 | (u32) immrbar_virt_to_phys(endOfRing)); | ||
2598 | } | ||
2599 | } | ||
2600 | |||
2601 | /* schedulerbasepointer */ | ||
2602 | |||
2603 | if (ug_info->numQueuesTx > 1) { | ||
2604 | /* scheduler exists only if more than 1 tx queue */ | ||
2605 | ugeth->scheduler_offset = | ||
2606 | qe_muram_alloc(sizeof(struct ucc_geth_scheduler), | ||
2607 | UCC_GETH_SCHEDULER_ALIGNMENT); | ||
2608 | if (IS_ERR_VALUE(ugeth->scheduler_offset)) { | ||
2609 | if (netif_msg_ifup(ugeth)) | ||
2610 | ugeth_err | ||
2611 | ("%s: Can not allocate DPRAM memory for p_scheduler.", | ||
2612 | __func__); | ||
2613 | return -ENOMEM; | ||
2614 | } | ||
2615 | |||
2616 | ugeth->p_scheduler = | ||
2617 | (struct ucc_geth_scheduler __iomem *) qe_muram_addr(ugeth-> | ||
2618 | scheduler_offset); | ||
2619 | out_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer, | ||
2620 | ugeth->scheduler_offset); | ||
2621 | /* Zero out p_scheduler */ | ||
2622 | memset_io((void __iomem *)ugeth->p_scheduler, 0, sizeof(struct ucc_geth_scheduler)); | ||
2623 | |||
2624 | /* Set values in scheduler */ | ||
2625 | out_be32(&ugeth->p_scheduler->mblinterval, | ||
2626 | ug_info->mblinterval); | ||
2627 | out_be16(&ugeth->p_scheduler->nortsrbytetime, | ||
2628 | ug_info->nortsrbytetime); | ||
2629 | out_8(&ugeth->p_scheduler->fracsiz, ug_info->fracsiz); | ||
2630 | out_8(&ugeth->p_scheduler->strictpriorityq, | ||
2631 | ug_info->strictpriorityq); | ||
2632 | out_8(&ugeth->p_scheduler->txasap, ug_info->txasap); | ||
2633 | out_8(&ugeth->p_scheduler->extrabw, ug_info->extrabw); | ||
2634 | for (i = 0; i < NUM_TX_QUEUES; i++) | ||
2635 | out_8(&ugeth->p_scheduler->weightfactor[i], | ||
2636 | ug_info->weightfactor[i]); | ||
2637 | |||
2638 | /* Set pointers to cpucount registers in scheduler */ | ||
2639 | ugeth->p_cpucount[0] = &(ugeth->p_scheduler->cpucount0); | ||
2640 | ugeth->p_cpucount[1] = &(ugeth->p_scheduler->cpucount1); | ||
2641 | ugeth->p_cpucount[2] = &(ugeth->p_scheduler->cpucount2); | ||
2642 | ugeth->p_cpucount[3] = &(ugeth->p_scheduler->cpucount3); | ||
2643 | ugeth->p_cpucount[4] = &(ugeth->p_scheduler->cpucount4); | ||
2644 | ugeth->p_cpucount[5] = &(ugeth->p_scheduler->cpucount5); | ||
2645 | ugeth->p_cpucount[6] = &(ugeth->p_scheduler->cpucount6); | ||
2646 | ugeth->p_cpucount[7] = &(ugeth->p_scheduler->cpucount7); | ||
2647 | } | ||
2648 | |||
2649 | /* schedulerbasepointer */ | ||
2650 | /* TxRMON_PTR (statistics) */ | ||
2651 | if (ug_info-> | ||
2652 | statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) { | ||
2653 | ugeth->tx_fw_statistics_pram_offset = | ||
2654 | qe_muram_alloc(sizeof | ||
2655 | (struct ucc_geth_tx_firmware_statistics_pram), | ||
2656 | UCC_GETH_TX_STATISTICS_ALIGNMENT); | ||
2657 | if (IS_ERR_VALUE(ugeth->tx_fw_statistics_pram_offset)) { | ||
2658 | if (netif_msg_ifup(ugeth)) | ||
2659 | ugeth_err | ||
2660 | ("%s: Can not allocate DPRAM memory for" | ||
2661 | " p_tx_fw_statistics_pram.", | ||
2662 | __func__); | ||
2663 | return -ENOMEM; | ||
2664 | } | ||
2665 | ugeth->p_tx_fw_statistics_pram = | ||
2666 | (struct ucc_geth_tx_firmware_statistics_pram __iomem *) | ||
2667 | qe_muram_addr(ugeth->tx_fw_statistics_pram_offset); | ||
2668 | /* Zero out p_tx_fw_statistics_pram */ | ||
2669 | memset_io((void __iomem *)ugeth->p_tx_fw_statistics_pram, | ||
2670 | 0, sizeof(struct ucc_geth_tx_firmware_statistics_pram)); | ||
2671 | } | ||
2672 | |||
2673 | /* temoder */ | ||
2674 | /* Already has speed set */ | ||
2675 | |||
2676 | if (ug_info->numQueuesTx > 1) | ||
2677 | temoder |= TEMODER_SCHEDULER_ENABLE; | ||
2678 | if (ug_info->ipCheckSumGenerate) | ||
2679 | temoder |= TEMODER_IP_CHECKSUM_GENERATE; | ||
2680 | temoder |= ((ug_info->numQueuesTx - 1) << TEMODER_NUM_OF_QUEUES_SHIFT); | ||
2681 | out_be16(&ugeth->p_tx_glbl_pram->temoder, temoder); | ||
2682 | |||
2683 | test = in_be16(&ugeth->p_tx_glbl_pram->temoder); | ||
2684 | |||
2685 | /* Function code register value to be used later */ | ||
2686 | function_code = UCC_BMR_BO_BE | UCC_BMR_GBL; | ||
2687 | /* Required for QE */ | ||
2688 | |||
2689 | /* function code register */ | ||
2690 | out_be32(&ugeth->p_tx_glbl_pram->tstate, ((u32) function_code) << 24); | ||
2691 | |||
2692 | /* Rx global PRAM */ | ||
2693 | /* Allocate global rx parameter RAM page */ | ||
2694 | ugeth->rx_glbl_pram_offset = | ||
2695 | qe_muram_alloc(sizeof(struct ucc_geth_rx_global_pram), | ||
2696 | UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT); | ||
2697 | if (IS_ERR_VALUE(ugeth->rx_glbl_pram_offset)) { | ||
2698 | if (netif_msg_ifup(ugeth)) | ||
2699 | ugeth_err | ||
2700 | ("%s: Can not allocate DPRAM memory for p_rx_glbl_pram.", | ||
2701 | __func__); | ||
2702 | return -ENOMEM; | ||
2703 | } | ||
2704 | ugeth->p_rx_glbl_pram = | ||
2705 | (struct ucc_geth_rx_global_pram __iomem *) qe_muram_addr(ugeth-> | ||
2706 | rx_glbl_pram_offset); | ||
2707 | /* Zero out p_rx_glbl_pram */ | ||
2708 | memset_io((void __iomem *)ugeth->p_rx_glbl_pram, 0, sizeof(struct ucc_geth_rx_global_pram)); | ||
2709 | |||
2710 | /* Fill global PRAM */ | ||
2711 | |||
2712 | /* RQPTR */ | ||
2713 | /* Size varies with number of Rx threads */ | ||
2714 | ugeth->thread_dat_rx_offset = | ||
2715 | qe_muram_alloc(numThreadsRxNumerical * | ||
2716 | sizeof(struct ucc_geth_thread_data_rx), | ||
2717 | UCC_GETH_THREAD_DATA_ALIGNMENT); | ||
2718 | if (IS_ERR_VALUE(ugeth->thread_dat_rx_offset)) { | ||
2719 | if (netif_msg_ifup(ugeth)) | ||
2720 | ugeth_err | ||
2721 | ("%s: Can not allocate DPRAM memory for p_thread_data_rx.", | ||
2722 | __func__); | ||
2723 | return -ENOMEM; | ||
2724 | } | ||
2725 | |||
2726 | ugeth->p_thread_data_rx = | ||
2727 | (struct ucc_geth_thread_data_rx __iomem *) qe_muram_addr(ugeth-> | ||
2728 | thread_dat_rx_offset); | ||
2729 | out_be32(&ugeth->p_rx_glbl_pram->rqptr, ugeth->thread_dat_rx_offset); | ||
2730 | |||
2731 | /* typeorlen */ | ||
2732 | out_be16(&ugeth->p_rx_glbl_pram->typeorlen, ug_info->typeorlen); | ||
2733 | |||
2734 | /* rxrmonbaseptr (statistics) */ | ||
2735 | if (ug_info-> | ||
2736 | statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) { | ||
2737 | ugeth->rx_fw_statistics_pram_offset = | ||
2738 | qe_muram_alloc(sizeof | ||
2739 | (struct ucc_geth_rx_firmware_statistics_pram), | ||
2740 | UCC_GETH_RX_STATISTICS_ALIGNMENT); | ||
2741 | if (IS_ERR_VALUE(ugeth->rx_fw_statistics_pram_offset)) { | ||
2742 | if (netif_msg_ifup(ugeth)) | ||
2743 | ugeth_err | ||
2744 | ("%s: Can not allocate DPRAM memory for" | ||
2745 | " p_rx_fw_statistics_pram.", __func__); | ||
2746 | return -ENOMEM; | ||
2747 | } | ||
2748 | ugeth->p_rx_fw_statistics_pram = | ||
2749 | (struct ucc_geth_rx_firmware_statistics_pram __iomem *) | ||
2750 | qe_muram_addr(ugeth->rx_fw_statistics_pram_offset); | ||
2751 | /* Zero out p_rx_fw_statistics_pram */ | ||
2752 | memset_io((void __iomem *)ugeth->p_rx_fw_statistics_pram, 0, | ||
2753 | sizeof(struct ucc_geth_rx_firmware_statistics_pram)); | ||
2754 | } | ||
2755 | |||
2756 | /* intCoalescingPtr */ | ||
2757 | |||
2758 | /* Size varies with number of Rx queues */ | ||
2759 | ugeth->rx_irq_coalescing_tbl_offset = | ||
2760 | qe_muram_alloc(ug_info->numQueuesRx * | ||
2761 | sizeof(struct ucc_geth_rx_interrupt_coalescing_entry) | ||
2762 | + 4, UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT); | ||
2763 | if (IS_ERR_VALUE(ugeth->rx_irq_coalescing_tbl_offset)) { | ||
2764 | if (netif_msg_ifup(ugeth)) | ||
2765 | ugeth_err | ||
2766 | ("%s: Can not allocate DPRAM memory for" | ||
2767 | " p_rx_irq_coalescing_tbl.", __func__); | ||
2768 | return -ENOMEM; | ||
2769 | } | ||
2770 | |||
2771 | ugeth->p_rx_irq_coalescing_tbl = | ||
2772 | (struct ucc_geth_rx_interrupt_coalescing_table __iomem *) | ||
2773 | qe_muram_addr(ugeth->rx_irq_coalescing_tbl_offset); | ||
2774 | out_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr, | ||
2775 | ugeth->rx_irq_coalescing_tbl_offset); | ||
2776 | |||
2777 | /* Fill interrupt coalescing table */ | ||
2778 | for (i = 0; i < ug_info->numQueuesRx; i++) { | ||
2779 | out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i]. | ||
2780 | interruptcoalescingmaxvalue, | ||
2781 | ug_info->interruptcoalescingmaxvalue[i]); | ||
2782 | out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i]. | ||
2783 | interruptcoalescingcounter, | ||
2784 | ug_info->interruptcoalescingmaxvalue[i]); | ||
2785 | } | ||
2786 | |||
2787 | /* MRBLR */ | ||
2788 | init_max_rx_buff_len(uf_info->max_rx_buf_length, | ||
2789 | &ugeth->p_rx_glbl_pram->mrblr); | ||
2790 | /* MFLR */ | ||
2791 | out_be16(&ugeth->p_rx_glbl_pram->mflr, ug_info->maxFrameLength); | ||
2792 | /* MINFLR */ | ||
2793 | init_min_frame_len(ug_info->minFrameLength, | ||
2794 | &ugeth->p_rx_glbl_pram->minflr, | ||
2795 | &ugeth->p_rx_glbl_pram->mrblr); | ||
2796 | /* MAXD1 */ | ||
2797 | out_be16(&ugeth->p_rx_glbl_pram->maxd1, ug_info->maxD1Length); | ||
2798 | /* MAXD2 */ | ||
2799 | out_be16(&ugeth->p_rx_glbl_pram->maxd2, ug_info->maxD2Length); | ||
2800 | |||
2801 | /* l2qt */ | ||
2802 | l2qt = 0; | ||
2803 | for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++) | ||
2804 | l2qt |= (ug_info->l2qt[i] << (28 - 4 * i)); | ||
2805 | out_be32(&ugeth->p_rx_glbl_pram->l2qt, l2qt); | ||
2806 | |||
2807 | /* l3qt */ | ||
2808 | for (j = 0; j < UCC_GETH_IP_PRIORITY_MAX; j += 8) { | ||
2809 | l3qt = 0; | ||
2810 | for (i = 0; i < 8; i++) | ||
2811 | l3qt |= (ug_info->l3qt[j + i] << (28 - 4 * i)); | ||
2812 | out_be32(&ugeth->p_rx_glbl_pram->l3qt[j/8], l3qt); | ||
2813 | } | ||
2814 | |||
2815 | /* vlantype */ | ||
2816 | out_be16(&ugeth->p_rx_glbl_pram->vlantype, ug_info->vlantype); | ||
2817 | |||
2818 | /* vlantci */ | ||
2819 | out_be16(&ugeth->p_rx_glbl_pram->vlantci, ug_info->vlantci); | ||
2820 | |||
2821 | /* ecamptr */ | ||
2822 | out_be32(&ugeth->p_rx_glbl_pram->ecamptr, ug_info->ecamptr); | ||
2823 | |||
2824 | /* RBDQPTR */ | ||
2825 | /* Size varies with number of Rx queues */ | ||
2826 | ugeth->rx_bd_qs_tbl_offset = | ||
2827 | qe_muram_alloc(ug_info->numQueuesRx * | ||
2828 | (sizeof(struct ucc_geth_rx_bd_queues_entry) + | ||
2829 | sizeof(struct ucc_geth_rx_prefetched_bds)), | ||
2830 | UCC_GETH_RX_BD_QUEUES_ALIGNMENT); | ||
2831 | if (IS_ERR_VALUE(ugeth->rx_bd_qs_tbl_offset)) { | ||
2832 | if (netif_msg_ifup(ugeth)) | ||
2833 | ugeth_err | ||
2834 | ("%s: Can not allocate DPRAM memory for p_rx_bd_qs_tbl.", | ||
2835 | __func__); | ||
2836 | return -ENOMEM; | ||
2837 | } | ||
2838 | |||
2839 | ugeth->p_rx_bd_qs_tbl = | ||
2840 | (struct ucc_geth_rx_bd_queues_entry __iomem *) qe_muram_addr(ugeth-> | ||
2841 | rx_bd_qs_tbl_offset); | ||
2842 | out_be32(&ugeth->p_rx_glbl_pram->rbdqptr, ugeth->rx_bd_qs_tbl_offset); | ||
2843 | /* Zero out p_rx_bd_qs_tbl */ | ||
2844 | memset_io((void __iomem *)ugeth->p_rx_bd_qs_tbl, | ||
2845 | 0, | ||
2846 | ug_info->numQueuesRx * (sizeof(struct ucc_geth_rx_bd_queues_entry) + | ||
2847 | sizeof(struct ucc_geth_rx_prefetched_bds))); | ||
2848 | |||
2849 | /* Setup the table */ | ||
2850 | /* Assume BD rings are already established */ | ||
2851 | for (i = 0; i < ug_info->numQueuesRx; i++) { | ||
2852 | if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) { | ||
2853 | out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr, | ||
2854 | (u32) virt_to_phys(ugeth->p_rx_bd_ring[i])); | ||
2855 | } else if (ugeth->ug_info->uf_info.bd_mem_part == | ||
2856 | MEM_PART_MURAM) { | ||
2857 | out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr, | ||
2858 | (u32) immrbar_virt_to_phys(ugeth-> | ||
2859 | p_rx_bd_ring[i])); | ||
2860 | } | ||
2861 | /* rest of fields handled by QE */ | ||
2862 | } | ||
2863 | |||
2864 | /* remoder */ | ||
2865 | /* Already has speed set */ | ||
2866 | |||
2867 | if (ugeth->rx_extended_features) | ||
2868 | remoder |= REMODER_RX_EXTENDED_FEATURES; | ||
2869 | if (ug_info->rxExtendedFiltering) | ||
2870 | remoder |= REMODER_RX_EXTENDED_FILTERING; | ||
2871 | if (ug_info->dynamicMaxFrameLength) | ||
2872 | remoder |= REMODER_DYNAMIC_MAX_FRAME_LENGTH; | ||
2873 | if (ug_info->dynamicMinFrameLength) | ||
2874 | remoder |= REMODER_DYNAMIC_MIN_FRAME_LENGTH; | ||
2875 | remoder |= | ||
2876 | ug_info->vlanOperationTagged << REMODER_VLAN_OPERATION_TAGGED_SHIFT; | ||
2877 | remoder |= | ||
2878 | ug_info-> | ||
2879 | vlanOperationNonTagged << REMODER_VLAN_OPERATION_NON_TAGGED_SHIFT; | ||
2880 | remoder |= ug_info->rxQoSMode << REMODER_RX_QOS_MODE_SHIFT; | ||
2881 | remoder |= ((ug_info->numQueuesRx - 1) << REMODER_NUM_OF_QUEUES_SHIFT); | ||
2882 | if (ug_info->ipCheckSumCheck) | ||
2883 | remoder |= REMODER_IP_CHECKSUM_CHECK; | ||
2884 | if (ug_info->ipAddressAlignment) | ||
2885 | remoder |= REMODER_IP_ADDRESS_ALIGNMENT; | ||
2886 | out_be32(&ugeth->p_rx_glbl_pram->remoder, remoder); | ||
2887 | |||
2888 | /* Note that this function must be called */ | ||
2889 | /* ONLY AFTER p_tx_fw_statistics_pram */ | ||
2890 | /* and p_rx_fw_statistics_pram are allocated! */ | ||
2891 | init_firmware_statistics_gathering_mode((ug_info-> | ||
2892 | statisticsMode & | ||
2893 | UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX), | ||
2894 | (ug_info->statisticsMode & | ||
2895 | UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX), | ||
2896 | &ugeth->p_tx_glbl_pram->txrmonbaseptr, | ||
2897 | ugeth->tx_fw_statistics_pram_offset, | ||
2898 | &ugeth->p_rx_glbl_pram->rxrmonbaseptr, | ||
2899 | ugeth->rx_fw_statistics_pram_offset, | ||
2900 | &ugeth->p_tx_glbl_pram->temoder, | ||
2901 | &ugeth->p_rx_glbl_pram->remoder); | ||
2902 | |||
2903 | /* function code register */ | ||
2904 | out_8(&ugeth->p_rx_glbl_pram->rstate, function_code); | ||
2905 | |||
2906 | /* initialize extended filtering */ | ||
2907 | if (ug_info->rxExtendedFiltering) { | ||
2908 | if (!ug_info->extendedFilteringChainPointer) { | ||
2909 | if (netif_msg_ifup(ugeth)) | ||
2910 | ugeth_err("%s: Null Extended Filtering Chain Pointer.", | ||
2911 | __func__); | ||
2912 | return -EINVAL; | ||
2913 | } | ||
2914 | |||
2915 | /* Allocate memory for extended filtering Mode Global | ||
2916 | Parameters */ | ||
2917 | ugeth->exf_glbl_param_offset = | ||
2918 | qe_muram_alloc(sizeof(struct ucc_geth_exf_global_pram), | ||
2919 | UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT); | ||
2920 | if (IS_ERR_VALUE(ugeth->exf_glbl_param_offset)) { | ||
2921 | if (netif_msg_ifup(ugeth)) | ||
2922 | ugeth_err | ||
2923 | ("%s: Can not allocate DPRAM memory for" | ||
2924 | " p_exf_glbl_param.", __func__); | ||
2925 | return -ENOMEM; | ||
2926 | } | ||
2927 | |||
2928 | ugeth->p_exf_glbl_param = | ||
2929 | (struct ucc_geth_exf_global_pram __iomem *) qe_muram_addr(ugeth-> | ||
2930 | exf_glbl_param_offset); | ||
2931 | out_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam, | ||
2932 | ugeth->exf_glbl_param_offset); | ||
2933 | out_be32(&ugeth->p_exf_glbl_param->l2pcdptr, | ||
2934 | (u32) ug_info->extendedFilteringChainPointer); | ||
2935 | |||
2936 | } else { /* initialize 82xx style address filtering */ | ||
2937 | |||
2938 | /* Init individual address recognition registers to disabled */ | ||
2939 | |||
2940 | for (j = 0; j < NUM_OF_PADDRS; j++) | ||
2941 | ugeth_82xx_filtering_clear_addr_in_paddr(ugeth, (u8) j); | ||
2942 | |||
2943 | p_82xx_addr_filt = | ||
2944 | (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth-> | ||
2945 | p_rx_glbl_pram->addressfiltering; | ||
2946 | |||
2947 | ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth, | ||
2948 | ENET_ADDR_TYPE_GROUP); | ||
2949 | ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth, | ||
2950 | ENET_ADDR_TYPE_INDIVIDUAL); | ||
2951 | } | ||
2952 | |||
2953 | /* | ||
2954 | * Initialize UCC at QE level | ||
2955 | */ | ||
2956 | |||
2957 | command = QE_INIT_TX_RX; | ||
2958 | |||
2959 | /* Allocate shadow InitEnet command parameter structure. | ||
2960 | * This is needed because after the InitEnet command is executed, | ||
2961 | * the structure in DPRAM is released, since DPRAM is a premium | ||
2962 | * resource. | ||
2963 | * This shadow structure keeps a copy of what was done so that the | ||
2964 | * allocated resources can be released when the channel is freed. | ||
2965 | */ | ||
2966 | if (!(ugeth->p_init_enet_param_shadow = | ||
2967 | kmalloc(sizeof(struct ucc_geth_init_pram), GFP_KERNEL))) { | ||
2968 | if (netif_msg_ifup(ugeth)) | ||
2969 | ugeth_err | ||
2970 | ("%s: Can not allocate memory for" | ||
2971 | " p_UccInitEnetParamShadows.", __func__); | ||
2972 | return -ENOMEM; | ||
2973 | } | ||
2974 | /* Zero out *p_init_enet_param_shadow */ | ||
2975 | memset((char *)ugeth->p_init_enet_param_shadow, | ||
2976 | 0, sizeof(struct ucc_geth_init_pram)); | ||
2977 | |||
2978 | /* Fill shadow InitEnet command parameter structure */ | ||
2979 | |||
2980 | ugeth->p_init_enet_param_shadow->resinit1 = | ||
2981 | ENET_INIT_PARAM_MAGIC_RES_INIT1; | ||
2982 | ugeth->p_init_enet_param_shadow->resinit2 = | ||
2983 | ENET_INIT_PARAM_MAGIC_RES_INIT2; | ||
2984 | ugeth->p_init_enet_param_shadow->resinit3 = | ||
2985 | ENET_INIT_PARAM_MAGIC_RES_INIT3; | ||
2986 | ugeth->p_init_enet_param_shadow->resinit4 = | ||
2987 | ENET_INIT_PARAM_MAGIC_RES_INIT4; | ||
2988 | ugeth->p_init_enet_param_shadow->resinit5 = | ||
2989 | ENET_INIT_PARAM_MAGIC_RES_INIT5; | ||
2990 | ugeth->p_init_enet_param_shadow->rgftgfrxglobal |= | ||
2991 | ((u32) ug_info->numThreadsRx) << ENET_INIT_PARAM_RGF_SHIFT; | ||
2992 | ugeth->p_init_enet_param_shadow->rgftgfrxglobal |= | ||
2993 | ((u32) ug_info->numThreadsTx) << ENET_INIT_PARAM_TGF_SHIFT; | ||
2994 | |||
2995 | ugeth->p_init_enet_param_shadow->rgftgfrxglobal |= | ||
2996 | ugeth->rx_glbl_pram_offset | ug_info->riscRx; | ||
2997 | if ((ug_info->largestexternallookupkeysize != | ||
2998 | QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE) && | ||
2999 | (ug_info->largestexternallookupkeysize != | ||
3000 | QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES) && | ||
3001 | (ug_info->largestexternallookupkeysize != | ||
3002 | QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)) { | ||
3003 | if (netif_msg_ifup(ugeth)) | ||
3004 | ugeth_err("%s: Invalid largest External Lookup Key Size.", | ||
3005 | __func__); | ||
3006 | return -EINVAL; | ||
3007 | } | ||
3008 | ugeth->p_init_enet_param_shadow->largestexternallookupkeysize = | ||
3009 | ug_info->largestexternallookupkeysize; | ||
3010 | size = sizeof(struct ucc_geth_thread_rx_pram); | ||
3011 | if (ug_info->rxExtendedFiltering) { | ||
3012 | size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING; | ||
3013 | if (ug_info->largestexternallookupkeysize == | ||
3014 | QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES) | ||
3015 | size += | ||
3016 | THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8; | ||
3017 | if (ug_info->largestexternallookupkeysize == | ||
3018 | QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES) | ||
3019 | size += | ||
3020 | THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16; | ||
3021 | } | ||
3022 | |||
3023 | if ((ret_val = fill_init_enet_entries(ugeth, &(ugeth-> | ||
3024 | p_init_enet_param_shadow->rxthread[0]), | ||
3025 | (u8) (numThreadsRxNumerical + 1) | ||
3026 | /* Rx needs one extra for terminator */ | ||
3027 | , size, UCC_GETH_THREAD_RX_PRAM_ALIGNMENT, | ||
3028 | ug_info->riscRx, 1)) != 0) { | ||
3029 | if (netif_msg_ifup(ugeth)) | ||
3030 | ugeth_err("%s: Can not fill p_init_enet_param_shadow.", | ||
3031 | __func__); | ||
3032 | return ret_val; | ||
3033 | } | ||
3034 | |||
3035 | ugeth->p_init_enet_param_shadow->txglobal = | ||
3036 | ugeth->tx_glbl_pram_offset | ug_info->riscTx; | ||
3037 | if ((ret_val = | ||
3038 | fill_init_enet_entries(ugeth, | ||
3039 | &(ugeth->p_init_enet_param_shadow-> | ||
3040 | txthread[0]), numThreadsTxNumerical, | ||
3041 | sizeof(struct ucc_geth_thread_tx_pram), | ||
3042 | UCC_GETH_THREAD_TX_PRAM_ALIGNMENT, | ||
3043 | ug_info->riscTx, 0)) != 0) { | ||
3044 | if (netif_msg_ifup(ugeth)) | ||
3045 | ugeth_err("%s: Can not fill p_init_enet_param_shadow.", | ||
3046 | __func__); | ||
3047 | return ret_val; | ||
3048 | } | ||
3049 | |||
3050 | /* Load Rx bds with buffers */ | ||
3051 | for (i = 0; i < ug_info->numQueuesRx; i++) { | ||
3052 | if ((ret_val = rx_bd_buffer_set(ugeth, (u8) i)) != 0) { | ||
3053 | if (netif_msg_ifup(ugeth)) | ||
3054 | ugeth_err("%s: Can not fill Rx bds with buffers.", | ||
3055 | __func__); | ||
3056 | return ret_val; | ||
3057 | } | ||
3058 | } | ||
3059 | |||
3060 | /* Allocate InitEnet command parameter structure */ | ||
3061 | init_enet_pram_offset = qe_muram_alloc(sizeof(struct ucc_geth_init_pram), 4); | ||
3062 | if (IS_ERR_VALUE(init_enet_pram_offset)) { | ||
3063 | if (netif_msg_ifup(ugeth)) | ||
3064 | ugeth_err | ||
3065 | ("%s: Can not allocate DPRAM memory for p_init_enet_pram.", | ||
3066 | __func__); | ||
3067 | return -ENOMEM; | ||
3068 | } | ||
3069 | p_init_enet_pram = | ||
3070 | (struct ucc_geth_init_pram __iomem *) qe_muram_addr(init_enet_pram_offset); | ||
3071 | |||
3072 | /* Copy shadow InitEnet command parameter structure into PRAM */ | ||
3073 | out_8(&p_init_enet_pram->resinit1, | ||
3074 | ugeth->p_init_enet_param_shadow->resinit1); | ||
3075 | out_8(&p_init_enet_pram->resinit2, | ||
3076 | ugeth->p_init_enet_param_shadow->resinit2); | ||
3077 | out_8(&p_init_enet_pram->resinit3, | ||
3078 | ugeth->p_init_enet_param_shadow->resinit3); | ||
3079 | out_8(&p_init_enet_pram->resinit4, | ||
3080 | ugeth->p_init_enet_param_shadow->resinit4); | ||
3081 | out_be16(&p_init_enet_pram->resinit5, | ||
3082 | ugeth->p_init_enet_param_shadow->resinit5); | ||
3083 | out_8(&p_init_enet_pram->largestexternallookupkeysize, | ||
3084 | ugeth->p_init_enet_param_shadow->largestexternallookupkeysize); | ||
3085 | out_be32(&p_init_enet_pram->rgftgfrxglobal, | ||
3086 | ugeth->p_init_enet_param_shadow->rgftgfrxglobal); | ||
3087 | for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_RX; i++) | ||
3088 | out_be32(&p_init_enet_pram->rxthread[i], | ||
3089 | ugeth->p_init_enet_param_shadow->rxthread[i]); | ||
3090 | out_be32(&p_init_enet_pram->txglobal, | ||
3091 | ugeth->p_init_enet_param_shadow->txglobal); | ||
3092 | for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_TX; i++) | ||
3093 | out_be32(&p_init_enet_pram->txthread[i], | ||
3094 | ugeth->p_init_enet_param_shadow->txthread[i]); | ||
3095 | |||
3096 | /* Issue QE command */ | ||
3097 | cecr_subblock = | ||
3098 | ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); | ||
3099 | qe_issue_cmd(command, cecr_subblock, QE_CR_PROTOCOL_ETHERNET, | ||
3100 | init_enet_pram_offset); | ||
3101 | |||
3102 | /* Free InitEnet command parameter */ | ||
3103 | qe_muram_free(init_enet_pram_offset); | ||
3104 | |||
3105 | return 0; | ||
3106 | } | ||
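When BD rings live in system memory, the startup path above over-allocates by `align` bytes and rounds the returned address up with (addr + align) & ~(align - 1); when they live in MURAM, qe_muram_alloc() honours the alignment directly. A self-contained demonstration of the rounding arithmetic, assuming a power-of-two alignment as the driver does:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const uintptr_t align = 32;	/* e.g. UCC_GETH_TX_BD_RING_ALIGNMENT */
	const size_t length = 100;	/* ring bytes actually needed */

	void *raw = malloc(length + align);	/* over-allocate by align */
	uintptr_t base = ((uintptr_t)raw + align) & ~(align - 1);

	printf("raw %p -> aligned %#lx\n", raw, (unsigned long)base);
	free(raw);	/* the driver likewise frees the raw offset, not base */
	return 0;
}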
3107 | |||
3108 | /* This is called by the kernel when a frame is ready for transmission. */ | ||
3109 | /* It is pointed to by the dev->hard_start_xmit function pointer */ | ||
3110 | static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev) | ||
3111 | { | ||
3112 | struct ucc_geth_private *ugeth = netdev_priv(dev); | ||
3113 | #ifdef CONFIG_UGETH_TX_ON_DEMAND | ||
3114 | struct ucc_fast_private *uccf; | ||
3115 | #endif | ||
3116 | u8 __iomem *bd; /* BD pointer */ | ||
3117 | u32 bd_status; | ||
3118 | u8 txQ = 0; | ||
3119 | unsigned long flags; | ||
3120 | |||
3121 | ugeth_vdbg("%s: IN", __func__); | ||
3122 | |||
3123 | spin_lock_irqsave(&ugeth->lock, flags); | ||
3124 | |||
3125 | dev->stats.tx_bytes += skb->len; | ||
3126 | |||
3127 | /* Start from the next BD that should be filled */ | ||
3128 | bd = ugeth->txBd[txQ]; | ||
3129 | bd_status = in_be32((u32 __iomem *)bd); | ||
3130 | /* Save the skb pointer so we can free it later */ | ||
3131 | ugeth->tx_skbuff[txQ][ugeth->skb_curtx[txQ]] = skb; | ||
3132 | |||
3133 | /* Update the current skb pointer (wrapping if this was the last) */ | ||
3134 | ugeth->skb_curtx[txQ] = | ||
3135 | (ugeth->skb_curtx[txQ] + | ||
3136 | 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]); | ||
3137 | |||
3138 | /* set up the buffer descriptor */ | ||
3139 | out_be32(&((struct qe_bd __iomem *)bd)->buf, | ||
3140 | dma_map_single(ugeth->dev, skb->data, | ||
3141 | skb->len, DMA_TO_DEVICE)); | ||
3142 | |||
3143 | /* printk(KERN_DEBUG"skb->data is 0x%x\n",skb->data); */ | ||
3144 | |||
3145 | bd_status = (bd_status & T_W) | T_R | T_I | T_L | skb->len; | ||
3146 | |||
3147 | /* set bd status and length */ | ||
3148 | out_be32((u32 __iomem *)bd, bd_status); | ||
3149 | |||
3150 | /* Move to next BD in the ring */ | ||
3151 | if (!(bd_status & T_W)) | ||
3152 | bd += sizeof(struct qe_bd); | ||
3153 | else | ||
3154 | bd = ugeth->p_tx_bd_ring[txQ]; | ||
3155 | |||
3156 | /* If the next BD still needs to be cleaned up, then the bds | ||
3157 | are full. We need to tell the kernel to stop sending us stuff. */ | ||
3158 | if (bd == ugeth->confBd[txQ]) { | ||
3159 | if (!netif_queue_stopped(dev)) | ||
3160 | netif_stop_queue(dev); | ||
3161 | } | ||
3162 | |||
3163 | ugeth->txBd[txQ] = bd; | ||
3164 | |||
3165 | skb_tx_timestamp(skb); | ||
3166 | |||
3167 | if (ugeth->p_scheduler) { | ||
3168 | ugeth->cpucount[txQ]++; | ||
3169 | /* Indicate to QE that there are more Tx bds ready for | ||
3170 | transmission */ | ||
3171 | /* This is done by writing a running counter of the bd | ||
3172 | count to the scheduler PRAM. */ | ||
3173 | out_be16(ugeth->p_cpucount[txQ], ugeth->cpucount[txQ]); | ||
3174 | } | ||
3175 | |||
3176 | #ifdef CONFIG_UGETH_TX_ON_DEMAND | ||
3177 | uccf = ugeth->uccf; | ||
3178 | out_be16(uccf->p_utodr, UCC_FAST_TOD); | ||
3179 | #endif | ||
3180 | spin_unlock_irqrestore(&ugeth->lock, flags); | ||
3181 | |||
3182 | return NETDEV_TX_OK; | ||
3183 | } | ||
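Two independent wrap mechanisms appear in the transmit path: the software skb index wraps with TX_RING_MOD_MASK() (assumed to expand to ring length minus 1, as the masking arithmetic here implies, so the length must effectively be a power of two), while the hardware BD pointer wraps when the current descriptor carries the T_W bit. A compact model of both; WRAP_BIT is a stand-in value, not the real T_W:

#include <stdint.h>

#define WRAP_BIT 0x20000000u	/* stand-in for the T_W status flag */

struct bd {
	uint32_t status_len;	/* status bits | frame length */
	uint32_t buf;		/* DMA address of the data buffer */
};

/* Software index wrap, assuming TX_RING_MOD_MASK(n) expands to (n - 1). */
static unsigned int skb_index_next(unsigned int idx, unsigned int ring_len)
{
	return (idx + 1) & (ring_len - 1);
}

/* Hardware-style wrap: the last BD's Wrap bit sends us back to the base. */
static struct bd *bd_next(struct bd *cur, struct bd *ring_base)
{
	return (cur->status_len & WRAP_BIT) ? ring_base : cur + 1;
}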
3184 | |||
3185 | static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit) | ||
3186 | { | ||
3187 | struct sk_buff *skb; | ||
3188 | u8 __iomem *bd; | ||
3189 | u16 length, howmany = 0; | ||
3190 | u32 bd_status; | ||
3191 | u8 *bdBuffer; | ||
3192 | struct net_device *dev; | ||
3193 | |||
3194 | ugeth_vdbg("%s: IN", __func__); | ||
3195 | |||
3196 | dev = ugeth->ndev; | ||
3197 | |||
3198 | /* collect received buffers */ | ||
3199 | bd = ugeth->rxBd[rxQ]; | ||
3200 | |||
3201 | bd_status = in_be32((u32 __iomem *)bd); | ||
3202 | |||
3203 | /* while there are received buffers and BD is full (~R_E) */ | ||
3204 | while (!((bd_status & (R_E)) || (--rx_work_limit < 0))) { | ||
3205 | bdBuffer = (u8 *) in_be32(&((struct qe_bd __iomem *)bd)->buf); | ||
3206 | length = (u16) ((bd_status & BD_LENGTH_MASK) - 4); | ||
3207 | skb = ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]]; | ||
3208 | |||
3209 | /* determine whether buffer is first, last, first and last | ||
3210 | (single buffer frame) or middle (not first and not last) */ | ||
3211 | if (!skb || | ||
3212 | (!(bd_status & (R_F | R_L))) || | ||
3213 | (bd_status & R_ERRORS_FATAL)) { | ||
3214 | if (netif_msg_rx_err(ugeth)) | ||
3215 | ugeth_err("%s, %d: ERROR!!! skb - 0x%08x", | ||
3216 | __func__, __LINE__, (u32) skb); | ||
3217 | if (skb) { | ||
3218 | skb->data = skb->head + NET_SKB_PAD; | ||
3219 | skb->len = 0; | ||
3220 | skb_reset_tail_pointer(skb); | ||
3221 | __skb_queue_head(&ugeth->rx_recycle, skb); | ||
3222 | } | ||
3223 | |||
3224 | ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = NULL; | ||
3225 | dev->stats.rx_dropped++; | ||
3226 | } else { | ||
3227 | dev->stats.rx_packets++; | ||
3228 | howmany++; | ||
3229 | |||
3230 | /* Prep the skb for the packet */ | ||
3231 | skb_put(skb, length); | ||
3232 | |||
3233 | /* Tell the skb what kind of packet this is */ | ||
3234 | skb->protocol = eth_type_trans(skb, ugeth->ndev); | ||
3235 | |||
3236 | dev->stats.rx_bytes += length; | ||
3237 | /* Send the packet up the stack */ | ||
3238 | netif_receive_skb(skb); | ||
3239 | } | ||
3240 | |||
3241 | skb = get_new_skb(ugeth, bd); | ||
3242 | if (!skb) { | ||
3243 | if (netif_msg_rx_err(ugeth)) | ||
3244 | ugeth_warn("%s: No Rx Data Buffer", __func__); | ||
3245 | dev->stats.rx_dropped++; | ||
3246 | break; | ||
3247 | } | ||
3248 | |||
3249 | ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = skb; | ||
3250 | |||
3251 | /* update to point at the next skb */ | ||
3252 | ugeth->skb_currx[rxQ] = | ||
3253 | (ugeth->skb_currx[rxQ] + | ||
3254 | 1) & RX_RING_MOD_MASK(ugeth->ug_info->bdRingLenRx[rxQ]); | ||
3255 | |||
3256 | if (bd_status & R_W) | ||
3257 | bd = ugeth->p_rx_bd_ring[rxQ]; | ||
3258 | else | ||
3259 | bd += sizeof(struct qe_bd); | ||
3260 | |||
3261 | bd_status = in_be32((u32 __iomem *)bd); | ||
3262 | } | ||
3263 | |||
3264 | ugeth->rxBd[rxQ] = bd; | ||
3265 | return howmany; | ||
3266 | } | ||
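For each completed Rx BD the frame length is the descriptor's length field minus the 4-byte Ethernet FCS, and a buffer is accepted only if it carries at least one of the First/Last bits (the loop above drops only middle fragments) and no fatal error bits. A hedged model of that accept test; the mask values are stand-ins for R_F/R_L/R_ERRORS_FATAL/BD_LENGTH_MASK:

#include <stdbool.h>
#include <stdint.h>

#define LEN_MASK     0x0000ffffu	/* stand-in for BD_LENGTH_MASK */
#define FIRST        0x04000000u	/* stand-in for R_F */
#define LAST         0x08000000u	/* stand-in for R_L */
#define FATAL_ERRORS 0x003f0000u	/* stand-in for R_ERRORS_FATAL */

static bool rx_bd_ok(uint32_t status)
{
	/* at least First or Last set, and no fatal error bits */
	return (status & (FIRST | LAST)) != 0 && !(status & FATAL_ERRORS);
}

static uint16_t rx_frame_len(uint32_t status)
{
	return (uint16_t)((status & LEN_MASK) - 4);	/* strip the FCS */
}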
3267 | |||
3268 | static int ucc_geth_tx(struct net_device *dev, u8 txQ) | ||
3269 | { | ||
3270 | /* Start from the next BD that should be filled */ | ||
3271 | struct ucc_geth_private *ugeth = netdev_priv(dev); | ||
3272 | u8 __iomem *bd; /* BD pointer */ | ||
3273 | u32 bd_status; | ||
3274 | |||
3275 | bd = ugeth->confBd[txQ]; | ||
3276 | bd_status = in_be32((u32 __iomem *)bd); | ||
3277 | |||
3278 | /* Normal processing. */ | ||
3279 | while ((bd_status & T_R) == 0) { | ||
3280 | struct sk_buff *skb; | ||
3281 | |||
3282 | /* BD contains already transmitted buffer. */ | ||
3283 | /* Handle the transmitted buffer and release */ | ||
3284 | /* the BD to be used with the current frame */ | ||
3285 | |||
3286 | skb = ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]]; | ||
3287 | if (!skb) | ||
3288 | break; | ||
3289 | |||
3290 | dev->stats.tx_packets++; | ||
3291 | |||
3292 | if (skb_queue_len(&ugeth->rx_recycle) < RX_BD_RING_LEN && | ||
3293 | skb_recycle_check(skb, | ||
3294 | ugeth->ug_info->uf_info.max_rx_buf_length + | ||
3295 | UCC_GETH_RX_DATA_BUF_ALIGNMENT)) | ||
3296 | __skb_queue_head(&ugeth->rx_recycle, skb); | ||
3297 | else | ||
3298 | dev_kfree_skb(skb); | ||
3299 | |||
3300 | ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL; | ||
3301 | ugeth->skb_dirtytx[txQ] = | ||
3302 | (ugeth->skb_dirtytx[txQ] + | ||
3303 | 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]); | ||
3304 | |||
3305 | /* We freed a buffer, so now we can restart transmission */ | ||
3306 | if (netif_queue_stopped(dev)) | ||
3307 | netif_wake_queue(dev); | ||
3308 | |||
3309 | /* Advance the confirmation BD pointer */ | ||
3310 | if (!(bd_status & T_W)) | ||
3311 | bd += sizeof(struct qe_bd); | ||
3312 | else | ||
3313 | bd = ugeth->p_tx_bd_ring[txQ]; | ||
3314 | bd_status = in_be32((u32 __iomem *)bd); | ||
3315 | } | ||
3316 | ugeth->confBd[txQ] = bd; | ||
3317 | return 0; | ||
3318 | } | ||
3319 | |||
3320 | static int ucc_geth_poll(struct napi_struct *napi, int budget) | ||
3321 | { | ||
3322 | struct ucc_geth_private *ugeth = container_of(napi, struct ucc_geth_private, napi); | ||
3323 | struct ucc_geth_info *ug_info; | ||
3324 | int howmany, i; | ||
3325 | |||
3326 | ug_info = ugeth->ug_info; | ||
3327 | |||
3328 | /* Tx event processing */ | ||
3329 | spin_lock(&ugeth->lock); | ||
3330 | for (i = 0; i < ug_info->numQueuesTx; i++) | ||
3331 | ucc_geth_tx(ugeth->ndev, i); | ||
3332 | spin_unlock(&ugeth->lock); | ||
3333 | |||
3334 | howmany = 0; | ||
3335 | for (i = 0; i < ug_info->numQueuesRx; i++) | ||
3336 | howmany += ucc_geth_rx(ugeth, i, budget - howmany); | ||
3337 | |||
3338 | if (howmany < budget) { | ||
3339 | napi_complete(napi); | ||
3340 | setbits32(ugeth->uccf->p_uccm, UCCE_RX_EVENTS | UCCE_TX_EVENTS); | ||
3341 | } | ||
3342 | |||
3343 | return howmany; | ||
3344 | } | ||
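The poll routine reaps Tx completions for every queue under the lock, then hands each Rx queue only the budget that previous queues left unused; interrupts are re-armed solely when the budget was not exhausted, i.e. the rings are drained. A minimal model of that budget accounting:

/* rx() stands in for ucc_geth_rx(); it must return at most 'limit' frames. */
static int poll_model(int budget, int nqueues, int (*rx)(int q, int limit))
{
	int done = 0;

	for (int q = 0; q < nqueues; q++)
		done += rx(q, budget - done);

	/* if (done < budget): napi_complete() and unmask Rx/Tx events */
	return done;
}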
3345 | |||
3346 | static irqreturn_t ucc_geth_irq_handler(int irq, void *info) | ||
3347 | { | ||
3348 | struct net_device *dev = info; | ||
3349 | struct ucc_geth_private *ugeth = netdev_priv(dev); | ||
3350 | struct ucc_fast_private *uccf; | ||
3351 | struct ucc_geth_info *ug_info; | ||
3352 | register u32 ucce; | ||
3353 | register u32 uccm; | ||
3354 | |||
3355 | ugeth_vdbg("%s: IN", __func__); | ||
3356 | |||
3357 | uccf = ugeth->uccf; | ||
3358 | ug_info = ugeth->ug_info; | ||
3359 | |||
3360 | /* read and clear events */ | ||
3361 | ucce = (u32) in_be32(uccf->p_ucce); | ||
3362 | uccm = (u32) in_be32(uccf->p_uccm); | ||
3363 | ucce &= uccm; | ||
3364 | out_be32(uccf->p_ucce, ucce); | ||
3365 | |||
3366 | /* check for receive events that require processing */ | ||
3367 | if (ucce & (UCCE_RX_EVENTS | UCCE_TX_EVENTS)) { | ||
3368 | if (napi_schedule_prep(&ugeth->napi)) { | ||
3369 | uccm &= ~(UCCE_RX_EVENTS | UCCE_TX_EVENTS); | ||
3370 | out_be32(uccf->p_uccm, uccm); | ||
3371 | __napi_schedule(&ugeth->napi); | ||
3372 | } | ||
3373 | } | ||
3374 | |||
3375 | /* Errors and other events */ | ||
3376 | if (ucce & UCCE_OTHER) { | ||
3377 | if (ucce & UCC_GETH_UCCE_BSY) | ||
3378 | dev->stats.rx_errors++; | ||
3379 | if (ucce & UCC_GETH_UCCE_TXE) | ||
3380 | dev->stats.tx_errors++; | ||
3381 | } | ||
3382 | |||
3383 | return IRQ_HANDLED; | ||
3384 | } | ||
3385 | |||
3386 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
3387 | /* | ||
3388 | * Polling 'interrupt' - used by things like netconsole to send skbs | ||
3389 | * without having to re-enable interrupts. It's not called while | ||
3390 | * the interrupt routine is executing. | ||
3391 | */ | ||
3392 | static void ucc_netpoll(struct net_device *dev) | ||
3393 | { | ||
3394 | struct ucc_geth_private *ugeth = netdev_priv(dev); | ||
3395 | int irq = ugeth->ug_info->uf_info.irq; | ||
3396 | |||
3397 | disable_irq(irq); | ||
3398 | ucc_geth_irq_handler(irq, dev); | ||
3399 | enable_irq(irq); | ||
3400 | } | ||
3401 | #endif /* CONFIG_NET_POLL_CONTROLLER */ | ||
3402 | |||
3403 | static int ucc_geth_set_mac_addr(struct net_device *dev, void *p) | ||
3404 | { | ||
3405 | struct ucc_geth_private *ugeth = netdev_priv(dev); | ||
3406 | struct sockaddr *addr = p; | ||
3407 | |||
3408 | if (!is_valid_ether_addr(addr->sa_data)) | ||
3409 | return -EADDRNOTAVAIL; | ||
3410 | |||
3411 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); | ||
3412 | |||
3413 | /* | ||
3414 | * If device is not running, we will set mac addr register | ||
3415 | * when opening the device. | ||
3416 | */ | ||
3417 | if (!netif_running(dev)) | ||
3418 | return 0; | ||
3419 | |||
3420 | spin_lock_irq(&ugeth->lock); | ||
3421 | init_mac_station_addr_regs(dev->dev_addr[0], | ||
3422 | dev->dev_addr[1], | ||
3423 | dev->dev_addr[2], | ||
3424 | dev->dev_addr[3], | ||
3425 | dev->dev_addr[4], | ||
3426 | dev->dev_addr[5], | ||
3427 | &ugeth->ug_regs->macstnaddr1, | ||
3428 | &ugeth->ug_regs->macstnaddr2); | ||
3429 | spin_unlock_irq(&ugeth->lock); | ||
3430 | |||
3431 | return 0; | ||
3432 | } | ||
3433 | |||
3434 | static int ucc_geth_init_mac(struct ucc_geth_private *ugeth) | ||
3435 | { | ||
3436 | struct net_device *dev = ugeth->ndev; | ||
3437 | int err; | ||
3438 | |||
3439 | err = ucc_struct_init(ugeth); | ||
3440 | if (err) { | ||
3441 | if (netif_msg_ifup(ugeth)) | ||
3442 | ugeth_err("%s: Cannot configure internal struct, " | ||
3443 | "aborting.", dev->name); | ||
3444 | goto err; | ||
3445 | } | ||
3446 | |||
3447 | err = ucc_geth_startup(ugeth); | ||
3448 | if (err) { | ||
3449 | if (netif_msg_ifup(ugeth)) | ||
3450 | ugeth_err("%s: Cannot configure net device, aborting.", | ||
3451 | dev->name); | ||
3452 | goto err; | ||
3453 | } | ||
3454 | |||
3455 | err = adjust_enet_interface(ugeth); | ||
3456 | if (err) { | ||
3457 | if (netif_msg_ifup(ugeth)) | ||
3458 | ugeth_err("%s: Cannot configure net device, aborting.", | ||
3459 | dev->name); | ||
3460 | goto err; | ||
3461 | } | ||
3462 | |||
3463 | /* Set MACSTNADDR1, MACSTNADDR2 */ | ||
3464 | /* For more details see the hardware spec. */ | ||
3465 | init_mac_station_addr_regs(dev->dev_addr[0], | ||
3466 | dev->dev_addr[1], | ||
3467 | dev->dev_addr[2], | ||
3468 | dev->dev_addr[3], | ||
3469 | dev->dev_addr[4], | ||
3470 | dev->dev_addr[5], | ||
3471 | &ugeth->ug_regs->macstnaddr1, | ||
3472 | &ugeth->ug_regs->macstnaddr2); | ||
3473 | |||
3474 | err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX); | ||
3475 | if (err) { | ||
3476 | if (netif_msg_ifup(ugeth)) | ||
3477 | ugeth_err("%s: Cannot enable net device, aborting.", dev->name); | ||
3478 | goto err; | ||
3479 | } | ||
3480 | |||
3481 | return 0; | ||
3482 | err: | ||
3483 | ucc_geth_stop(ugeth); | ||
3484 | return err; | ||
3485 | } | ||
3486 | |||
3487 | /* Called when something needs to use the ethernet device */ | ||
3488 | /* Returns 0 for success. */ | ||
3489 | static int ucc_geth_open(struct net_device *dev) | ||
3490 | { | ||
3491 | struct ucc_geth_private *ugeth = netdev_priv(dev); | ||
3492 | int err; | ||
3493 | |||
3494 | ugeth_vdbg("%s: IN", __func__); | ||
3495 | |||
3496 | /* Test station address */ | ||
3497 | if (dev->dev_addr[0] & ENET_GROUP_ADDR) { | ||
3498 | if (netif_msg_ifup(ugeth)) | ||
3499 | ugeth_err("%s: Multicast address used for station " | ||
3500 | "address - is this what you wanted?", | ||
3501 | __func__); | ||
3502 | return -EINVAL; | ||
3503 | } | ||
3504 | |||
3505 | err = init_phy(dev); | ||
3506 | if (err) { | ||
3507 | if (netif_msg_ifup(ugeth)) | ||
3508 | ugeth_err("%s: Cannot initialize PHY, aborting.", | ||
3509 | dev->name); | ||
3510 | return err; | ||
3511 | } | ||
3512 | |||
3513 | err = ucc_geth_init_mac(ugeth); | ||
3514 | if (err) { | ||
3515 | if (netif_msg_ifup(ugeth)) | ||
3516 | ugeth_err("%s: Cannot initialize MAC, aborting.", | ||
3517 | dev->name); | ||
3518 | goto err; | ||
3519 | } | ||
3520 | |||
3521 | err = request_irq(ugeth->ug_info->uf_info.irq, ucc_geth_irq_handler, | ||
3522 | 0, "UCC Geth", dev); | ||
3523 | if (err) { | ||
3524 | if (netif_msg_ifup(ugeth)) | ||
3525 | ugeth_err("%s: Cannot get IRQ for net device, aborting.", | ||
3526 | dev->name); | ||
3527 | goto err; | ||
3528 | } | ||
3529 | |||
3530 | phy_start(ugeth->phydev); | ||
3531 | napi_enable(&ugeth->napi); | ||
3532 | netif_start_queue(dev); | ||
3533 | |||
3534 | device_set_wakeup_capable(&dev->dev, | ||
3535 | qe_alive_during_sleep() || ugeth->phydev->irq); | ||
3536 | device_set_wakeup_enable(&dev->dev, ugeth->wol_en); | ||
3537 | |||
3538 | return err; | ||
3539 | |||
3540 | err: | ||
3541 | ucc_geth_stop(ugeth); | ||
3542 | return err; | ||
3543 | } | ||
3544 | |||
3545 | /* Stops the kernel queue, and halts the controller */ | ||
3546 | static int ucc_geth_close(struct net_device *dev) | ||
3547 | { | ||
3548 | struct ucc_geth_private *ugeth = netdev_priv(dev); | ||
3549 | |||
3550 | ugeth_vdbg("%s: IN", __func__); | ||
3551 | |||
3552 | napi_disable(&ugeth->napi); | ||
3553 | |||
3554 | cancel_work_sync(&ugeth->timeout_work); | ||
3555 | ucc_geth_stop(ugeth); | ||
3556 | phy_disconnect(ugeth->phydev); | ||
3557 | ugeth->phydev = NULL; | ||
3558 | |||
3559 | free_irq(ugeth->ug_info->uf_info.irq, ugeth->ndev); | ||
3560 | |||
3561 | netif_stop_queue(dev); | ||
3562 | |||
3563 | return 0; | ||
3564 | } | ||
3565 | |||
3566 | /* Reopen device. This will reset the MAC and PHY. */ | ||
3567 | static void ucc_geth_timeout_work(struct work_struct *work) | ||
3568 | { | ||
3569 | struct ucc_geth_private *ugeth; | ||
3570 | struct net_device *dev; | ||
3571 | |||
3572 | ugeth = container_of(work, struct ucc_geth_private, timeout_work); | ||
3573 | dev = ugeth->ndev; | ||
3574 | |||
3575 | ugeth_vdbg("%s: IN", __func__); | ||
3576 | |||
3577 | dev->stats.tx_errors++; | ||
3578 | |||
3579 | ugeth_dump_regs(ugeth); | ||
3580 | |||
3581 | if (dev->flags & IFF_UP) { | ||
3582 | /* | ||
3583 | * Must reset MAC *and* PHY. This is done by reopening | ||
3584 | * the device. | ||
3585 | */ | ||
3586 | netif_tx_stop_all_queues(dev); | ||
3587 | ucc_geth_stop(ugeth); | ||
3588 | ucc_geth_init_mac(ugeth); | ||
3589 | /* Must start PHY here */ | ||
3590 | phy_start(ugeth->phydev); | ||
3591 | netif_tx_start_all_queues(dev); | ||
3592 | } | ||
3593 | |||
3594 | netif_tx_schedule_all(dev); | ||
3595 | } | ||
3596 | |||
3597 | /* | ||
3598 | * ucc_geth_timeout gets called when a packet has not been | ||
3599 | * transmitted after a set amount of time. | ||
3600 | */ | ||
3601 | static void ucc_geth_timeout(struct net_device *dev) | ||
3602 | { | ||
3603 | struct ucc_geth_private *ugeth = netdev_priv(dev); | ||
3604 | |||
3605 | schedule_work(&ugeth->timeout_work); | ||
3606 | } | ||
3607 | |||
3608 | |||
3609 | #ifdef CONFIG_PM | ||
3610 | |||
3611 | static int ucc_geth_suspend(struct platform_device *ofdev, pm_message_t state) | ||
3612 | { | ||
3613 | struct net_device *ndev = dev_get_drvdata(&ofdev->dev); | ||
3614 | struct ucc_geth_private *ugeth = netdev_priv(ndev); | ||
3615 | |||
3616 | if (!netif_running(ndev)) | ||
3617 | return 0; | ||
3618 | |||
3619 | netif_device_detach(ndev); | ||
3620 | napi_disable(&ugeth->napi); | ||
3621 | |||
3622 | /* | ||
3623 | * Disable the controller, otherwise we'll wakeup on any network | ||
3624 | * activity. | ||
3625 | */ | ||
3626 | ugeth_disable(ugeth, COMM_DIR_RX_AND_TX); | ||
3627 | |||
3628 | if (ugeth->wol_en & WAKE_MAGIC) { | ||
3629 | setbits32(ugeth->uccf->p_uccm, UCC_GETH_UCCE_MPD); | ||
3630 | setbits32(&ugeth->ug_regs->maccfg2, MACCFG2_MPE); | ||
3631 | ucc_fast_enable(ugeth->uccf, COMM_DIR_RX_AND_TX); | ||
3632 | } else if (!(ugeth->wol_en & WAKE_PHY)) { | ||
3633 | phy_stop(ugeth->phydev); | ||
3634 | } | ||
3635 | |||
3636 | return 0; | ||
3637 | } | ||
3638 | |||
3639 | static int ucc_geth_resume(struct platform_device *ofdev) | ||
3640 | { | ||
3641 | struct net_device *ndev = dev_get_drvdata(&ofdev->dev); | ||
3642 | struct ucc_geth_private *ugeth = netdev_priv(ndev); | ||
3643 | int err; | ||
3644 | |||
3645 | if (!netif_running(ndev)) | ||
3646 | return 0; | ||
3647 | |||
3648 | if (qe_alive_during_sleep()) { | ||
3649 | if (ugeth->wol_en & WAKE_MAGIC) { | ||
3650 | ucc_fast_disable(ugeth->uccf, COMM_DIR_RX_AND_TX); | ||
3651 | clrbits32(&ugeth->ug_regs->maccfg2, MACCFG2_MPE); | ||
3652 | clrbits32(ugeth->uccf->p_uccm, UCC_GETH_UCCE_MPD); | ||
3653 | } | ||
3654 | ugeth_enable(ugeth, COMM_DIR_RX_AND_TX); | ||
3655 | } else { | ||
3656 | /* | ||
3657 | * Full reinitialization is required if QE shuts down | ||
3658 | * during sleep. | ||
3659 | */ | ||
3660 | ucc_geth_memclean(ugeth); | ||
3661 | |||
3662 | err = ucc_geth_init_mac(ugeth); | ||
3663 | if (err) { | ||
3664 | ugeth_err("%s: Cannot initialize MAC, aborting.", | ||
3665 | ndev->name); | ||
3666 | return err; | ||
3667 | } | ||
3668 | } | ||
3669 | |||
3670 | ugeth->oldlink = 0; | ||
3671 | ugeth->oldspeed = 0; | ||
3672 | ugeth->oldduplex = -1; | ||
3673 | |||
3674 | phy_stop(ugeth->phydev); | ||
3675 | phy_start(ugeth->phydev); | ||
3676 | |||
3677 | napi_enable(&ugeth->napi); | ||
3678 | netif_device_attach(ndev); | ||
3679 | |||
3680 | return 0; | ||
3681 | } | ||
3682 | |||
3683 | #else | ||
3684 | #define ucc_geth_suspend NULL | ||
3685 | #define ucc_geth_resume NULL | ||
3686 | #endif | ||
3687 | |||
3688 | static phy_interface_t to_phy_interface(const char *phy_connection_type) | ||
3689 | { | ||
3690 | if (strcasecmp(phy_connection_type, "mii") == 0) | ||
3691 | return PHY_INTERFACE_MODE_MII; | ||
3692 | if (strcasecmp(phy_connection_type, "gmii") == 0) | ||
3693 | return PHY_INTERFACE_MODE_GMII; | ||
3694 | if (strcasecmp(phy_connection_type, "tbi") == 0) | ||
3695 | return PHY_INTERFACE_MODE_TBI; | ||
3696 | if (strcasecmp(phy_connection_type, "rmii") == 0) | ||
3697 | return PHY_INTERFACE_MODE_RMII; | ||
3698 | if (strcasecmp(phy_connection_type, "rgmii") == 0) | ||
3699 | return PHY_INTERFACE_MODE_RGMII; | ||
3700 | if (strcasecmp(phy_connection_type, "rgmii-id") == 0) | ||
3701 | return PHY_INTERFACE_MODE_RGMII_ID; | ||
3702 | if (strcasecmp(phy_connection_type, "rgmii-txid") == 0) | ||
3703 | return PHY_INTERFACE_MODE_RGMII_TXID; | ||
3704 | if (strcasecmp(phy_connection_type, "rgmii-rxid") == 0) | ||
3705 | return PHY_INTERFACE_MODE_RGMII_RXID; | ||
3706 | if (strcasecmp(phy_connection_type, "rtbi") == 0) | ||
3707 | return PHY_INTERFACE_MODE_RTBI; | ||
3708 | if (strcasecmp(phy_connection_type, "sgmii") == 0) | ||
3709 | return PHY_INTERFACE_MODE_SGMII; | ||
3710 | |||
3711 | return PHY_INTERFACE_MODE_MII; | ||
3712 | } | ||
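
For example, a device-tree node with phy-connection-type = "rgmii-id" maps as follows; unrecognized strings fall back to MII:

	/* Usage sketch: */
	phy_interface_t mode = to_phy_interface("rgmii-id");
	/* mode == PHY_INTERFACE_MODE_RGMII_ID */
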
3713 | |||
3714 | static int ucc_geth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | ||
3715 | { | ||
3716 | struct ucc_geth_private *ugeth = netdev_priv(dev); | ||
3717 | |||
3718 | if (!netif_running(dev)) | ||
3719 | return -EINVAL; | ||
3720 | |||
3721 | if (!ugeth->phydev) | ||
3722 | return -ENODEV; | ||
3723 | |||
3724 | return phy_mii_ioctl(ugeth->phydev, rq, cmd); | ||
3725 | } | ||
3726 | |||
3727 | static const struct net_device_ops ucc_geth_netdev_ops = { | ||
3728 | .ndo_open = ucc_geth_open, | ||
3729 | .ndo_stop = ucc_geth_close, | ||
3730 | .ndo_start_xmit = ucc_geth_start_xmit, | ||
3731 | .ndo_validate_addr = eth_validate_addr, | ||
3732 | .ndo_set_mac_address = ucc_geth_set_mac_addr, | ||
3733 | .ndo_change_mtu = eth_change_mtu, | ||
3734 | .ndo_set_multicast_list = ucc_geth_set_multi, | ||
3735 | .ndo_tx_timeout = ucc_geth_timeout, | ||
3736 | .ndo_do_ioctl = ucc_geth_ioctl, | ||
3737 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
3738 | .ndo_poll_controller = ucc_netpoll, | ||
3739 | #endif | ||
3740 | }; | ||
3741 | |||
3742 | static int ucc_geth_probe(struct platform_device* ofdev) | ||
3743 | { | ||
3744 | struct device *device = &ofdev->dev; | ||
3745 | struct device_node *np = ofdev->dev.of_node; | ||
3746 | struct net_device *dev = NULL; | ||
3747 | struct ucc_geth_private *ugeth = NULL; | ||
3748 | struct ucc_geth_info *ug_info; | ||
3749 | struct resource res; | ||
3750 | int err, ucc_num, max_speed = 0; | ||
3751 | const unsigned int *prop; | ||
3752 | const char *sprop; | ||
3753 | const void *mac_addr; | ||
3754 | phy_interface_t phy_interface; | ||
3755 | static const int enet_to_speed[] = { | ||
3756 | SPEED_10, SPEED_10, SPEED_10, | ||
3757 | SPEED_100, SPEED_100, SPEED_100, | ||
3758 | SPEED_1000, SPEED_1000, SPEED_1000, SPEED_1000, | ||
3759 | }; | ||
3760 | static const phy_interface_t enet_to_phy_interface[] = { | ||
3761 | PHY_INTERFACE_MODE_MII, PHY_INTERFACE_MODE_RMII, | ||
3762 | PHY_INTERFACE_MODE_RGMII, PHY_INTERFACE_MODE_MII, | ||
3763 | PHY_INTERFACE_MODE_RMII, PHY_INTERFACE_MODE_RGMII, | ||
3764 | PHY_INTERFACE_MODE_GMII, PHY_INTERFACE_MODE_RGMII, | ||
3765 | PHY_INTERFACE_MODE_TBI, PHY_INTERFACE_MODE_RTBI, | ||
3766 | PHY_INTERFACE_MODE_SGMII, | ||
3767 | }; | ||
3768 | |||
3769 | ugeth_vdbg("%s: IN", __func__); | ||
3770 | |||
3771 | prop = of_get_property(np, "cell-index", NULL); | ||
3772 | if (!prop) { | ||
3773 | prop = of_get_property(np, "device-id", NULL); | ||
3774 | if (!prop) | ||
3775 | return -ENODEV; | ||
3776 | } | ||
3777 | |||
3778 | ucc_num = *prop - 1; | ||
3779 | if ((ucc_num < 0) || (ucc_num > 7)) | ||
3780 | return -ENODEV; | ||
3781 | |||
3782 | ug_info = &ugeth_info[ucc_num]; | ||
3783 | if (ug_info == NULL) { | ||
3784 | if (netif_msg_probe(&debug)) | ||
3785 | ugeth_err("%s: [%d] Missing additional data!", | ||
3786 | __func__, ucc_num); | ||
3787 | return -ENODEV; | ||
3788 | } | ||
3789 | |||
3790 | ug_info->uf_info.ucc_num = ucc_num; | ||
3791 | |||
3792 | sprop = of_get_property(np, "rx-clock-name", NULL); | ||
3793 | if (sprop) { | ||
3794 | ug_info->uf_info.rx_clock = qe_clock_source(sprop); | ||
3795 | if ((ug_info->uf_info.rx_clock < QE_CLK_NONE) || | ||
3796 | (ug_info->uf_info.rx_clock > QE_CLK24)) { | ||
3797 | printk(KERN_ERR | ||
3798 | "ucc_geth: invalid rx-clock-name property\n"); | ||
3799 | return -EINVAL; | ||
3800 | } | ||
3801 | } else { | ||
3802 | prop = of_get_property(np, "rx-clock", NULL); | ||
3803 | if (!prop) { | ||
3804 | /* If both rx-clock-name and rx-clock are missing, | ||
3805 | we want to tell people to use rx-clock-name. */ | ||
3806 | printk(KERN_ERR | ||
3807 | "ucc_geth: missing rx-clock-name property\n"); | ||
3808 | return -EINVAL; | ||
3809 | } | ||
3810 | if ((*prop < QE_CLK_NONE) || (*prop > QE_CLK24)) { | ||
3811 | printk(KERN_ERR | ||
3812 | "ucc_geth: invalid rx-clock propperty\n"); | ||
3813 | return -EINVAL; | ||
3814 | } | ||
3815 | ug_info->uf_info.rx_clock = *prop; | ||
3816 | } | ||
3817 | |||
3818 | sprop = of_get_property(np, "tx-clock-name", NULL); | ||
3819 | if (sprop) { | ||
3820 | ug_info->uf_info.tx_clock = qe_clock_source(sprop); | ||
3821 | if ((ug_info->uf_info.tx_clock < QE_CLK_NONE) || | ||
3822 | (ug_info->uf_info.tx_clock > QE_CLK24)) { | ||
3823 | printk(KERN_ERR | ||
3824 | "ucc_geth: invalid tx-clock-name property\n"); | ||
3825 | return -EINVAL; | ||
3826 | } | ||
3827 | } else { | ||
3828 | prop = of_get_property(np, "tx-clock", NULL); | ||
3829 | if (!prop) { | ||
3830 | printk(KERN_ERR | ||
3831 | "ucc_geth: missing tx-clock-name property\n"); | ||
3832 | return -EINVAL; | ||
3833 | } | ||
3834 | if ((*prop < QE_CLK_NONE) || (*prop > QE_CLK24)) { | ||
3835 | printk(KERN_ERR | ||
3836 | "ucc_geth: invalid tx-clock property\n"); | ||
3837 | return -EINVAL; | ||
3838 | } | ||
3839 | ug_info->uf_info.tx_clock = *prop; | ||
3840 | } | ||
3841 | |||
3842 | err = of_address_to_resource(np, 0, &res); | ||
3843 | if (err) | ||
3844 | return -EINVAL; | ||
3845 | |||
3846 | ug_info->uf_info.regs = res.start; | ||
3847 | ug_info->uf_info.irq = irq_of_parse_and_map(np, 0); | ||
3848 | |||
3849 | ug_info->phy_node = of_parse_phandle(np, "phy-handle", 0); | ||
3850 | |||
3851 | /* Find the TBI PHY node. If it's not there, we don't support SGMII */ | ||
3852 | ug_info->tbi_node = of_parse_phandle(np, "tbi-handle", 0); | ||
3853 | |||
3854 | /* get the phy interface type, or default to MII */ | ||
3855 | prop = of_get_property(np, "phy-connection-type", NULL); | ||
3856 | if (!prop) { | ||
3857 | /* handle interface property present in old trees */ | ||
3858 | prop = of_get_property(ug_info->phy_node, "interface", NULL); | ||
3859 | if (prop != NULL) { | ||
3860 | phy_interface = enet_to_phy_interface[*prop]; | ||
3861 | max_speed = enet_to_speed[*prop]; | ||
3862 | } else | ||
3863 | phy_interface = PHY_INTERFACE_MODE_MII; | ||
3864 | } else { | ||
3865 | phy_interface = to_phy_interface((const char *)prop); | ||
3866 | } | ||
3867 | |||
3868 | /* get speed, or derive from PHY interface */ | ||
3869 | if (max_speed == 0) | ||
3870 | switch (phy_interface) { | ||
3871 | case PHY_INTERFACE_MODE_GMII: | ||
3872 | case PHY_INTERFACE_MODE_RGMII: | ||
3873 | case PHY_INTERFACE_MODE_RGMII_ID: | ||
3874 | case PHY_INTERFACE_MODE_RGMII_RXID: | ||
3875 | case PHY_INTERFACE_MODE_RGMII_TXID: | ||
3876 | case PHY_INTERFACE_MODE_TBI: | ||
3877 | case PHY_INTERFACE_MODE_RTBI: | ||
3878 | case PHY_INTERFACE_MODE_SGMII: | ||
3879 | max_speed = SPEED_1000; | ||
3880 | break; | ||
3881 | default: | ||
3882 | max_speed = SPEED_100; | ||
3883 | break; | ||
3884 | } | ||
3885 | |||
3886 | if (max_speed == SPEED_1000) { | ||
3887 | /* configure muram FIFOs for gigabit operation */ | ||
3888 | ug_info->uf_info.urfs = UCC_GETH_URFS_GIGA_INIT; | ||
3889 | ug_info->uf_info.urfet = UCC_GETH_URFET_GIGA_INIT; | ||
3890 | ug_info->uf_info.urfset = UCC_GETH_URFSET_GIGA_INIT; | ||
3891 | ug_info->uf_info.utfs = UCC_GETH_UTFS_GIGA_INIT; | ||
3892 | ug_info->uf_info.utfet = UCC_GETH_UTFET_GIGA_INIT; | ||
3893 | ug_info->uf_info.utftt = UCC_GETH_UTFTT_GIGA_INIT; | ||
3894 | ug_info->numThreadsTx = UCC_GETH_NUM_OF_THREADS_4; | ||
3895 | |||
3896 | /* If the QE provides 46 snums, it must support 4 UECs at | ||
3897 | * 1000Base-T simultaneously, so we need to allocate | ||
3898 | * more threads to Rx. | ||
3899 | */ | ||
3900 | if (qe_get_num_of_snums() == 46) | ||
3901 | ug_info->numThreadsRx = UCC_GETH_NUM_OF_THREADS_6; | ||
3902 | else | ||
3903 | ug_info->numThreadsRx = UCC_GETH_NUM_OF_THREADS_4; | ||
3904 | } | ||
3905 | |||
3906 | if (netif_msg_probe(&debug)) | ||
3907 | printk(KERN_INFO "ucc_geth: UCC%1d at 0x%8x (irq = %d)\n", | ||
3908 | ug_info->uf_info.ucc_num + 1, ug_info->uf_info.regs, | ||
3909 | ug_info->uf_info.irq); | ||
3910 | |||
3911 | /* Create an ethernet device instance */ | ||
3912 | dev = alloc_etherdev(sizeof(*ugeth)); | ||
3913 | |||
3914 | if (dev == NULL) | ||
3915 | return -ENOMEM; | ||
3916 | |||
3917 | ugeth = netdev_priv(dev); | ||
3918 | spin_lock_init(&ugeth->lock); | ||
3919 | |||
3920 | /* Create CQs for hash tables */ | ||
3921 | INIT_LIST_HEAD(&ugeth->group_hash_q); | ||
3922 | INIT_LIST_HEAD(&ugeth->ind_hash_q); | ||
3923 | |||
3924 | dev_set_drvdata(device, dev); | ||
3925 | |||
3926 | /* Set the dev->base_addr to the gfar reg region */ | ||
3927 | dev->base_addr = (unsigned long)(ug_info->uf_info.regs); | ||
3928 | |||
3929 | SET_NETDEV_DEV(dev, device); | ||
3930 | |||
3931 | /* Fill in the dev structure */ | ||
3932 | uec_set_ethtool_ops(dev); | ||
3933 | dev->netdev_ops = &ucc_geth_netdev_ops; | ||
3934 | dev->watchdog_timeo = TX_TIMEOUT; | ||
3935 | INIT_WORK(&ugeth->timeout_work, ucc_geth_timeout_work); | ||
3936 | netif_napi_add(dev, &ugeth->napi, ucc_geth_poll, 64); | ||
3937 | dev->mtu = 1500; | ||
3938 | |||
3939 | ugeth->msg_enable = netif_msg_init(debug.msg_enable, UGETH_MSG_DEFAULT); | ||
3940 | ugeth->phy_interface = phy_interface; | ||
3941 | ugeth->max_speed = max_speed; | ||
3942 | |||
3943 | err = register_netdev(dev); | ||
3944 | if (err) { | ||
3945 | if (netif_msg_probe(ugeth)) | ||
3946 | ugeth_err("%s: Cannot register net device, aborting.", | ||
3947 | dev->name); | ||
3948 | free_netdev(dev); | ||
3949 | return err; | ||
3950 | } | ||
3951 | |||
3952 | mac_addr = of_get_mac_address(np); | ||
3953 | if (mac_addr) | ||
3954 | memcpy(dev->dev_addr, mac_addr, 6); | ||
3955 | |||
3956 | ugeth->ug_info = ug_info; | ||
3957 | ugeth->dev = device; | ||
3958 | ugeth->ndev = dev; | ||
3959 | ugeth->node = np; | ||
3960 | |||
3961 | return 0; | ||
3962 | } | ||
3963 | |||
3964 | static int ucc_geth_remove(struct platform_device* ofdev) | ||
3965 | { | ||
3966 | struct device *device = &ofdev->dev; | ||
3967 | struct net_device *dev = dev_get_drvdata(device); | ||
3968 | struct ucc_geth_private *ugeth = netdev_priv(dev); | ||
3969 | |||
3970 | unregister_netdev(dev); | ||
3971 | ucc_geth_memclean(ugeth); | ||
3972 | free_netdev(dev); | ||
3973 | dev_set_drvdata(device, NULL); | ||
3974 | |||
3975 | return 0; | ||
3976 | } | ||
3977 | |||
3978 | static struct of_device_id ucc_geth_match[] = { | ||
3979 | { | ||
3980 | .type = "network", | ||
3981 | .compatible = "ucc_geth", | ||
3982 | }, | ||
3983 | {}, | ||
3984 | }; | ||
3985 | |||
3986 | MODULE_DEVICE_TABLE(of, ucc_geth_match); | ||
3987 | |||
3988 | static struct platform_driver ucc_geth_driver = { | ||
3989 | .driver = { | ||
3990 | .name = DRV_NAME, | ||
3991 | .owner = THIS_MODULE, | ||
3992 | .of_match_table = ucc_geth_match, | ||
3993 | }, | ||
3994 | .probe = ucc_geth_probe, | ||
3995 | .remove = ucc_geth_remove, | ||
3996 | .suspend = ucc_geth_suspend, | ||
3997 | .resume = ucc_geth_resume, | ||
3998 | }; | ||
3999 | |||
4000 | static int __init ucc_geth_init(void) | ||
4001 | { | ||
4002 | int i, ret; | ||
4003 | |||
4004 | if (netif_msg_drv(&debug)) | ||
4005 | printk(KERN_INFO "ucc_geth: " DRV_DESC "\n"); | ||
4006 | for (i = 0; i < 8; i++) | ||
4007 | memcpy(&(ugeth_info[i]), &ugeth_primary_info, | ||
4008 | sizeof(ugeth_primary_info)); | ||
4009 | |||
4010 | ret = platform_driver_register(&ucc_geth_driver); | ||
4011 | |||
4012 | return ret; | ||
4013 | } | ||
4014 | |||
4015 | static void __exit ucc_geth_exit(void) | ||
4016 | { | ||
4017 | platform_driver_unregister(&ucc_geth_driver); | ||
4018 | } | ||
4019 | |||
4020 | module_init(ucc_geth_init); | ||
4021 | module_exit(ucc_geth_exit); | ||
4022 | |||
4023 | MODULE_AUTHOR("Freescale Semiconductor, Inc"); | ||
4024 | MODULE_DESCRIPTION(DRV_DESC); | ||
4025 | MODULE_VERSION(DRV_VERSION); | ||
4026 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/net/ethernet/freescale/ucc_geth.h b/drivers/net/ethernet/freescale/ucc_geth.h new file mode 100644 index 000000000000..d12fcad145e9 --- /dev/null +++ b/drivers/net/ethernet/freescale/ucc_geth.h | |||
@@ -0,0 +1,1240 @@ | |||
1 | /* | ||
2 | * Copyright (C) Freescale Semiconductor, Inc. 2006-2009. All rights reserved. | ||
3 | * | ||
4 | * Author: Shlomi Gridish <gridish@freescale.com> | ||
5 | * | ||
6 | * Description: | ||
7 | * Internal header file for UCC Gigabit Ethernet unit routines. | ||
8 | * | ||
9 | * Changelog: | ||
10 | * Jun 28, 2006 Li Yang <LeoLi@freescale.com> | ||
11 | * - Rearrange code and style fixes | ||
12 | * | ||
13 | * This program is free software; you can redistribute it and/or modify it | ||
14 | * under the terms of the GNU General Public License as published by the | ||
15 | * Free Software Foundation; either version 2 of the License, or (at your | ||
16 | * option) any later version. | ||
17 | */ | ||
18 | #ifndef __UCC_GETH_H__ | ||
19 | #define __UCC_GETH_H__ | ||
20 | |||
21 | #include <linux/kernel.h> | ||
22 | #include <linux/list.h> | ||
23 | |||
24 | #include <asm/immap_qe.h> | ||
25 | #include <asm/qe.h> | ||
26 | |||
27 | #include <asm/ucc.h> | ||
28 | #include <asm/ucc_fast.h> | ||
29 | |||
30 | #define DRV_DESC "QE UCC Gigabit Ethernet Controller" | ||
31 | #define DRV_NAME "ucc_geth" | ||
32 | #define DRV_VERSION "1.1" | ||
33 | |||
34 | #define NUM_TX_QUEUES 8 | ||
35 | #define NUM_RX_QUEUES 8 | ||
36 | #define NUM_BDS_IN_PREFETCHED_BDS 4 | ||
37 | #define TX_IP_OFFSET_ENTRY_MAX 8 | ||
38 | #define NUM_OF_PADDRS 4 | ||
39 | #define ENET_INIT_PARAM_MAX_ENTRIES_RX 9 | ||
40 | #define ENET_INIT_PARAM_MAX_ENTRIES_TX 8 | ||
41 | |||
42 | struct ucc_geth { | ||
43 | struct ucc_fast uccf; | ||
44 | u8 res0[0x100 - sizeof(struct ucc_fast)]; | ||
45 | |||
46 | u32 maccfg1; /* mac configuration reg. 1 */ | ||
47 | u32 maccfg2; /* mac configuration reg. 2 */ | ||
48 | u32 ipgifg; /* interframe gap reg. */ | ||
49 | u32 hafdup; /* half-duplex reg. */ | ||
50 | u8 res1[0x10]; | ||
51 | u8 miimng[0x18]; /* MII management structure moved to _mii.h */ | ||
52 | u32 ifctl; /* interface control reg */ | ||
53 | u32 ifstat; /* interface status reg */ | ||
54 | u32 macstnaddr1; /* mac station address part 1 reg */ | ||
55 | u32 macstnaddr2; /* mac station address part 2 reg */ | ||
56 | u8 res2[0x8]; | ||
57 | u32 uempr; /* UCC Ethernet Mac parameter reg */ | ||
58 | u32 utbipar; /* UCC tbi address reg */ | ||
59 | u16 uescr; /* UCC Ethernet statistics control reg */ | ||
60 | u8 res3[0x180 - 0x15A]; | ||
61 | u32 tx64; /* Total number of frames (including bad | ||
62 | frames) transmitted that were exactly of the | ||
63 | minimal length (64 for untagged, 68 for | ||
64 | tagged, or with length exactly equal to the | ||
65 | parameter MINLength) */ | ||
66 | u32 tx127; /* Total number of frames (including bad | ||
67 | frames) transmitted that were between | ||
68 | MINLength (Including FCS length==4) and 127 | ||
69 | octets */ | ||
70 | u32 tx255; /* Total number of frames (including bad | ||
71 | frames) transmitted that were between 128 | ||
72 | (Including FCS length==4) and 255 octets */ | ||
73 | u32 rx64; /* Total number of frames received including | ||
74 | bad frames that were exactly of the minimal | ||
75 | length (64 bytes) */ | ||
76 | u32 rx127; /* Total number of frames (including bad | ||
77 | frames) received that were between MINLength | ||
78 | (Including FCS length==4) and 127 octets */ | ||
79 | u32 rx255; /* Total number of frames (including bad | ||
80 | frames) received that were between 128 | ||
81 | (Including FCS length==4) and 255 octets */ | ||
82 | u32 txok; /* Total number of octets residing in frames | ||
83 | that were involved in successful | ||
84 | transmission */ | ||
85 | u16 txcf; /* Total number of PAUSE control frames | ||
86 | transmitted by this MAC */ | ||
87 | u8 res4[0x2]; | ||
88 | u32 tmca; /* Total number of frames that were transmitted | ||
89 | successfully with the group address bit set | ||
90 | that are not broadcast frames */ | ||
91 | u32 tbca; /* Total number of frames transmitted | ||
92 | successfully that had destination address | ||
93 | field equal to the broadcast address */ | ||
94 | u32 rxfok; /* Total number of frames received OK */ | ||
95 | u32 rxbok; /* Total number of octets received OK */ | ||
96 | u32 rbyt; /* Total number of octets received including | ||
97 | octets in bad frames. Must be implemented in | ||
98 | HW because it includes octets in frames that | ||
99 | never even reach the UCC */ | ||
100 | u32 rmca; /* Total number of frames that were received | ||
101 | successfully with the group address bit set | ||
102 | that are not broadcast frames */ | ||
103 | u32 rbca; /* Total number of frames received successfully | ||
104 | that had destination address equal to the | ||
105 | broadcast address */ | ||
106 | u32 scar; /* Statistics carry register */ | ||
107 | u32 scam; /* Statistics carry mask register */ | ||
108 | u8 res5[0x200 - 0x1c4]; | ||
109 | } __packed; | ||
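
Since this structure overlays the memory-mapped register block, its fields are big-endian device registers and must be read with the MMIO accessors rather than plain loads. A hedged sketch (the helper name is ours):

	/* Sketch: hardware statistics counters are big-endian device
	 * registers, so they are read through in_be32(). */
	static u32 read_tx64_counter(struct ucc_geth __iomem *regs)
	{
		return in_be32(&regs->tx64);
	}
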
110 | |||
111 | /* UCC GETH TEMODER Register */ | ||
112 | #define TEMODER_TX_RMON_STATISTICS_ENABLE 0x0100 /* enable Tx statistics | ||
113 | */ | ||
114 | #define TEMODER_SCHEDULER_ENABLE 0x2000 /* enable scheduler */ | ||
115 | #define TEMODER_IP_CHECKSUM_GENERATE 0x0400 /* generate IPv4 | ||
116 | checksums */ | ||
117 | #define TEMODER_PERFORMANCE_OPTIMIZATION_MODE1 0x0200 /* enable performance | ||
118 | optimization | ||
119 | enhancement (mode1) */ | ||
120 | #define TEMODER_RMON_STATISTICS 0x0100 /* enable tx statistics | ||
121 | */ | ||
122 | #define TEMODER_NUM_OF_QUEUES_SHIFT (15-15) /* Number of queues << | ||
123 | shift */ | ||
124 | |||
125 | /* UCC GETH REMODER Register */ | ||
126 | #define REMODER_RX_RMON_STATISTICS_ENABLE 0x00001000 /* enable Rx | ||
127 | statistics */ | ||
128 | #define REMODER_RX_EXTENDED_FEATURES 0x80000000 /* enable | ||
129 | extended | ||
130 | features */ | ||
131 | #define REMODER_VLAN_OPERATION_TAGGED_SHIFT (31-9) /* vlan operation | ||
132 | tagged << shift */ | ||
133 | #define REMODER_VLAN_OPERATION_NON_TAGGED_SHIFT (31-10) /* vlan operation non | ||
134 | tagged << shift */ | ||
135 | #define REMODER_RX_QOS_MODE_SHIFT (31-15) /* rx QoS mode << shift | ||
136 | */ | ||
137 | #define REMODER_RMON_STATISTICS 0x00001000 /* enable rx | ||
138 | statistics */ | ||
139 | #define REMODER_RX_EXTENDED_FILTERING 0x00000800 /* extended | ||
140 | filtering | ||
141 | vs. | ||
142 | mpc82xx-like | ||
143 | filtering */ | ||
144 | #define REMODER_NUM_OF_QUEUES_SHIFT (31-23) /* Number of queues << | ||
145 | shift */ | ||
146 | #define REMODER_DYNAMIC_MAX_FRAME_LENGTH 0x00000008 /* enable | ||
147 | dynamic max | ||
148 | frame length | ||
149 | */ | ||
150 | #define REMODER_DYNAMIC_MIN_FRAME_LENGTH 0x00000004 /* enable | ||
151 | dynamic min | ||
152 | frame length | ||
153 | */ | ||
154 | #define REMODER_IP_CHECKSUM_CHECK 0x00000002 /* check IPv4 | ||
155 | checksums */ | ||
156 | #define REMODER_IP_ADDRESS_ALIGNMENT 0x00000001 /* align ip | ||
157 | address to | ||
158 | 4-byte | ||
159 | boundary */ | ||
160 | |||
161 | /* UCC GETH Event Register */ | ||
162 | #define UCCE_TXB (UCC_GETH_UCCE_TXB7 | UCC_GETH_UCCE_TXB6 | \ | ||
163 | UCC_GETH_UCCE_TXB5 | UCC_GETH_UCCE_TXB4 | \ | ||
164 | UCC_GETH_UCCE_TXB3 | UCC_GETH_UCCE_TXB2 | \ | ||
165 | UCC_GETH_UCCE_TXB1 | UCC_GETH_UCCE_TXB0) | ||
166 | |||
167 | #define UCCE_RXB (UCC_GETH_UCCE_RXB7 | UCC_GETH_UCCE_RXB6 | \ | ||
168 | UCC_GETH_UCCE_RXB5 | UCC_GETH_UCCE_RXB4 | \ | ||
169 | UCC_GETH_UCCE_RXB3 | UCC_GETH_UCCE_RXB2 | \ | ||
170 | UCC_GETH_UCCE_RXB1 | UCC_GETH_UCCE_RXB0) | ||
171 | |||
172 | #define UCCE_RXF (UCC_GETH_UCCE_RXF7 | UCC_GETH_UCCE_RXF6 | \ | ||
173 | UCC_GETH_UCCE_RXF5 | UCC_GETH_UCCE_RXF4 | \ | ||
174 | UCC_GETH_UCCE_RXF3 | UCC_GETH_UCCE_RXF2 | \ | ||
175 | UCC_GETH_UCCE_RXF1 | UCC_GETH_UCCE_RXF0) | ||
176 | |||
177 | #define UCCE_OTHER (UCC_GETH_UCCE_SCAR | UCC_GETH_UCCE_GRA | \ | ||
178 | UCC_GETH_UCCE_CBPR | UCC_GETH_UCCE_BSY | \ | ||
179 | UCC_GETH_UCCE_RXC | UCC_GETH_UCCE_TXC | UCC_GETH_UCCE_TXE) | ||
180 | |||
181 | #define UCCE_RX_EVENTS (UCCE_RXF | UCC_GETH_UCCE_BSY) | ||
182 | #define UCCE_TX_EVENTS (UCCE_TXB | UCC_GETH_UCCE_TXE) | ||
183 | |||
184 | /* TBI defines */ | ||
185 | #define ENET_TBI_MII_CR 0x00 /* Control */ | ||
186 | #define ENET_TBI_MII_SR 0x01 /* Status */ | ||
187 | #define ENET_TBI_MII_ANA 0x04 /* AN advertisement */ | ||
188 | #define ENET_TBI_MII_ANLPBPA 0x05 /* AN link partner base page ability */ | ||
189 | #define ENET_TBI_MII_ANEX 0x06 /* AN expansion */ | ||
190 | #define ENET_TBI_MII_ANNPT 0x07 /* AN next page transmit */ | ||
191 | #define ENET_TBI_MII_ANLPANP 0x08 /* AN link partner ability next page */ | ||
192 | #define ENET_TBI_MII_EXST 0x0F /* Extended status */ | ||
193 | #define ENET_TBI_MII_JD 0x10 /* Jitter diagnostics */ | ||
194 | #define ENET_TBI_MII_TBICON 0x11 /* TBI control */ | ||
195 | |||
196 | /* TBI MDIO register bit fields*/ | ||
197 | #define TBISR_LSTATUS 0x0004 | ||
198 | #define TBICON_CLK_SELECT 0x0020 | ||
199 | #define TBIANA_ASYMMETRIC_PAUSE 0x0100 | ||
200 | #define TBIANA_SYMMETRIC_PAUSE 0x0080 | ||
201 | #define TBIANA_HALF_DUPLEX 0x0040 | ||
202 | #define TBIANA_FULL_DUPLEX 0x0020 | ||
203 | #define TBICR_PHY_RESET 0x8000 | ||
204 | #define TBICR_ANEG_ENABLE 0x1000 | ||
205 | #define TBICR_RESTART_ANEG 0x0200 | ||
206 | #define TBICR_FULL_DUPLEX 0x0100 | ||
207 | #define TBICR_SPEED1_SET 0x0040 | ||
208 | |||
209 | #define TBIANA_SETTINGS ( \ | ||
210 | TBIANA_ASYMMETRIC_PAUSE \ | ||
211 | | TBIANA_SYMMETRIC_PAUSE \ | ||
212 | | TBIANA_FULL_DUPLEX \ | ||
213 | ) | ||
214 | #define TBICR_SETTINGS ( \ | ||
215 | TBICR_PHY_RESET \ | ||
216 | | TBICR_ANEG_ENABLE \ | ||
217 | | TBICR_FULL_DUPLEX \ | ||
218 | | TBICR_SPEED1_SET \ | ||
219 | ) | ||
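
These composite values are what an SGMII bring-up path would program into the TBI through the MDIO layer. A sketch, assuming the TBI is reachable as a struct phy_device (as the driver's SGMII setup does elsewhere):

	/* Sketch, assuming tbiphy is the TBI exposed via MDIO: */
	static void tbi_aneg_sketch(struct phy_device *tbiphy)
	{
		phy_write(tbiphy, ENET_TBI_MII_ANA, TBIANA_SETTINGS);
		phy_write(tbiphy, ENET_TBI_MII_TBICON, TBICON_CLK_SELECT);
		phy_write(tbiphy, ENET_TBI_MII_CR, TBICR_SETTINGS);
	}
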
220 | |||
221 | /* UCC GETH MACCFG1 (MAC Configuration 1 Register) */ | ||
222 | #define MACCFG1_FLOW_RX 0x00000020 /* Flow Control | ||
223 | Rx */ | ||
224 | #define MACCFG1_FLOW_TX 0x00000010 /* Flow Control | ||
225 | Tx */ | ||
226 | #define MACCFG1_ENABLE_SYNCHED_RX 0x00000008 /* Rx Enable | ||
227 | synchronized | ||
228 | to Rx stream | ||
229 | */ | ||
230 | #define MACCFG1_ENABLE_RX 0x00000004 /* Enable Rx */ | ||
231 | #define MACCFG1_ENABLE_SYNCHED_TX 0x00000002 /* Tx Enable | ||
232 | synchronized | ||
233 | to Tx stream | ||
234 | */ | ||
235 | #define MACCFG1_ENABLE_TX 0x00000001 /* Enable Tx */ | ||
236 | |||
237 | /* UCC GETH MACCFG2 (MAC Configuration 2 Register) */ | ||
238 | #define MACCFG2_PREL_SHIFT (31 - 19) /* Preamble | ||
239 | Length << | ||
240 | shift */ | ||
241 | #define MACCFG2_PREL_MASK 0x0000f000 /* Preamble | ||
242 | Length mask */ | ||
243 | #define MACCFG2_SRP 0x00000080 /* Soft Receive | ||
244 | Preamble */ | ||
245 | #define MACCFG2_STP 0x00000040 /* Soft | ||
246 | Transmit | ||
247 | Preamble */ | ||
248 | #define MACCFG2_RESERVED_1 0x00000020 /* Reserved - | ||
249 | must be set | ||
250 | to 1 */ | ||
251 | #define MACCFG2_LC 0x00000010 /* Length Check | ||
252 | */ | ||
253 | #define MACCFG2_MPE 0x00000008 /* Magic packet | ||
254 | detect */ | ||
255 | #define MACCFG2_FDX 0x00000001 /* Full Duplex */ | ||
256 | #define MACCFG2_FDX_MASK 0x00000001 /* Full Duplex | ||
257 | mask */ | ||
258 | #define MACCFG2_PAD_CRC 0x00000004 | ||
259 | #define MACCFG2_CRC_EN 0x00000002 | ||
260 | #define MACCFG2_PAD_AND_CRC_MODE_NONE 0x00000000 /* Neither | ||
261 | Padding | ||
262 | short frames | ||
263 | nor CRC */ | ||
264 | #define MACCFG2_PAD_AND_CRC_MODE_CRC_ONLY 0x00000002 /* Append CRC | ||
265 | only */ | ||
266 | #define MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC 0x00000004 | ||
267 | #define MACCFG2_INTERFACE_MODE_NIBBLE 0x00000100 /* nibble mode | ||
268 | (MII/RMII/RGMII | ||
269 | 10/100 Mbps) */ | ||
270 | #define MACCFG2_INTERFACE_MODE_BYTE 0x00000200 /* byte mode | ||
271 | (GMII/TBI/RTBI/RGMII | ||
272 | 1000 Mbps) */ | ||
273 | #define MACCFG2_INTERFACE_MODE_MASK 0x00000300 /* mask | ||
274 | covering all | ||
275 | relevant | ||
276 | bits */ | ||
277 | |||
278 | /* UCC GETH IPGIFG (Inter-frame Gap / Inter-Frame Gap Register) */ | ||
279 | #define IPGIFG_NON_BACK_TO_BACK_IFG_PART1_SHIFT (31 - 7) /* Non | ||
280 | back-to-back | ||
281 | inter frame | ||
282 | gap part 1. | ||
283 | << shift */ | ||
284 | #define IPGIFG_NON_BACK_TO_BACK_IFG_PART2_SHIFT (31 - 15) /* Non | ||
285 | back-to-back | ||
286 | inter frame | ||
287 | gap part 2. | ||
288 | << shift */ | ||
289 | #define IPGIFG_MINIMUM_IFG_ENFORCEMENT_SHIFT (31 - 23) /* Minimum IFG | ||
290 | Enforcement | ||
291 | << shift */ | ||
292 | #define IPGIFG_BACK_TO_BACK_IFG_SHIFT (31 - 31) /* back-to-back | ||
293 | inter frame | ||
294 | gap << shift | ||
295 | */ | ||
296 | #define IPGIFG_NON_BACK_TO_BACK_IFG_PART1_MAX 127 /* Non back-to-back | ||
297 | inter frame gap part | ||
298 | 1. max val */ | ||
299 | #define IPGIFG_NON_BACK_TO_BACK_IFG_PART2_MAX 127 /* Non back-to-back | ||
300 | inter frame gap part | ||
301 | 2. max val */ | ||
302 | #define IPGIFG_MINIMUM_IFG_ENFORCEMENT_MAX 255 /* Minimum IFG | ||
303 | Enforcement max val */ | ||
304 | #define IPGIFG_BACK_TO_BACK_IFG_MAX 127 /* back-to-back inter | ||
305 | frame gap max val */ | ||
306 | #define IPGIFG_NBTB_CS_IPG_MASK 0x7F000000 | ||
307 | #define IPGIFG_NBTB_IPG_MASK 0x007F0000 | ||
308 | #define IPGIFG_MIN_IFG_MASK 0x0000FF00 | ||
309 | #define IPGIFG_BTB_IPG_MASK 0x0000007F | ||
310 | |||
311 | /* UCC GETH HAFDUP (Half Duplex Register) */ | ||
312 | #define HALFDUP_ALT_BEB_TRUNCATION_SHIFT (31 - 11) /* Alternate | ||
313 | Binary | ||
314 | Exponential | ||
315 | Backoff | ||
316 | Truncation | ||
317 | << shift */ | ||
318 | #define HALFDUP_ALT_BEB_TRUNCATION_MAX 0xf /* Alternate Binary | ||
319 | Exponential Backoff | ||
320 | Truncation max val */ | ||
321 | #define HALFDUP_ALT_BEB 0x00080000 /* Alternate | ||
322 | Binary | ||
323 | Exponential | ||
324 | Backoff */ | ||
325 | #define HALFDUP_BACK_PRESSURE_NO_BACKOFF 0x00040000 /* Back | ||
326 | pressure no | ||
327 | backoff */ | ||
328 | #define HALFDUP_NO_BACKOFF 0x00020000 /* No Backoff */ | ||
329 | #define HALFDUP_EXCESSIVE_DEFER 0x00010000 /* Excessive | ||
330 | Defer */ | ||
331 | #define HALFDUP_MAX_RETRANSMISSION_SHIFT (31 - 19) /* Maximum | ||
332 | Retransmission | ||
333 | << shift */ | ||
334 | #define HALFDUP_MAX_RETRANSMISSION_MAX 0xf /* Maximum | ||
335 | Retransmission max | ||
336 | val */ | ||
337 | #define HALFDUP_COLLISION_WINDOW_SHIFT (31 - 31) /* Collision | ||
338 | Window << | ||
339 | shift */ | ||
340 | #define HALFDUP_COLLISION_WINDOW_MAX 0x3f /* Collision Window max | ||
341 | val */ | ||
342 | #define HALFDUP_ALT_BEB_TR_MASK 0x00F00000 | ||
343 | #define HALFDUP_RETRANS_MASK 0x0000F000 | ||
344 | #define HALFDUP_COL_WINDOW_MASK 0x0000003F | ||
345 | |||
346 | /* UCC GETH UCCS (Ethernet Status Register) */ | ||
347 | #define UCCS_BPR 0x02 /* Back pressure (in | ||
348 | half duplex mode) */ | ||
349 | #define UCCS_PAU 0x02 /* Pause state (in full | ||
350 | duplex mode) */ | ||
351 | #define UCCS_MPD 0x01 /* Magic Packet | ||
352 | Detected */ | ||
353 | |||
354 | /* UCC GETH IFSTAT (Interface Status Register) */ | ||
355 | #define IFSTAT_EXCESS_DEFER 0x00000200 /* Excessive | ||
356 | transmission | ||
357 | defer */ | ||
358 | |||
359 | /* UCC GETH MACSTNADDR1 (Station Address Part 1 Register) */ | ||
360 | #define MACSTNADDR1_OCTET_6_SHIFT (31 - 7) /* Station | ||
361 | address 6th | ||
362 | octet << | ||
363 | shift */ | ||
364 | #define MACSTNADDR1_OCTET_5_SHIFT (31 - 15) /* Station | ||
365 | address 5th | ||
366 | octet << | ||
367 | shift */ | ||
368 | #define MACSTNADDR1_OCTET_4_SHIFT (31 - 23) /* Station | ||
369 | address 4th | ||
370 | octet << | ||
371 | shift */ | ||
372 | #define MACSTNADDR1_OCTET_3_SHIFT (31 - 31) /* Station | ||
373 | address 3rd | ||
374 | octet << | ||
375 | shift */ | ||
376 | |||
377 | /* UCC GETH MACSTNADDR2 (Station Address Part 2 Register) */ | ||
378 | #define MACSTNADDR2_OCTET_2_SHIFT (31 - 7) /* Station | ||
379 | address 2nd | ||
380 | octet << | ||
381 | shift */ | ||
382 | #define MACSTNADDR2_OCTET_1_SHIFT (31 - 15) /* Station | ||
383 | address 1st | ||
384 | octet << | ||
385 | shift */ | ||
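
Together these shifts spread the six address octets across the two registers: octets 6..3 in MACSTNADDR1 and octets 2..1 in MACSTNADDR2. A sketch of the packing, assuming octet N corresponds to dev_addr[N-1] (the helper name is ours):

	/* Sketch: pack a MAC address into the two station-address registers. */
	static void pack_station_addr(const u8 *mac,
				      u32 __iomem *stnaddr1, u32 __iomem *stnaddr2)
	{
		out_be32(stnaddr1,
			 ((u32)mac[5] << MACSTNADDR1_OCTET_6_SHIFT) |
			 ((u32)mac[4] << MACSTNADDR1_OCTET_5_SHIFT) |
			 ((u32)mac[3] << MACSTNADDR1_OCTET_4_SHIFT) |
			 ((u32)mac[2] << MACSTNADDR1_OCTET_3_SHIFT));
		out_be32(stnaddr2,
			 ((u32)mac[1] << MACSTNADDR2_OCTET_2_SHIFT) |
			 ((u32)mac[0] << MACSTNADDR2_OCTET_1_SHIFT));
	}
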
386 | |||
387 | /* UCC GETH UEMPR (Ethernet Mac Parameter Register) */ | ||
388 | #define UEMPR_PAUSE_TIME_VALUE_SHIFT (31 - 15) /* Pause time | ||
389 | value << | ||
390 | shift */ | ||
391 | #define UEMPR_EXTENDED_PAUSE_TIME_VALUE_SHIFT (31 - 31) /* Extended | ||
392 | pause time | ||
393 | value << | ||
394 | shift */ | ||
395 | |||
396 | /* UCC GETH UTBIPAR (Ten Bit Interface Physical Address Register) */ | ||
397 | #define UTBIPAR_PHY_ADDRESS_SHIFT (31 - 31) /* Phy address | ||
398 | << shift */ | ||
399 | #define UTBIPAR_PHY_ADDRESS_MASK 0x0000001f /* Phy address | ||
400 | mask */ | ||
401 | |||
402 | /* UCC GETH UESCR (Ethernet Statistics Control Register) */ | ||
403 | #define UESCR_AUTOZ 0x8000 /* Automatically zero | ||
404 | addressed | ||
405 | statistical counter | ||
406 | values */ | ||
407 | #define UESCR_CLRCNT 0x4000 /* Clear all statistics | ||
408 | counters */ | ||
409 | #define UESCR_MAXCOV_SHIFT (15 - 7) /* Max | ||
410 | Coalescing | ||
411 | Value << | ||
412 | shift */ | ||
413 | #define UESCR_SCOV_SHIFT (15 - 15) /* Status | ||
414 | Coalescing | ||
415 | Value << | ||
416 | shift */ | ||
417 | |||
418 | /* UCC GETH UDSR (Data Synchronization Register) */ | ||
419 | #define UDSR_MAGIC 0x067E | ||
420 | |||
421 | struct ucc_geth_thread_data_tx { | ||
422 | u8 res0[104]; | ||
423 | } __packed; | ||
424 | |||
425 | struct ucc_geth_thread_data_rx { | ||
426 | u8 res0[40]; | ||
427 | } __packed; | ||
428 | |||
429 | /* Send Queue Queue-Descriptor */ | ||
430 | struct ucc_geth_send_queue_qd { | ||
431 | u32 bd_ring_base; /* pointer to BD ring base address */ | ||
432 | u8 res0[0x8]; | ||
433 | u32 last_bd_completed_address;/* initialize to last entry in BD ring */ | ||
434 | u8 res1[0x30]; | ||
435 | } __packed; | ||
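
Per the field comments, software seeds bd_ring_base with the physical base of the queue's BD ring and last_bd_completed_address with the address of the final descriptor. A hedged initialization sketch (names and parameters are ours):

	/* Sketch: seed one send-queue QD, assuming ring_phys is the
	 * physical base of a ring of ring_len descriptors. */
	static void init_sq_qd_sketch(struct ucc_geth_send_queue_qd __iomem *qd,
				      u32 ring_phys, unsigned int ring_len)
	{
		out_be32(&qd->bd_ring_base, ring_phys);
		out_be32(&qd->last_bd_completed_address,
			 ring_phys + (ring_len - 1) * sizeof(struct qe_bd));
	}
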
436 | |||
437 | struct ucc_geth_send_queue_mem_region { | ||
438 | struct ucc_geth_send_queue_qd sqqd[NUM_TX_QUEUES]; | ||
439 | } __packed; | ||
440 | |||
441 | struct ucc_geth_thread_tx_pram { | ||
442 | u8 res0[64]; | ||
443 | } __packed; | ||
444 | |||
445 | struct ucc_geth_thread_rx_pram { | ||
446 | u8 res0[128]; | ||
447 | } __packed; | ||
448 | |||
449 | #define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING 64 | ||
450 | #define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8 64 | ||
451 | #define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16 96 | ||
452 | |||
453 | struct ucc_geth_scheduler { | ||
454 | u16 cpucount0; /* CPU packet counter */ | ||
455 | u16 cpucount1; /* CPU packet counter */ | ||
456 | u16 cecount0; /* QE packet counter */ | ||
457 | u16 cecount1; /* QE packet counter */ | ||
458 | u16 cpucount2; /* CPU packet counter */ | ||
459 | u16 cpucount3; /* CPU packet counter */ | ||
460 | u16 cecount2; /* QE packet counter */ | ||
461 | u16 cecount3; /* QE packet counter */ | ||
462 | u16 cpucount4; /* CPU packet counter */ | ||
463 | u16 cpucount5; /* CPU packet counter */ | ||
464 | u16 cecount4; /* QE packet counter */ | ||
465 | u16 cecount5; /* QE packet counter */ | ||
466 | u16 cpucount6; /* CPU packet counter */ | ||
467 | u16 cpucount7; /* CPU packet counter */ | ||
468 | u16 cecount6; /* QE packet counter */ | ||
469 | u16 cecount7; /* QE packet counter */ | ||
470 | u32 weightstatus[NUM_TX_QUEUES]; /* accumulated weight factor */ | ||
471 | u32 rtsrshadow; /* temporary variable handled by QE */ | ||
472 | u32 time; /* temporary variable handled by QE */ | ||
473 | u32 ttl; /* temporary variable handled by QE */ | ||
474 | u32 mblinterval; /* max burst length interval */ | ||
475 | u16 nortsrbytetime; /* normalized value of byte time in tsr units */ | ||
476 | u8 fracsiz; /* log2 of the denominator of | ||
477 | NorTSRByteTime */ | ||
478 | u8 res0[1]; | ||
479 | u8 strictpriorityq; /* Strict Priority Mask register */ | ||
480 | u8 txasap; /* Transmit ASAP register */ | ||
481 | u8 extrabw; /* Extra BandWidth register */ | ||
482 | u8 oldwfqmask; /* temporary variable handled by QE */ | ||
483 | u8 weightfactor[NUM_TX_QUEUES]; | ||
484 | /**< weight factor for queues */ | ||
485 | u32 minw; /* temporary variable handled by QE */ | ||
486 | u8 res1[0x70 - 0x64]; | ||
487 | } __packed; | ||
488 | |||
489 | struct ucc_geth_tx_firmware_statistics_pram { | ||
490 | u32 sicoltx; /* single collision */ | ||
491 | u32 mulcoltx; /* multiple collision */ | ||
492 | u32 latecoltxfr; /* late collision */ | ||
493 | u32 frabortduecol; /* frames aborted due to transmit collision */ | ||
494 | u32 frlostinmactxer; /* frames lost due to internal MAC error | ||
495 | transmission that are not counted on any | ||
496 | other counter */ | ||
497 | u32 carriersenseertx; /* carrier sense error */ | ||
498 | u32 frtxok; /* frames transmitted OK */ | ||
499 | u32 txfrexcessivedefer; /* frames with deferral time greater than | ||
500 | specified threshold */ | ||
501 | u32 txpkts256; /* total packets (including bad) between 256 | ||
502 | and 511 octets */ | ||
503 | u32 txpkts512; /* total packets (including bad) between 512 | ||
504 | and 1023 octets */ | ||
505 | u32 txpkts1024; /* total packets (including bad) between 1024 | ||
506 | and 1518 octets */ | ||
507 | u32 txpktsjumbo; /* total packets (including bad) between 1024 | ||
508 | and MAXLength octets */ | ||
509 | } __packed; | ||
510 | |||
511 | struct ucc_geth_rx_firmware_statistics_pram { | ||
512 | u32 frrxfcser; /* frames with crc error */ | ||
513 | u32 fraligner; /* frames with alignment error */ | ||
514 | u32 inrangelenrxer; /* in range length error */ | ||
515 | u32 outrangelenrxer; /* out of range length error */ | ||
516 | u32 frtoolong; /* frame too long */ | ||
517 | u32 runt; /* runt */ | ||
518 | u32 verylongevent; /* very long event */ | ||
519 | u32 symbolerror; /* symbol error */ | ||
520 | u32 dropbsy; /* drop because of BD not ready */ | ||
521 | u8 res0[0x8]; | ||
522 | u32 mismatchdrop; /* drop because of MAC filtering (e.g. address | ||
523 | or type mismatch) */ | ||
524 | u32 underpkts; /* total frames less than 64 octets */ | ||
525 | u32 pkts256; /* total frames (including bad) between 256 and | ||
526 | 511 octets */ | ||
527 | u32 pkts512; /* total frames (including bad) between 512 and | ||
528 | 1023 octets */ | ||
529 | u32 pkts1024; /* total frames (including bad) between 1024 | ||
530 | and 1518 octets */ | ||
531 | u32 pktsjumbo; /* total frames (including bad) between 1024 | ||
532 | and MAXLength octets */ | ||
533 | u32 frlossinmacer; /* frames lost because of internal MAC error | ||
534 | that is not counted in any other counter */ | ||
535 | u32 pausefr; /* pause frames */ | ||
536 | u8 res1[0x4]; | ||
537 | u32 removevlan; /* total frames that had their VLAN tag removed | ||
538 | */ | ||
539 | u32 replacevlan; /* total frames that had their VLAN tag | ||
540 | replaced */ | ||
541 | u32 insertvlan; /* total frames that had their VLAN tag | ||
542 | inserted */ | ||
543 | } __packed; | ||
544 | |||
545 | struct ucc_geth_rx_interrupt_coalescing_entry { | ||
546 | u32 interruptcoalescingmaxvalue; /* interrupt coalescing max | ||
547 | value */ | ||
548 | u32 interruptcoalescingcounter; /* interrupt coalescing counter, | ||
549 | initialize to | ||
550 | interruptcoalescingmaxvalue */ | ||
551 | } __packed; | ||
552 | |||
553 | struct ucc_geth_rx_interrupt_coalescing_table { | ||
554 | struct ucc_geth_rx_interrupt_coalescing_entry coalescingentry[NUM_RX_QUEUES]; | ||
555 | /**< interrupt coalescing entry */ | ||
556 | } __packed; | ||
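
As the entry's field comment says, the counter is seeded to the maximum value and counts down as frames arrive; software initializes both words to the same value. A sketch of arming one entry (the helper name is ours):

	/* Sketch: arm one Rx interrupt-coalescing entry. */
	static void seed_coalescing_entry(
		struct ucc_geth_rx_interrupt_coalescing_entry __iomem *e, u32 max)
	{
		out_be32(&e->interruptcoalescingmaxvalue, max);
		out_be32(&e->interruptcoalescingcounter, max);
	}
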
557 | |||
558 | struct ucc_geth_rx_prefetched_bds { | ||
559 | struct qe_bd bd[NUM_BDS_IN_PREFETCHED_BDS]; /* prefetched bd */ | ||
560 | } __packed; | ||
561 | |||
562 | struct ucc_geth_rx_bd_queues_entry { | ||
563 | u32 bdbaseptr; /* BD base pointer */ | ||
564 | u32 bdptr; /* BD pointer */ | ||
565 | u32 externalbdbaseptr; /* external BD base pointer */ | ||
566 | u32 externalbdptr; /* external BD pointer */ | ||
567 | } __packed; | ||
568 | |||
569 | struct ucc_geth_tx_global_pram { | ||
570 | u16 temoder; | ||
571 | u8 res0[0x38 - 0x02]; | ||
572 | u32 sqptr; /* a base pointer to send queue memory region */ | ||
573 | u32 schedulerbasepointer; /* a base pointer to scheduler memory | ||
574 | region */ | ||
575 | u32 txrmonbaseptr; /* base pointer to Tx RMON statistics counter */ | ||
576 | u32 tstate; /* tx internal state. High byte contains | ||
577 | function code */ | ||
578 | u8 iphoffset[TX_IP_OFFSET_ENTRY_MAX]; | ||
579 | u32 vtagtable[0x8]; /* 8 4-byte VLAN tags */ | ||
580 | u32 tqptr; /* a base pointer to the Tx Queues Memory | ||
581 | Region */ | ||
582 | u8 res2[0x80 - 0x74]; | ||
583 | } __packed; | ||
584 | |||
585 | /* structure representing Extended Filtering Global Parameters in PRAM */ | ||
586 | struct ucc_geth_exf_global_pram { | ||
587 | u32 l2pcdptr; /* individual address filter, high */ | ||
588 | u8 res0[0x10 - 0x04]; | ||
589 | } __packed; | ||
590 | |||
591 | struct ucc_geth_rx_global_pram { | ||
592 | u32 remoder; /* ethernet mode reg. */ | ||
593 | u32 rqptr; /* base pointer to the Rx Queues Memory Region*/ | ||
594 | u32 res0[0x1]; | ||
595 | u8 res1[0x20 - 0xC]; | ||
596 | u16 typeorlen; /* cutoff point below which the type/len | ||
597 | field is treated as a length */ | ||
598 | u8 res2[0x1]; | ||
599 | u8 rxgstpack; /* acknowledgement on GRACEFUL STOP RX command*/ | ||
600 | u32 rxrmonbaseptr; /* base pointer to Rx RMON statistics counter */ | ||
601 | u8 res3[0x30 - 0x28]; | ||
602 | u32 intcoalescingptr; /* Interrupt coalescing table pointer */ | ||
603 | u8 res4[0x36 - 0x34]; | ||
604 | u8 rstate; /* rx internal state. High byte contains | ||
605 | function code */ | ||
606 | u8 res5[0x46 - 0x37]; | ||
607 | u16 mrblr; /* max receive buffer length reg. */ | ||
608 | u32 rbdqptr; /* base pointer to RxBD parameter table | ||
609 | description */ | ||
610 | u16 mflr; /* max frame length reg. */ | ||
611 | u16 minflr; /* min frame length reg. */ | ||
612 | u16 maxd1; /* max dma1 length reg. */ | ||
613 | u16 maxd2; /* max dma2 length reg. */ | ||
614 | u32 ecamptr; /* external CAM address */ | ||
615 | u32 l2qt; /* VLAN priority mapping table. */ | ||
616 | u32 l3qt[0x8]; /* IP priority mapping table. */ | ||
617 | u16 vlantype; /* vlan type */ | ||
618 | u16 vlantci; /* default vlan tci */ | ||
619 | u8 addressfiltering[64]; /* address filtering data structure */ | ||
620 | u32 exfGlobalParam; /* base address for extended filtering global | ||
621 | parameters */ | ||
622 | u8 res6[0x100 - 0xC4]; /* Initialize to zero */ | ||
623 | } __packed; | ||
624 | |||
625 | #define GRACEFUL_STOP_ACKNOWLEDGE_RX 0x01 | ||
626 | |||
627 | /* structure representing InitEnet command */ | ||
628 | struct ucc_geth_init_pram { | ||
629 | u8 resinit1; | ||
630 | u8 resinit2; | ||
631 | u8 resinit3; | ||
632 | u8 resinit4; | ||
633 | u16 resinit5; | ||
634 | u8 res1[0x1]; | ||
635 | u8 largestexternallookupkeysize; | ||
636 | u32 rgftgfrxglobal; | ||
637 | u32 rxthread[ENET_INIT_PARAM_MAX_ENTRIES_RX]; /* rx threads */ | ||
638 | u8 res2[0x38 - 0x30]; | ||
639 | u32 txglobal; /* tx global */ | ||
640 | u32 txthread[ENET_INIT_PARAM_MAX_ENTRIES_TX]; /* tx threads */ | ||
641 | u8 res3[0x1]; | ||
642 | } __packed; | ||
643 | |||
644 | #define ENET_INIT_PARAM_RGF_SHIFT (32 - 4) | ||
645 | #define ENET_INIT_PARAM_TGF_SHIFT (32 - 8) | ||
646 | |||
647 | #define ENET_INIT_PARAM_RISC_MASK 0x0000003f | ||
648 | #define ENET_INIT_PARAM_PTR_MASK 0x00ffffc0 | ||
649 | #define ENET_INIT_PARAM_SNUM_MASK 0xff000000 | ||
650 | #define ENET_INIT_PARAM_SNUM_SHIFT 24 | ||
651 | |||
652 | #define ENET_INIT_PARAM_MAGIC_RES_INIT1 0x06 | ||
653 | #define ENET_INIT_PARAM_MAGIC_RES_INIT2 0x30 | ||
654 | #define ENET_INIT_PARAM_MAGIC_RES_INIT3 0xff | ||
655 | #define ENET_INIT_PARAM_MAGIC_RES_INIT4 0x00 | ||
656 | #define ENET_INIT_PARAM_MAGIC_RES_INIT5 0x0400 | ||
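
Each thread entry in the InitEnet command packs a serial number (snum), a parameter-RAM pointer, and a RISC-allocation field into one 32-bit word using the masks above. A sketch of that packing (the function name is ours):

	/* Sketch: compose one InitEnet thread entry from its three fields. */
	static u32 make_init_enet_entry(u8 snum, u32 pram_offset, u32 risc)
	{
		return ((u32)snum << ENET_INIT_PARAM_SNUM_SHIFT) |
		       (pram_offset & ENET_INIT_PARAM_PTR_MASK) |
		       (risc & ENET_INIT_PARAM_RISC_MASK);
	}
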
657 | |||
658 | /* structure representing 82xx Address Filtering Enet Address in PRAM */ | ||
659 | struct ucc_geth_82xx_enet_address { | ||
660 | u8 res1[0x2]; | ||
661 | u16 h; /* address (MSB) */ | ||
662 | u16 m; /* address */ | ||
663 | u16 l; /* address (LSB) */ | ||
664 | } __packed; | ||
665 | |||
666 | /* structure representing 82xx Address Filtering PRAM */ | ||
667 | struct ucc_geth_82xx_address_filtering_pram { | ||
668 | u32 iaddr_h; /* individual address filter, high */ | ||
669 | u32 iaddr_l; /* individual address filter, low */ | ||
670 | u32 gaddr_h; /* group address filter, high */ | ||
671 | u32 gaddr_l; /* group address filter, low */ | ||
672 | struct ucc_geth_82xx_enet_address __iomem taddr; | ||
673 | struct ucc_geth_82xx_enet_address __iomem paddr[NUM_OF_PADDRS]; | ||
674 | u8 res0[0x40 - 0x38]; | ||
675 | } __packed; | ||
676 | |||
677 | /* GETH Tx firmware statistics structure, used when calling | ||
678 | UCC_GETH_GetStatistics. */ | ||
679 | struct ucc_geth_tx_firmware_statistics { | ||
680 | u32 sicoltx; /* single collision */ | ||
681 | u32 mulcoltx; /* multiple collision */ | ||
682 | u32 latecoltxfr; /* late collision */ | ||
683 | u32 frabortduecol; /* frames aborted due to transmit collision */ | ||
684 | u32 frlostinmactxer; /* frames lost due to internal MAC error | ||
685 | transmission that are not counted on any | ||
686 | other counter */ | ||
687 | u32 carriersenseertx; /* carrier sense error */ | ||
688 | u32 frtxok; /* frames transmitted OK */ | ||
689 | u32 txfrexcessivedefer; /* frames with deferral time greater than | ||
690 | specified threshold */ | ||
691 | u32 txpkts256; /* total packets (including bad) between 256 | ||
692 | and 511 octets */ | ||
693 | u32 txpkts512; /* total packets (including bad) between 512 | ||
694 | and 1023 octets */ | ||
695 | u32 txpkts1024; /* total packets (including bad) between 1024 | ||
696 | and 1518 octets */ | ||
697 | u32 txpktsjumbo; /* total packets (including bad) between 1024 | ||
698 | and MAXLength octets */ | ||
699 | } __packed; | ||
700 | |||
701 | /* GETH Rx firmware statistics structure, used when calling | ||
702 | UCC_GETH_GetStatistics. */ | ||
703 | struct ucc_geth_rx_firmware_statistics { | ||
704 | u32 frrxfcser; /* frames with crc error */ | ||
705 | u32 fraligner; /* frames with alignment error */ | ||
706 | u32 inrangelenrxer; /* in range length error */ | ||
707 | u32 outrangelenrxer; /* out of range length error */ | ||
708 | u32 frtoolong; /* frame too long */ | ||
709 | u32 runt; /* runt */ | ||
710 | u32 verylongevent; /* very long event */ | ||
711 | u32 symbolerror; /* symbol error */ | ||
712 | u32 dropbsy; /* drop because of BD not ready */ | ||
713 | u8 res0[0x8]; | ||
714 | u32 mismatchdrop; /* drop because of MAC filtering (e.g. address | ||
715 | or type mismatch) */ | ||
716 | u32 underpkts; /* total frames less than 64 octets */ | ||
717 | u32 pkts256; /* total frames (including bad) between 256 and | ||
718 | 511 octets */ | ||
719 | u32 pkts512; /* total frames (including bad) between 512 and | ||
720 | 1023 octets */ | ||
721 | u32 pkts1024; /* total frames (including bad) between 1024 | ||
722 | and 1518 octets */ | ||
723 | u32 pktsjumbo; /* total frames (including bad) between 1024 | ||
724 | and MAXLength octets */ | ||
725 | u32 frlossinmacer; /* frames lost because of internal MAC error | ||
726 | that is not counted in any other counter */ | ||
727 | u32 pausefr; /* pause frames */ | ||
728 | u8 res1[0x4]; | ||
729 | u32 removevlan; /* total frames that had their VLAN tag removed | ||
730 | */ | ||
731 | u32 replacevlan; /* total frames that had their VLAN tag | ||
732 | replaced */ | ||
733 | u32 insertvlan; /* total frames that had their VLAN tag | ||
734 | inserted */ | ||
735 | } __packed; | ||
736 | |||
737 | /* GETH hardware statistics structure, used when calling | ||
738 | UCC_GETH_GetStatistics. */ | ||
739 | struct ucc_geth_hardware_statistics { | ||
740 | u32 tx64; /* Total number of frames (including bad | ||
741 | frames) transmitted that were exactly of the | ||
742 | minimal length (64 for untagged, 68 for | ||
743 | tagged, or with length exactly equal to the | ||
744 | parameter MINLength) */ | ||
745 | u32 tx127; /* Total number of frames (including bad | ||
746 | frames) transmitted that were between | ||
747 | MINLength (Including FCS length==4) and 127 | ||
748 | octets */ | ||
749 | u32 tx255; /* Total number of frames (including bad | ||
750 | frames) transmitted that were between 128 | ||
751 | (Including FCS length==4) and 255 octets */ | ||
752 | u32 rx64; /* Total number of frames received including | ||
753 | bad frames that were exactly of the minimal | ||
754 | length (64 bytes) */ | ||
755 | u32 rx127; /* Total number of frames (including bad | ||
756 | frames) received that were between MINLength | ||
757 | (Including FCS length==4) and 127 octets */ | ||
758 | u32 rx255; /* Total number of frames (including bad | ||
759 | frames) received that were between 128 | ||
760 | (Including FCS length==4) and 255 octets */ | ||
761 | u32 txok; /* Total number of octets residing in frames | ||
762 | that were involved in successful | ||
763 | transmission */ | ||
764 | u16 txcf; /* Total number of PAUSE control frames | ||
765 | transmitted by this MAC */ | ||
766 | u32 tmca; /* Total number of frames that were transmitted | ||
767 | successfully with the group address bit set | ||
768 | that are not broadcast frames */ | ||
769 | u32 tbca; /* Total number of frames transmitted | ||
770 | successfully that had destination address | ||
771 | field equal to the broadcast address */ | ||
772 | u32 rxfok; /* Total number of frames received OK */ | ||
773 | u32 rxbok; /* Total number of octets received OK */ | ||
774 | u32 rbyt; /* Total number of octets received including | ||
775 | octets in bad frames. Must be implemented in | ||
776 | HW because it includes octets in frames that | ||
777 | never even reach the UCC */ | ||
778 | u32 rmca; /* Total number of frames that were received | ||
779 | successfully with the group address bit set | ||
780 | that are not broadcast frames */ | ||
781 | u32 rbca; /* Total number of frames received successfully | ||
782 | that had destination address equal to the | ||
783 | broadcast address */ | ||
784 | } __packed; | ||
785 | |||
786 | /* UCC GETH Tx errors returned via TxConf callback */ | ||
787 | #define TX_ERRORS_DEF 0x0200 | ||
788 | #define TX_ERRORS_EXDEF 0x0100 | ||
789 | #define TX_ERRORS_LC 0x0080 | ||
790 | #define TX_ERRORS_RL 0x0040 | ||
791 | #define TX_ERRORS_RC_MASK 0x003C | ||
792 | #define TX_ERRORS_RC_SHIFT 2 | ||
793 | #define TX_ERRORS_UN 0x0002 | ||
794 | #define TX_ERRORS_CSL 0x0001 | ||
795 | |||
796 | /* UCC GETH Rx errors returned via RxStore callback */ | ||
797 | #define RX_ERRORS_CMR 0x0200 | ||
798 | #define RX_ERRORS_M 0x0100 | ||
799 | #define RX_ERRORS_BC 0x0080 | ||
800 | #define RX_ERRORS_MC 0x0040 | ||
801 | |||
802 | /* Transmit BD. These are in addition to values defined in uccf. */ | ||
803 | #define T_VID 0x003c0000 /* insert VLAN id index mask. */ | ||
804 | #define T_DEF (((u32) TX_ERRORS_DEF ) << 16) | ||
805 | #define T_EXDEF (((u32) TX_ERRORS_EXDEF ) << 16) | ||
806 | #define T_LC (((u32) TX_ERRORS_LC ) << 16) | ||
807 | #define T_RL (((u32) TX_ERRORS_RL ) << 16) | ||
808 | #define T_RC_MASK (((u32) TX_ERRORS_RC_MASK ) << 16) | ||
809 | #define T_UN (((u32) TX_ERRORS_UN ) << 16) | ||
810 | #define T_CSL (((u32) TX_ERRORS_CSL ) << 16) | ||
811 | #define T_ERRORS_REPORT (T_DEF | T_EXDEF | T_LC | T_RL | T_RC_MASK \ | ||
812 | | T_UN | T_CSL) /* transmit errors to report */ | ||
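The T_* masks shift the 16-bit TxConf error codes into the upper half of the 32-bit buffer-descriptor status word. A minimal sketch of how a driver might decode a completed TxBD with them; the helper names are hypothetical and not part of this header:

static inline bool tx_bd_has_errors(u32 bd_status)
{
	/* any of the reportable transmit error bits set? */
	return (bd_status & T_ERRORS_REPORT) != 0;
}

static inline unsigned int tx_bd_retry_count(u32 bd_status)
{
	/* the RC (retry count) field sits in the upper half of the word,
	 * so undo both the 16-bit relocation and the field shift */
	return (bd_status & T_RC_MASK) >> (TX_ERRORS_RC_SHIFT + 16);
}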
813 | |||
814 | /* Receive BD. These are in addition to values defined in uccf. */ | ||
815 | #define R_LG 0x00200000 /* Frame length violation. */ | ||
816 | #define R_NO 0x00100000 /* Non-octet aligned frame. */ | ||
817 | #define R_SH 0x00080000 /* Short frame. */ | ||
818 | #define R_CR 0x00040000 /* CRC error. */ | ||
819 | #define R_OV 0x00020000 /* Overrun. */ | ||
820 | #define R_IPCH 0x00010000 /* IP checksum check failed. */ | ||
821 | #define R_CMR (((u32) RX_ERRORS_CMR ) << 16) | ||
822 | #define R_M (((u32) RX_ERRORS_M ) << 16) | ||
823 | #define R_BC (((u32) RX_ERRORS_BC ) << 16) | ||
824 | #define R_MC (((u32) RX_ERRORS_MC ) << 16) | ||
825 | #define R_ERRORS_REPORT (R_CMR | R_M | R_BC | R_MC) /* receive errors to | ||
826 | report */ | ||
827 | #define R_ERRORS_FATAL (R_LG | R_NO | R_SH | R_CR | \ | ||
828 | R_OV | R_IPCH) /* receive errors to discard */ | ||
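R_ERRORS_REPORT and R_ERRORS_FATAL partition the Rx error bits into "count it" and "drop it" classes. A hedged sketch of the intended use against an RxBD status word (the helper is hypothetical, assuming the 32-bit status/length word has already been read from the descriptor):

static inline int rx_bd_frame_usable(u32 bd_status)
{
	if (bd_status & R_ERRORS_FATAL)		/* CRC, overrun, runt, ...: discard */
		return 0;
	if (bd_status & R_ERRORS_REPORT)	/* deliverable, but worth counting */
		;	/* e.g. bump a per-queue soft error counter here */
	return 1;
}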
829 | |||
830 | /* Alignments */ | ||
831 | #define UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT 256 | ||
832 | #define UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT 128 | ||
833 | #define UCC_GETH_THREAD_RX_PRAM_ALIGNMENT 128 | ||
834 | #define UCC_GETH_THREAD_TX_PRAM_ALIGNMENT 64 | ||
835 | #define UCC_GETH_THREAD_DATA_ALIGNMENT 256 /* spec gives values | ||
836 | based on num of | ||
837 | threads, but always | ||
838 | using the maximum is | ||
839 | easier */ | ||
840 | #define UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT 32 | ||
841 | #define UCC_GETH_SCHEDULER_ALIGNMENT 8 /* This is a guess */ | ||
842 | #define UCC_GETH_TX_STATISTICS_ALIGNMENT 4 /* This is a guess */ | ||
843 | #define UCC_GETH_RX_STATISTICS_ALIGNMENT 4 /* This is a guess */ | ||
844 | #define UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT 64 | ||
845 | #define UCC_GETH_RX_BD_QUEUES_ALIGNMENT 8 /* This is a guess */ | ||
846 | #define UCC_GETH_RX_PREFETCHED_BDS_ALIGNMENT 128 /* This is a guess */ | ||
847 | #define UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT 8 /* This | ||
848 | is a | ||
849 | guess | ||
850 | */ | ||
851 | #define UCC_GETH_RX_BD_RING_ALIGNMENT 32 | ||
852 | #define UCC_GETH_TX_BD_RING_ALIGNMENT 32 | ||
853 | #define UCC_GETH_MRBLR_ALIGNMENT 128 | ||
854 | #define UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT 4 | ||
855 | #define UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT 32 | ||
856 | #define UCC_GETH_RX_DATA_BUF_ALIGNMENT 64 | ||
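These alignment constants are consumed when the driver carves parameter RAM out of the QE's MURAM. A sketch under the assumption that the qe_muram_alloc()/qe_muram_addr() helpers from the QE support code are used, as the main driver does for its other parameter structures; error handling is trimmed to the essentials:

/* allocate the Rx global parameter RAM with its required alignment */
ugeth->rx_glbl_pram_offset =
	qe_muram_alloc(sizeof(struct ucc_geth_rx_global_pram),
		       UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT);
if (IS_ERR_VALUE(ugeth->rx_glbl_pram_offset))
	return -ENOMEM;
ugeth->p_rx_glbl_pram = (struct ucc_geth_rx_global_pram __iomem *)
	qe_muram_addr(ugeth->rx_glbl_pram_offset);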
857 | |||
858 | #define UCC_GETH_TAD_EF 0x80 | ||
859 | #define UCC_GETH_TAD_V 0x40 | ||
860 | #define UCC_GETH_TAD_REJ 0x20 | ||
861 | #define UCC_GETH_TAD_VTAG_OP_RIGHT_SHIFT 2 | ||
862 | #define UCC_GETH_TAD_VTAG_OP_SHIFT 6 | ||
863 | #define UCC_GETH_TAD_V_NON_VTAG_OP 0x20 | ||
864 | #define UCC_GETH_TAD_RQOS_SHIFT 0 | ||
865 | #define UCC_GETH_TAD_V_PRIORITY_SHIFT 5 | ||
866 | #define UCC_GETH_TAD_CFI 0x10 | ||
867 | |||
868 | #define UCC_GETH_VLAN_PRIORITY_MAX 8 | ||
869 | #define UCC_GETH_IP_PRIORITY_MAX 64 | ||
870 | #define UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX 8 | ||
871 | #define UCC_GETH_RX_BD_RING_SIZE_MIN 8 | ||
872 | #define UCC_GETH_TX_BD_RING_SIZE_MIN 2 | ||
873 | #define UCC_GETH_BD_RING_SIZE_MAX 0xffff | ||
874 | |||
875 | #define UCC_GETH_SIZE_OF_BD QE_SIZEOF_BD | ||
876 | |||
877 | /* Driver definitions */ | ||
878 | #define TX_BD_RING_LEN 0x10 | ||
879 | #define RX_BD_RING_LEN 0x10 | ||
880 | |||
881 | #define TX_RING_MOD_MASK(size) (size-1) | ||
882 | #define RX_RING_MOD_MASK(size) (size-1) | ||
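The mod-mask macros give cheap wrap-around arithmetic on ring indices, which is only correct when the ring length is a power of two (the 0x10 defaults above are). A one-line sketch of an index advance, with the queue number txQ assumed:

ugeth->skb_curtx[txQ] = (ugeth->skb_curtx[txQ] + 1) &
			TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]);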
883 | |||
884 | #define ENET_NUM_OCTETS_PER_ADDRESS 6 | ||
885 | #define ENET_GROUP_ADDR 0x01 /* Group address mask | ||
886 | for ethernet | ||
887 | addresses */ | ||
888 | |||
889 | #define TX_TIMEOUT (1*HZ) | ||
890 | #define SKB_ALLOC_TIMEOUT 100000 | ||
891 | #define PHY_INIT_TIMEOUT 100000 | ||
892 | #define PHY_CHANGE_TIME 2 | ||
893 | |||
894 | /* Fast Ethernet (10/100 Mbps) */ | ||
895 | #define UCC_GETH_URFS_INIT 512 /* Rx virtual FIFO size | ||
896 | */ | ||
897 | #define UCC_GETH_URFET_INIT 256 /* 1/2 urfs */ | ||
898 | #define UCC_GETH_URFSET_INIT 384 /* 3/4 urfs */ | ||
899 | #define UCC_GETH_UTFS_INIT 512 /* Tx virtual FIFO size | ||
900 | */ | ||
901 | #define UCC_GETH_UTFET_INIT 256 /* 1/2 utfs */ | ||
902 | #define UCC_GETH_UTFTT_INIT 256 /* 1/2 utfs | ||
903 | due to errata */ | ||
904 | /* Gigabit Ethernet (1000 Mbps) */ | ||
905 | #define UCC_GETH_URFS_GIGA_INIT 4096/*2048*/ /* Rx virtual | ||
906 | FIFO size */ | ||
907 | #define UCC_GETH_URFET_GIGA_INIT 2048/*1024*/ /* 1/2 urfs */ | ||
908 | #define UCC_GETH_URFSET_GIGA_INIT 3072/*1536*/ /* 3/4 urfs */ | ||
909 | #define UCC_GETH_UTFS_GIGA_INIT 4096/*2048*/ /* Tx virtual | ||
910 | FIFO size */ | ||
911 | #define UCC_GETH_UTFET_GIGA_INIT 2048/*1024*/ /* 1/2 utfs */ | ||
912 | #define UCC_GETH_UTFTT_GIGA_INIT 4096/*0x40*/ /* Tx FIFO | ||
913 | transmit threshold */ | ||
914 | |||
915 | #define UCC_GETH_REMODER_INIT 0 /* bits that must be | ||
916 | set */ | ||
917 | #define UCC_GETH_TEMODER_INIT 0xC000 /* bits that must be set */ | ||
918 | |||
919 | /* Initial value for UPSMR */ | ||
920 | #define UCC_GETH_UPSMR_INIT UCC_GETH_UPSMR_RES1 | ||
921 | |||
922 | #define UCC_GETH_MACCFG1_INIT 0 | ||
923 | #define UCC_GETH_MACCFG2_INIT (MACCFG2_RESERVED_1) | ||
924 | |||
925 | /* Ethernet Address Type. */ | ||
926 | enum enet_addr_type { | ||
927 | ENET_ADDR_TYPE_INDIVIDUAL, | ||
928 | ENET_ADDR_TYPE_GROUP, | ||
929 | ENET_ADDR_TYPE_BROADCAST | ||
930 | }; | ||
931 | |||
932 | /* UCC GETH 82xx Ethernet Address Recognition Location */ | ||
933 | enum ucc_geth_enet_address_recognition_location { | ||
934 | UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_STATION_ADDRESS,/* station | ||
935 | address */ | ||
936 | UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_PADDR_FIRST, /* additional | ||
937 | station | ||
938 | address | ||
939 | paddr1 */ | ||
940 | UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_PADDR2, /* additional | ||
941 | station | ||
942 | address | ||
943 | paddr2 */ | ||
944 | UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_PADDR3, /* additional | ||
945 | station | ||
946 | address | ||
947 | paddr3 */ | ||
948 | UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_PADDR_LAST, /* additional | ||
949 | station | ||
950 | address | ||
951 | paddr4 */ | ||
952 | UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_GROUP_HASH, /* group hash */ | ||
953 | UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_INDIVIDUAL_HASH /* individual | ||
954 | hash */ | ||
955 | }; | ||
956 | |||
957 | /* UCC GETH vlan operation tagged */ | ||
958 | enum ucc_geth_vlan_operation_tagged { | ||
959 | UCC_GETH_VLAN_OPERATION_TAGGED_NOP = 0x0, /* Tagged - nop */ | ||
960 | UCC_GETH_VLAN_OPERATION_TAGGED_REPLACE_VID_PORTION_OF_Q_TAG | ||
961 | = 0x1, /* Tagged - replace vid portion of q tag */ | ||
962 | UCC_GETH_VLAN_OPERATION_TAGGED_IF_VID0_REPLACE_VID_WITH_DEFAULT_VALUE | ||
963 | = 0x2, /* Tagged - if vid0 replace vid with default value */ | ||
964 | UCC_GETH_VLAN_OPERATION_TAGGED_EXTRACT_Q_TAG_FROM_FRAME | ||
965 | = 0x3 /* Tagged - extract q tag from frame */ | ||
966 | }; | ||
967 | |||
968 | /* UCC GETH vlan operation non-tagged */ | ||
969 | enum ucc_geth_vlan_operation_non_tagged { | ||
970 | UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP = 0x0, /* Non tagged - nop */ | ||
971 | UCC_GETH_VLAN_OPERATION_NON_TAGGED_Q_TAG_INSERT = 0x1 /* Non tagged - | ||
972 | q tag insert | ||
973 | */ | ||
974 | }; | ||
975 | |||
976 | /* UCC GETH Rx Quality of Service Mode */ | ||
977 | enum ucc_geth_qos_mode { | ||
978 | UCC_GETH_QOS_MODE_DEFAULT = 0x0, /* default queue */ | ||
979 | UCC_GETH_QOS_MODE_QUEUE_NUM_FROM_L2_CRITERIA = 0x1, /* queue | ||
980 | determined | ||
981 | by L2 | ||
982 | criteria */ | ||
983 | UCC_GETH_QOS_MODE_QUEUE_NUM_FROM_L3_CRITERIA = 0x2 /* queue | ||
984 | determined | ||
985 | by L3 | ||
986 | criteria */ | ||
987 | }; | ||
988 | |||
989 | /* UCC GETH Statistics Gathering Mode - These are bit flags, 'or' them together | ||
990 | for combined functionality */ | ||
991 | enum ucc_geth_statistics_gathering_mode { | ||
992 | UCC_GETH_STATISTICS_GATHERING_MODE_NONE = 0x00000000, /* No | ||
993 | statistics | ||
994 | gathering */ | ||
995 | UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE = 0x00000001,/* Enable | ||
996 | hardware | ||
997 | statistics | ||
998 | gathering | ||
999 | */ | ||
1000 | UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX = 0x00000004,/*Enable | ||
1001 | firmware | ||
1002 | tx | ||
1003 | statistics | ||
1004 | gathering | ||
1005 | */ | ||
1006 | UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX = 0x00000008/* Enable | ||
1007 | firmware | ||
1008 | rx | ||
1009 | statistics | ||
1010 | gathering | ||
1011 | */ | ||
1012 | }; | ||
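Because the gathering modes are independent bit flags, a configuration that wants hardware counters plus both firmware counter sets simply ORs them together; the ethtool code later in this patch tests the same bits to size and fill the statistics arrays. A minimal sketch, assuming ug_info points at a struct ucc_geth_info being prepared before initialization:

ug_info->statisticsMode =
	UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE |
	UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX |
	UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX;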
1013 | |||
1014 | /* UCC GETH Pad and CRC Mode - Note, Padding without CRC is not possible */ | ||
1015 | enum ucc_geth_maccfg2_pad_and_crc_mode { | ||
1016 | UCC_GETH_PAD_AND_CRC_MODE_NONE | ||
1017 | = MACCFG2_PAD_AND_CRC_MODE_NONE, /* Neither Padding | ||
1018 | short frames | ||
1019 | nor CRC */ | ||
1020 | UCC_GETH_PAD_AND_CRC_MODE_CRC_ONLY | ||
1021 | = MACCFG2_PAD_AND_CRC_MODE_CRC_ONLY, /* Append | ||
1022 | CRC only */ | ||
1023 | UCC_GETH_PAD_AND_CRC_MODE_PAD_AND_CRC = | ||
1024 | MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC | ||
1025 | }; | ||
1026 | |||
1027 | /* UCC GETH upsmr Flow Control Mode */ | ||
1028 | enum ucc_geth_flow_control_mode { | ||
1029 | UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_NONE = 0x00000000, /* No automatic | ||
1030 | flow control | ||
1031 | */ | ||
1032 | UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_PAUSE_WHEN_EMERGENCY | ||
1033 | = 0x00004000 /* Send pause frame when RxFIFO reaches its | ||
1034 | emergency threshold */ | ||
1035 | }; | ||
1036 | |||
1037 | /* UCC GETH number of threads */ | ||
1038 | enum ucc_geth_num_of_threads { | ||
1039 | UCC_GETH_NUM_OF_THREADS_1 = 0x1, /* 1 */ | ||
1040 | UCC_GETH_NUM_OF_THREADS_2 = 0x2, /* 2 */ | ||
1041 | UCC_GETH_NUM_OF_THREADS_4 = 0x0, /* 4 */ | ||
1042 | UCC_GETH_NUM_OF_THREADS_6 = 0x3, /* 6 */ | ||
1043 | UCC_GETH_NUM_OF_THREADS_8 = 0x4 /* 8 */ | ||
1044 | }; | ||
1045 | |||
1046 | /* UCC GETH number of station addresses */ | ||
1047 | enum ucc_geth_num_of_station_addresses { | ||
1048 | UCC_GETH_NUM_OF_STATION_ADDRESSES_1, /* 1 */ | ||
1049 | UCC_GETH_NUM_OF_STATION_ADDRESSES_5 /* 5 */ | ||
1050 | }; | ||
1051 | |||
1052 | /* UCC GETH 82xx Ethernet Address Container */ | ||
1053 | struct enet_addr_container { | ||
1054 | u8 address[ENET_NUM_OCTETS_PER_ADDRESS]; /* ethernet address */ | ||
1055 | enum ucc_geth_enet_address_recognition_location location; /* location in | ||
1056 | 82xx address | ||
1057 | recognition | ||
1058 | hardware */ | ||
1059 | struct list_head node; | ||
1060 | }; | ||
1061 | |||
1062 | #define ENET_ADDR_CONT_ENTRY(ptr) list_entry(ptr, struct enet_addr_container, node) | ||
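The containers are chained through their node member onto the group_hash_q and ind_hash_q lists of ucc_geth_private, and ENET_ADDR_CONT_ENTRY() recovers the container from a list node. A hedged walk of one list (variable names illustrative):

struct list_head *pos;

list_for_each(pos, &ugeth->group_hash_q) {
	struct enet_addr_container *cont = ENET_ADDR_CONT_ENTRY(pos);

	/* cont->address holds the 6-octet MAC address;
	 * cont->location records where it was programmed */
}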
1063 | |||
1064 | /* UCC GETH Termination Action Descriptor (TAD) structure. */ | ||
1065 | struct ucc_geth_tad_params { | ||
1066 | int rx_non_dynamic_extended_features_mode; | ||
1067 | int reject_frame; | ||
1068 | enum ucc_geth_vlan_operation_tagged vtag_op; | ||
1069 | enum ucc_geth_vlan_operation_non_tagged vnontag_op; | ||
1070 | enum ucc_geth_qos_mode rqos; | ||
1071 | u8 vpri; | ||
1072 | u16 vid; | ||
1073 | }; | ||
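A TAD tells the extended-filtering firmware what to do with a frame that matched a filter entry. A purely illustrative initializer, assuming one wanted matching tagged frames re-tagged with VID 100 and steered to a queue by their L2 priority:

struct ucc_geth_tad_params tad = {
	.rx_non_dynamic_extended_features_mode = 0,
	.reject_frame	= 0,
	.vtag_op	= UCC_GETH_VLAN_OPERATION_TAGGED_REPLACE_VID_PORTION_OF_Q_TAG,
	.vnontag_op	= UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP,
	.rqos		= UCC_GETH_QOS_MODE_QUEUE_NUM_FROM_L2_CRITERIA,
	.vpri		= 0,
	.vid		= 100,	/* replacement VLAN id, illustrative */
};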
1074 | |||
1075 | /* GETH protocol initialization structure */ | ||
1076 | struct ucc_geth_info { | ||
1077 | struct ucc_fast_info uf_info; | ||
1078 | u8 numQueuesTx; | ||
1079 | u8 numQueuesRx; | ||
1080 | int ipCheckSumCheck; | ||
1081 | int ipCheckSumGenerate; | ||
1082 | int rxExtendedFiltering; | ||
1083 | u32 extendedFilteringChainPointer; | ||
1084 | u16 typeorlen; | ||
1085 | int dynamicMaxFrameLength; | ||
1086 | int dynamicMinFrameLength; | ||
1087 | u8 nonBackToBackIfgPart1; | ||
1088 | u8 nonBackToBackIfgPart2; | ||
1089 | u8 miminumInterFrameGapEnforcement; | ||
1090 | u8 backToBackInterFrameGap; | ||
1091 | int ipAddressAlignment; | ||
1092 | int lengthCheckRx; | ||
1093 | u32 mblinterval; | ||
1094 | u16 nortsrbytetime; | ||
1095 | u8 fracsiz; | ||
1096 | u8 strictpriorityq; | ||
1097 | u8 txasap; | ||
1098 | u8 extrabw; | ||
1099 | int miiPreambleSupress; | ||
1100 | u8 altBebTruncation; | ||
1101 | int altBeb; | ||
1102 | int backPressureNoBackoff; | ||
1103 | int noBackoff; | ||
1104 | int excessDefer; | ||
1105 | u8 maxRetransmission; | ||
1106 | u8 collisionWindow; | ||
1107 | int pro; | ||
1108 | int cap; | ||
1109 | int rsh; | ||
1110 | int rlpb; | ||
1111 | int cam; | ||
1112 | int bro; | ||
1113 | int ecm; | ||
1114 | int receiveFlowControl; | ||
1115 | int transmitFlowControl; | ||
1116 | u8 maxGroupAddrInHash; | ||
1117 | u8 maxIndAddrInHash; | ||
1118 | u8 prel; | ||
1119 | u16 maxFrameLength; | ||
1120 | u16 minFrameLength; | ||
1121 | u16 maxD1Length; | ||
1122 | u16 maxD2Length; | ||
1123 | u16 vlantype; | ||
1124 | u16 vlantci; | ||
1125 | u32 ecamptr; | ||
1126 | u32 eventRegMask; | ||
1127 | u16 pausePeriod; | ||
1128 | u16 extensionField; | ||
1129 | struct device_node *phy_node; | ||
1130 | struct device_node *tbi_node; | ||
1131 | u8 weightfactor[NUM_TX_QUEUES]; | ||
1132 | u8 interruptcoalescingmaxvalue[NUM_RX_QUEUES]; | ||
1133 | u8 l2qt[UCC_GETH_VLAN_PRIORITY_MAX]; | ||
1134 | u8 l3qt[UCC_GETH_IP_PRIORITY_MAX]; | ||
1135 | u32 vtagtable[UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX]; | ||
1136 | u8 iphoffset[TX_IP_OFFSET_ENTRY_MAX]; | ||
1137 | u16 bdRingLenTx[NUM_TX_QUEUES]; | ||
1138 | u16 bdRingLenRx[NUM_RX_QUEUES]; | ||
1139 | enum ucc_geth_num_of_station_addresses numStationAddresses; | ||
1140 | enum qe_fltr_largest_external_tbl_lookup_key_size | ||
1141 | largestexternallookupkeysize; | ||
1142 | enum ucc_geth_statistics_gathering_mode statisticsMode; | ||
1143 | enum ucc_geth_vlan_operation_tagged vlanOperationTagged; | ||
1144 | enum ucc_geth_vlan_operation_non_tagged vlanOperationNonTagged; | ||
1145 | enum ucc_geth_qos_mode rxQoSMode; | ||
1146 | enum ucc_geth_flow_control_mode aufc; | ||
1147 | enum ucc_geth_maccfg2_pad_and_crc_mode padAndCrc; | ||
1148 | enum ucc_geth_num_of_threads numThreadsTx; | ||
1149 | enum ucc_geth_num_of_threads numThreadsRx; | ||
1150 | unsigned int riscTx; | ||
1151 | unsigned int riscRx; | ||
1152 | }; | ||
1153 | |||
1154 | /* structure representing UCC GETH */ | ||
1155 | struct ucc_geth_private { | ||
1156 | struct ucc_geth_info *ug_info; | ||
1157 | struct ucc_fast_private *uccf; | ||
1158 | struct device *dev; | ||
1159 | struct net_device *ndev; | ||
1160 | struct napi_struct napi; | ||
1161 | struct work_struct timeout_work; | ||
1162 | struct ucc_geth __iomem *ug_regs; | ||
1163 | struct ucc_geth_init_pram *p_init_enet_param_shadow; | ||
1164 | struct ucc_geth_exf_global_pram __iomem *p_exf_glbl_param; | ||
1165 | u32 exf_glbl_param_offset; | ||
1166 | struct ucc_geth_rx_global_pram __iomem *p_rx_glbl_pram; | ||
1167 | u32 rx_glbl_pram_offset; | ||
1168 | struct ucc_geth_tx_global_pram __iomem *p_tx_glbl_pram; | ||
1169 | u32 tx_glbl_pram_offset; | ||
1170 | struct ucc_geth_send_queue_mem_region __iomem *p_send_q_mem_reg; | ||
1171 | u32 send_q_mem_reg_offset; | ||
1172 | struct ucc_geth_thread_data_tx __iomem *p_thread_data_tx; | ||
1173 | u32 thread_dat_tx_offset; | ||
1174 | struct ucc_geth_thread_data_rx __iomem *p_thread_data_rx; | ||
1175 | u32 thread_dat_rx_offset; | ||
1176 | struct ucc_geth_scheduler __iomem *p_scheduler; | ||
1177 | u32 scheduler_offset; | ||
1178 | struct ucc_geth_tx_firmware_statistics_pram __iomem *p_tx_fw_statistics_pram; | ||
1179 | u32 tx_fw_statistics_pram_offset; | ||
1180 | struct ucc_geth_rx_firmware_statistics_pram __iomem *p_rx_fw_statistics_pram; | ||
1181 | u32 rx_fw_statistics_pram_offset; | ||
1182 | struct ucc_geth_rx_interrupt_coalescing_table __iomem *p_rx_irq_coalescing_tbl; | ||
1183 | u32 rx_irq_coalescing_tbl_offset; | ||
1184 | struct ucc_geth_rx_bd_queues_entry __iomem *p_rx_bd_qs_tbl; | ||
1185 | u32 rx_bd_qs_tbl_offset; | ||
1186 | u8 __iomem *p_tx_bd_ring[NUM_TX_QUEUES]; | ||
1187 | u32 tx_bd_ring_offset[NUM_TX_QUEUES]; | ||
1188 | u8 __iomem *p_rx_bd_ring[NUM_RX_QUEUES]; | ||
1189 | u32 rx_bd_ring_offset[NUM_RX_QUEUES]; | ||
1190 | u8 __iomem *confBd[NUM_TX_QUEUES]; | ||
1191 | u8 __iomem *txBd[NUM_TX_QUEUES]; | ||
1192 | u8 __iomem *rxBd[NUM_RX_QUEUES]; | ||
1193 | int badFrame[NUM_RX_QUEUES]; | ||
1194 | u16 cpucount[NUM_TX_QUEUES]; | ||
1195 | u16 __iomem *p_cpucount[NUM_TX_QUEUES]; | ||
1196 | int indAddrRegUsed[NUM_OF_PADDRS]; | ||
1197 | u8 paddr[NUM_OF_PADDRS][ENET_NUM_OCTETS_PER_ADDRESS]; /* ethernet address */ | ||
1198 | u8 numGroupAddrInHash; | ||
1199 | u8 numIndAddrInHash; | ||
1200 | u8 numIndAddrInReg; | ||
1201 | int rx_extended_features; | ||
1202 | int rx_non_dynamic_extended_features; | ||
1203 | struct list_head conf_skbs; | ||
1204 | struct list_head group_hash_q; | ||
1205 | struct list_head ind_hash_q; | ||
1206 | u32 saved_uccm; | ||
1207 | spinlock_t lock; | ||
1208 | /* pointers to arrays of skbuffs for tx and rx */ | ||
1209 | struct sk_buff **tx_skbuff[NUM_TX_QUEUES]; | ||
1210 | struct sk_buff **rx_skbuff[NUM_RX_QUEUES]; | ||
1211 | /* indices pointing to the next free skb in the skb arrays */ | ||
1212 | u16 skb_curtx[NUM_TX_QUEUES]; | ||
1213 | u16 skb_currx[NUM_RX_QUEUES]; | ||
1214 | /* index of the first skb which hasn't been transmitted yet. */ | ||
1215 | u16 skb_dirtytx[NUM_TX_QUEUES]; | ||
1216 | |||
1217 | struct sk_buff_head rx_recycle; | ||
1218 | |||
1219 | struct ugeth_mii_info *mii_info; | ||
1220 | struct phy_device *phydev; | ||
1221 | phy_interface_t phy_interface; | ||
1222 | int max_speed; | ||
1223 | uint32_t msg_enable; | ||
1224 | int oldspeed; | ||
1225 | int oldduplex; | ||
1226 | int oldlink; | ||
1227 | int wol_en; | ||
1228 | |||
1229 | struct device_node *node; | ||
1230 | }; | ||
1231 | |||
1232 | void uec_set_ethtool_ops(struct net_device *netdev); | ||
1233 | int init_flow_control_params(u32 automatic_flow_control_mode, | ||
1234 | int rx_flow_control_enable, int tx_flow_control_enable, | ||
1235 | u16 pause_period, u16 extension_field, | ||
1236 | u32 __iomem *upsmr_register, u32 __iomem *uempr_register, | ||
1237 | u32 __iomem *maccfg1_register); | ||
1238 | |||
1239 | |||
1240 | #endif /* __UCC_GETH_H__ */ | ||
diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c new file mode 100644 index 000000000000..a97257f91a3d --- /dev/null +++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c | |||
@@ -0,0 +1,423 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2007 Freescale Semiconductor, Inc. All rights reserved. | ||
3 | * | ||
4 | * Description: QE UCC Gigabit Ethernet Ethtool API Set | ||
5 | * | ||
6 | * Author: Li Yang <leoli@freescale.com> | ||
7 | * | ||
8 | * Limitation: | ||
9 | * Can only get/set settings of the first queue. | ||
10 | * Need to re-open the interface manually after changing some parameters. | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or modify it | ||
13 | * under the terms of the GNU General Public License as published by the | ||
14 | * Free Software Foundation; either version 2 of the License, or (at your | ||
15 | * option) any later version. | ||
16 | */ | ||
17 | |||
18 | #include <linux/kernel.h> | ||
19 | #include <linux/init.h> | ||
20 | #include <linux/errno.h> | ||
21 | #include <linux/stddef.h> | ||
22 | #include <linux/interrupt.h> | ||
23 | #include <linux/netdevice.h> | ||
24 | #include <linux/etherdevice.h> | ||
25 | #include <linux/skbuff.h> | ||
26 | #include <linux/spinlock.h> | ||
27 | #include <linux/mm.h> | ||
28 | #include <linux/delay.h> | ||
29 | #include <linux/dma-mapping.h> | ||
30 | #include <linux/ethtool.h> | ||
31 | #include <linux/mii.h> | ||
32 | #include <linux/phy.h> | ||
33 | |||
34 | #include <asm/io.h> | ||
35 | #include <asm/irq.h> | ||
36 | #include <asm/uaccess.h> | ||
37 | #include <asm/types.h> | ||
38 | |||
39 | #include "ucc_geth.h" | ||
40 | |||
41 | static char hw_stat_gstrings[][ETH_GSTRING_LEN] = { | ||
42 | "tx-64-frames", | ||
43 | "tx-65-127-frames", | ||
44 | "tx-128-255-frames", | ||
45 | "rx-64-frames", | ||
46 | "rx-65-127-frames", | ||
47 | "rx-128-255-frames", | ||
48 | "tx-bytes-ok", | ||
49 | "tx-pause-frames", | ||
50 | "tx-multicast-frames", | ||
51 | "tx-broadcast-frames", | ||
52 | "rx-frames", | ||
53 | "rx-bytes-ok", | ||
54 | "rx-bytes-all", | ||
55 | "rx-multicast-frames", | ||
56 | "rx-broadcast-frames", | ||
57 | "stats-counter-carry", | ||
58 | "stats-counter-mask", | ||
59 | "rx-dropped-frames", | ||
60 | }; | ||
61 | |||
62 | static char tx_fw_stat_gstrings[][ETH_GSTRING_LEN] = { | ||
63 | "tx-single-collision", | ||
64 | "tx-multiple-collision", | ||
65 | "tx-late-collision", | ||
66 | "tx-aborted-frames", | ||
67 | "tx-lost-frames", | ||
68 | "tx-carrier-sense-errors", | ||
69 | "tx-frames-ok", | ||
70 | "tx-excessive-deferral-frames", | ||
71 | "tx-256-511-frames", | ||
72 | "tx-512-1023-frames", | ||
73 | "tx-1024-1518-frames", | ||
74 | "tx-jumbo-frames", | ||
75 | }; | ||
76 | |||
77 | static char rx_fw_stat_gstrings[][ETH_GSTRING_LEN] = { | ||
78 | "rx-crc-errors", | ||
79 | "rx-alignment-errors", | ||
80 | "rx-in-range-length-errors", | ||
81 | "rx-out-of-range-length-errors", | ||
82 | "rx-too-long-frames", | ||
83 | "rx-runt", | ||
84 | "rx-very-long-event", | ||
85 | "rx-symbol-errors", | ||
86 | "rx-busy-drop-frames", | ||
87 | "reserved", | ||
88 | "reserved", | ||
89 | "rx-mismatch-drop-frames", | ||
90 | "rx-smaller-than-64", | ||
91 | "rx-256-511-frames", | ||
92 | "rx-512-1023-frames", | ||
93 | "rx-1024-1518-frames", | ||
94 | "rx-jumbo-frames", | ||
95 | "rx-mac-error-loss", | ||
96 | "rx-pause-frames", | ||
97 | "reserved", | ||
98 | "rx-vlan-removed", | ||
99 | "rx-vlan-replaced", | ||
100 | "rx-vlan-inserted", | ||
101 | "rx-ip-checksum-errors", | ||
102 | }; | ||
103 | |||
104 | #define UEC_HW_STATS_LEN ARRAY_SIZE(hw_stat_gstrings) | ||
105 | #define UEC_TX_FW_STATS_LEN ARRAY_SIZE(tx_fw_stat_gstrings) | ||
106 | #define UEC_RX_FW_STATS_LEN ARRAY_SIZE(rx_fw_stat_gstrings) | ||
107 | |||
108 | static int | ||
109 | uec_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) | ||
110 | { | ||
111 | struct ucc_geth_private *ugeth = netdev_priv(netdev); | ||
112 | struct phy_device *phydev = ugeth->phydev; | ||
113 | struct ucc_geth_info *ug_info = ugeth->ug_info; | ||
114 | |||
115 | if (!phydev) | ||
116 | return -ENODEV; | ||
117 | |||
118 | ecmd->maxtxpkt = 1; | ||
119 | ecmd->maxrxpkt = ug_info->interruptcoalescingmaxvalue[0]; | ||
120 | |||
121 | return phy_ethtool_gset(phydev, ecmd); | ||
122 | } | ||
123 | |||
124 | static int | ||
125 | uec_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) | ||
126 | { | ||
127 | struct ucc_geth_private *ugeth = netdev_priv(netdev); | ||
128 | struct phy_device *phydev = ugeth->phydev; | ||
129 | |||
130 | if (!phydev) | ||
131 | return -ENODEV; | ||
132 | |||
133 | return phy_ethtool_sset(phydev, ecmd); | ||
134 | } | ||
135 | |||
136 | static void | ||
137 | uec_get_pauseparam(struct net_device *netdev, | ||
138 | struct ethtool_pauseparam *pause) | ||
139 | { | ||
140 | struct ucc_geth_private *ugeth = netdev_priv(netdev); | ||
141 | |||
142 | pause->autoneg = ugeth->phydev->autoneg; | ||
143 | |||
144 | if (ugeth->ug_info->receiveFlowControl) | ||
145 | pause->rx_pause = 1; | ||
146 | if (ugeth->ug_info->transmitFlowControl) | ||
147 | pause->tx_pause = 1; | ||
148 | } | ||
149 | |||
150 | static int | ||
151 | uec_set_pauseparam(struct net_device *netdev, | ||
152 | struct ethtool_pauseparam *pause) | ||
153 | { | ||
154 | struct ucc_geth_private *ugeth = netdev_priv(netdev); | ||
155 | int ret = 0; | ||
156 | |||
157 | ugeth->ug_info->receiveFlowControl = pause->rx_pause; | ||
158 | ugeth->ug_info->transmitFlowControl = pause->tx_pause; | ||
159 | |||
160 | if (ugeth->phydev->autoneg) { | ||
161 | if (netif_running(netdev)) { | ||
162 | /* FIXME: automatically restart */ | ||
163 | printk(KERN_INFO | ||
164 | "Please re-open the interface.\n"); | ||
165 | } | ||
166 | } else { | ||
167 | struct ucc_geth_info *ug_info = ugeth->ug_info; | ||
168 | |||
169 | ret = init_flow_control_params(ug_info->aufc, | ||
170 | ug_info->receiveFlowControl, | ||
171 | ug_info->transmitFlowControl, | ||
172 | ug_info->pausePeriod, | ||
173 | ug_info->extensionField, | ||
174 | &ugeth->uccf->uf_regs->upsmr, | ||
175 | &ugeth->ug_regs->uempr, | ||
176 | &ugeth->ug_regs->maccfg1); | ||
177 | } | ||
178 | |||
179 | return ret; | ||
180 | } | ||
181 | |||
182 | static uint32_t | ||
183 | uec_get_msglevel(struct net_device *netdev) | ||
184 | { | ||
185 | struct ucc_geth_private *ugeth = netdev_priv(netdev); | ||
186 | return ugeth->msg_enable; | ||
187 | } | ||
188 | |||
189 | static void | ||
190 | uec_set_msglevel(struct net_device *netdev, uint32_t data) | ||
191 | { | ||
192 | struct ucc_geth_private *ugeth = netdev_priv(netdev); | ||
193 | ugeth->msg_enable = data; | ||
194 | } | ||
195 | |||
196 | static int | ||
197 | uec_get_regs_len(struct net_device *netdev) | ||
198 | { | ||
199 | return sizeof(struct ucc_geth); | ||
200 | } | ||
201 | |||
202 | static void | ||
203 | uec_get_regs(struct net_device *netdev, | ||
204 | struct ethtool_regs *regs, void *p) | ||
205 | { | ||
206 | int i; | ||
207 | struct ucc_geth_private *ugeth = netdev_priv(netdev); | ||
208 | u32 __iomem *ug_regs = (u32 __iomem *)ugeth->ug_regs; | ||
209 | u32 *buff = p; | ||
210 | |||
211 | for (i = 0; i < sizeof(struct ucc_geth) / sizeof(u32); i++) | ||
212 | buff[i] = in_be32(&ug_regs[i]); | ||
213 | } | ||
214 | |||
215 | static void | ||
216 | uec_get_ringparam(struct net_device *netdev, | ||
217 | struct ethtool_ringparam *ring) | ||
218 | { | ||
219 | struct ucc_geth_private *ugeth = netdev_priv(netdev); | ||
220 | struct ucc_geth_info *ug_info = ugeth->ug_info; | ||
221 | int queue = 0; | ||
222 | |||
223 | ring->rx_max_pending = UCC_GETH_BD_RING_SIZE_MAX; | ||
224 | ring->rx_mini_max_pending = UCC_GETH_BD_RING_SIZE_MAX; | ||
225 | ring->rx_jumbo_max_pending = UCC_GETH_BD_RING_SIZE_MAX; | ||
226 | ring->tx_max_pending = UCC_GETH_BD_RING_SIZE_MAX; | ||
227 | |||
228 | ring->rx_pending = ug_info->bdRingLenRx[queue]; | ||
229 | ring->rx_mini_pending = ug_info->bdRingLenRx[queue]; | ||
230 | ring->rx_jumbo_pending = ug_info->bdRingLenRx[queue]; | ||
231 | ring->tx_pending = ug_info->bdRingLenTx[queue]; | ||
232 | } | ||
233 | |||
234 | static int | ||
235 | uec_set_ringparam(struct net_device *netdev, | ||
236 | struct ethtool_ringparam *ring) | ||
237 | { | ||
238 | struct ucc_geth_private *ugeth = netdev_priv(netdev); | ||
239 | struct ucc_geth_info *ug_info = ugeth->ug_info; | ||
240 | int queue = 0, ret = 0; | ||
241 | |||
242 | if (ring->rx_pending < UCC_GETH_RX_BD_RING_SIZE_MIN) { | ||
243 | printk(KERN_ERR "%s: RxBD ring size must be no smaller than %d.\n", | ||
244 | netdev->name, UCC_GETH_RX_BD_RING_SIZE_MIN); | ||
245 | return -EINVAL; | ||
246 | } | ||
247 | if (ring->rx_pending % UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT) { | ||
248 | printk(KERN_ERR "%s: RxBD ring size must be a multiple of %d.\n", | ||
249 | netdev->name, UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT); | ||
250 | return -EINVAL; | ||
251 | } | ||
252 | if (ring->tx_pending < UCC_GETH_TX_BD_RING_SIZE_MIN) { | ||
253 | printk(KERN_ERR "%s: TxBD ring size must be no smaller than %d.\n", | ||
254 | netdev->name, UCC_GETH_TX_BD_RING_SIZE_MIN); | ||
255 | return -EINVAL; | ||
256 | } | ||
257 | |||
258 | ug_info->bdRingLenRx[queue] = ring->rx_pending; | ||
259 | ug_info->bdRingLenTx[queue] = ring->tx_pending; | ||
260 | |||
261 | if (netif_running(netdev)) { | ||
262 | /* FIXME: restart automatically */ | ||
263 | printk(KERN_INFO | ||
264 | "Please re-open the interface.\n"); | ||
265 | } | ||
266 | |||
267 | return ret; | ||
268 | } | ||
269 | |||
270 | static int uec_get_sset_count(struct net_device *netdev, int sset) | ||
271 | { | ||
272 | struct ucc_geth_private *ugeth = netdev_priv(netdev); | ||
273 | u32 stats_mode = ugeth->ug_info->statisticsMode; | ||
274 | int len = 0; | ||
275 | |||
276 | switch (sset) { | ||
277 | case ETH_SS_STATS: | ||
278 | if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE) | ||
279 | len += UEC_HW_STATS_LEN; | ||
280 | if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) | ||
281 | len += UEC_TX_FW_STATS_LEN; | ||
282 | if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) | ||
283 | len += UEC_RX_FW_STATS_LEN; | ||
284 | |||
285 | return len; | ||
286 | |||
287 | default: | ||
288 | return -EOPNOTSUPP; | ||
289 | } | ||
290 | } | ||
291 | |||
292 | static void uec_get_strings(struct net_device *netdev, u32 stringset, u8 *buf) | ||
293 | { | ||
294 | struct ucc_geth_private *ugeth = netdev_priv(netdev); | ||
295 | u32 stats_mode = ugeth->ug_info->statisticsMode; | ||
296 | |||
297 | if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE) { | ||
298 | memcpy(buf, hw_stat_gstrings, UEC_HW_STATS_LEN * | ||
299 | ETH_GSTRING_LEN); | ||
300 | buf += UEC_HW_STATS_LEN * ETH_GSTRING_LEN; | ||
301 | } | ||
302 | if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) { | ||
303 | memcpy(buf, tx_fw_stat_gstrings, UEC_TX_FW_STATS_LEN * | ||
304 | ETH_GSTRING_LEN); | ||
305 | buf += UEC_TX_FW_STATS_LEN * ETH_GSTRING_LEN; | ||
306 | } | ||
307 | if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) | ||
308 | memcpy(buf, rx_fw_stat_gstrings, UEC_RX_FW_STATS_LEN * | ||
309 | ETH_GSTRING_LEN); | ||
310 | } | ||
311 | |||
312 | static void uec_get_ethtool_stats(struct net_device *netdev, | ||
313 | struct ethtool_stats *stats, uint64_t *data) | ||
314 | { | ||
315 | struct ucc_geth_private *ugeth = netdev_priv(netdev); | ||
316 | u32 stats_mode = ugeth->ug_info->statisticsMode; | ||
317 | u32 __iomem *base; | ||
318 | int i, j = 0; | ||
319 | |||
320 | if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE) { | ||
321 | if (ugeth->ug_regs) | ||
322 | base = (u32 __iomem *)&ugeth->ug_regs->tx64; | ||
323 | else | ||
324 | base = NULL; | ||
325 | |||
326 | for (i = 0; i < UEC_HW_STATS_LEN; i++) | ||
327 | data[j++] = base ? in_be32(&base[i]) : 0; | ||
328 | } | ||
329 | if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) { | ||
330 | base = (u32 __iomem *)ugeth->p_tx_fw_statistics_pram; | ||
331 | for (i = 0; i < UEC_TX_FW_STATS_LEN; i++) | ||
332 | data[j++] = base ? in_be32(&base[i]) : 0; | ||
333 | } | ||
334 | if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) { | ||
335 | base = (u32 __iomem *)ugeth->p_rx_fw_statistics_pram; | ||
336 | for (i = 0; i < UEC_RX_FW_STATS_LEN; i++) | ||
337 | data[j++] = base ? in_be32(&base[i]) : 0; | ||
338 | } | ||
339 | } | ||
340 | |||
341 | static int uec_nway_reset(struct net_device *netdev) | ||
342 | { | ||
343 | struct ucc_geth_private *ugeth = netdev_priv(netdev); | ||
344 | |||
345 | return phy_start_aneg(ugeth->phydev); | ||
346 | } | ||
347 | |||
348 | /* Report driver information */ | ||
349 | static void | ||
350 | uec_get_drvinfo(struct net_device *netdev, | ||
351 | struct ethtool_drvinfo *drvinfo) | ||
352 | { | ||
353 | strncpy(drvinfo->driver, DRV_NAME, 32); | ||
354 | strncpy(drvinfo->version, DRV_VERSION, 32); | ||
355 | strncpy(drvinfo->fw_version, "N/A", 32); | ||
356 | strncpy(drvinfo->bus_info, "QUICC ENGINE", 32); | ||
357 | drvinfo->eedump_len = 0; | ||
358 | drvinfo->regdump_len = uec_get_regs_len(netdev); | ||
359 | } | ||
360 | |||
361 | #ifdef CONFIG_PM | ||
362 | |||
363 | static void uec_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) | ||
364 | { | ||
365 | struct ucc_geth_private *ugeth = netdev_priv(netdev); | ||
366 | struct phy_device *phydev = ugeth->phydev; | ||
367 | |||
368 | if (phydev && phydev->irq) | ||
369 | wol->supported |= WAKE_PHY; | ||
370 | if (qe_alive_during_sleep()) | ||
371 | wol->supported |= WAKE_MAGIC; | ||
372 | |||
373 | wol->wolopts = ugeth->wol_en; | ||
374 | } | ||
375 | |||
376 | static int uec_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) | ||
377 | { | ||
378 | struct ucc_geth_private *ugeth = netdev_priv(netdev); | ||
379 | struct phy_device *phydev = ugeth->phydev; | ||
380 | |||
381 | if (wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC)) | ||
382 | return -EINVAL; | ||
383 | else if (wol->wolopts & WAKE_PHY && (!phydev || !phydev->irq)) | ||
384 | return -EINVAL; | ||
385 | else if (wol->wolopts & WAKE_MAGIC && !qe_alive_during_sleep()) | ||
386 | return -EINVAL; | ||
387 | |||
388 | ugeth->wol_en = wol->wolopts; | ||
389 | device_set_wakeup_enable(&netdev->dev, ugeth->wol_en); | ||
390 | |||
391 | return 0; | ||
392 | } | ||
393 | |||
394 | #else | ||
395 | #define uec_get_wol NULL | ||
396 | #define uec_set_wol NULL | ||
397 | #endif /* CONFIG_PM */ | ||
398 | |||
399 | static const struct ethtool_ops uec_ethtool_ops = { | ||
400 | .get_settings = uec_get_settings, | ||
401 | .set_settings = uec_set_settings, | ||
402 | .get_drvinfo = uec_get_drvinfo, | ||
403 | .get_regs_len = uec_get_regs_len, | ||
404 | .get_regs = uec_get_regs, | ||
405 | .get_msglevel = uec_get_msglevel, | ||
406 | .set_msglevel = uec_set_msglevel, | ||
407 | .nway_reset = uec_nway_reset, | ||
408 | .get_link = ethtool_op_get_link, | ||
409 | .get_ringparam = uec_get_ringparam, | ||
410 | .set_ringparam = uec_set_ringparam, | ||
411 | .get_pauseparam = uec_get_pauseparam, | ||
412 | .set_pauseparam = uec_set_pauseparam, | ||
413 | .get_sset_count = uec_get_sset_count, | ||
414 | .get_strings = uec_get_strings, | ||
415 | .get_ethtool_stats = uec_get_ethtool_stats, | ||
416 | .get_wol = uec_get_wol, | ||
417 | .set_wol = uec_set_wol, | ||
418 | }; | ||
419 | |||
420 | void uec_set_ethtool_ops(struct net_device *netdev) | ||
421 | { | ||
422 | SET_ETHTOOL_OPS(netdev, &uec_ethtool_ops); | ||
423 | } | ||
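uec_set_ethtool_ops() is the only entry point the main driver needs from this file, and it has to run before the net_device is registered so the ops are in place when userspace first queries the interface. A hedged sketch of such a call site (function and variable names are illustrative, not the driver's actual probe path):

static int sketch_probe(void)
{
	struct net_device *dev = alloc_etherdev(sizeof(struct ucc_geth_private));

	if (!dev)
		return -ENOMEM;
	uec_set_ethtool_ops(dev);	/* attach the ops before registration */
	return register_netdev(dev);
}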