aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/ethernet/ibm/emac
diff options
context:
space:
mode:
authorJeff Kirsher <jeffrey.t.kirsher@intel.com>2011-05-13 17:29:12 -0400
committerJeff Kirsher <jeffrey.t.kirsher@intel.com>2011-08-11 05:41:59 -0400
commit9aa3283595451ca093500ff0977b106e1f465586 (patch)
tree89cd128f037b029b67f73fbff7d8cc38177c2b27 /drivers/net/ethernet/ibm/emac
parent86387e1ac4fcaa45ff5578013a78593d1a0ba279 (diff)
ehea/ibm*: Move the IBM drivers
Move the IBM drivers into drivers/net/ethernet/ibm/ and make the necessary Kconfig and Makefile changes. - Renamed ibm_new_emac to emac - Cleaned up Makefile and Kconfig options which referred to IBM_NEW_EMAC to IBM_EMAC - ibmlana driver is a National Semiconductor SONIC driver so it was not moved CC: Christoph Raisch <raisch@de.ibm.com> CC: Santiago Leon <santil@linux.vnet.ibm.com> CC: Benjamin Herrenschmidt <benh@kernel.crashing.org> CC: David Gibson <dwg@au1.ibm.com> CC: Kyle Lucke <klucke@us.ibm.com> CC: Michael Ellerman <michael@ellerman.id.au> Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers/net/ethernet/ibm/emac')
-rw-r--r--drivers/net/ethernet/ibm/emac/Kconfig76
-rw-r--r--drivers/net/ethernet/ibm/emac/Makefile11
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c3074
-rw-r--r--drivers/net/ethernet/ibm/emac/core.h462
-rw-r--r--drivers/net/ethernet/ibm/emac/debug.c270
-rw-r--r--drivers/net/ethernet/ibm/emac/debug.h83
-rw-r--r--drivers/net/ethernet/ibm/emac/emac.h312
-rw-r--r--drivers/net/ethernet/ibm/emac/mal.c809
-rw-r--r--drivers/net/ethernet/ibm/emac/mal.h316
-rw-r--r--drivers/net/ethernet/ibm/emac/phy.c541
-rw-r--r--drivers/net/ethernet/ibm/emac/phy.h87
-rw-r--r--drivers/net/ethernet/ibm/emac/rgmii.c338
-rw-r--r--drivers/net/ethernet/ibm/emac/rgmii.h82
-rw-r--r--drivers/net/ethernet/ibm/emac/tah.c185
-rw-r--r--drivers/net/ethernet/ibm/emac/tah.h95
-rw-r--r--drivers/net/ethernet/ibm/emac/zmii.c332
-rw-r--r--drivers/net/ethernet/ibm/emac/zmii.h78
17 files changed, 7151 insertions, 0 deletions
diff --git a/drivers/net/ethernet/ibm/emac/Kconfig b/drivers/net/ethernet/ibm/emac/Kconfig
new file mode 100644
index 00000000000..3f44a30e061
--- /dev/null
+++ b/drivers/net/ethernet/ibm/emac/Kconfig
@@ -0,0 +1,76 @@
config IBM_EMAC
	tristate "IBM EMAC Ethernet support"
	depends on PPC_DCR
	select CRC32
	help
	  This driver supports the IBM EMAC family of Ethernet controllers
	  typically found on 4xx embedded PowerPC chips, but also on the
	  Axon southbridge for Cell.

config IBM_EMAC_RXB
	int "Number of receive buffers"
	depends on IBM_EMAC
	default "128"

config IBM_EMAC_TXB
	int "Number of transmit buffers"
	depends on IBM_EMAC
	default "64"

config IBM_EMAC_POLL_WEIGHT
	int "MAL NAPI polling weight"
	depends on IBM_EMAC
	default "32"

config IBM_EMAC_RX_COPY_THRESHOLD
	int "RX skb copy threshold (bytes)"
	depends on IBM_EMAC
	default "256"

config IBM_EMAC_RX_SKB_HEADROOM
	int "Additional RX skb headroom (bytes)"
	depends on IBM_EMAC
	default "0"
	help
	  Additional receive skb headroom. Note, that driver
	  will always reserve at least 2 bytes to make IP header
	  aligned, so usually there is no need to add any additional
	  headroom.

	  If unsure, set to 0.

config IBM_EMAC_DEBUG
	bool "Debugging"
	depends on IBM_EMAC
	default n

# The options below have to be select'ed by the respective
# processor types or platforms

config IBM_EMAC_ZMII
	bool
	default n

config IBM_EMAC_RGMII
	bool
	default n

config IBM_EMAC_TAH
	bool
	default n

config IBM_EMAC_EMAC4
	bool
	default n

config IBM_EMAC_NO_FLOW_CTRL
	bool
	default n

config IBM_EMAC_MAL_CLR_ICINTSTAT
	bool
	default n

config IBM_EMAC_MAL_COMMON_ERR
	bool
	default n
diff --git a/drivers/net/ethernet/ibm/emac/Makefile b/drivers/net/ethernet/ibm/emac/Makefile
new file mode 100644
index 00000000000..0b5c9951276
--- /dev/null
+++ b/drivers/net/ethernet/ibm/emac/Makefile
@@ -0,0 +1,11 @@
#
# Makefile for the PowerPC 4xx on-chip ethernet driver
#

# NOTE(review): the Kconfig in this directory defines IBM_EMAC* symbols
# (not IBM_NEW_EMAC*); the obj-/conditional-object references must use the
# same names or the driver is never built.
obj-$(CONFIG_IBM_EMAC) += ibm_newemac.o

ibm_newemac-y := mal.o core.o phy.o
ibm_newemac-$(CONFIG_IBM_EMAC_ZMII) += zmii.o
ibm_newemac-$(CONFIG_IBM_EMAC_RGMII) += rgmii.o
ibm_newemac-$(CONFIG_IBM_EMAC_TAH) += tah.o
ibm_newemac-$(CONFIG_IBM_EMAC_DEBUG) += debug.o
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
new file mode 100644
index 00000000000..70cb7d8a3b5
--- /dev/null
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -0,0 +1,3074 @@
1/*
2 * drivers/net/ibm_newemac/core.c
3 *
4 * Driver for PowerPC 4xx on-chip ethernet controller.
5 *
6 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
7 * <benh@kernel.crashing.org>
8 *
9 * Based on the arch/ppc version of the driver:
10 *
11 * Copyright (c) 2004, 2005 Zultys Technologies.
12 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
13 *
14 * Based on original work by
15 * Matt Porter <mporter@kernel.crashing.org>
16 * (c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
17 * Armin Kuster <akuster@mvista.com>
18 * Johnnie Peters <jpeters@mvista.com>
19 *
20 * This program is free software; you can redistribute it and/or modify it
21 * under the terms of the GNU General Public License as published by the
22 * Free Software Foundation; either version 2 of the License, or (at your
23 * option) any later version.
24 *
25 */
26
27#include <linux/module.h>
28#include <linux/sched.h>
29#include <linux/string.h>
30#include <linux/errno.h>
31#include <linux/delay.h>
32#include <linux/types.h>
33#include <linux/pci.h>
34#include <linux/etherdevice.h>
35#include <linux/skbuff.h>
36#include <linux/crc32.h>
37#include <linux/ethtool.h>
38#include <linux/mii.h>
39#include <linux/bitops.h>
40#include <linux/workqueue.h>
41#include <linux/of.h>
42#include <linux/of_net.h>
43#include <linux/slab.h>
44
45#include <asm/processor.h>
46#include <asm/io.h>
47#include <asm/dma.h>
48#include <asm/uaccess.h>
49#include <asm/dcr.h>
50#include <asm/dcr-regs.h>
51
52#include "core.h"
53
54/*
55 * Lack of dma_unmap_???? calls is intentional.
56 *
57 * API-correct usage requires additional support state information to be
58 * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
59 * EMAC design (e.g. TX buffer passed from network stack can be split into
60 * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
61 * maintaining such information will add additional overhead.
62 * Current DMA API implementation for 4xx processors only ensures cache coherency
63 * and dma_unmap_???? routines are empty and are likely to stay this way.
64 * I decided to omit dma_unmap_??? calls because I don't want to add additional
65 * complexity just for the sake of following some abstract API, when it doesn't
66 * add any real benefit to the driver. I understand that this decision maybe
67 * controversial, but I really tried to make code API-correct and efficient
68 * at the same time and didn't come up with code I liked :(. --ebs
69 */
70
71#define DRV_NAME "emac"
72#define DRV_VERSION "3.54"
73#define DRV_DESC "PPC 4xx OCP EMAC driver"
74
75MODULE_DESCRIPTION(DRV_DESC);
76MODULE_AUTHOR
77 ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
78MODULE_LICENSE("GPL");
79
80/*
81 * PPC64 doesn't (yet) have a cacheable_memcpy
82 */
83#ifdef CONFIG_PPC64
84#define cacheable_memcpy(d,s,n) memcpy((d),(s),(n))
85#endif
86
87/* minimum number of free TX descriptors required to wake up TX process */
88#define EMAC_TX_WAKEUP_THRESH (NUM_TX_BUFF / 4)
89
90/* If packet size is less than this number, we allocate small skb and copy packet
91 * contents into it instead of just sending original big skb up
92 */
93#define EMAC_RX_COPY_THRESH CONFIG_IBM_NEW_EMAC_RX_COPY_THRESHOLD
94
95/* Since multiple EMACs share MDIO lines in various ways, we need
96 * to avoid re-using the same PHY ID in cases where the arch didn't
97 * setup precise phy_map entries
98 *
99 * XXX This is something that needs to be reworked as we can have multiple
100 * EMAC "sets" (multiple ASICs containing several EMACs) though we can
101 * probably require in that case to have explicit PHY IDs in the device-tree
102 */
103static u32 busy_phy_map;
104static DEFINE_MUTEX(emac_phy_map_lock);
105
106/* This is the wait queue used to wait on any event related to probe, that
107 * is discovery of MALs, other EMACs, ZMII/RGMIIs, etc...
108 */
109static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait);
110
111/* Having stable interface names is a doomed idea. However, it would be nice
112 * if we didn't have completely random interface names at boot too :-) It's
113 * just a matter of making everybody's life easier. Since we are doing
114 * threaded probing, it's a bit harder though. The base idea here is that
115 * we make up a list of all emacs in the device-tree before we register the
116 * driver. Every emac will then wait for the previous one in the list to
117 * initialize before itself. We should also keep that list ordered by
118 * cell_index.
119 * That list is only 4 entries long, meaning that additional EMACs don't
120 * get ordering guarantees unless EMAC_BOOT_LIST_SIZE is increased.
121 */
122
123#define EMAC_BOOT_LIST_SIZE 4
124static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE];
125
126/* How long should I wait for dependent devices ? */
127#define EMAC_PROBE_DEP_TIMEOUT (HZ * 5)
128
129/* I don't want to litter system log with timeout errors
130 * when we have brain-damaged PHY.
131 */
/* Report a PHY/MAC timeout.  On chips with a known PHY clock erratum
 * (440GX/460EX/440EP) timeouts are expected, so they are only emitted
 * through the debug channel; otherwise log them rate-limited to avoid
 * flooding the system log when a broken PHY keeps timing out.
 */
static inline void emac_report_timeout_error(struct emac_instance *dev,
					     const char *error)
{
	if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX |
				  EMAC_FTR_460EX_PHY_CLK_FIX |
				  EMAC_FTR_440EP_PHY_CLK_FIX))
		DBG(dev, "%s" NL, error);
	else if (net_ratelimit())
		printk(KERN_ERR "%s: %s\n", dev->ofdev->dev.of_node->full_name,
			error);
}
143
144/* EMAC PHY clock workaround:
145 * 440EP/440GR has more sane SDR0_MFR register implementation than 440GX,
146 * which allows controlling each EMAC clock
147 */
/* 440EP PHY clock workaround: set this EMAC's SDR0_MFR_ECS bit
 * (shifted by cell index) to switch the RX clock source.  No-op unless
 * built with native DCR access and the 440EP fix feature is set.
 */
static inline void emac_rx_clk_tx(struct emac_instance *dev)
{
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR,
			    0, SDR0_MFR_ECS >> dev->cell_index);
#endif
}
156
/* Undo emac_rx_clk_tx(): clear this EMAC's SDR0_MFR_ECS bit, restoring
 * the default RX clock source for the 440EP PHY clock workaround.
 */
static inline void emac_rx_clk_default(struct emac_instance *dev)
{
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR,
			    SDR0_MFR_ECS >> dev->cell_index, 0);
#endif
}
165
166/* PHY polling intervals */
167#define PHY_POLL_LINK_ON HZ
168#define PHY_POLL_LINK_OFF (HZ / 5)
169
170/* Graceful stop timeouts in us.
171 * We should allow up to 1 frame time (full-duplex, ignoring collisions)
172 */
173#define STOP_TIMEOUT_10 1230
174#define STOP_TIMEOUT_100 124
175#define STOP_TIMEOUT_1000 13
176#define STOP_TIMEOUT_1000_JUMBO 73
177
178static unsigned char default_mcast_addr[] = {
179 0x01, 0x80, 0xC2, 0x00, 0x00, 0x01
180};
181
182/* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
183static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
184 "rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
185 "tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
186 "rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
187 "rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
188 "rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
189 "rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
190 "rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
191 "rx_bad_packet", "rx_runt_packet", "rx_short_event",
192 "rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
193 "rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
194 "tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
195 "tx_bd_excessive_collisions", "tx_bd_late_collision",
196 "tx_bd_multple_collisions", "tx_bd_single_collision",
197 "tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
198 "tx_errors"
199};
200
201static irqreturn_t emac_irq(int irq, void *dev_instance);
202static void emac_clean_tx_ring(struct emac_instance *dev);
203static void __emac_set_multicast_list(struct emac_instance *dev);
204
205static inline int emac_phy_supports_gige(int phy_mode)
206{
207 return phy_mode == PHY_MODE_GMII ||
208 phy_mode == PHY_MODE_RGMII ||
209 phy_mode == PHY_MODE_SGMII ||
210 phy_mode == PHY_MODE_TBI ||
211 phy_mode == PHY_MODE_RTBI;
212}
213
214static inline int emac_phy_gpcs(int phy_mode)
215{
216 return phy_mode == PHY_MODE_SGMII ||
217 phy_mode == PHY_MODE_TBI ||
218 phy_mode == PHY_MODE_RTBI;
219}
220
/* Enable the transmitter: set EMAC_MR0_TXE in MR0 unless it is already
 * set (avoids a redundant MMIO write).
 */
static inline void emac_tx_enable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "tx_enable" NL);

	r = in_be32(&p->mr0);
	if (!(r & EMAC_MR0_TXE))
		out_be32(&p->mr0, r | EMAC_MR0_TXE);
}
232
/* Disable the transmitter and wait (busy-poll, up to dev->stop_timeout
 * microseconds) for the chip to report TX idle via EMAC_MR0_TXI.
 * A timeout is reported but otherwise not treated as fatal here.
 */
static void emac_tx_disable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "tx_disable" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_TXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
		/* Poll for TX idle indication */
		while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "TX disable timeout");
	}
}
252
253static void emac_rx_enable(struct emac_instance *dev)
254{
255 struct emac_regs __iomem *p = dev->emacp;
256 u32 r;
257
258 if (unlikely(test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags)))
259 goto out;
260
261 DBG(dev, "rx_enable" NL);
262
263 r = in_be32(&p->mr0);
264 if (!(r & EMAC_MR0_RXE)) {
265 if (unlikely(!(r & EMAC_MR0_RXI))) {
266 /* Wait if previous async disable is still in progress */
267 int n = dev->stop_timeout;
268 while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
269 udelay(1);
270 --n;
271 }
272 if (unlikely(!n))
273 emac_report_timeout_error(dev,
274 "RX disable timeout");
275 }
276 out_be32(&p->mr0, r | EMAC_MR0_RXE);
277 }
278 out:
279 ;
280}
281
/* Disable the receiver and wait (busy-poll, up to dev->stop_timeout
 * microseconds) for the chip to report RX idle via EMAC_MR0_RXI.
 * A timeout is reported but otherwise not treated as fatal here.
 */
static void emac_rx_disable(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "rx_disable" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
		/* Poll for RX idle indication */
		while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "RX disable timeout");
	}
}
301
/* Quiesce the network interface: block multicast updates (no_mcast),
 * disable NAPI polling on our MAL channel and stop the TX queue.
 * no_mcast is set under both the tx and addr locks so that
 * emac_set_multicast_list() observes it consistently.
 */
static inline void emac_netif_stop(struct emac_instance *dev)
{
	netif_tx_lock_bh(dev->ndev);
	netif_addr_lock(dev->ndev);
	dev->no_mcast = 1;
	netif_addr_unlock(dev->ndev);
	netif_tx_unlock_bh(dev->ndev);
	dev->ndev->trans_start = jiffies;	/* prevent tx timeout */
	mal_poll_disable(dev->mal, &dev->commac);
	netif_tx_disable(dev->ndev);
}
313
/* Undo emac_netif_stop(): re-allow multicast updates, apply any
 * multicast change that arrived while we were stopped, wake the TX
 * queue and re-enable NAPI polling on our MAL channel.
 */
static inline void emac_netif_start(struct emac_instance *dev)
{
	netif_tx_lock_bh(dev->ndev);
	netif_addr_lock(dev->ndev);
	dev->no_mcast = 0;
	/* Replay a multicast-list update deferred by emac_netif_stop() */
	if (dev->mcast_pending && netif_running(dev->ndev))
		__emac_set_multicast_list(dev);
	netif_addr_unlock(dev->ndev);
	netif_tx_unlock_bh(dev->ndev);

	netif_wake_queue(dev->ndev);

	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (taken from tg3... though the case where that is wrong is
	 * not terribly harmful)
	 */
	mal_poll_enable(dev->mal, &dev->commac);
}
333
/* Asynchronous RX disable: clear EMAC_MR0_RXE without waiting for the
 * chip to go idle.  emac_rx_enable() later waits for RXI if this
 * disable is still in progress.
 */
static inline void emac_rx_disable_async(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;

	DBG(dev, "rx_disable_async" NL);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE)
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
}
345
/* Soft-reset the EMAC core via EMAC_MR0_SRST and poll (up to 20
 * iterations) for the bit to self-clear.  On 460EX the internal clock
 * source is selected around the reset (the chip cannot reset without a
 * clock when the PHY clock is absent).  Records reset_failed so a
 * subsequent reset attempt skips the RX/TX disable of a dead chip.
 *
 * Returns 0 on success, -ETIMEDOUT if SRST never cleared.
 */
static int emac_reset(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	int n = 20;

	DBG(dev, "reset" NL);

	if (!dev->reset_failed) {
		/* 40x erratum suggests stopping RX channel before reset,
		 * we stop TX as well
		 */
		emac_rx_disable(dev);
		emac_tx_disable(dev);
	}

#ifdef CONFIG_PPC_DCR_NATIVE
	/* Enable internal clock source */
	if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_ETH_CFG,
			    0, SDR0_ETH_CFG_ECS << dev->cell_index);
#endif

	out_be32(&p->mr0, EMAC_MR0_SRST);
	/* Wait for the self-clearing reset bit to drop */
	while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
		--n;

#ifdef CONFIG_PPC_DCR_NATIVE
	/* Enable external clock source */
	if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_ETH_CFG,
			    SDR0_ETH_CFG_ECS << dev->cell_index, 0);
#endif

	if (n) {
		dev->reset_failed = 0;
		return 0;
	} else {
		emac_report_timeout_error(dev, "reset timeout");
		dev->reset_failed = 1;
		return -ETIMEDOUT;
	}
}
388
/* Program the group-address hash table (GAHT) from the device's
 * multicast list: each address is CRC-hashed to a slot, the slots are
 * accumulated in a temporary copy, then all GAHT registers are written
 * in one pass.
 *
 * NOTE(review): gaht_temp is a variable-length array; regs is bounded
 * by EMAC_XAHT_REGS(dev) — presumably small (a few words) — but this
 * is worth confirming against core.h.
 */
static void emac_hash_mc(struct emac_instance *dev)
{
	const int regs = EMAC_XAHT_REGS(dev);
	u32 *gaht_base = emac_gaht_base(dev);
	u32 gaht_temp[regs];
	struct netdev_hw_addr *ha;
	int i;

	DBG(dev, "hash_mc %d" NL, netdev_mc_count(dev->ndev));

	memset(gaht_temp, 0, sizeof (gaht_temp));

	netdev_for_each_mc_addr(ha, dev->ndev) {
		int slot, reg, mask;
		DBG2(dev, "mc %pM" NL, ha->addr);

		/* CRC of the MAC address selects a hash slot */
		slot = EMAC_XAHT_CRC_TO_SLOT(dev,
					     ether_crc(ETH_ALEN, ha->addr));
		reg = EMAC_XAHT_SLOT_TO_REG(dev, slot);
		mask = EMAC_XAHT_SLOT_TO_MASK(dev, slot);

		gaht_temp[reg] |= mask;
	}

	/* Flush the accumulated table to the hardware registers */
	for (i = 0; i < regs; i++)
		out_be32(gaht_base + i, gaht_temp[i]);
}
416
/* Translate the net_device interface flags (IFF_*) into an EMAC RX mode
 * register (RMR) value: strip padding/FCS, accept individual and
 * broadcast frames, then add promiscuous / all-multicast / hashed
 * multicast bits as requested.  Falls back to multicast-promiscuous
 * when the list exceeds the hash table's slot count.
 */
static inline u32 emac_iff2rmr(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	u32 r;

	r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE;

	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
	    r |= EMAC4_RMR_BASE;
	else
	    r |= EMAC_RMR_BASE;

	if (ndev->flags & IFF_PROMISC)
		r |= EMAC_RMR_PME;
	else if (ndev->flags & IFF_ALLMULTI ||
			 (netdev_mc_count(ndev) > EMAC_XAHT_SLOTS(dev)))
		r |= EMAC_RMR_PMME;
	else if (!netdev_mc_empty(ndev))
		r |= EMAC_RMR_MAE;

	return r;
}
439
/* Build the base MR1 value for the classic (non-EMAC4) core: VLAN
 * enable, internal loopback status bit and TX request multiplier, plus
 * the TX/RX FIFO size fields.  Only a 2K TX FIFO and 4K/16K RX FIFOs
 * are encodable on this core; other sizes are warned about and the
 * FIFO field is left at its reset encoding.
 */
static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
{
	u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC_MR1_TR0_MULT;

	DBG2(dev, "__emac_calc_base_mr1" NL);

	switch(tx_size) {
	case 2048:
		ret |= EMAC_MR1_TFS_2K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
		       dev->ndev->name, tx_size);
	}

	switch(rx_size) {
	case 16384:
		ret |= EMAC_MR1_RFS_16K;
		break;
	case 4096:
		ret |= EMAC_MR1_RFS_4K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
		       dev->ndev->name, rx_size);
	}

	return ret;
}
469
/* Build the base MR1 value for the EMAC4 core: VLAN enable, internal
 * loopback status bit, TX request settings and the OPB bus clock (in
 * MHz) encoded in the OBCI field, plus the TX/RX FIFO size fields.
 * 2K/4K/16K FIFOs are encodable; other sizes are warned about and the
 * FIFO field is left at its reset encoding.
 */
static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
{
	u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC4_MR1_TR |
		EMAC4_MR1_OBCI(dev->opb_bus_freq / 1000000);

	DBG2(dev, "__emac4_calc_base_mr1" NL);

	switch(tx_size) {
	case 16384:
		ret |= EMAC4_MR1_TFS_16K;
		break;
	case 4096:
		ret |= EMAC4_MR1_TFS_4K;
		break;
	case 2048:
		ret |= EMAC4_MR1_TFS_2K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
		       dev->ndev->name, tx_size);
	}

	switch(rx_size) {
	case 16384:
		ret |= EMAC4_MR1_RFS_16K;
		break;
	case 4096:
		ret |= EMAC4_MR1_RFS_4K;
		break;
	case 2048:
		ret |= EMAC4_MR1_RFS_2K;
		break;
	default:
		printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
		       dev->ndev->name, rx_size);
	}

	return ret;
}
509
510static u32 emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
511{
512 return emac_has_feature(dev, EMAC_FTR_EMAC4) ?
513 __emac4_calc_base_mr1(dev, tx_size, rx_size) :
514 __emac_calc_base_mr1(dev, tx_size, rx_size);
515}
516
517static inline u32 emac_calc_trtr(struct emac_instance *dev, unsigned int size)
518{
519 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
520 return ((size >> 6) - 1) << EMAC_TRTR_SHIFT_EMAC4;
521 else
522 return ((size >> 6) - 1) << EMAC_TRTR_SHIFT;
523}
524
525static inline u32 emac_calc_rwmr(struct emac_instance *dev,
526 unsigned int low, unsigned int high)
527{
528 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
529 return (low << 22) | ( (high & 0x3ff) << 6);
530 else
531 return (low << 23) | ( (high & 0x1ff) << 7);
532}
533
/* Program the whole MAC to match the current PHY/link state: mode
 * register MR1 (duplex, speed, jumbo, flow control, FIFO sizes), the
 * station address, VLAN TPID, RX mode, FIFO thresholds, pause water
 * marks and interrupt sources.  With carrier up the chip is fully
 * reset first; with no carrier it is put in full-duplex internal
 * loopback instead of being reset.
 *
 * Returns 0 on success or -ETIMEDOUT if the chip reset timed out.
 */
static int emac_configure(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;
	int tx_size, rx_size, link = netif_carrier_ok(dev->ndev);
	u32 r, mr1 = 0;

	DBG(dev, "configure" NL);

	if (!link) {
		/* No carrier: loop the MAC back on itself instead of a reset */
		out_be32(&p->mr1, in_be32(&p->mr1)
			 | EMAC_MR1_FDE | EMAC_MR1_ILE);
		udelay(100);
	} else if (emac_reset(dev) < 0)
		return -ETIMEDOUT;

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		tah_reset(dev->tah_dev);

	DBG(dev, " link = %d duplex = %d, pause = %d, asym_pause = %d\n",
	    link, dev->phy.duplex, dev->phy.pause, dev->phy.asym_pause);

	/* Default fifo sizes */
	tx_size = dev->tx_fifo_size;
	rx_size = dev->rx_fifo_size;

	/* No link, force loopback */
	if (!link)
		mr1 = EMAC_MR1_FDE | EMAC_MR1_ILE;

	/* Check for full duplex */
	else if (dev->phy.duplex == DUPLEX_FULL)
		mr1 |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;

	/* Adjust fifo sizes, mr1 and timeouts based on link speed */
	dev->stop_timeout = STOP_TIMEOUT_10;
	switch (dev->phy.speed) {
	case SPEED_1000:
		if (emac_phy_gpcs(dev->phy.mode)) {
			/* Use the dedicated GPCS address when one is set,
			 * otherwise fall back to the PHY address */
			mr1 |= EMAC_MR1_MF_1000GPCS | EMAC_MR1_MF_IPPA(
				(dev->phy.gpcs_address != 0xffffffff) ?
				 dev->phy.gpcs_address : dev->phy.address);

			/* Put some arbitrary OUI, Manuf & Rev IDs so we can
			 * identify this GPCS PHY later.
			 */
			out_be32(&p->u1.emac4.ipcr, 0xdeadbeef);
		} else
			mr1 |= EMAC_MR1_MF_1000;

		/* Extended fifo sizes */
		tx_size = dev->tx_fifo_size_gige;
		rx_size = dev->rx_fifo_size_gige;

		if (dev->ndev->mtu > ETH_DATA_LEN) {
			/* Jumbo frames: enable JPSM and allow a longer
			 * graceful-stop window */
			if (emac_has_feature(dev, EMAC_FTR_EMAC4))
				mr1 |= EMAC4_MR1_JPSM;
			else
				mr1 |= EMAC_MR1_JPSM;
			dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
		} else
			dev->stop_timeout = STOP_TIMEOUT_1000;
		break;
	case SPEED_100:
		mr1 |= EMAC_MR1_MF_100;
		dev->stop_timeout = STOP_TIMEOUT_100;
		break;
	default:		/* make gcc happy */
		break;
	}

	/* Propagate the negotiated speed to the bridge (RGMII/ZMII) */
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_set_speed(dev->rgmii_dev, dev->rgmii_port,
				dev->phy.speed);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_set_speed(dev->zmii_dev, dev->zmii_port, dev->phy.speed);

	/* on 40x erratum forces us to NOT use integrated flow control,
	 * let's hope it works on 44x ;)
	 */
	if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x) &&
	    dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			mr1 |= EMAC_MR1_EIFC | EMAC_MR1_APP;
		else if (dev->phy.asym_pause)
			mr1 |= EMAC_MR1_APP;
	}

	/* Add base settings & fifo sizes & program MR1 */
	mr1 |= emac_calc_base_mr1(dev, tx_size, rx_size);
	out_be32(&p->mr1, mr1);

	/* Set individual MAC address */
	out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
	out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
		 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
		 ndev->dev_addr[5]);

	/* VLAN Tag Protocol ID */
	out_be32(&p->vtpid, 0x8100);

	/* Receive mode register; program the hash table first when
	 * hashed multicast filtering is about to be enabled */
	r = emac_iff2rmr(ndev);
	if (r & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, r);

	/* FIFOs thresholds */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
			       tx_size / 2 / dev->fifo_entry_size);
	else
		r = EMAC_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
			      tx_size / 2 / dev->fifo_entry_size);
	out_be32(&p->tmr1, r);
	out_be32(&p->trtr, emac_calc_trtr(dev, tx_size / 2));

	/* PAUSE frame is sent when RX FIFO reaches its high-water mark,
	   there should be still enough space in FIFO to allow the our link
	   partner time to process this frame and also time to send PAUSE
	   frame itself.

	   Here is the worst case scenario for the RX FIFO "headroom"
	   (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):

	   1) One maximum-length frame on TX                    1522 bytes
	   2) One PAUSE frame time                                64 bytes
	   3) PAUSE frame decode time allowance                   64 bytes
	   4) One maximum-length frame on RX                    1522 bytes
	   5) Round-trip propagation delay of the link (100Mb)    15 bytes
	   ----------
	   3187 bytes

	   I chose to set high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
	   low-water mark  to RX_FIFO_SIZE / 8 (512 bytes)
	 */
	r = emac_calc_rwmr(dev, rx_size / 8 / dev->fifo_entry_size,
			   rx_size / 4 / dev->fifo_entry_size);
	out_be32(&p->rwmr, r);

	/* Set PAUSE timer to the maximum */
	out_be32(&p->ptr, 0xffff);

	/* IRQ sources */
	r = EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
		EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
		EMAC_ISR_IRE | EMAC_ISR_TE;
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
	    r |= EMAC4_ISR_TXPE | EMAC4_ISR_RXPE /* | EMAC4_ISR_TXUE |
						 EMAC4_ISR_RXOE | */;
	out_be32(&p->iser,  r);

	/* We need to take GPCS PHY out of isolate mode after EMAC reset */
	if (emac_phy_gpcs(dev->phy.mode)) {
		if (dev->phy.gpcs_address != 0xffffffff)
			emac_mii_reset_gpcs(&dev->phy);
		else
			emac_mii_reset_phy(&dev->phy);
	}

	return 0;
}
696
/* Stop the interface, reprogram the MAC for the current link state and
 * restart it.  TX/RX are only re-enabled if the reconfiguration
 * succeeded; the netif is restarted unconditionally.
 */
static void emac_reinitialize(struct emac_instance *dev)
{
	DBG(dev, "reinitialize" NL);

	emac_netif_stop(dev);
	if (!emac_configure(dev)) {
		emac_tx_enable(dev);
		emac_rx_enable(dev);
	}
	emac_netif_start(dev);
}
708
/* Full TX path reset: stop the transmitter and its MAL channel, drop
 * everything still queued in the TX ring, reset the ring indices,
 * reprogram the MAC and bring TX (and RX) back up.  Used to recover
 * from TX hangs.
 */
static void emac_full_tx_reset(struct emac_instance *dev)
{
	DBG(dev, "full_tx_reset" NL);

	emac_tx_disable(dev);
	mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
	emac_clean_tx_ring(dev);
	dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;

	emac_configure(dev);

	mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
}
724
/* Workqueue handler scheduled by emac_tx_timeout(): perform a full TX
 * reset under link_lock, but only while the device is open.
 */
static void emac_reset_work(struct work_struct *work)
{
	struct emac_instance *dev = container_of(work, struct emac_instance, reset_work);

	DBG(dev, "reset_work" NL);

	mutex_lock(&dev->link_lock);
	if (dev->opened) {
		emac_netif_stop(dev);
		emac_full_tx_reset(dev);
		emac_netif_start(dev);
	}
	mutex_unlock(&dev->link_lock);
}
739
/* net_device TX watchdog callback: defer the heavyweight recovery to
 * the reset workqueue (we cannot sleep here).
 */
static void emac_tx_timeout(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "tx_timeout" NL);

	schedule_work(&dev->reset_work);
}
748
749
750static inline int emac_phy_done(struct emac_instance *dev, u32 stacr)
751{
752 int done = !!(stacr & EMAC_STACR_OC);
753
754 if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
755 done = !done;
756
757 return done;
758};
759
/* Read PHY register 'reg' of PHY 'id' over the MDIO bus, serialized by
 * mdio_lock.  The ZMII/RGMII bridge is switched to this EMAC's port for
 * the duration of the transaction.  Waits (busy-poll) for the
 * management interface to go idle before, and for completion after,
 * issuing the read.
 *
 * Returns the 16-bit register value (>= 0), -ETIMEDOUT on a stuck
 * interface, or -EREMOTEIO if the PHY flagged an error.
 */
static int __emac_mdio_read(struct emac_instance *dev, u8 id, u8 reg)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r = 0;
	int n, err = -ETIMEDOUT;

	mutex_lock(&dev->mdio_lock);

	DBG2(dev, "mdio_read(%02x,%02x)" NL, id, reg);

	/* Enable proper MDIO port */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

	/* Wait for management interface to become idle */
	n = 20;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait idle\n");
			goto bail;
		}
	}

	/* Issue read command */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_STACR_BASE(dev->opb_bus_freq);
	else
		r = EMAC_STACR_BASE(dev->opb_bus_freq);
	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		r |= EMAC_STACR_OC;
	if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
		r |= EMACX_STACR_STAC_READ;
	else
		r |= EMAC_STACR_STAC_READ;
	r |= (reg & EMAC_STACR_PRA_MASK)
		| ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT);
	out_be32(&p->stacr, r);

	/* Wait for read to complete */
	n = 200;
	while (!emac_phy_done(dev, (r = in_be32(&p->stacr)))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait complete\n");
			goto bail;
		}
	}

	if (unlikely(r & EMAC_STACR_PHYE)) {
		DBG(dev, "mdio_read(%02x, %02x) failed" NL, id, reg);
		err = -EREMOTEIO;
		goto bail;
	}

	/* Extract the 16-bit data field from STACR */
	r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);

	DBG2(dev, "mdio_read -> %04x" NL, r);
	err = 0;
 bail:
	/* Release the bridge MDIO port in reverse acquisition order */
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
	mutex_unlock(&dev->mdio_lock);

	return err == 0 ? r : err;
}
830
/* Write 'val' to PHY register 'reg' of PHY 'id' over the MDIO bus,
 * serialized by mdio_lock.  Mirrors __emac_mdio_read(): switch the
 * ZMII/RGMII bridge to our port, wait for the management interface to
 * go idle, issue the write and wait for completion.  Timeouts are
 * logged via DBG2 but not reported to the caller (void return).
 */
static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg,
			      u16 val)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r = 0;
	int n, err = -ETIMEDOUT;

	mutex_lock(&dev->mdio_lock);

	DBG2(dev, "mdio_write(%02x,%02x,%04x)" NL, id, reg, val);

	/* Enable proper MDIO port */
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);

	/* Wait for management interface to be idle */
	n = 20;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait idle\n");
			goto bail;
		}
	}

	/* Issue write command */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		r = EMAC4_STACR_BASE(dev->opb_bus_freq);
	else
		r = EMAC_STACR_BASE(dev->opb_bus_freq);
	if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
		r |= EMAC_STACR_OC;
	if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
		r |= EMACX_STACR_STAC_WRITE;
	else
		r |= EMAC_STACR_STAC_WRITE;
	r |= (reg & EMAC_STACR_PRA_MASK) |
		((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
		(val << EMAC_STACR_PHYD_SHIFT);
	out_be32(&p->stacr, r);

	/* Wait for write to complete */
	n = 200;
	while (!emac_phy_done(dev, in_be32(&p->stacr))) {
		udelay(1);
		if (!--n) {
			DBG2(dev, " -> timeout wait complete\n");
			goto bail;
		}
	}
	err = 0;
 bail:
	/* Release the bridge MDIO port in reverse acquisition order */
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
	mutex_unlock(&dev->mdio_lock);
}
891
892static int emac_mdio_read(struct net_device *ndev, int id, int reg)
893{
894 struct emac_instance *dev = netdev_priv(ndev);
895 int res;
896
897 res = __emac_mdio_read((dev->mdio_instance &&
898 dev->phy.gpcs_address != id) ?
899 dev->mdio_instance : dev,
900 (u8) id, (u8) reg);
901 return res;
902}
903
904static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
905{
906 struct emac_instance *dev = netdev_priv(ndev);
907
908 __emac_mdio_write((dev->mdio_instance &&
909 dev->phy.gpcs_address != id) ?
910 dev->mdio_instance : dev,
911 (u8) id, (u8) reg, (u16) val);
912}
913
/* Tx lock BH */
/* Apply the current interface flags / multicast list to the RX mode
 * register.  Instead of a full (and TX-hanging, see below) EMAC reset,
 * only the RX channel is briefly stopped around the RMR update; the
 * hash table is reprogrammed first when hashed filtering is enabled.
 */
static void __emac_set_multicast_list(struct emac_instance *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 rmr = emac_iff2rmr(dev->ndev);

	DBG(dev, "__multicast %08x" NL, rmr);

	/* I decided to relax register access rules here to avoid
	 * full EMAC reset.
	 *
	 * There is a real problem with EMAC4 core if we use MWSW_001 bit
	 * in MR1 register and do a full EMAC reset.
	 * One TX BD status update is delayed and, after EMAC reset, it
	 * never happens, resulting in TX hung (it'll be recovered by TX
	 * timeout handler eventually, but this is just gross).
	 * So we either have to do full TX reset or try to cheat here :)
	 *
	 * The only required change is to RX mode register, so I *think* all
	 * we need is just to stop RX channel. This seems to work on all
	 * tested SoCs. --ebs
	 *
	 * If we need the full reset, we might just trigger the workqueue
	 * and do it async... a bit nasty but should work --BenH
	 */
	dev->mcast_pending = 0;
	emac_rx_disable(dev);
	if (rmr & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, rmr);
	emac_rx_enable(dev);
}
946
947/* Tx lock BH */
948static void emac_set_multicast_list(struct net_device *ndev)
949{
950 struct emac_instance *dev = netdev_priv(ndev);
951
952 DBG(dev, "multicast" NL);
953
954 BUG_ON(!netif_running(dev->ndev));
955
956 if (dev->no_mcast) {
957 dev->mcast_pending = 1;
958 return;
959 }
960 __emac_set_multicast_list(dev);
961}
962
/* Rebuild the RX ring for a new MTU while the interface is up.
 * Stops RX traffic, drops in-flight packets, and reallocates skbs only
 * when the new MTU needs bigger buffers. Returns 0 or -ENOMEM (on OOM
 * the ring is left with the old, smaller buffers but RX is restarted).
 */
static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
{
	int rx_sync_size = emac_rx_sync_size(new_mtu);
	int rx_skb_size = emac_rx_skb_size(new_mtu);
	int i, ret = 0;

	mutex_lock(&dev->link_lock);
	emac_netif_stop(dev);
	emac_rx_disable(dev);
	mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);

	/* Drop any partially-assembled scatter/gather packet */
	if (dev->rx_sg_skb) {
		++dev->estats.rx_dropped_resize;
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}

	/* Make a first pass over RX ring and mark BDs ready, dropping
	 * non-processed packets on the way. We need this as a separate pass
	 * to simplify error recovery in the case of allocation failure later.
	 */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
			++dev->estats.rx_dropped_resize;

		dev->rx_desc[i].data_len = 0;
		dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
		    (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
	}

	/* Reallocate RX ring only if bigger skb buffers are required */
	if (rx_skb_size <= dev->rx_skb_size)
		goto skip;

	/* Second pass, allocate new skbs */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
		if (!skb) {
			ret = -ENOMEM;
			goto oom;
		}

		BUG_ON(!dev->rx_skb[i]);
		dev_kfree_skb(dev->rx_skb[i]);

		/* +2 keeps the IP header 4-byte aligned after the 14-byte
		 * Ethernet header; the DMA address is offset to match. */
		skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
		dev->rx_desc[i].data_ptr =
		    dma_map_single(&dev->ofdev->dev, skb->data - 2, rx_sync_size,
				   DMA_FROM_DEVICE) + 2;
		dev->rx_skb[i] = skb;
	}
 skip:
	/* Check if we need to change "Jumbo" bit in MR1 */
	if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) {
		/* This is to prevent starting RX channel in emac_rx_enable() */
		set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);

		dev->ndev->mtu = new_mtu;
		emac_full_tx_reset(dev);
	}

	mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(new_mtu));
 oom:
	/* Restart RX */
	clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
	dev->rx_slot = 0;
	mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
	emac_rx_enable(dev);
	emac_netif_start(dev);
	mutex_unlock(&dev->link_lock);

	return ret;
}
1036
1037/* Process ctx, rtnl_lock semaphore */
1038static int emac_change_mtu(struct net_device *ndev, int new_mtu)
1039{
1040 struct emac_instance *dev = netdev_priv(ndev);
1041 int ret = 0;
1042
1043 if (new_mtu < EMAC_MIN_MTU || new_mtu > dev->max_mtu)
1044 return -EINVAL;
1045
1046 DBG(dev, "change_mtu(%d)" NL, new_mtu);
1047
1048 if (netif_running(ndev)) {
1049 /* Check if we really need to reinitialize RX ring */
1050 if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
1051 ret = emac_resize_rx_ring(dev, new_mtu);
1052 }
1053
1054 if (!ret) {
1055 ndev->mtu = new_mtu;
1056 dev->rx_skb_size = emac_rx_skb_size(new_mtu);
1057 dev->rx_sync_size = emac_rx_sync_size(new_mtu);
1058 }
1059
1060 return ret;
1061}
1062
1063static void emac_clean_tx_ring(struct emac_instance *dev)
1064{
1065 int i;
1066
1067 for (i = 0; i < NUM_TX_BUFF; ++i) {
1068 if (dev->tx_skb[i]) {
1069 dev_kfree_skb(dev->tx_skb[i]);
1070 dev->tx_skb[i] = NULL;
1071 if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
1072 ++dev->estats.tx_dropped;
1073 }
1074 dev->tx_desc[i].ctrl = 0;
1075 dev->tx_desc[i].data_ptr = 0;
1076 }
1077}
1078
1079static void emac_clean_rx_ring(struct emac_instance *dev)
1080{
1081 int i;
1082
1083 for (i = 0; i < NUM_RX_BUFF; ++i)
1084 if (dev->rx_skb[i]) {
1085 dev->rx_desc[i].ctrl = 0;
1086 dev_kfree_skb(dev->rx_skb[i]);
1087 dev->rx_skb[i] = NULL;
1088 dev->rx_desc[i].data_ptr = 0;
1089 }
1090
1091 if (dev->rx_sg_skb) {
1092 dev_kfree_skb(dev->rx_sg_skb);
1093 dev->rx_sg_skb = NULL;
1094 }
1095}
1096
/* Allocate and DMA-map a fresh RX skb for the given ring slot, then hand
 * the descriptor back to the MAL. Returns 0 or -ENOMEM.
 */
static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot,
				    gfp_t flags)
{
	struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
	if (unlikely(!skb))
		return -ENOMEM;

	dev->rx_skb[slot] = skb;
	dev->rx_desc[slot].data_len = 0;

	/* +2 keeps the IP header 4-byte aligned; the DMA address is offset
	 * by the same 2 bytes to match. */
	skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
	dev->rx_desc[slot].data_ptr =
	    dma_map_single(&dev->ofdev->dev, skb->data - 2, dev->rx_sync_size,
			   DMA_FROM_DEVICE) + 2;
	/* Descriptor fields must be visible before ctrl hands it to HW */
	wmb();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);

	return 0;
}
1117
1118static void emac_print_link_status(struct emac_instance *dev)
1119{
1120 if (netif_carrier_ok(dev->ndev))
1121 printk(KERN_INFO "%s: link is up, %d %s%s\n",
1122 dev->ndev->name, dev->phy.speed,
1123 dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
1124 dev->phy.pause ? ", pause enabled" :
1125 dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
1126 else
1127 printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
1128}
1129
/* Process ctx, rtnl_lock semaphore */
/* ndo_open: allocate the RX ring, start PHY link polling, and bring up
 * the MAL channels and EMAC TX/RX engines.
 */
static int emac_open(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int err, i;

	DBG(dev, "open" NL);

	/* Setup error IRQ handler */
	err = request_irq(dev->emac_irq, emac_irq, 0, "EMAC", dev);
	if (err) {
		printk(KERN_ERR "%s: failed to request IRQ %d\n",
		       ndev->name, dev->emac_irq);
		return err;
	}

	/* Allocate RX ring */
	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
			printk(KERN_ERR "%s: failed to allocate RX ring\n",
			       ndev->name);
			goto oom;
		}

	dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot = 0;
	clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
	dev->rx_sg_skb = NULL;

	mutex_lock(&dev->link_lock);
	dev->opened = 1;

	/* Start PHY polling now.
	 */
	if (dev->phy.address >= 0) {
		int link_poll_interval;
		if (dev->phy.def->ops->poll_link(&dev->phy)) {
			dev->phy.def->ops->read_link(&dev->phy);
			emac_rx_clk_default(dev);
			netif_carrier_on(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_ON;
		} else {
			emac_rx_clk_tx(dev);
			netif_carrier_off(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_OFF;
		}
		/* link_polling must be visible before the work can observe it */
		dev->link_polling = 1;
		wmb();
		schedule_delayed_work(&dev->link_work, link_poll_interval);
		emac_print_link_status(dev);
	} else
		netif_carrier_on(dev->ndev);

	/* Required for Pause packet support in EMAC */
	dev_mc_add_global(ndev, default_mcast_addr);

	emac_configure(dev);
	mal_poll_add(dev->mal, &dev->commac);
	mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
	mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(ndev->mtu));
	mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
	emac_netif_start(dev);

	mutex_unlock(&dev->link_lock);

	return 0;
 oom:
	/* emac_clean_rx_ring() copes with a partially-allocated ring */
	emac_clean_rx_ring(dev);
	free_irq(dev->emac_irq, dev);

	return -ENOMEM;
}
1203
/* BHs disabled */
#if 0
/* Dead code (compiled out): compares the link parameters currently
 * programmed in MR1 against the cached PHY state; kept for reference.
 */
static int emac_link_differs(struct emac_instance *dev)
{
	u32 r = in_be32(&dev->emacp->mr1);

	int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
	int speed, pause, asym_pause;

	if (r & EMAC_MR1_MF_1000)
		speed = SPEED_1000;
	else if (r & EMAC_MR1_MF_100)
		speed = SPEED_100;
	else
		speed = SPEED_10;

	switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
	case (EMAC_MR1_EIFC | EMAC_MR1_APP):
		pause = 1;
		asym_pause = 0;
		break;
	case EMAC_MR1_APP:
		pause = 0;
		asym_pause = 1;
		break;
	default:
		pause = asym_pause = 0;
	}
	return speed != dev->phy.speed || duplex != dev->phy.duplex ||
	    pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
}
#endif
1236
/* Delayed-work handler that polls the PHY link state and handles
 * carrier transitions (full TX reset on link-up, reinitialize on
 * link-down), then reschedules itself.
 */
static void emac_link_timer(struct work_struct *work)
{
	struct emac_instance *dev =
		container_of(to_delayed_work(work),
			     struct emac_instance, link_work);
	int link_poll_interval;

	mutex_lock(&dev->link_lock);
	DBG2(dev, "link timer" NL);

	/* Interface was closed while the work was queued */
	if (!dev->opened)
		goto bail;

	if (dev->phy.def->ops->poll_link(&dev->phy)) {
		if (!netif_carrier_ok(dev->ndev)) {
			emac_rx_clk_default(dev);
			/* Get new link parameters */
			dev->phy.def->ops->read_link(&dev->phy);

			netif_carrier_on(dev->ndev);
			emac_netif_stop(dev);
			emac_full_tx_reset(dev);
			emac_netif_start(dev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_ON;
	} else {
		if (netif_carrier_ok(dev->ndev)) {
			emac_rx_clk_tx(dev);
			netif_carrier_off(dev->ndev);
			netif_tx_disable(dev->ndev);
			emac_reinitialize(dev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_OFF;
	}
	schedule_delayed_work(&dev->link_work, link_poll_interval);
 bail:
	mutex_unlock(&dev->link_lock);
}
1277
/* Force the link-poll work to re-evaluate the link soon (used after
 * ethtool settings changes). Drops carrier so the timer treats the next
 * poll as a fresh link-up.
 */
static void emac_force_link_update(struct emac_instance *dev)
{
	netif_carrier_off(dev->ndev);
	smp_rmb();
	if (dev->link_polling) {
		cancel_delayed_work_sync(&dev->link_work);
		/* Re-check: emac_close() may have cleared link_polling
		 * while we were cancelling. */
		if (dev->link_polling)
			schedule_delayed_work(&dev->link_work,  PHY_POLL_LINK_OFF);
	}
}
1288
/* Process ctx, rtnl_lock semaphore */
/* ndo_stop: stop link polling, quiesce TX/RX and the MAL channels, then
 * release the rings and the IRQ.
 */
static int emac_close(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);

	DBG(dev, "close" NL);

	if (dev->phy.address >= 0) {
		/* Clear link_polling before cancelling so the work does not
		 * get rescheduled behind our back. */
		dev->link_polling = 0;
		cancel_delayed_work_sync(&dev->link_work);
	}
	mutex_lock(&dev->link_lock);
	emac_netif_stop(dev);
	dev->opened = 0;
	mutex_unlock(&dev->link_lock);

	emac_rx_disable(dev);
	emac_tx_disable(dev);
	mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
	mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
	mal_poll_del(dev->mal, &dev->commac);

	emac_clean_tx_ring(dev);
	emac_clean_rx_ring(dev);

	free_irq(dev->emac_irq, dev);

	netif_carrier_off(ndev);

	return 0;
}
1320
1321static inline u16 emac_tx_csum(struct emac_instance *dev,
1322 struct sk_buff *skb)
1323{
1324 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
1325 (skb->ip_summed == CHECKSUM_PARTIAL)) {
1326 ++dev->stats.tx_packets_csum;
1327 return EMAC_TX_CTRL_TAH_CSUM;
1328 }
1329 return 0;
1330}
1331
/* Common tail of the xmit paths: kick the transmitter via TMR0, stop the
 * queue when the ring is full, and update TX statistics.
 */
static inline int emac_xmit_finish(struct emac_instance *dev, int len)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;

	/* Send the packet out. If the if makes a significant perf
	 * difference, then we can store the TMR0 value in "dev"
	 * instead
	 */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4))
		out_be32(&p->tmr0, EMAC4_TMR0_XMIT);
	else
		out_be32(&p->tmr0, EMAC_TMR0_XMIT);

	/* Ring completely full: hold off further transmits until
	 * emac_poll_tx() reaps some descriptors. */
	if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
		netif_stop_queue(ndev);
		DBG2(dev, "stopped TX queue" NL);
	}

	ndev->trans_start = jiffies;
	++dev->stats.tx_packets;
	dev->stats.tx_bytes += len;

	return NETDEV_TX_OK;
}
1357
/* Tx lock BH */
/* ndo_start_xmit for the single-descriptor (non-SG) case: map the skb
 * and publish one ready descriptor.
 */
static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	unsigned int len = skb->len;
	int slot;

	u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);

	slot = dev->tx_slot++;
	if (dev->tx_slot == NUM_TX_BUFF) {
		dev->tx_slot = 0;
		ctrl |= MAL_TX_CTRL_WRAP;
	}

	DBG2(dev, "xmit(%u) %d" NL, len, slot);

	dev->tx_skb[slot] = skb;
	dev->tx_desc[slot].data_ptr = dma_map_single(&dev->ofdev->dev,
						     skb->data, len,
						     DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) len;
	/* Descriptor fields must be visible before READY hands it to HW */
	wmb();
	dev->tx_desc[slot].ctrl = ctrl;

	return emac_xmit_finish(dev, len);
}
1386
/* Spread one DMA-mapped region across as many descriptors as needed
 * (each at most MAL_MAX_TX_SIZE). "last" marks the final region of the
 * packet so MAL_TX_CTRL_LAST lands on the final chunk. Returns the slot
 * of the last descriptor written.
 */
static inline int emac_xmit_split(struct emac_instance *dev, int slot,
				  u32 pd, int len, int last, u16 base_ctrl)
{
	while (1) {
		u16 ctrl = base_ctrl;
		int chunk = min(len, MAL_MAX_TX_SIZE);
		len -= chunk;

		slot = (slot + 1) % NUM_TX_BUFF;

		if (last && !len)
			ctrl |= MAL_TX_CTRL_LAST;
		if (slot == NUM_TX_BUFF - 1)
			ctrl |= MAL_TX_CTRL_WRAP;

		/* Only the final descriptor of a packet owns the skb */
		dev->tx_skb[slot] = NULL;
		dev->tx_desc[slot].data_ptr = pd;
		dev->tx_desc[slot].data_len = (u16) chunk;
		dev->tx_desc[slot].ctrl = ctrl;
		++dev->tx_cnt;

		if (!len)
			break;

		pd += chunk;
	}
	return slot;
}
1415
/* Tx lock BH disabled (SG version for TAH equipped EMACs) */
/* Scatter/gather ndo_start_xmit: maps the linear part and each fragment,
 * splitting into MAL_MAX_TX_SIZE chunks, and rolls everything back if
 * the descriptor estimate turns out too optimistic.
 */
static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int len = skb->len, chunk;
	int slot, i;
	u16 ctrl;
	u32 pd;

	/* This is common "fast" path */
	if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
		return emac_start_xmit(skb, ndev);

	len -= skb->data_len;

	/* Note, this is only an *estimation*, we can still run out of empty
	 * slots because of the additional fragmentation into
	 * MAL_MAX_TX_SIZE-sized chunks
	 */
	if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
		goto stop_queue;

	ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    emac_tx_csum(dev, skb);
	slot = dev->tx_slot;

	/* skb data */
	dev->tx_skb[slot] = NULL;
	chunk = min(len, MAL_MAX_TX_SIZE);
	dev->tx_desc[slot].data_ptr = pd =
	    dma_map_single(&dev->ofdev->dev, skb->data, len, DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) chunk;
	len -= chunk;
	if (unlikely(len))
		slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
				       ctrl);
	/* skb fragments */
	for (i = 0; i < nr_frags; ++i) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		len = frag->size;

		if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
			goto undo_frame;

		pd = dma_map_page(&dev->ofdev->dev, frag->page, frag->page_offset, len,
				  DMA_TO_DEVICE);

		slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
				       ctrl);
	}

	DBG2(dev, "xmit_sg(%u) %d - %d" NL, skb->len, dev->tx_slot, slot);

	/* Attach skb to the last slot so we don't release it too early */
	dev->tx_skb[slot] = skb;

	/* Send the packet out */
	if (dev->tx_slot == NUM_TX_BUFF - 1)
		ctrl |= MAL_TX_CTRL_WRAP;
	/* All later descriptors must be visible before the first one is
	 * marked READY, since HW walks the chain from the first. */
	wmb();
	dev->tx_desc[dev->tx_slot].ctrl = ctrl;
	dev->tx_slot = (slot + 1) % NUM_TX_BUFF;

	return emac_xmit_finish(dev, skb->len);

 undo_frame:
	/* Well, too bad. Our previous estimation was overly optimistic.
	 * Undo everything.
	 */
	while (slot != dev->tx_slot) {
		dev->tx_desc[slot].ctrl = 0;
		--dev->tx_cnt;
		if (--slot < 0)
			slot = NUM_TX_BUFF - 1;
	}
	++dev->estats.tx_undo;

 stop_queue:
	netif_stop_queue(ndev);
	DBG2(dev, "stopped TX queue" NL);
	return NETDEV_TX_BUSY;
}
1499
1500/* Tx lock BHs */
1501static void emac_parse_tx_error(struct emac_instance *dev, u16 ctrl)
1502{
1503 struct emac_error_stats *st = &dev->estats;
1504
1505 DBG(dev, "BD TX error %04x" NL, ctrl);
1506
1507 ++st->tx_bd_errors;
1508 if (ctrl & EMAC_TX_ST_BFCS)
1509 ++st->tx_bd_bad_fcs;
1510 if (ctrl & EMAC_TX_ST_LCS)
1511 ++st->tx_bd_carrier_loss;
1512 if (ctrl & EMAC_TX_ST_ED)
1513 ++st->tx_bd_excessive_deferral;
1514 if (ctrl & EMAC_TX_ST_EC)
1515 ++st->tx_bd_excessive_collisions;
1516 if (ctrl & EMAC_TX_ST_LC)
1517 ++st->tx_bd_late_collision;
1518 if (ctrl & EMAC_TX_ST_MC)
1519 ++st->tx_bd_multple_collisions;
1520 if (ctrl & EMAC_TX_ST_SC)
1521 ++st->tx_bd_single_collision;
1522 if (ctrl & EMAC_TX_ST_UR)
1523 ++st->tx_bd_underrun;
1524 if (ctrl & EMAC_TX_ST_SQE)
1525 ++st->tx_bd_sqe;
1526}
1527
/* MAL poll callback: reap completed TX descriptors, free their skbs,
 * account BD errors, and wake the queue once enough slots are free.
 */
static void emac_poll_tx(void *param)
{
	struct emac_instance *dev = param;
	u32 bad_mask;

	DBG2(dev, "poll_tx, %d %d" NL, dev->tx_cnt, dev->ack_slot);

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		bad_mask = EMAC_IS_BAD_TX_TAH;
	else
		bad_mask = EMAC_IS_BAD_TX;

	netif_tx_lock_bh(dev->ndev);
	if (dev->tx_cnt) {
		u16 ctrl;
		int slot = dev->ack_slot, n = 0;
	again:
		ctrl = dev->tx_desc[slot].ctrl;
		/* READY still set means the HW has not finished this BD */
		if (!(ctrl & MAL_TX_CTRL_READY)) {
			struct sk_buff *skb = dev->tx_skb[slot];
			++n;

			/* Split packets only attach the skb to the last BD */
			if (skb) {
				dev_kfree_skb(skb);
				dev->tx_skb[slot] = NULL;
			}
			slot = (slot + 1) % NUM_TX_BUFF;

			if (unlikely(ctrl & bad_mask))
				emac_parse_tx_error(dev, ctrl);

			if (--dev->tx_cnt)
				goto again;
		}
		if (n) {
			dev->ack_slot = slot;
			if (netif_queue_stopped(dev->ndev) &&
			    dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
				netif_wake_queue(dev->ndev);

			DBG2(dev, "tx %d pkts" NL, n);
		}
	}
	netif_tx_unlock_bh(dev->ndev);
}
1573
/* Hand an RX buffer whose skb we kept back to the hardware: re-sync the
 * consumed bytes for DMA and mark the descriptor EMPTY again.
 */
static inline void emac_recycle_rx_skb(struct emac_instance *dev, int slot,
				       int len)
{
	struct sk_buff *skb = dev->rx_skb[slot];

	DBG2(dev, "recycle %d %d" NL, slot, len);

	if (len)
		dma_map_single(&dev->ofdev->dev, skb->data - 2,
			       EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);

	dev->rx_desc[slot].data_len = 0;
	/* data_len must be visible before EMPTY hands the BD to HW */
	wmb();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
}
1590
1591static void emac_parse_rx_error(struct emac_instance *dev, u16 ctrl)
1592{
1593 struct emac_error_stats *st = &dev->estats;
1594
1595 DBG(dev, "BD RX error %04x" NL, ctrl);
1596
1597 ++st->rx_bd_errors;
1598 if (ctrl & EMAC_RX_ST_OE)
1599 ++st->rx_bd_overrun;
1600 if (ctrl & EMAC_RX_ST_BP)
1601 ++st->rx_bd_bad_packet;
1602 if (ctrl & EMAC_RX_ST_RP)
1603 ++st->rx_bd_runt_packet;
1604 if (ctrl & EMAC_RX_ST_SE)
1605 ++st->rx_bd_short_event;
1606 if (ctrl & EMAC_RX_ST_AE)
1607 ++st->rx_bd_alignment_error;
1608 if (ctrl & EMAC_RX_ST_BFCS)
1609 ++st->rx_bd_bad_fcs;
1610 if (ctrl & EMAC_RX_ST_PTL)
1611 ++st->rx_bd_packet_too_long;
1612 if (ctrl & EMAC_RX_ST_ORE)
1613 ++st->rx_bd_out_of_range;
1614 if (ctrl & EMAC_RX_ST_IRE)
1615 ++st->rx_bd_in_range;
1616}
1617
1618static inline void emac_rx_csum(struct emac_instance *dev,
1619 struct sk_buff *skb, u16 ctrl)
1620{
1621#ifdef CONFIG_IBM_NEW_EMAC_TAH
1622 if (!ctrl && dev->tah_dev) {
1623 skb->ip_summed = CHECKSUM_UNNECESSARY;
1624 ++dev->stats.rx_packets_csum;
1625 }
1626#endif
1627}
1628
/* Append one RX fragment to the in-progress scatter/gather skb.
 * Returns 0 on success; -1 if there is no SG skb in progress or the
 * packet would exceed the skb buffer (packet dropped). The ring slot is
 * always recycled.
 */
static inline int emac_rx_sg_append(struct emac_instance *dev, int slot)
{
	if (likely(dev->rx_sg_skb != NULL)) {
		int len = dev->rx_desc[slot].data_len;
		int tot_len = dev->rx_sg_skb->len + len;

		if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
			++dev->estats.rx_dropped_mtu;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		} else {
			cacheable_memcpy(skb_tail_pointer(dev->rx_sg_skb),
					 dev->rx_skb[slot]->data, len);
			skb_put(dev->rx_sg_skb, len);
			emac_recycle_rx_skb(dev, slot, len);
			return 0;
		}
	}
	emac_recycle_rx_skb(dev, slot, 0);
	return -1;
}
1650
/* NAPI poll context */
/* NAPI RX poll: process up to "budget" completed RX descriptors.
 * Small packets are copied into a fresh skb and the ring buffer is
 * recycled; large ones are passed up and replaced. Multi-descriptor
 * (scatter/gather) packets are assembled via dev->rx_sg_skb. Also
 * restarts a stopped RX channel once the ring has drained.
 * Returns the number of descriptors consumed.
 */
static int emac_poll_rx(void *param, int budget)
{
	struct emac_instance *dev = param;
	int slot = dev->rx_slot, received = 0;

	DBG2(dev, "poll_rx(%d)" NL, budget);

 again:
	while (budget > 0) {
		int len;
		struct sk_buff *skb;
		u16 ctrl = dev->rx_desc[slot].ctrl;

		if (ctrl & MAL_RX_CTRL_EMPTY)
			break;

		skb = dev->rx_skb[slot];
		/* Read data_len only after seeing ctrl != EMPTY */
		mb();
		len = dev->rx_desc[slot].data_len;

		if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
			goto sg;

		ctrl &= EMAC_BAD_RX_MASK;
		if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
			emac_parse_rx_error(dev, ctrl);
			++dev->estats.rx_dropped_error;
			emac_recycle_rx_skb(dev, slot, 0);
			len = 0;
			goto next;
		}

		if (len < ETH_HLEN) {
			++dev->estats.rx_dropped_stack;
			emac_recycle_rx_skb(dev, slot, len);
			goto next;
		}

		/* Copy small packets so the ring buffer can be recycled */
		if (len && len < EMAC_RX_COPY_THRESH) {
			struct sk_buff *copy_skb =
			    alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
			if (unlikely(!copy_skb))
				goto oom;

			skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
			cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
					 len + 2);
			emac_recycle_rx_skb(dev, slot, len);
			skb = copy_skb;
		} else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
			goto oom;

		skb_put(skb, len);
	push_packet:
		skb->protocol = eth_type_trans(skb, dev->ndev);
		emac_rx_csum(dev, skb, ctrl);

		if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
			++dev->estats.rx_dropped_stack;
	next:
		++dev->stats.rx_packets;
	skip:
		dev->stats.rx_bytes += len;
		slot = (slot + 1) % NUM_RX_BUFF;
		--budget;
		++received;
		continue;
	sg:
		/* Scatter/gather: FIRST starts a packet, LAST completes it */
		if (ctrl & MAL_RX_CTRL_FIRST) {
			BUG_ON(dev->rx_sg_skb);
			if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
				DBG(dev, "rx OOM %d" NL, slot);
				++dev->estats.rx_dropped_oom;
				emac_recycle_rx_skb(dev, slot, 0);
			} else {
				dev->rx_sg_skb = skb;
				skb_put(skb, len);
			}
		} else if (!emac_rx_sg_append(dev, slot) &&
			   (ctrl & MAL_RX_CTRL_LAST)) {

			skb = dev->rx_sg_skb;
			dev->rx_sg_skb = NULL;

			ctrl &= EMAC_BAD_RX_MASK;
			if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
				emac_parse_rx_error(dev, ctrl);
				++dev->estats.rx_dropped_error;
				dev_kfree_skb(skb);
				len = 0;
			} else
				goto push_packet;
		}
		goto skip;
	oom:
		DBG(dev, "rx OOM %d" NL, slot);
		/* Drop the packet and recycle skb */
		++dev->estats.rx_dropped_oom;
		emac_recycle_rx_skb(dev, slot, 0);
		goto next;
	}

	if (received) {
		DBG2(dev, "rx %d BDs" NL, received);
		dev->rx_slot = slot;
	}

	/* RX channel was stopped (e.g. by a resize); restart it now that
	 * we have budget left and the ring is drained. */
	if (unlikely(budget && test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags))) {
		mb();
		if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
			DBG2(dev, "rx restart" NL);
			received = 0;
			goto again;
		}

		if (dev->rx_sg_skb) {
			DBG2(dev, "dropping partial rx packet" NL);
			++dev->estats.rx_dropped_error;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		}

		clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
		mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
		emac_rx_enable(dev);
		dev->rx_slot = 0;
	}
	return received;
}
1781
1782/* NAPI poll context */
1783static int emac_peek_rx(void *param)
1784{
1785 struct emac_instance *dev = param;
1786
1787 return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
1788}
1789
1790/* NAPI poll context */
1791static int emac_peek_rx_sg(void *param)
1792{
1793 struct emac_instance *dev = param;
1794
1795 int slot = dev->rx_slot;
1796 while (1) {
1797 u16 ctrl = dev->rx_desc[slot].ctrl;
1798 if (ctrl & MAL_RX_CTRL_EMPTY)
1799 return 0;
1800 else if (ctrl & MAL_RX_CTRL_LAST)
1801 return 1;
1802
1803 slot = (slot + 1) % NUM_RX_BUFF;
1804
1805 /* I'm just being paranoid here :) */
1806 if (unlikely(slot == dev->rx_slot))
1807 return 0;
1808 }
1809}
1810
1811/* Hard IRQ */
1812static void emac_rxde(void *param)
1813{
1814 struct emac_instance *dev = param;
1815
1816 ++dev->estats.rx_stopped;
1817 emac_rx_disable_async(dev);
1818}
1819
/* Hard IRQ */
/* EMAC error interrupt handler: read and acknowledge ISR, then bump the
 * matching error counters under dev->lock.
 */
static irqreturn_t emac_irq(int irq, void *dev_instance)
{
	struct emac_instance *dev = dev_instance;
	struct emac_regs __iomem *p = dev->emacp;
	struct emac_error_stats *st = &dev->estats;
	u32 isr;

	spin_lock(&dev->lock);

	/* Writing the value back acknowledges (clears) the latched bits */
	isr = in_be32(&p->isr);
	out_be32(&p->isr, isr);

	DBG(dev, "isr = %08x" NL, isr);

	if (isr & EMAC4_ISR_TXPE)
		++st->tx_parity;
	if (isr & EMAC4_ISR_RXPE)
		++st->rx_parity;
	if (isr & EMAC4_ISR_TXUE)
		++st->tx_underrun;
	if (isr & EMAC4_ISR_RXOE)
		++st->rx_fifo_overrun;
	if (isr & EMAC_ISR_OVR)
		++st->rx_overrun;
	if (isr & EMAC_ISR_BP)
		++st->rx_bad_packet;
	if (isr & EMAC_ISR_RP)
		++st->rx_runt_packet;
	if (isr & EMAC_ISR_SE)
		++st->rx_short_event;
	if (isr & EMAC_ISR_ALE)
		++st->rx_alignment_error;
	if (isr & EMAC_ISR_BFCS)
		++st->rx_bad_fcs;
	if (isr & EMAC_ISR_PTLE)
		++st->rx_packet_too_long;
	if (isr & EMAC_ISR_ORE)
		++st->rx_out_of_range;
	if (isr & EMAC_ISR_IRE)
		++st->rx_in_range;
	if (isr & EMAC_ISR_SQE)
		++st->tx_sqe;
	if (isr & EMAC_ISR_TE)
		++st->tx_errors;

	spin_unlock(&dev->lock);

	return IRQ_HANDLED;
}
1870
/* ndo_get_stats: fold the driver's detailed 64-bit counters into the
 * standard net_device_stats snapshot, under dev->lock for consistency.
 */
static struct net_device_stats *emac_stats(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	struct emac_stats *st = &dev->stats;
	struct emac_error_stats *est = &dev->estats;
	struct net_device_stats *nst = &dev->nstats;
	unsigned long flags;

	DBG2(dev, "stats" NL);

	/* Compute "legacy" statistics */
	spin_lock_irqsave(&dev->lock, flags);
	nst->rx_packets = (unsigned long)st->rx_packets;
	nst->rx_bytes = (unsigned long)st->rx_bytes;
	nst->tx_packets = (unsigned long)st->tx_packets;
	nst->tx_bytes = (unsigned long)st->tx_bytes;
	nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
					  est->rx_dropped_error +
					  est->rx_dropped_resize +
					  est->rx_dropped_mtu);
	nst->tx_dropped = (unsigned long)est->tx_dropped;

	nst->rx_errors = (unsigned long)est->rx_bd_errors;
	nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
					      est->rx_fifo_overrun +
					      est->rx_overrun);
	nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
					       est->rx_alignment_error);
	nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
					     est->rx_bad_fcs);
	nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
						est->rx_bd_short_event +
						est->rx_bd_packet_too_long +
						est->rx_bd_out_of_range +
						est->rx_bd_in_range +
						est->rx_runt_packet +
						est->rx_short_event +
						est->rx_packet_too_long +
						est->rx_out_of_range +
						est->rx_in_range);

	nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
	nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
					      est->tx_underrun);
	nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
	nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
					  est->tx_bd_excessive_collisions +
					  est->tx_bd_late_collision +
					  est->tx_bd_multple_collisions);
	spin_unlock_irqrestore(&dev->lock, flags);
	return nst;
}
1923
/* MAL callback sets; the SG variant only differs in its RX peek, which
 * must find a complete (LAST) packet before reporting work.
 */
static struct mal_commac_ops emac_commac_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx,
	.rxde = &emac_rxde,
};

static struct mal_commac_ops emac_commac_sg_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx_sg,
	.rxde = &emac_rxde,
};
1937
1938/* Ethtool support */
1939static int emac_ethtool_get_settings(struct net_device *ndev,
1940 struct ethtool_cmd *cmd)
1941{
1942 struct emac_instance *dev = netdev_priv(ndev);
1943
1944 cmd->supported = dev->phy.features;
1945 cmd->port = PORT_MII;
1946 cmd->phy_address = dev->phy.address;
1947 cmd->transceiver =
1948 dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;
1949
1950 mutex_lock(&dev->link_lock);
1951 cmd->advertising = dev->phy.advertising;
1952 cmd->autoneg = dev->phy.autoneg;
1953 cmd->speed = dev->phy.speed;
1954 cmd->duplex = dev->phy.duplex;
1955 mutex_unlock(&dev->link_lock);
1956
1957 return 0;
1958}
1959
1960static int emac_ethtool_set_settings(struct net_device *ndev,
1961 struct ethtool_cmd *cmd)
1962{
1963 struct emac_instance *dev = netdev_priv(ndev);
1964 u32 f = dev->phy.features;
1965
1966 DBG(dev, "set_settings(%d, %d, %d, 0x%08x)" NL,
1967 cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);
1968
1969 /* Basic sanity checks */
1970 if (dev->phy.address < 0)
1971 return -EOPNOTSUPP;
1972 if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
1973 return -EINVAL;
1974 if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
1975 return -EINVAL;
1976 if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
1977 return -EINVAL;
1978
1979 if (cmd->autoneg == AUTONEG_DISABLE) {
1980 switch (cmd->speed) {
1981 case SPEED_10:
1982 if (cmd->duplex == DUPLEX_HALF &&
1983 !(f & SUPPORTED_10baseT_Half))
1984 return -EINVAL;
1985 if (cmd->duplex == DUPLEX_FULL &&
1986 !(f & SUPPORTED_10baseT_Full))
1987 return -EINVAL;
1988 break;
1989 case SPEED_100:
1990 if (cmd->duplex == DUPLEX_HALF &&
1991 !(f & SUPPORTED_100baseT_Half))
1992 return -EINVAL;
1993 if (cmd->duplex == DUPLEX_FULL &&
1994 !(f & SUPPORTED_100baseT_Full))
1995 return -EINVAL;
1996 break;
1997 case SPEED_1000:
1998 if (cmd->duplex == DUPLEX_HALF &&
1999 !(f & SUPPORTED_1000baseT_Half))
2000 return -EINVAL;
2001 if (cmd->duplex == DUPLEX_FULL &&
2002 !(f & SUPPORTED_1000baseT_Full))
2003 return -EINVAL;
2004 break;
2005 default:
2006 return -EINVAL;
2007 }
2008
2009 mutex_lock(&dev->link_lock);
2010 dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
2011 cmd->duplex);
2012 mutex_unlock(&dev->link_lock);
2013
2014 } else {
2015 if (!(f & SUPPORTED_Autoneg))
2016 return -EINVAL;
2017
2018 mutex_lock(&dev->link_lock);
2019 dev->phy.def->ops->setup_aneg(&dev->phy,
2020 (cmd->advertising & f) |
2021 (dev->phy.advertising &
2022 (ADVERTISED_Pause |
2023 ADVERTISED_Asym_Pause)));
2024 mutex_unlock(&dev->link_lock);
2025 }
2026 emac_force_link_update(dev);
2027
2028 return 0;
2029}
2030
2031static void emac_ethtool_get_ringparam(struct net_device *ndev,
2032 struct ethtool_ringparam *rp)
2033{
2034 rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
2035 rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
2036}
2037
/* ethtool ->get_pauseparam: report flow-control state derived from the
 * current PHY settings, under link_lock for a consistent snapshot.
 */
static void emac_ethtool_get_pauseparam(struct net_device *ndev,
					struct ethtool_pauseparam *pp)
{
	struct emac_instance *dev = netdev_priv(ndev);

	mutex_lock(&dev->link_lock);
	/* pause autoneg is on iff the PHY autonegotiates and some form of
	 * pause is being advertised */
	if ((dev->phy.features & SUPPORTED_Autoneg) &&
	    (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
		pp->autoneg = 1;

	/* pause frames only apply at full duplex: symmetric pause enables
	 * both directions, asymmetric pause TX only */
	if (dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			pp->rx_pause = pp->tx_pause = 1;
		else if (dev->phy.asym_pause)
			pp->tx_pause = 1;
	}
	mutex_unlock(&dev->link_lock);
}
2056
2057static int emac_get_regs_len(struct emac_instance *dev)
2058{
2059 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
2060 return sizeof(struct emac_ethtool_regs_subhdr) +
2061 EMAC4_ETHTOOL_REGS_SIZE(dev);
2062 else
2063 return sizeof(struct emac_ethtool_regs_subhdr) +
2064 EMAC_ETHTOOL_REGS_SIZE(dev);
2065}
2066
/* ethtool ->get_regs_len: total dump size — global header, MAL and EMAC
 * register blocks, plus ZMII/RGMII/TAH blocks when those bridge devices
 * are present. Must match what emac_ethtool_get_regs() writes.
 */
static int emac_ethtool_get_regs_len(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int size;

	size = sizeof(struct emac_ethtool_regs_hdr) +
		emac_get_regs_len(dev) + mal_get_regs_len(dev->mal);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		size += zmii_get_regs_len(dev->zmii_dev);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		size += rgmii_get_regs_len(dev->rgmii_dev);
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		size += tah_get_regs_len(dev->tah_dev);

	return size;
}
2083
/* Append this EMAC's register block (subheader followed by a raw MMIO
 * copy) at *buf. Returns the position just past what was written so the
 * caller can chain the next block.
 */
static void *emac_dump_regs(struct emac_instance *dev, void *buf)
{
	struct emac_ethtool_regs_subhdr *hdr = buf;

	hdr->index = dev->cell_index;
	if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
		hdr->version = EMAC4_ETHTOOL_REGS_VER;
		memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE(dev));
		return (void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE(dev);
	} else {
		hdr->version = EMAC_ETHTOOL_REGS_VER;
		memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE(dev));
		return (void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE(dev);
	}
}
2099
/* ethtool ->get_regs: fill @buf with the dump sized by
 * emac_ethtool_get_regs_len(): header, then MAL, EMAC and any bridge
 * register blocks, with hdr->components flagging which bridges follow.
 */
static void emac_ethtool_get_regs(struct net_device *ndev,
				  struct ethtool_regs *regs, void *buf)
{
	struct emac_instance *dev = netdev_priv(ndev);
	struct emac_ethtool_regs_hdr *hdr = buf;

	hdr->components = 0;
	buf = hdr + 1;

	/* each dump helper returns the next free position in the buffer */
	buf = mal_dump_regs(dev->mal, buf);
	buf = emac_dump_regs(dev, buf);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) {
		hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
		buf = zmii_dump_regs(dev->zmii_dev, buf);
	}
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
		hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
		buf = rgmii_dump_regs(dev->rgmii_dev, buf);
	}
	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) {
		hdr->components |= EMAC_ETHTOOL_REGS_TAH;
		buf = tah_dump_regs(dev->tah_dev, buf);
	}
}
2124
/* ethtool ->nway_reset: restart autonegotiation on the PHY.
 * Returns -EOPNOTSUPP when running PHY-less, -EINVAL when the current
 * link parameters were forced rather than autonegotiated.
 */
static int emac_ethtool_nway_reset(struct net_device *ndev)
{
	struct emac_instance *dev = netdev_priv(ndev);
	int res = 0;

	DBG(dev, "nway_reset" NL);

	/* address < 0 marks a PHY-less configuration */
	if (dev->phy.address < 0)
		return -EOPNOTSUPP;

	mutex_lock(&dev->link_lock);
	if (!dev->phy.autoneg) {
		res = -EINVAL;
		goto out;
	}

	dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
 out:
	mutex_unlock(&dev->link_lock);
	/* kick the link timer so the new state is picked up promptly */
	emac_force_link_update(dev);
	return res;
}
2147
2148static int emac_ethtool_get_sset_count(struct net_device *ndev, int stringset)
2149{
2150 if (stringset == ETH_SS_STATS)
2151 return EMAC_ETHTOOL_STATS_COUNT;
2152 else
2153 return -EINVAL;
2154}
2155
2156static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
2157 u8 * buf)
2158{
2159 if (stringset == ETH_SS_STATS)
2160 memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
2161}
2162
/* ethtool ->get_ethtool_stats: copy out the MAC stats then the extra
 * driver stats, in the same order as emac_stats_keys.
 */
static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
					   struct ethtool_stats *estats,
					   u64 * tmp_stats)
{
	struct emac_instance *dev = netdev_priv(ndev);

	/* both stats structs are arrays of u64 counters, so straight
	 * memcpys match the key table layout */
	memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
	tmp_stats += sizeof(dev->stats) / sizeof(u64);
	memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
}
2173
2174static void emac_ethtool_get_drvinfo(struct net_device *ndev,
2175 struct ethtool_drvinfo *info)
2176{
2177 struct emac_instance *dev = netdev_priv(ndev);
2178
2179 strcpy(info->driver, "ibm_emac");
2180 strcpy(info->version, DRV_VERSION);
2181 info->fw_version[0] = '\0';
2182 sprintf(info->bus_info, "PPC 4xx EMAC-%d %s",
2183 dev->cell_index, dev->ofdev->dev.of_node->full_name);
2184 info->regdump_len = emac_ethtool_get_regs_len(ndev);
2185}
2186
/* ethtool entry points; handlers that touch PHY/link state take
 * dev->link_lock internally.
 */
static const struct ethtool_ops emac_ethtool_ops = {
	.get_settings = emac_ethtool_get_settings,
	.set_settings = emac_ethtool_set_settings,
	.get_drvinfo = emac_ethtool_get_drvinfo,

	.get_regs_len = emac_ethtool_get_regs_len,
	.get_regs = emac_ethtool_get_regs,

	.nway_reset = emac_ethtool_nway_reset,

	.get_ringparam = emac_ethtool_get_ringparam,
	.get_pauseparam = emac_ethtool_get_pauseparam,

	.get_strings = emac_ethtool_get_strings,
	.get_sset_count = emac_ethtool_get_sset_count,
	.get_ethtool_stats = emac_ethtool_get_ethtool_stats,

	.get_link = ethtool_op_get_link,
};
2206
/* Legacy MII ioctls (SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG); only
 * usable when a real PHY is attached (address >= 0).
 */
static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	struct emac_instance *dev = netdev_priv(ndev);
	struct mii_ioctl_data *data = if_mii(rq);

	DBG(dev, "ioctl %08x" NL, cmd);

	if (dev->phy.address < 0)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = dev->phy.address;
		/* Fall through: GMIIPHY also reads reg_num, like GMIIREG */
	case SIOCGMIIREG:
		data->val_out = emac_mdio_read(ndev, dev->phy.address,
					       data->reg_num);
		return 0;

	case SIOCSMIIREG:
		emac_mdio_write(ndev, dev->phy.address, data->reg_num,
				data->val_in);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
2234
/* One probe-time dependency of an EMAC instance (MAL, ZMII, RGMII, TAH,
 * MDIO, or the previous EMAC in the boot list): the phandle from the
 * device tree plus the node / platform device / driver data it resolves
 * to, filled in progressively by emac_check_deps().
 */
struct emac_depentry {
	u32 phandle;
	struct device_node *node;
	struct platform_device *ofdev;
	void *drvdata;
};

/* Indices into the dependency array used by emac_check_deps() and
 * emac_wait_deps() */
#define EMAC_DEP_MAL_IDX	0
#define EMAC_DEP_ZMII_IDX	1
#define EMAC_DEP_RGMII_IDX	2
#define EMAC_DEP_TAH_IDX	3
#define EMAC_DEP_MDIO_IDX	4
#define EMAC_DEP_PREV_IDX	5
#define EMAC_DEP_COUNT		6
2249
/* Try to resolve every dependency in @deps one step further:
 * phandle -> device node -> platform device -> driver data.
 * Returns true once all entries are satisfied; a zero phandle counts as
 * trivially satisfied. Called repeatedly from emac_wait_deps()'s wait
 * loop, so partial progress is kept in @deps between calls.
 */
static int __devinit emac_check_deps(struct emac_instance *dev,
				     struct emac_depentry *deps)
{
	int i, there = 0;
	struct device_node *np;

	for (i = 0; i < EMAC_DEP_COUNT; i++) {
		/* no dependency on that item, all right */
		if (deps[i].phandle == 0) {
			there++;
			continue;
		}
		/* special case for blist as the dependency might go away */
		if (i == EMAC_DEP_PREV_IDX) {
			np = *(dev->blist - 1);
			if (np == NULL) {
				/* previous EMAC dropped out of the boot
				 * list: stop depending on it */
				deps[i].phandle = 0;
				there++;
				continue;
			}
			if (deps[i].node == NULL)
				deps[i].node = of_node_get(np);
		}
		if (deps[i].node == NULL)
			deps[i].node = of_find_node_by_phandle(deps[i].phandle);
		if (deps[i].node == NULL)
			continue;
		if (deps[i].ofdev == NULL)
			deps[i].ofdev = of_find_device_by_node(deps[i].node);
		if (deps[i].ofdev == NULL)
			continue;
		if (deps[i].drvdata == NULL)
			deps[i].drvdata = dev_get_drvdata(&deps[i].ofdev->dev);
		if (deps[i].drvdata != NULL)
			there++;
	}
	return there == EMAC_DEP_COUNT;
}
2288
2289static void emac_put_deps(struct emac_instance *dev)
2290{
2291 if (dev->mal_dev)
2292 of_dev_put(dev->mal_dev);
2293 if (dev->zmii_dev)
2294 of_dev_put(dev->zmii_dev);
2295 if (dev->rgmii_dev)
2296 of_dev_put(dev->rgmii_dev);
2297 if (dev->mdio_dev)
2298 of_dev_put(dev->mdio_dev);
2299 if (dev->tah_dev)
2300 of_dev_put(dev->tah_dev);
2301}
2302
/* Platform-bus notifier: wake anyone sleeping in emac_wait_deps()
 * whenever a driver binds to a device, since that may satisfy one of our
 * probe-time dependencies.
 */
static int __devinit emac_of_bus_notify(struct notifier_block *nb,
					unsigned long action, void *data)
{
	/* We are only interested in driver-bind events (the old comment
	 * said "device addition", but the condition below checks
	 * BUS_NOTIFY_BOUND_DRIVER) */
	if (action == BUS_NOTIFY_BOUND_DRIVER)
		wake_up_all(&emac_probe_wait);
	return 0;
}

static struct notifier_block emac_of_bus_notifier __devinitdata = {
	.notifier_call = emac_of_bus_notify
};
2315
/* Resolve all dependent devices for this EMAC, sleeping (with a bus
 * notifier to wake us on driver binds) until every dependency has probed
 * or EMAC_PROBE_DEP_TIMEOUT expires. On success the emac_instance device
 * pointers are filled in with references held; on timeout all references
 * are dropped and -ENODEV is returned.
 */
static int __devinit emac_wait_deps(struct emac_instance *dev)
{
	struct emac_depentry deps[EMAC_DEP_COUNT];
	int i, err;

	memset(&deps, 0, sizeof(deps));

	deps[EMAC_DEP_MAL_IDX].phandle = dev->mal_ph;
	deps[EMAC_DEP_ZMII_IDX].phandle = dev->zmii_ph;
	deps[EMAC_DEP_RGMII_IDX].phandle = dev->rgmii_ph;
	if (dev->tah_ph)
		deps[EMAC_DEP_TAH_IDX].phandle = dev->tah_ph;
	if (dev->mdio_ph)
		deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph;
	/* non-zero marker: "wait for the previous EMAC in the boot list";
	 * emac_check_deps() treats this index specially */
	if (dev->blist && dev->blist > emac_boot_list)
		deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu;
	bus_register_notifier(&platform_bus_type, &emac_of_bus_notifier);
	wait_event_timeout(emac_probe_wait,
			   emac_check_deps(dev, deps),
			   EMAC_PROBE_DEP_TIMEOUT);
	bus_unregister_notifier(&platform_bus_type, &emac_of_bus_notifier);
	err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
	for (i = 0; i < EMAC_DEP_COUNT; i++) {
		if (deps[i].node)
			of_node_put(deps[i].node);
		if (err && deps[i].ofdev)
			of_dev_put(deps[i].ofdev);
	}
	if (err == 0) {
		dev->mal_dev = deps[EMAC_DEP_MAL_IDX].ofdev;
		dev->zmii_dev = deps[EMAC_DEP_ZMII_IDX].ofdev;
		dev->rgmii_dev = deps[EMAC_DEP_RGMII_IDX].ofdev;
		dev->tah_dev = deps[EMAC_DEP_TAH_IDX].ofdev;
		dev->mdio_dev = deps[EMAC_DEP_MDIO_IDX].ofdev;
	}
	/* the "previous EMAC" reference is only needed during the wait */
	if (deps[EMAC_DEP_PREV_IDX].ofdev)
		of_dev_put(deps[EMAC_DEP_PREV_IDX].ofdev);
	return err;
}
2355
/* Read the u32 device-tree property @name of @np into @val.
 * Returns 0 on success; -ENODEV when the property is absent or shorter
 * than a u32, logging an error only when @fatal is set (callers use
 * fatal=0 for optional properties with defaults).
 */
static int __devinit emac_read_uint_prop(struct device_node *np, const char *name,
					 u32 *val, int fatal)
{
	int len;
	const u32 *prop = of_get_property(np, name, &len);
	if (prop == NULL || len < sizeof(u32)) {
		if (fatal)
			printk(KERN_ERR "%s: missing %s property\n",
			       np->full_name, name);
		return -ENODEV;
	}
	*val = *prop;
	return 0;
}
2370
/* Locate and initialize the PHY (or set up a PHY-less configuration),
 * then program initial link parameters: autonegotiation when supported,
 * otherwise the highest speed/duplex the PHY's feature mask allows.
 * Returns 0 on success, -ENXIO when no PHY answers on any MDIO address.
 */
static int __devinit emac_init_phy(struct emac_instance *dev)
{
	struct device_node *np = dev->ofdev->dev.of_node;
	struct net_device *ndev = dev->ndev;
	u32 phy_map, adv;
	int i;

	dev->phy.dev = ndev;
	dev->phy.mode = dev->phy_mode;

	/* PHY-less configuration.
	 * XXX I probably should move these settings to the dev tree
	 */
	if (dev->phy_address == 0xffffffff && dev->phy_map == 0xffffffff) {
		emac_reset(dev);

		/* address = -1 marks "no PHY"; assume gige or 100FD
		 * depending on what the interface mode supports */
		dev->phy.address = -1;
		dev->phy.features = SUPPORTED_MII;
		if (emac_phy_supports_gige(dev->phy_mode))
			dev->phy.features |= SUPPORTED_1000baseT_Full;
		else
			dev->phy.features |= SUPPORTED_100baseT_Full;
		dev->phy.pause = 1;

		return 0;
	}

	mutex_lock(&emac_phy_map_lock);
	/* bits set = addresses we must NOT probe (masked out or busy) */
	phy_map = dev->phy_map | busy_phy_map;

	DBG(dev, "PHY maps %08x %08x" NL, dev->phy_map, busy_phy_map);

	dev->phy.mdio_read = emac_mdio_read;
	dev->phy.mdio_write = emac_mdio_write;

	/* Enable internal clock source */
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
#endif
	/* PHY clock workaround */
	emac_rx_clk_tx(dev);

	/* Enable internal clock source on 440GX
	 * NOTE(review): this repeats the identical dcri_clrset() call just
	 * above; setting the same bit twice is harmless but redundant */
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
#endif
	/* Configure EMAC with defaults so we can at least use MDIO
	 * This is needed mostly for 440GX
	 */
	if (emac_phy_gpcs(dev->phy.mode)) {
		/* XXX
		 * Make GPCS PHY address equal to EMAC index.
		 * We probably should take into account busy_phy_map
		 * and/or phy_map here.
		 *
		 * Note that the busy_phy_map is currently global
		 * while it should probably be per-ASIC...
		 */
		dev->phy.gpcs_address = dev->gpcs_address;
		if (dev->phy.gpcs_address == 0xffffffff)
			dev->phy.address = dev->cell_index;
	}

	emac_configure(dev);

	/* an explicit phy-address limits the scan to that one address */
	if (dev->phy_address != 0xffffffff)
		phy_map = ~(1 << dev->phy_address);

	for (i = 0; i < 0x20; phy_map >>= 1, ++i)
		if (!(phy_map & 1)) {
			int r;
			busy_phy_map |= 1 << i;

			/* Quick check if there is a PHY at the address */
			r = emac_mdio_read(dev->ndev, i, MII_BMCR);
			if (r == 0xffff || r < 0)
				continue;
			if (!emac_mii_phy_probe(&dev->phy, i))
				break;
		}

	/* Enable external clock source */
#ifdef CONFIG_PPC_DCR_NATIVE
	if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
		dcri_clrset(SDR0, SDR0_MFR, SDR0_MFR_ECS, 0);
#endif
	mutex_unlock(&emac_phy_map_lock);
	/* i == 0x20 means the scan loop above never hit a working PHY */
	if (i == 0x20) {
		printk(KERN_WARNING "%s: can't find PHY!\n", np->full_name);
		return -ENXIO;
	}

	/* Init PHY */
	if (dev->phy.def->ops->init)
		dev->phy.def->ops->init(&dev->phy);

	/* Disable any PHY features not supported by the platform */
	dev->phy.def->features &= ~dev->phy_feat_exc;

	/* Setup initial link parameters */
	if (dev->phy.features & SUPPORTED_Autoneg) {
		adv = dev->phy.features;
		if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x))
			adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
		/* Restart autonegotiation */
		dev->phy.def->ops->setup_aneg(&dev->phy, adv);
	} else {
		u32 f = dev->phy.def->features;
		int speed = SPEED_10, fd = DUPLEX_HALF;

		/* Select highest supported speed/duplex */
		if (f & SUPPORTED_1000baseT_Full) {
			speed = SPEED_1000;
			fd = DUPLEX_FULL;
		} else if (f & SUPPORTED_1000baseT_Half)
			speed = SPEED_1000;
		else if (f & SUPPORTED_100baseT_Full) {
			speed = SPEED_100;
			fd = DUPLEX_FULL;
		} else if (f & SUPPORTED_100baseT_Half)
			speed = SPEED_100;
		else if (f & SUPPORTED_10baseT_Full)
			fd = DUPLEX_FULL;

		/* Force link parameters */
		dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
	}
	return 0;
}
2505
/* Parse this EMAC's device-tree node into @dev: MAL wiring, FIFO/MTU
 * sizes, PHY addressing, bridge (TAH/ZMII/RGMII/MDIO) phandles, feature
 * bits and the MAC address. Optional properties get defaults (0xffffffff
 * meaning "not specified" for addresses/ports); missing mandatory ones
 * fail with -ENXIO.
 */
static int __devinit emac_init_config(struct emac_instance *dev)
{
	struct device_node *np = dev->ofdev->dev.of_node;
	const void *p;

	/* Read config from device-tree */
	if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "mal-tx-channel", &dev->mal_tx_chan, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "mal-rx-channel", &dev->mal_rx_chan, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "cell-index", &dev->cell_index, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "max-frame-size", &dev->max_mtu, 0))
		dev->max_mtu = 1500;
	if (emac_read_uint_prop(np, "rx-fifo-size", &dev->rx_fifo_size, 0))
		dev->rx_fifo_size = 2048;
	if (emac_read_uint_prop(np, "tx-fifo-size", &dev->tx_fifo_size, 0))
		dev->tx_fifo_size = 2048;
	if (emac_read_uint_prop(np, "rx-fifo-size-gige", &dev->rx_fifo_size_gige, 0))
		dev->rx_fifo_size_gige = dev->rx_fifo_size;
	if (emac_read_uint_prop(np, "tx-fifo-size-gige", &dev->tx_fifo_size_gige, 0))
		dev->tx_fifo_size_gige = dev->tx_fifo_size;
	if (emac_read_uint_prop(np, "phy-address", &dev->phy_address, 0))
		dev->phy_address = 0xffffffff;
	if (emac_read_uint_prop(np, "phy-map", &dev->phy_map, 0))
		dev->phy_map = 0xffffffff;
	if (emac_read_uint_prop(np, "gpcs-address", &dev->gpcs_address, 0))
		dev->gpcs_address = 0xffffffff;
	/* the OPB clock frequency lives on the parent bus node */
	if (emac_read_uint_prop(np->parent, "clock-frequency", &dev->opb_bus_freq, 1))
		return -ENXIO;
	if (emac_read_uint_prop(np, "tah-device", &dev->tah_ph, 0))
		dev->tah_ph = 0;
	if (emac_read_uint_prop(np, "tah-channel", &dev->tah_port, 0))
		dev->tah_port = 0;
	if (emac_read_uint_prop(np, "mdio-device", &dev->mdio_ph, 0))
		dev->mdio_ph = 0;
	if (emac_read_uint_prop(np, "zmii-device", &dev->zmii_ph, 0))
		dev->zmii_ph = 0;
	if (emac_read_uint_prop(np, "zmii-channel", &dev->zmii_port, 0))
		dev->zmii_port = 0xffffffff;
	if (emac_read_uint_prop(np, "rgmii-device", &dev->rgmii_ph, 0))
		dev->rgmii_ph = 0;
	if (emac_read_uint_prop(np, "rgmii-channel", &dev->rgmii_port, 0))
		dev->rgmii_port = 0xffffffff;
	if (emac_read_uint_prop(np, "fifo-entry-size", &dev->fifo_entry_size, 0))
		dev->fifo_entry_size = 16;
	if (emac_read_uint_prop(np, "mal-burst-size", &dev->mal_burst_size, 0))
		dev->mal_burst_size = 256;

	/* PHY mode needs some decoding */
	dev->phy_mode = of_get_phy_mode(np);
	if (dev->phy_mode < 0)
		dev->phy_mode = PHY_MODE_NA;

	/* Check EMAC version */
	if (of_device_is_compatible(np, "ibm,emac4sync")) {
		dev->features |= (EMAC_FTR_EMAC4 | EMAC_FTR_EMAC4SYNC);
		if (of_device_is_compatible(np, "ibm,emac-460ex") ||
		    of_device_is_compatible(np, "ibm,emac-460gt"))
			dev->features |= EMAC_FTR_460EX_PHY_CLK_FIX;
		if (of_device_is_compatible(np, "ibm,emac-405ex") ||
		    of_device_is_compatible(np, "ibm,emac-405exr"))
			dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
	} else if (of_device_is_compatible(np, "ibm,emac4")) {
		dev->features |= EMAC_FTR_EMAC4;
		if (of_device_is_compatible(np, "ibm,emac-440gx"))
			dev->features |= EMAC_FTR_440GX_PHY_CLK_FIX;
	} else {
		if (of_device_is_compatible(np, "ibm,emac-440ep") ||
		    of_device_is_compatible(np, "ibm,emac-440gr"))
			dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
		if (of_device_is_compatible(np, "ibm,emac-405ez")) {
#ifdef CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL
			dev->features |= EMAC_FTR_NO_FLOW_CONTROL_40x;
#else
			printk(KERN_ERR "%s: Flow control not disabled!\n",
			       np->full_name);
			return -ENXIO;
#endif
		}

	}

	/* Fixup some feature bits based on the device tree */
	if (of_get_property(np, "has-inverted-stacr-oc", NULL))
		dev->features |= EMAC_FTR_STACR_OC_INVERT;
	if (of_get_property(np, "has-new-stacr-staopc", NULL))
		dev->features |= EMAC_FTR_HAS_NEW_STACR;

	/* CAB lacks the appropriate properties */
	if (of_device_is_compatible(np, "ibm,emac-axon"))
		dev->features |= EMAC_FTR_HAS_NEW_STACR |
			EMAC_FTR_STACR_OC_INVERT;

	/* Enable TAH/ZMII/RGMII features as found; a phandle to a bridge
	 * whose support is compiled out is a hard configuration error */
	if (dev->tah_ph != 0) {
#ifdef CONFIG_IBM_NEW_EMAC_TAH
		dev->features |= EMAC_FTR_HAS_TAH;
#else
		printk(KERN_ERR "%s: TAH support not enabled !\n",
		       np->full_name);
		return -ENXIO;
#endif
	}

	if (dev->zmii_ph != 0) {
#ifdef CONFIG_IBM_NEW_EMAC_ZMII
		dev->features |= EMAC_FTR_HAS_ZMII;
#else
		printk(KERN_ERR "%s: ZMII support not enabled !\n",
		       np->full_name);
		return -ENXIO;
#endif
	}

	if (dev->rgmii_ph != 0) {
#ifdef CONFIG_IBM_NEW_EMAC_RGMII
		dev->features |= EMAC_FTR_HAS_RGMII;
#else
		printk(KERN_ERR "%s: RGMII support not enabled !\n",
		       np->full_name);
		return -ENXIO;
#endif
	}

	/* Read MAC-address */
	p = of_get_property(np, "local-mac-address", NULL);
	if (p == NULL) {
		printk(KERN_ERR "%s: Can't find local-mac-address property\n",
		       np->full_name);
		return -ENXIO;
	}
	memcpy(dev->ndev->dev_addr, p, 6);

	/* IAHT and GAHT filter parameterization */
	if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
		dev->xaht_slots_shift = EMAC4SYNC_XAHT_SLOTS_SHIFT;
		dev->xaht_width_shift = EMAC4SYNC_XAHT_WIDTH_SHIFT;
	} else {
		dev->xaht_slots_shift = EMAC4_XAHT_SLOTS_SHIFT;
		dev->xaht_width_shift = EMAC4_XAHT_WIDTH_SHIFT;
	}

	DBG(dev, "features : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE);
	DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige);
	DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige);
	DBG(dev, "max_mtu : %d\n", dev->max_mtu);
	DBG(dev, "OPB freq : %d\n", dev->opb_bus_freq);

	return 0;
}
2659
/* net_device ops for non-gige EMACs: plain (non-SG) xmit, standard
 * Ethernet MTU handling.
 */
static const struct net_device_ops emac_netdev_ops = {
	.ndo_open		= emac_open,
	.ndo_stop		= emac_close,
	.ndo_get_stats		= emac_stats,
	.ndo_set_multicast_list	= emac_set_multicast_list,
	.ndo_do_ioctl		= emac_ioctl,
	.ndo_tx_timeout		= emac_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_start_xmit		= emac_start_xmit,
	.ndo_change_mtu		= eth_change_mtu,
};
2672
/* net_device ops for gige-capable EMACs: scatter/gather xmit and driver
 * MTU changes (up to the device-tree max-frame-size).
 */
static const struct net_device_ops emac_gige_netdev_ops = {
	.ndo_open		= emac_open,
	.ndo_stop		= emac_close,
	.ndo_get_stats		= emac_stats,
	.ndo_set_multicast_list	= emac_set_multicast_list,
	.ndo_do_ioctl		= emac_ioctl,
	.ndo_tx_timeout		= emac_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_start_xmit		= emac_start_xmit_sg,
	.ndo_change_mtu		= emac_change_mtu,
};
2685
2686static int __devinit emac_probe(struct platform_device *ofdev)
2687{
2688 struct net_device *ndev;
2689 struct emac_instance *dev;
2690 struct device_node *np = ofdev->dev.of_node;
2691 struct device_node **blist = NULL;
2692 int err, i;
2693
2694 /* Skip unused/unwired EMACS. We leave the check for an unused
2695 * property here for now, but new flat device trees should set a
2696 * status property to "disabled" instead.
2697 */
2698 if (of_get_property(np, "unused", NULL) || !of_device_is_available(np))
2699 return -ENODEV;
2700
2701 /* Find ourselves in the bootlist if we are there */
2702 for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
2703 if (emac_boot_list[i] == np)
2704 blist = &emac_boot_list[i];
2705
2706 /* Allocate our net_device structure */
2707 err = -ENOMEM;
2708 ndev = alloc_etherdev(sizeof(struct emac_instance));
2709 if (!ndev) {
2710 printk(KERN_ERR "%s: could not allocate ethernet device!\n",
2711 np->full_name);
2712 goto err_gone;
2713 }
2714 dev = netdev_priv(ndev);
2715 dev->ndev = ndev;
2716 dev->ofdev = ofdev;
2717 dev->blist = blist;
2718 SET_NETDEV_DEV(ndev, &ofdev->dev);
2719
2720 /* Initialize some embedded data structures */
2721 mutex_init(&dev->mdio_lock);
2722 mutex_init(&dev->link_lock);
2723 spin_lock_init(&dev->lock);
2724 INIT_WORK(&dev->reset_work, emac_reset_work);
2725
2726 /* Init various config data based on device-tree */
2727 err = emac_init_config(dev);
2728 if (err != 0)
2729 goto err_free;
2730
2731 /* Get interrupts. EMAC irq is mandatory, WOL irq is optional */
2732 dev->emac_irq = irq_of_parse_and_map(np, 0);
2733 dev->wol_irq = irq_of_parse_and_map(np, 1);
2734 if (dev->emac_irq == NO_IRQ) {
2735 printk(KERN_ERR "%s: Can't map main interrupt\n", np->full_name);
2736 goto err_free;
2737 }
2738 ndev->irq = dev->emac_irq;
2739
2740 /* Map EMAC regs */
2741 if (of_address_to_resource(np, 0, &dev->rsrc_regs)) {
2742 printk(KERN_ERR "%s: Can't get registers address\n",
2743 np->full_name);
2744 goto err_irq_unmap;
2745 }
2746 // TODO : request_mem_region
2747 dev->emacp = ioremap(dev->rsrc_regs.start,
2748 resource_size(&dev->rsrc_regs));
2749 if (dev->emacp == NULL) {
2750 printk(KERN_ERR "%s: Can't map device registers!\n",
2751 np->full_name);
2752 err = -ENOMEM;
2753 goto err_irq_unmap;
2754 }
2755
2756 /* Wait for dependent devices */
2757 err = emac_wait_deps(dev);
2758 if (err) {
2759 printk(KERN_ERR
2760 "%s: Timeout waiting for dependent devices\n",
2761 np->full_name);
2762 /* display more info about what's missing ? */
2763 goto err_reg_unmap;
2764 }
2765 dev->mal = dev_get_drvdata(&dev->mal_dev->dev);
2766 if (dev->mdio_dev != NULL)
2767 dev->mdio_instance = dev_get_drvdata(&dev->mdio_dev->dev);
2768
2769 /* Register with MAL */
2770 dev->commac.ops = &emac_commac_ops;
2771 dev->commac.dev = dev;
2772 dev->commac.tx_chan_mask = MAL_CHAN_MASK(dev->mal_tx_chan);
2773 dev->commac.rx_chan_mask = MAL_CHAN_MASK(dev->mal_rx_chan);
2774 err = mal_register_commac(dev->mal, &dev->commac);
2775 if (err) {
2776 printk(KERN_ERR "%s: failed to register with mal %s!\n",
2777 np->full_name, dev->mal_dev->dev.of_node->full_name);
2778 goto err_rel_deps;
2779 }
2780 dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
2781 dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);
2782
2783 /* Get pointers to BD rings */
2784 dev->tx_desc =
2785 dev->mal->bd_virt + mal_tx_bd_offset(dev->mal, dev->mal_tx_chan);
2786 dev->rx_desc =
2787 dev->mal->bd_virt + mal_rx_bd_offset(dev->mal, dev->mal_rx_chan);
2788
2789 DBG(dev, "tx_desc %p" NL, dev->tx_desc);
2790 DBG(dev, "rx_desc %p" NL, dev->rx_desc);
2791
2792 /* Clean rings */
2793 memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
2794 memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
2795 memset(dev->tx_skb, 0, NUM_TX_BUFF * sizeof(struct sk_buff *));
2796 memset(dev->rx_skb, 0, NUM_RX_BUFF * sizeof(struct sk_buff *));
2797
2798 /* Attach to ZMII, if needed */
2799 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) &&
2800 (err = zmii_attach(dev->zmii_dev, dev->zmii_port, &dev->phy_mode)) != 0)
2801 goto err_unreg_commac;
2802
2803 /* Attach to RGMII, if needed */
2804 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII) &&
2805 (err = rgmii_attach(dev->rgmii_dev, dev->rgmii_port, dev->phy_mode)) != 0)
2806 goto err_detach_zmii;
2807
2808 /* Attach to TAH, if needed */
2809 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
2810 (err = tah_attach(dev->tah_dev, dev->tah_port)) != 0)
2811 goto err_detach_rgmii;
2812
2813 /* Set some link defaults before we can find out real parameters */
2814 dev->phy.speed = SPEED_100;
2815 dev->phy.duplex = DUPLEX_FULL;
2816 dev->phy.autoneg = AUTONEG_DISABLE;
2817 dev->phy.pause = dev->phy.asym_pause = 0;
2818 dev->stop_timeout = STOP_TIMEOUT_100;
2819 INIT_DELAYED_WORK(&dev->link_work, emac_link_timer);
2820
2821 /* Find PHY if any */
2822 err = emac_init_phy(dev);
2823 if (err != 0)
2824 goto err_detach_tah;
2825
2826 if (dev->tah_dev) {
2827 ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG;
2828 ndev->features |= ndev->hw_features | NETIF_F_RXCSUM;
2829 }
2830 ndev->watchdog_timeo = 5 * HZ;
2831 if (emac_phy_supports_gige(dev->phy_mode)) {
2832 ndev->netdev_ops = &emac_gige_netdev_ops;
2833 dev->commac.ops = &emac_commac_sg_ops;
2834 } else
2835 ndev->netdev_ops = &emac_netdev_ops;
2836 SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
2837
2838 netif_carrier_off(ndev);
2839
2840 err = register_netdev(ndev);
2841 if (err) {
2842 printk(KERN_ERR "%s: failed to register net device (%d)!\n",
2843 np->full_name, err);
2844 goto err_detach_tah;
2845 }
2846
2847 /* Set our drvdata last as we don't want them visible until we are
2848 * fully initialized
2849 */
2850 wmb();
2851 dev_set_drvdata(&ofdev->dev, dev);
2852
2853 /* There's a new kid in town ! Let's tell everybody */
2854 wake_up_all(&emac_probe_wait);
2855
2856
2857 printk(KERN_INFO "%s: EMAC-%d %s, MAC %pM\n",
2858 ndev->name, dev->cell_index, np->full_name, ndev->dev_addr);
2859
2860 if (dev->phy_mode == PHY_MODE_SGMII)
2861 printk(KERN_NOTICE "%s: in SGMII mode\n", ndev->name);
2862
2863 if (dev->phy.address >= 0)
2864 printk("%s: found %s PHY (0x%02x)\n", ndev->name,
2865 dev->phy.def->name, dev->phy.address);
2866
2867 emac_dbg_register(dev);
2868
2869 /* Life is good */
2870 return 0;
2871
2872 /* I have a bad feeling about this ... */
2873
2874 err_detach_tah:
2875 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2876 tah_detach(dev->tah_dev, dev->tah_port);
2877 err_detach_rgmii:
2878 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2879 rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
2880 err_detach_zmii:
2881 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2882 zmii_detach(dev->zmii_dev, dev->zmii_port);
2883 err_unreg_commac:
2884 mal_unregister_commac(dev->mal, &dev->commac);
2885 err_rel_deps:
2886 emac_put_deps(dev);
2887 err_reg_unmap:
2888 iounmap(dev->emacp);
2889 err_irq_unmap:
2890 if (dev->wol_irq != NO_IRQ)
2891 irq_dispose_mapping(dev->wol_irq);
2892 if (dev->emac_irq != NO_IRQ)
2893 irq_dispose_mapping(dev->emac_irq);
2894 err_free:
2895 free_netdev(ndev);
2896 err_gone:
2897 /* if we were on the bootlist, remove us as we won't show up and
2898 * wake up all waiters to notify them in case they were waiting
2899 * on us
2900 */
2901 if (blist) {
2902 *blist = NULL;
2903 wake_up_all(&emac_probe_wait);
2904 }
2905 return err;
2906}
2907
/* Driver ->remove: unwind everything emac_probe() set up, in reverse
 * order (netdev, pending reset work, bridges, MAL, dependency refs,
 * debugfs, mappings).
 */
static int __devexit emac_remove(struct platform_device *ofdev)
{
	struct emac_instance *dev = dev_get_drvdata(&ofdev->dev);

	DBG(dev, "remove" NL);

	dev_set_drvdata(&ofdev->dev, NULL);

	unregister_netdev(dev->ndev);

	/* make sure no reset work is still running before tearing down */
	cancel_work_sync(&dev->reset_work);

	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
		tah_detach(dev->tah_dev, dev->tah_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
		rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
	if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
		zmii_detach(dev->zmii_dev, dev->zmii_port);

	mal_unregister_commac(dev->mal, &dev->commac);
	emac_put_deps(dev);

	emac_dbg_unregister(dev);
	iounmap(dev->emacp);

	if (dev->wol_irq != NO_IRQ)
		irq_dispose_mapping(dev->wol_irq);
	if (dev->emac_irq != NO_IRQ)
		irq_dispose_mapping(dev->emac_irq);

	free_netdev(dev->ndev);

	return 0;
}
2942
/* XXX Features in here should be replaced by properties... */
/* Device-tree match table: plain EMAC, EMAC4 and EMAC4SYNC cores */
static struct of_device_id emac_match[] =
{
	{
		.type		= "network",
		.compatible	= "ibm,emac",
	},
	{
		.type		= "network",
		.compatible	= "ibm,emac4",
	},
	{
		.type		= "network",
		.compatible	= "ibm,emac4sync",
	},
	{},
};
MODULE_DEVICE_TABLE(of, emac_match);
2961
/* Platform driver glue; emac_probe()/emac_remove() do the real work */
static struct platform_driver emac_driver = {
	.driver = {
		.name = "emac",
		.owner = THIS_MODULE,
		.of_match_table = emac_match,
	},
	.probe = emac_probe,
	.remove = emac_remove,
};
2971
2972static void __init emac_make_bootlist(void)
2973{
2974 struct device_node *np = NULL;
2975 int j, max, i = 0, k;
2976 int cell_indices[EMAC_BOOT_LIST_SIZE];
2977
2978 /* Collect EMACs */
2979 while((np = of_find_all_nodes(np)) != NULL) {
2980 const u32 *idx;
2981
2982 if (of_match_node(emac_match, np) == NULL)
2983 continue;
2984 if (of_get_property(np, "unused", NULL))
2985 continue;
2986 idx = of_get_property(np, "cell-index", NULL);
2987 if (idx == NULL)
2988 continue;
2989 cell_indices[i] = *idx;
2990 emac_boot_list[i++] = of_node_get(np);
2991 if (i >= EMAC_BOOT_LIST_SIZE) {
2992 of_node_put(np);
2993 break;
2994 }
2995 }
2996 max = i;
2997
2998 /* Bubble sort them (doh, what a creative algorithm :-) */
2999 for (i = 0; max > 1 && (i < (max - 1)); i++)
3000 for (j = i; j < max; j++) {
3001 if (cell_indices[i] > cell_indices[j]) {
3002 np = emac_boot_list[i];
3003 emac_boot_list[i] = emac_boot_list[j];
3004 emac_boot_list[j] = np;
3005 k = cell_indices[i];
3006 cell_indices[i] = cell_indices[j];
3007 cell_indices[j] = k;
3008 }
3009 }
3010}
3011
/* Module init: set up debugging, build the ordered EMAC boot list, then
 * bring up the submodules (MAL, ZMII, RGMII, TAH) before registering the
 * platform driver. Unwinds in reverse order on any failure.
 */
static int __init emac_init(void)
{
	int rc;

	printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");

	/* Init debug stuff */
	emac_init_debug();

	/* Build EMAC boot list */
	emac_make_bootlist();

	/* Init submodules */
	rc = mal_init();
	if (rc)
		goto err;
	rc = zmii_init();
	if (rc)
		goto err_mal;
	rc = rgmii_init();
	if (rc)
		goto err_zmii;
	rc = tah_init();
	if (rc)
		goto err_rgmii;
	rc = platform_driver_register(&emac_driver);
	if (rc)
		goto err_tah;

	return 0;

 err_tah:
	tah_exit();
 err_rgmii:
	rgmii_exit();
 err_zmii:
	zmii_exit();
 err_mal:
	mal_exit();
 err:
	return rc;
}
3054
3055static void __exit emac_exit(void)
3056{
3057 int i;
3058
3059 platform_driver_unregister(&emac_driver);
3060
3061 tah_exit();
3062 rgmii_exit();
3063 zmii_exit();
3064 mal_exit();
3065 emac_fini_debug();
3066
3067 /* Destroy EMAC boot list */
3068 for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
3069 if (emac_boot_list[i])
3070 of_node_put(emac_boot_list[i]);
3071}
3072
3073module_init(emac_init);
3074module_exit(emac_exit);
diff --git a/drivers/net/ethernet/ibm/emac/core.h b/drivers/net/ethernet/ibm/emac/core.h
new file mode 100644
index 00000000000..4fec0844d59
--- /dev/null
+++ b/drivers/net/ethernet/ibm/emac/core.h
@@ -0,0 +1,462 @@
1/*
2 * drivers/net/ibm_newemac/core.h
3 *
4 * Driver for PowerPC 4xx on-chip ethernet controller.
5 *
6 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
7 * <benh@kernel.crashing.org>
8 *
9 * Based on the arch/ppc version of the driver:
10 *
11 * Copyright (c) 2004, 2005 Zultys Technologies.
12 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
13 *
14 * Based on original work by
15 * Armin Kuster <akuster@mvista.com>
16 * Johnnie Peters <jpeters@mvista.com>
 17 * Copyright 2000, 2001 MontaVista Software Inc.
18 *
19 * This program is free software; you can redistribute it and/or modify it
20 * under the terms of the GNU General Public License as published by the
21 * Free Software Foundation; either version 2 of the License, or (at your
22 * option) any later version.
23 *
24 */
25#ifndef __IBM_NEWEMAC_CORE_H
26#define __IBM_NEWEMAC_CORE_H
27
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/list.h>
31#include <linux/kernel.h>
32#include <linux/interrupt.h>
33#include <linux/netdevice.h>
34#include <linux/dma-mapping.h>
35#include <linux/spinlock.h>
36#include <linux/of_platform.h>
37#include <linux/slab.h>
38
39#include <asm/io.h>
40#include <asm/dcr.h>
41
42#include "emac.h"
43#include "phy.h"
44#include "zmii.h"
45#include "rgmii.h"
46#include "mal.h"
47#include "tah.h"
48#include "debug.h"
49
50#define NUM_TX_BUFF CONFIG_IBM_NEW_EMAC_TXB
51#define NUM_RX_BUFF CONFIG_IBM_NEW_EMAC_RXB
52
53/* Simple sanity check */
54#if NUM_TX_BUFF > 256 || NUM_RX_BUFF > 256
55#error Invalid number of buffer descriptors (greater than 256)
56#endif
57
58#define EMAC_MIN_MTU 46
59
60/* Maximum L2 header length (VLAN tagged, no FCS) */
61#define EMAC_MTU_OVERHEAD (6 * 2 + 2 + 4)
62
63/* RX BD size for the given MTU */
64static inline int emac_rx_size(int mtu)
65{
66 if (mtu > ETH_DATA_LEN)
67 return MAL_MAX_RX_SIZE;
68 else
69 return mal_rx_size(ETH_DATA_LEN + EMAC_MTU_OVERHEAD);
70}
71
72#define EMAC_DMA_ALIGN(x) ALIGN((x), dma_get_cache_alignment())
73
74#define EMAC_RX_SKB_HEADROOM \
75 EMAC_DMA_ALIGN(CONFIG_IBM_NEW_EMAC_RX_SKB_HEADROOM)
76
77/* Size of RX skb for the given MTU */
78static inline int emac_rx_skb_size(int mtu)
79{
80 int size = max(mtu + EMAC_MTU_OVERHEAD, emac_rx_size(mtu));
81 return EMAC_DMA_ALIGN(size + 2) + EMAC_RX_SKB_HEADROOM;
82}
83
84/* RX DMA sync size */
/* Number of bytes to DMA-sync for an RX buffer of the given MTU:
 * the BD size plus the same 2-byte alignment pad, cache-aligned.
 */
static inline int emac_rx_sync_size(int mtu)
{
	return EMAC_DMA_ALIGN(emac_rx_size(mtu) + 2);
}
89
 90/* Driver statistics is split into two parts to make it more cache friendly:
91 * - normal statistics (packet count, etc)
92 * - error statistics
93 *
94 * When statistics is requested by ethtool, these parts are concatenated,
95 * normal one goes first.
96 *
97 * Please, keep these structures in sync with emac_stats_keys.
98 */
99
100/* Normal TX/RX Statistics */
101struct emac_stats {
102 u64 rx_packets;
103 u64 rx_bytes;
104 u64 tx_packets;
105 u64 tx_bytes;
106 u64 rx_packets_csum;
107 u64 tx_packets_csum;
108};
109
110/* Error statistics */
111struct emac_error_stats {
112 u64 tx_undo;
113
114 /* Software RX Errors */
115 u64 rx_dropped_stack;
116 u64 rx_dropped_oom;
117 u64 rx_dropped_error;
118 u64 rx_dropped_resize;
119 u64 rx_dropped_mtu;
120 u64 rx_stopped;
121 /* BD reported RX errors */
122 u64 rx_bd_errors;
123 u64 rx_bd_overrun;
124 u64 rx_bd_bad_packet;
125 u64 rx_bd_runt_packet;
126 u64 rx_bd_short_event;
127 u64 rx_bd_alignment_error;
128 u64 rx_bd_bad_fcs;
129 u64 rx_bd_packet_too_long;
130 u64 rx_bd_out_of_range;
131 u64 rx_bd_in_range;
132 /* EMAC IRQ reported RX errors */
133 u64 rx_parity;
134 u64 rx_fifo_overrun;
135 u64 rx_overrun;
136 u64 rx_bad_packet;
137 u64 rx_runt_packet;
138 u64 rx_short_event;
139 u64 rx_alignment_error;
140 u64 rx_bad_fcs;
141 u64 rx_packet_too_long;
142 u64 rx_out_of_range;
143 u64 rx_in_range;
144
145 /* Software TX Errors */
146 u64 tx_dropped;
147 /* BD reported TX errors */
148 u64 tx_bd_errors;
149 u64 tx_bd_bad_fcs;
150 u64 tx_bd_carrier_loss;
151 u64 tx_bd_excessive_deferral;
152 u64 tx_bd_excessive_collisions;
153 u64 tx_bd_late_collision;
154 u64 tx_bd_multple_collisions;
155 u64 tx_bd_single_collision;
156 u64 tx_bd_underrun;
157 u64 tx_bd_sqe;
158 /* EMAC IRQ reported TX errors */
159 u64 tx_parity;
160 u64 tx_underrun;
161 u64 tx_sqe;
162 u64 tx_errors;
163};
164
165#define EMAC_ETHTOOL_STATS_COUNT ((sizeof(struct emac_stats) + \
166 sizeof(struct emac_error_stats)) \
167 / sizeof(u64))
168
169struct emac_instance {
170 struct net_device *ndev;
171 struct resource rsrc_regs;
172 struct emac_regs __iomem *emacp;
173 struct platform_device *ofdev;
174 struct device_node **blist; /* bootlist entry */
175
176 /* MAL linkage */
177 u32 mal_ph;
178 struct platform_device *mal_dev;
179 u32 mal_rx_chan;
180 u32 mal_tx_chan;
181 struct mal_instance *mal;
182 struct mal_commac commac;
183
184 /* PHY infos */
185 u32 phy_mode;
186 u32 phy_map;
187 u32 phy_address;
188 u32 phy_feat_exc;
189 struct mii_phy phy;
190 struct mutex link_lock;
191 struct delayed_work link_work;
192 int link_polling;
193
194 /* GPCS PHY infos */
195 u32 gpcs_address;
196
197 /* Shared MDIO if any */
198 u32 mdio_ph;
199 struct platform_device *mdio_dev;
200 struct emac_instance *mdio_instance;
201 struct mutex mdio_lock;
202
203 /* ZMII infos if any */
204 u32 zmii_ph;
205 u32 zmii_port;
206 struct platform_device *zmii_dev;
207
208 /* RGMII infos if any */
209 u32 rgmii_ph;
210 u32 rgmii_port;
211 struct platform_device *rgmii_dev;
212
213 /* TAH infos if any */
214 u32 tah_ph;
215 u32 tah_port;
216 struct platform_device *tah_dev;
217
218 /* IRQs */
219 int wol_irq;
220 int emac_irq;
221
222 /* OPB bus frequency in Mhz */
223 u32 opb_bus_freq;
224
225 /* Cell index within an ASIC (for clk mgmnt) */
226 u32 cell_index;
227
228 /* Max supported MTU */
229 u32 max_mtu;
230
231 /* Feature bits (from probe table) */
232 unsigned int features;
233
234 /* Tx and Rx fifo sizes & other infos in bytes */
235 u32 tx_fifo_size;
236 u32 tx_fifo_size_gige;
237 u32 rx_fifo_size;
238 u32 rx_fifo_size_gige;
239 u32 fifo_entry_size;
240 u32 mal_burst_size; /* move to MAL ? */
241
242 /* IAHT and GAHT filter parameterization */
243 u32 xaht_slots_shift;
244 u32 xaht_width_shift;
245
246 /* Descriptor management
247 */
248 struct mal_descriptor *tx_desc;
249 int tx_cnt;
250 int tx_slot;
251 int ack_slot;
252
253 struct mal_descriptor *rx_desc;
254 int rx_slot;
255 struct sk_buff *rx_sg_skb; /* 1 */
256 int rx_skb_size;
257 int rx_sync_size;
258
259 struct sk_buff *tx_skb[NUM_TX_BUFF];
260 struct sk_buff *rx_skb[NUM_RX_BUFF];
261
262 /* Stats
263 */
264 struct emac_error_stats estats;
265 struct net_device_stats nstats;
266 struct emac_stats stats;
267
268 /* Misc
269 */
270 int reset_failed;
271 int stop_timeout; /* in us */
272 int no_mcast;
273 int mcast_pending;
274 int opened;
275 struct work_struct reset_work;
276 spinlock_t lock;
277};
278
279/*
280 * Features of various EMAC implementations
281 */
282
283/*
284 * No flow control on 40x according to the original driver
285 */
286#define EMAC_FTR_NO_FLOW_CONTROL_40x 0x00000001
287/*
288 * Cell is an EMAC4
289 */
290#define EMAC_FTR_EMAC4 0x00000002
291/*
292 * For the 440SPe, AMCC inexplicably changed the polarity of
293 * the "operation complete" bit in the MII control register.
294 */
295#define EMAC_FTR_STACR_OC_INVERT 0x00000004
296/*
297 * Set if we have a TAH.
298 */
299#define EMAC_FTR_HAS_TAH 0x00000008
300/*
301 * Set if we have a ZMII.
302 */
303#define EMAC_FTR_HAS_ZMII 0x00000010
304/*
305 * Set if we have a RGMII.
306 */
307#define EMAC_FTR_HAS_RGMII 0x00000020
308/*
309 * Set if we have new type STACR with STAOPC
310 */
311#define EMAC_FTR_HAS_NEW_STACR 0x00000040
312/*
313 * Set if we need phy clock workaround for 440gx
314 */
315#define EMAC_FTR_440GX_PHY_CLK_FIX 0x00000080
316/*
317 * Set if we need phy clock workaround for 440ep or 440gr
318 */
319#define EMAC_FTR_440EP_PHY_CLK_FIX 0x00000100
320/*
321 * The 405EX and 460EX contain the EMAC4SYNC core
322 */
323#define EMAC_FTR_EMAC4SYNC 0x00000200
324/*
325 * Set if we need phy clock workaround for 460ex or 460gt
326 */
327#define EMAC_FTR_460EX_PHY_CLK_FIX 0x00000400
328
329
330/* Right now, we don't quite handle the always/possible masks on the
331 * most optimal way as we don't have a way to say something like
332 * always EMAC4. Patches welcome.
333 */
334enum {
335 EMAC_FTRS_ALWAYS = 0,
336
337 EMAC_FTRS_POSSIBLE =
338#ifdef CONFIG_IBM_NEW_EMAC_EMAC4
339 EMAC_FTR_EMAC4 | EMAC_FTR_EMAC4SYNC |
340 EMAC_FTR_HAS_NEW_STACR |
341 EMAC_FTR_STACR_OC_INVERT | EMAC_FTR_440GX_PHY_CLK_FIX |
342#endif
343#ifdef CONFIG_IBM_NEW_EMAC_TAH
344 EMAC_FTR_HAS_TAH |
345#endif
346#ifdef CONFIG_IBM_NEW_EMAC_ZMII
347 EMAC_FTR_HAS_ZMII |
348#endif
349#ifdef CONFIG_IBM_NEW_EMAC_RGMII
350 EMAC_FTR_HAS_RGMII |
351#endif
352#ifdef CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL
353 EMAC_FTR_NO_FLOW_CONTROL_40x |
354#endif
355 EMAC_FTR_460EX_PHY_CLK_FIX |
356 EMAC_FTR_440EP_PHY_CLK_FIX,
357};
358
359static inline int emac_has_feature(struct emac_instance *dev,
360 unsigned long feature)
361{
362 return (EMAC_FTRS_ALWAYS & feature) ||
363 (EMAC_FTRS_POSSIBLE & dev->features & feature);
364}
365
366/*
367 * Various instances of the EMAC core have varying 1) number of
368 * address match slots, 2) width of the registers for handling address
369 * match slots, 3) number of registers for handling address match
370 * slots and 4) base offset for those registers.
371 *
372 * These macros and inlines handle these differences based on
373 * parameters supplied by the device structure which are, in turn,
374 * initialized based on the "compatible" entry in the device tree.
375 */
376
377#define EMAC4_XAHT_SLOTS_SHIFT 6
378#define EMAC4_XAHT_WIDTH_SHIFT 4
379
380#define EMAC4SYNC_XAHT_SLOTS_SHIFT 8
381#define EMAC4SYNC_XAHT_WIDTH_SHIFT 5
382
383#define EMAC_XAHT_SLOTS(dev) (1 << (dev)->xaht_slots_shift)
384#define EMAC_XAHT_WIDTH(dev) (1 << (dev)->xaht_width_shift)
385#define EMAC_XAHT_REGS(dev) (1 << ((dev)->xaht_slots_shift - \
386 (dev)->xaht_width_shift))
387
388#define EMAC_XAHT_CRC_TO_SLOT(dev, crc) \
389 ((EMAC_XAHT_SLOTS(dev) - 1) - \
390 ((crc) >> ((sizeof (u32) * BITS_PER_BYTE) - \
391 (dev)->xaht_slots_shift)))
392
393#define EMAC_XAHT_SLOT_TO_REG(dev, slot) \
394 ((slot) >> (dev)->xaht_width_shift)
395
396#define EMAC_XAHT_SLOT_TO_MASK(dev, slot) \
397 ((u32)(1 << (EMAC_XAHT_WIDTH(dev) - 1)) >> \
398 ((slot) & (u32)(EMAC_XAHT_WIDTH(dev) - 1)))
399
400static inline u32 *emac_xaht_base(struct emac_instance *dev)
401{
402 struct emac_regs __iomem *p = dev->emacp;
403 int offset;
404
405 /* The first IAHT entry always is the base of the block of
406 * IAHT and GAHT registers.
407 */
408 if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC))
409 offset = offsetof(struct emac_regs, u1.emac4sync.iaht1);
410 else
411 offset = offsetof(struct emac_regs, u0.emac4.iaht1);
412
413 return (u32 *)((ptrdiff_t)p + offset);
414}
415
416static inline u32 *emac_gaht_base(struct emac_instance *dev)
417{
418 /* GAHT registers always come after an identical number of
419 * IAHT registers.
420 */
421 return emac_xaht_base(dev) + EMAC_XAHT_REGS(dev);
422}
423
424static inline u32 *emac_iaht_base(struct emac_instance *dev)
425{
426 /* IAHT registers always come before an identical number of
427 * GAHT registers.
428 */
429 return emac_xaht_base(dev);
430}
431
432/* Ethtool get_regs complex data.
433 * We want to get not just EMAC registers, but also MAL, ZMII, RGMII, TAH
434 * when available.
435 *
436 * Returned BLOB consists of the ibm_emac_ethtool_regs_hdr,
437 * MAL registers, EMAC registers and optional ZMII, RGMII, TAH registers.
438 * Each register component is preceded with emac_ethtool_regs_subhdr.
439 * in emac_ethtool_regs_hdr.components (each optional header's position matches its bit position)
440 * in emac_ethtool_regs_hdr.components
441 */
442#define EMAC_ETHTOOL_REGS_ZMII 0x00000001
443#define EMAC_ETHTOOL_REGS_RGMII 0x00000002
444#define EMAC_ETHTOOL_REGS_TAH 0x00000004
445
446struct emac_ethtool_regs_hdr {
447 u32 components;
448};
449
450struct emac_ethtool_regs_subhdr {
451 u32 version;
452 u32 index;
453};
454
455#define EMAC_ETHTOOL_REGS_VER 0
456#define EMAC_ETHTOOL_REGS_SIZE(dev) ((dev)->rsrc_regs.end - \
457 (dev)->rsrc_regs.start + 1)
458#define EMAC4_ETHTOOL_REGS_VER 1
459#define EMAC4_ETHTOOL_REGS_SIZE(dev) ((dev)->rsrc_regs.end - \
460 (dev)->rsrc_regs.start + 1)
461
462#endif /* __IBM_NEWEMAC_CORE_H */
diff --git a/drivers/net/ethernet/ibm/emac/debug.c b/drivers/net/ethernet/ibm/emac/debug.c
new file mode 100644
index 00000000000..8c6c1e2a875
--- /dev/null
+++ b/drivers/net/ethernet/ibm/emac/debug.c
@@ -0,0 +1,270 @@
1/*
2 * drivers/net/ibm_newemac/debug.c
3 *
4 * Driver for PowerPC 4xx on-chip ethernet controller, debug print routines.
5 *
6 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
7 * <benh@kernel.crashing.org>
8 *
9 * Based on the arch/ppc version of the driver:
10 *
11 * Copyright (c) 2004, 2005 Zultys Technologies
12 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
13 *
14 * This program is free software; you can redistribute it and/or modify it
15 * under the terms of the GNU General Public License as published by the
16 * Free Software Foundation; either version 2 of the License, or (at your
17 * option) any later version.
18 *
19 */
20#include <linux/init.h>
21#include <linux/module.h>
22#include <linux/kernel.h>
23#include <linux/netdevice.h>
24#include <linux/sysrq.h>
25#include <asm/io.h>
26
27#include "core.h"
28
29static DEFINE_SPINLOCK(emac_dbg_lock);
30
31static void emac_desc_dump(struct emac_instance *p)
32{
33 int i;
34 printk("** EMAC %s TX BDs **\n"
35 " tx_cnt = %d tx_slot = %d ack_slot = %d\n",
36 p->ofdev->dev.of_node->full_name,
37 p->tx_cnt, p->tx_slot, p->ack_slot);
38 for (i = 0; i < NUM_TX_BUFF / 2; ++i)
39 printk
40 ("bd[%2d] 0x%08x %c 0x%04x %4u - bd[%2d] 0x%08x %c 0x%04x %4u\n",
41 i, p->tx_desc[i].data_ptr, p->tx_skb[i] ? 'V' : ' ',
42 p->tx_desc[i].ctrl, p->tx_desc[i].data_len,
43 NUM_TX_BUFF / 2 + i,
44 p->tx_desc[NUM_TX_BUFF / 2 + i].data_ptr,
45 p->tx_skb[NUM_TX_BUFF / 2 + i] ? 'V' : ' ',
46 p->tx_desc[NUM_TX_BUFF / 2 + i].ctrl,
47 p->tx_desc[NUM_TX_BUFF / 2 + i].data_len);
48
49 printk("** EMAC %s RX BDs **\n"
50 " rx_slot = %d flags = 0x%lx rx_skb_size = %d rx_sync_size = %d\n"
51 " rx_sg_skb = 0x%p\n",
52 p->ofdev->dev.of_node->full_name,
53 p->rx_slot, p->commac.flags, p->rx_skb_size,
54 p->rx_sync_size, p->rx_sg_skb);
55 for (i = 0; i < NUM_RX_BUFF / 2; ++i)
56 printk
57 ("bd[%2d] 0x%08x %c 0x%04x %4u - bd[%2d] 0x%08x %c 0x%04x %4u\n",
58 i, p->rx_desc[i].data_ptr, p->rx_skb[i] ? 'V' : ' ',
59 p->rx_desc[i].ctrl, p->rx_desc[i].data_len,
60 NUM_RX_BUFF / 2 + i,
61 p->rx_desc[NUM_RX_BUFF / 2 + i].data_ptr,
62 p->rx_skb[NUM_RX_BUFF / 2 + i] ? 'V' : ' ',
63 p->rx_desc[NUM_RX_BUFF / 2 + i].ctrl,
64 p->rx_desc[NUM_RX_BUFF / 2 + i].data_len);
65}
66
67static void emac_mac_dump(struct emac_instance *dev)
68{
69 struct emac_regs __iomem *p = dev->emacp;
70 const int xaht_regs = EMAC_XAHT_REGS(dev);
71 u32 *gaht_base = emac_gaht_base(dev);
72 u32 *iaht_base = emac_iaht_base(dev);
73 int emac4sync = emac_has_feature(dev, EMAC_FTR_EMAC4SYNC);
74 int n;
75
76 printk("** EMAC %s registers **\n"
77 "MR0 = 0x%08x MR1 = 0x%08x TMR0 = 0x%08x TMR1 = 0x%08x\n"
78 "RMR = 0x%08x ISR = 0x%08x ISER = 0x%08x\n"
79 "IAR = %04x%08x VTPID = 0x%04x VTCI = 0x%04x\n",
80 dev->ofdev->dev.of_node->full_name,
81 in_be32(&p->mr0), in_be32(&p->mr1),
82 in_be32(&p->tmr0), in_be32(&p->tmr1),
83 in_be32(&p->rmr), in_be32(&p->isr), in_be32(&p->iser),
84 in_be32(&p->iahr), in_be32(&p->ialr), in_be32(&p->vtpid),
85 in_be32(&p->vtci)
86 );
87
88 if (emac4sync)
89 printk("MAR = %04x%08x MMAR = %04x%08x\n",
90 in_be32(&p->u0.emac4sync.mahr),
91 in_be32(&p->u0.emac4sync.malr),
92 in_be32(&p->u0.emac4sync.mmahr),
93 in_be32(&p->u0.emac4sync.mmalr)
94 );
95
96 for (n = 0; n < xaht_regs; n++)
97 printk("IAHT%02d = 0x%08x\n", n + 1, in_be32(iaht_base + n));
98
99 for (n = 0; n < xaht_regs; n++)
100 printk("GAHT%02d = 0x%08x\n", n + 1, in_be32(gaht_base + n));
101
102 printk("LSA = %04x%08x IPGVR = 0x%04x\n"
103 "STACR = 0x%08x TRTR = 0x%08x RWMR = 0x%08x\n"
104 "OCTX = 0x%08x OCRX = 0x%08x\n",
105 in_be32(&p->lsah), in_be32(&p->lsal), in_be32(&p->ipgvr),
106 in_be32(&p->stacr), in_be32(&p->trtr), in_be32(&p->rwmr),
107 in_be32(&p->octx), in_be32(&p->ocrx)
108 );
109
110 if (!emac4sync) {
111 printk("IPCR = 0x%08x\n",
112 in_be32(&p->u1.emac4.ipcr)
113 );
114 } else {
115 printk("REVID = 0x%08x TPC = 0x%08x\n",
116 in_be32(&p->u1.emac4sync.revid),
117 in_be32(&p->u1.emac4sync.tpc)
118 );
119 }
120
121 emac_desc_dump(dev);
122}
123
124static void emac_mal_dump(struct mal_instance *mal)
125{
126 int i;
127
128 printk("** MAL %s Registers **\n"
129 "CFG = 0x%08x ESR = 0x%08x IER = 0x%08x\n"
130 "TX|CASR = 0x%08x CARR = 0x%08x EOBISR = 0x%08x DEIR = 0x%08x\n"
131 "RX|CASR = 0x%08x CARR = 0x%08x EOBISR = 0x%08x DEIR = 0x%08x\n",
132 mal->ofdev->dev.of_node->full_name,
133 get_mal_dcrn(mal, MAL_CFG), get_mal_dcrn(mal, MAL_ESR),
134 get_mal_dcrn(mal, MAL_IER),
135 get_mal_dcrn(mal, MAL_TXCASR), get_mal_dcrn(mal, MAL_TXCARR),
136 get_mal_dcrn(mal, MAL_TXEOBISR), get_mal_dcrn(mal, MAL_TXDEIR),
137 get_mal_dcrn(mal, MAL_RXCASR), get_mal_dcrn(mal, MAL_RXCARR),
138 get_mal_dcrn(mal, MAL_RXEOBISR), get_mal_dcrn(mal, MAL_RXDEIR)
139 );
140
141 printk("TX|");
142 for (i = 0; i < mal->num_tx_chans; ++i) {
143 if (i && !(i % 4))
144 printk("\n ");
145 printk("CTP%d = 0x%08x ", i, get_mal_dcrn(mal, MAL_TXCTPR(i)));
146 }
147 printk("\nRX|");
148 for (i = 0; i < mal->num_rx_chans; ++i) {
149 if (i && !(i % 4))
150 printk("\n ");
151 printk("CTP%d = 0x%08x ", i, get_mal_dcrn(mal, MAL_RXCTPR(i)));
152 }
153 printk("\n ");
154 for (i = 0; i < mal->num_rx_chans; ++i) {
155 u32 r = get_mal_dcrn(mal, MAL_RCBS(i));
156 if (i && !(i % 3))
157 printk("\n ");
158 printk("RCBS%d = 0x%08x (%d) ", i, r, r * 16);
159 }
160 printk("\n");
161}
162
163static struct emac_instance *__emacs[4];
164static struct mal_instance *__mals[1];
165
166void emac_dbg_register(struct emac_instance *dev)
167{
168 unsigned long flags;
169 int i;
170
171 spin_lock_irqsave(&emac_dbg_lock, flags);
172 for (i = 0; i < ARRAY_SIZE(__emacs); i++)
173 if (__emacs[i] == NULL) {
174 __emacs[i] = dev;
175 break;
176 }
177 spin_unlock_irqrestore(&emac_dbg_lock, flags);
178}
179
180void emac_dbg_unregister(struct emac_instance *dev)
181{
182 unsigned long flags;
183 int i;
184
185 spin_lock_irqsave(&emac_dbg_lock, flags);
186 for (i = 0; i < ARRAY_SIZE(__emacs); i++)
187 if (__emacs[i] == dev) {
188 __emacs[i] = NULL;
189 break;
190 }
191 spin_unlock_irqrestore(&emac_dbg_lock, flags);
192}
193
194void mal_dbg_register(struct mal_instance *mal)
195{
196 unsigned long flags;
197 int i;
198
199 spin_lock_irqsave(&emac_dbg_lock, flags);
200 for (i = 0; i < ARRAY_SIZE(__mals); i++)
201 if (__mals[i] == NULL) {
202 __mals[i] = mal;
203 break;
204 }
205 spin_unlock_irqrestore(&emac_dbg_lock, flags);
206}
207
208void mal_dbg_unregister(struct mal_instance *mal)
209{
210 unsigned long flags;
211 int i;
212
213 spin_lock_irqsave(&emac_dbg_lock, flags);
214 for (i = 0; i < ARRAY_SIZE(__mals); i++)
215 if (__mals[i] == mal) {
216 __mals[i] = NULL;
217 break;
218 }
219 spin_unlock_irqrestore(&emac_dbg_lock, flags);
220}
221
222void emac_dbg_dump_all(void)
223{
224 unsigned int i;
225 unsigned long flags;
226
227 spin_lock_irqsave(&emac_dbg_lock, flags);
228
229 for (i = 0; i < ARRAY_SIZE(__mals); ++i)
230 if (__mals[i])
231 emac_mal_dump(__mals[i]);
232
233 for (i = 0; i < ARRAY_SIZE(__emacs); ++i)
234 if (__emacs[i])
235 emac_mac_dump(__emacs[i]);
236
237 spin_unlock_irqrestore(&emac_dbg_lock, flags);
238}
239
240#if defined(CONFIG_MAGIC_SYSRQ)
/* SysRq callback: dump all registered MAL and EMAC state. */
static void emac_sysrq_handler(int key)
{
	emac_dbg_dump_all();
}
245
246static struct sysrq_key_op emac_sysrq_op = {
247 .handler = emac_sysrq_handler,
248 .help_msg = "emaC",
249 .action_msg = "Show EMAC(s) status",
250};
251
252int __init emac_init_debug(void)
253{
254 return register_sysrq_key('c', &emac_sysrq_op);
255}
256
257void __exit emac_fini_debug(void)
258{
259 unregister_sysrq_key('c', &emac_sysrq_op);
260}
261
262#else
263int __init emac_init_debug(void)
264{
265 return 0;
266}
267void __exit emac_fini_debug(void)
268{
269}
270#endif /* CONFIG_MAGIC_SYSRQ */
diff --git a/drivers/net/ethernet/ibm/emac/debug.h b/drivers/net/ethernet/ibm/emac/debug.h
new file mode 100644
index 00000000000..e596c77ccdf
--- /dev/null
+++ b/drivers/net/ethernet/ibm/emac/debug.h
@@ -0,0 +1,83 @@
1/*
2 * drivers/net/ibm_newemac/debug.h
3 *
4 * Driver for PowerPC 4xx on-chip ethernet controller, debug print routines.
5 *
6 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
7 * <benh@kernel.crashing.org>
8 *
9 * Based on the arch/ppc version of the driver:
10 *
11 * Copyright (c) 2004, 2005 Zultys Technologies
12 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
13 *
14 * This program is free software; you can redistribute it and/or modify it
15 * under the terms of the GNU General Public License as published by the
16 * Free Software Foundation; either version 2 of the License, or (at your
17 * option) any later version.
18 *
19 */
20#ifndef __IBM_NEWEMAC_DEBUG_H
21#define __IBM_NEWEMAC_DEBUG_H
22
23#include <linux/init.h>
24
25#include "core.h"
26
27#if defined(CONFIG_IBM_NEW_EMAC_DEBUG)
28
29struct emac_instance;
30struct mal_instance;
31
32extern void emac_dbg_register(struct emac_instance *dev);
33extern void emac_dbg_unregister(struct emac_instance *dev);
34extern void mal_dbg_register(struct mal_instance *mal);
35extern void mal_dbg_unregister(struct mal_instance *mal);
36extern int emac_init_debug(void) __init;
37extern void emac_fini_debug(void) __exit;
38extern void emac_dbg_dump_all(void);
39
40# define DBG_LEVEL 1
41
42#else
43
44# define emac_dbg_register(x) do { } while(0)
45# define emac_dbg_unregister(x) do { } while(0)
46# define mal_dbg_register(x) do { } while(0)
47# define mal_dbg_unregister(x) do { } while(0)
48# define emac_init_debug() do { } while(0)
49# define emac_fini_debug() do { } while(0)
50# define emac_dbg_dump_all() do { } while(0)
51
52# define DBG_LEVEL 0
53
54#endif
55
56#define EMAC_DBG(d, name, fmt, arg...) \
57 printk(KERN_DEBUG #name "%s: " fmt, d->ofdev->dev.of_node->full_name, ## arg)
58
59#if DBG_LEVEL > 0
60# define DBG(d,f,x...) EMAC_DBG(d, emac, f, ##x)
61# define MAL_DBG(d,f,x...) EMAC_DBG(d, mal, f, ##x)
62# define ZMII_DBG(d,f,x...) EMAC_DBG(d, zmii, f, ##x)
63# define RGMII_DBG(d,f,x...) EMAC_DBG(d, rgmii, f, ##x)
64# define NL "\n"
65#else
66# define DBG(f,x...) ((void)0)
67# define MAL_DBG(d,f,x...) ((void)0)
68# define ZMII_DBG(d,f,x...) ((void)0)
69# define RGMII_DBG(d,f,x...) ((void)0)
70#endif
71#if DBG_LEVEL > 1
72# define DBG2(d,f,x...) DBG(d,f, ##x)
73# define MAL_DBG2(d,f,x...) MAL_DBG(d,f, ##x)
74# define ZMII_DBG2(d,f,x...) ZMII_DBG(d,f, ##x)
75# define RGMII_DBG2(d,f,x...) RGMII_DBG(d,f, ##x)
76#else
77# define DBG2(f,x...) ((void)0)
78# define MAL_DBG2(d,f,x...) ((void)0)
79# define ZMII_DBG2(d,f,x...) ((void)0)
80# define RGMII_DBG2(d,f,x...) ((void)0)
81#endif
82
83#endif /* __IBM_NEWEMAC_DEBUG_H */
diff --git a/drivers/net/ethernet/ibm/emac/emac.h b/drivers/net/ethernet/ibm/emac/emac.h
new file mode 100644
index 00000000000..1568278d759
--- /dev/null
+++ b/drivers/net/ethernet/ibm/emac/emac.h
@@ -0,0 +1,312 @@
1/*
2 * drivers/net/ibm_newemac/emac.h
3 *
 4 * Register definitions for PowerPC 4xx on-chip ethernet controller
5 *
6 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
7 * <benh@kernel.crashing.org>
8 *
9 * Based on the arch/ppc version of the driver:
10 *
11 * Copyright (c) 2004, 2005 Zultys Technologies.
12 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
13 *
14 * Based on original work by
15 * Matt Porter <mporter@kernel.crashing.org>
16 * Armin Kuster <akuster@mvista.com>
17 * Copyright 2002-2004 MontaVista Software Inc.
18 *
19 * This program is free software; you can redistribute it and/or modify it
20 * under the terms of the GNU General Public License as published by the
21 * Free Software Foundation; either version 2 of the License, or (at your
22 * option) any later version.
23 *
24 */
25#ifndef __IBM_NEWEMAC_H
26#define __IBM_NEWEMAC_H
27
28#include <linux/types.h>
29#include <linux/phy.h>
30
31/* EMAC registers Write Access rules */
32struct emac_regs {
33 /* Common registers across all EMAC implementations. */
34 u32 mr0; /* Special */
35 u32 mr1; /* Reset */
36 u32 tmr0; /* Special */
37 u32 tmr1; /* Special */
38 u32 rmr; /* Reset */
39 u32 isr; /* Always */
40 u32 iser; /* Reset */
41 u32 iahr; /* Reset, R, T */
42 u32 ialr; /* Reset, R, T */
43 u32 vtpid; /* Reset, R, T */
44 u32 vtci; /* Reset, R, T */
45 u32 ptr; /* Reset, T */
46 union {
47 /* Registers unique to EMAC4 implementations */
48 struct {
49 u32 iaht1; /* Reset, R */
50 u32 iaht2; /* Reset, R */
51 u32 iaht3; /* Reset, R */
52 u32 iaht4; /* Reset, R */
53 u32 gaht1; /* Reset, R */
54 u32 gaht2; /* Reset, R */
55 u32 gaht3; /* Reset, R */
56 u32 gaht4; /* Reset, R */
57 } emac4;
58 /* Registers unique to EMAC4SYNC implementations */
59 struct {
60 u32 mahr; /* Reset, R, T */
61 u32 malr; /* Reset, R, T */
62 u32 mmahr; /* Reset, R, T */
63 u32 mmalr; /* Reset, R, T */
64 u32 rsvd0[4];
65 } emac4sync;
66 } u0;
67 /* Common registers across all EMAC implementations. */
68 u32 lsah;
69 u32 lsal;
70 u32 ipgvr; /* Reset, T */
71 u32 stacr; /* Special */
72 u32 trtr; /* Special */
73 u32 rwmr; /* Reset */
74 u32 octx;
75 u32 ocrx;
76 union {
77 /* Registers unique to EMAC4 implementations */
78 struct {
79 u32 ipcr;
80 } emac4;
81 /* Registers unique to EMAC4SYNC implementations */
82 struct {
83 u32 rsvd1;
84 u32 revid;
85 u32 rsvd2[2];
86 u32 iaht1; /* Reset, R */
87 u32 iaht2; /* Reset, R */
88 u32 iaht3; /* Reset, R */
89 u32 iaht4; /* Reset, R */
90 u32 iaht5; /* Reset, R */
91 u32 iaht6; /* Reset, R */
92 u32 iaht7; /* Reset, R */
93 u32 iaht8; /* Reset, R */
94 u32 gaht1; /* Reset, R */
95 u32 gaht2; /* Reset, R */
96 u32 gaht3; /* Reset, R */
97 u32 gaht4; /* Reset, R */
98 u32 gaht5; /* Reset, R */
99 u32 gaht6; /* Reset, R */
100 u32 gaht7; /* Reset, R */
101 u32 gaht8; /* Reset, R */
102 u32 tpc; /* Reset, T */
103 } emac4sync;
104 } u1;
105};
106
107/*
108 * PHY mode settings (EMAC <-> ZMII/RGMII bridge <-> PHY)
109 */
110#define PHY_MODE_NA PHY_INTERFACE_MODE_NA
111#define PHY_MODE_MII PHY_INTERFACE_MODE_MII
112#define PHY_MODE_RMII PHY_INTERFACE_MODE_RMII
113#define PHY_MODE_SMII PHY_INTERFACE_MODE_SMII
114#define PHY_MODE_RGMII PHY_INTERFACE_MODE_RGMII
115#define PHY_MODE_TBI PHY_INTERFACE_MODE_TBI
116#define PHY_MODE_GMII PHY_INTERFACE_MODE_GMII
117#define PHY_MODE_RTBI PHY_INTERFACE_MODE_RTBI
118#define PHY_MODE_SGMII PHY_INTERFACE_MODE_SGMII
119
120/* EMACx_MR0 */
121#define EMAC_MR0_RXI 0x80000000
122#define EMAC_MR0_TXI 0x40000000
123#define EMAC_MR0_SRST 0x20000000
124#define EMAC_MR0_TXE 0x10000000
125#define EMAC_MR0_RXE 0x08000000
126#define EMAC_MR0_WKE 0x04000000
127
128/* EMACx_MR1 */
129#define EMAC_MR1_FDE 0x80000000
130#define EMAC_MR1_ILE 0x40000000
131#define EMAC_MR1_VLE 0x20000000
132#define EMAC_MR1_EIFC 0x10000000
133#define EMAC_MR1_APP 0x08000000
134#define EMAC_MR1_IST 0x01000000
135
136#define EMAC_MR1_MF_MASK 0x00c00000
137#define EMAC_MR1_MF_10 0x00000000
138#define EMAC_MR1_MF_100 0x00400000
139#define EMAC_MR1_MF_1000 0x00800000
140#define EMAC_MR1_MF_1000GPCS 0x00c00000
141#define EMAC_MR1_MF_IPPA(id) (((id) & 0x1f) << 6)
142
143#define EMAC_MR1_RFS_4K 0x00300000
144#define EMAC_MR1_RFS_16K 0x00000000
145#define EMAC_MR1_TFS_2K 0x00080000
146#define EMAC_MR1_TR0_MULT 0x00008000
147#define EMAC_MR1_JPSM 0x00000000
148#define EMAC_MR1_MWSW_001 0x00000000
149#define EMAC_MR1_BASE(opb) (EMAC_MR1_TFS_2K | EMAC_MR1_TR0_MULT)
150
151
152#define EMAC4_MR1_RFS_2K 0x00100000
153#define EMAC4_MR1_RFS_4K 0x00180000
154#define EMAC4_MR1_RFS_16K 0x00280000
155#define EMAC4_MR1_TFS_2K 0x00020000
156#define EMAC4_MR1_TFS_4K 0x00030000
157#define EMAC4_MR1_TFS_16K 0x00050000
158#define EMAC4_MR1_TR 0x00008000
159#define EMAC4_MR1_MWSW_001 0x00001000
160#define EMAC4_MR1_JPSM 0x00000800
161#define EMAC4_MR1_OBCI_MASK 0x00000038
162#define EMAC4_MR1_OBCI_50 0x00000000
163#define EMAC4_MR1_OBCI_66 0x00000008
164#define EMAC4_MR1_OBCI_83 0x00000010
165#define EMAC4_MR1_OBCI_100 0x00000018
166#define EMAC4_MR1_OBCI_100P 0x00000020
167#define EMAC4_MR1_OBCI(freq) ((freq) <= 50 ? EMAC4_MR1_OBCI_50 : \
168 (freq) <= 66 ? EMAC4_MR1_OBCI_66 : \
169 (freq) <= 83 ? EMAC4_MR1_OBCI_83 : \
170 (freq) <= 100 ? EMAC4_MR1_OBCI_100 : \
171 EMAC4_MR1_OBCI_100P)
172
173/* EMACx_TMR0 */
174#define EMAC_TMR0_GNP 0x80000000
175#define EMAC_TMR0_DEFAULT 0x00000000
176#define EMAC4_TMR0_TFAE_2_32 0x00000001
177#define EMAC4_TMR0_TFAE_4_64 0x00000002
178#define EMAC4_TMR0_TFAE_8_128 0x00000003
179#define EMAC4_TMR0_TFAE_16_256 0x00000004
180#define EMAC4_TMR0_TFAE_32_512 0x00000005
181#define EMAC4_TMR0_TFAE_64_1024 0x00000006
182#define EMAC4_TMR0_TFAE_128_2048 0x00000007
183#define EMAC4_TMR0_DEFAULT EMAC4_TMR0_TFAE_2_32
184#define EMAC_TMR0_XMIT (EMAC_TMR0_GNP | EMAC_TMR0_DEFAULT)
185#define EMAC4_TMR0_XMIT (EMAC_TMR0_GNP | EMAC4_TMR0_DEFAULT)
186
187/* EMACx_TMR1 */
188
189#define EMAC_TMR1(l,h) (((l) << 27) | (((h) & 0xff) << 16))
190#define EMAC4_TMR1(l,h) (((l) << 27) | (((h) & 0x3ff) << 14))
191
192/* EMACx_RMR */
193#define EMAC_RMR_SP 0x80000000
194#define EMAC_RMR_SFCS 0x40000000
195#define EMAC_RMR_RRP 0x20000000
196#define EMAC_RMR_RFP 0x10000000
197#define EMAC_RMR_ROP 0x08000000
198#define EMAC_RMR_RPIR 0x04000000
199#define EMAC_RMR_PPP 0x02000000
200#define EMAC_RMR_PME 0x01000000
201#define EMAC_RMR_PMME 0x00800000
202#define EMAC_RMR_IAE 0x00400000
203#define EMAC_RMR_MIAE 0x00200000
204#define EMAC_RMR_BAE 0x00100000
205#define EMAC_RMR_MAE 0x00080000
206#define EMAC_RMR_BASE 0x00000000
207#define EMAC4_RMR_RFAF_2_32 0x00000001
208#define EMAC4_RMR_RFAF_4_64 0x00000002
209#define EMAC4_RMR_RFAF_8_128 0x00000003
210#define EMAC4_RMR_RFAF_16_256 0x00000004
211#define EMAC4_RMR_RFAF_32_512 0x00000005
212#define EMAC4_RMR_RFAF_64_1024 0x00000006
213#define EMAC4_RMR_RFAF_128_2048 0x00000007
214#define EMAC4_RMR_BASE EMAC4_RMR_RFAF_128_2048
215
216/* EMACx_ISR & EMACx_ISER */
217#define EMAC4_ISR_TXPE 0x20000000
218#define EMAC4_ISR_RXPE 0x10000000
219#define EMAC4_ISR_TXUE 0x08000000
220#define EMAC4_ISR_RXOE 0x04000000
221#define EMAC_ISR_OVR 0x02000000
222#define EMAC_ISR_PP 0x01000000
223#define EMAC_ISR_BP 0x00800000
224#define EMAC_ISR_RP 0x00400000
225#define EMAC_ISR_SE 0x00200000
226#define EMAC_ISR_ALE 0x00100000
227#define EMAC_ISR_BFCS 0x00080000
228#define EMAC_ISR_PTLE 0x00040000
229#define EMAC_ISR_ORE 0x00020000
230#define EMAC_ISR_IRE 0x00010000
231#define EMAC_ISR_SQE 0x00000080
232#define EMAC_ISR_TE 0x00000040
233#define EMAC_ISR_MOS 0x00000002
234#define EMAC_ISR_MOF 0x00000001
235
236/* EMACx_STACR */
237#define EMAC_STACR_PHYD_MASK 0xffff
238#define EMAC_STACR_PHYD_SHIFT 16
239#define EMAC_STACR_OC 0x00008000
240#define EMAC_STACR_PHYE 0x00004000
241#define EMAC_STACR_STAC_MASK 0x00003000
242#define EMAC_STACR_STAC_READ 0x00001000
243#define EMAC_STACR_STAC_WRITE 0x00002000
244#define EMAC_STACR_OPBC_MASK 0x00000C00
245#define EMAC_STACR_OPBC_50 0x00000000
246#define EMAC_STACR_OPBC_66 0x00000400
247#define EMAC_STACR_OPBC_83 0x00000800
248#define EMAC_STACR_OPBC_100 0x00000C00
249#define EMAC_STACR_OPBC(freq) ((freq) <= 50 ? EMAC_STACR_OPBC_50 : \
250 (freq) <= 66 ? EMAC_STACR_OPBC_66 : \
251 (freq) <= 83 ? EMAC_STACR_OPBC_83 : EMAC_STACR_OPBC_100)
252#define EMAC_STACR_BASE(opb) EMAC_STACR_OPBC(opb)
253#define EMAC4_STACR_BASE(opb) 0x00000000
254#define EMAC_STACR_PCDA_MASK 0x1f
255#define EMAC_STACR_PCDA_SHIFT 5
256#define EMAC_STACR_PRA_MASK 0x1f
257#define EMACX_STACR_STAC_MASK 0x00003800
258#define EMACX_STACR_STAC_READ 0x00001000
259#define EMACX_STACR_STAC_WRITE 0x00000800
260#define EMACX_STACR_STAC_IND_ADDR 0x00002000
261#define EMACX_STACR_STAC_IND_READ 0x00003800
262#define EMACX_STACR_STAC_IND_READINC 0x00003000
263#define EMACX_STACR_STAC_IND_WRITE 0x00002800
264
265
266/* EMACx_TRTR */
267#define EMAC_TRTR_SHIFT_EMAC4 24
268#define EMAC_TRTR_SHIFT 27
269
270/* EMAC specific TX descriptor control fields (write access) */
271#define EMAC_TX_CTRL_GFCS 0x0200
272#define EMAC_TX_CTRL_GP 0x0100
273#define EMAC_TX_CTRL_ISA 0x0080
274#define EMAC_TX_CTRL_RSA 0x0040
275#define EMAC_TX_CTRL_IVT 0x0020
276#define EMAC_TX_CTRL_RVT 0x0010
277#define EMAC_TX_CTRL_TAH_CSUM 0x000e
278
279/* EMAC specific TX descriptor status fields (read access) */
280#define EMAC_TX_ST_BFCS 0x0200
281#define EMAC_TX_ST_LCS 0x0080
282#define EMAC_TX_ST_ED 0x0040
283#define EMAC_TX_ST_EC 0x0020
284#define EMAC_TX_ST_LC 0x0010
285#define EMAC_TX_ST_MC 0x0008
286#define EMAC_TX_ST_SC 0x0004
287#define EMAC_TX_ST_UR 0x0002
288#define EMAC_TX_ST_SQE 0x0001
289#define EMAC_IS_BAD_TX (EMAC_TX_ST_LCS | EMAC_TX_ST_ED | \
290 EMAC_TX_ST_EC | EMAC_TX_ST_LC | \
291 EMAC_TX_ST_MC | EMAC_TX_ST_UR)
292#define EMAC_IS_BAD_TX_TAH (EMAC_TX_ST_LCS | EMAC_TX_ST_ED | \
293 EMAC_TX_ST_EC | EMAC_TX_ST_LC)
294
295/* EMAC specific RX descriptor status fields (read access) */
296#define EMAC_RX_ST_OE 0x0200
297#define EMAC_RX_ST_PP 0x0100
298#define EMAC_RX_ST_BP 0x0080
299#define EMAC_RX_ST_RP 0x0040
300#define EMAC_RX_ST_SE 0x0020
301#define EMAC_RX_ST_AE 0x0010
302#define EMAC_RX_ST_BFCS 0x0008
303#define EMAC_RX_ST_PTL 0x0004
304#define EMAC_RX_ST_ORE 0x0002
305#define EMAC_RX_ST_IRE 0x0001
306#define EMAC_RX_TAH_BAD_CSUM 0x0003
307#define EMAC_BAD_RX_MASK (EMAC_RX_ST_OE | EMAC_RX_ST_BP | \
308 EMAC_RX_ST_RP | EMAC_RX_ST_SE | \
309 EMAC_RX_ST_AE | EMAC_RX_ST_BFCS | \
310 EMAC_RX_ST_PTL | EMAC_RX_ST_ORE | \
311 EMAC_RX_ST_IRE )
312#endif /* __IBM_NEWEMAC_H */
diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
new file mode 100644
index 00000000000..d268f404b7b
--- /dev/null
+++ b/drivers/net/ethernet/ibm/emac/mal.c
@@ -0,0 +1,809 @@
1/*
2 * drivers/net/ibm_newemac/mal.c
3 *
4 * Memory Access Layer (MAL) support
5 *
6 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
7 * <benh@kernel.crashing.org>
8 *
9 * Based on the arch/ppc version of the driver:
10 *
11 * Copyright (c) 2004, 2005 Zultys Technologies.
12 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
13 *
14 * Based on original work by
15 * Benjamin Herrenschmidt <benh@kernel.crashing.org>,
16 * David Gibson <hermes@gibson.dropbear.id.au>,
17 *
18 * Armin Kuster <akuster@mvista.com>
19 * Copyright 2002 MontaVista Softare Inc.
20 *
21 * This program is free software; you can redistribute it and/or modify it
22 * under the terms of the GNU General Public License as published by the
23 * Free Software Foundation; either version 2 of the License, or (at your
24 * option) any later version.
25 *
26 */
27
28#include <linux/delay.h>
29#include <linux/slab.h>
30
31#include "core.h"
32#include <asm/dcr-regs.h>
33
34static int mal_count;
35
/*
 * Attach a commac (one EMAC's MAL client) to this MAL instance.
 *
 * Fails with -EBUSY if any requested TX/RX channel is already claimed
 * by another commac.  The first successful registration enables the
 * shared NAPI context.  __devinit: only called during device probe.
 */
int __devinit mal_register_commac(struct mal_instance *mal,
				  struct mal_commac *commac)
{
	unsigned long flags;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "reg(%08x, %08x)" NL,
		commac->tx_chan_mask, commac->rx_chan_mask);

	/* Don't let multiple commacs claim the same channel(s) */
	if ((mal->tx_chan_mask & commac->tx_chan_mask) ||
	    (mal->rx_chan_mask & commac->rx_chan_mask)) {
		spin_unlock_irqrestore(&mal->lock, flags);
		printk(KERN_WARNING "mal%d: COMMAC channels conflict!\n",
		       mal->index);
		return -EBUSY;
	}

	/* NAPI runs only while at least one commac is registered */
	if (list_empty(&mal->list))
		napi_enable(&mal->napi);
	mal->tx_chan_mask |= commac->tx_chan_mask;
	mal->rx_chan_mask |= commac->rx_chan_mask;
	list_add(&commac->list, &mal->list);

	spin_unlock_irqrestore(&mal->lock, flags);

	return 0;
}
65
/*
 * Detach a commac: release its channel masks and, if it was the last
 * registered client, disable the shared NAPI context.
 */
void mal_unregister_commac(struct mal_instance *mal,
			   struct mal_commac *commac)
{
	unsigned long flags;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "unreg(%08x, %08x)" NL,
		commac->tx_chan_mask, commac->rx_chan_mask);

	mal->tx_chan_mask &= ~commac->tx_chan_mask;
	mal->rx_chan_mask &= ~commac->rx_chan_mask;
	/* list_del_init so the entry is safely re-registerable later */
	list_del_init(&commac->list);
	if (list_empty(&mal->list))
		napi_disable(&mal->napi);

	spin_unlock_irqrestore(&mal->lock, flags);
}
84
85int mal_set_rcbs(struct mal_instance *mal, int channel, unsigned long size)
86{
87 BUG_ON(channel < 0 || channel >= mal->num_rx_chans ||
88 size > MAL_MAX_RX_SIZE);
89
90 MAL_DBG(mal, "set_rbcs(%d, %lu)" NL, channel, size);
91
92 if (size & 0xf) {
93 printk(KERN_WARNING
94 "mal%d: incorrect RX size %lu for the channel %d\n",
95 mal->index, size, channel);
96 return -EINVAL;
97 }
98
99 set_mal_dcrn(mal, MAL_RCBS(channel), size >> 4);
100 return 0;
101}
102
103int mal_tx_bd_offset(struct mal_instance *mal, int channel)
104{
105 BUG_ON(channel < 0 || channel >= mal->num_tx_chans);
106
107 return channel * NUM_TX_BUFF;
108}
109
110int mal_rx_bd_offset(struct mal_instance *mal, int channel)
111{
112 BUG_ON(channel < 0 || channel >= mal->num_rx_chans);
113 return mal->num_tx_chans * NUM_TX_BUFF + channel * NUM_RX_BUFF;
114}
115
116void mal_enable_tx_channel(struct mal_instance *mal, int channel)
117{
118 unsigned long flags;
119
120 spin_lock_irqsave(&mal->lock, flags);
121
122 MAL_DBG(mal, "enable_tx(%d)" NL, channel);
123
124 set_mal_dcrn(mal, MAL_TXCASR,
125 get_mal_dcrn(mal, MAL_TXCASR) | MAL_CHAN_MASK(channel));
126
127 spin_unlock_irqrestore(&mal->lock, flags);
128}
129
130void mal_disable_tx_channel(struct mal_instance *mal, int channel)
131{
132 set_mal_dcrn(mal, MAL_TXCARR, MAL_CHAN_MASK(channel));
133
134 MAL_DBG(mal, "disable_tx(%d)" NL, channel);
135}
136
/*
 * Start RX on @channel by setting its bit in MAL_RXCASR; the CASR
 * read-modify-write is serialized by mal->lock.
 */
void mal_enable_rx_channel(struct mal_instance *mal, int channel)
{
	unsigned long flags;

	/*
	 * On some 4xx PPC's (e.g. 460EX/GT), the rx channel is a multiple
	 * of 8, but enabling in MAL_RXCASR needs the divided by 8 value
	 * for the bitmask
	 */
	if (!(channel % 8))
		channel >>= 3;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "enable_rx(%d)" NL, channel);

	set_mal_dcrn(mal, MAL_RXCASR,
		     get_mal_dcrn(mal, MAL_RXCASR) | MAL_CHAN_MASK(channel));

	spin_unlock_irqrestore(&mal->lock, flags);
}
158
/*
 * Stop RX on @channel via MAL_RXCARR (pure mask write, no RMW, so no
 * locking needed).  Same divide-by-8 quirk as mal_enable_rx_channel().
 */
void mal_disable_rx_channel(struct mal_instance *mal, int channel)
{
	/*
	 * On some 4xx PPC's (e.g. 460EX/GT), the rx channel is a multiple
	 * of 8, but enabling in MAL_RXCASR needs the divided by 8 value
	 * for the bitmask
	 */
	if (!(channel % 8))
		channel >>= 3;

	set_mal_dcrn(mal, MAL_RXCARR, MAL_CHAN_MASK(channel));

	MAL_DBG(mal, "disable_rx(%d)" NL, channel);
}
173
174void mal_poll_add(struct mal_instance *mal, struct mal_commac *commac)
175{
176 unsigned long flags;
177
178 spin_lock_irqsave(&mal->lock, flags);
179
180 MAL_DBG(mal, "poll_add(%p)" NL, commac);
181
182 /* starts disabled */
183 set_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags);
184
185 list_add_tail(&commac->poll_list, &mal->poll_list);
186
187 spin_unlock_irqrestore(&mal->lock, flags);
188}
189
190void mal_poll_del(struct mal_instance *mal, struct mal_commac *commac)
191{
192 unsigned long flags;
193
194 spin_lock_irqsave(&mal->lock, flags);
195
196 MAL_DBG(mal, "poll_del(%p)" NL, commac);
197
198 list_del(&commac->poll_list);
199
200 spin_unlock_irqrestore(&mal->lock, flags);
201}
202
203/* synchronized by mal_poll() */
204static inline void mal_enable_eob_irq(struct mal_instance *mal)
205{
206 MAL_DBG2(mal, "enable_irq" NL);
207
208 // XXX might want to cache MAL_CFG as the DCR read can be slooooow
209 set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) | MAL_CFG_EOPIE);
210}
211
212/* synchronized by NAPI state */
213static inline void mal_disable_eob_irq(struct mal_instance *mal)
214{
215 // XXX might want to cache MAL_CFG as the DCR read can be slooooow
216 set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) & ~MAL_CFG_EOPIE);
217
218 MAL_DBG2(mal, "disable_irq" NL);
219}
220
/*
 * MAL System Error interrupt: read and clear MAL_ESR, then log the
 * cause.  Descriptor errors are deliberately ignored here because the
 * dedicated TXDE/RXDE interrupts will fire for them anyway.
 */
static irqreturn_t mal_serr(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;

	u32 esr = get_mal_dcrn(mal, MAL_ESR);

	/* Clear the error status register */
	set_mal_dcrn(mal, MAL_ESR, esr);

	MAL_DBG(mal, "SERR %08x" NL, esr);

	if (esr & MAL_ESR_EVB) {
		if (esr & MAL_ESR_DE) {
			/* We ignore Descriptor error,
			 * TXDE or RXDE interrupt will be generated anyway.
			 */
			return IRQ_HANDLED;
		}

		if (esr & MAL_ESR_PEIN) {
			/* PLB error, it's probably buggy hardware or
			 * incorrect physical address in BD (i.e. bug)
			 */
			if (net_ratelimit())
				printk(KERN_ERR
				       "mal%d: system error, "
				       "PLB (ESR = 0x%08x)\n",
				       mal->index, esr);
			return IRQ_HANDLED;
		}

		/* OPB error, it's probably buggy hardware or incorrect
		 * EBC setup
		 */
		if (net_ratelimit())
			printk(KERN_ERR
			       "mal%d: system error, OPB (ESR = 0x%08x)\n",
			       mal->index, esr);
	}
	return IRQ_HANDLED;
}
262
263static inline void mal_schedule_poll(struct mal_instance *mal)
264{
265 if (likely(napi_schedule_prep(&mal->napi))) {
266 MAL_DBG2(mal, "schedule_poll" NL);
267 mal_disable_eob_irq(mal);
268 __napi_schedule(&mal->napi);
269 } else
270 MAL_DBG2(mal, "already in poll" NL);
271}
272
/*
 * TX End-Of-Buffer interrupt: schedule the NAPI poller, then ack the
 * per-channel status bits we just read.  SoCs with the
 * MAL_FTR_CLEAR_ICINTSTAT feature (405EZ, see mal_probe()) also need
 * the coalescing status bit in the SDR cleared.
 */
static irqreturn_t mal_txeob(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;

	u32 r = get_mal_dcrn(mal, MAL_TXEOBISR);

	MAL_DBG2(mal, "txeob %08x" NL, r);

	mal_schedule_poll(mal);
	set_mal_dcrn(mal, MAL_TXEOBISR, r);

#ifdef CONFIG_PPC_DCR_NATIVE
	if (mal_has_feature(mal, MAL_FTR_CLEAR_ICINTSTAT))
		mtdcri(SDR0, DCRN_SDR_ICINTSTAT,
				(mfdcri(SDR0, DCRN_SDR_ICINTSTAT) | ICINTSTAT_ICTX));
#endif

	return IRQ_HANDLED;
}
292
/*
 * RX End-Of-Buffer interrupt: mirror image of mal_txeob() — schedule
 * polling, ack MAL_RXEOBISR, and clear the SDR coalescing bit when the
 * MAL_FTR_CLEAR_ICINTSTAT feature is set.
 */
static irqreturn_t mal_rxeob(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;

	u32 r = get_mal_dcrn(mal, MAL_RXEOBISR);

	MAL_DBG2(mal, "rxeob %08x" NL, r);

	mal_schedule_poll(mal);
	set_mal_dcrn(mal, MAL_RXEOBISR, r);

#ifdef CONFIG_PPC_DCR_NATIVE
	if (mal_has_feature(mal, MAL_FTR_CLEAR_ICINTSTAT))
		mtdcri(SDR0, DCRN_SDR_ICINTSTAT,
				(mfdcri(SDR0, DCRN_SDR_ICINTSTAT) | ICINTSTAT_ICRX));
#endif

	return IRQ_HANDLED;
}
312
313static irqreturn_t mal_txde(int irq, void *dev_instance)
314{
315 struct mal_instance *mal = dev_instance;
316
317 u32 deir = get_mal_dcrn(mal, MAL_TXDEIR);
318 set_mal_dcrn(mal, MAL_TXDEIR, deir);
319
320 MAL_DBG(mal, "txde %08x" NL, deir);
321
322 if (net_ratelimit())
323 printk(KERN_ERR
324 "mal%d: TX descriptor error (TXDEIR = 0x%08x)\n",
325 mal->index, deir);
326
327 return IRQ_HANDLED;
328}
329
/*
 * RX Descriptor Error interrupt: mark every commac whose channel(s)
 * show a pending error as RX_STOPPED and call its rxde() callback,
 * then kick the poller (mal_poll()'s "rotting packet" pass re-checks
 * the RX_STOPPED bit) and ack the status bits.
 */
static irqreturn_t mal_rxde(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;
	struct list_head *l;

	u32 deir = get_mal_dcrn(mal, MAL_RXDEIR);

	MAL_DBG(mal, "rxde %08x" NL, deir);

	list_for_each(l, &mal->list) {
		struct mal_commac *mc = list_entry(l, struct mal_commac, list);
		if (deir & mc->rx_chan_mask) {
			set_bit(MAL_COMMAC_RX_STOPPED, &mc->flags);
			mc->ops->rxde(mc->dev);
		}
	}

	mal_schedule_poll(mal);
	set_mal_dcrn(mal, MAL_RXDEIR, deir);

	return IRQ_HANDLED;
}
352
353static irqreturn_t mal_int(int irq, void *dev_instance)
354{
355 struct mal_instance *mal = dev_instance;
356 u32 esr = get_mal_dcrn(mal, MAL_ESR);
357
358 if (esr & MAL_ESR_EVB) {
359 /* descriptor error */
360 if (esr & MAL_ESR_DE) {
361 if (esr & MAL_ESR_CIDT)
362 return mal_rxde(irq, dev_instance);
363 else
364 return mal_txde(irq, dev_instance);
365 } else { /* SERR */
366 return mal_serr(irq, dev_instance);
367 }
368 }
369 return IRQ_HANDLED;
370}
371
/*
 * Exclude a commac from mal_poll() and wait until any in-flight poll
 * has finished with it.  May sleep; pairs with mal_poll_enable().
 */
void mal_poll_disable(struct mal_instance *mal, struct mal_commac *commac)
{
	/* Spinlock-type semantics: only one caller disable poll at a time */
	while (test_and_set_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags))
		msleep(1);

	/* Synchronize with the MAL NAPI poller */
	napi_synchronize(&mal->napi);
}
381
/*
 * Re-include a commac in mal_poll().  The write barrier orders the
 * commac's state updates before the flag clear that makes it visible
 * to the poller again.
 */
void mal_poll_enable(struct mal_instance *mal, struct mal_commac *commac)
{
	smp_wmb();
	clear_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags);

	/* Feels better to trigger a poll here to catch up with events that
	 * may have happened on this channel while disabled. It will most
	 * probably be delayed until the next interrupt but that's mostly a
	 * non-issue in the context where this is called.
	 */
	napi_schedule(&mal->napi);
}
394
/*
 * Shared NAPI poll for every commac on this MAL.
 *
 * Budget accounting is RX-only: TX completions are reaped for all
 * commacs first, then RX is polled until @budget is exhausted.  Before
 * completing NAPI we re-check each channel for packets that arrived
 * after its RX pass ("rotting" packets) or channels stopped by an RX
 * descriptor error; if any are found, NAPI is re-armed and — budget
 * permitting — the whole loop restarts.
 */
static int mal_poll(struct napi_struct *napi, int budget)
{
	struct mal_instance *mal = container_of(napi, struct mal_instance, napi);
	struct list_head *l;
	int received = 0;
	unsigned long flags;

	MAL_DBG2(mal, "poll(%d)" NL, budget);
 again:
	/* Process TX skbs */
	list_for_each(l, &mal->poll_list) {
		struct mal_commac *mc =
			list_entry(l, struct mal_commac, poll_list);
		mc->ops->poll_tx(mc->dev);
	}

	/* Process RX skbs.
	 *
	 * We _might_ need something more smart here to enforce polling
	 * fairness.
	 */
	list_for_each(l, &mal->poll_list) {
		struct mal_commac *mc =
			list_entry(l, struct mal_commac, poll_list);
		int n;
		if (unlikely(test_bit(MAL_COMMAC_POLL_DISABLED, &mc->flags)))
			continue;
		n = mc->ops->poll_rx(mc->dev, budget);
		if (n) {
			received += n;
			budget -= n;
			if (budget <= 0)
				goto more_work; // XXX What if this is the last one ?
		}
	}

	/* We need to disable IRQs to protect from RXDE IRQ here */
	spin_lock_irqsave(&mal->lock, flags);
	__napi_complete(napi);
	mal_enable_eob_irq(mal);
	spin_unlock_irqrestore(&mal->lock, flags);

	/* Check for "rotting" packet(s) */
	list_for_each(l, &mal->poll_list) {
		struct mal_commac *mc =
			list_entry(l, struct mal_commac, poll_list);
		if (unlikely(test_bit(MAL_COMMAC_POLL_DISABLED, &mc->flags)))
			continue;
		if (unlikely(mc->ops->peek_rx(mc->dev) ||
			     test_bit(MAL_COMMAC_RX_STOPPED, &mc->flags))) {
			MAL_DBG2(mal, "rotting packet" NL);
			if (napi_reschedule(napi))
				mal_disable_eob_irq(mal);
			else
				MAL_DBG2(mal, "already in poll list" NL);

			if (budget > 0)
				goto again;
			else
				goto more_work;
		}
		mc->ops->poll_tx(mc->dev);
	}

 more_work:
	MAL_DBG2(mal, "poll() %d <- %d" NL, budget, received);
	return received;
}
463
464static void mal_reset(struct mal_instance *mal)
465{
466 int n = 10;
467
468 MAL_DBG(mal, "reset" NL);
469
470 set_mal_dcrn(mal, MAL_CFG, MAL_CFG_SR);
471
472 /* Wait for reset to complete (1 system clock) */
473 while ((get_mal_dcrn(mal, MAL_CFG) & MAL_CFG_SR) && n)
474 --n;
475
476 if (unlikely(!n))
477 printk(KERN_ERR "mal%d: reset timeout\n", mal->index);
478}
479
480int mal_get_regs_len(struct mal_instance *mal)
481{
482 return sizeof(struct emac_ethtool_regs_subhdr) +
483 sizeof(struct mal_regs);
484}
485
/*
 * Dump all MAL registers into @buf for ethtool, preceded by an
 * emac_ethtool_regs_subhdr identifying this instance.  Returns the
 * first byte past the data written, so callers can chain dumps.
 */
void *mal_dump_regs(struct mal_instance *mal, void *buf)
{
	struct emac_ethtool_regs_subhdr *hdr = buf;
	struct mal_regs *regs = (struct mal_regs *)(hdr + 1);
	int i;

	hdr->version = mal->version;
	hdr->index = mal->index;

	regs->tx_count = mal->num_tx_chans;
	regs->rx_count = mal->num_rx_chans;

	/* global and per-direction registers */
	regs->cfg = get_mal_dcrn(mal, MAL_CFG);
	regs->esr = get_mal_dcrn(mal, MAL_ESR);
	regs->ier = get_mal_dcrn(mal, MAL_IER);
	regs->tx_casr = get_mal_dcrn(mal, MAL_TXCASR);
	regs->tx_carr = get_mal_dcrn(mal, MAL_TXCARR);
	regs->tx_eobisr = get_mal_dcrn(mal, MAL_TXEOBISR);
	regs->tx_deir = get_mal_dcrn(mal, MAL_TXDEIR);
	regs->rx_casr = get_mal_dcrn(mal, MAL_RXCASR);
	regs->rx_carr = get_mal_dcrn(mal, MAL_RXCARR);
	regs->rx_eobisr = get_mal_dcrn(mal, MAL_RXEOBISR);
	regs->rx_deir = get_mal_dcrn(mal, MAL_RXDEIR);

	/* per-channel ring pointers, plus RX buffer sizes */
	for (i = 0; i < regs->tx_count; ++i)
		regs->tx_ctpr[i] = get_mal_dcrn(mal, MAL_TXCTPR(i));

	for (i = 0; i < regs->rx_count; ++i) {
		regs->rx_ctpr[i] = get_mal_dcrn(mal, MAL_RXCTPR(i));
		regs->rcbs[i] = get_mal_dcrn(mal, MAL_RCBS(i));
	}
	return regs + 1;
}
519
/*
 * Probe one MAL platform device:
 *  - read channel counts and DCR/IRQ resources from the device tree,
 *  - reset the MAL and program its configuration register,
 *  - allocate one contiguous coherent buffer holding every TX ring
 *    followed by every RX ring, and point the per-channel table
 *    pointer registers at it,
 *  - install the interrupt handlers (a single shared mal_int() when
 *    the SoC ORs SERR/TXDE/RXDE into one line) and enable interrupts.
 * Errors unwind through the goto ladder at the bottom.
 */
static int __devinit mal_probe(struct platform_device *ofdev)
{
	struct mal_instance *mal;
	int err = 0, i, bd_size;
	int index = mal_count++;
	unsigned int dcr_base;
	const u32 *prop;
	u32 cfg;
	unsigned long irqflags;
	irq_handler_t hdlr_serr, hdlr_txde, hdlr_rxde;

	mal = kzalloc(sizeof(struct mal_instance), GFP_KERNEL);
	if (!mal) {
		printk(KERN_ERR
		       "mal%d: out of memory allocating MAL structure!\n",
		       index);
		return -ENOMEM;
	}
	mal->index = index;
	mal->ofdev = ofdev;
	/* "ibm,mcmal2" nodes get the MAL2 register semantics */
	mal->version = of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal2") ? 2 : 1;

	MAL_DBG(mal, "probe" NL);

	prop = of_get_property(ofdev->dev.of_node, "num-tx-chans", NULL);
	if (prop == NULL) {
		printk(KERN_ERR
		       "mal%d: can't find MAL num-tx-chans property!\n",
		       index);
		err = -ENODEV;
		goto fail;
	}
	mal->num_tx_chans = prop[0];

	prop = of_get_property(ofdev->dev.of_node, "num-rx-chans", NULL);
	if (prop == NULL) {
		printk(KERN_ERR
		       "mal%d: can't find MAL num-rx-chans property!\n",
		       index);
		err = -ENODEV;
		goto fail;
	}
	mal->num_rx_chans = prop[0];

	dcr_base = dcr_resource_start(ofdev->dev.of_node, 0);
	if (dcr_base == 0) {
		printk(KERN_ERR
		       "mal%d: can't find DCR resource!\n", index);
		err = -ENODEV;
		goto fail;
	}
	mal->dcr_host = dcr_map(ofdev->dev.of_node, dcr_base, 0x100);
	if (!DCR_MAP_OK(mal->dcr_host)) {
		printk(KERN_ERR
		       "mal%d: failed to map DCRs !\n", index);
		err = -ENODEV;
		goto fail;
	}

	/* 405EZ needs both the ICINTSTAT workaround and the common
	 * error interrupt support compiled in; refuse to bind otherwise */
	if (of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal-405ez")) {
#if defined(CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT) && \
		defined(CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR)
		mal->features |= (MAL_FTR_CLEAR_ICINTSTAT |
				MAL_FTR_COMMON_ERR_INT);
#else
		printk(KERN_ERR "%s: Support for 405EZ not enabled!\n",
				ofdev->dev.of_node->full_name);
		err = -ENODEV;
		goto fail;
#endif
	}

	mal->txeob_irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
	mal->rxeob_irq = irq_of_parse_and_map(ofdev->dev.of_node, 1);
	mal->serr_irq = irq_of_parse_and_map(ofdev->dev.of_node, 2);

	if (mal_has_feature(mal, MAL_FTR_COMMON_ERR_INT)) {
		/* SERR/TXDE/RXDE share one interrupt line */
		mal->txde_irq = mal->rxde_irq = mal->serr_irq;
	} else {
		mal->txde_irq = irq_of_parse_and_map(ofdev->dev.of_node, 3);
		mal->rxde_irq = irq_of_parse_and_map(ofdev->dev.of_node, 4);
	}

	if (mal->txeob_irq == NO_IRQ || mal->rxeob_irq == NO_IRQ ||
	    mal->serr_irq == NO_IRQ || mal->txde_irq == NO_IRQ ||
	    mal->rxde_irq == NO_IRQ) {
		printk(KERN_ERR
		       "mal%d: failed to map interrupts !\n", index);
		err = -ENODEV;
		goto fail_unmap;
	}

	INIT_LIST_HEAD(&mal->poll_list);
	INIT_LIST_HEAD(&mal->list);
	spin_lock_init(&mal->lock);

	/* NAPI needs a net_device to hang off; MAL has none of its own */
	init_dummy_netdev(&mal->dummy_dev);

	netif_napi_add(&mal->dummy_dev, &mal->napi, mal_poll,
		       CONFIG_IBM_NEW_EMAC_POLL_WEIGHT);

	/* Load power-on reset defaults */
	mal_reset(mal);

	/* Set the MAL configuration register */
	cfg = (mal->version == 2) ? MAL2_CFG_DEFAULT : MAL1_CFG_DEFAULT;
	cfg |= MAL_CFG_PLBB | MAL_CFG_OPBBL | MAL_CFG_LEA;

	/* Current Axon is not happy with priority being non-0, it can
	 * deadlock, fix it up here
	 */
	if (of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal-axon"))
		cfg &= ~(MAL2_CFG_RPP_10 | MAL2_CFG_WPP_10);

	/* Apply configuration */
	set_mal_dcrn(mal, MAL_CFG, cfg);

	/* Allocate space for BD rings */
	BUG_ON(mal->num_tx_chans <= 0 || mal->num_tx_chans > 32);
	BUG_ON(mal->num_rx_chans <= 0 || mal->num_rx_chans > 32);

	/* one allocation: all TX rings first, then all RX rings */
	bd_size = sizeof(struct mal_descriptor) *
		(NUM_TX_BUFF * mal->num_tx_chans +
		 NUM_RX_BUFF * mal->num_rx_chans);
	mal->bd_virt =
		dma_alloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma,
				   GFP_KERNEL);
	if (mal->bd_virt == NULL) {
		printk(KERN_ERR
		       "mal%d: out of memory allocating RX/TX descriptors!\n",
		       index);
		err = -ENOMEM;
		goto fail_unmap;
	}
	memset(mal->bd_virt, 0, bd_size);

	for (i = 0; i < mal->num_tx_chans; ++i)
		set_mal_dcrn(mal, MAL_TXCTPR(i), mal->bd_dma +
			     sizeof(struct mal_descriptor) *
			     mal_tx_bd_offset(mal, i));

	for (i = 0; i < mal->num_rx_chans; ++i)
		set_mal_dcrn(mal, MAL_RXCTPR(i), mal->bd_dma +
			     sizeof(struct mal_descriptor) *
			     mal_rx_bd_offset(mal, i));

	if (mal_has_feature(mal, MAL_FTR_COMMON_ERR_INT)) {
		irqflags = IRQF_SHARED;
		hdlr_serr = hdlr_txde = hdlr_rxde = mal_int;
	} else {
		irqflags = 0;
		hdlr_serr = mal_serr;
		hdlr_txde = mal_txde;
		hdlr_rxde = mal_rxde;
	}

	err = request_irq(mal->serr_irq, hdlr_serr, irqflags, "MAL SERR", mal);
	if (err)
		goto fail2;
	err = request_irq(mal->txde_irq, hdlr_txde, irqflags, "MAL TX DE", mal);
	if (err)
		goto fail3;
	err = request_irq(mal->txeob_irq, mal_txeob, 0, "MAL TX EOB", mal);
	if (err)
		goto fail4;
	err = request_irq(mal->rxde_irq, hdlr_rxde, irqflags, "MAL RX DE", mal);
	if (err)
		goto fail5;
	err = request_irq(mal->rxeob_irq, mal_rxeob, 0, "MAL RX EOB", mal);
	if (err)
		goto fail6;

	/* Enable all MAL SERR interrupt sources */
	if (mal->version == 2)
		set_mal_dcrn(mal, MAL_IER, MAL2_IER_EVENTS);
	else
		set_mal_dcrn(mal, MAL_IER, MAL1_IER_EVENTS);

	/* Enable EOB interrupt */
	mal_enable_eob_irq(mal);

	printk(KERN_INFO
	       "MAL v%d %s, %d TX channels, %d RX channels\n",
	       mal->version, ofdev->dev.of_node->full_name,
	       mal->num_tx_chans, mal->num_rx_chans);

	/* Advertise this instance to the rest of the world */
	wmb();
	dev_set_drvdata(&ofdev->dev, mal);

	mal_dbg_register(mal);

	return 0;

 fail6:
	free_irq(mal->rxde_irq, mal);
 fail5:
	free_irq(mal->txeob_irq, mal);
 fail4:
	free_irq(mal->txde_irq, mal);
 fail3:
	free_irq(mal->serr_irq, mal);
 fail2:
	dma_free_coherent(&ofdev->dev, bd_size, mal->bd_virt, mal->bd_dma);
 fail_unmap:
	dcr_unmap(mal->dcr_host, 0x100);
 fail:
	kfree(mal);

	return err;
}
731
/*
 * Tear down a MAL instance: quiesce NAPI, release the IRQs, reset the
 * hardware and free the descriptor rings.  All commacs should have
 * been unregistered by their owners before this runs.
 */
static int __devexit mal_remove(struct platform_device *ofdev)
{
	struct mal_instance *mal = dev_get_drvdata(&ofdev->dev);

	MAL_DBG(mal, "remove" NL);

	/* Synchronize with scheduled polling */
	napi_disable(&mal->napi);

	if (!list_empty(&mal->list)) {
		/* This is *very* bad */
		printk(KERN_EMERG
		       "mal%d: commac list is not empty on remove!\n",
		       mal->index);
		WARN_ON(1);
	}

	dev_set_drvdata(&ofdev->dev, NULL);

	free_irq(mal->serr_irq, mal);
	free_irq(mal->txde_irq, mal);
	free_irq(mal->txeob_irq, mal);
	free_irq(mal->rxde_irq, mal);
	free_irq(mal->rxeob_irq, mal);

	mal_reset(mal);

	mal_dbg_unregister(mal);

	/* one contiguous allocation held every TX and RX ring */
	dma_free_coherent(&ofdev->dev,
			  sizeof(struct mal_descriptor) *
			  (NUM_TX_BUFF * mal->num_tx_chans +
			   NUM_RX_BUFF * mal->num_rx_chans), mal->bd_virt,
			  mal->bd_dma);
	kfree(mal);

	return 0;
}
770
/* Device-tree match table; the "mcmal-dma" device_type entries exist
 * only for backward compatibility with older trees (see below). */
static struct of_device_id mal_platform_match[] =
{
	{
		.compatible	= "ibm,mcmal",
	},
	{
		.compatible	= "ibm,mcmal2",
	},
	/* Backward compat */
	{
		.type		= "mcmal-dma",
		.compatible	= "ibm,mcmal",
	},
	{
		.type		= "mcmal-dma",
		.compatible	= "ibm,mcmal2",
	},
	{},
};
790
/* Platform driver glue binding mal_probe()/mal_remove() to the
 * device-tree matches declared above. */
static struct platform_driver mal_of_driver = {
	.driver = {
		.name = "mcmal",
		.owner = THIS_MODULE,
		.of_match_table = mal_platform_match,
	},
	.probe = mal_probe,
	.remove = mal_remove,
};
800
/* Register the MAL platform driver.  Returns 0 or a negative errno. */
int __init mal_init(void)
{
	return platform_driver_register(&mal_of_driver);
}
805
/* Unregister the MAL platform driver; counterpart of mal_init(). */
void mal_exit(void)
{
	platform_driver_unregister(&mal_of_driver);
}
diff --git a/drivers/net/ethernet/ibm/emac/mal.h b/drivers/net/ethernet/ibm/emac/mal.h
new file mode 100644
index 00000000000..66084214bf4
--- /dev/null
+++ b/drivers/net/ethernet/ibm/emac/mal.h
@@ -0,0 +1,316 @@
1/*
2 * drivers/net/ibm_newemac/mal.h
3 *
4 * Memory Access Layer (MAL) support
5 *
6 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
7 * <benh@kernel.crashing.org>
8 *
9 * Based on the arch/ppc version of the driver:
10 *
11 * Copyright (c) 2004, 2005 Zultys Technologies.
12 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
13 *
14 * Based on original work by
15 * Armin Kuster <akuster@mvista.com>
16 * Copyright 2002 MontaVista Softare Inc.
17 *
18 * This program is free software; you can redistribute it and/or modify it
19 * under the terms of the GNU General Public License as published by the
20 * Free Software Foundation; either version 2 of the License, or (at your
21 * option) any later version.
22 *
23 */
24#ifndef __IBM_NEWEMAC_MAL_H
25#define __IBM_NEWEMAC_MAL_H
26
27/*
28 * There are some variations on the MAL, we express them in this driver as
29 * MAL Version 1 and 2 though that doesn't match any IBM terminology.
30 *
31 * We call MAL 1 the version in 405GP, 405GPR, 405EP, 440EP, 440GR and
32 * NP405H.
33 *
34 * We call MAL 2 the version in 440GP, 440GX, 440SP, 440SPE and Axon
35 *
36 * The driver expects a "version" property in the emac node containing
37 * a number 1 or 2. New device-trees for EMAC capable platforms are thus
38 * required to include that when porting to arch/powerpc.
39 */
40
41/* MALx DCR registers */
42#define MAL_CFG 0x00
43#define MAL_CFG_SR 0x80000000
44#define MAL_CFG_PLBB 0x00004000
45#define MAL_CFG_OPBBL 0x00000080
46#define MAL_CFG_EOPIE 0x00000004
47#define MAL_CFG_LEA 0x00000002
48#define MAL_CFG_SD 0x00000001
49
50/* MAL V1 CFG bits */
51#define MAL1_CFG_PLBP_MASK 0x00c00000
52#define MAL1_CFG_PLBP_10 0x00800000
53#define MAL1_CFG_GA 0x00200000
54#define MAL1_CFG_OA 0x00100000
55#define MAL1_CFG_PLBLE 0x00080000
56#define MAL1_CFG_PLBT_MASK 0x00078000
57#define MAL1_CFG_DEFAULT (MAL1_CFG_PLBP_10 | MAL1_CFG_PLBT_MASK)
58
59/* MAL V2 CFG bits */
60#define MAL2_CFG_RPP_MASK 0x00c00000
61#define MAL2_CFG_RPP_10 0x00800000
62#define MAL2_CFG_RMBS_MASK 0x00300000
63#define MAL2_CFG_WPP_MASK 0x000c0000
64#define MAL2_CFG_WPP_10 0x00080000
65#define MAL2_CFG_WMBS_MASK 0x00030000
66#define MAL2_CFG_PLBLE 0x00008000
67#define MAL2_CFG_DEFAULT (MAL2_CFG_RMBS_MASK | MAL2_CFG_WMBS_MASK | \
68 MAL2_CFG_RPP_10 | MAL2_CFG_WPP_10)
69
70#define MAL_ESR 0x01
71#define MAL_ESR_EVB 0x80000000
72#define MAL_ESR_CIDT 0x40000000
73#define MAL_ESR_CID_MASK 0x3e000000
74#define MAL_ESR_CID_SHIFT 25
75#define MAL_ESR_DE 0x00100000
76#define MAL_ESR_OTE 0x00040000
77#define MAL_ESR_OSE 0x00020000
78#define MAL_ESR_PEIN 0x00010000
79#define MAL_ESR_DEI 0x00000010
80#define MAL_ESR_OTEI 0x00000004
81#define MAL_ESR_OSEI 0x00000002
82#define MAL_ESR_PBEI 0x00000001
83
84/* MAL V1 ESR bits */
85#define MAL1_ESR_ONE 0x00080000
86#define MAL1_ESR_ONEI 0x00000008
87
88/* MAL V2 ESR bits */
89#define MAL2_ESR_PTE 0x00800000
90#define MAL2_ESR_PRE 0x00400000
91#define MAL2_ESR_PWE 0x00200000
92#define MAL2_ESR_PTEI 0x00000080
93#define MAL2_ESR_PREI 0x00000040
94#define MAL2_ESR_PWEI 0x00000020
95
96
97#define MAL_IER 0x02
98#define MAL_IER_DE 0x00000010
99#define MAL_IER_OTE 0x00000004
100#define MAL_IER_OE 0x00000002
101#define MAL_IER_PE 0x00000001
102/* MAL V1 IER bits */
103#define MAL1_IER_NWE 0x00000008
104#define MAL1_IER_SOC_EVENTS MAL1_IER_NWE
105#define MAL1_IER_EVENTS (MAL1_IER_SOC_EVENTS | MAL_IER_DE | \
106 MAL_IER_OTE | MAL_IER_OE | MAL_IER_PE)
107
108/* MAL V2 IER bits */
109#define MAL2_IER_PT 0x00000080
110#define MAL2_IER_PRE 0x00000040
111#define MAL2_IER_PWE 0x00000020
112#define MAL2_IER_SOC_EVENTS (MAL2_IER_PT | MAL2_IER_PRE | MAL2_IER_PWE)
113#define MAL2_IER_EVENTS (MAL2_IER_SOC_EVENTS | MAL_IER_DE | \
114 MAL_IER_OTE | MAL_IER_OE | MAL_IER_PE)
115
116
117#define MAL_TXCASR 0x04
118#define MAL_TXCARR 0x05
119#define MAL_TXEOBISR 0x06
120#define MAL_TXDEIR 0x07
121#define MAL_RXCASR 0x10
122#define MAL_RXCARR 0x11
123#define MAL_RXEOBISR 0x12
124#define MAL_RXDEIR 0x13
125#define MAL_TXCTPR(n) ((n) + 0x20)
126#define MAL_RXCTPR(n) ((n) + 0x40)
127#define MAL_RCBS(n) ((n) + 0x60)
128
129/* In reality MAL can handle TX buffers up to 4095 bytes long,
130 * but this isn't a good round number :) --ebs
131 */
132#define MAL_MAX_TX_SIZE 4080
133#define MAL_MAX_RX_SIZE 4080
134
135static inline int mal_rx_size(int len)
136{
137 len = (len + 0xf) & ~0xf;
138 return len > MAL_MAX_RX_SIZE ? MAL_MAX_RX_SIZE : len;
139}
140
141static inline int mal_tx_chunks(int len)
142{
143 return (len + MAL_MAX_TX_SIZE - 1) / MAL_MAX_TX_SIZE;
144}
145
146#define MAL_CHAN_MASK(n) (0x80000000 >> (n))
147
/* MAL Buffer Descriptor structure.
 * Rings of these are allocated coherently (see mal_probe()) and walked
 * directly by the MAL DMA engine, so the layout must not change. */
struct mal_descriptor {
	u16 ctrl;		/* MAL / Commac status control bits */
	u16 data_len;		/* Max length is 4K-1 (12 bits) */
	u32 data_ptr;		/* pointer to actual data buffer */
};
154
155/* the following defines are for the MadMAL status and control registers. */
156/* MADMAL transmit and receive status/control bits */
157#define MAL_RX_CTRL_EMPTY 0x8000
158#define MAL_RX_CTRL_WRAP 0x4000
159#define MAL_RX_CTRL_CM 0x2000
160#define MAL_RX_CTRL_LAST 0x1000
161#define MAL_RX_CTRL_FIRST 0x0800
162#define MAL_RX_CTRL_INTR 0x0400
163#define MAL_RX_CTRL_SINGLE (MAL_RX_CTRL_LAST | MAL_RX_CTRL_FIRST)
164#define MAL_IS_SINGLE_RX(ctrl) (((ctrl) & MAL_RX_CTRL_SINGLE) == MAL_RX_CTRL_SINGLE)
165
166#define MAL_TX_CTRL_READY 0x8000
167#define MAL_TX_CTRL_WRAP 0x4000
168#define MAL_TX_CTRL_CM 0x2000
169#define MAL_TX_CTRL_LAST 0x1000
170#define MAL_TX_CTRL_INTR 0x0400
171
/* Callbacks a MAL client (one EMAC) provides to the shared poller and
 * to the RX descriptor-error handler; @dev is the client's cookie. */
struct mal_commac_ops {
	void	(*poll_tx) (void *dev);			/* reap TX completions */
	int	(*poll_rx) (void *dev, int budget);	/* returns packets taken */
	int	(*peek_rx) (void *dev);			/* non-zero if RX pending */
	void	(*rxde) (void *dev);			/* RX descriptor error */
};
178
/* One registered MAL client (a single EMAC instance). */
struct mal_commac {
	struct mal_commac_ops	*ops;
	void			*dev;
	struct list_head	poll_list;	/* entry on mal->poll_list */
	long       		flags;
#define MAL_COMMAC_RX_STOPPED		0	/* RX halted by a descriptor error */
#define MAL_COMMAC_POLL_DISABLED	1	/* skipped by mal_poll() */
	u32			tx_chan_mask;	/* TX channels this commac owns */
	u32			rx_chan_mask;	/* RX channels this commac owns */
	struct list_head	list;		/* entry on mal->list */
};
190
/* State of one MAL controller instance. */
struct mal_instance {
	int			version;	/* 1 or 2, see comment above */
	dcr_host_t		dcr_host;	/* mapped DCR window */

	int			num_tx_chans;	/* Number of TX channels */
	int			num_rx_chans;	/* Number of RX channels */
	int			txeob_irq;	/* TX End Of Buffer IRQ  */
	int			rxeob_irq;	/* RX End Of Buffer IRQ  */
	int			txde_irq;	/* TX Descriptor Error IRQ */
	int			rxde_irq;	/* RX Descriptor Error IRQ */
	int			serr_irq;	/* MAL System Error IRQ    */

	struct list_head	poll_list;	/* commacs served by mal_poll() */
	struct napi_struct	napi;

	struct list_head	list;		/* registered commacs */
	u32			tx_chan_mask;	/* union of commac TX masks */
	u32			rx_chan_mask;	/* union of commac RX masks */

	dma_addr_t		bd_dma;		/* BD rings, device view */
	struct mal_descriptor	*bd_virt;	/* BD rings, CPU view */

	struct platform_device	*ofdev;
	int			index;		/* mal%d, assigned at probe */
	spinlock_t		lock;		/* guards masks, lists, CASR RMW */

	struct net_device	dummy_dev;	/* exists only to host NAPI */

	unsigned int features;			/* MAL_FTR_* bits */
};
221
/* Read MAL register @reg through the mapped DCR window. */
static inline u32 get_mal_dcrn(struct mal_instance *mal, int reg)
{
	return dcr_read(mal->dcr_host, reg);
}
226
/* Write @val to MAL register @reg through the mapped DCR window. */
static inline void set_mal_dcrn(struct mal_instance *mal, int reg, u32 val)
{
	dcr_write(mal->dcr_host, reg, val);
}
231
232/* Features of various MAL implementations */
233
234/* Set if you have interrupt coalescing and you have to clear the SDR
235 * register for TXEOB and RXEOB interrupts to work
236 */
237#define MAL_FTR_CLEAR_ICINTSTAT 0x00000001
238
239/* Set if your MAL has SERR, TXDE, and RXDE OR'd into a single UIC
240 * interrupt
241 */
242#define MAL_FTR_COMMON_ERR_INT 0x00000002
243
/* MAL_FTRS_ALWAYS: features present on every supported MAL.
 * MAL_FTRS_POSSIBLE: features that may be set per-instance at probe
 * time, compiled in via Kconfig.  mal_has_feature() checks both. */
enum {
	MAL_FTRS_ALWAYS = 0,

	MAL_FTRS_POSSIBLE =
#ifdef CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT
	    MAL_FTR_CLEAR_ICINTSTAT |
#endif
#ifdef CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR
	    MAL_FTR_COMMON_ERR_INT |
#endif
	    0,
};
256
257static inline int mal_has_feature(struct mal_instance *dev,
258 unsigned long feature)
259{
260 return (MAL_FTRS_ALWAYS & feature) ||
261 (MAL_FTRS_POSSIBLE & dev->features & feature);
262}
263
264/* Register MAL devices */
265int mal_init(void);
266void mal_exit(void);
267
268int mal_register_commac(struct mal_instance *mal,
269 struct mal_commac *commac);
270void mal_unregister_commac(struct mal_instance *mal,
271 struct mal_commac *commac);
272int mal_set_rcbs(struct mal_instance *mal, int channel, unsigned long size);
273
274/* Returns BD ring offset for a particular channel
275 (in 'struct mal_descriptor' elements)
276*/
277int mal_tx_bd_offset(struct mal_instance *mal, int channel);
278int mal_rx_bd_offset(struct mal_instance *mal, int channel);
279
280void mal_enable_tx_channel(struct mal_instance *mal, int channel);
281void mal_disable_tx_channel(struct mal_instance *mal, int channel);
282void mal_enable_rx_channel(struct mal_instance *mal, int channel);
283void mal_disable_rx_channel(struct mal_instance *mal, int channel);
284
285void mal_poll_disable(struct mal_instance *mal, struct mal_commac *commac);
286void mal_poll_enable(struct mal_instance *mal, struct mal_commac *commac);
287
288/* Add/remove EMAC to/from MAL polling list */
289void mal_poll_add(struct mal_instance *mal, struct mal_commac *commac);
290void mal_poll_del(struct mal_instance *mal, struct mal_commac *commac);
291
/* Ethtool MAL registers */
/* Raw register snapshot produced by mal_dump_regs(); tx_count/rx_count
 * give the number of valid entries in the per-channel arrays below. */
struct mal_regs {
	u32 tx_count;
	u32 rx_count;

	u32 cfg;
	u32 esr;
	u32 ier;
	u32 tx_casr;
	u32 tx_carr;
	u32 tx_eobisr;
	u32 tx_deir;
	u32 rx_casr;
	u32 rx_carr;
	u32 rx_eobisr;
	u32 rx_deir;
	u32 tx_ctpr[32];	/* per-channel table pointers */
	u32 rx_ctpr[32];
	u32 rcbs[32];		/* per-channel RX buffer sizes */
};
312
313int mal_get_regs_len(struct mal_instance *mal);
314void *mal_dump_regs(struct mal_instance *mal, void *buf);
315
316#endif /* __IBM_NEWEMAC_MAL_H */
diff --git a/drivers/net/ethernet/ibm/emac/phy.c b/drivers/net/ethernet/ibm/emac/phy.c
new file mode 100644
index 00000000000..ab4e5969fe6
--- /dev/null
+++ b/drivers/net/ethernet/ibm/emac/phy.c
@@ -0,0 +1,541 @@
1/*
2 * drivers/net/ibm_newemac/phy.c
3 *
4 * Driver for PowerPC 4xx on-chip ethernet controller, PHY support.
5 * Borrowed from sungem_phy.c, though I only kept the generic MII
6 * driver for now.
7 *
8 * This file should be shared with other drivers or eventually
9 * merged as the "low level" part of miilib
10 *
11 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
12 * <benh@kernel.crashing.org>
13 *
14 * Based on the arch/ppc version of the driver:
15 *
16 * (c) 2003, Benjamin Herrenscmidt (benh@kernel.crashing.org)
17 * (c) 2004-2005, Eugene Surovegin <ebs@ebshome.net>
18 *
19 */
20#include <linux/module.h>
21#include <linux/kernel.h>
22#include <linux/types.h>
23#include <linux/netdevice.h>
24#include <linux/mii.h>
25#include <linux/ethtool.h>
26#include <linux/delay.h>
27
28#include "emac.h"
29#include "phy.h"
30
/* Route the bare phy_read/phy_write names used throughout this file
 * to the local MDIO accessors below.
 */
#define phy_read _phy_read
#define phy_write _phy_write

/* Read a MII register via the host MAC's MDIO hook at the PHY's normal
 * bus address.  Error reporting (negative return) is whatever the
 * mdio_read hook provides.
 */
static inline int _phy_read(struct mii_phy *phy, int reg)
{
	return phy->mdio_read(phy->dev, phy->address, reg);
}

/* Write a MII register via the host MAC's MDIO hook. */
static inline void _phy_write(struct mii_phy *phy, int reg, int val)
{
	phy->mdio_write(phy->dev, phy->address, reg, val);
}

/* Same accessors, but addressing the gigabit PCS side of the device at
 * its separate gpcs_address.
 */
static inline int gpcs_phy_read(struct mii_phy *phy, int reg)
{
	return phy->mdio_read(phy->dev, phy->gpcs_address, reg);
}

static inline void gpcs_phy_write(struct mii_phy *phy, int reg, int val)
{
	phy->mdio_write(phy->dev, phy->gpcs_address, reg, val);
}
53
/* Reset the PHY via BMCR, clearing isolate mode and autonegotiation in
 * the same write, then poll for the self-clearing BMCR_RESET bit.
 *
 * Returns 0 on success, non-zero if the reset never completed within
 * the polling budget.
 */
int emac_mii_reset_phy(struct mii_phy *phy)
{
	int val;
	int limit = 10000;

	val = phy_read(phy, MII_BMCR);
	val &= ~(BMCR_ISOLATE | BMCR_ANENABLE);
	val |= BMCR_RESET;
	phy_write(phy, MII_BMCR, val);

	udelay(300);

	/* Poll until BMCR_RESET drops; failed (negative) reads are retried */
	while (--limit) {
		val = phy_read(phy, MII_BMCR);
		if (val >= 0 && (val & BMCR_RESET) == 0)
			break;
		udelay(10);
	}
	/* Some PHYs come out of reset isolated; un-isolate them */
	if ((val & BMCR_ISOLATE) && limit > 0)
		phy_write(phy, MII_BMCR, val & ~BMCR_ISOLATE);

	return limit <= 0;
}
77
/* Like emac_mii_reset_phy(), but acting on the gigabit PCS interface
 * at phy->gpcs_address.  On success in SGMII mode the GPCS is also
 * programmed to its recommended fixed 1Gbps/FDX configuration.
 *
 * Returns 0 on success, non-zero on reset timeout.
 */
int emac_mii_reset_gpcs(struct mii_phy *phy)
{
	int val;
	int limit = 10000;

	val = gpcs_phy_read(phy, MII_BMCR);
	val &= ~(BMCR_ISOLATE | BMCR_ANENABLE);
	val |= BMCR_RESET;
	gpcs_phy_write(phy, MII_BMCR, val);

	udelay(300);

	/* Poll until the self-clearing reset bit drops */
	while (--limit) {
		val = gpcs_phy_read(phy, MII_BMCR);
		if (val >= 0 && (val & BMCR_RESET) == 0)
			break;
		udelay(10);
	}
	if ((val & BMCR_ISOLATE) && limit > 0)
		gpcs_phy_write(phy, MII_BMCR, val & ~BMCR_ISOLATE);

	if (limit > 0 && phy->mode == PHY_MODE_SGMII) {
		/* Configure GPCS interface to recommended setting for SGMII */
		gpcs_phy_write(phy, 0x04, 0x8120); /* AsymPause, FDX */
		gpcs_phy_write(phy, 0x07, 0x2801); /* msg_pg, toggle */
		gpcs_phy_write(phy, 0x00, 0x0140); /* 1Gbps, FDX */
	}

	return limit <= 0;
}
108
/* Program the PHY for autonegotiation, advertising the link modes given
 * in 'advertise' (ethtool ADVERTISED_* bits).  The cached link state in
 * *phy is reset to 10/half until the next genmii_read_link() after
 * negotiation completes.
 *
 * Returns 0 on success or a negative MDIO read error.
 */
static int genmii_setup_aneg(struct mii_phy *phy, u32 advertise)
{
	int ctl, adv;

	phy->autoneg = AUTONEG_ENABLE;
	phy->speed = SPEED_10;
	phy->duplex = DUPLEX_HALF;
	phy->pause = phy->asym_pause = 0;
	phy->advertising = advertise;

	ctl = phy_read(phy, MII_BMCR);
	if (ctl < 0)
		return ctl;
	ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);

	/* First clear the PHY */
	phy_write(phy, MII_BMCR, ctl);

	/* Setup standard advertise */
	adv = phy_read(phy, MII_ADVERTISE);
	if (adv < 0)
		return adv;
	adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP |
		 ADVERTISE_PAUSE_ASYM);
	if (advertise & ADVERTISED_10baseT_Half)
		adv |= ADVERTISE_10HALF;
	if (advertise & ADVERTISED_10baseT_Full)
		adv |= ADVERTISE_10FULL;
	if (advertise & ADVERTISED_100baseT_Half)
		adv |= ADVERTISE_100HALF;
	if (advertise & ADVERTISED_100baseT_Full)
		adv |= ADVERTISE_100FULL;
	if (advertise & ADVERTISED_Pause)
		adv |= ADVERTISE_PAUSE_CAP;
	if (advertise & ADVERTISED_Asym_Pause)
		adv |= ADVERTISE_PAUSE_ASYM;
	phy_write(phy, MII_ADVERTISE, adv);

	/* Gigabit modes are advertised through a separate register */
	if (phy->features &
	    (SUPPORTED_1000baseT_Full | SUPPORTED_1000baseT_Half)) {
		adv = phy_read(phy, MII_CTRL1000);
		if (adv < 0)
			return adv;
		adv &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
		if (advertise & ADVERTISED_1000baseT_Full)
			adv |= ADVERTISE_1000FULL;
		if (advertise & ADVERTISED_1000baseT_Half)
			adv |= ADVERTISE_1000HALF;
		phy_write(phy, MII_CTRL1000, adv);
	}

	/* Start/Restart aneg */
	ctl = phy_read(phy, MII_BMCR);
	ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
	phy_write(phy, MII_BMCR, ctl);

	return 0;
}
167
/* Force the PHY to a fixed speed/duplex with autonegotiation disabled.
 * 'speed' is one of SPEED_10/100/1000; 'fd' is DUPLEX_FULL or
 * DUPLEX_HALF.  The new settings are also cached in *phy.
 *
 * Returns 0 on success, a negative MDIO read error, or -EINVAL for an
 * unsupported speed.
 */
static int genmii_setup_forced(struct mii_phy *phy, int speed, int fd)
{
	int ctl;

	phy->autoneg = AUTONEG_DISABLE;
	phy->speed = speed;
	phy->duplex = fd;
	phy->pause = phy->asym_pause = 0;

	ctl = phy_read(phy, MII_BMCR);
	if (ctl < 0)
		return ctl;
	ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);

	/* First clear the PHY (reset with aneg already masked off) */
	phy_write(phy, MII_BMCR, ctl | BMCR_RESET);

	/* Select speed & duplex */
	switch (speed) {
	case SPEED_10:
		break;		/* both speed bits clear means 10Mbps */
	case SPEED_100:
		ctl |= BMCR_SPEED100;
		break;
	case SPEED_1000:
		ctl |= BMCR_SPEED1000;
		break;
	default:
		return -EINVAL;
	}
	if (fd == DUPLEX_FULL)
		ctl |= BMCR_FULLDPLX;
	phy_write(phy, MII_BMCR, ctl);

	return 0;
}
204
205static int genmii_poll_link(struct mii_phy *phy)
206{
207 int status;
208
209 /* Clear latched value with dummy read */
210 phy_read(phy, MII_BMSR);
211 status = phy_read(phy, MII_BMSR);
212 if (status < 0 || (status & BMSR_LSTATUS) == 0)
213 return 0;
214 if (phy->autoneg == AUTONEG_ENABLE && !(status & BMSR_ANEGCOMPLETE))
215 return 0;
216 return 1;
217}
218
219static int genmii_read_link(struct mii_phy *phy)
220{
221 if (phy->autoneg == AUTONEG_ENABLE) {
222 int glpa = 0;
223 int lpa = phy_read(phy, MII_LPA) & phy_read(phy, MII_ADVERTISE);
224 if (lpa < 0)
225 return lpa;
226
227 if (phy->features &
228 (SUPPORTED_1000baseT_Full | SUPPORTED_1000baseT_Half)) {
229 int adv = phy_read(phy, MII_CTRL1000);
230 glpa = phy_read(phy, MII_STAT1000);
231
232 if (glpa < 0 || adv < 0)
233 return adv;
234
235 glpa &= adv << 2;
236 }
237
238 phy->speed = SPEED_10;
239 phy->duplex = DUPLEX_HALF;
240 phy->pause = phy->asym_pause = 0;
241
242 if (glpa & (LPA_1000FULL | LPA_1000HALF)) {
243 phy->speed = SPEED_1000;
244 if (glpa & LPA_1000FULL)
245 phy->duplex = DUPLEX_FULL;
246 } else if (lpa & (LPA_100FULL | LPA_100HALF)) {
247 phy->speed = SPEED_100;
248 if (lpa & LPA_100FULL)
249 phy->duplex = DUPLEX_FULL;
250 } else if (lpa & LPA_10FULL)
251 phy->duplex = DUPLEX_FULL;
252
253 if (phy->duplex == DUPLEX_FULL) {
254 phy->pause = lpa & LPA_PAUSE_CAP ? 1 : 0;
255 phy->asym_pause = lpa & LPA_PAUSE_ASYM ? 1 : 0;
256 }
257 } else {
258 int bmcr = phy_read(phy, MII_BMCR);
259 if (bmcr < 0)
260 return bmcr;
261
262 if (bmcr & BMCR_FULLDPLX)
263 phy->duplex = DUPLEX_FULL;
264 else
265 phy->duplex = DUPLEX_HALF;
266 if (bmcr & BMCR_SPEED1000)
267 phy->speed = SPEED_1000;
268 else if (bmcr & BMCR_SPEED100)
269 phy->speed = SPEED_100;
270 else
271 phy->speed = SPEED_10;
272
273 phy->pause = phy->asym_pause = 0;
274 }
275 return 0;
276}
277
278/* Generic implementation for most 10/100/1000 PHYs */
279static struct mii_phy_ops generic_phy_ops = {
280 .setup_aneg = genmii_setup_aneg,
281 .setup_forced = genmii_setup_forced,
282 .poll_link = genmii_poll_link,
283 .read_link = genmii_read_link
284};
285
286static struct mii_phy_def genmii_phy_def = {
287 .phy_id = 0x00000000,
288 .phy_id_mask = 0x00000000,
289 .name = "Generic MII",
290 .ops = &generic_phy_ops
291};
292
293/* CIS8201 */
294#define MII_CIS8201_10BTCSR 0x16
295#define TENBTCSR_ECHO_DISABLE 0x2000
296#define MII_CIS8201_EPCR 0x17
297#define EPCR_MODE_MASK 0x3000
298#define EPCR_GMII_MODE 0x0000
299#define EPCR_RGMII_MODE 0x1000
300#define EPCR_TBI_MODE 0x2000
301#define EPCR_RTBI_MODE 0x3000
302#define MII_CIS8201_ACSR 0x1c
303#define ACSR_PIN_PRIO_SELECT 0x0004
304
/* CIS8201 chip-specific init: select the MAC interface mode in the
 * extended PHY control register, make MII register settings override
 * the strap pins, and disable TX_EN -> CRS echo.
 *
 * Returns 0 on success or a negative MDIO read error.
 */
static int cis8201_init(struct mii_phy *phy)
{
	int epcr;

	epcr = phy_read(phy, MII_CIS8201_EPCR);
	if (epcr < 0)
		return epcr;

	epcr &= ~EPCR_MODE_MASK;

	switch (phy->mode) {
	case PHY_MODE_TBI:
		epcr |= EPCR_TBI_MODE;
		break;
	case PHY_MODE_RTBI:
		epcr |= EPCR_RTBI_MODE;
		break;
	case PHY_MODE_GMII:
		epcr |= EPCR_GMII_MODE;
		break;
	case PHY_MODE_RGMII:
	default:
		/* RGMII is also the fallback for unrecognized modes */
		epcr |= EPCR_RGMII_MODE;
	}

	phy_write(phy, MII_CIS8201_EPCR, epcr);

	/* MII regs override strap pins */
	phy_write(phy, MII_CIS8201_ACSR,
		  phy_read(phy, MII_CIS8201_ACSR) | ACSR_PIN_PRIO_SELECT);

	/* Disable TX_EN -> CRS echo mode, otherwise 10/HDX doesn't work */
	phy_write(phy, MII_CIS8201_10BTCSR,
		  phy_read(phy, MII_CIS8201_10BTCSR) | TENBTCSR_ECHO_DISABLE);

	return 0;
}
342
/* CIS8201: generic MII handling plus the chip-specific init above */
static struct mii_phy_ops cis8201_phy_ops = {
	.init = cis8201_init,
	.setup_aneg = genmii_setup_aneg,
	.setup_forced = genmii_setup_forced,
	.poll_link = genmii_poll_link,
	.read_link = genmii_read_link
};

static struct mii_phy_def cis8201_phy_def = {
	.phy_id = 0x000fc410,
	.phy_id_mask = 0x000ffff0,
	.name = "CIS8201 Gigabit Ethernet",
	.ops = &cis8201_phy_ops
};
357
/* BCM5248 needs no chip-specific init; generic ops suffice */
static struct mii_phy_def bcm5248_phy_def = {

	.phy_id = 0x0143bc00,
	.phy_id_mask = 0x0ffffff0,
	.name = "BCM5248 10/100 SMII Ethernet",
	.ops = &generic_phy_ops
};
365
/* Marvell 88E1111 chip-specific init: raw register writes carried over
 * from the arch/ppc driver, followed by a BMCR soft reset.
 * NOTE(review): 0x14/0x18 presumably configure the extended control /
 * RGMII clock-skew registers and 0x09 the 1000BASE-T control — confirm
 * bit meanings against the 88E1111 datasheet before changing.
 */
static int m88e1111_init(struct mii_phy *phy)
{
	pr_debug("%s: Marvell 88E1111 Ethernet\n", __func__);
	phy_write(phy, 0x14, 0x0ce3);
	phy_write(phy, 0x18, 0x4101);
	phy_write(phy, 0x09, 0x0e00);
	phy_write(phy, 0x04, 0x01e1);
	/* BMCR: 0x9140 = reset | 0x1140 (aneg, 1000, FDX); reset is
	 * self-clearing, then re-write the settings without it */
	phy_write(phy, 0x00, 0x9140);
	phy_write(phy, 0x00, 0x1140);

	return 0;
}
378
/* Marvell 88E1112 chip-specific init.
 *
 * Configures the SGMII MAC interface (register page 2) so the PHY can
 * talk to the 460EX/GT GPCS interface: force 1Gbps, bypass SGMII
 * autonegotiation, then reset the MAC interface and restore page 0.
 */
static int m88e1112_init(struct mii_phy *phy)
{
	/*
	 * Marvell 88E1112 PHY needs to have the SGMII MAC
	 * interace (page 2) properly configured to
	 * communicate with the 460EX/GT GPCS interface.
	 */

	u16 reg_short;

	pr_debug("%s: Marvell 88E1112 Ethernet\n", __func__);

	/* Set access to Page 2 */
	phy_write(phy, 0x16, 0x0002);

	phy_write(phy, 0x00, 0x0040); /* 1Gbps */
	reg_short = (u16)(phy_read(phy, 0x1a));
	reg_short |= 0x8000; /* bypass Auto-Negotiation */
	phy_write(phy, 0x1a, reg_short);
	emac_mii_reset_phy(phy); /* reset MAC interface */

	/* Reset access to Page 0 */
	phy_write(phy, 0x16, 0x0000);

	return 0;
}
405
/* Agere/LSI ET1011C chip-specific init: set the RGMII trace delay and
 * tweak registers 0x17/0x1c.
 * NOTE(review): the 0x17 bit-0x40 clear and the 0x1c value 0x74f0 are
 * carried over verbatim; exact bit meanings are per the ET1011C
 * datasheet — confirm before changing.
 */
static int et1011c_init(struct mii_phy *phy)
{
	u16 reg_short;

	reg_short = (u16)(phy_read(phy, 0x16));
	reg_short &= ~(0x7);
	reg_short |= 0x6; /* RGMII Trace Delay*/
	phy_write(phy, 0x16, reg_short);

	reg_short = (u16)(phy_read(phy, 0x17));
	reg_short &= ~(0x40);
	phy_write(phy, 0x17, reg_short);

	phy_write(phy, 0x1c, 0x74f0);
	return 0;
}
422
/* ET1011C: generic MII handling plus the chip-specific init above */
static struct mii_phy_ops et1011c_phy_ops = {
	.init = et1011c_init,
	.setup_aneg = genmii_setup_aneg,
	.setup_forced = genmii_setup_forced,
	.poll_link = genmii_poll_link,
	.read_link = genmii_read_link
};

static struct mii_phy_def et1011c_phy_def = {
	.phy_id = 0x0282f000,
	.phy_id_mask = 0x0fffff00,
	.name = "ET1011C Gigabit Ethernet",
	.ops = &et1011c_phy_ops
};
437
438
439
440
441
/* Marvell 88E1111: generic MII handling plus chip-specific init */
static struct mii_phy_ops m88e1111_phy_ops = {
	.init = m88e1111_init,
	.setup_aneg = genmii_setup_aneg,
	.setup_forced = genmii_setup_forced,
	.poll_link = genmii_poll_link,
	.read_link = genmii_read_link
};

static struct mii_phy_def m88e1111_phy_def = {

	.phy_id = 0x01410CC0,
	.phy_id_mask = 0x0ffffff0,
	.name = "Marvell 88E1111 Ethernet",
	.ops = &m88e1111_phy_ops,
};

/* Marvell 88E1112: generic MII handling plus SGMII page-2 init */
static struct mii_phy_ops m88e1112_phy_ops = {
	.init = m88e1112_init,
	.setup_aneg = genmii_setup_aneg,
	.setup_forced = genmii_setup_forced,
	.poll_link = genmii_poll_link,
	.read_link = genmii_read_link
};

static struct mii_phy_def m88e1112_phy_def = {
	.phy_id = 0x01410C90,
	.phy_id_mask = 0x0ffffff0,
	.name = "Marvell 88E1112 Ethernet",
	.ops = &m88e1112_phy_ops,
};
472
/* Probe table searched in order by emac_mii_phy_probe().  The generic
 * entry matches every PHY ID, so it must stay last.
 */
static struct mii_phy_def *mii_phy_table[] = {
	&et1011c_phy_def,
	&cis8201_phy_def,
	&bcm5248_phy_def,
	&m88e1111_phy_def,
	&m88e1112_phy_def,
	&genmii_phy_def,
	NULL
};
482
483int emac_mii_phy_probe(struct mii_phy *phy, int address)
484{
485 struct mii_phy_def *def;
486 int i;
487 u32 id;
488
489 phy->autoneg = AUTONEG_DISABLE;
490 phy->advertising = 0;
491 phy->address = address;
492 phy->speed = SPEED_10;
493 phy->duplex = DUPLEX_HALF;
494 phy->pause = phy->asym_pause = 0;
495
496 /* Take PHY out of isolate mode and reset it. */
497 if (emac_mii_reset_phy(phy))
498 return -ENODEV;
499
500 /* Read ID and find matching entry */
501 id = (phy_read(phy, MII_PHYSID1) << 16) | phy_read(phy, MII_PHYSID2);
502 for (i = 0; (def = mii_phy_table[i]) != NULL; i++)
503 if ((id & def->phy_id_mask) == def->phy_id)
504 break;
505 /* Should never be NULL (we have a generic entry), but... */
506 if (!def)
507 return -ENODEV;
508
509 phy->def = def;
510
511 /* Determine PHY features if needed */
512 phy->features = def->features;
513 if (!phy->features) {
514 u16 bmsr = phy_read(phy, MII_BMSR);
515 if (bmsr & BMSR_ANEGCAPABLE)
516 phy->features |= SUPPORTED_Autoneg;
517 if (bmsr & BMSR_10HALF)
518 phy->features |= SUPPORTED_10baseT_Half;
519 if (bmsr & BMSR_10FULL)
520 phy->features |= SUPPORTED_10baseT_Full;
521 if (bmsr & BMSR_100HALF)
522 phy->features |= SUPPORTED_100baseT_Half;
523 if (bmsr & BMSR_100FULL)
524 phy->features |= SUPPORTED_100baseT_Full;
525 if (bmsr & BMSR_ESTATEN) {
526 u16 esr = phy_read(phy, MII_ESTATUS);
527 if (esr & ESTATUS_1000_TFULL)
528 phy->features |= SUPPORTED_1000baseT_Full;
529 if (esr & ESTATUS_1000_THALF)
530 phy->features |= SUPPORTED_1000baseT_Half;
531 }
532 phy->features |= SUPPORTED_MII;
533 }
534
535 /* Setup default advertising */
536 phy->advertising = phy->features;
537
538 return 0;
539}
540
541MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/ibm/emac/phy.h b/drivers/net/ethernet/ibm/emac/phy.h
new file mode 100644
index 00000000000..5d2bf4cbe50
--- /dev/null
+++ b/drivers/net/ethernet/ibm/emac/phy.h
@@ -0,0 +1,87 @@
1/*
2 * drivers/net/ibm_newemac/phy.h
3 *
4 * Driver for PowerPC 4xx on-chip ethernet controller, PHY support
5 *
6 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
7 * <benh@kernel.crashing.org>
8 *
9 * Based on the arch/ppc version of the driver:
10 *
11 * Benjamin Herrenschmidt <benh@kernel.crashing.org>
12 * February 2003
13 *
14 * Minor additions by Eugene Surovegin <ebs@ebshome.net>, 2004
15 *
16 * This program is free software; you can redistribute it and/or modify it
17 * under the terms of the GNU General Public License as published by the
18 * Free Software Foundation; either version 2 of the License, or (at your
19 * option) any later version.
20 *
21 * This file basically duplicates sungem_phy.{c,h} with different PHYs
22 * supported. I'm looking into merging that in a single mii layer more
23 * flexible than mii.c
24 */
25
26#ifndef __IBM_NEWEMAC_PHY_H
27#define __IBM_NEWEMAC_PHY_H
28
29struct mii_phy;
30
31/* Operations supported by any kind of PHY */
/* Operations supported by any kind of PHY.  init/suspend are optional
 * (may be NULL); the setup/poll/read hooks return 0 or a negative
 * error, except poll_link which returns 1 for link-up, 0 otherwise.
 */
struct mii_phy_ops {
	int (*init) (struct mii_phy * phy);
	int (*suspend) (struct mii_phy * phy, int wol_options);
	int (*setup_aneg) (struct mii_phy * phy, u32 advertise);
	int (*setup_forced) (struct mii_phy * phy, int speed, int fd);
	int (*poll_link) (struct mii_phy * phy);
	int (*read_link) (struct mii_phy * phy);
};
40
41/* Structure used to statically define an mii/gii based PHY */
/* Structure used to statically define an mii/gii based PHY */
struct mii_phy_def {
	u32 phy_id;		/* Concatenated ID1 << 16 | ID2 */
	u32 phy_id_mask;	/* Significant bits */
	u32 features;		/* Ethtool SUPPORTED_* defines or
				   0 for autodetect */
	int magic_aneg;		/* Autoneg does all speed test for us */
	const char *name;
	const struct mii_phy_ops *ops;
};
51
52/* An instance of a PHY, partially borrowed from mii_if_info */
/* An instance of a PHY, partially borrowed from mii_if_info */
struct mii_phy {
	struct mii_phy_def *def;	/* matched table entry */
	u32 advertising;	/* Ethtool ADVERTISED_* defines */
	u32 features;		/* Copied from mii_phy_def.features
				   or determined automatically */
	int address;		/* PHY address */
	int mode;		/* PHY mode */
	int gpcs_address;	/* GPCS PHY address */

	/* 1: autoneg enabled, 0: disabled */
	int autoneg;

	/* forced speed & duplex (no autoneg)
	 * partner speed & duplex & pause (autoneg)
	 */
	int speed;
	int duplex;
	int pause;
	int asym_pause;

	/* Provided by host chip */
	struct net_device *dev;
	int (*mdio_read) (struct net_device * dev, int addr, int reg);
	void (*mdio_write) (struct net_device * dev, int addr, int reg,
			    int val);
};
79
80/* Pass in a struct mii_phy with dev, mdio_read and mdio_write
81 * filled, the remaining fields will be filled on return
82 */
83int emac_mii_phy_probe(struct mii_phy *phy, int address);
84int emac_mii_reset_phy(struct mii_phy *phy);
85int emac_mii_reset_gpcs(struct mii_phy *phy);
86
87#endif /* __IBM_NEWEMAC_PHY_H */
diff --git a/drivers/net/ethernet/ibm/emac/rgmii.c b/drivers/net/ethernet/ibm/emac/rgmii.c
new file mode 100644
index 00000000000..4fa53f3def6
--- /dev/null
+++ b/drivers/net/ethernet/ibm/emac/rgmii.c
@@ -0,0 +1,338 @@
1/*
2 * drivers/net/ibm_newemac/rgmii.c
3 *
4 * Driver for PowerPC 4xx on-chip ethernet controller, RGMII bridge support.
5 *
6 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
7 * <benh@kernel.crashing.org>
8 *
9 * Based on the arch/ppc version of the driver:
10 *
11 * Copyright (c) 2004, 2005 Zultys Technologies.
12 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
13 *
14 * Based on original work by
15 * Matt Porter <mporter@kernel.crashing.org>
16 * Copyright 2004 MontaVista Software, Inc.
17 *
18 * This program is free software; you can redistribute it and/or modify it
19 * under the terms of the GNU General Public License as published by the
20 * Free Software Foundation; either version 2 of the License, or (at your
21 * option) any later version.
22 *
23 */
24#include <linux/slab.h>
25#include <linux/kernel.h>
26#include <linux/ethtool.h>
27#include <asm/io.h>
28
29#include "emac.h"
30#include "debug.h"
31
32// XXX FIXME: Axon seems to support a subset of the RGMII, we
33// thus need to take that into account and possibly change some
34// of the bit settings below that don't seem to quite match the
35// AXON spec
36
37/* RGMIIx_FER */
38#define RGMII_FER_MASK(idx) (0x7 << ((idx) * 4))
39#define RGMII_FER_RTBI(idx) (0x4 << ((idx) * 4))
40#define RGMII_FER_RGMII(idx) (0x5 << ((idx) * 4))
41#define RGMII_FER_TBI(idx) (0x6 << ((idx) * 4))
42#define RGMII_FER_GMII(idx) (0x7 << ((idx) * 4))
43#define RGMII_FER_MII(idx) RGMII_FER_GMII(idx)
44
45/* RGMIIx_SSR */
46#define RGMII_SSR_MASK(idx) (0x7 << ((idx) * 8))
47#define RGMII_SSR_100(idx) (0x2 << ((idx) * 8))
48#define RGMII_SSR_1000(idx) (0x4 << ((idx) * 8))
49
50/* RGMII bridge supports only GMII/TBI and RGMII/RTBI PHYs */
51static inline int rgmii_valid_mode(int phy_mode)
52{
53 return phy_mode == PHY_MODE_GMII ||
54 phy_mode == PHY_MODE_MII ||
55 phy_mode == PHY_MODE_RGMII ||
56 phy_mode == PHY_MODE_TBI ||
57 phy_mode == PHY_MODE_RTBI;
58}
59
/* Human-readable name for a bridge-supported PHY mode.  BUG()s on any
 * mode rgmii_valid_mode() would reject — callers must validate first.
 */
static inline const char *rgmii_mode_name(int mode)
{
	switch (mode) {
	case PHY_MODE_RGMII:
		return "RGMII";
	case PHY_MODE_TBI:
		return "TBI";
	case PHY_MODE_GMII:
		return "GMII";
	case PHY_MODE_MII:
		return "MII";
	case PHY_MODE_RTBI:
		return "RTBI";
	default:
		BUG();
	}
}
77
/* RGMIIx_FER bit pattern enabling 'mode' on bridge input 'input'.
 * BUG()s on unsupported modes — callers must validate first.
 */
static inline u32 rgmii_mode_mask(int mode, int input)
{
	switch (mode) {
	case PHY_MODE_RGMII:
		return RGMII_FER_RGMII(input);
	case PHY_MODE_TBI:
		return RGMII_FER_TBI(input);
	case PHY_MODE_GMII:
		return RGMII_FER_GMII(input);
	case PHY_MODE_MII:
		return RGMII_FER_MII(input);
	case PHY_MODE_RTBI:
		return RGMII_FER_RTBI(input);
	default:
		BUG();
	}
}
95
/* Attach an EMAC to one input of this RGMII bridge: validate the PHY
 * mode, enable the input in the function-enable register and bump the
 * user count.  Returns 0 on success, -ENODEV for an invalid input or
 * unsupported mode.
 */
int __devinit rgmii_attach(struct platform_device *ofdev, int input, int mode)
{
	struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev);
	struct rgmii_regs __iomem *p = dev->base;

	RGMII_DBG(dev, "attach(%d)" NL, input);

	/* Check if we need to attach to a RGMII */
	if (input < 0 || !rgmii_valid_mode(mode)) {
		printk(KERN_ERR "%s: unsupported settings !\n",
		       ofdev->dev.of_node->full_name);
		return -ENODEV;
	}

	mutex_lock(&dev->lock);

	/* Enable this input */
	out_be32(&p->fer, in_be32(&p->fer) | rgmii_mode_mask(mode, input));

	printk(KERN_NOTICE "%s: input %d in %s mode\n",
	       ofdev->dev.of_node->full_name, input, rgmii_mode_name(mode));

	++dev->users;

	mutex_unlock(&dev->lock);

	return 0;
}
124
/* Program the speed-select register for one bridge input.  Any speed
 * other than SPEED_1000/SPEED_100 leaves the field cleared (10Mbps).
 */
void rgmii_set_speed(struct platform_device *ofdev, int input, int speed)
{
	struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev);
	struct rgmii_regs __iomem *p = dev->base;
	u32 ssr;

	mutex_lock(&dev->lock);

	/* Clear this input's field, then set the new speed bits */
	ssr = in_be32(&p->ssr) & ~RGMII_SSR_MASK(input);

	RGMII_DBG(dev, "speed(%d, %d)" NL, input, speed);

	if (speed == SPEED_1000)
		ssr |= RGMII_SSR_1000(input);
	else if (speed == SPEED_100)
		ssr |= RGMII_SSR_100(input);

	out_be32(&p->ssr, ssr);

	mutex_unlock(&dev->lock);
}
146
/* Route the shared MDIO bus to 'input' and take the bridge mutex.
 *
 * NOTE: deliberately asymmetric locking — the mutex acquired here is
 * held across the caller's MDIO transaction and released by the
 * matching rgmii_put_mdio().  No-op on bridges without MDIO routing.
 */
void rgmii_get_mdio(struct platform_device *ofdev, int input)
{
	struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev);
	struct rgmii_regs __iomem *p = dev->base;
	u32 fer;

	RGMII_DBG2(dev, "get_mdio(%d)" NL, input);

	if (!(dev->flags & EMAC_RGMII_FLAG_HAS_MDIO))
		return;

	mutex_lock(&dev->lock);

	fer = in_be32(&p->fer);
	fer |= 0x00080000u >> input;	/* per-input MDIO enable bit */
	out_be32(&p->fer, fer);
	(void)in_be32(&p->fer);		/* read back to post the write */

	DBG2(dev, " fer = 0x%08x\n", fer);
}
167
/* Release the MDIO bus routing for 'input' and drop the bridge mutex
 * taken by rgmii_get_mdio().  No-op (and no unlock) on bridges without
 * MDIO routing, mirroring the no-lock path in get_mdio.
 */
void rgmii_put_mdio(struct platform_device *ofdev, int input)
{
	struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev);
	struct rgmii_regs __iomem *p = dev->base;
	u32 fer;

	RGMII_DBG2(dev, "put_mdio(%d)" NL, input);

	if (!(dev->flags & EMAC_RGMII_FLAG_HAS_MDIO))
		return;

	fer = in_be32(&p->fer);
	fer &= ~(0x00080000u >> input);	/* clear per-input MDIO enable */
	out_be32(&p->fer, fer);
	(void)in_be32(&p->fer);		/* read back to post the write */

	DBG2(dev, " fer = 0x%08x\n", fer);

	mutex_unlock(&dev->lock);
}
188
/* Detach an EMAC from a bridge input: disable the input in the
 * function-enable register and drop the user count.  Must balance a
 * successful rgmii_attach().
 */
void rgmii_detach(struct platform_device *ofdev, int input)
{
	struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev);
	struct rgmii_regs __iomem *p;

	BUG_ON(!dev || dev->users == 0);
	p = dev->base;

	mutex_lock(&dev->lock);

	RGMII_DBG(dev, "detach(%d)" NL, input);

	/* Disable this input */
	out_be32(&p->fer, in_be32(&p->fer) & ~RGMII_FER_MASK(input));

	--dev->users;

	mutex_unlock(&dev->lock);
}
208
/* Size of the blob rgmii_dump_regs() writes: subheader + registers */
int rgmii_get_regs_len(struct platform_device *ofdev)
{
	return sizeof(struct emac_ethtool_regs_subhdr) +
		sizeof(struct rgmii_regs);
}

/* Copy the RGMII registers into an ethtool regs buffer at 'buf',
 * preceded by a subheader.  Returns the position just past the data
 * so the caller can append further sections.
 */
void *rgmii_dump_regs(struct platform_device *ofdev, void *buf)
{
	struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev);
	struct emac_ethtool_regs_subhdr *hdr = buf;
	struct rgmii_regs *regs = (struct rgmii_regs *)(hdr + 1);

	hdr->version = 0;
	hdr->index = 0; /* for now, are there chips with more than one
			 * rgmii ? if yes, then we'll add a cell_index
			 * like we do for emac
			 */
	memcpy_fromio(regs, dev->base, sizeof(struct rgmii_regs));
	return regs + 1;
}
229
230
/* Probe an RGMII bridge node: allocate and initialize the instance,
 * map its registers, detect MDIO support from the device tree and
 * disable all inputs until EMACs attach.  Returns 0 or a negative
 * errno; on failure everything allocated here is released.
 */
static int __devinit rgmii_probe(struct platform_device *ofdev)
{
	struct device_node *np = ofdev->dev.of_node;
	struct rgmii_instance *dev;
	struct resource regs;
	int rc;

	rc = -ENOMEM;
	dev = kzalloc(sizeof(struct rgmii_instance), GFP_KERNEL);
	if (dev == NULL) {
		printk(KERN_ERR "%s: could not allocate RGMII device!\n",
		       np->full_name);
		goto err_gone;
	}

	mutex_init(&dev->lock);
	dev->ofdev = ofdev;

	rc = -ENXIO;
	if (of_address_to_resource(np, 0, &regs)) {
		printk(KERN_ERR "%s: Can't get registers address\n",
		       np->full_name);
		goto err_free;
	}

	rc = -ENOMEM;
	dev->base = (struct rgmii_regs __iomem *)ioremap(regs.start,
						 sizeof(struct rgmii_regs));
	if (dev->base == NULL) {
		printk(KERN_ERR "%s: Can't map device registers!\n",
		       np->full_name);
		goto err_free;
	}

	/* Check for RGMII flags */
	if (of_get_property(ofdev->dev.of_node, "has-mdio", NULL))
		dev->flags |= EMAC_RGMII_FLAG_HAS_MDIO;

	/* CAB lacks the right properties, fix this up */
	if (of_device_is_compatible(ofdev->dev.of_node, "ibm,rgmii-axon"))
		dev->flags |= EMAC_RGMII_FLAG_HAS_MDIO;

	DBG2(dev, " Boot FER = 0x%08x, SSR = 0x%08x\n",
	     in_be32(&dev->base->fer), in_be32(&dev->base->ssr));

	/* Disable all inputs by default */
	out_be32(&dev->base->fer, 0);

	printk(KERN_INFO
	       "RGMII %s initialized with%s MDIO support\n",
	       ofdev->dev.of_node->full_name,
	       (dev->flags & EMAC_RGMII_FLAG_HAS_MDIO) ? "" : "out");

	/* Publish the instance only after it is fully set up */
	wmb();
	dev_set_drvdata(&ofdev->dev, dev);

	return 0;

 err_free:
	kfree(dev);
 err_gone:
	return rc;
}
294
/* Tear down an RGMII bridge instance: unpublish it, unmap the
 * registers and free the memory.  Warns if EMACs are still attached.
 */
static int __devexit rgmii_remove(struct platform_device *ofdev)
{
	struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev);

	dev_set_drvdata(&ofdev->dev, NULL);

	WARN_ON(dev->users != 0);

	iounmap(dev->base);
	kfree(dev);

	return 0;
}
308
309static struct of_device_id rgmii_match[] =
310{
311 {
312 .compatible = "ibm,rgmii",
313 },
314 {
315 .type = "emac-rgmii",
316 },
317 {},
318};
319
320static struct platform_driver rgmii_driver = {
321 .driver = {
322 .name = "emac-rgmii",
323 .owner = THIS_MODULE,
324 .of_match_table = rgmii_match,
325 },
326 .probe = rgmii_probe,
327 .remove = rgmii_remove,
328};
329
330int __init rgmii_init(void)
331{
332 return platform_driver_register(&rgmii_driver);
333}
334
335void rgmii_exit(void)
336{
337 platform_driver_unregister(&rgmii_driver);
338}
diff --git a/drivers/net/ethernet/ibm/emac/rgmii.h b/drivers/net/ethernet/ibm/emac/rgmii.h
new file mode 100644
index 00000000000..d6979904986
--- /dev/null
+++ b/drivers/net/ethernet/ibm/emac/rgmii.h
@@ -0,0 +1,82 @@
1/*
2 * drivers/net/ibm_newemac/rgmii.h
3 *
4 * Driver for PowerPC 4xx on-chip ethernet controller, RGMII bridge support.
5 *
6 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
7 * <benh@kernel.crashing.org>
8 *
9 * Based on the arch/ppc version of the driver:
10 *
11 * Based on ocp_zmii.h/ibm_emac_zmii.h
12 * Armin Kuster akuster@mvista.com
13 *
14 * Copyright 2004 MontaVista Software, Inc.
15 * Matt Porter <mporter@kernel.crashing.org>
16 *
17 * Copyright (c) 2004, 2005 Zultys Technologies.
18 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
19 *
20 * This program is free software; you can redistribute it and/or modify it
21 * under the terms of the GNU General Public License as published by the
22 * Free Software Foundation; either version 2 of the License, or (at your
23 * option) any later version.
24 */
25
26#ifndef __IBM_NEWEMAC_RGMII_H
27#define __IBM_NEWEMAC_RGMII_H
28
29/* RGMII bridge type */
30#define RGMII_STANDARD 0
31#define RGMII_AXON 1
32
/* RGMII bridge register block (big-endian MMIO) */
struct rgmii_regs {
	u32 fer;		/* Function enable register */
	u32 ssr;		/* Speed select register */
};

/* RGMII device */
struct rgmii_instance {
	struct rgmii_regs __iomem *base;	/* mapped registers */

	/* RGMII bridge flags */
	int flags;
#define EMAC_RGMII_FLAG_HAS_MDIO	0x00000001

	/* Only one EMAC whacks us at a time */
	struct mutex lock;

	/* number of EMACs using this RGMII bridge */
	int users;

	/* OF device instance */
	struct platform_device *ofdev;
};
56
57#ifdef CONFIG_IBM_NEW_EMAC_RGMII
58
59extern int rgmii_init(void);
60extern void rgmii_exit(void);
61extern int rgmii_attach(struct platform_device *ofdev, int input, int mode);
62extern void rgmii_detach(struct platform_device *ofdev, int input);
63extern void rgmii_get_mdio(struct platform_device *ofdev, int input);
64extern void rgmii_put_mdio(struct platform_device *ofdev, int input);
65extern void rgmii_set_speed(struct platform_device *ofdev, int input, int speed);
66extern int rgmii_get_regs_len(struct platform_device *ofdev);
67extern void *rgmii_dump_regs(struct platform_device *ofdev, void *buf);
68
69#else
70
71# define rgmii_init() 0
72# define rgmii_exit() do { } while(0)
73# define rgmii_attach(x,y,z) (-ENXIO)
74# define rgmii_detach(x,y) do { } while(0)
75# define rgmii_get_mdio(o,i) do { } while (0)
76# define rgmii_put_mdio(o,i) do { } while (0)
77# define rgmii_set_speed(x,y,z) do { } while(0)
78# define rgmii_get_regs_len(x) 0
79# define rgmii_dump_regs(x,buf) (buf)
80#endif /* !CONFIG_IBM_NEW_EMAC_RGMII */
81
82#endif /* __IBM_NEWEMAC_RGMII_H */
diff --git a/drivers/net/ethernet/ibm/emac/tah.c b/drivers/net/ethernet/ibm/emac/tah.c
new file mode 100644
index 00000000000..5f51bf7c9dc
--- /dev/null
+++ b/drivers/net/ethernet/ibm/emac/tah.c
@@ -0,0 +1,185 @@
1/*
2 * drivers/net/ibm_newemac/tah.c
3 *
4 * Driver for PowerPC 4xx on-chip ethernet controller, TAH support.
5 *
6 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
7 * <benh@kernel.crashing.org>
8 *
9 * Based on the arch/ppc version of the driver:
10 *
11 * Copyright 2004 MontaVista Software, Inc.
12 * Matt Porter <mporter@kernel.crashing.org>
13 *
14 * Copyright (c) 2005 Eugene Surovegin <ebs@ebshome.net>
15 *
16 * This program is free software; you can redistribute it and/or modify it
17 * under the terms of the GNU General Public License as published by the
18 * Free Software Foundation; either version 2 of the License, or (at your
19 * option) any later version.
20 */
21#include <asm/io.h>
22
23#include "emac.h"
24#include "core.h"
25
/* Attach an EMAC to this TAH: just account the user, since the
 * hardware was already reset/configured at probe() time.
 * Always returns 0.
 */
int __devinit tah_attach(struct platform_device *ofdev, int channel)
{
	struct tah_instance *dev = dev_get_drvdata(&ofdev->dev);

	mutex_lock(&dev->lock);
	/* Reset has been done at probe() time... nothing else to do for now */
	++dev->users;
	mutex_unlock(&dev->lock);

	return 0;
}

/* Detach an EMAC from this TAH; must balance a tah_attach() */
void tah_detach(struct platform_device *ofdev, int channel)
{
	struct tah_instance *dev = dev_get_drvdata(&ofdev->dev);

	mutex_lock(&dev->lock);
	--dev->users;
	mutex_unlock(&dev->lock);
}
46
/* Soft-reset the TAH and reprogram its mode register: IPv4 checksum
 * verification on, 10KB TX FIFO, 768-byte store-and-forward threshold.
 * NOTE(review): the reset wait is a delay-less 100-iteration busy spin
 * on the self-clearing TAH_MR_SR bit; a timeout only logs an error.
 */
void tah_reset(struct platform_device *ofdev)
{
	struct tah_instance *dev = dev_get_drvdata(&ofdev->dev);
	struct tah_regs __iomem *p = dev->base;
	int n;

	/* Reset TAH */
	out_be32(&p->mr, TAH_MR_SR);
	n = 100;
	while ((in_be32(&p->mr) & TAH_MR_SR) && n)
		--n;

	if (unlikely(!n))
		printk(KERN_ERR "%s: reset timeout\n",
		       ofdev->dev.of_node->full_name);

	/* 10KB TAH TX FIFO accommodates the max MTU of 9000 */
	out_be32(&p->mr,
		 TAH_MR_CVR | TAH_MR_ST_768 | TAH_MR_TFS_10KB | TAH_MR_DTFP |
		 TAH_MR_DIG);
}
68
/* Size of the blob tah_dump_regs() writes: subheader + registers */
int tah_get_regs_len(struct platform_device *ofdev)
{
	return sizeof(struct emac_ethtool_regs_subhdr) +
		sizeof(struct tah_regs);
}

/* Copy the TAH registers into an ethtool regs buffer at 'buf',
 * preceded by a subheader.  Returns the position just past the data.
 */
void *tah_dump_regs(struct platform_device *ofdev, void *buf)
{
	struct tah_instance *dev = dev_get_drvdata(&ofdev->dev);
	struct emac_ethtool_regs_subhdr *hdr = buf;
	struct tah_regs *regs = (struct tah_regs *)(hdr + 1);

	hdr->version = 0;
	hdr->index = 0; /* for now, are there chips with more than one
			 * tah ? if yes, then we'll add a cell_index
			 * like we do for emac
			 */
	memcpy_fromio(regs, dev->base, sizeof(struct tah_regs));
	return regs + 1;
}
89
/* Probe a TAH node: allocate and initialize the instance, map its
 * registers and reset/configure the engine (enables IPv4 checksum
 * verification; no TSO yet).  Returns 0 or a negative errno; on
 * failure everything allocated here is released.
 */
static int __devinit tah_probe(struct platform_device *ofdev)
{
	struct device_node *np = ofdev->dev.of_node;
	struct tah_instance *dev;
	struct resource regs;
	int rc;

	rc = -ENOMEM;
	dev = kzalloc(sizeof(struct tah_instance), GFP_KERNEL);
	if (dev == NULL) {
		printk(KERN_ERR "%s: could not allocate TAH device!\n",
		       np->full_name);
		goto err_gone;
	}

	mutex_init(&dev->lock);
	dev->ofdev = ofdev;

	rc = -ENXIO;
	if (of_address_to_resource(np, 0, &regs)) {
		printk(KERN_ERR "%s: Can't get registers address\n",
		       np->full_name);
		goto err_free;
	}

	rc = -ENOMEM;
	dev->base = (struct tah_regs __iomem *)ioremap(regs.start,
					       sizeof(struct tah_regs));
	if (dev->base == NULL) {
		printk(KERN_ERR "%s: Can't map device registers!\n",
		       np->full_name);
		goto err_free;
	}

	dev_set_drvdata(&ofdev->dev, dev);

	/* Initialize TAH and enable IPv4 checksum verification, no TSO yet */
	tah_reset(ofdev);

	printk(KERN_INFO
	       "TAH %s initialized\n", ofdev->dev.of_node->full_name);
	wmb();

	return 0;

 err_free:
	kfree(dev);
 err_gone:
	return rc;
}
140
/* Tear down a TAH instance: unpublish it, unmap the registers and
 * free the memory.  Warns if EMACs are still attached.
 */
static int __devexit tah_remove(struct platform_device *ofdev)
{
	struct tah_instance *dev = dev_get_drvdata(&ofdev->dev);

	dev_set_drvdata(&ofdev->dev, NULL);

	WARN_ON(dev->users != 0);

	iounmap(dev->base);
	kfree(dev);

	return 0;
}
154
155static struct of_device_id tah_match[] =
156{
157 {
158 .compatible = "ibm,tah",
159 },
160 /* For backward compat with old DT */
161 {
162 .type = "tah",
163 },
164 {},
165};
166
167static struct platform_driver tah_driver = {
168 .driver = {
169 .name = "emac-tah",
170 .owner = THIS_MODULE,
171 .of_match_table = tah_match,
172 },
173 .probe = tah_probe,
174 .remove = tah_remove,
175};
176
177int __init tah_init(void)
178{
179 return platform_driver_register(&tah_driver);
180}
181
182void tah_exit(void)
183{
184 platform_driver_unregister(&tah_driver);
185}
diff --git a/drivers/net/ethernet/ibm/emac/tah.h b/drivers/net/ethernet/ibm/emac/tah.h
new file mode 100644
index 00000000000..61dbeca006d
--- /dev/null
+++ b/drivers/net/ethernet/ibm/emac/tah.h
@@ -0,0 +1,95 @@
1/*
2 * drivers/net/ibm_newemac/tah.h
3 *
4 * Driver for PowerPC 4xx on-chip ethernet controller, TAH support.
5 *
6 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
7 * <benh@kernel.crashing.org>
8 *
9 * Based on the arch/ppc version of the driver:
10 *
11 * Copyright 2004 MontaVista Software, Inc.
12 * Matt Porter <mporter@kernel.crashing.org>
13 *
14 * Copyright (c) 2005 Eugene Surovegin <ebs@ebshome.net>
15 *
16 * This program is free software; you can redistribute it and/or modify it
17 * under the terms of the GNU General Public License as published by the
18 * Free Software Foundation; either version 2 of the License, or (at your
19 * option) any later version.
20 */
21
22#ifndef __IBM_NEWEMAC_TAH_H
23#define __IBM_NEWEMAC_TAH_H
24
/* TAH */
/* TAH register block layout; field order mirrors the hardware map,
 * do not reorder.  Accessed through an ioremap()ed pointer only. */
struct tah_regs {
	u32 revid;	/* revision ID */
	u32 pad[3];
	u32 mr;		/* mode register (TAH_MR_* bits below) */
	u32 ssr0;	/* NOTE(review): ssr0..ssr5 and tsr semantics are not
			 * visible here — confirm against the PPC4xx TAH
			 * hardware documentation */
	u32 ssr1;
	u32 ssr2;
	u32 ssr3;
	u32 ssr4;
	u32 ssr5;
	u32 tsr;
};
38
39
/* TAH device */
/* Per-device software state for one TAH, shared by the attached EMACs. */
struct tah_instance {
	/* ioremap()ed register block */
	struct tah_regs __iomem *base;

	/* Only one EMAC whacks us at a time */
	struct mutex lock;

	/* number of EMACs using this TAH */
	int users;

	/* OF device instance */
	struct platform_device *ofdev;
};
53
54
/* TAH engine */
/* Bit definitions for the TAH mode register (tah_regs.mr).
 * NOTE(review): names taken from the hardware register description;
 * exact bit semantics are not visible in this file — confirm against
 * the PPC4xx TAH documentation. */
#define TAH_MR_CVR		0x80000000
#define TAH_MR_SR		0x40000000
/* ST_*: one of six mutually exclusive values in bits 24-26 */
#define TAH_MR_ST_256		0x01000000
#define TAH_MR_ST_512		0x02000000
#define TAH_MR_ST_768		0x03000000
#define TAH_MR_ST_1024		0x04000000
#define TAH_MR_ST_1280		0x05000000
#define TAH_MR_ST_1536		0x06000000
/* TFS_*: one of six mutually exclusive values in bits 21-23 */
#define TAH_MR_TFS_16KB		0x00000000
#define TAH_MR_TFS_2KB		0x00200000
#define TAH_MR_TFS_4KB		0x00400000
#define TAH_MR_TFS_6KB		0x00600000
#define TAH_MR_TFS_8KB		0x00800000
#define TAH_MR_TFS_10KB		0x00a00000
#define TAH_MR_DTFP		0x00100000
#define TAH_MR_DIG		0x00080000
72
#ifdef CONFIG_IBM_NEW_EMAC_TAH

/* Real implementations live in tah.c */
extern int tah_init(void);
extern void tah_exit(void);
extern int tah_attach(struct platform_device *ofdev, int channel);
extern void tah_detach(struct platform_device *ofdev, int channel);
extern void tah_reset(struct platform_device *ofdev);
extern int tah_get_regs_len(struct platform_device *ofdev);
extern void *tah_dump_regs(struct platform_device *ofdev, void *buf);

#else

/* TAH support compiled out: no-op stubs so callers need no #ifdefs.
 * tah_attach() fails with -ENXIO since no TAH can actually be attached;
 * the regs helpers report zero length / an unchanged buffer pointer. */
# define tah_init()		0
# define tah_exit()		do { } while(0)
# define tah_attach(x,y)	(-ENXIO)
# define tah_detach(x,y)	do { } while(0)
# define tah_reset(x)		do { } while(0)
# define tah_get_regs_len(x)	0
# define tah_dump_regs(x,buf)	(buf)

#endif /* !CONFIG_IBM_NEW_EMAC_TAH */
94
95#endif /* __IBM_NEWEMAC_TAH_H */
diff --git a/drivers/net/ethernet/ibm/emac/zmii.c b/drivers/net/ethernet/ibm/emac/zmii.c
new file mode 100644
index 00000000000..97449e786d6
--- /dev/null
+++ b/drivers/net/ethernet/ibm/emac/zmii.c
@@ -0,0 +1,332 @@
1/*
2 * drivers/net/ibm_newemac/zmii.c
3 *
4 * Driver for PowerPC 4xx on-chip ethernet controller, ZMII bridge support.
5 *
6 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
7 * <benh@kernel.crashing.org>
8 *
9 * Based on the arch/ppc version of the driver:
10 *
11 * Copyright (c) 2004, 2005 Zultys Technologies.
12 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
13 *
14 * Based on original work by
15 * Armin Kuster <akuster@mvista.com>
 * Copyright 2001 MontaVista Software Inc.
17 *
18 * This program is free software; you can redistribute it and/or modify it
19 * under the terms of the GNU General Public License as published by the
20 * Free Software Foundation; either version 2 of the License, or (at your
21 * option) any later version.
22 *
23 */
24#include <linux/slab.h>
25#include <linux/kernel.h>
26#include <linux/ethtool.h>
27#include <asm/io.h>
28
29#include "emac.h"
30#include "core.h"
31
/* ZMIIx_FER */
/* Function Enable Register: each EMAC input owns a 4-bit field, so the
 * per-input bit is the base mask shifted right by (input * 4). */
#define ZMII_FER_MDI(idx)	(0x80000000 >> ((idx) * 4))
#define ZMII_FER_MDI_ALL	(ZMII_FER_MDI(0) | ZMII_FER_MDI(1) | \
				 ZMII_FER_MDI(2) | ZMII_FER_MDI(3))

#define ZMII_FER_SMII(idx)	(0x40000000 >> ((idx) * 4))
#define ZMII_FER_RMII(idx)	(0x20000000 >> ((idx) * 4))
#define ZMII_FER_MII(idx)	(0x10000000 >> ((idx) * 4))

/* ZMIIx_SSR */
/* Speed Select Register: same 4-bit-per-input layout as the FER */
#define ZMII_SSR_SCI(idx)	(0x40000000 >> ((idx) * 4))
#define ZMII_SSR_FSS(idx)	(0x20000000 >> ((idx) * 4))
#define ZMII_SSR_SP(idx)	(0x10000000 >> ((idx) * 4))
45
46/* ZMII only supports MII, RMII and SMII
47 * we also support autodetection for backward compatibility
48 */
49static inline int zmii_valid_mode(int mode)
50{
51 return mode == PHY_MODE_MII ||
52 mode == PHY_MODE_RMII ||
53 mode == PHY_MODE_SMII ||
54 mode == PHY_MODE_NA;
55}
56
57static inline const char *zmii_mode_name(int mode)
58{
59 switch (mode) {
60 case PHY_MODE_MII:
61 return "MII";
62 case PHY_MODE_RMII:
63 return "RMII";
64 case PHY_MODE_SMII:
65 return "SMII";
66 default:
67 BUG();
68 }
69}
70
71static inline u32 zmii_mode_mask(int mode, int input)
72{
73 switch (mode) {
74 case PHY_MODE_MII:
75 return ZMII_FER_MII(input);
76 case PHY_MODE_RMII:
77 return ZMII_FER_RMII(input);
78 case PHY_MODE_SMII:
79 return ZMII_FER_SMII(input);
80 default:
81 return 0;
82 }
83}
84
/*
 * Attach EMAC input @input to the ZMII bridge.  On entry *mode holds the
 * mode requested by the EMAC (possibly PHY_MODE_NA); on successful return
 * it holds the mode the bridge actually operates in, which the caller may
 * use for PHY initialization.  Returns 0 on success or -EINVAL when the
 * requested mode conflicts with the mode already chosen for other inputs.
 */
int __devinit zmii_attach(struct platform_device *ofdev, int input, int *mode)
{
	struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev);
	struct zmii_regs __iomem *p = dev->base;

	ZMII_DBG(dev, "init(%d, %d)" NL, input, *mode);

	if (!zmii_valid_mode(*mode)) {
		/* Probably an EMAC connected to RGMII,
		 * but it still may need ZMII for MDIO so
		 * we don't fail here.
		 */
		/* NOTE(review): users is bumped without dev->lock here,
		 * presumably safe because attach runs single-threaded at
		 * probe time — confirm. */
		dev->users++;
		return 0;
	}

	mutex_lock(&dev->lock);

	/* Autodetect ZMII mode if not specified.
	 * This is only for backward compatibility with the old driver.
	 * Please, always specify PHY mode in your board port to avoid
	 * any surprises.
	 */
	if (dev->mode == PHY_MODE_NA) {
		if (*mode == PHY_MODE_NA) {
			/* fer_save is the FER value left by firmware,
			 * captured in zmii_probe() before all inputs
			 * were disabled */
			u32 r = dev->fer_save;

			ZMII_DBG(dev, "autodetecting mode, FER = 0x%08x" NL, r);

			if (r & (ZMII_FER_MII(0) | ZMII_FER_MII(1)))
				dev->mode = PHY_MODE_MII;
			else if (r & (ZMII_FER_RMII(0) | ZMII_FER_RMII(1)))
				dev->mode = PHY_MODE_RMII;
			else
				dev->mode = PHY_MODE_SMII;
		} else
			dev->mode = *mode;

		printk(KERN_NOTICE "%s: bridge in %s mode\n",
		       ofdev->dev.of_node->full_name,
		       zmii_mode_name(dev->mode));
	} else {
		/* All inputs must use the same mode */
		if (*mode != PHY_MODE_NA && *mode != dev->mode) {
			printk(KERN_ERR
			       "%s: invalid mode %d specified for input %d\n",
			       ofdev->dev.of_node->full_name, *mode, input);
			mutex_unlock(&dev->lock);
			return -EINVAL;
		}
	}

	/* Report back correct PHY mode,
	 * it may be used during PHY initialization.
	 */
	*mode = dev->mode;

	/* Enable this input */
	out_be32(&p->fer, in_be32(&p->fer) | zmii_mode_mask(dev->mode, input));
	++dev->users;

	mutex_unlock(&dev->lock);

	return 0;
}
150
151void zmii_get_mdio(struct platform_device *ofdev, int input)
152{
153 struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev);
154 u32 fer;
155
156 ZMII_DBG2(dev, "get_mdio(%d)" NL, input);
157
158 mutex_lock(&dev->lock);
159
160 fer = in_be32(&dev->base->fer) & ~ZMII_FER_MDI_ALL;
161 out_be32(&dev->base->fer, fer | ZMII_FER_MDI(input));
162}
163
164void zmii_put_mdio(struct platform_device *ofdev, int input)
165{
166 struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev);
167
168 ZMII_DBG2(dev, "put_mdio(%d)" NL, input);
169 mutex_unlock(&dev->lock);
170}
171
172
173void zmii_set_speed(struct platform_device *ofdev, int input, int speed)
174{
175 struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev);
176 u32 ssr;
177
178 mutex_lock(&dev->lock);
179
180 ssr = in_be32(&dev->base->ssr);
181
182 ZMII_DBG(dev, "speed(%d, %d)" NL, input, speed);
183
184 if (speed == SPEED_100)
185 ssr |= ZMII_SSR_SP(input);
186 else
187 ssr &= ~ZMII_SSR_SP(input);
188
189 out_be32(&dev->base->ssr, ssr);
190
191 mutex_unlock(&dev->lock);
192}
193
194void zmii_detach(struct platform_device *ofdev, int input)
195{
196 struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev);
197
198 BUG_ON(!dev || dev->users == 0);
199
200 mutex_lock(&dev->lock);
201
202 ZMII_DBG(dev, "detach(%d)" NL, input);
203
204 /* Disable this input */
205 out_be32(&dev->base->fer,
206 in_be32(&dev->base->fer) & ~zmii_mode_mask(dev->mode, input));
207
208 --dev->users;
209
210 mutex_unlock(&dev->lock);
211}
212
213int zmii_get_regs_len(struct platform_device *ofdev)
214{
215 return sizeof(struct emac_ethtool_regs_subhdr) +
216 sizeof(struct zmii_regs);
217}
218
219void *zmii_dump_regs(struct platform_device *ofdev, void *buf)
220{
221 struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev);
222 struct emac_ethtool_regs_subhdr *hdr = buf;
223 struct zmii_regs *regs = (struct zmii_regs *)(hdr + 1);
224
225 hdr->version = 0;
226 hdr->index = 0; /* for now, are there chips with more than one
227 * zmii ? if yes, then we'll add a cell_index
228 * like we do for emac
229 */
230 memcpy_fromio(regs, dev->base, sizeof(struct zmii_regs));
231 return regs + 1;
232}
233
234static int __devinit zmii_probe(struct platform_device *ofdev)
235{
236 struct device_node *np = ofdev->dev.of_node;
237 struct zmii_instance *dev;
238 struct resource regs;
239 int rc;
240
241 rc = -ENOMEM;
242 dev = kzalloc(sizeof(struct zmii_instance), GFP_KERNEL);
243 if (dev == NULL) {
244 printk(KERN_ERR "%s: could not allocate ZMII device!\n",
245 np->full_name);
246 goto err_gone;
247 }
248
249 mutex_init(&dev->lock);
250 dev->ofdev = ofdev;
251 dev->mode = PHY_MODE_NA;
252
253 rc = -ENXIO;
254 if (of_address_to_resource(np, 0, &regs)) {
255 printk(KERN_ERR "%s: Can't get registers address\n",
256 np->full_name);
257 goto err_free;
258 }
259
260 rc = -ENOMEM;
261 dev->base = (struct zmii_regs __iomem *)ioremap(regs.start,
262 sizeof(struct zmii_regs));
263 if (dev->base == NULL) {
264 printk(KERN_ERR "%s: Can't map device registers!\n",
265 np->full_name);
266 goto err_free;
267 }
268
269 /* We may need FER value for autodetection later */
270 dev->fer_save = in_be32(&dev->base->fer);
271
272 /* Disable all inputs by default */
273 out_be32(&dev->base->fer, 0);
274
275 printk(KERN_INFO
276 "ZMII %s initialized\n", ofdev->dev.of_node->full_name);
277 wmb();
278 dev_set_drvdata(&ofdev->dev, dev);
279
280 return 0;
281
282 err_free:
283 kfree(dev);
284 err_gone:
285 return rc;
286}
287
288static int __devexit zmii_remove(struct platform_device *ofdev)
289{
290 struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev);
291
292 dev_set_drvdata(&ofdev->dev, NULL);
293
294 WARN_ON(dev->users != 0);
295
296 iounmap(dev->base);
297 kfree(dev);
298
299 return 0;
300}
301
302static struct of_device_id zmii_match[] =
303{
304 {
305 .compatible = "ibm,zmii",
306 },
307 /* For backward compat with old DT */
308 {
309 .type = "emac-zmii",
310 },
311 {},
312};
313
/* Platform driver glue; the driver core calls zmii_probe()/zmii_remove()
 * for each device-tree node matched by zmii_match. */
static struct platform_driver zmii_driver = {
	.driver = {
		.name = "emac-zmii",
		.owner = THIS_MODULE,
		.of_match_table = zmii_match,
	},
	.probe = zmii_probe,
	.remove = zmii_remove,
};
323
/* Register the ZMII platform driver; called from the EMAC core init path. */
int __init zmii_init(void)
{
	return platform_driver_register(&zmii_driver);
}
328
/* Unregister the ZMII platform driver on EMAC core teardown. */
void zmii_exit(void)
{
	platform_driver_unregister(&zmii_driver);
}
diff --git a/drivers/net/ethernet/ibm/emac/zmii.h b/drivers/net/ethernet/ibm/emac/zmii.h
new file mode 100644
index 00000000000..1333fa2b278
--- /dev/null
+++ b/drivers/net/ethernet/ibm/emac/zmii.h
@@ -0,0 +1,78 @@
1/*
2 * drivers/net/ibm_newemac/zmii.h
3 *
4 * Driver for PowerPC 4xx on-chip ethernet controller, ZMII bridge support.
5 *
6 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
7 * <benh@kernel.crashing.org>
8 *
9 * Based on the arch/ppc version of the driver:
10 *
11 * Copyright (c) 2004, 2005 Zultys Technologies.
12 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
13 *
14 * Based on original work by
15 * Armin Kuster <akuster@mvista.com>
 * Copyright 2001 MontaVista Software Inc.
17 *
18 * This program is free software; you can redistribute it and/or modify it
19 * under the terms of the GNU General Public License as published by the
20 * Free Software Foundation; either version 2 of the License, or (at your
21 * option) any later version.
22 *
23 */
24#ifndef __IBM_NEWEMAC_ZMII_H
25#define __IBM_NEWEMAC_ZMII_H
26
/* ZMII bridge registers */
/* Hardware register layout; field order mirrors the device map,
 * do not reorder.  Accessed through an ioremap()ed pointer only. */
struct zmii_regs {
	u32 fer;		/* Function enable reg */
	u32 ssr;		/* Speed select reg */
	u32 smiirs;		/* SMII status reg */
};
33
/* ZMII device */
/* Per-device software state for one ZMII bridge, shared by the EMACs
 * attached to its inputs. */
struct zmii_instance {
	/* ioremap()ed register block */
	struct zmii_regs __iomem *base;

	/* Only one EMAC whacks us at a time */
	struct mutex lock;

	/* subset of PHY_MODE_XXXX */
	int mode;

	/* number of EMACs using this ZMII bridge */
	int users;

	/* FER value left by firmware */
	u32 fer_save;

	/* OF device instance */
	struct platform_device *ofdev;
};
53
#ifdef CONFIG_IBM_NEW_EMAC_ZMII

/* Real implementations live in zmii.c */
extern int zmii_init(void);
extern void zmii_exit(void);
extern int zmii_attach(struct platform_device *ofdev, int input, int *mode);
extern void zmii_detach(struct platform_device *ofdev, int input);
extern void zmii_get_mdio(struct platform_device *ofdev, int input);
extern void zmii_put_mdio(struct platform_device *ofdev, int input);
extern void zmii_set_speed(struct platform_device *ofdev, int input, int speed);
extern int zmii_get_regs_len(struct platform_device *ocpdev);
extern void *zmii_dump_regs(struct platform_device *ofdev, void *buf);

#else
/* ZMII support compiled out: no-op stubs so callers need no #ifdefs.
 * zmii_attach() fails with -ENXIO since no bridge can be attached; the
 * regs helpers report zero length / an unchanged buffer pointer. */
# define zmii_init()		0
# define zmii_exit()		do { } while(0)
# define zmii_attach(x,y,z)	(-ENXIO)
# define zmii_detach(x,y)	do { } while(0)
# define zmii_get_mdio(x,y)	do { } while(0)
# define zmii_put_mdio(x,y)	do { } while(0)
# define zmii_set_speed(x,y,z)	do { } while(0)
# define zmii_get_regs_len(x)	0
# define zmii_dump_regs(x,buf)	(buf)
#endif /* !CONFIG_IBM_NEW_EMAC_ZMII */
77
78#endif /* __IBM_NEWEMAC_ZMII_H */