aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMugunthan V N <mugunthanvnm@ti.com>2012-03-18 16:17:54 -0400
committerDavid S. Miller <davem@davemloft.net>2012-03-19 18:02:05 -0400
commitdf828598a755732e717b0adca82f884e44d37576 (patch)
tree26d260fd85a57efa14cc4be65ebd702f35879be4
parentdb82173f23c5289118142fc76111f99ff809df89 (diff)
netdev: driver: ethernet: Add TI CPSW driver
This patch adds support for TI's CPSW driver. The three port switch gigabit ethernet subsystem provides ethernet packet communication and can be configured as an ethernet switch. Supports 10/100/1000 Mbps. Signed-off-by: Cyril Chemparathy <cyril@ti.com> Signed-off-by: Sriramakrishnan A G <srk@ti.com> Signed-off-by: Mugunthan V N <mugunthanvnm@ti.com> Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--drivers/net/ethernet/ti/Kconfig11
-rw-r--r--drivers/net/ethernet/ti/Makefile2
-rw-r--r--drivers/net/ethernet/ti/cpsw.c1018
-rw-r--r--include/linux/platform_data/cpsw.h55
4 files changed, 1086 insertions, 0 deletions
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index de76c70ec8fb..b42252c4bec8 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -49,6 +49,17 @@ config TI_DAVINCI_CPDMA
49 To compile this driver as a module, choose M here: the module 49 To compile this driver as a module, choose M here: the module
50 will be called davinci_cpdma. This is recommended. 50 will be called davinci_cpdma. This is recommended.
51 51
52config TI_CPSW
53 tristate "TI CPSW Switch Support"
54 depends on ARM && (ARCH_DAVINCI || SOC_OMAPAM33XX)
55 select TI_DAVINCI_CPDMA
56 select TI_DAVINCI_MDIO
57 ---help---
58 This driver supports TI's CPSW Ethernet Switch.
59
60 To compile this driver as a module, choose M here: the module
61 will be called cpsw.
62
52config TLAN 63config TLAN
53 tristate "TI ThunderLAN support" 64 tristate "TI ThunderLAN support"
54 depends on (PCI || EISA) 65 depends on (PCI || EISA)
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile
index aedb3af74e5a..91bd8bba78ff 100644
--- a/drivers/net/ethernet/ti/Makefile
+++ b/drivers/net/ethernet/ti/Makefile
@@ -7,3 +7,5 @@ obj-$(CONFIG_CPMAC) += cpmac.o
7obj-$(CONFIG_TI_DAVINCI_EMAC) += davinci_emac.o 7obj-$(CONFIG_TI_DAVINCI_EMAC) += davinci_emac.o
8obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o 8obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o
9obj-$(CONFIG_TI_DAVINCI_CPDMA) += davinci_cpdma.o 9obj-$(CONFIG_TI_DAVINCI_CPDMA) += davinci_cpdma.o
10obj-$(CONFIG_TI_CPSW) += ti_cpsw.o
11ti_cpsw-y := cpsw_ale.o cpsw.o
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
new file mode 100644
index 000000000000..c68c9d96312e
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -0,0 +1,1018 @@
1/*
2 * Texas Instruments Ethernet Switch Driver
3 *
4 * Copyright (C) 2012 Texas Instruments
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License as
8 * published by the Free Software Foundation version 2.
9 *
10 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
11 * kind, whether express or implied; without even the implied warranty
12 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#include <linux/kernel.h>
17#include <linux/io.h>
18#include <linux/clk.h>
19#include <linux/timer.h>
20#include <linux/module.h>
21#include <linux/platform_device.h>
22#include <linux/irqreturn.h>
23#include <linux/interrupt.h>
24#include <linux/if_ether.h>
25#include <linux/etherdevice.h>
26#include <linux/netdevice.h>
27#include <linux/phy.h>
28#include <linux/workqueue.h>
29#include <linux/delay.h>
30
31#include <linux/platform_data/cpsw.h>
32
33#include "cpsw_ale.h"
34#include "davinci_cpdma.h"
35
/*
 * Default message-class mask handed to netif_msg_init() in cpsw_probe():
 * every NETIF_MSG_* class is listed, so the "debug_level" module
 * parameter alone decides which classes are actually enabled.
 */
#define CPSW_DEBUG	(NETIF_MSG_HW		| NETIF_MSG_WOL		| \
			 NETIF_MSG_DRV		| NETIF_MSG_LINK	| \
			 NETIF_MSG_IFUP		| NETIF_MSG_INTR	| \
			 NETIF_MSG_PROBE	| NETIF_MSG_TIMER	| \
			 NETIF_MSG_IFDOWN	| NETIF_MSG_RX_ERR	| \
			 NETIF_MSG_TX_ERR	| NETIF_MSG_TX_DONE	| \
			 NETIF_MSG_PKTDATA	| NETIF_MSG_TX_QUEUED	| \
			 NETIF_MSG_RX_STATUS)

/*
 * Rate-limited, class-gated wrappers around the dev_*() loggers.
 * "type" is a NETIF_MSG class suffix (hw, tx_err, ifup, ...) checked
 * against priv->msg_enable via the netif_msg_##type() helpers.
 */
#define cpsw_info(priv, type, format, ...)		\
do {							\
	if (netif_msg_##type(priv) && net_ratelimit())	\
		dev_info(priv->dev, format, ## __VA_ARGS__);	\
} while (0)

#define cpsw_err(priv, type, format, ...)		\
do {							\
	if (netif_msg_##type(priv) && net_ratelimit())	\
		dev_err(priv->dev, format, ## __VA_ARGS__);	\
} while (0)

#define cpsw_dbg(priv, type, format, ...)		\
do {							\
	if (netif_msg_##type(priv) && net_ratelimit())	\
		dev_dbg(priv->dev, format, ## __VA_ARGS__);	\
} while (0)

#define cpsw_notice(priv, type, format, ...)		\
do {							\
	if (netif_msg_##type(priv) && net_ratelimit())	\
		dev_notice(priv->dev, format, ## __VA_ARGS__);	\
} while (0)
68
/*
 * Field extractors for the CPSW id_ver register (printed in
 * cpsw_ndo_open).  The argument is fully parenthesized so that
 * compound expressions expand correctly — e.g. with the original
 * definition, CPSW_MINOR_VERSION(a | b) expanded to (a | b & 0xff),
 * masking only "b" because & binds tighter than |.
 */
#define CPSW_MAJOR_VERSION(reg)		(((reg) >> 8) & 0x7)
#define CPSW_MINOR_VERSION(reg)		((reg) & 0xff)
#define CPSW_RTL_VERSION(reg)		(((reg) >> 11) & 0x1f)
72
/*
 * Offsets of the individual CPDMA register blocks relative to the
 * platform-supplied cpdma_reg_ofs / cpdma_sram_ofs bases.
 */
#define CPDMA_RXTHRESH		0x0c0
#define CPDMA_RXFREE		0x0e0
#define CPDMA_TXHDP		0x00
#define CPDMA_RXHDP		0x20
#define CPDMA_TXCP		0x40
#define CPDMA_RXCP		0x60

/* Compose the __iomem pointers handed to cpdma_ctlr_create() in probe. */
#define cpsw_dma_regs(base, offset)		\
	(void __iomem *)((base) + (offset))
#define cpsw_dma_rxthresh(base, offset)		\
	(void __iomem *)((base) + (offset) + CPDMA_RXTHRESH)
#define cpsw_dma_rxfree(base, offset)		\
	(void __iomem *)((base) + (offset) + CPDMA_RXFREE)
#define cpsw_dma_txhdp(base, offset)		\
	(void __iomem *)((base) + (offset) + CPDMA_TXHDP)
#define cpsw_dma_rxhdp(base, offset)		\
	(void __iomem *)((base) + (offset) + CPDMA_RXHDP)
#define cpsw_dma_txcp(base, offset)		\
	(void __iomem *)((base) + (offset) + CPDMA_TXCP)
#define cpsw_dma_rxcp(base, offset)		\
	(void __iomem *)((base) + (offset) + CPDMA_RXCP)

#define CPSW_POLL_WEIGHT	64	/* NAPI weight (see netif_napi_add) */
#define CPSW_MIN_PACKET_SIZE	60	/* frames are padded up to this */
/* 1500 (MTU) + 14 (eth hdr) + 4 + 4 — presumably VLAN tag and FCS */
#define CPSW_MAX_PACKET_SIZE	(1500 + 14 + 4 + 4)

/* Hardware priority-map register values programmed in cpsw_slave_open()
 * and cpsw_init_host_port(); encodings are hardware-defined. */
#define RX_PRIORITY_MAPPING	0x76543210
#define TX_PRIORITY_MAPPING	0x33221100
#define CPDMA_TX_PRIORITY_MAP	0x76543210
102
/*
 * Enable/disable the device's Linux interrupt lines, using the snapshot
 * taken in cpsw_probe() (irqs_table/num_irqs).  Defined with the
 * standard do { } while (0) idiom WITHOUT a trailing semicolon: the
 * original definitions ended in "while (0);", which expands to two
 * statements and breaks an unbraced if/else around the macro call.
 * "priv" is parenthesized against compound arguments.
 */
#define cpsw_enable_irq(priv)	\
	do {			\
		u32 i;		\
		for (i = 0; i < (priv)->num_irqs; i++)		\
			enable_irq((priv)->irqs_table[i]);	\
	} while (0)
#define cpsw_disable_irq(priv)	\
	do {			\
		u32 i;		\
		for (i = 0; i < (priv)->num_irqs; i++)		\
			disable_irq_nosync((priv)->irqs_table[i]);	\
	} while (0)
115
/* Module parameters — read once at probe/open time, not runtime-writable
 * (perm 0: not exposed in sysfs). */
static int debug_level;
module_param(debug_level, int, 0);
MODULE_PARM_DESC(debug_level, "cpsw debug level (NETIF_MSG bits)");

/* ALE address ageout, forwarded to cpsw_ale_create() in probe. */
static int ale_ageout = 10;
module_param(ale_ageout, int, 0);
MODULE_PARM_DESC(ale_ageout, "cpsw ale ageout interval (seconds)");

/* Receive buffer size; cpsw_probe() clamps this to at least 128. */
static int rx_packet_max = CPSW_MAX_PACKET_SIZE;
module_param(rx_packet_max, int, 0);
MODULE_PARM_DESC(rx_packet_max, "maximum receive packet size (bytes)");
127
/*
 * Memory-mapped register layouts.  Field order and width mirror the
 * hardware — do not reorder or repack.  Offsets into the two MEM
 * resources come from the platform data (see cpsw_probe).
 */

/* CPSW subsystem (wrapper) registers — second MEM resource. */
struct cpsw_ss_regs {
	u32	id_ver;
	u32	soft_reset;
	u32	control;
	u32	int_control;
	u32	rx_thresh_en;
	u32	rx_en;		/* rx interrupt enable (0xFF in cpsw_intr_enable) */
	u32	tx_en;		/* tx interrupt enable */
	u32	misc_en;
};

/* Switch-core registers — first MEM resource. */
struct cpsw_regs {
	u32	id_ver;		/* decoded via CPSW_*_VERSION() */
	u32	control;
	u32	soft_reset;
	u32	stat_port_en;	/* per-port statistics enable bits */
	u32	ptype;		/* priority elevation; zeroed at open */
};

/* Per-slave-port switch registers (offset slave_reg_ofs). */
struct cpsw_slave_regs {
	u32	max_blks;
	u32	blk_cnt;
	u32	flow_thresh;
	u32	port_vlan;
	u32	tx_pri_map;
	u32	ts_ctl;
	u32	ts_seq_ltype;
	u32	ts_vlan;
	u32	sa_lo;		/* source MAC bytes 4-5 (see mac_lo()) */
	u32	sa_hi;		/* source MAC bytes 0-3 (see mac_hi()) */
};

/* Host (CPU) port registers (offset host_port_reg_ofs). */
struct cpsw_host_regs {
	u32	max_blks;
	u32	blk_cnt;
	u32	flow_thresh;
	u32	port_vlan;
	u32	tx_pri_map;
	u32	cpdma_tx_pri_map;
	u32	cpdma_rx_chan_map;
};

/* Per-slave "sliver" (MAC) registers (offset sliver_reg_ofs). */
struct cpsw_sliver_regs {
	u32	id_ver;
	u32	mac_control;	/* written from _cpsw_adjust_link() */
	u32	mac_status;
	u32	soft_reset;
	u32	rx_maxlen;	/* set to rx_packet_max at slave open */
	u32	__reserved_0;
	u32	rx_pause;
	u32	tx_pause;
	u32	__reserved_1;
	u32	rx_pri_map;
};
182
/* Driver-side state for one slave (external) switch port. */
struct cpsw_slave {
	struct cpsw_slave_regs __iomem	*regs;		/* port registers */
	struct cpsw_sliver_regs __iomem	*sliver;	/* MAC registers */
	int				slave_num;	/* 0-based index */
	u32				mac_control;	/* last value written */
	struct cpsw_slave_data		*data;		/* platform config */
	struct phy_device		*phy;		/* NULL if attach failed */
};
191
/* Per-device driver state, embedded in the net_device (netdev_priv). */
struct cpsw_priv {
	spinlock_t			lock;
	struct platform_device		*pdev;
	struct net_device		*ndev;
	struct resource			*cpsw_res;	/* switch-core MEM */
	struct resource			*cpsw_ss_res;	/* subsystem MEM */
	struct napi_struct		napi;
	struct device			*dev;
	struct cpsw_platform_data	data;		/* copy of board data */
	struct cpsw_regs __iomem	*regs;
	struct cpsw_ss_regs __iomem	*ss_regs;
	struct cpsw_host_regs __iomem	*host_port_regs;
	u32				msg_enable;	/* NETIF_MSG mask */
	struct net_device_stats		stats;		/* sw-maintained counters */
	int				rx_packet_max;	/* rx buffer size */
	int				host_port;
	struct clk			*clk;
	u8				mac_addr[ETH_ALEN];
	struct cpsw_slave		*slaves;	/* array of data.slaves */
	struct cpdma_ctlr		*dma;
	struct cpdma_chan		*txch, *rxch;	/* single channel pair */
	struct cpsw_ale			*ale;
	/* snapshot of IRQ numbers */
	u32				irqs_table[4];
	u32				num_irqs;
};
218
/* Recover the cpsw_priv embedding a given napi_struct. */
#define napi_to_priv(napi)	container_of(napi, struct cpsw_priv, napi)

/* Invoke func(&slaves[idx], args...) for each configured slave port.
 * Expands to a statement block and declares its own index "idx". */
#define for_each_slave(priv, func, arg...)			\
	do {							\
		int idx;					\
		for (idx = 0; idx < (priv)->data.slaves; idx++)	\
			(func)((priv)->slaves + idx, ##arg);	\
	} while (0)
226
227static void cpsw_intr_enable(struct cpsw_priv *priv)
228{
229 __raw_writel(0xFF, &priv->ss_regs->tx_en);
230 __raw_writel(0xFF, &priv->ss_regs->rx_en);
231
232 cpdma_ctlr_int_ctrl(priv->dma, true);
233 return;
234}
235
236static void cpsw_intr_disable(struct cpsw_priv *priv)
237{
238 __raw_writel(0, &priv->ss_regs->tx_en);
239 __raw_writel(0, &priv->ss_regs->rx_en);
240
241 cpdma_ctlr_int_ctrl(priv->dma, false);
242 return;
243}
244
245void cpsw_tx_handler(void *token, int len, int status)
246{
247 struct sk_buff *skb = token;
248 struct net_device *ndev = skb->dev;
249 struct cpsw_priv *priv = netdev_priv(ndev);
250
251 if (unlikely(netif_queue_stopped(ndev)))
252 netif_start_queue(ndev);
253 priv->stats.tx_packets++;
254 priv->stats.tx_bytes += len;
255 dev_kfree_skb_any(skb);
256}
257
/*
 * CPDMA rx-completion callback.  A successfully received skb is handed
 * to the stack and replaced in the ring by a freshly allocated one;
 * while the interface is going down, skbs are freed and the ring is
 * simply allowed to drain.
 */
void cpsw_rx_handler(void *token, int len, int status)
{
	struct sk_buff		*skb = token;
	struct net_device	*ndev = skb->dev;
	struct cpsw_priv	*priv = netdev_priv(ndev);
	int			ret = 0;

	/* free and bail if we are shutting down */
	if (unlikely(!netif_running(ndev)) ||
	    unlikely(!netif_carrier_ok(ndev))) {
		dev_kfree_skb_any(skb);
		return;
	}
	if (likely(status >= 0)) {
		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, ndev);
		netif_receive_skb(skb);
		priv->stats.rx_bytes += len;
		priv->stats.rx_packets++;
		skb = NULL;	/* ownership passed to the stack */
	}

	/* NOTE(review): when status < 0 while the device is running, the
	 * errored skb is neither freed nor resubmitted below — looks like
	 * a leak; confirm against the cpdma completion semantics. */

	if (unlikely(!netif_running(ndev))) {
		if (skb)
			dev_kfree_skb_any(skb);
		return;
	}

	if (likely(!skb)) {
		/* replenish the ring slot just consumed */
		skb = netdev_alloc_skb_ip_align(ndev, priv->rx_packet_max);
		if (WARN_ON(!skb))
			return;

		ret = cpdma_chan_submit(priv->rxch, skb, skb->data,
					skb_tailroom(skb), GFP_KERNEL);
	}
	WARN_ON(ret < 0);
}
296
297static irqreturn_t cpsw_interrupt(int irq, void *dev_id)
298{
299 struct cpsw_priv *priv = dev_id;
300
301 if (likely(netif_running(priv->ndev))) {
302 cpsw_intr_disable(priv);
303 cpsw_disable_irq(priv);
304 napi_schedule(&priv->napi);
305 }
306 return IRQ_HANDLED;
307}
308
309static inline int cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num)
310{
311 if (priv->host_port == 0)
312 return slave_num + 1;
313 else
314 return slave_num;
315}
316
/*
 * NAPI poll: drain up to 128 tx completions and up to "budget" rx
 * packets.  Only the rx count is reported; when it comes in under
 * budget the poll completes and interrupts are re-armed — hardware
 * mask first, then EOI, then the Linux irq lines.
 */
static int cpsw_poll(struct napi_struct *napi, int budget)
{
	struct cpsw_priv	*priv = napi_to_priv(napi);
	int			num_tx, num_rx;

	num_tx = cpdma_chan_process(priv->txch, 128);
	num_rx = cpdma_chan_process(priv->rxch, budget);

	if (num_rx || num_tx)
		cpsw_dbg(priv, intr, "poll %d rx, %d tx pkts\n",
			 num_rx, num_tx);

	if (num_rx < budget) {
		napi_complete(napi);
		cpsw_intr_enable(priv);
		cpdma_ctlr_eoi(priv->dma);
		cpsw_enable_irq(priv);
	}

	return num_rx;
}
338
/*
 * Write 1 to a module's soft_reset register and busy-wait (bounded by
 * one second of jiffies) for the bit to self-clear; WARN with the
 * module name if it never does.
 */
static inline void soft_reset(const char *module, void __iomem *reg)
{
	unsigned long timeout = jiffies + HZ;

	__raw_writel(1, reg);
	do {
		cpu_relax();
	} while ((__raw_readl(reg) & 1) && time_after(timeout, jiffies));

	WARN(__raw_readl(reg) & 1, "failed to soft-reset %s\n", module);
}
350
/* Pack a 6-byte MAC address into the two register halves used by
 * sa_hi/sa_lo: bytes 0-3 little-endian into mac_hi, bytes 4-5 into
 * mac_lo (see cpsw_set_slave_mac). */
#define mac_hi(mac)	(((mac)[0] << 0) | ((mac)[1] << 8) |	\
			 ((mac)[2] << 16) | ((mac)[3] << 24))
#define mac_lo(mac)	(((mac)[4] << 0) | ((mac)[5] << 8))
354
355static void cpsw_set_slave_mac(struct cpsw_slave *slave,
356 struct cpsw_priv *priv)
357{
358 __raw_writel(mac_hi(priv->mac_addr), &slave->regs->sa_hi);
359 __raw_writel(mac_lo(priv->mac_addr), &slave->regs->sa_lo);
360}
361
/*
 * Per-slave phylib link worker (driven from cpsw_adjust_link):
 * recompute the sliver's mac_control for the current PHY state and
 * open/close the ALE port accordingly.  Sets *link when this slave has
 * link but never clears it, so the caller effectively ORs across slaves.
 */
static void _cpsw_adjust_link(struct cpsw_slave *slave,
			      struct cpsw_priv *priv, bool *link)
{
	struct phy_device	*phy = slave->phy;
	u32			mac_control = 0;
	u32			slave_port;

	if (!phy)	/* port without an attached PHY */
		return;

	slave_port = cpsw_get_slave_port(priv, slave->slave_num);

	if (phy->link) {
		/* start from the board-specific baseline value */
		mac_control = priv->data.mac_control;

		/* enable forwarding */
		cpsw_ale_control_set(priv->ale, slave_port,
				     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);

		if (phy->speed == 1000)
			mac_control |= BIT(7);	/* GIGABITEN */
		if (phy->duplex)
			mac_control |= BIT(0);	/* FULLDUPLEXEN */
		*link = true;
	} else {
		mac_control = 0;
		/* disable forwarding */
		cpsw_ale_control_set(priv->ale, slave_port,
				     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
	}

	/* touch the register (and log) only on an actual state change */
	if (mac_control != slave->mac_control) {
		phy_print_status(phy);
		__raw_writel(mac_control, &slave->sliver->mac_control);
	}

	slave->mac_control = mac_control;
}
400
401static void cpsw_adjust_link(struct net_device *ndev)
402{
403 struct cpsw_priv *priv = netdev_priv(ndev);
404 bool link = false;
405
406 for_each_slave(priv, _cpsw_adjust_link, priv, &link);
407
408 if (link) {
409 netif_carrier_on(ndev);
410 if (netif_running(ndev))
411 netif_wake_queue(ndev);
412 } else {
413 netif_carrier_off(ndev);
414 netif_stop_queue(ndev);
415 }
416}
417
418static inline int __show_stat(char *buf, int maxlen, const char *name, u32 val)
419{
420 static char *leader = "........................................";
421
422 if (!val)
423 return 0;
424 else
425 return snprintf(buf, maxlen, "%s %s %10d\n", name,
426 leader + strlen(name), val);
427}
428
/*
 * Bring one slave port up: reset the sliver, program priority maps,
 * max frame length and MAC address, add the broadcast ALE entry, then
 * attach and start its PHY (link changes arrive via cpsw_adjust_link).
 * A failed PHY attach is logged and leaves the port present but
 * permanently link-less.
 */
static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	char name[32];	/* "slave-%d" easily fits */
	u32 slave_port;

	sprintf(name, "slave-%d", slave->slave_num);

	soft_reset(name, &slave->sliver->soft_reset);

	/* setup priority mapping */
	__raw_writel(RX_PRIORITY_MAPPING, &slave->sliver->rx_pri_map);
	__raw_writel(TX_PRIORITY_MAPPING, &slave->regs->tx_pri_map);

	/* setup max packet size, and mac address */
	__raw_writel(priv->rx_packet_max, &slave->sliver->rx_maxlen);
	cpsw_set_slave_mac(slave, priv);

	slave->mac_control = 0;	/* no link yet */

	slave_port = cpsw_get_slave_port(priv, slave->slave_num);

	/* forward broadcast frames arriving on this port */
	cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
			   1 << slave_port, 0, ALE_MCAST_FWD_2);

	slave->phy = phy_connect(priv->ndev, slave->data->phy_id,
				 &cpsw_adjust_link, 0, slave->data->phy_if);
	if (IS_ERR(slave->phy)) {
		dev_err(priv->dev, "phy %s not found on slave %d\n",
			slave->data->phy_id, slave->slave_num);
		slave->phy = NULL;
	} else {
		dev_info(priv->dev, "phy found : id is : 0x%x\n",
			 slave->phy->phy_id);
		phy_start(slave->phy);
	}
}
465
/*
 * Reset the switch core and configure the host (CPU) port: VLAN-unaware
 * operation, host priority mapping, forwarding state, and the unicast/
 * broadcast ALE entries that steer traffic to the CPU.
 */
static void cpsw_init_host_port(struct cpsw_priv *priv)
{
	/* soft reset the controller and initialize ale */
	soft_reset("cpsw", &priv->regs->soft_reset);
	cpsw_ale_start(priv->ale);

	/* switch to vlan unaware mode */
	cpsw_ale_control_set(priv->ale, 0, ALE_VLAN_AWARE, 0);

	/* setup host port priority mapping */
	__raw_writel(CPDMA_TX_PRIORITY_MAP,
		     &priv->host_port_regs->cpdma_tx_pri_map);
	__raw_writel(0, &priv->host_port_regs->cpdma_rx_chan_map);

	cpsw_ale_control_set(priv->ale, priv->host_port,
			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);

	/* deliver frames for our MAC, and broadcasts, to the host port */
	cpsw_ale_add_ucast(priv->ale, priv->mac_addr, priv->host_port, 0);
	cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
			   1 << priv->host_port, 0, ALE_MCAST_FWD_2);
}
487
/*
 * ndo_open: power the block up, reset and configure the switch and the
 * slave ports, prime the rx ring, then start CPDMA and NAPI.
 */
static int cpsw_ndo_open(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	int i, ret;
	u32 reg;

	cpsw_intr_disable(priv);
	netif_carrier_off(ndev);

	ret = clk_enable(priv->clk);
	if (ret < 0) {
		dev_err(priv->dev, "unable to turn on device clock\n");
		return ret;
	}

	/* report hardware revision (registers readable now the clock is on) */
	reg = __raw_readl(&priv->regs->id_ver);

	dev_info(priv->dev, "initializing cpsw version %d.%d (%d)\n",
		 CPSW_MAJOR_VERSION(reg), CPSW_MINOR_VERSION(reg),
		 CPSW_RTL_VERSION(reg));

	/* initialize host and slave ports */
	cpsw_init_host_port(priv);
	for_each_slave(priv, cpsw_slave_open, priv);

	/* setup tx dma to fixed prio and zero offset */
	cpdma_control_set(priv->dma, CPDMA_TX_PRIO_FIXED, 1);
	cpdma_control_set(priv->dma, CPDMA_RX_BUFFER_OFFSET, 0);

	/* disable priority elevation */
	__raw_writel(0, &priv->regs->ptype);

	/* enable statistics on the first three ports (0x7 = three
	 * port-enable bits: host plus both slaves) */
	__raw_writel(0x7, &priv->regs->stat_port_en);

	/* fall back to a sane ring size if the platform left it zero */
	if (WARN_ON(!priv->data.rx_descs))
		priv->data.rx_descs = 128;

	for (i = 0; i < priv->data.rx_descs; i++) {
		struct sk_buff *skb;

		ret = -ENOMEM;
		skb = netdev_alloc_skb_ip_align(priv->ndev,
						priv->rx_packet_max);
		if (!skb)
			break;
		ret = cpdma_chan_submit(priv->rxch, skb, skb->data,
					skb_tailroom(skb), GFP_KERNEL);
		if (WARN_ON(ret < 0))
			break;
	}
	/* continue even if we didn't manage to submit all receive descs */
	cpsw_info(priv, ifup, "submitted %d rx descriptors\n", i);

	cpdma_ctlr_start(priv->dma);
	cpsw_intr_enable(priv);
	napi_enable(&priv->napi);
	cpdma_ctlr_eoi(priv->dma);

	return 0;
}
549
550static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_priv *priv)
551{
552 if (!slave->phy)
553 return;
554 phy_stop(slave->phy);
555 phy_disconnect(slave->phy);
556 slave->phy = NULL;
557}
558
/*
 * ndo_stop: inverse of cpsw_ndo_open — mask interrupts, halt the CPDMA
 * and queues, quiesce NAPI, stop the ALE, detach the slave PHYs and
 * finally gate the functional clock.
 */
static int cpsw_ndo_stop(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);

	cpsw_info(priv, ifdown, "shutting down cpsw device\n");
	cpsw_intr_disable(priv);
	cpdma_ctlr_int_ctrl(priv->dma, false);
	cpdma_ctlr_stop(priv->dma);
	netif_stop_queue(priv->ndev);
	napi_disable(&priv->napi);
	netif_carrier_off(priv->ndev);
	cpsw_ale_stop(priv->ale);
	for_each_slave(priv, cpsw_slave_stop, priv);
	clk_disable(priv->clk);
	return 0;
}
575
576static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
577 struct net_device *ndev)
578{
579 struct cpsw_priv *priv = netdev_priv(ndev);
580 int ret;
581
582 ndev->trans_start = jiffies;
583
584 if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) {
585 cpsw_err(priv, tx_err, "packet pad failed\n");
586 priv->stats.tx_dropped++;
587 return NETDEV_TX_OK;
588 }
589
590 ret = cpdma_chan_submit(priv->txch, skb, skb->data,
591 skb->len, GFP_KERNEL);
592 if (unlikely(ret != 0)) {
593 cpsw_err(priv, tx_err, "desc submit failed\n");
594 goto fail;
595 }
596
597 return NETDEV_TX_OK;
598fail:
599 priv->stats.tx_dropped++;
600 netif_stop_queue(ndev);
601 return NETDEV_TX_BUSY;
602}
603
604static void cpsw_ndo_change_rx_flags(struct net_device *ndev, int flags)
605{
606 /*
607 * The switch cannot operate in promiscuous mode without substantial
608 * headache. For promiscuous mode to work, we would need to put the
609 * ALE in bypass mode and route all traffic to the host port.
610 * Subsequently, the host will need to operate as a "bridge", learn,
611 * and flood as needed. For now, we simply complain here and
612 * do nothing about it :-)
613 */
614 if ((flags & IFF_PROMISC) && (ndev->flags & IFF_PROMISC))
615 dev_err(&ndev->dev, "promiscuity ignored!\n");
616
617 /*
618 * The switch cannot filter multicast traffic unless it is configured
619 * in "VLAN Aware" mode. Unfortunately, VLAN awareness requires a
620 * whole bunch of additional logic that this driver does not implement
621 * at present.
622 */
623 if ((flags & IFF_ALLMULTI) && !(ndev->flags & IFF_ALLMULTI))
624 dev_err(&ndev->dev, "multicast traffic cannot be filtered!\n");
625}
626
/*
 * ndo_tx_timeout: the stack saw no tx progress for too long.  Count the
 * error and restart just the tx channel, with all device interrupts
 * masked around the stop/start; the final EOI lets the line fire again.
 */
static void cpsw_ndo_tx_timeout(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);

	cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n");
	priv->stats.tx_errors++;
	cpsw_intr_disable(priv);
	cpdma_ctlr_int_ctrl(priv->dma, false);
	cpdma_chan_stop(priv->txch);
	cpdma_chan_start(priv->txch);
	cpdma_ctlr_int_ctrl(priv->dma, true);
	cpsw_intr_enable(priv);
	cpdma_ctlr_eoi(priv->dma);
}
641
642static struct net_device_stats *cpsw_ndo_get_stats(struct net_device *ndev)
643{
644 struct cpsw_priv *priv = netdev_priv(ndev);
645 return &priv->stats;
646}
647
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Netpoll hook (netconsole/kgdboe): run the interrupt handler by hand
 * with both the CPDMA and wrapper interrupts masked, then re-arm.
 */
static void cpsw_ndo_poll_controller(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);

	cpsw_intr_disable(priv);
	cpdma_ctlr_int_ctrl(priv->dma, false);
	cpsw_interrupt(ndev->irq, priv);
	cpdma_ctlr_int_ctrl(priv->dma, true);
	cpsw_intr_enable(priv);
	cpdma_ctlr_eoi(priv->dma);
}
#endif
661
/* net_device callbacks; the rx path itself is driven by NAPI
 * (cpsw_poll), scheduled from cpsw_interrupt. */
static const struct net_device_ops cpsw_netdev_ops = {
	.ndo_open		= cpsw_ndo_open,
	.ndo_stop		= cpsw_ndo_stop,
	.ndo_start_xmit		= cpsw_ndo_start_xmit,
	.ndo_change_rx_flags	= cpsw_ndo_change_rx_flags,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= cpsw_ndo_tx_timeout,
	.ndo_get_stats		= cpsw_ndo_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cpsw_ndo_poll_controller,
#endif
};
674
/* ethtool -i: report driver identity into the fixed-size ethtool_drvinfo
 * fields; these short literals fit.  NOTE(review): "driver" usually
 * carries just the driver name ("cpsw") rather than a description. */
static void cpsw_get_drvinfo(struct net_device *ndev,
			     struct ethtool_drvinfo *info)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	strcpy(info->driver, "TI CPSW Driver v1.0");
	strcpy(info->version, "1.0");
	strcpy(info->bus_info, priv->pdev->name);
}
683
684static u32 cpsw_get_msglevel(struct net_device *ndev)
685{
686 struct cpsw_priv *priv = netdev_priv(ndev);
687 return priv->msg_enable;
688}
689
690static void cpsw_set_msglevel(struct net_device *ndev, u32 value)
691{
692 struct cpsw_priv *priv = netdev_priv(ndev);
693 priv->msg_enable = value;
694}
695
/* Minimal ethtool surface: identity, message level and link state. */
static const struct ethtool_ops cpsw_ethtool_ops = {
	.get_drvinfo	= cpsw_get_drvinfo,
	.get_msglevel	= cpsw_get_msglevel,
	.set_msglevel	= cpsw_set_msglevel,
	.get_link	= ethtool_op_get_link,
};
702
703static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv)
704{
705 void __iomem *regs = priv->regs;
706 int slave_num = slave->slave_num;
707 struct cpsw_slave_data *data = priv->data.slave_data + slave_num;
708
709 slave->data = data;
710 slave->regs = regs + data->slave_reg_ofs;
711 slave->sliver = regs + data->sliver_reg_ofs;
712}
713
714static int __devinit cpsw_probe(struct platform_device *pdev)
715{
716 struct cpsw_platform_data *data = pdev->dev.platform_data;
717 struct net_device *ndev;
718 struct cpsw_priv *priv;
719 struct cpdma_params dma_params;
720 struct cpsw_ale_params ale_params;
721 void __iomem *regs;
722 struct resource *res;
723 int ret = 0, i, k = 0;
724
725 if (!data) {
726 pr_err("platform data missing\n");
727 return -ENODEV;
728 }
729
730 ndev = alloc_etherdev(sizeof(struct cpsw_priv));
731 if (!ndev) {
732 pr_err("error allocating net_device\n");
733 return -ENOMEM;
734 }
735
736 platform_set_drvdata(pdev, ndev);
737 priv = netdev_priv(ndev);
738 spin_lock_init(&priv->lock);
739 priv->data = *data;
740 priv->pdev = pdev;
741 priv->ndev = ndev;
742 priv->dev = &ndev->dev;
743 priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
744 priv->rx_packet_max = max(rx_packet_max, 128);
745
746 if (is_valid_ether_addr(data->slave_data[0].mac_addr)) {
747 memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN);
748 pr_info("Detected MACID = %pM", priv->mac_addr);
749 } else {
750 random_ether_addr(priv->mac_addr);
751 pr_info("Random MACID = %pM", priv->mac_addr);
752 }
753
754 memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
755
756 priv->slaves = kzalloc(sizeof(struct cpsw_slave) * data->slaves,
757 GFP_KERNEL);
758 if (!priv->slaves) {
759 ret = -EBUSY;
760 goto clean_ndev_ret;
761 }
762 for (i = 0; i < data->slaves; i++)
763 priv->slaves[i].slave_num = i;
764
765 priv->clk = clk_get(&pdev->dev, NULL);
766 if (IS_ERR(priv->clk)) {
767 dev_err(priv->dev, "failed to get device clock)\n");
768 ret = -EBUSY;
769 }
770
771 priv->cpsw_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
772 if (!priv->cpsw_res) {
773 dev_err(priv->dev, "error getting i/o resource\n");
774 ret = -ENOENT;
775 goto clean_clk_ret;
776 }
777
778 if (!request_mem_region(priv->cpsw_res->start,
779 resource_size(priv->cpsw_res), ndev->name)) {
780 dev_err(priv->dev, "failed request i/o region\n");
781 ret = -ENXIO;
782 goto clean_clk_ret;
783 }
784
785 regs = ioremap(priv->cpsw_res->start, resource_size(priv->cpsw_res));
786 if (!regs) {
787 dev_err(priv->dev, "unable to map i/o region\n");
788 goto clean_cpsw_iores_ret;
789 }
790 priv->regs = regs;
791 priv->host_port = data->host_port_num;
792 priv->host_port_regs = regs + data->host_port_reg_ofs;
793
794 priv->cpsw_ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
795 if (!priv->cpsw_ss_res) {
796 dev_err(priv->dev, "error getting i/o resource\n");
797 ret = -ENOENT;
798 goto clean_clk_ret;
799 }
800
801 if (!request_mem_region(priv->cpsw_ss_res->start,
802 resource_size(priv->cpsw_ss_res), ndev->name)) {
803 dev_err(priv->dev, "failed request i/o region\n");
804 ret = -ENXIO;
805 goto clean_clk_ret;
806 }
807
808 regs = ioremap(priv->cpsw_ss_res->start,
809 resource_size(priv->cpsw_ss_res));
810 if (!regs) {
811 dev_err(priv->dev, "unable to map i/o region\n");
812 goto clean_cpsw_ss_iores_ret;
813 }
814 priv->ss_regs = regs;
815
816 for_each_slave(priv, cpsw_slave_init, priv);
817
818 memset(&dma_params, 0, sizeof(dma_params));
819 dma_params.dev = &pdev->dev;
820 dma_params.dmaregs = cpsw_dma_regs((u32)priv->regs,
821 data->cpdma_reg_ofs);
822 dma_params.rxthresh = cpsw_dma_rxthresh((u32)priv->regs,
823 data->cpdma_reg_ofs);
824 dma_params.rxfree = cpsw_dma_rxfree((u32)priv->regs,
825 data->cpdma_reg_ofs);
826 dma_params.txhdp = cpsw_dma_txhdp((u32)priv->regs,
827 data->cpdma_sram_ofs);
828 dma_params.rxhdp = cpsw_dma_rxhdp((u32)priv->regs,
829 data->cpdma_sram_ofs);
830 dma_params.txcp = cpsw_dma_txcp((u32)priv->regs,
831 data->cpdma_sram_ofs);
832 dma_params.rxcp = cpsw_dma_rxcp((u32)priv->regs,
833 data->cpdma_sram_ofs);
834
835 dma_params.num_chan = data->channels;
836 dma_params.has_soft_reset = true;
837 dma_params.min_packet_size = CPSW_MIN_PACKET_SIZE;
838 dma_params.desc_mem_size = data->bd_ram_size;
839 dma_params.desc_align = 16;
840 dma_params.has_ext_regs = true;
841 dma_params.desc_mem_phys = data->no_bd_ram ? 0 :
842 (u32 __force)priv->cpsw_res->start + data->bd_ram_ofs;
843 dma_params.desc_hw_addr = data->hw_ram_addr ?
844 data->hw_ram_addr : dma_params.desc_mem_phys ;
845
846 priv->dma = cpdma_ctlr_create(&dma_params);
847 if (!priv->dma) {
848 dev_err(priv->dev, "error initializing dma\n");
849 ret = -ENOMEM;
850 goto clean_iomap_ret;
851 }
852
853 priv->txch = cpdma_chan_create(priv->dma, tx_chan_num(0),
854 cpsw_tx_handler);
855 priv->rxch = cpdma_chan_create(priv->dma, rx_chan_num(0),
856 cpsw_rx_handler);
857
858 if (WARN_ON(!priv->txch || !priv->rxch)) {
859 dev_err(priv->dev, "error initializing dma channels\n");
860 ret = -ENOMEM;
861 goto clean_dma_ret;
862 }
863
864 memset(&ale_params, 0, sizeof(ale_params));
865 ale_params.dev = &ndev->dev;
866 ale_params.ale_regs = (void *)((u32)priv->regs) +
867 ((u32)data->ale_reg_ofs);
868 ale_params.ale_ageout = ale_ageout;
869 ale_params.ale_entries = data->ale_entries;
870 ale_params.ale_ports = data->slaves;
871
872 priv->ale = cpsw_ale_create(&ale_params);
873 if (!priv->ale) {
874 dev_err(priv->dev, "error initializing ale engine\n");
875 ret = -ENODEV;
876 goto clean_dma_ret;
877 }
878
879 ndev->irq = platform_get_irq(pdev, 0);
880 if (ndev->irq < 0) {
881 dev_err(priv->dev, "error getting irq resource\n");
882 ret = -ENOENT;
883 goto clean_ale_ret;
884 }
885
886 while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) {
887 for (i = res->start; i <= res->end; i++) {
888 if (request_irq(i, cpsw_interrupt, IRQF_DISABLED,
889 dev_name(&pdev->dev), priv)) {
890 dev_err(priv->dev, "error attaching irq\n");
891 goto clean_ale_ret;
892 }
893 priv->irqs_table[k] = i;
894 priv->num_irqs = k;
895 }
896 k++;
897 }
898
899 ndev->flags |= IFF_ALLMULTI; /* see cpsw_ndo_change_rx_flags() */
900
901 ndev->netdev_ops = &cpsw_netdev_ops;
902 SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops);
903 netif_napi_add(ndev, &priv->napi, cpsw_poll, CPSW_POLL_WEIGHT);
904
905 /* register the network device */
906 SET_NETDEV_DEV(ndev, &pdev->dev);
907 ret = register_netdev(ndev);
908 if (ret) {
909 dev_err(priv->dev, "error registering net device\n");
910 ret = -ENODEV;
911 goto clean_irq_ret;
912 }
913
914 cpsw_notice(priv, probe, "initialized device (regs %x, irq %d)\n",
915 priv->cpsw_res->start, ndev->irq);
916
917 return 0;
918
919clean_irq_ret:
920 free_irq(ndev->irq, priv);
921clean_ale_ret:
922 cpsw_ale_destroy(priv->ale);
923clean_dma_ret:
924 cpdma_chan_destroy(priv->txch);
925 cpdma_chan_destroy(priv->rxch);
926 cpdma_ctlr_destroy(priv->dma);
927clean_iomap_ret:
928 iounmap(priv->regs);
929clean_cpsw_ss_iores_ret:
930 release_mem_region(priv->cpsw_ss_res->start,
931 resource_size(priv->cpsw_ss_res));
932clean_cpsw_iores_ret:
933 release_mem_region(priv->cpsw_res->start,
934 resource_size(priv->cpsw_res));
935clean_clk_ret:
936 clk_put(priv->clk);
937 kfree(priv->slaves);
938clean_ndev_ret:
939 free_netdev(ndev);
940 return ret;
941}
942
943static int __devexit cpsw_remove(struct platform_device *pdev)
944{
945 struct net_device *ndev = platform_get_drvdata(pdev);
946 struct cpsw_priv *priv = netdev_priv(ndev);
947
948 pr_info("removing device");
949 platform_set_drvdata(pdev, NULL);
950
951 free_irq(ndev->irq, priv);
952 cpsw_ale_destroy(priv->ale);
953 cpdma_chan_destroy(priv->txch);
954 cpdma_chan_destroy(priv->rxch);
955 cpdma_ctlr_destroy(priv->dma);
956 iounmap(priv->regs);
957 release_mem_region(priv->cpsw_res->start,
958 resource_size(priv->cpsw_res));
959 release_mem_region(priv->cpsw_ss_res->start,
960 resource_size(priv->cpsw_ss_res));
961 clk_put(priv->clk);
962 kfree(priv->slaves);
963 free_netdev(ndev);
964
965 return 0;
966}
967
/* System suspend: close a running interface so all hardware state is
 * torn down; cpsw_resume rebuilds it via cpsw_ndo_open. */
static int cpsw_suspend(struct device *dev)
{
	struct net_device *ndev =
		platform_get_drvdata(to_platform_device(dev));

	if (netif_running(ndev))
		cpsw_ndo_stop(ndev);

	return 0;
}
977
/* System resume: reopen the interface if it was running at suspend. */
static int cpsw_resume(struct device *dev)
{
	struct net_device *ndev =
		platform_get_drvdata(to_platform_device(dev));

	if (netif_running(ndev))
		cpsw_ndo_open(ndev);

	return 0;
}
987
/* Sleep hooks simply close/reopen the interface (see cpsw_suspend). */
static const struct dev_pm_ops cpsw_pm_ops = {
	.suspend	= cpsw_suspend,
	.resume		= cpsw_resume,
};

static struct platform_driver cpsw_driver = {
	.driver = {
		.name	 = "cpsw",	/* matched against board device name */
		.owner	 = THIS_MODULE,
		.pm	 = &cpsw_pm_ops,
	},
	.probe = cpsw_probe,
	.remove = __devexit_p(cpsw_remove),
};
1002
static int __init cpsw_init(void)
{
	return platform_driver_register(&cpsw_driver);
}
/* late_initcall rather than module_init — presumably so the MDIO/PHY
 * infrastructure this driver selects is up first; confirm ordering. */
late_initcall(cpsw_init);
1008
static void __exit cpsw_exit(void)
{
	platform_driver_unregister(&cpsw_driver);
}
module_exit(cpsw_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cyril Chemparathy <cyril@ti.com>");
MODULE_AUTHOR("Mugunthan V N <mugunthanvnm@ti.com>");
MODULE_DESCRIPTION("TI CPSW Ethernet driver");
diff --git a/include/linux/platform_data/cpsw.h b/include/linux/platform_data/cpsw.h
new file mode 100644
index 000000000000..c4e23d029498
--- /dev/null
+++ b/include/linux/platform_data/cpsw.h
@@ -0,0 +1,55 @@
1/*
2 * Texas Instruments Ethernet Switch Driver
3 *
4 * Copyright (C) 2012 Texas Instruments
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License as
8 * published by the Free Software Foundation version 2.
9 *
10 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
11 * kind, whether express or implied; without even the implied warranty
12 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15#ifndef __CPSW_H__
16#define __CPSW_H__
17
18#include <linux/if_ether.h>
19
/* Per-slave-port configuration supplied by board/platform code. */
struct cpsw_slave_data {
	u32		slave_reg_ofs;	/* port registers, offset into CPSW window */
	u32		sliver_reg_ofs;	/* sliver (MAC) registers, same window */
	const char	*phy_id;	/* phylib bus id passed to phy_connect() */
	int		phy_if;		/* PHY interface mode */
	u8		mac_addr[ETH_ALEN]; /* used if valid, else randomized */
};
27
/* Board-level description of one CPSW instance; all *_ofs fields are
 * byte offsets into the first (switch-core) MEM resource. */
struct cpsw_platform_data {
	u32	ss_reg_ofs;	/* Subsystem control register offset */
	u32	channels;	/* number of cpdma channels (symmetric) */
	u32	cpdma_reg_ofs;	/* cpdma register offset */
	u32	cpdma_sram_ofs;	/* cpdma sram offset */

	u32	slaves;		/* number of slave cpgmac ports */
	struct cpsw_slave_data	*slave_data;

	u32	ale_reg_ofs;	/* address lookup engine reg offset */
	u32	ale_entries;	/* ale table size */

	u32	host_port_reg_ofs; /* cpsw cpdma host port registers */
	u32	host_port_num;	/* The port number for the host port */

	u32	hw_stats_reg_ofs; /* cpsw hardware statistics counters */

	u32	bd_ram_ofs;	/* embedded buffer descriptor RAM offset */
	u32	bd_ram_size;	/* buffer descriptor ram size */
	u32	hw_ram_addr;	/* if the HW address for BD RAM is different */
	bool	no_bd_ram;	/* no embedded BD ram */

	u32	rx_descs;	/* number of rx descriptors */

	u32	mac_control;	/* baseline MAC control register value */
};
54
55#endif /* __CPSW_H__ */