Diffstat (limited to 'drivers/net/can')
-rw-r--r--  drivers/net/can/Kconfig              |   24
-rw-r--r--  drivers/net/can/Makefile             |    4
-rw-r--r--  drivers/net/can/at91_can.c           |   42
-rw-r--r--  drivers/net/can/bfin_can.c           |  705
-rw-r--r--  drivers/net/can/dev.c                |   85
-rw-r--r--  drivers/net/can/mcp251x.c            | 1150
-rw-r--r--  drivers/net/can/mscan/Kconfig        |   24
-rw-r--r--  drivers/net/can/mscan/Makefile       |    5
-rw-r--r--  drivers/net/can/mscan/mpc5xxx_can.c  |  421
-rw-r--r--  drivers/net/can/mscan/mscan.c        |  701
-rw-r--r--  drivers/net/can/mscan/mscan.h        |  302
-rw-r--r--  drivers/net/can/sja1000/Kconfig      |   12
-rw-r--r--  drivers/net/can/sja1000/Makefile     |    1
-rw-r--r--  drivers/net/can/sja1000/ems_pci.c    |    3
-rw-r--r--  drivers/net/can/sja1000/kvaser_pci.c |    2
-rw-r--r--  drivers/net/can/sja1000/plx_pci.c    |  473
-rw-r--r--  drivers/net/can/sja1000/sja1000.c    |   64
-rw-r--r--  drivers/net/can/sja1000/sja1000.h    |    2
-rw-r--r--  drivers/net/can/ti_hecc.c            | 1056
-rw-r--r--  drivers/net/can/usb/Kconfig          |    2
-rw-r--r--  drivers/net/can/usb/ems_usb.c        |   48
-rw-r--r--  drivers/net/can/vcan.c               |   13
22 files changed, 5028 insertions, 111 deletions
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 772f6d2489ce..05b751719bd5 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -41,6 +41,30 @@ config CAN_AT91
 	---help---
 	  This is a driver for the SoC CAN controller in Atmel's AT91SAM9263.
 
+config CAN_TI_HECC
+	depends on CAN_DEV && ARCH_OMAP3
+	tristate "TI High End CAN Controller"
+	---help---
+	  Driver for TI HECC (High End CAN Controller) module found on many
+	  TI devices. The device specifications are available from www.ti.com
+
+config CAN_MCP251X
+	tristate "Microchip MCP251x SPI CAN controllers"
+	depends on CAN_DEV && SPI && HAS_DMA
+	---help---
+	  Driver for the Microchip MCP251x SPI CAN controllers.
+
+config CAN_BFIN
+	depends on CAN_DEV && (BF534 || BF536 || BF537 || BF538 || BF539 || BF54x)
+	tristate "Analog Devices Blackfin on-chip CAN"
+	---help---
+	  Driver for the Analog Devices Blackfin on-chip CAN controllers
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called bfin_can.
+
+source "drivers/net/can/mscan/Kconfig"
+
 source "drivers/net/can/sja1000/Kconfig"
 
 source "drivers/net/can/usb/Kconfig"
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 0dea62721f2f..7a702f28d01c 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -10,6 +10,10 @@ can-dev-y := dev.o
 obj-y				+= usb/
 
 obj-$(CONFIG_CAN_SJA1000)	+= sja1000/
+obj-$(CONFIG_CAN_MSCAN)		+= mscan/
 obj-$(CONFIG_CAN_AT91)		+= at91_can.o
+obj-$(CONFIG_CAN_TI_HECC)	+= ti_hecc.o
+obj-$(CONFIG_CAN_MCP251X)	+= mcp251x.o
+obj-$(CONFIG_CAN_BFIN)		+= bfin_can.o
 
 ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index f67ae285a35a..a2f29a38798a 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -221,38 +221,6 @@ static inline void set_mb_mode(const struct at91_priv *priv, unsigned int mb,
 	set_mb_mode_prio(priv, mb, mode, 0);
 }
 
-static struct sk_buff *alloc_can_skb(struct net_device *dev,
-				     struct can_frame **cf)
-{
-	struct sk_buff *skb;
-
-	skb = netdev_alloc_skb(dev, sizeof(struct can_frame));
-	if (unlikely(!skb))
-		return NULL;
-
-	skb->protocol = htons(ETH_P_CAN);
-	skb->ip_summed = CHECKSUM_UNNECESSARY;
-	*cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
-
-	return skb;
-}
-
-static struct sk_buff *alloc_can_err_skb(struct net_device *dev,
-					 struct can_frame **cf)
-{
-	struct sk_buff *skb;
-
-	skb = alloc_can_skb(dev, cf);
-	if (unlikely(!skb))
-		return NULL;
-
-	memset(*cf, 0, sizeof(struct can_frame));
-	(*cf)->can_id = CAN_ERR_FLAG;
-	(*cf)->can_dlc = CAN_ERR_DLC;
-
-	return skb;
-}
-
 /*
  * Swtich transceiver on or off
  */
@@ -374,6 +342,9 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	unsigned int mb, prio;
 	u32 reg_mid, reg_mcr;
 
+	if (can_dropped_invalid_skb(dev, skb))
+		return NETDEV_TX_OK;
+
 	mb = get_tx_next_mb(priv);
 	prio = get_tx_next_prio(priv);
 
@@ -506,7 +477,7 @@ static void at91_read_mb(struct net_device *dev, unsigned int mb,
 	reg_msr = at91_read(priv, AT91_MSR(mb));
 	if (reg_msr & AT91_MSR_MRTR)
 		cf->can_id |= CAN_RTR_FLAG;
-	cf->can_dlc = min_t(__u8, (reg_msr >> 16) & 0xf, 8);
+	cf->can_dlc = get_can_dlc((reg_msr >> 16) & 0xf);
 
 	*(u32 *)(cf->data + 0) = at91_read(priv, AT91_MDL(mb));
 	*(u32 *)(cf->data + 4) = at91_read(priv, AT91_MDH(mb));
@@ -1069,7 +1040,7 @@ static int __init at91_can_probe(struct platform_device *pdev)
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	irq = platform_get_irq(pdev, 0);
-	if (!res || !irq) {
+	if (!res || irq <= 0) {
 		err = -ENODEV;
 		goto exit_put;
 	}
@@ -1087,7 +1058,7 @@ static int __init at91_can_probe(struct platform_device *pdev)
 		goto exit_release;
 	}
 
-	dev = alloc_candev(sizeof(struct at91_priv));
+	dev = alloc_candev(sizeof(struct at91_priv), AT91_MB_TX_NUM);
 	if (!dev) {
 		err = -ENOMEM;
 		goto exit_iounmap;
@@ -1102,6 +1073,7 @@ static int __init at91_can_probe(struct platform_device *pdev)
 	priv->can.bittiming_const = &at91_bittiming_const;
 	priv->can.do_set_bittiming = at91_set_bittiming;
 	priv->can.do_set_mode = at91_set_mode;
+	priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
 	priv->reg_base = addr;
 	priv->dev = dev;
 	priv->clk = clk;
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
new file mode 100644
index 000000000000..03489864376d
--- /dev/null
+++ b/drivers/net/can/bfin_can.c
@@ -0,0 +1,705 @@
1/*
2 * Blackfin On-Chip CAN Driver
3 *
4 * Copyright 2004-2009 Analog Devices Inc.
5 *
6 * Enter bugs at http://blackfin.uclinux.org/
7 *
8 * Licensed under the GPL-2 or later.
9 */
10
11#include <linux/module.h>
12#include <linux/init.h>
13#include <linux/kernel.h>
14#include <linux/bitops.h>
15#include <linux/interrupt.h>
16#include <linux/errno.h>
17#include <linux/netdevice.h>
18#include <linux/skbuff.h>
19#include <linux/platform_device.h>
20
21#include <linux/can.h>
22#include <linux/can/dev.h>
23#include <linux/can/error.h>
24
25#include <asm/bfin_can.h>
26#include <asm/portmux.h>
27
28#define DRV_NAME "bfin_can"
29#define BFIN_CAN_TIMEOUT 100
30#define TX_ECHO_SKB_MAX 1
31
32/*
33 * bfin can private data
34 */
35struct bfin_can_priv {
36 struct can_priv can; /* must be the first member */
37 struct net_device *dev;
38 void __iomem *membase;
39 int rx_irq;
40 int tx_irq;
41 int err_irq;
42 unsigned short *pin_list;
43};
44
45/*
46 * bfin can timing parameters
47 */
48static struct can_bittiming_const bfin_can_bittiming_const = {
49 .name = DRV_NAME,
50 .tseg1_min = 1,
51 .tseg1_max = 16,
52 .tseg2_min = 1,
53 .tseg2_max = 8,
54 .sjw_max = 4,
55 /*
56 * Although the BRP field can be set to any value, it is recommended
57 * that the value be greater than or equal to 4, as restrictions
58 * apply to the bit timing configuration when BRP is less than 4.
59 */
60 .brp_min = 4,
61 .brp_max = 1024,
62 .brp_inc = 1,
63};
64
65static int bfin_can_set_bittiming(struct net_device *dev)
66{
67 struct bfin_can_priv *priv = netdev_priv(dev);
68 struct bfin_can_regs __iomem *reg = priv->membase;
69 struct can_bittiming *bt = &priv->can.bittiming;
70 u16 clk, timing;
71
72 clk = bt->brp - 1;
73 timing = ((bt->sjw - 1) << 8) | (bt->prop_seg + bt->phase_seg1 - 1) |
74 ((bt->phase_seg2 - 1) << 4);
75
76 /*
77 * If the SAM bit is set, the input signal is oversampled three times
78 * at the SCLK rate.
79 */
80 if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
81 timing |= SAM;
82
83 bfin_write16(&reg->clock, clk);
84 bfin_write16(&reg->timing, timing);
85
86 dev_info(dev->dev.parent, "setting CLOCK=0x%04x TIMING=0x%04x\n",
87 clk, timing);
88
89 return 0;
90}
91
92static void bfin_can_set_reset_mode(struct net_device *dev)
93{
94 struct bfin_can_priv *priv = netdev_priv(dev);
95 struct bfin_can_regs __iomem *reg = priv->membase;
96 int timeout = BFIN_CAN_TIMEOUT;
97 int i;
98
99 /* disable interrupts */
100 bfin_write16(&reg->mbim1, 0);
101 bfin_write16(&reg->mbim2, 0);
102 bfin_write16(&reg->gim, 0);
103
104 /* reset can and enter configuration mode */
105 bfin_write16(&reg->control, SRS | CCR);
106 SSYNC();
107 bfin_write16(&reg->control, CCR);
108 SSYNC();
109 while (!(bfin_read16(&reg->control) & CCA)) {
110 udelay(10);
111 if (--timeout == 0) {
112 dev_err(dev->dev.parent,
113 "fail to enter configuration mode\n");
114 BUG();
115 }
116 }
117
118 /*
119 * All mailbox configurations are marked as inactive
120 * by writing to CAN Mailbox Configuration Registers 1 and 2
121 * For all bits: 0 - Mailbox disabled, 1 - Mailbox enabled
122 */
123 bfin_write16(&reg->mc1, 0);
124 bfin_write16(&reg->mc2, 0);
125
126 /* Set Mailbox Direction */
127 bfin_write16(&reg->md1, 0xFFFF); /* mailbox 1-16 are RX */
128 bfin_write16(&reg->md2, 0); /* mailbox 17-32 are TX */
129
130 /* RECEIVE_STD_CHL */
131 for (i = 0; i < 2; i++) {
132 bfin_write16(&reg->chl[RECEIVE_STD_CHL + i].id0, 0);
133 bfin_write16(&reg->chl[RECEIVE_STD_CHL + i].id1, AME);
134 bfin_write16(&reg->chl[RECEIVE_STD_CHL + i].dlc, 0);
135 bfin_write16(&reg->msk[RECEIVE_STD_CHL + i].amh, 0x1FFF);
136 bfin_write16(&reg->msk[RECEIVE_STD_CHL + i].aml, 0xFFFF);
137 }
138
139 /* RECEIVE_EXT_CHL */
140 for (i = 0; i < 2; i++) {
141 bfin_write16(&reg->chl[RECEIVE_EXT_CHL + i].id0, 0);
142 bfin_write16(&reg->chl[RECEIVE_EXT_CHL + i].id1, AME | IDE);
143 bfin_write16(&reg->chl[RECEIVE_EXT_CHL + i].dlc, 0);
144 bfin_write16(&reg->msk[RECEIVE_EXT_CHL + i].amh, 0x1FFF);
145 bfin_write16(&reg->msk[RECEIVE_EXT_CHL + i].aml, 0xFFFF);
146 }
147
148 bfin_write16(&reg->mc2, BIT(TRANSMIT_CHL - 16));
149 bfin_write16(&reg->mc1, BIT(RECEIVE_STD_CHL) + BIT(RECEIVE_EXT_CHL));
150 SSYNC();
151
152 priv->can.state = CAN_STATE_STOPPED;
153}
154
155static void bfin_can_set_normal_mode(struct net_device *dev)
156{
157 struct bfin_can_priv *priv = netdev_priv(dev);
158 struct bfin_can_regs __iomem *reg = priv->membase;
159 int timeout = BFIN_CAN_TIMEOUT;
160
161 /*
162 * leave configuration mode
163 */
164 bfin_write16(&reg->control, bfin_read16(&reg->control) & ~CCR);
165
166 while (bfin_read16(&reg->status) & CCA) {
167 udelay(10);
168 if (--timeout == 0) {
169 dev_err(dev->dev.parent,
170 "fail to leave configuration mode\n");
171 BUG();
172 }
173 }
174
175 /*
176 * clear _All_ tx and rx interrupts
177 */
178 bfin_write16(&reg->mbtif1, 0xFFFF);
179 bfin_write16(&reg->mbtif2, 0xFFFF);
180 bfin_write16(&reg->mbrif1, 0xFFFF);
181 bfin_write16(&reg->mbrif2, 0xFFFF);
182
183 /*
184 * clear global interrupt status register
185 */
186 bfin_write16(&reg->gis, 0x7FF); /* overwrites with '1' */
187
188 /*
189 * Initialize Interrupts
190 * - set bits in the mailbox interrupt mask register
191 * - global interrupt mask
192 */
193 bfin_write16(&reg->mbim1, BIT(RECEIVE_STD_CHL) + BIT(RECEIVE_EXT_CHL));
194 bfin_write16(&reg->mbim2, BIT(TRANSMIT_CHL - 16));
195
196 bfin_write16(&reg->gim, EPIM | BOIM | RMLIM);
197 SSYNC();
198}
199
200static void bfin_can_start(struct net_device *dev)
201{
202 struct bfin_can_priv *priv = netdev_priv(dev);
203
204 /* enter reset mode */
205 if (priv->can.state != CAN_STATE_STOPPED)
206 bfin_can_set_reset_mode(dev);
207
208 /* leave reset mode */
209 bfin_can_set_normal_mode(dev);
210}
211
212static int bfin_can_set_mode(struct net_device *dev, enum can_mode mode)
213{
214 switch (mode) {
215 case CAN_MODE_START:
216 bfin_can_start(dev);
217 if (netif_queue_stopped(dev))
218 netif_wake_queue(dev);
219 break;
220
221 default:
222 return -EOPNOTSUPP;
223 }
224
225 return 0;
226}
227
228static int bfin_can_start_xmit(struct sk_buff *skb, struct net_device *dev)
229{
230 struct bfin_can_priv *priv = netdev_priv(dev);
231 struct bfin_can_regs __iomem *reg = priv->membase;
232 struct can_frame *cf = (struct can_frame *)skb->data;
233 u8 dlc = cf->can_dlc;
234 canid_t id = cf->can_id;
235 u8 *data = cf->data;
236 u16 val;
237 int i;
238
239 if (can_dropped_invalid_skb(dev, skb))
240 return NETDEV_TX_OK;
241
242 netif_stop_queue(dev);
243
244 /* fill id */
245 if (id & CAN_EFF_FLAG) {
246 bfin_write16(&reg->chl[TRANSMIT_CHL].id0, id);
247 if (id & CAN_RTR_FLAG)
248 writew(((id & 0x1FFF0000) >> 16) | IDE | AME | RTR,
249 &reg->chl[TRANSMIT_CHL].id1);
250 else
251 writew(((id & 0x1FFF0000) >> 16) | IDE | AME,
252 &reg->chl[TRANSMIT_CHL].id1);
253
254 } else {
255 if (id & CAN_RTR_FLAG)
256 writew((id << 2) | AME | RTR,
257 &reg->chl[TRANSMIT_CHL].id1);
258 else
259 bfin_write16(&reg->chl[TRANSMIT_CHL].id1,
260 (id << 2) | AME);
261 }
262
263 /* fill payload */
264 for (i = 0; i < 8; i += 2) {
265 val = ((7 - i) < dlc ? (data[7 - i]) : 0) +
266 ((6 - i) < dlc ? (data[6 - i] << 8) : 0);
267 bfin_write16(&reg->chl[TRANSMIT_CHL].data[i], val);
268 }
269
270 /* fill data length code */
271 bfin_write16(&reg->chl[TRANSMIT_CHL].dlc, dlc);
272
273 dev->trans_start = jiffies;
274
275 can_put_echo_skb(skb, dev, 0);
276
277 /* set transmit request */
278 bfin_write16(&reg->trs2, BIT(TRANSMIT_CHL - 16));
279
280 return 0;
281}
282
283static void bfin_can_rx(struct net_device *dev, u16 isrc)
284{
285 struct bfin_can_priv *priv = netdev_priv(dev);
286 struct net_device_stats *stats = &dev->stats;
287 struct bfin_can_regs __iomem *reg = priv->membase;
288 struct can_frame *cf;
289 struct sk_buff *skb;
290 int obj;
291 int i;
292 u16 val;
293
294 skb = alloc_can_skb(dev, &cf);
295 if (skb == NULL)
296 return;
297
298 /* get id */
299 if (isrc & BIT(RECEIVE_EXT_CHL)) {
300 /* extended frame format (EFF) */
301 cf->can_id = ((bfin_read16(&reg->chl[RECEIVE_EXT_CHL].id1)
302 & 0x1FFF) << 16)
303 + bfin_read16(&reg->chl[RECEIVE_EXT_CHL].id0);
304 cf->can_id |= CAN_EFF_FLAG;
305 obj = RECEIVE_EXT_CHL;
306 } else {
307 /* standard frame format (SFF) */
308 cf->can_id = (bfin_read16(&reg->chl[RECEIVE_STD_CHL].id1)
309 & 0x1ffc) >> 2;
310 obj = RECEIVE_STD_CHL;
311 }
312 if (bfin_read16(&reg->chl[obj].id1) & RTR)
313 cf->can_id |= CAN_RTR_FLAG;
314
315 /* get data length code */
316 cf->can_dlc = get_can_dlc(bfin_read16(&reg->chl[obj].dlc) & 0xF);
317
318 /* get payload */
319 for (i = 0; i < 8; i += 2) {
320 val = bfin_read16(&reg->chl[obj].data[i]);
321 cf->data[7 - i] = (7 - i) < cf->can_dlc ? val : 0;
322 cf->data[6 - i] = (6 - i) < cf->can_dlc ? (val >> 8) : 0;
323 }
324
325 netif_rx(skb);
326
327 stats->rx_packets++;
328 stats->rx_bytes += cf->can_dlc;
329}
330
331static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status)
332{
333 struct bfin_can_priv *priv = netdev_priv(dev);
334 struct bfin_can_regs __iomem *reg = priv->membase;
335 struct net_device_stats *stats = &dev->stats;
336 struct can_frame *cf;
337 struct sk_buff *skb;
338 enum can_state state = priv->can.state;
339
340 skb = alloc_can_err_skb(dev, &cf);
341 if (skb == NULL)
342 return -ENOMEM;
343
344 if (isrc & RMLIS) {
345 /* data overrun interrupt */
346 dev_dbg(dev->dev.parent, "data overrun interrupt\n");
347 cf->can_id |= CAN_ERR_CRTL;
348 cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
349 stats->rx_over_errors++;
350 stats->rx_errors++;
351 }
352
353 if (isrc & BOIS) {
354 dev_dbg(dev->dev.parent, "bus-off mode interrupt\n");
355 state = CAN_STATE_BUS_OFF;
356 cf->can_id |= CAN_ERR_BUSOFF;
357 can_bus_off(dev);
358 }
359
360 if (isrc & EPIS) {
361 /* error passive interrupt */
362 dev_dbg(dev->dev.parent, "error passive interrupt\n");
363 state = CAN_STATE_ERROR_PASSIVE;
364 }
365
366 if ((isrc & EWTIS) || (isrc & EWRIS)) {
367 dev_dbg(dev->dev.parent,
368 "Error Warning Transmit/Receive Interrupt\n");
369 state = CAN_STATE_ERROR_WARNING;
370 }
371
372 if (state != priv->can.state && (state == CAN_STATE_ERROR_WARNING ||
373 state == CAN_STATE_ERROR_PASSIVE)) {
374 u16 cec = bfin_read16(&reg->cec);
375 u8 rxerr = cec;
376 u8 txerr = cec >> 8;
377
378 cf->can_id |= CAN_ERR_CRTL;
379 if (state == CAN_STATE_ERROR_WARNING) {
380 priv->can.can_stats.error_warning++;
381 cf->data[1] = (txerr > rxerr) ?
382 CAN_ERR_CRTL_TX_WARNING :
383 CAN_ERR_CRTL_RX_WARNING;
384 } else {
385 priv->can.can_stats.error_passive++;
386 cf->data[1] = (txerr > rxerr) ?
387 CAN_ERR_CRTL_TX_PASSIVE :
388 CAN_ERR_CRTL_RX_PASSIVE;
389 }
390 }
391
392 if (status) {
393 priv->can.can_stats.bus_error++;
394
395 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
396
397 if (status & BEF)
398 cf->data[2] |= CAN_ERR_PROT_BIT;
399 else if (status & FER)
400 cf->data[2] |= CAN_ERR_PROT_FORM;
401 else if (status & SER)
402 cf->data[2] |= CAN_ERR_PROT_STUFF;
403 else
404 cf->data[2] |= CAN_ERR_PROT_UNSPEC;
405 }
406
407 priv->can.state = state;
408
409 netif_rx(skb);
410
411 stats->rx_packets++;
412 stats->rx_bytes += cf->can_dlc;
413
414 return 0;
415}
416
417irqreturn_t bfin_can_interrupt(int irq, void *dev_id)
418{
419 struct net_device *dev = dev_id;
420 struct bfin_can_priv *priv = netdev_priv(dev);
421 struct bfin_can_regs __iomem *reg = priv->membase;
422 struct net_device_stats *stats = &dev->stats;
423 u16 status, isrc;
424
425 if ((irq == priv->tx_irq) && bfin_read16(&reg->mbtif2)) {
426 /* transmission complete interrupt */
427 bfin_write16(&reg->mbtif2, 0xFFFF);
428 stats->tx_packets++;
429 stats->tx_bytes += bfin_read16(&reg->chl[TRANSMIT_CHL].dlc);
430 can_get_echo_skb(dev, 0);
431 netif_wake_queue(dev);
432 } else if ((irq == priv->rx_irq) && bfin_read16(&reg->mbrif1)) {
433 /* receive interrupt */
434 isrc = bfin_read16(&reg->mbrif1);
435 bfin_write16(&reg->mbrif1, 0xFFFF);
436 bfin_can_rx(dev, isrc);
437 } else if ((irq == priv->err_irq) && bfin_read16(&reg->gis)) {
438 /* error interrupt */
439 isrc = bfin_read16(&reg->gis);
440 status = bfin_read16(&reg->esr);
441 bfin_write16(&reg->gis, 0x7FF);
442 bfin_can_err(dev, isrc, status);
443 } else {
444 return IRQ_NONE;
445 }
446
447 return IRQ_HANDLED;
448}
449
450static int bfin_can_open(struct net_device *dev)
451{
452 struct bfin_can_priv *priv = netdev_priv(dev);
453 int err;
454
455 /* set chip into reset mode */
456 bfin_can_set_reset_mode(dev);
457
458 /* common open */
459 err = open_candev(dev);
460 if (err)
461 goto exit_open;
462
463 /* register interrupt handler */
464 err = request_irq(priv->rx_irq, &bfin_can_interrupt, 0,
465 "bfin-can-rx", dev);
466 if (err)
467 goto exit_rx_irq;
468 err = request_irq(priv->tx_irq, &bfin_can_interrupt, 0,
469 "bfin-can-tx", dev);
470 if (err)
471 goto exit_tx_irq;
472 err = request_irq(priv->err_irq, &bfin_can_interrupt, 0,
473 "bfin-can-err", dev);
474 if (err)
475 goto exit_err_irq;
476
477 bfin_can_start(dev);
478
479 netif_start_queue(dev);
480
481 return 0;
482
483exit_err_irq:
484 free_irq(priv->tx_irq, dev);
485exit_tx_irq:
486 free_irq(priv->rx_irq, dev);
487exit_rx_irq:
488 close_candev(dev);
489exit_open:
490 return err;
491}
492
493static int bfin_can_close(struct net_device *dev)
494{
495 struct bfin_can_priv *priv = netdev_priv(dev);
496
497 netif_stop_queue(dev);
498 bfin_can_set_reset_mode(dev);
499
500 close_candev(dev);
501
502 free_irq(priv->rx_irq, dev);
503 free_irq(priv->tx_irq, dev);
504 free_irq(priv->err_irq, dev);
505
506 return 0;
507}
508
509struct net_device *alloc_bfin_candev(void)
510{
511 struct net_device *dev;
512 struct bfin_can_priv *priv;
513
514 dev = alloc_candev(sizeof(*priv), TX_ECHO_SKB_MAX);
515 if (!dev)
516 return NULL;
517
518 priv = netdev_priv(dev);
519
520 priv->dev = dev;
521 priv->can.bittiming_const = &bfin_can_bittiming_const;
522 priv->can.do_set_bittiming = bfin_can_set_bittiming;
523 priv->can.do_set_mode = bfin_can_set_mode;
524 priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
525
526 return dev;
527}
528
529static const struct net_device_ops bfin_can_netdev_ops = {
530 .ndo_open = bfin_can_open,
531 .ndo_stop = bfin_can_close,
532 .ndo_start_xmit = bfin_can_start_xmit,
533};
534
535static int __devinit bfin_can_probe(struct platform_device *pdev)
536{
537 int err;
538 struct net_device *dev;
539 struct bfin_can_priv *priv;
540 struct resource *res_mem, *rx_irq, *tx_irq, *err_irq;
541 unsigned short *pdata;
542
543 pdata = pdev->dev.platform_data;
544 if (!pdata) {
545 dev_err(&pdev->dev, "No platform data provided!\n");
546 err = -EINVAL;
547 goto exit;
548 }
549
550 res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
551 rx_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
552 tx_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
553 err_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 2);
554 if (!res_mem || !rx_irq || !tx_irq || !err_irq) {
555 err = -EINVAL;
556 goto exit;
557 }
558
559 if (!request_mem_region(res_mem->start, resource_size(res_mem),
560 dev_name(&pdev->dev))) {
561 err = -EBUSY;
562 goto exit;
563 }
564
565 /* request peripheral pins */
566 err = peripheral_request_list(pdata, dev_name(&pdev->dev));
567 if (err)
568 goto exit_mem_release;
569
570 dev = alloc_bfin_candev();
571 if (!dev) {
572 err = -ENOMEM;
573 goto exit_peri_pin_free;
574 }
575
576 priv = netdev_priv(dev);
577 priv->membase = (void __iomem *)res_mem->start;
578 priv->rx_irq = rx_irq->start;
579 priv->tx_irq = tx_irq->start;
580 priv->err_irq = err_irq->start;
581 priv->pin_list = pdata;
582 priv->can.clock.freq = get_sclk();
583
584 dev_set_drvdata(&pdev->dev, dev);
585 SET_NETDEV_DEV(dev, &pdev->dev);
586
587 dev->flags |= IFF_ECHO; /* we support local echo */
588 dev->netdev_ops = &bfin_can_netdev_ops;
589
590 bfin_can_set_reset_mode(dev);
591
592 err = register_candev(dev);
593 if (err) {
594 dev_err(&pdev->dev, "registering failed (err=%d)\n", err);
595 goto exit_candev_free;
596 }
597
598 dev_info(&pdev->dev,
599 "%s device registered"
600 "(&reg_base=%p, rx_irq=%d, tx_irq=%d, err_irq=%d, sclk=%d)\n",
601 DRV_NAME, (void *)priv->membase, priv->rx_irq,
602 priv->tx_irq, priv->err_irq, priv->can.clock.freq);
603 return 0;
604
605exit_candev_free:
606 free_candev(dev);
607exit_peri_pin_free:
608 peripheral_free_list(pdata);
609exit_mem_release:
610 release_mem_region(res_mem->start, resource_size(res_mem));
611exit:
612 return err;
613}
614
615static int __devexit bfin_can_remove(struct platform_device *pdev)
616{
617 struct net_device *dev = dev_get_drvdata(&pdev->dev);
618 struct bfin_can_priv *priv = netdev_priv(dev);
619 struct resource *res;
620
621 bfin_can_set_reset_mode(dev);
622
623 unregister_candev(dev);
624
625 dev_set_drvdata(&pdev->dev, NULL);
626
627 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
628 release_mem_region(res->start, resource_size(res));
629
630 peripheral_free_list(priv->pin_list);
631
632 free_candev(dev);
633 return 0;
634}
635
636#ifdef CONFIG_PM
637static int bfin_can_suspend(struct platform_device *pdev, pm_message_t mesg)
638{
639 struct net_device *dev = dev_get_drvdata(&pdev->dev);
640 struct bfin_can_priv *priv = netdev_priv(dev);
641 struct bfin_can_regs __iomem *reg = priv->membase;
642 int timeout = BFIN_CAN_TIMEOUT;
643
644 if (netif_running(dev)) {
645 /* enter sleep mode */
646 bfin_write16(&reg->control, bfin_read16(&reg->control) | SMR);
647 SSYNC();
648 while (!(bfin_read16(&reg->intr) & SMACK)) {
649 udelay(10);
650 if (--timeout == 0) {
651 dev_err(dev->dev.parent,
652 "fail to enter sleep mode\n");
653 BUG();
654 }
655 }
656 }
657
658 return 0;
659}
660
661static int bfin_can_resume(struct platform_device *pdev)
662{
663 struct net_device *dev = dev_get_drvdata(&pdev->dev);
664 struct bfin_can_priv *priv = netdev_priv(dev);
665 struct bfin_can_regs __iomem *reg = priv->membase;
666
667 if (netif_running(dev)) {
668 /* leave sleep mode */
669 bfin_write16(&reg->intr, 0);
670 SSYNC();
671 }
672
673 return 0;
674}
675#else
676#define bfin_can_suspend NULL
677#define bfin_can_resume NULL
678#endif /* CONFIG_PM */
679
680static struct platform_driver bfin_can_driver = {
681 .probe = bfin_can_probe,
682 .remove = __devexit_p(bfin_can_remove),
683 .suspend = bfin_can_suspend,
684 .resume = bfin_can_resume,
685 .driver = {
686 .name = DRV_NAME,
687 .owner = THIS_MODULE,
688 },
689};
690
691static int __init bfin_can_init(void)
692{
693 return platform_driver_register(&bfin_can_driver);
694}
695module_init(bfin_can_init);
696
697static void __exit bfin_can_exit(void)
698{
699 platform_driver_unregister(&bfin_can_driver);
700}
701module_exit(bfin_can_exit);
702
703MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
704MODULE_LICENSE("GPL");
705MODULE_DESCRIPTION("Blackfin on-chip CAN netdevice driver");
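bfin_can_probe() above expects the board code to hand the driver one memory region, three interrupts (RX, TX, error, resource indices 0/1/2) and a zero-terminated peripheral pin list via platform_data. The fragment below sketches such a board registration; the register window, IRQ names and pin symbols are placeholders for a concrete Blackfin board and are not taken from this patch.

/* Hypothetical board file fragment -- addresses, IRQ and pin symbols are
 * placeholders; use the values of the actual Blackfin derivative. */
static unsigned short bfin_can_peripherals[] = {
	P_CAN0_RX, P_CAN0_TX, 0		/* list must be zero-terminated */
};

static struct resource bfin_can_resources[] = {
	{
		.start = 0xFFC02A00,	/* CAN MMR window (example) */
		.end   = 0xFFC02FFF,
		.flags = IORESOURCE_MEM,
	}, {
		.start = IRQ_CAN_RX,	/* index 0: receive interrupt */
		.flags = IORESOURCE_IRQ,
	}, {
		.start = IRQ_CAN_TX,	/* index 1: transmit interrupt */
		.flags = IORESOURCE_IRQ,
	}, {
		.start = IRQ_CAN_ERROR,	/* index 2: error interrupt */
		.flags = IORESOURCE_IRQ,
	},
};

static struct platform_device bfin_can_device = {
	.name          = "bfin_can",
	.num_resources = ARRAY_SIZE(bfin_can_resources),
	.resource      = bfin_can_resources,
	.dev           = {
		.platform_data = &bfin_can_peripherals,
	},
};

The board init code would then call platform_device_register(&bfin_can_device) so that the probe above finds its resources and pin list.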
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 2868fe842a41..d0f8c7e67e7d 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -19,6 +19,7 @@
 
 #include <linux/module.h>
 #include <linux/kernel.h>
+#include <linux/slab.h>
 #include <linux/netdevice.h>
 #include <linux/if_arp.h>
 #include <linux/can.h>
@@ -245,7 +246,7 @@ static void can_flush_echo_skb(struct net_device *dev)
 	struct net_device_stats *stats = &dev->stats;
 	int i;
 
-	for (i = 0; i < CAN_ECHO_SKB_MAX; i++) {
+	for (i = 0; i < priv->echo_skb_max; i++) {
 		if (priv->echo_skb[i]) {
 			kfree_skb(priv->echo_skb[i]);
 			priv->echo_skb[i] = NULL;
@@ -262,10 +263,13 @@ static void can_flush_echo_skb(struct net_device *dev)
  * of the device driver. The driver must protect access to
  * priv->echo_skb, if necessary.
  */
-void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev, int idx)
+void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
+		      unsigned int idx)
 {
 	struct can_priv *priv = netdev_priv(dev);
 
+	BUG_ON(idx >= priv->echo_skb_max);
+
 	/* check flag whether this packet has to be looped back */
 	if (!(dev->flags & IFF_ECHO) || skb->pkt_type != PACKET_LOOPBACK) {
 		kfree_skb(skb);
@@ -311,10 +315,12 @@ EXPORT_SYMBOL_GPL(can_put_echo_skb);
 * is handled in the device driver. The driver must protect
 * access to priv->echo_skb, if necessary.
 */
-void can_get_echo_skb(struct net_device *dev, int idx)
+void can_get_echo_skb(struct net_device *dev, unsigned int idx)
 {
 	struct can_priv *priv = netdev_priv(dev);
 
+	BUG_ON(idx >= priv->echo_skb_max);
+
 	if (priv->echo_skb[idx]) {
 		netif_rx(priv->echo_skb[idx]);
 		priv->echo_skb[idx] = NULL;
@@ -327,10 +333,12 @@ EXPORT_SYMBOL_GPL(can_get_echo_skb);
 *
 * The function is typically called when TX failed.
 */
-void can_free_echo_skb(struct net_device *dev, int idx)
+void can_free_echo_skb(struct net_device *dev, unsigned int idx)
 {
 	struct can_priv *priv = netdev_priv(dev);
 
+	BUG_ON(idx >= priv->echo_skb_max);
+
 	if (priv->echo_skb[idx]) {
 		kfree_skb(priv->echo_skb[idx]);
 		priv->echo_skb[idx] = NULL;
@@ -359,17 +367,12 @@ void can_restart(unsigned long data)
 	can_flush_echo_skb(dev);
 
 	/* send restart message upstream */
-	skb = dev_alloc_skb(sizeof(struct can_frame));
+	skb = alloc_can_err_skb(dev, &cf);
 	if (skb == NULL) {
 		err = -ENOMEM;
 		goto restart;
 	}
-	skb->dev = dev;
-	skb->protocol = htons(ETH_P_CAN);
-	cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
-	memset(cf, 0, sizeof(struct can_frame));
-	cf->can_id = CAN_ERR_FLAG | CAN_ERR_RESTARTED;
-	cf->can_dlc = CAN_ERR_DLC;
+	cf->can_id |= CAN_ERR_RESTARTED;
 
 	netif_rx(skb);
 
@@ -442,20 +445,66 @@ static void can_setup(struct net_device *dev)
 	dev->features = NETIF_F_NO_CSUM;
 }
 
+struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
+{
+	struct sk_buff *skb;
+
+	skb = netdev_alloc_skb(dev, sizeof(struct can_frame));
+	if (unlikely(!skb))
+		return NULL;
+
+	skb->protocol = htons(ETH_P_CAN);
+	skb->pkt_type = PACKET_BROADCAST;
+	skb->ip_summed = CHECKSUM_UNNECESSARY;
+	*cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
+	memset(*cf, 0, sizeof(struct can_frame));
+
+	return skb;
+}
+EXPORT_SYMBOL_GPL(alloc_can_skb);
+
+struct sk_buff *alloc_can_err_skb(struct net_device *dev, struct can_frame **cf)
+{
+	struct sk_buff *skb;
+
+	skb = alloc_can_skb(dev, cf);
+	if (unlikely(!skb))
+		return NULL;
+
+	(*cf)->can_id = CAN_ERR_FLAG;
+	(*cf)->can_dlc = CAN_ERR_DLC;
+
+	return skb;
+}
+EXPORT_SYMBOL_GPL(alloc_can_err_skb);
+
 /*
  * Allocate and setup space for the CAN network device
  */
-struct net_device *alloc_candev(int sizeof_priv)
+struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max)
 {
 	struct net_device *dev;
 	struct can_priv *priv;
+	int size;
 
-	dev = alloc_netdev(sizeof_priv, "can%d", can_setup);
+	if (echo_skb_max)
+		size = ALIGN(sizeof_priv, sizeof(struct sk_buff *)) +
+			echo_skb_max * sizeof(struct sk_buff *);
+	else
+		size = sizeof_priv;
+
+	dev = alloc_netdev(size, "can%d", can_setup);
 	if (!dev)
 		return NULL;
 
 	priv = netdev_priv(dev);
 
+	if (echo_skb_max) {
+		priv->echo_skb_max = echo_skb_max;
+		priv->echo_skb = (void *)priv +
+			ALIGN(sizeof_priv, sizeof(struct sk_buff *));
+	}
+
 	priv->state = CAN_STATE_STOPPED;
 
 	init_timer(&priv->restart_timer);
@@ -526,6 +575,7 @@ static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = {
 	[IFLA_CAN_BITTIMING_CONST]
 			= { .len = sizeof(struct can_bittiming_const) },
 	[IFLA_CAN_CLOCK] = { .len = sizeof(struct can_clock) },
+	[IFLA_CAN_BERR_COUNTER] = { .len = sizeof(struct can_berr_counter) },
 };
 
 static int can_changelink(struct net_device *dev,
@@ -544,6 +594,8 @@ static int can_changelink(struct net_device *dev,
 		if (dev->flags & IFF_UP)
 			return -EBUSY;
 		cm = nla_data(data[IFLA_CAN_CTRLMODE]);
+		if (cm->flags & ~priv->ctrlmode_supported)
+			return -EOPNOTSUPP;
 		priv->ctrlmode &= ~cm->mask;
 		priv->ctrlmode |= cm->flags;
 	}
@@ -599,6 +651,8 @@ static size_t can_get_size(const struct net_device *dev)
 	size += nla_total_size(sizeof(u32));   /* IFLA_CAN_RESTART_MS */
 	size += sizeof(struct can_bittiming);  /* IFLA_CAN_BITTIMING */
 	size += sizeof(struct can_clock);      /* IFLA_CAN_CLOCK */
+	if (priv->do_get_berr_counter)         /* IFLA_CAN_BERR_COUNTER */
+		size += sizeof(struct can_berr_counter);
 	if (priv->bittiming_const)             /* IFLA_CAN_BITTIMING_CONST */
 		size += sizeof(struct can_bittiming_const);
 
@@ -609,6 +663,7 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
 {
 	struct can_priv *priv = netdev_priv(dev);
 	struct can_ctrlmode cm = {.flags = priv->ctrlmode};
+	struct can_berr_counter bec;
 	enum can_state state = priv->state;
 
 	if (priv->do_get_state)
@@ -619,6 +674,8 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
 	NLA_PUT(skb, IFLA_CAN_BITTIMING,
 		sizeof(priv->bittiming), &priv->bittiming);
 	NLA_PUT(skb, IFLA_CAN_CLOCK, sizeof(cm), &priv->clock);
+	if (priv->do_get_berr_counter && !priv->do_get_berr_counter(dev, &bec))
+		NLA_PUT(skb, IFLA_CAN_BERR_COUNTER, sizeof(bec), &bec);
 	if (priv->bittiming_const)
 		NLA_PUT(skb, IFLA_CAN_BITTIMING_CONST,
 			sizeof(*priv->bittiming_const), priv->bittiming_const);
@@ -647,7 +704,7 @@ nla_put_failure:
 	return -EMSGSIZE;
 }
 
-static int can_newlink(struct net_device *dev,
+static int can_newlink(struct net *src_net, struct net_device *dev,
 		       struct nlattr *tb[], struct nlattr *data[])
 {
 	return -EOPNOTSUPP;
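The dev.c changes above define the allocation pattern the new drivers in this series follow: pass the number of echo skbs to alloc_candev(), build RX and error frames with alloc_can_skb()/alloc_can_err_skb(), and pair can_put_echo_skb()/can_get_echo_skb() around a transmission. The fragment below is a condensed sketch of that pattern under the single-TX-buffer assumption used by bfin_can; the foo_* names and struct foo_priv are placeholders, not part of this patch.

#include <linux/netdevice.h>
#include <linux/can/dev.h>

#define TX_ECHO_SKB_MAX 1		/* one in-flight TX frame */

struct foo_priv {
	struct can_priv can;		/* must be the first member */
};

static struct net_device *foo_candev_alloc(void)
{
	/* reserves TX_ECHO_SKB_MAX echo skb pointers behind the priv area */
	return alloc_candev(sizeof(struct foo_priv), TX_ECHO_SKB_MAX);
}

static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (can_dropped_invalid_skb(dev, skb))
		return NETDEV_TX_OK;

	netif_stop_queue(dev);
	can_put_echo_skb(skb, dev, 0);	/* keep skb for local echo */
	/* ... write the frame to the hardware here ... */
	return NETDEV_TX_OK;
}

static void foo_tx_done(struct net_device *dev)
{
	can_get_echo_skb(dev, 0);	/* loop the echoed frame back */
	netif_wake_queue(dev);
}

static void foo_rx(struct net_device *dev)
{
	struct can_frame *cf;
	struct sk_buff *skb = alloc_can_skb(dev, &cf);

	if (!skb)
		return;
	/* ... fill cf->can_id, cf->can_dlc and cf->data from hardware ... */
	netif_rx(skb);
}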
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
new file mode 100644
index 000000000000..b39b108318b4
--- /dev/null
+++ b/drivers/net/can/mcp251x.c
@@ -0,0 +1,1150 @@
1/*
2 * CAN bus driver for Microchip 251x CAN Controller with SPI Interface
3 *
4 * MCP2510 support and bug fixes by Christian Pellegrin
5 * <chripell@evolware.org>
6 *
7 * Copyright 2009 Christian Pellegrin EVOL S.r.l.
8 *
9 * Copyright 2007 Raymarine UK, Ltd. All Rights Reserved.
10 * Written under contract by:
11 * Chris Elston, Katalix Systems, Ltd.
12 *
13 * Based on Microchip MCP251x CAN controller driver written by
14 * David Vrabel, Copyright 2006 Arcom Control Systems Ltd.
15 *
16 * Based on CAN bus driver for the CCAN controller written by
17 * - Sascha Hauer, Marc Kleine-Budde, Pengutronix
18 * - Simon Kallweit, intefo AG
19 * Copyright 2007
20 *
21 * This program is free software; you can redistribute it and/or modify
22 * it under the terms of the version 2 of the GNU General Public License
23 * as published by the Free Software Foundation
24 *
25 * This program is distributed in the hope that it will be useful,
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
28 * GNU General Public License for more details.
29 *
30 * You should have received a copy of the GNU General Public License
31 * along with this program; if not, write to the Free Software
32 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
33 *
34 *
35 *
36 * Your platform definition file should specify something like:
37 *
38 * static struct mcp251x_platform_data mcp251x_info = {
39 * .oscillator_frequency = 8000000,
40 * .board_specific_setup = &mcp251x_setup,
41 * .model = CAN_MCP251X_MCP2510,
42 * .power_enable = mcp251x_power_enable,
43 * .transceiver_enable = NULL,
44 * };
45 *
46 * static struct spi_board_info spi_board_info[] = {
47 * {
48 * .modalias = "mcp251x",
49 * .platform_data = &mcp251x_info,
50 * .irq = IRQ_EINT13,
51 * .max_speed_hz = 2*1000*1000,
52 * .chip_select = 2,
53 * },
54 * };
55 *
56 * Please see mcp251x.h for a description of the fields in
57 * struct mcp251x_platform_data.
58 *
59 */
60
61#include <linux/can.h>
62#include <linux/can/core.h>
63#include <linux/can/dev.h>
64#include <linux/can/platform/mcp251x.h>
65#include <linux/completion.h>
66#include <linux/delay.h>
67#include <linux/device.h>
68#include <linux/dma-mapping.h>
69#include <linux/freezer.h>
70#include <linux/interrupt.h>
71#include <linux/io.h>
72#include <linux/kernel.h>
73#include <linux/module.h>
74#include <linux/netdevice.h>
75#include <linux/platform_device.h>
76#include <linux/slab.h>
77#include <linux/spi/spi.h>
78#include <linux/uaccess.h>
79
80/* SPI interface instruction set */
81#define INSTRUCTION_WRITE 0x02
82#define INSTRUCTION_READ 0x03
83#define INSTRUCTION_BIT_MODIFY 0x05
84#define INSTRUCTION_LOAD_TXB(n) (0x40 + 2 * (n))
85#define INSTRUCTION_READ_RXB(n) (((n) == 0) ? 0x90 : 0x94)
86#define INSTRUCTION_RESET 0xC0
87
88/* MPC251x registers */
89#define CANSTAT 0x0e
90#define CANCTRL 0x0f
91# define CANCTRL_REQOP_MASK 0xe0
92# define CANCTRL_REQOP_CONF 0x80
93# define CANCTRL_REQOP_LISTEN_ONLY 0x60
94# define CANCTRL_REQOP_LOOPBACK 0x40
95# define CANCTRL_REQOP_SLEEP 0x20
96# define CANCTRL_REQOP_NORMAL 0x00
97# define CANCTRL_OSM 0x08
98# define CANCTRL_ABAT 0x10
99#define TEC 0x1c
100#define REC 0x1d
101#define CNF1 0x2a
102# define CNF1_SJW_SHIFT 6
103#define CNF2 0x29
104# define CNF2_BTLMODE 0x80
105# define CNF2_SAM 0x40
106# define CNF2_PS1_SHIFT 3
107#define CNF3 0x28
108# define CNF3_SOF 0x08
109# define CNF3_WAKFIL 0x04
110# define CNF3_PHSEG2_MASK 0x07
111#define CANINTE 0x2b
112# define CANINTE_MERRE 0x80
113# define CANINTE_WAKIE 0x40
114# define CANINTE_ERRIE 0x20
115# define CANINTE_TX2IE 0x10
116# define CANINTE_TX1IE 0x08
117# define CANINTE_TX0IE 0x04
118# define CANINTE_RX1IE 0x02
119# define CANINTE_RX0IE 0x01
120#define CANINTF 0x2c
121# define CANINTF_MERRF 0x80
122# define CANINTF_WAKIF 0x40
123# define CANINTF_ERRIF 0x20
124# define CANINTF_TX2IF 0x10
125# define CANINTF_TX1IF 0x08
126# define CANINTF_TX0IF 0x04
127# define CANINTF_RX1IF 0x02
128# define CANINTF_RX0IF 0x01
129#define EFLG 0x2d
130# define EFLG_EWARN 0x01
131# define EFLG_RXWAR 0x02
132# define EFLG_TXWAR 0x04
133# define EFLG_RXEP 0x08
134# define EFLG_TXEP 0x10
135# define EFLG_TXBO 0x20
136# define EFLG_RX0OVR 0x40
137# define EFLG_RX1OVR 0x80
138#define TXBCTRL(n) (((n) * 0x10) + 0x30 + TXBCTRL_OFF)
139# define TXBCTRL_ABTF 0x40
140# define TXBCTRL_MLOA 0x20
141# define TXBCTRL_TXERR 0x10
142# define TXBCTRL_TXREQ 0x08
143#define TXBSIDH(n) (((n) * 0x10) + 0x30 + TXBSIDH_OFF)
144# define SIDH_SHIFT 3
145#define TXBSIDL(n) (((n) * 0x10) + 0x30 + TXBSIDL_OFF)
146# define SIDL_SID_MASK 7
147# define SIDL_SID_SHIFT 5
148# define SIDL_EXIDE_SHIFT 3
149# define SIDL_EID_SHIFT 16
150# define SIDL_EID_MASK 3
151#define TXBEID8(n) (((n) * 0x10) + 0x30 + TXBEID8_OFF)
152#define TXBEID0(n) (((n) * 0x10) + 0x30 + TXBEID0_OFF)
153#define TXBDLC(n) (((n) * 0x10) + 0x30 + TXBDLC_OFF)
154# define DLC_RTR_SHIFT 6
155#define TXBCTRL_OFF 0
156#define TXBSIDH_OFF 1
157#define TXBSIDL_OFF 2
158#define TXBEID8_OFF 3
159#define TXBEID0_OFF 4
160#define TXBDLC_OFF 5
161#define TXBDAT_OFF 6
162#define RXBCTRL(n) (((n) * 0x10) + 0x60 + RXBCTRL_OFF)
163# define RXBCTRL_BUKT 0x04
164# define RXBCTRL_RXM0 0x20
165# define RXBCTRL_RXM1 0x40
166#define RXBSIDH(n) (((n) * 0x10) + 0x60 + RXBSIDH_OFF)
167# define RXBSIDH_SHIFT 3
168#define RXBSIDL(n) (((n) * 0x10) + 0x60 + RXBSIDL_OFF)
169# define RXBSIDL_IDE 0x08
170# define RXBSIDL_EID 3
171# define RXBSIDL_SHIFT 5
172#define RXBEID8(n) (((n) * 0x10) + 0x60 + RXBEID8_OFF)
173#define RXBEID0(n) (((n) * 0x10) + 0x60 + RXBEID0_OFF)
174#define RXBDLC(n) (((n) * 0x10) + 0x60 + RXBDLC_OFF)
175# define RXBDLC_LEN_MASK 0x0f
176# define RXBDLC_RTR 0x40
177#define RXBCTRL_OFF 0
178#define RXBSIDH_OFF 1
179#define RXBSIDL_OFF 2
180#define RXBEID8_OFF 3
181#define RXBEID0_OFF 4
182#define RXBDLC_OFF 5
183#define RXBDAT_OFF 6
184#define RXFSIDH(n) ((n) * 4)
185#define RXFSIDL(n) ((n) * 4 + 1)
186#define RXFEID8(n) ((n) * 4 + 2)
187#define RXFEID0(n) ((n) * 4 + 3)
188#define RXMSIDH(n) ((n) * 4 + 0x20)
189#define RXMSIDL(n) ((n) * 4 + 0x21)
190#define RXMEID8(n) ((n) * 4 + 0x22)
191#define RXMEID0(n) ((n) * 4 + 0x23)
192
193#define GET_BYTE(val, byte) \
194 (((val) >> ((byte) * 8)) & 0xff)
195#define SET_BYTE(val, byte) \
196 (((val) & 0xff) << ((byte) * 8))
197
198/*
199 * Buffer size required for the largest SPI transfer (i.e., reading a
200 * frame)
201 */
202#define CAN_FRAME_MAX_DATA_LEN 8
203#define SPI_TRANSFER_BUF_LEN (6 + CAN_FRAME_MAX_DATA_LEN)
204#define CAN_FRAME_MAX_BITS 128
205
206#define TX_ECHO_SKB_MAX 1
207
208#define DEVICE_NAME "mcp251x"
209
210static int mcp251x_enable_dma; /* Enable SPI DMA. Default: 0 (Off) */
211module_param(mcp251x_enable_dma, int, S_IRUGO);
212MODULE_PARM_DESC(mcp251x_enable_dma, "Enable SPI DMA. Default: 0 (Off)");
213
214static struct can_bittiming_const mcp251x_bittiming_const = {
215 .name = DEVICE_NAME,
216 .tseg1_min = 3,
217 .tseg1_max = 16,
218 .tseg2_min = 2,
219 .tseg2_max = 8,
220 .sjw_max = 4,
221 .brp_min = 1,
222 .brp_max = 64,
223 .brp_inc = 1,
224};
225
226struct mcp251x_priv {
227 struct can_priv can;
228 struct net_device *net;
229 struct spi_device *spi;
230
231 struct mutex mcp_lock; /* SPI device lock */
232
233 u8 *spi_tx_buf;
234 u8 *spi_rx_buf;
235 dma_addr_t spi_tx_dma;
236 dma_addr_t spi_rx_dma;
237
238 struct sk_buff *tx_skb;
239 int tx_len;
240
241 struct workqueue_struct *wq;
242 struct work_struct tx_work;
243 struct work_struct restart_work;
244
245 int force_quit;
246 int after_suspend;
247#define AFTER_SUSPEND_UP 1
248#define AFTER_SUSPEND_DOWN 2
249#define AFTER_SUSPEND_POWER 4
250#define AFTER_SUSPEND_RESTART 8
251 int restart_tx;
252};
253
254static void mcp251x_clean(struct net_device *net)
255{
256 struct mcp251x_priv *priv = netdev_priv(net);
257
258 if (priv->tx_skb || priv->tx_len)
259 net->stats.tx_errors++;
260 if (priv->tx_skb)
261 dev_kfree_skb(priv->tx_skb);
262 if (priv->tx_len)
263 can_free_echo_skb(priv->net, 0);
264 priv->tx_skb = NULL;
265 priv->tx_len = 0;
266}
267
268/*
269 * Note about handling of error return of mcp251x_spi_trans: accessing
270 * registers via SPI is not really different conceptually than using
271 * normal I/O assembler instructions, although it's much more
272 * complicated from a practical POV. So it's not advisable to always
273 * check the return value of this function. Imagine that every
274 * read{b,l}, write{b,l} and friends would be bracketed in "if ( < 0)
275 * error();", it would be a great mess (well there are some situation
276 * when exception handling C++ like could be useful after all). So we
277 * just check that transfers are OK at the beginning of our
278 * conversation with the chip and to avoid doing really nasty things
279 * (like injecting bogus packets in the network stack).
280 */
281static int mcp251x_spi_trans(struct spi_device *spi, int len)
282{
283 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
284 struct spi_transfer t = {
285 .tx_buf = priv->spi_tx_buf,
286 .rx_buf = priv->spi_rx_buf,
287 .len = len,
288 .cs_change = 0,
289 };
290 struct spi_message m;
291 int ret;
292
293 spi_message_init(&m);
294
295 if (mcp251x_enable_dma) {
296 t.tx_dma = priv->spi_tx_dma;
297 t.rx_dma = priv->spi_rx_dma;
298 m.is_dma_mapped = 1;
299 }
300
301 spi_message_add_tail(&t, &m);
302
303 ret = spi_sync(spi, &m);
304 if (ret)
305 dev_err(&spi->dev, "spi transfer failed: ret = %d\n", ret);
306 return ret;
307}
308
309static u8 mcp251x_read_reg(struct spi_device *spi, uint8_t reg)
310{
311 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
312 u8 val = 0;
313
314 priv->spi_tx_buf[0] = INSTRUCTION_READ;
315 priv->spi_tx_buf[1] = reg;
316
317 mcp251x_spi_trans(spi, 3);
318 val = priv->spi_rx_buf[2];
319
320 return val;
321}
322
323static void mcp251x_write_reg(struct spi_device *spi, u8 reg, uint8_t val)
324{
325 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
326
327 priv->spi_tx_buf[0] = INSTRUCTION_WRITE;
328 priv->spi_tx_buf[1] = reg;
329 priv->spi_tx_buf[2] = val;
330
331 mcp251x_spi_trans(spi, 3);
332}
333
334static void mcp251x_write_bits(struct spi_device *spi, u8 reg,
335 u8 mask, uint8_t val)
336{
337 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
338
339 priv->spi_tx_buf[0] = INSTRUCTION_BIT_MODIFY;
340 priv->spi_tx_buf[1] = reg;
341 priv->spi_tx_buf[2] = mask;
342 priv->spi_tx_buf[3] = val;
343
344 mcp251x_spi_trans(spi, 4);
345}
346
347static void mcp251x_hw_tx_frame(struct spi_device *spi, u8 *buf,
348 int len, int tx_buf_idx)
349{
350 struct mcp251x_platform_data *pdata = spi->dev.platform_data;
351 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
352
353 if (pdata->model == CAN_MCP251X_MCP2510) {
354 int i;
355
356 for (i = 1; i < TXBDAT_OFF + len; i++)
357 mcp251x_write_reg(spi, TXBCTRL(tx_buf_idx) + i,
358 buf[i]);
359 } else {
360 memcpy(priv->spi_tx_buf, buf, TXBDAT_OFF + len);
361 mcp251x_spi_trans(spi, TXBDAT_OFF + len);
362 }
363}
364
365static void mcp251x_hw_tx(struct spi_device *spi, struct can_frame *frame,
366 int tx_buf_idx)
367{
368 u32 sid, eid, exide, rtr;
369 u8 buf[SPI_TRANSFER_BUF_LEN];
370
371 exide = (frame->can_id & CAN_EFF_FLAG) ? 1 : 0; /* Extended ID Enable */
372 if (exide)
373 sid = (frame->can_id & CAN_EFF_MASK) >> 18;
374 else
375 sid = frame->can_id & CAN_SFF_MASK; /* Standard ID */
376 eid = frame->can_id & CAN_EFF_MASK; /* Extended ID */
377 rtr = (frame->can_id & CAN_RTR_FLAG) ? 1 : 0; /* Remote transmission */
378
379 buf[TXBCTRL_OFF] = INSTRUCTION_LOAD_TXB(tx_buf_idx);
380 buf[TXBSIDH_OFF] = sid >> SIDH_SHIFT;
381 buf[TXBSIDL_OFF] = ((sid & SIDL_SID_MASK) << SIDL_SID_SHIFT) |
382 (exide << SIDL_EXIDE_SHIFT) |
383 ((eid >> SIDL_EID_SHIFT) & SIDL_EID_MASK);
384 buf[TXBEID8_OFF] = GET_BYTE(eid, 1);
385 buf[TXBEID0_OFF] = GET_BYTE(eid, 0);
386 buf[TXBDLC_OFF] = (rtr << DLC_RTR_SHIFT) | frame->can_dlc;
387 memcpy(buf + TXBDAT_OFF, frame->data, frame->can_dlc);
388 mcp251x_hw_tx_frame(spi, buf, frame->can_dlc, tx_buf_idx);
389 mcp251x_write_reg(spi, TXBCTRL(tx_buf_idx), TXBCTRL_TXREQ);
390}
391
392static void mcp251x_hw_rx_frame(struct spi_device *spi, u8 *buf,
393 int buf_idx)
394{
395 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
396 struct mcp251x_platform_data *pdata = spi->dev.platform_data;
397
398 if (pdata->model == CAN_MCP251X_MCP2510) {
399 int i, len;
400
401 for (i = 1; i < RXBDAT_OFF; i++)
402 buf[i] = mcp251x_read_reg(spi, RXBCTRL(buf_idx) + i);
403
404 len = get_can_dlc(buf[RXBDLC_OFF] & RXBDLC_LEN_MASK);
405 for (; i < (RXBDAT_OFF + len); i++)
406 buf[i] = mcp251x_read_reg(spi, RXBCTRL(buf_idx) + i);
407 } else {
408 priv->spi_tx_buf[RXBCTRL_OFF] = INSTRUCTION_READ_RXB(buf_idx);
409 mcp251x_spi_trans(spi, SPI_TRANSFER_BUF_LEN);
410 memcpy(buf, priv->spi_rx_buf, SPI_TRANSFER_BUF_LEN);
411 }
412}
413
414static void mcp251x_hw_rx(struct spi_device *spi, int buf_idx)
415{
416 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
417 struct sk_buff *skb;
418 struct can_frame *frame;
419 u8 buf[SPI_TRANSFER_BUF_LEN];
420
421 skb = alloc_can_skb(priv->net, &frame);
422 if (!skb) {
423 dev_err(&spi->dev, "cannot allocate RX skb\n");
424 priv->net->stats.rx_dropped++;
425 return;
426 }
427
428 mcp251x_hw_rx_frame(spi, buf, buf_idx);
429 if (buf[RXBSIDL_OFF] & RXBSIDL_IDE) {
430 /* Extended ID format */
431 frame->can_id = CAN_EFF_FLAG;
432 frame->can_id |=
433 /* Extended ID part */
434 SET_BYTE(buf[RXBSIDL_OFF] & RXBSIDL_EID, 2) |
435 SET_BYTE(buf[RXBEID8_OFF], 1) |
436 SET_BYTE(buf[RXBEID0_OFF], 0) |
437 /* Standard ID part */
438 (((buf[RXBSIDH_OFF] << RXBSIDH_SHIFT) |
439 (buf[RXBSIDL_OFF] >> RXBSIDL_SHIFT)) << 18);
440 /* Remote transmission request */
441 if (buf[RXBDLC_OFF] & RXBDLC_RTR)
442 frame->can_id |= CAN_RTR_FLAG;
443 } else {
444 /* Standard ID format */
445 frame->can_id =
446 (buf[RXBSIDH_OFF] << RXBSIDH_SHIFT) |
447 (buf[RXBSIDL_OFF] >> RXBSIDL_SHIFT);
448 }
449 /* Data length */
450 frame->can_dlc = get_can_dlc(buf[RXBDLC_OFF] & RXBDLC_LEN_MASK);
451 memcpy(frame->data, buf + RXBDAT_OFF, frame->can_dlc);
452
453 priv->net->stats.rx_packets++;
454 priv->net->stats.rx_bytes += frame->can_dlc;
455 netif_rx(skb);
456}
457
458static void mcp251x_hw_sleep(struct spi_device *spi)
459{
460 mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_SLEEP);
461}
462
463static netdev_tx_t mcp251x_hard_start_xmit(struct sk_buff *skb,
464 struct net_device *net)
465{
466 struct mcp251x_priv *priv = netdev_priv(net);
467 struct spi_device *spi = priv->spi;
468
469 if (priv->tx_skb || priv->tx_len) {
470 dev_warn(&spi->dev, "hard_xmit called while tx busy\n");
471 return NETDEV_TX_BUSY;
472 }
473
474 if (can_dropped_invalid_skb(net, skb))
475 return NETDEV_TX_OK;
476
477 netif_stop_queue(net);
478 priv->tx_skb = skb;
479 net->trans_start = jiffies;
480 queue_work(priv->wq, &priv->tx_work);
481
482 return NETDEV_TX_OK;
483}
484
485static int mcp251x_do_set_mode(struct net_device *net, enum can_mode mode)
486{
487 struct mcp251x_priv *priv = netdev_priv(net);
488
489 switch (mode) {
490 case CAN_MODE_START:
491 mcp251x_clean(net);
492 /* We have to delay work since SPI I/O may sleep */
493 priv->can.state = CAN_STATE_ERROR_ACTIVE;
494 priv->restart_tx = 1;
495 if (priv->can.restart_ms == 0)
496 priv->after_suspend = AFTER_SUSPEND_RESTART;
497 queue_work(priv->wq, &priv->restart_work);
498 break;
499 default:
500 return -EOPNOTSUPP;
501 }
502
503 return 0;
504}
505
506static int mcp251x_set_normal_mode(struct spi_device *spi)
507{
508 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
509 unsigned long timeout;
510
511 /* Enable interrupts */
512 mcp251x_write_reg(spi, CANINTE,
513 CANINTE_ERRIE | CANINTE_TX2IE | CANINTE_TX1IE |
514 CANINTE_TX0IE | CANINTE_RX1IE | CANINTE_RX0IE);
515
516 if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
517 /* Put device into loopback mode */
518 mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_LOOPBACK);
519 } else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) {
520 /* Put device into listen-only mode */
521 mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_LISTEN_ONLY);
522 } else {
523 /* Put device into normal mode */
524 mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_NORMAL);
525
526 /* Wait for the device to enter normal mode */
527 timeout = jiffies + HZ;
528 while (mcp251x_read_reg(spi, CANSTAT) & CANCTRL_REQOP_MASK) {
529 schedule();
530 if (time_after(jiffies, timeout)) {
531 dev_err(&spi->dev, "MCP251x didn't"
532 " enter in normal mode\n");
533 return -EBUSY;
534 }
535 }
536 }
537 priv->can.state = CAN_STATE_ERROR_ACTIVE;
538 return 0;
539}
540
541static int mcp251x_do_set_bittiming(struct net_device *net)
542{
543 struct mcp251x_priv *priv = netdev_priv(net);
544 struct can_bittiming *bt = &priv->can.bittiming;
545 struct spi_device *spi = priv->spi;
546
547 mcp251x_write_reg(spi, CNF1, ((bt->sjw - 1) << CNF1_SJW_SHIFT) |
548 (bt->brp - 1));
549 mcp251x_write_reg(spi, CNF2, CNF2_BTLMODE |
550 (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES ?
551 CNF2_SAM : 0) |
552 ((bt->phase_seg1 - 1) << CNF2_PS1_SHIFT) |
553 (bt->prop_seg - 1));
554 mcp251x_write_bits(spi, CNF3, CNF3_PHSEG2_MASK,
555 (bt->phase_seg2 - 1));
556 dev_info(&spi->dev, "CNF: 0x%02x 0x%02x 0x%02x\n",
557 mcp251x_read_reg(spi, CNF1),
558 mcp251x_read_reg(spi, CNF2),
559 mcp251x_read_reg(spi, CNF3));
560
561 return 0;
562}
563
564static int mcp251x_setup(struct net_device *net, struct mcp251x_priv *priv,
565 struct spi_device *spi)
566{
567 mcp251x_do_set_bittiming(net);
568
569 mcp251x_write_reg(spi, RXBCTRL(0),
570 RXBCTRL_BUKT | RXBCTRL_RXM0 | RXBCTRL_RXM1);
571 mcp251x_write_reg(spi, RXBCTRL(1),
572 RXBCTRL_RXM0 | RXBCTRL_RXM1);
573 return 0;
574}
575
576static int mcp251x_hw_reset(struct spi_device *spi)
577{
578 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
579 int ret;
580 unsigned long timeout;
581
582 priv->spi_tx_buf[0] = INSTRUCTION_RESET;
583 ret = spi_write(spi, priv->spi_tx_buf, 1);
584 if (ret) {
585 dev_err(&spi->dev, "reset failed: ret = %d\n", ret);
586 return -EIO;
587 }
588
589 /* Wait for reset to finish */
590 timeout = jiffies + HZ;
591 mdelay(10);
592 while ((mcp251x_read_reg(spi, CANSTAT) & CANCTRL_REQOP_MASK)
593 != CANCTRL_REQOP_CONF) {
594 schedule();
595 if (time_after(jiffies, timeout)) {
596 dev_err(&spi->dev, "MCP251x didn't"
597 " enter in conf mode after reset\n");
598 return -EBUSY;
599 }
600 }
601 return 0;
602}
603
604static int mcp251x_hw_probe(struct spi_device *spi)
605{
606 int st1, st2;
607
608 mcp251x_hw_reset(spi);
609
610 /*
611 * Please note that these are "magic values" based on after
612 * reset defaults taken from data sheet which allows us to see
613 * if we really have a chip on the bus (we avoid common all
614 * zeroes or all ones situations)
615 */
616 st1 = mcp251x_read_reg(spi, CANSTAT) & 0xEE;
617 st2 = mcp251x_read_reg(spi, CANCTRL) & 0x17;
618
619 dev_dbg(&spi->dev, "CANSTAT 0x%02x CANCTRL 0x%02x\n", st1, st2);
620
621 /* Check for power up default values */
622 return (st1 == 0x80 && st2 == 0x07) ? 1 : 0;
623}
624
625static void mcp251x_open_clean(struct net_device *net)
626{
627 struct mcp251x_priv *priv = netdev_priv(net);
628 struct spi_device *spi = priv->spi;
629 struct mcp251x_platform_data *pdata = spi->dev.platform_data;
630
631 free_irq(spi->irq, priv);
632 mcp251x_hw_sleep(spi);
633 if (pdata->transceiver_enable)
634 pdata->transceiver_enable(0);
635 close_candev(net);
636}
637
638static int mcp251x_stop(struct net_device *net)
639{
640 struct mcp251x_priv *priv = netdev_priv(net);
641 struct spi_device *spi = priv->spi;
642 struct mcp251x_platform_data *pdata = spi->dev.platform_data;
643
644 close_candev(net);
645
646 priv->force_quit = 1;
647 free_irq(spi->irq, priv);
648 destroy_workqueue(priv->wq);
649 priv->wq = NULL;
650
651 mutex_lock(&priv->mcp_lock);
652
653 /* Disable and clear pending interrupts */
654 mcp251x_write_reg(spi, CANINTE, 0x00);
655 mcp251x_write_reg(spi, CANINTF, 0x00);
656
657 mcp251x_write_reg(spi, TXBCTRL(0), 0);
658 mcp251x_clean(net);
659
660 mcp251x_hw_sleep(spi);
661
662 if (pdata->transceiver_enable)
663 pdata->transceiver_enable(0);
664
665 priv->can.state = CAN_STATE_STOPPED;
666
667 mutex_unlock(&priv->mcp_lock);
668
669 return 0;
670}
671
672static void mcp251x_error_skb(struct net_device *net, int can_id, int data1)
673{
674 struct sk_buff *skb;
675 struct can_frame *frame;
676
677 skb = alloc_can_err_skb(net, &frame);
678 if (skb) {
679 frame->can_id = can_id;
680 frame->data[1] = data1;
681 netif_rx(skb);
682 } else {
683 dev_err(&net->dev,
684 "cannot allocate error skb\n");
685 }
686}
687
688static void mcp251x_tx_work_handler(struct work_struct *ws)
689{
690 struct mcp251x_priv *priv = container_of(ws, struct mcp251x_priv,
691 tx_work);
692 struct spi_device *spi = priv->spi;
693 struct net_device *net = priv->net;
694 struct can_frame *frame;
695
696 mutex_lock(&priv->mcp_lock);
697 if (priv->tx_skb) {
698 if (priv->can.state == CAN_STATE_BUS_OFF) {
699 mcp251x_clean(net);
700 } else {
701 frame = (struct can_frame *)priv->tx_skb->data;
702
703 if (frame->can_dlc > CAN_FRAME_MAX_DATA_LEN)
704 frame->can_dlc = CAN_FRAME_MAX_DATA_LEN;
705 mcp251x_hw_tx(spi, frame, 0);
706 priv->tx_len = 1 + frame->can_dlc;
707 can_put_echo_skb(priv->tx_skb, net, 0);
708 priv->tx_skb = NULL;
709 }
710 }
711 mutex_unlock(&priv->mcp_lock);
712}
713
714static void mcp251x_restart_work_handler(struct work_struct *ws)
715{
716 struct mcp251x_priv *priv = container_of(ws, struct mcp251x_priv,
717 restart_work);
718 struct spi_device *spi = priv->spi;
719 struct net_device *net = priv->net;
720
721 mutex_lock(&priv->mcp_lock);
722 if (priv->after_suspend) {
723 mdelay(10);
724 mcp251x_hw_reset(spi);
725 mcp251x_setup(net, priv, spi);
726 if (priv->after_suspend & AFTER_SUSPEND_RESTART) {
727 mcp251x_set_normal_mode(spi);
728 } else if (priv->after_suspend & AFTER_SUSPEND_UP) {
729 netif_device_attach(net);
730 mcp251x_clean(net);
731 mcp251x_set_normal_mode(spi);
732 netif_wake_queue(net);
733 } else {
734 mcp251x_hw_sleep(spi);
735 }
736 priv->after_suspend = 0;
737 priv->force_quit = 0;
738 }
739
740 if (priv->restart_tx) {
741 priv->restart_tx = 0;
742 mcp251x_write_reg(spi, TXBCTRL(0), 0);
743 mcp251x_clean(net);
744 netif_wake_queue(net);
745 mcp251x_error_skb(net, CAN_ERR_RESTARTED, 0);
746 }
747 mutex_unlock(&priv->mcp_lock);
748}
749
750static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
751{
752 struct mcp251x_priv *priv = dev_id;
753 struct spi_device *spi = priv->spi;
754 struct net_device *net = priv->net;
755
756 mutex_lock(&priv->mcp_lock);
757 while (!priv->force_quit) {
758 enum can_state new_state;
759 u8 intf = mcp251x_read_reg(spi, CANINTF);
760 u8 eflag;
761 int can_id = 0, data1 = 0;
762
763 if (intf & CANINTF_RX0IF) {
764 mcp251x_hw_rx(spi, 0);
765 /* Free one buffer ASAP */
766 mcp251x_write_bits(spi, CANINTF, intf & CANINTF_RX0IF,
767 0x00);
768 }
769
770 if (intf & CANINTF_RX1IF)
771 mcp251x_hw_rx(spi, 1);
772
773 mcp251x_write_bits(spi, CANINTF, intf, 0x00);
774
775 eflag = mcp251x_read_reg(spi, EFLG);
776 mcp251x_write_reg(spi, EFLG, 0x00);
777
778 /* Update can state */
779 if (eflag & EFLG_TXBO) {
780 new_state = CAN_STATE_BUS_OFF;
781 can_id |= CAN_ERR_BUSOFF;
782 } else if (eflag & EFLG_TXEP) {
783 new_state = CAN_STATE_ERROR_PASSIVE;
784 can_id |= CAN_ERR_CRTL;
785 data1 |= CAN_ERR_CRTL_TX_PASSIVE;
786 } else if (eflag & EFLG_RXEP) {
787 new_state = CAN_STATE_ERROR_PASSIVE;
788 can_id |= CAN_ERR_CRTL;
789 data1 |= CAN_ERR_CRTL_RX_PASSIVE;
790 } else if (eflag & EFLG_TXWAR) {
791 new_state = CAN_STATE_ERROR_WARNING;
792 can_id |= CAN_ERR_CRTL;
793 data1 |= CAN_ERR_CRTL_TX_WARNING;
794 } else if (eflag & EFLG_RXWAR) {
795 new_state = CAN_STATE_ERROR_WARNING;
796 can_id |= CAN_ERR_CRTL;
797 data1 |= CAN_ERR_CRTL_RX_WARNING;
798 } else {
799 new_state = CAN_STATE_ERROR_ACTIVE;
800 }
801
802 /* Update can state statistics */
803 switch (priv->can.state) {
804 case CAN_STATE_ERROR_ACTIVE:
805 if (new_state >= CAN_STATE_ERROR_WARNING &&
806 new_state <= CAN_STATE_BUS_OFF)
807 priv->can.can_stats.error_warning++;
808 case CAN_STATE_ERROR_WARNING: /* fallthrough */
809 if (new_state >= CAN_STATE_ERROR_PASSIVE &&
810 new_state <= CAN_STATE_BUS_OFF)
811 priv->can.can_stats.error_passive++;
812 break;
813 default:
814 break;
815 }
816 priv->can.state = new_state;
817
818 if (intf & CANINTF_ERRIF) {
819 /* Handle overflow counters */
820 if (eflag & (EFLG_RX0OVR | EFLG_RX1OVR)) {
821 if (eflag & EFLG_RX0OVR)
822 net->stats.rx_over_errors++;
823 if (eflag & EFLG_RX1OVR)
824 net->stats.rx_over_errors++;
825 can_id |= CAN_ERR_CRTL;
826 data1 |= CAN_ERR_CRTL_RX_OVERFLOW;
827 }
828 mcp251x_error_skb(net, can_id, data1);
829 }
830
831 if (priv->can.state == CAN_STATE_BUS_OFF) {
832 if (priv->can.restart_ms == 0) {
833 priv->force_quit = 1;
834 can_bus_off(net);
835 mcp251x_hw_sleep(spi);
836 break;
837 }
838 }
839
840 if (intf == 0)
841 break;
842
843 if (intf & (CANINTF_TX2IF | CANINTF_TX1IF | CANINTF_TX0IF)) {
844 net->stats.tx_packets++;
845 net->stats.tx_bytes += priv->tx_len - 1;
846 if (priv->tx_len) {
847 can_get_echo_skb(net, 0);
848 priv->tx_len = 0;
849 }
850 netif_wake_queue(net);
851 }
852
853 }
854 mutex_unlock(&priv->mcp_lock);
855 return IRQ_HANDLED;
856}
857
858static int mcp251x_open(struct net_device *net)
859{
860 struct mcp251x_priv *priv = netdev_priv(net);
861 struct spi_device *spi = priv->spi;
862 struct mcp251x_platform_data *pdata = spi->dev.platform_data;
863 int ret;
864
865 ret = open_candev(net);
866 if (ret) {
867 dev_err(&spi->dev, "unable to set initial baudrate!\n");
868 return ret;
869 }
870
871 mutex_lock(&priv->mcp_lock);
872 if (pdata->transceiver_enable)
873 pdata->transceiver_enable(1);
874
875 priv->force_quit = 0;
876 priv->tx_skb = NULL;
877 priv->tx_len = 0;
878
879 ret = request_threaded_irq(spi->irq, NULL, mcp251x_can_ist,
880 IRQF_TRIGGER_FALLING, DEVICE_NAME, priv);
881 if (ret) {
882 dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);
883 if (pdata->transceiver_enable)
884 pdata->transceiver_enable(0);
885 close_candev(net);
886 goto open_unlock;
887 }
888
889 priv->wq = create_freezeable_workqueue("mcp251x_wq");
890 INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
891 INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler);
892
893 ret = mcp251x_hw_reset(spi);
894 if (ret) {
895 mcp251x_open_clean(net);
896 goto open_unlock;
897 }
898 ret = mcp251x_setup(net, priv, spi);
899 if (ret) {
900 mcp251x_open_clean(net);
901 goto open_unlock;
902 }
903 ret = mcp251x_set_normal_mode(spi);
904 if (ret) {
905 mcp251x_open_clean(net);
906 goto open_unlock;
907 }
908 netif_wake_queue(net);
909
910open_unlock:
911 mutex_unlock(&priv->mcp_lock);
912 return ret;
913}
914
915static const struct net_device_ops mcp251x_netdev_ops = {
916 .ndo_open = mcp251x_open,
917 .ndo_stop = mcp251x_stop,
918 .ndo_start_xmit = mcp251x_hard_start_xmit,
919};
920
921static int __devinit mcp251x_can_probe(struct spi_device *spi)
922{
923 struct net_device *net;
924 struct mcp251x_priv *priv;
925 struct mcp251x_platform_data *pdata = spi->dev.platform_data;
926 int ret = -ENODEV;
927
928 if (!pdata)
929 /* Platform data is required for osc freq */
930 goto error_out;
931
932 /* Allocate can/net device */
933 net = alloc_candev(sizeof(struct mcp251x_priv), TX_ECHO_SKB_MAX);
934 if (!net) {
935 ret = -ENOMEM;
936 goto error_alloc;
937 }
938
939 net->netdev_ops = &mcp251x_netdev_ops;
940 net->flags |= IFF_ECHO;
941
942 priv = netdev_priv(net);
943 priv->can.bittiming_const = &mcp251x_bittiming_const;
944 priv->can.do_set_mode = mcp251x_do_set_mode;
945 priv->can.clock.freq = pdata->oscillator_frequency / 2;
946 priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
947 CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY;
948 priv->net = net;
949 dev_set_drvdata(&spi->dev, priv);
950
951 priv->spi = spi;
952 mutex_init(&priv->mcp_lock);
953
954 /* If requested, allocate DMA buffers */
955 if (mcp251x_enable_dma) {
956 spi->dev.coherent_dma_mask = ~0;
957
958 /*
959 * Minimum coherent DMA allocation is PAGE_SIZE, so allocate
960 * that much and share it between Tx and Rx DMA buffers.
961 */
962 priv->spi_tx_buf = dma_alloc_coherent(&spi->dev,
963 PAGE_SIZE,
964 &priv->spi_tx_dma,
965 GFP_DMA);
966
967 if (priv->spi_tx_buf) {
968 priv->spi_rx_buf = (u8 *)(priv->spi_tx_buf +
969 (PAGE_SIZE / 2));
970 priv->spi_rx_dma = (dma_addr_t)(priv->spi_tx_dma +
971 (PAGE_SIZE / 2));
972 } else {
973 /* Fall back to non-DMA */
974 mcp251x_enable_dma = 0;
975 }
976 }
977
978 /* Allocate non-DMA buffers */
979 if (!mcp251x_enable_dma) {
980 priv->spi_tx_buf = kmalloc(SPI_TRANSFER_BUF_LEN, GFP_KERNEL);
981 if (!priv->spi_tx_buf) {
982 ret = -ENOMEM;
983 goto error_tx_buf;
984 }
985 priv->spi_rx_buf = kmalloc(SPI_TRANSFER_BUF_LEN, GFP_KERNEL);
986 if (!priv->spi_rx_buf) {
987 ret = -ENOMEM;
988 goto error_rx_buf;
989 }
990 }
991
992 if (pdata->power_enable)
993 pdata->power_enable(1);
994
995 /* Call out to platform specific setup */
996 if (pdata->board_specific_setup)
997 pdata->board_specific_setup(spi);
998
999 SET_NETDEV_DEV(net, &spi->dev);
1000
1001 /* Configure the SPI bus */
1002 spi->mode = SPI_MODE_0;
1003 spi->bits_per_word = 8;
1004 spi_setup(spi);
1005
1006	/* It is OK not to lock the MCP here, no one knows about it yet */
1007 if (!mcp251x_hw_probe(spi)) {
1008 dev_info(&spi->dev, "Probe failed\n");
1009 goto error_probe;
1010 }
1011 mcp251x_hw_sleep(spi);
1012
1013 if (pdata->transceiver_enable)
1014 pdata->transceiver_enable(0);
1015
1016 ret = register_candev(net);
1017 if (!ret) {
1018 dev_info(&spi->dev, "probed\n");
1019 return ret;
1020 }
1021error_probe:
1022 if (!mcp251x_enable_dma)
1023 kfree(priv->spi_rx_buf);
1024error_rx_buf:
1025 if (!mcp251x_enable_dma)
1026 kfree(priv->spi_tx_buf);
1027error_tx_buf:
1028	if (mcp251x_enable_dma)
1029		dma_free_coherent(&spi->dev, PAGE_SIZE,
1030			priv->spi_tx_buf, priv->spi_tx_dma);
1031	free_candev(net);
1032error_alloc:
1033 if (pdata->power_enable)
1034 pdata->power_enable(0);
1035 dev_err(&spi->dev, "probe failed\n");
1036error_out:
1037 return ret;
1038}
1039
1040static int __devexit mcp251x_can_remove(struct spi_device *spi)
1041{
1042 struct mcp251x_platform_data *pdata = spi->dev.platform_data;
1043 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
1044 struct net_device *net = priv->net;
1045
1046 unregister_candev(net);
1047 free_candev(net);
1048
1049 if (mcp251x_enable_dma) {
1050 dma_free_coherent(&spi->dev, PAGE_SIZE,
1051 priv->spi_tx_buf, priv->spi_tx_dma);
1052 } else {
1053 kfree(priv->spi_tx_buf);
1054 kfree(priv->spi_rx_buf);
1055 }
1056
1057 if (pdata->power_enable)
1058 pdata->power_enable(0);
1059
1060 return 0;
1061}
1062
1063#ifdef CONFIG_PM
1064static int mcp251x_can_suspend(struct spi_device *spi, pm_message_t state)
1065{
1066 struct mcp251x_platform_data *pdata = spi->dev.platform_data;
1067 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
1068 struct net_device *net = priv->net;
1069
1070 priv->force_quit = 1;
1071 disable_irq(spi->irq);
1072 /*
1073 * Note: at this point neither IST nor workqueues are running.
1074 * open/stop cannot be called anyway so locking is not needed
1075 */
1076 if (netif_running(net)) {
1077 netif_device_detach(net);
1078
1079 mcp251x_hw_sleep(spi);
1080 if (pdata->transceiver_enable)
1081 pdata->transceiver_enable(0);
1082 priv->after_suspend = AFTER_SUSPEND_UP;
1083 } else {
1084 priv->after_suspend = AFTER_SUSPEND_DOWN;
1085 }
1086
1087 if (pdata->power_enable) {
1088 pdata->power_enable(0);
1089 priv->after_suspend |= AFTER_SUSPEND_POWER;
1090 }
1091
1092 return 0;
1093}
1094
1095static int mcp251x_can_resume(struct spi_device *spi)
1096{
1097 struct mcp251x_platform_data *pdata = spi->dev.platform_data;
1098 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
1099
1100 if (priv->after_suspend & AFTER_SUSPEND_POWER) {
1101 pdata->power_enable(1);
1102 queue_work(priv->wq, &priv->restart_work);
1103 } else {
1104 if (priv->after_suspend & AFTER_SUSPEND_UP) {
1105 if (pdata->transceiver_enable)
1106 pdata->transceiver_enable(1);
1107 queue_work(priv->wq, &priv->restart_work);
1108 } else {
1109 priv->after_suspend = 0;
1110 }
1111 }
1112 priv->force_quit = 0;
1113 enable_irq(spi->irq);
1114 return 0;
1115}
1116#else
1117#define mcp251x_can_suspend NULL
1118#define mcp251x_can_resume NULL
1119#endif
1120
1121static struct spi_driver mcp251x_can_driver = {
1122 .driver = {
1123 .name = DEVICE_NAME,
1124 .bus = &spi_bus_type,
1125 .owner = THIS_MODULE,
1126 },
1127
1128 .probe = mcp251x_can_probe,
1129 .remove = __devexit_p(mcp251x_can_remove),
1130 .suspend = mcp251x_can_suspend,
1131 .resume = mcp251x_can_resume,
1132};
1133
1134static int __init mcp251x_can_init(void)
1135{
1136 return spi_register_driver(&mcp251x_can_driver);
1137}
1138
1139static void __exit mcp251x_can_exit(void)
1140{
1141 spi_unregister_driver(&mcp251x_can_driver);
1142}
1143
1144module_init(mcp251x_can_init);
1145module_exit(mcp251x_can_exit);
1146
1147MODULE_AUTHOR("Chris Elston <celston@katalix.com>, "
1148 "Christian Pellegrin <chripell@evolware.org>");
1149MODULE_DESCRIPTION("Microchip 251x CAN driver");
1150MODULE_LICENSE("GPL v2");
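
For reference, the probe above only succeeds when the board supplies platform data with at least the oscillator frequency. Below is a minimal, hypothetical board registration sketch; the field and callback names follow the pdata accesses in mcp251x_can_probe(), while the header path, the "mcp251x" modalias and all numeric values (SPI bus, chip select, IRQ, clock rates) are assumptions and not part of this patch.

#include <linux/init.h>
#include <linux/spi/spi.h>
#include <linux/can/platform/mcp251x.h>

static int myboard_can_transceiver_enable(int enable)
{
	/* board specific: e.g. toggle the GPIO powering the CAN transceiver */
	return 0;
}

static struct mcp251x_platform_data myboard_mcp251x_pdata = {
	.oscillator_frequency	= 16000000,	/* 16 MHz crystal */
	.transceiver_enable	= myboard_can_transceiver_enable,
};

static struct spi_board_info myboard_spi_devs[] __initdata = {
	{
		.modalias	= "mcp251x",
		.platform_data	= &myboard_mcp251x_pdata,
		.max_speed_hz	= 10 * 1000 * 1000,
		.bus_num	= 0,
		.chip_select	= 0,
		.irq		= 42,	/* hypothetical interrupt line */
	},
};

/* from the board init code: */
/* spi_register_board_info(myboard_spi_devs, ARRAY_SIZE(myboard_spi_devs)); */

The driver then derives the CAN core clock as oscillator_frequency / 2, as can be seen in the priv->can.clock.freq assignment in the probe above.
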
diff --git a/drivers/net/can/mscan/Kconfig b/drivers/net/can/mscan/Kconfig
new file mode 100644
index 000000000000..27d1d398e25e
--- /dev/null
+++ b/drivers/net/can/mscan/Kconfig
@@ -0,0 +1,24 @@
1config CAN_MSCAN
2 depends on CAN_DEV && (PPC || M68K || M68KNOMMU)
3 tristate "Support for Freescale MSCAN based chips"
4 ---help---
5 The Motorola Scalable Controller Area Network (MSCAN) definition
6 is based on the MSCAN12 definition which is the specific
7 implementation of the Motorola Scalable CAN concept targeted for
8 the Motorola MC68HC12 Microcontroller Family.
9
10if CAN_MSCAN
11
12config CAN_MPC5XXX
13 tristate "Freescale MPC5xxx onboard CAN controller"
14 depends on (PPC_MPC52xx || PPC_MPC512x)
15 ---help---
16 If you say yes here you get support for Freescale's MPC5xxx
17 onboard CAN controller. Currently, the MPC5200, MPC5200B and
18 MPC5121 (Rev. 2 and later) are supported.
19
20 This driver can also be built as a module. If so, the module
21 will be called mscan-mpc5xxx.ko.
22
23endif
24
diff --git a/drivers/net/can/mscan/Makefile b/drivers/net/can/mscan/Makefile
new file mode 100644
index 000000000000..c9fab17cd8b4
--- /dev/null
+++ b/drivers/net/can/mscan/Makefile
@@ -0,0 +1,5 @@
1
2obj-$(CONFIG_CAN_MPC5XXX) += mscan-mpc5xxx.o
3mscan-mpc5xxx-objs := mscan.o mpc5xxx_can.o
4
5ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
new file mode 100644
index 000000000000..03e7c48465a2
--- /dev/null
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -0,0 +1,421 @@
1/*
2 * CAN bus driver for the Freescale MPC5xxx embedded CPU.
3 *
4 * Copyright (C) 2004-2005 Andrey Volkov <avolkov@varma-el.com>,
5 * Varma Electronics Oy
6 * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
7 * Copyright (C) 2009 Wolfram Sang, Pengutronix <w.sang@pengutronix.de>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the version 2 of the GNU General Public License
11 * as published by the Free Software Foundation
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/interrupt.h>
26#include <linux/platform_device.h>
27#include <linux/netdevice.h>
28#include <linux/can.h>
29#include <linux/can/dev.h>
30#include <linux/of_platform.h>
31#include <sysdev/fsl_soc.h>
32#include <linux/clk.h>
33#include <linux/io.h>
34#include <asm/mpc52xx.h>
35
36#include "mscan.h"
37
38#define DRV_NAME "mpc5xxx_can"
39
40struct mpc5xxx_can_data {
41 unsigned int type;
42 u32 (*get_clock)(struct of_device *ofdev, const char *clock_name,
43 int *mscan_clksrc);
44};
45
46#ifdef CONFIG_PPC_MPC52xx
47static struct of_device_id __devinitdata mpc52xx_cdm_ids[] = {
48 { .compatible = "fsl,mpc5200-cdm", },
49 {}
50};
51
52static u32 __devinit mpc52xx_can_get_clock(struct of_device *ofdev,
53 const char *clock_name,
54 int *mscan_clksrc)
55{
56 unsigned int pvr;
57 struct mpc52xx_cdm __iomem *cdm;
58 struct device_node *np_cdm;
59 unsigned int freq;
60 u32 val;
61
62 pvr = mfspr(SPRN_PVR);
63
64 /*
65 * Either the oscillator clock (SYS_XTAL_IN) or the IP bus clock
66 * (IP_CLK) can be selected as MSCAN clock source. According to
67 * the MPC5200 user's manual, the oscillator clock is the better
68 * choice as it has less jitter. For this reason, it is selected
69 * by default. Unfortunately, it can not be selected for the old
70 * MPC5200 Rev. A chips due to a hardware bug (check errata).
71 */
72 if (clock_name && strcmp(clock_name, "ip") == 0)
73 *mscan_clksrc = MSCAN_CLKSRC_BUS;
74 else
75 *mscan_clksrc = MSCAN_CLKSRC_XTAL;
76
77 freq = mpc5xxx_get_bus_frequency(ofdev->node);
78 if (!freq)
79 return 0;
80
81 if (*mscan_clksrc == MSCAN_CLKSRC_BUS || pvr == 0x80822011)
82 return freq;
83
84 /* Determine SYS_XTAL_IN frequency from the clock domain settings */
85 np_cdm = of_find_matching_node(NULL, mpc52xx_cdm_ids);
86 if (!np_cdm) {
87 dev_err(&ofdev->dev, "can't get clock node!\n");
88 return 0;
89 }
90 cdm = of_iomap(np_cdm, 0);
91
92 if (in_8(&cdm->ipb_clk_sel) & 0x1)
93 freq *= 2;
94 val = in_be32(&cdm->rstcfg);
95
96 freq *= (val & (1 << 5)) ? 8 : 4;
97 freq /= (val & (1 << 6)) ? 12 : 16;
98
99 of_node_put(np_cdm);
100 iounmap(cdm);
101
102 return freq;
103}
104#else /* !CONFIG_PPC_MPC52xx */
105static u32 __devinit mpc52xx_can_get_clock(struct of_device *ofdev,
106 const char *clock_name,
107 int *mscan_clksrc)
108{
109 return 0;
110}
111#endif /* CONFIG_PPC_MPC52xx */
112
113#ifdef CONFIG_PPC_MPC512x
114struct mpc512x_clockctl {
115 u32 spmr; /* System PLL Mode Reg */
116 u32 sccr[2]; /* System Clk Ctrl Reg 1 & 2 */
117 u32 scfr1; /* System Clk Freq Reg 1 */
118 u32 scfr2; /* System Clk Freq Reg 2 */
119 u32 reserved;
120 u32 bcr; /* Bread Crumb Reg */
121 u32 pccr[12]; /* PSC Clk Ctrl Reg 0-11 */
122 u32 spccr; /* SPDIF Clk Ctrl Reg */
123 u32 cccr; /* CFM Clk Ctrl Reg */
124 u32 dccr; /* DIU Clk Cnfg Reg */
125	u32 mccr[4];		/* MSCAN Clk Ctrl Reg 1-4 */
126};
127
128static struct of_device_id __devinitdata mpc512x_clock_ids[] = {
129 { .compatible = "fsl,mpc5121-clock", },
130 {}
131};
132
133static u32 __devinit mpc512x_can_get_clock(struct of_device *ofdev,
134 const char *clock_name,
135 int *mscan_clksrc)
136{
137 struct mpc512x_clockctl __iomem *clockctl;
138 struct device_node *np_clock;
139 struct clk *sys_clk, *ref_clk;
140 int plen, clockidx, clocksrc = -1;
141 u32 sys_freq, val, clockdiv = 1, freq = 0;
142 const u32 *pval;
143
144 np_clock = of_find_matching_node(NULL, mpc512x_clock_ids);
145 if (!np_clock) {
146 dev_err(&ofdev->dev, "couldn't find clock node\n");
147		return 0;
148 }
149 clockctl = of_iomap(np_clock, 0);
150 if (!clockctl) {
151 dev_err(&ofdev->dev, "couldn't map clock registers\n");
152 return 0;
153 }
154
155 /* Determine the MSCAN device index from the physical address */
156 pval = of_get_property(ofdev->node, "reg", &plen);
157 BUG_ON(!pval || plen < sizeof(*pval));
158 clockidx = (*pval & 0x80) ? 1 : 0;
159 if (*pval & 0x2000)
160 clockidx += 2;
161
162 /*
163 * Clock source and divider selection: 3 different clock sources
164 * can be selected: "ip", "ref" or "sys". For the latter two, a
165 * clock divider can be defined as well. If the clock source is
166 * not specified by the device tree, we first try to find an
167 * optimal CAN source clock based on the system clock. If that
168	 * is not possible, the reference clock will be used.
169 */
170 if (clock_name && !strcmp(clock_name, "ip")) {
171 *mscan_clksrc = MSCAN_CLKSRC_IPS;
172 freq = mpc5xxx_get_bus_frequency(ofdev->node);
173 } else {
174 *mscan_clksrc = MSCAN_CLKSRC_BUS;
175
176 pval = of_get_property(ofdev->node,
177 "fsl,mscan-clock-divider", &plen);
178 if (pval && plen == sizeof(*pval))
179 clockdiv = *pval;
180 if (!clockdiv)
181 clockdiv = 1;
182
183 if (!clock_name || !strcmp(clock_name, "sys")) {
184 sys_clk = clk_get(&ofdev->dev, "sys_clk");
185 if (!sys_clk) {
186 dev_err(&ofdev->dev, "couldn't get sys_clk\n");
187 goto exit_unmap;
188 }
189 /* Get and round up/down sys clock rate */
190 sys_freq = 1000000 *
191 ((clk_get_rate(sys_clk) + 499999) / 1000000);
192
193 if (!clock_name) {
194 /* A multiple of 16 MHz would be optimal */
195 if ((sys_freq % 16000000) == 0) {
196 clocksrc = 0;
197 clockdiv = sys_freq / 16000000;
198 freq = sys_freq / clockdiv;
199 }
200 } else {
201 clocksrc = 0;
202 freq = sys_freq / clockdiv;
203 }
204 }
205
206 if (clocksrc < 0) {
207 ref_clk = clk_get(&ofdev->dev, "ref_clk");
208 if (!ref_clk) {
209 dev_err(&ofdev->dev, "couldn't get ref_clk\n");
210 goto exit_unmap;
211 }
212 clocksrc = 1;
213 freq = clk_get_rate(ref_clk) / clockdiv;
214 }
215 }
216
217 /* Disable clock */
218 out_be32(&clockctl->mccr[clockidx], 0x0);
219 if (clocksrc >= 0) {
220 /* Set source and divider */
221 val = (clocksrc << 14) | ((clockdiv - 1) << 17);
222 out_be32(&clockctl->mccr[clockidx], val);
223 /* Enable clock */
224 out_be32(&clockctl->mccr[clockidx], val | 0x10000);
225 }
226
227 /* Enable MSCAN clock domain */
228 val = in_be32(&clockctl->sccr[1]);
229 if (!(val & (1 << 25)))
230 out_be32(&clockctl->sccr[1], val | (1 << 25));
231
232 dev_dbg(&ofdev->dev, "using '%s' with frequency divider %d\n",
233 *mscan_clksrc == MSCAN_CLKSRC_IPS ? "ips_clk" :
234 clocksrc == 1 ? "ref_clk" : "sys_clk", clockdiv);
235
236exit_unmap:
237 of_node_put(np_clock);
238 iounmap(clockctl);
239
240 return freq;
241}
242#else /* !CONFIG_PPC_MPC512x */
243static u32 __devinit mpc512x_can_get_clock(struct of_device *ofdev,
244 const char *clock_name,
245 int *mscan_clksrc)
246{
247 return 0;
248}
249#endif /* CONFIG_PPC_MPC512x */
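
To make the MCCR programming in mpc512x_can_get_clock() above easier to follow, here is a small worked example with hypothetical numbers (a 192 MHz system clock and no "fsl,mscan-clock-source" property in the device tree, so the driver picks the sys clock, clocksrc = 0):

#include <linux/types.h>

/* Hypothetical numbers only; clocksrc = 0 selects the system clock. */
static u32 mpc512x_mccr_example(void)
{
	u32 clockdiv = 192000000 / 16000000;		/* = 12 */
	u32 val = (0 << 14) | ((clockdiv - 1) << 17);	/* = 0x160000 */

	/*
	 * The driver writes the selected mccr[clockidx] register three
	 * times: 0x0 to disable the clock, then val, then
	 * val | 0x10000 (= 0x170000) to enable it, which yields a
	 * 16 MHz MSCAN clock (192 MHz / 12).
	 */
	return val | 0x10000;
}
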
250
251static int __devinit mpc5xxx_can_probe(struct of_device *ofdev,
252 const struct of_device_id *id)
253{
254 struct mpc5xxx_can_data *data = (struct mpc5xxx_can_data *)id->data;
255 struct device_node *np = ofdev->node;
256 struct net_device *dev;
257 struct mscan_priv *priv;
258 void __iomem *base;
259 const char *clock_name = NULL;
260 int irq, mscan_clksrc = 0;
261 int err = -ENOMEM;
262
263 base = of_iomap(np, 0);
264 if (!base) {
265 dev_err(&ofdev->dev, "couldn't ioremap\n");
266 return err;
267 }
268
269 irq = irq_of_parse_and_map(np, 0);
270 if (!irq) {
271 dev_err(&ofdev->dev, "no irq found\n");
272 err = -ENODEV;
273 goto exit_unmap_mem;
274 }
275
276 dev = alloc_mscandev();
277 if (!dev)
278 goto exit_dispose_irq;
279
280 priv = netdev_priv(dev);
281 priv->reg_base = base;
282 dev->irq = irq;
283
284 clock_name = of_get_property(np, "fsl,mscan-clock-source", NULL);
285
286 BUG_ON(!data);
287 priv->type = data->type;
288 priv->can.clock.freq = data->get_clock(ofdev, clock_name,
289 &mscan_clksrc);
290 if (!priv->can.clock.freq) {
291 dev_err(&ofdev->dev, "couldn't get MSCAN clock properties\n");
292 goto exit_free_mscan;
293 }
294
295 SET_NETDEV_DEV(dev, &ofdev->dev);
296
297 err = register_mscandev(dev, mscan_clksrc);
298 if (err) {
299 dev_err(&ofdev->dev, "registering %s failed (err=%d)\n",
300 DRV_NAME, err);
301 goto exit_free_mscan;
302 }
303
304 dev_set_drvdata(&ofdev->dev, dev);
305
306 dev_info(&ofdev->dev, "MSCAN at 0x%p, irq %d, clock %d Hz\n",
307 priv->reg_base, dev->irq, priv->can.clock.freq);
308
309 return 0;
310
311exit_free_mscan:
312 free_candev(dev);
313exit_dispose_irq:
314 irq_dispose_mapping(irq);
315exit_unmap_mem:
316 iounmap(base);
317
318 return err;
319}
320
321static int __devexit mpc5xxx_can_remove(struct of_device *ofdev)
322{
323 struct net_device *dev = dev_get_drvdata(&ofdev->dev);
324 struct mscan_priv *priv = netdev_priv(dev);
325
326 dev_set_drvdata(&ofdev->dev, NULL);
327
328 unregister_mscandev(dev);
329 iounmap(priv->reg_base);
330 irq_dispose_mapping(dev->irq);
331 free_candev(dev);
332
333 return 0;
334}
335
336#ifdef CONFIG_PM
337static struct mscan_regs saved_regs;
338static int mpc5xxx_can_suspend(struct of_device *ofdev, pm_message_t state)
339{
340 struct net_device *dev = dev_get_drvdata(&ofdev->dev);
341 struct mscan_priv *priv = netdev_priv(dev);
342 struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
343
344 _memcpy_fromio(&saved_regs, regs, sizeof(*regs));
345
346 return 0;
347}
348
349static int mpc5xxx_can_resume(struct of_device *ofdev)
350{
351 struct net_device *dev = dev_get_drvdata(&ofdev->dev);
352 struct mscan_priv *priv = netdev_priv(dev);
353 struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
354
355 regs->canctl0 |= MSCAN_INITRQ;
356 while (!(regs->canctl1 & MSCAN_INITAK))
357 udelay(10);
358
359 regs->canctl1 = saved_regs.canctl1;
360 regs->canbtr0 = saved_regs.canbtr0;
361 regs->canbtr1 = saved_regs.canbtr1;
362 regs->canidac = saved_regs.canidac;
363
364 /* restore masks, buffers etc. */
365 _memcpy_toio(&regs->canidar1_0, (void *)&saved_regs.canidar1_0,
366 sizeof(*regs) - offsetof(struct mscan_regs, canidar1_0));
367
368 regs->canctl0 &= ~MSCAN_INITRQ;
369 regs->cantbsel = saved_regs.cantbsel;
370 regs->canrier = saved_regs.canrier;
371 regs->cantier = saved_regs.cantier;
372 regs->canctl0 = saved_regs.canctl0;
373
374 return 0;
375}
376#endif
377
378static struct mpc5xxx_can_data __devinitdata mpc5200_can_data = {
379 .type = MSCAN_TYPE_MPC5200,
380 .get_clock = mpc52xx_can_get_clock,
381};
382
383static struct mpc5xxx_can_data __devinitdata mpc5121_can_data = {
384 .type = MSCAN_TYPE_MPC5121,
385 .get_clock = mpc512x_can_get_clock,
386};
387
388static struct of_device_id __devinitdata mpc5xxx_can_table[] = {
389 { .compatible = "fsl,mpc5200-mscan", .data = &mpc5200_can_data, },
390 /* Note that only MPC5121 Rev. 2 (and later) is supported */
391 { .compatible = "fsl,mpc5121-mscan", .data = &mpc5121_can_data, },
392 {},
393};
394
395static struct of_platform_driver mpc5xxx_can_driver = {
396 .owner = THIS_MODULE,
397 .name = "mpc5xxx_can",
398 .probe = mpc5xxx_can_probe,
399 .remove = __devexit_p(mpc5xxx_can_remove),
400#ifdef CONFIG_PM
401 .suspend = mpc5xxx_can_suspend,
402 .resume = mpc5xxx_can_resume,
403#endif
404 .match_table = mpc5xxx_can_table,
405};
406
407static int __init mpc5xxx_can_init(void)
408{
409 return of_register_platform_driver(&mpc5xxx_can_driver);
410}
411module_init(mpc5xxx_can_init);
412
413static void __exit mpc5xxx_can_exit(void)
414{
415	of_unregister_platform_driver(&mpc5xxx_can_driver);
416}
417module_exit(mpc5xxx_can_exit);
418
419MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
420MODULE_DESCRIPTION("Freescale MPC5xxx CAN driver");
421MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
new file mode 100644
index 000000000000..6b7dd578d417
--- /dev/null
+++ b/drivers/net/can/mscan/mscan.c
@@ -0,0 +1,701 @@
1/*
2 * CAN bus driver for the generic (as far as possible) MSCAN controller.
3 *
4 * Copyright (C) 2005-2006 Andrey Volkov <avolkov@varma-el.com>,
5 * Varma Electronics Oy
6 * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
7 * Copyright (C) 2008-2009 Pengutronix <kernel@pengutronix.de>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the version 2 of the GNU General Public License
11 * as published by the Free Software Foundation
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/interrupt.h>
26#include <linux/delay.h>
27#include <linux/netdevice.h>
28#include <linux/if_arp.h>
29#include <linux/if_ether.h>
30#include <linux/list.h>
31#include <linux/can.h>
32#include <linux/can/dev.h>
33#include <linux/can/error.h>
34#include <linux/io.h>
35
36#include "mscan.h"
37
38static struct can_bittiming_const mscan_bittiming_const = {
39 .name = "mscan",
40 .tseg1_min = 4,
41 .tseg1_max = 16,
42 .tseg2_min = 2,
43 .tseg2_max = 8,
44 .sjw_max = 4,
45 .brp_min = 1,
46 .brp_max = 64,
47 .brp_inc = 1,
48};
49
50struct mscan_state {
51 u8 mode;
52 u8 canrier;
53 u8 cantier;
54};
55
56static enum can_state state_map[] = {
57 CAN_STATE_ERROR_ACTIVE,
58 CAN_STATE_ERROR_WARNING,
59 CAN_STATE_ERROR_PASSIVE,
60 CAN_STATE_BUS_OFF
61};
62
63static int mscan_set_mode(struct net_device *dev, u8 mode)
64{
65 struct mscan_priv *priv = netdev_priv(dev);
66 struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
67 int ret = 0;
68 int i;
69 u8 canctl1;
70
71 if (mode != MSCAN_NORMAL_MODE) {
72 if (priv->tx_active) {
73			/* Abort transfers before going to sleep */
74 out_8(&regs->cantarq, priv->tx_active);
75 /* Suppress TX done interrupts */
76 out_8(&regs->cantier, 0);
77 }
78
79 canctl1 = in_8(&regs->canctl1);
80 if ((mode & MSCAN_SLPRQ) && !(canctl1 & MSCAN_SLPAK)) {
81 setbits8(&regs->canctl0, MSCAN_SLPRQ);
82 for (i = 0; i < MSCAN_SET_MODE_RETRIES; i++) {
83 if (in_8(&regs->canctl1) & MSCAN_SLPAK)
84 break;
85 udelay(100);
86 }
87 /*
88			 * The MSCAN controller will fail to enter sleep mode
89			 * while there is irregular activity on the bus, e.g.
90			 * somebody keeps retransmitting. This behavior is
91			 * undocumented and seems to differ between the MSCAN
92			 * built into the MPC5200B and the MPC5200. We proceed
93			 * anyway in that case, since otherwise SLPRQ would be
94			 * kept set and the controller would get stuck. NOTE:
95			 * INITRQ or CSWAI will abort any still active transmit
96			 * actions at once.
97 */
98 if (i >= MSCAN_SET_MODE_RETRIES)
99 dev_dbg(dev->dev.parent,
100 "device failed to enter sleep mode. "
101 "We proceed anyhow.\n");
102 else
103 priv->can.state = CAN_STATE_SLEEPING;
104 }
105
106 if ((mode & MSCAN_INITRQ) && !(canctl1 & MSCAN_INITAK)) {
107 setbits8(&regs->canctl0, MSCAN_INITRQ);
108 for (i = 0; i < MSCAN_SET_MODE_RETRIES; i++) {
109 if (in_8(&regs->canctl1) & MSCAN_INITAK)
110 break;
111 }
112 if (i >= MSCAN_SET_MODE_RETRIES)
113 ret = -ENODEV;
114 }
115 if (!ret)
116 priv->can.state = CAN_STATE_STOPPED;
117
118 if (mode & MSCAN_CSWAI)
119 setbits8(&regs->canctl0, MSCAN_CSWAI);
120
121 } else {
122 canctl1 = in_8(&regs->canctl1);
123 if (canctl1 & (MSCAN_SLPAK | MSCAN_INITAK)) {
124 clrbits8(&regs->canctl0, MSCAN_SLPRQ | MSCAN_INITRQ);
125 for (i = 0; i < MSCAN_SET_MODE_RETRIES; i++) {
126 canctl1 = in_8(&regs->canctl1);
127 if (!(canctl1 & (MSCAN_INITAK | MSCAN_SLPAK)))
128 break;
129 }
130 if (i >= MSCAN_SET_MODE_RETRIES)
131 ret = -ENODEV;
132 else
133 priv->can.state = CAN_STATE_ERROR_ACTIVE;
134 }
135 }
136 return ret;
137}
138
139static int mscan_start(struct net_device *dev)
140{
141 struct mscan_priv *priv = netdev_priv(dev);
142 struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
143 u8 canrflg;
144 int err;
145
146 out_8(&regs->canrier, 0);
147
148 INIT_LIST_HEAD(&priv->tx_head);
149 priv->prev_buf_id = 0;
150 priv->cur_pri = 0;
151 priv->tx_active = 0;
152 priv->shadow_canrier = 0;
153 priv->flags = 0;
154
155 if (priv->type == MSCAN_TYPE_MPC5121) {
156 /* Clear pending bus-off condition */
157 if (in_8(&regs->canmisc) & MSCAN_BOHOLD)
158 out_8(&regs->canmisc, MSCAN_BOHOLD);
159 }
160
161 err = mscan_set_mode(dev, MSCAN_NORMAL_MODE);
162 if (err)
163 return err;
164
165 canrflg = in_8(&regs->canrflg);
166 priv->shadow_statflg = canrflg & MSCAN_STAT_MSK;
167 priv->can.state = state_map[max(MSCAN_STATE_RX(canrflg),
168 MSCAN_STATE_TX(canrflg))];
169 out_8(&regs->cantier, 0);
170
171 /* Enable receive interrupts. */
172 out_8(&regs->canrier, MSCAN_RX_INTS_ENABLE);
173
174 return 0;
175}
176
177static int mscan_restart(struct net_device *dev)
178{
179 struct mscan_priv *priv = netdev_priv(dev);
180
181 if (priv->type == MSCAN_TYPE_MPC5121) {
182 struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
183
184 priv->can.state = CAN_STATE_ERROR_ACTIVE;
185 WARN(!(in_8(&regs->canmisc) & MSCAN_BOHOLD),
186 "bus-off state expected");
187 out_8(&regs->canmisc, MSCAN_BOHOLD);
188 /* Re-enable receive interrupts. */
189 out_8(&regs->canrier, MSCAN_RX_INTS_ENABLE);
190 } else {
191 if (priv->can.state <= CAN_STATE_BUS_OFF)
192 mscan_set_mode(dev, MSCAN_INIT_MODE);
193 return mscan_start(dev);
194 }
195
196 return 0;
197}
198
199static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev)
200{
201 struct can_frame *frame = (struct can_frame *)skb->data;
202 struct mscan_priv *priv = netdev_priv(dev);
203 struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
204 int i, rtr, buf_id;
205 u32 can_id;
206
207 if (can_dropped_invalid_skb(dev, skb))
208 return NETDEV_TX_OK;
209
210 out_8(&regs->cantier, 0);
211
212 i = ~priv->tx_active & MSCAN_TXE;
213 buf_id = ffs(i) - 1;
214 switch (hweight8(i)) {
215 case 0:
216 netif_stop_queue(dev);
217 dev_err(dev->dev.parent, "Tx Ring full when queue awake!\n");
218 return NETDEV_TX_BUSY;
219 case 1:
220 /*
221			 * if buf_id < 3, the current frame will be sent out of order,
222			 * since buffers with a lower id have higher priority
223 */
224 netif_stop_queue(dev);
225 case 2:
226 if (buf_id < priv->prev_buf_id) {
227 priv->cur_pri++;
228 if (priv->cur_pri == 0xff) {
229 set_bit(F_TX_WAIT_ALL, &priv->flags);
230 netif_stop_queue(dev);
231 }
232 }
233 set_bit(F_TX_PROGRESS, &priv->flags);
234 break;
235 }
236 priv->prev_buf_id = buf_id;
237 out_8(&regs->cantbsel, i);
238
239 rtr = frame->can_id & CAN_RTR_FLAG;
240
241 /* RTR is always the lowest bit of interest, then IDs follow */
242 if (frame->can_id & CAN_EFF_FLAG) {
243 can_id = (frame->can_id & CAN_EFF_MASK)
244 << (MSCAN_EFF_RTR_SHIFT + 1);
245 if (rtr)
246 can_id |= 1 << MSCAN_EFF_RTR_SHIFT;
247 out_be16(&regs->tx.idr3_2, can_id);
248
249 can_id >>= 16;
250		/* EFF_FLAGS are in between the IDs :( */
251 can_id = (can_id & 0x7) | ((can_id << 2) & 0xffe0)
252 | MSCAN_EFF_FLAGS;
253 } else {
254 can_id = (frame->can_id & CAN_SFF_MASK)
255 << (MSCAN_SFF_RTR_SHIFT + 1);
256 if (rtr)
257 can_id |= 1 << MSCAN_SFF_RTR_SHIFT;
258 }
259 out_be16(&regs->tx.idr1_0, can_id);
260
261 if (!rtr) {
262 void __iomem *data = &regs->tx.dsr1_0;
263 u16 *payload = (u16 *)frame->data;
264
265 /* It is safe to write into dsr[dlc+1] */
266 for (i = 0; i < (frame->can_dlc + 1) / 2; i++) {
267 out_be16(data, *payload++);
268 data += 2 + _MSCAN_RESERVED_DSR_SIZE;
269 }
270 }
271
272 out_8(&regs->tx.dlr, frame->can_dlc);
273 out_8(&regs->tx.tbpr, priv->cur_pri);
274
275 /* Start transmission. */
276 out_8(&regs->cantflg, 1 << buf_id);
277
278 if (!test_bit(F_TX_PROGRESS, &priv->flags))
279 dev->trans_start = jiffies;
280
281 list_add_tail(&priv->tx_queue[buf_id].list, &priv->tx_head);
282
283 can_put_echo_skb(skb, dev, buf_id);
284
285 /* Enable interrupt. */
286 priv->tx_active |= 1 << buf_id;
287 out_8(&regs->cantier, priv->tx_active);
288
289 return NETDEV_TX_OK;
290}
291
292/* This function returns the old state to see where we came from */
293static enum can_state check_set_state(struct net_device *dev, u8 canrflg)
294{
295 struct mscan_priv *priv = netdev_priv(dev);
296 enum can_state state, old_state = priv->can.state;
297
298 if (canrflg & MSCAN_CSCIF && old_state <= CAN_STATE_BUS_OFF) {
299 state = state_map[max(MSCAN_STATE_RX(canrflg),
300 MSCAN_STATE_TX(canrflg))];
301 priv->can.state = state;
302 }
303 return old_state;
304}
305
306static void mscan_get_rx_frame(struct net_device *dev, struct can_frame *frame)
307{
308 struct mscan_priv *priv = netdev_priv(dev);
309 struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
310 u32 can_id;
311 int i;
312
313 can_id = in_be16(&regs->rx.idr1_0);
314 if (can_id & (1 << 3)) {
315 frame->can_id = CAN_EFF_FLAG;
316 can_id = ((can_id << 16) | in_be16(&regs->rx.idr3_2));
317 can_id = ((can_id & 0xffe00000) |
318 ((can_id & 0x7ffff) << 2)) >> 2;
319 } else {
320 can_id >>= 4;
321 frame->can_id = 0;
322 }
323
324 frame->can_id |= can_id >> 1;
325 if (can_id & 1)
326 frame->can_id |= CAN_RTR_FLAG;
327
328 frame->can_dlc = get_can_dlc(in_8(&regs->rx.dlr) & 0xf);
329
330 if (!(frame->can_id & CAN_RTR_FLAG)) {
331 void __iomem *data = &regs->rx.dsr1_0;
332 u16 *payload = (u16 *)frame->data;
333
334 for (i = 0; i < (frame->can_dlc + 1) / 2; i++) {
335 *payload++ = in_be16(data);
336 data += 2 + _MSCAN_RESERVED_DSR_SIZE;
337 }
338 }
339
340 out_8(&regs->canrflg, MSCAN_RXF);
341}
342
343static void mscan_get_err_frame(struct net_device *dev, struct can_frame *frame,
344 u8 canrflg)
345{
346 struct mscan_priv *priv = netdev_priv(dev);
347 struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
348 struct net_device_stats *stats = &dev->stats;
349 enum can_state old_state;
350
351 dev_dbg(dev->dev.parent, "error interrupt (canrflg=%#x)\n", canrflg);
352 frame->can_id = CAN_ERR_FLAG;
353
354 if (canrflg & MSCAN_OVRIF) {
355 frame->can_id |= CAN_ERR_CRTL;
356 frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
357 stats->rx_over_errors++;
358 stats->rx_errors++;
359 } else {
360 frame->data[1] = 0;
361 }
362
363 old_state = check_set_state(dev, canrflg);
364 /* State changed */
365 if (old_state != priv->can.state) {
366 switch (priv->can.state) {
367 case CAN_STATE_ERROR_WARNING:
368 frame->can_id |= CAN_ERR_CRTL;
369 priv->can.can_stats.error_warning++;
370 if ((priv->shadow_statflg & MSCAN_RSTAT_MSK) <
371 (canrflg & MSCAN_RSTAT_MSK))
372 frame->data[1] |= CAN_ERR_CRTL_RX_WARNING;
373 if ((priv->shadow_statflg & MSCAN_TSTAT_MSK) <
374 (canrflg & MSCAN_TSTAT_MSK))
375 frame->data[1] |= CAN_ERR_CRTL_TX_WARNING;
376 break;
377 case CAN_STATE_ERROR_PASSIVE:
378 frame->can_id |= CAN_ERR_CRTL;
379 priv->can.can_stats.error_passive++;
380 frame->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
381 break;
382 case CAN_STATE_BUS_OFF:
383 frame->can_id |= CAN_ERR_BUSOFF;
384 /*
385			 * The MSCAN on the MPC5200 recovers from bus-off
386			 * automatically. To avoid that, we stop the chip by
387			 * doing a lightweight stop (we are in irq context).
388 */
389 if (priv->type != MSCAN_TYPE_MPC5121) {
390 out_8(&regs->cantier, 0);
391 out_8(&regs->canrier, 0);
392 setbits8(&regs->canctl0,
393 MSCAN_SLPRQ | MSCAN_INITRQ);
394 }
395 can_bus_off(dev);
396 break;
397 default:
398 break;
399 }
400 }
401 priv->shadow_statflg = canrflg & MSCAN_STAT_MSK;
402 frame->can_dlc = CAN_ERR_DLC;
403 out_8(&regs->canrflg, MSCAN_ERR_IF);
404}
405
406static int mscan_rx_poll(struct napi_struct *napi, int quota)
407{
408 struct mscan_priv *priv = container_of(napi, struct mscan_priv, napi);
409 struct net_device *dev = napi->dev;
410 struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
411 struct net_device_stats *stats = &dev->stats;
412 int npackets = 0;
413 int ret = 1;
414 struct sk_buff *skb;
415 struct can_frame *frame;
416 u8 canrflg;
417
418 while (npackets < quota) {
419 canrflg = in_8(&regs->canrflg);
420 if (!(canrflg & (MSCAN_RXF | MSCAN_ERR_IF)))
421 break;
422
423 skb = alloc_can_skb(dev, &frame);
424 if (!skb) {
425 if (printk_ratelimit())
426 dev_notice(dev->dev.parent, "packet dropped\n");
427 stats->rx_dropped++;
428 out_8(&regs->canrflg, canrflg);
429 continue;
430 }
431
432 if (canrflg & MSCAN_RXF)
433 mscan_get_rx_frame(dev, frame);
434 else if (canrflg & MSCAN_ERR_IF)
435 mscan_get_err_frame(dev, frame, canrflg);
436
437 stats->rx_packets++;
438 stats->rx_bytes += frame->can_dlc;
439 npackets++;
440 netif_receive_skb(skb);
441 }
442
443 if (!(in_8(&regs->canrflg) & (MSCAN_RXF | MSCAN_ERR_IF))) {
444 napi_complete(&priv->napi);
445 clear_bit(F_RX_PROGRESS, &priv->flags);
446 if (priv->can.state < CAN_STATE_BUS_OFF)
447 out_8(&regs->canrier, priv->shadow_canrier);
448 ret = 0;
449 }
450 return ret;
451}
452
453static irqreturn_t mscan_isr(int irq, void *dev_id)
454{
455 struct net_device *dev = (struct net_device *)dev_id;
456 struct mscan_priv *priv = netdev_priv(dev);
457 struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
458 struct net_device_stats *stats = &dev->stats;
459 u8 cantier, cantflg, canrflg;
460 irqreturn_t ret = IRQ_NONE;
461
462 cantier = in_8(&regs->cantier) & MSCAN_TXE;
463 cantflg = in_8(&regs->cantflg) & cantier;
464
465 if (cantier && cantflg) {
466 struct list_head *tmp, *pos;
467
468 list_for_each_safe(pos, tmp, &priv->tx_head) {
469 struct tx_queue_entry *entry =
470 list_entry(pos, struct tx_queue_entry, list);
471 u8 mask = entry->mask;
472
473 if (!(cantflg & mask))
474 continue;
475
476 out_8(&regs->cantbsel, mask);
477 stats->tx_bytes += in_8(&regs->tx.dlr);
478 stats->tx_packets++;
479 can_get_echo_skb(dev, entry->id);
480 priv->tx_active &= ~mask;
481 list_del(pos);
482 }
483
484 if (list_empty(&priv->tx_head)) {
485 clear_bit(F_TX_WAIT_ALL, &priv->flags);
486 clear_bit(F_TX_PROGRESS, &priv->flags);
487 priv->cur_pri = 0;
488 } else {
489 dev->trans_start = jiffies;
490 }
491
492 if (!test_bit(F_TX_WAIT_ALL, &priv->flags))
493 netif_wake_queue(dev);
494
495 out_8(&regs->cantier, priv->tx_active);
496 ret = IRQ_HANDLED;
497 }
498
499 canrflg = in_8(&regs->canrflg);
500 if ((canrflg & ~MSCAN_STAT_MSK) &&
501 !test_and_set_bit(F_RX_PROGRESS, &priv->flags)) {
502 if (canrflg & ~MSCAN_STAT_MSK) {
503 priv->shadow_canrier = in_8(&regs->canrier);
504 out_8(&regs->canrier, 0);
505 napi_schedule(&priv->napi);
506 ret = IRQ_HANDLED;
507 } else {
508 clear_bit(F_RX_PROGRESS, &priv->flags);
509 }
510 }
511 return ret;
512}
513
514static int mscan_do_set_mode(struct net_device *dev, enum can_mode mode)
515{
516 struct mscan_priv *priv = netdev_priv(dev);
517 int ret = 0;
518
519 if (!priv->open_time)
520 return -EINVAL;
521
522 switch (mode) {
523 case CAN_MODE_START:
524 ret = mscan_restart(dev);
525 if (ret)
526 break;
527 if (netif_queue_stopped(dev))
528 netif_wake_queue(dev);
529 break;
530
531 default:
532 ret = -EOPNOTSUPP;
533 break;
534 }
535 return ret;
536}
537
538static int mscan_do_set_bittiming(struct net_device *dev)
539{
540 struct mscan_priv *priv = netdev_priv(dev);
541 struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
542 struct can_bittiming *bt = &priv->can.bittiming;
543 u8 btr0, btr1;
544
545 btr0 = BTR0_SET_BRP(bt->brp) | BTR0_SET_SJW(bt->sjw);
546 btr1 = (BTR1_SET_TSEG1(bt->prop_seg + bt->phase_seg1) |
547 BTR1_SET_TSEG2(bt->phase_seg2) |
548 BTR1_SET_SAM(priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES));
549
550 dev_info(dev->dev.parent, "setting BTR0=0x%02x BTR1=0x%02x\n",
551 btr0, btr1);
552
553 out_8(&regs->canbtr0, btr0);
554 out_8(&regs->canbtr1, btr1);
555
556 return 0;
557}
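
As a worked example of the BTR register composition above (the BTRx_SET_* macros are defined in mscan.h further down in this patch), take hypothetical bit-timing values brp = 4, sjw = 1, prop_seg = 6, phase_seg1 = 7, phase_seg2 = 2 and no triple sampling:

#include <linux/can/dev.h>
#include "mscan.h"

/* Hypothetical timing values, for illustration only. */
static void mscan_btr_example(void)
{
	struct can_bittiming bt = {
		.brp = 4, .sjw = 1,
		.prop_seg = 6, .phase_seg1 = 7, .phase_seg2 = 2,
	};
	u8 btr0, btr1;

	/* tseg1 = prop_seg + phase_seg1 = 13 */
	btr0 = BTR0_SET_BRP(bt.brp) | BTR0_SET_SJW(bt.sjw);	/* 0x03 */
	btr1 = BTR1_SET_TSEG1(bt.prop_seg + bt.phase_seg1) |
	       BTR1_SET_TSEG2(bt.phase_seg2) |
	       BTR1_SET_SAM(0);					/* 0x1c */

	(void)btr0;
	(void)btr1;
}

With these values the dev_info() above would report "setting BTR0=0x03 BTR1=0x1c".
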
558
559static int mscan_open(struct net_device *dev)
560{
561 int ret;
562 struct mscan_priv *priv = netdev_priv(dev);
563 struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
564
565 /* common open */
566 ret = open_candev(dev);
567 if (ret)
568 return ret;
569
570 napi_enable(&priv->napi);
571
572 ret = request_irq(dev->irq, mscan_isr, 0, dev->name, dev);
573 if (ret < 0) {
574 dev_err(dev->dev.parent, "failed to attach interrupt\n");
575 goto exit_napi_disable;
576 }
577
578 priv->open_time = jiffies;
579
580 clrbits8(&regs->canctl1, MSCAN_LISTEN);
581
582 ret = mscan_start(dev);
583 if (ret)
584 goto exit_free_irq;
585
586 netif_start_queue(dev);
587
588 return 0;
589
590exit_free_irq:
591 priv->open_time = 0;
592 free_irq(dev->irq, dev);
593exit_napi_disable:
594 napi_disable(&priv->napi);
595 close_candev(dev);
596 return ret;
597}
598
599static int mscan_close(struct net_device *dev)
600{
601 struct mscan_priv *priv = netdev_priv(dev);
602 struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
603
604 netif_stop_queue(dev);
605 napi_disable(&priv->napi);
606
607 out_8(&regs->cantier, 0);
608 out_8(&regs->canrier, 0);
609 mscan_set_mode(dev, MSCAN_INIT_MODE);
610 close_candev(dev);
611 free_irq(dev->irq, dev);
612 priv->open_time = 0;
613
614 return 0;
615}
616
617static const struct net_device_ops mscan_netdev_ops = {
618 .ndo_open = mscan_open,
619 .ndo_stop = mscan_close,
620 .ndo_start_xmit = mscan_start_xmit,
621};
622
623int register_mscandev(struct net_device *dev, int mscan_clksrc)
624{
625 struct mscan_priv *priv = netdev_priv(dev);
626 struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
627 u8 ctl1;
628
629 ctl1 = in_8(&regs->canctl1);
630 if (mscan_clksrc)
631 ctl1 |= MSCAN_CLKSRC;
632 else
633 ctl1 &= ~MSCAN_CLKSRC;
634
635 if (priv->type == MSCAN_TYPE_MPC5121)
636 ctl1 |= MSCAN_BORM; /* bus-off recovery upon request */
637
638 ctl1 |= MSCAN_CANE;
639 out_8(&regs->canctl1, ctl1);
640 udelay(100);
641
642 /* acceptance mask/acceptance code (accept everything) */
643 out_be16(&regs->canidar1_0, 0);
644 out_be16(&regs->canidar3_2, 0);
645 out_be16(&regs->canidar5_4, 0);
646 out_be16(&regs->canidar7_6, 0);
647
648 out_be16(&regs->canidmr1_0, 0xffff);
649 out_be16(&regs->canidmr3_2, 0xffff);
650 out_be16(&regs->canidmr5_4, 0xffff);
651 out_be16(&regs->canidmr7_6, 0xffff);
652 /* Two 32 bit Acceptance Filters */
653 out_8(&regs->canidac, MSCAN_AF_32BIT);
654
655 mscan_set_mode(dev, MSCAN_INIT_MODE);
656
657 return register_candev(dev);
658}
659
660void unregister_mscandev(struct net_device *dev)
661{
662 struct mscan_priv *priv = netdev_priv(dev);
663 struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
664 mscan_set_mode(dev, MSCAN_INIT_MODE);
665 clrbits8(&regs->canctl1, MSCAN_CANE);
666 unregister_candev(dev);
667}
668
669struct net_device *alloc_mscandev(void)
670{
671 struct net_device *dev;
672 struct mscan_priv *priv;
673 int i;
674
675 dev = alloc_candev(sizeof(struct mscan_priv), MSCAN_ECHO_SKB_MAX);
676 if (!dev)
677 return NULL;
678 priv = netdev_priv(dev);
679
680 dev->netdev_ops = &mscan_netdev_ops;
681
682 dev->flags |= IFF_ECHO; /* we support local echo */
683
684 netif_napi_add(dev, &priv->napi, mscan_rx_poll, 8);
685
686 priv->can.bittiming_const = &mscan_bittiming_const;
687 priv->can.do_set_bittiming = mscan_do_set_bittiming;
688 priv->can.do_set_mode = mscan_do_set_mode;
689 priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
690
691 for (i = 0; i < TX_QUEUE_SIZE; i++) {
692 priv->tx_queue[i].id = i;
693 priv->tx_queue[i].mask = 1 << i;
694 }
695
696 return dev;
697}
698
699MODULE_AUTHOR("Andrey Volkov <avolkov@varma-el.com>");
700MODULE_LICENSE("GPL v2");
701MODULE_DESCRIPTION("CAN port driver for MSCAN based chips");
diff --git a/drivers/net/can/mscan/mscan.h b/drivers/net/can/mscan/mscan.h
new file mode 100644
index 000000000000..4ff966473bc9
--- /dev/null
+++ b/drivers/net/can/mscan/mscan.h
@@ -0,0 +1,302 @@
1/*
2 * Definitions of consts/structs to drive the Freescale MSCAN.
3 *
4 * Copyright (C) 2005-2006 Andrey Volkov <avolkov@varma-el.com>,
5 * Varma Electronics Oy
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the version 2 of the GNU General Public License
9 * as published by the Free Software Foundation
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#ifndef __MSCAN_H__
22#define __MSCAN_H__
23
24#include <linux/types.h>
25
26/* MSCAN control register 0 (CANCTL0) bits */
27#define MSCAN_RXFRM 0x80
28#define MSCAN_RXACT 0x40
29#define MSCAN_CSWAI 0x20
30#define MSCAN_SYNCH 0x10
31#define MSCAN_TIME 0x08
32#define MSCAN_WUPE 0x04
33#define MSCAN_SLPRQ 0x02
34#define MSCAN_INITRQ 0x01
35
36/* MSCAN control register 1 (CANCTL1) bits */
37#define MSCAN_CANE 0x80
38#define MSCAN_CLKSRC 0x40
39#define MSCAN_LOOPB 0x20
40#define MSCAN_LISTEN 0x10
41#define MSCAN_BORM 0x08
42#define MSCAN_WUPM 0x04
43#define MSCAN_SLPAK 0x02
44#define MSCAN_INITAK 0x01
45
46/* Use the MPC5XXX MSCAN variant? */
47#ifdef CONFIG_PPC
48#define MSCAN_FOR_MPC5XXX
49#endif
50
51#ifdef MSCAN_FOR_MPC5XXX
52#define MSCAN_CLKSRC_BUS 0
53#define MSCAN_CLKSRC_XTAL MSCAN_CLKSRC
54#define MSCAN_CLKSRC_IPS MSCAN_CLKSRC
55#else
56#define MSCAN_CLKSRC_BUS MSCAN_CLKSRC
57#define MSCAN_CLKSRC_XTAL 0
58#endif
59
60/* MSCAN receiver flag register (CANRFLG) bits */
61#define MSCAN_WUPIF 0x80
62#define MSCAN_CSCIF 0x40
63#define MSCAN_RSTAT1 0x20
64#define MSCAN_RSTAT0 0x10
65#define MSCAN_TSTAT1 0x08
66#define MSCAN_TSTAT0 0x04
67#define MSCAN_OVRIF 0x02
68#define MSCAN_RXF 0x01
69#define MSCAN_ERR_IF (MSCAN_OVRIF | MSCAN_CSCIF)
70#define MSCAN_RSTAT_MSK (MSCAN_RSTAT1 | MSCAN_RSTAT0)
71#define MSCAN_TSTAT_MSK (MSCAN_TSTAT1 | MSCAN_TSTAT0)
72#define MSCAN_STAT_MSK (MSCAN_RSTAT_MSK | MSCAN_TSTAT_MSK)
73
74#define MSCAN_STATE_BUS_OFF (MSCAN_RSTAT1 | MSCAN_RSTAT0 | \
75 MSCAN_TSTAT1 | MSCAN_TSTAT0)
76#define MSCAN_STATE_TX(canrflg) (((canrflg)&MSCAN_TSTAT_MSK)>>2)
77#define MSCAN_STATE_RX(canrflg) (((canrflg)&MSCAN_RSTAT_MSK)>>4)
78#define MSCAN_STATE_ACTIVE 0
79#define MSCAN_STATE_WARNING 1
80#define MSCAN_STATE_PASSIVE 2
81#define MSCAN_STATE_BUSOFF 3
82
83/* MSCAN receiver interrupt enable register (CANRIER) bits */
84#define MSCAN_WUPIE 0x80
85#define MSCAN_CSCIE 0x40
86#define MSCAN_RSTATE1 0x20
87#define MSCAN_RSTATE0 0x10
88#define MSCAN_TSTATE1 0x08
89#define MSCAN_TSTATE0 0x04
90#define MSCAN_OVRIE 0x02
91#define MSCAN_RXFIE 0x01
92
93/* MSCAN transmitter flag register (CANTFLG) bits */
94#define MSCAN_TXE2 0x04
95#define MSCAN_TXE1 0x02
96#define MSCAN_TXE0 0x01
97#define MSCAN_TXE (MSCAN_TXE2 | MSCAN_TXE1 | MSCAN_TXE0)
98
99/* MSCAN transmitter interrupt enable register (CANTIER) bits */
100#define MSCAN_TXIE2 0x04
101#define MSCAN_TXIE1 0x02
102#define MSCAN_TXIE0 0x01
103#define MSCAN_TXIE (MSCAN_TXIE2 | MSCAN_TXIE1 | MSCAN_TXIE0)
104
105/* MSCAN transmitter message abort request (CANTARQ) bits */
106#define MSCAN_ABTRQ2 0x04
107#define MSCAN_ABTRQ1 0x02
108#define MSCAN_ABTRQ0 0x01
109
110/* MSCAN transmitter message abort ack (CANTAAK) bits */
111#define MSCAN_ABTAK2 0x04
112#define MSCAN_ABTAK1 0x02
113#define MSCAN_ABTAK0 0x01
114
115/* MSCAN transmit buffer selection (CANTBSEL) bits */
116#define MSCAN_TX2 0x04
117#define MSCAN_TX1 0x02
118#define MSCAN_TX0 0x01
119
120/* MSCAN ID acceptance control register (CANIDAC) bits */
121#define MSCAN_IDAM1 0x20
122#define MSCAN_IDAM0 0x10
123#define MSCAN_IDHIT2 0x04
124#define MSCAN_IDHIT1 0x02
125#define MSCAN_IDHIT0 0x01
126
127#define MSCAN_AF_32BIT 0x00
128#define MSCAN_AF_16BIT MSCAN_IDAM0
129#define MSCAN_AF_8BIT MSCAN_IDAM1
130#define MSCAN_AF_CLOSED (MSCAN_IDAM0|MSCAN_IDAM1)
131#define MSCAN_AF_MASK (~(MSCAN_IDAM0|MSCAN_IDAM1))
132
133/* MSCAN Miscellaneous Register (CANMISC) bits */
134#define MSCAN_BOHOLD 0x01
135
136/* MSCAN Identifier Register (IDR) bits */
137#define MSCAN_SFF_RTR_SHIFT 4
138#define MSCAN_EFF_RTR_SHIFT 0
139#define MSCAN_EFF_FLAGS 0x18 /* IDE + SRR */
140
141#ifdef MSCAN_FOR_MPC5XXX
142#define _MSCAN_RESERVED_(n, num) u8 _res##n[num]
143#define _MSCAN_RESERVED_DSR_SIZE 2
144#else
145#define _MSCAN_RESERVED_(n, num)
146#define _MSCAN_RESERVED_DSR_SIZE 0
147#endif
148
149/* Structure of the hardware registers */
150struct mscan_regs {
151 /* (see doc S12MSCANV3/D) MPC5200 MSCAN */
152 u8 canctl0; /* + 0x00 0x00 */
153 u8 canctl1; /* + 0x01 0x01 */
154 _MSCAN_RESERVED_(1, 2); /* + 0x02 */
155 u8 canbtr0; /* + 0x04 0x02 */
156 u8 canbtr1; /* + 0x05 0x03 */
157 _MSCAN_RESERVED_(2, 2); /* + 0x06 */
158 u8 canrflg; /* + 0x08 0x04 */
159 u8 canrier; /* + 0x09 0x05 */
160 _MSCAN_RESERVED_(3, 2); /* + 0x0a */
161 u8 cantflg; /* + 0x0c 0x06 */
162 u8 cantier; /* + 0x0d 0x07 */
163 _MSCAN_RESERVED_(4, 2); /* + 0x0e */
164 u8 cantarq; /* + 0x10 0x08 */
165 u8 cantaak; /* + 0x11 0x09 */
166 _MSCAN_RESERVED_(5, 2); /* + 0x12 */
167 u8 cantbsel; /* + 0x14 0x0a */
168 u8 canidac; /* + 0x15 0x0b */
169 u8 reserved; /* + 0x16 0x0c */
170 _MSCAN_RESERVED_(6, 2); /* + 0x17 */
171 u8 canmisc; /* + 0x19 0x0d */
172 _MSCAN_RESERVED_(7, 2); /* + 0x1a */
173 u8 canrxerr; /* + 0x1c 0x0e */
174 u8 cantxerr; /* + 0x1d 0x0f */
175 _MSCAN_RESERVED_(8, 2); /* + 0x1e */
176 u16 canidar1_0; /* + 0x20 0x10 */
177 _MSCAN_RESERVED_(9, 2); /* + 0x22 */
178 u16 canidar3_2; /* + 0x24 0x12 */
179 _MSCAN_RESERVED_(10, 2); /* + 0x26 */
180 u16 canidmr1_0; /* + 0x28 0x14 */
181 _MSCAN_RESERVED_(11, 2); /* + 0x2a */
182 u16 canidmr3_2; /* + 0x2c 0x16 */
183 _MSCAN_RESERVED_(12, 2); /* + 0x2e */
184 u16 canidar5_4; /* + 0x30 0x18 */
185 _MSCAN_RESERVED_(13, 2); /* + 0x32 */
186 u16 canidar7_6; /* + 0x34 0x1a */
187 _MSCAN_RESERVED_(14, 2); /* + 0x36 */
188 u16 canidmr5_4; /* + 0x38 0x1c */
189 _MSCAN_RESERVED_(15, 2); /* + 0x3a */
190 u16 canidmr7_6; /* + 0x3c 0x1e */
191 _MSCAN_RESERVED_(16, 2); /* + 0x3e */
192 struct {
193 u16 idr1_0; /* + 0x40 0x20 */
194 _MSCAN_RESERVED_(17, 2); /* + 0x42 */
195 u16 idr3_2; /* + 0x44 0x22 */
196 _MSCAN_RESERVED_(18, 2); /* + 0x46 */
197 u16 dsr1_0; /* + 0x48 0x24 */
198 _MSCAN_RESERVED_(19, 2); /* + 0x4a */
199 u16 dsr3_2; /* + 0x4c 0x26 */
200 _MSCAN_RESERVED_(20, 2); /* + 0x4e */
201 u16 dsr5_4; /* + 0x50 0x28 */
202 _MSCAN_RESERVED_(21, 2); /* + 0x52 */
203 u16 dsr7_6; /* + 0x54 0x2a */
204 _MSCAN_RESERVED_(22, 2); /* + 0x56 */
205 u8 dlr; /* + 0x58 0x2c */
206 u8 reserved; /* + 0x59 0x2d */
207 _MSCAN_RESERVED_(23, 2); /* + 0x5a */
208 u16 time; /* + 0x5c 0x2e */
209 } rx;
210 _MSCAN_RESERVED_(24, 2); /* + 0x5e */
211 struct {
212 u16 idr1_0; /* + 0x60 0x30 */
213 _MSCAN_RESERVED_(25, 2); /* + 0x62 */
214 u16 idr3_2; /* + 0x64 0x32 */
215 _MSCAN_RESERVED_(26, 2); /* + 0x66 */
216 u16 dsr1_0; /* + 0x68 0x34 */
217 _MSCAN_RESERVED_(27, 2); /* + 0x6a */
218 u16 dsr3_2; /* + 0x6c 0x36 */
219 _MSCAN_RESERVED_(28, 2); /* + 0x6e */
220 u16 dsr5_4; /* + 0x70 0x38 */
221 _MSCAN_RESERVED_(29, 2); /* + 0x72 */
222 u16 dsr7_6; /* + 0x74 0x3a */
223 _MSCAN_RESERVED_(30, 2); /* + 0x76 */
224 u8 dlr; /* + 0x78 0x3c */
225 u8 tbpr; /* + 0x79 0x3d */
226 _MSCAN_RESERVED_(31, 2); /* + 0x7a */
227 u16 time; /* + 0x7c 0x3e */
228 } tx;
229 _MSCAN_RESERVED_(32, 2); /* + 0x7e */
230} __attribute__ ((packed));
231
232#undef _MSCAN_RESERVED_
233#define MSCAN_REGION	sizeof(struct mscan_regs)
234
235#define MSCAN_NORMAL_MODE 0
236#define MSCAN_SLEEP_MODE MSCAN_SLPRQ
237#define MSCAN_INIT_MODE (MSCAN_INITRQ | MSCAN_SLPRQ)
238#define MSCAN_POWEROFF_MODE (MSCAN_CSWAI | MSCAN_SLPRQ)
239#define MSCAN_SET_MODE_RETRIES 255
240#define MSCAN_ECHO_SKB_MAX 3
241#define MSCAN_RX_INTS_ENABLE (MSCAN_OVRIE | MSCAN_RXFIE | MSCAN_CSCIE | \
242 MSCAN_RSTATE1 | MSCAN_RSTATE0 | \
243 MSCAN_TSTATE1 | MSCAN_TSTATE0)
244
245/* MSCAN type variants */
246enum {
247 MSCAN_TYPE_MPC5200,
248 MSCAN_TYPE_MPC5121
249};
250
251#define BTR0_BRP_MASK 0x3f
252#define BTR0_SJW_SHIFT 6
253#define BTR0_SJW_MASK (0x3 << BTR0_SJW_SHIFT)
254
255#define BTR1_TSEG1_MASK 0xf
256#define BTR1_TSEG2_SHIFT 4
257#define BTR1_TSEG2_MASK (0x7 << BTR1_TSEG2_SHIFT)
258#define BTR1_SAM_SHIFT 7
259
260#define BTR0_SET_BRP(brp) (((brp) - 1) & BTR0_BRP_MASK)
261#define BTR0_SET_SJW(sjw) ((((sjw) - 1) << BTR0_SJW_SHIFT) & \
262 BTR0_SJW_MASK)
263
264#define BTR1_SET_TSEG1(tseg1) (((tseg1) - 1) & BTR1_TSEG1_MASK)
265#define BTR1_SET_TSEG2(tseg2) ((((tseg2) - 1) << BTR1_TSEG2_SHIFT) & \
266 BTR1_TSEG2_MASK)
267#define BTR1_SET_SAM(sam) ((sam) ? 1 << BTR1_SAM_SHIFT : 0)
268
269#define F_RX_PROGRESS 0
270#define F_TX_PROGRESS 1
271#define F_TX_WAIT_ALL 2
272
273#define TX_QUEUE_SIZE 3
274
275struct tx_queue_entry {
276 struct list_head list;
277 u8 mask;
278 u8 id;
279};
280
281struct mscan_priv {
282 struct can_priv can; /* must be the first member */
283 unsigned int type; /* MSCAN type variants */
284 long open_time;
285 unsigned long flags;
286 void __iomem *reg_base; /* ioremap'ed address to registers */
287 u8 shadow_statflg;
288 u8 shadow_canrier;
289 u8 cur_pri;
290 u8 prev_buf_id;
291 u8 tx_active;
292
293 struct list_head tx_head;
294 struct tx_queue_entry tx_queue[TX_QUEUE_SIZE];
295 struct napi_struct napi;
296};
297
298extern struct net_device *alloc_mscandev(void);
299extern int register_mscandev(struct net_device *dev, int mscan_clksrc);
300extern void unregister_mscandev(struct net_device *dev);
301
302#endif /* __MSCAN_H__ */
diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig
index 4c674927f247..9e277d64a318 100644
--- a/drivers/net/can/sja1000/Kconfig
+++ b/drivers/net/can/sja1000/Kconfig
@@ -44,4 +44,16 @@ config CAN_KVASER_PCI
44 This driver is for the the PCIcanx and PCIcan cards (1, 2 or 44 This driver is for the the PCIcanx and PCIcan cards (1, 2 or
45 4 channel) from Kvaser (http://www.kvaser.com). 45 4 channel) from Kvaser (http://www.kvaser.com).
46 46
47config CAN_PLX_PCI
48 tristate "PLX90xx PCI-bridge based Cards"
49 depends on PCI
50 ---help---
51 This driver is for CAN interface cards based on
52 the PLX90xx PCI bridge.
53	  The driver currently supports:
54 - Adlink PCI-7841/cPCI-7841 card (http://www.adlinktech.com/)
55 - Adlink PCI-7841/cPCI-7841 SE card
56 - Marathon CAN-bus-PCI card (http://www.marathon.ru/)
57 - TEWS TECHNOLOGIES TPMC810 card (http://www.tews.com/)
58
47endif 59endif
diff --git a/drivers/net/can/sja1000/Makefile b/drivers/net/can/sja1000/Makefile
index 9d245ac03965..ce924553995d 100644
--- a/drivers/net/can/sja1000/Makefile
+++ b/drivers/net/can/sja1000/Makefile
@@ -8,5 +8,6 @@ obj-$(CONFIG_CAN_SJA1000_PLATFORM) += sja1000_platform.o
8obj-$(CONFIG_CAN_SJA1000_OF_PLATFORM) += sja1000_of_platform.o 8obj-$(CONFIG_CAN_SJA1000_OF_PLATFORM) += sja1000_of_platform.o
9obj-$(CONFIG_CAN_EMS_PCI) += ems_pci.o 9obj-$(CONFIG_CAN_EMS_PCI) += ems_pci.o
10obj-$(CONFIG_CAN_KVASER_PCI) += kvaser_pci.o 10obj-$(CONFIG_CAN_KVASER_PCI) += kvaser_pci.o
11obj-$(CONFIG_CAN_PLX_PCI) += plx_pci.o
11 12
12ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG 13ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/sja1000/ems_pci.c b/drivers/net/can/sja1000/ems_pci.c
index fd04789d3370..5f53da0bc40c 100644
--- a/drivers/net/can/sja1000/ems_pci.c
+++ b/drivers/net/can/sja1000/ems_pci.c
@@ -22,6 +22,7 @@
22#include <linux/interrupt.h> 22#include <linux/interrupt.h>
23#include <linux/netdevice.h> 23#include <linux/netdevice.h>
24#include <linux/delay.h> 24#include <linux/delay.h>
25#include <linux/slab.h>
25#include <linux/pci.h> 26#include <linux/pci.h>
26#include <linux/can.h> 27#include <linux/can.h>
27#include <linux/can/dev.h> 28#include <linux/can/dev.h>
@@ -102,7 +103,7 @@ struct ems_pci_card {
102 103
103#define EMS_PCI_BASE_SIZE 4096 /* size of controller area */ 104#define EMS_PCI_BASE_SIZE 4096 /* size of controller area */
104 105
105static struct pci_device_id ems_pci_tbl[] = { 106static DEFINE_PCI_DEVICE_TABLE(ems_pci_tbl) = {
106 /* CPC-PCI v1 */ 107 /* CPC-PCI v1 */
107 {PCI_VENDOR_ID_SIEMENS, 0x2104, PCI_ANY_ID, PCI_ANY_ID,}, 108 {PCI_VENDOR_ID_SIEMENS, 0x2104, PCI_ANY_ID, PCI_ANY_ID,},
108 /* CPC-PCI v2 */ 109 /* CPC-PCI v2 */
diff --git a/drivers/net/can/sja1000/kvaser_pci.c b/drivers/net/can/sja1000/kvaser_pci.c
index 7dd7769b9713..441e776a7f59 100644
--- a/drivers/net/can/sja1000/kvaser_pci.c
+++ b/drivers/net/can/sja1000/kvaser_pci.c
@@ -109,7 +109,7 @@ struct kvaser_pci {
109#define KVASER_PCI_VENDOR_ID2 0x1a07 /* the PCI device and vendor IDs */ 109#define KVASER_PCI_VENDOR_ID2 0x1a07 /* the PCI device and vendor IDs */
110#define KVASER_PCI_DEVICE_ID2 0x0008 110#define KVASER_PCI_DEVICE_ID2 0x0008
111 111
112static struct pci_device_id kvaser_pci_tbl[] = { 112static DEFINE_PCI_DEVICE_TABLE(kvaser_pci_tbl) = {
113 {KVASER_PCI_VENDOR_ID1, KVASER_PCI_DEVICE_ID1, PCI_ANY_ID, PCI_ANY_ID,}, 113 {KVASER_PCI_VENDOR_ID1, KVASER_PCI_DEVICE_ID1, PCI_ANY_ID, PCI_ANY_ID,},
114 {KVASER_PCI_VENDOR_ID2, KVASER_PCI_DEVICE_ID2, PCI_ANY_ID, PCI_ANY_ID,}, 114 {KVASER_PCI_VENDOR_ID2, KVASER_PCI_DEVICE_ID2, PCI_ANY_ID, PCI_ANY_ID,},
115 { 0,} 115 { 0,}
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c
new file mode 100644
index 000000000000..4aff4070db96
--- /dev/null
+++ b/drivers/net/can/sja1000/plx_pci.c
@@ -0,0 +1,473 @@
1/*
2 * Copyright (C) 2008-2010 Pavel Cheblakov <P.B.Cheblakov@inp.nsk.su>
3 *
4 * Derived from the ems_pci.c driver:
5 * Copyright (C) 2007 Wolfgang Grandegger <wg@grandegger.com>
6 * Copyright (C) 2008 Markus Plessing <plessing@ems-wuensche.com>
7 * Copyright (C) 2008 Sebastian Haas <haas@ems-wuensche.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the version 2 of the GNU General Public License
11 * as published by the Free Software Foundation
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software Foundation,
20 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
21 */
22
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/interrupt.h>
26#include <linux/netdevice.h>
27#include <linux/delay.h>
28#include <linux/slab.h>
29#include <linux/pci.h>
30#include <linux/can.h>
31#include <linux/can/dev.h>
32#include <linux/io.h>
33
34#include "sja1000.h"
35
36#define DRV_NAME "sja1000_plx_pci"
37
38MODULE_AUTHOR("Pavel Cheblakov <P.B.Cheblakov@inp.nsk.su>");
39MODULE_DESCRIPTION("Socket-CAN driver for PLX90xx PCI-bridge cards with "
40 "the SJA1000 chips");
41MODULE_SUPPORTED_DEVICE("Adlink PCI-7841/cPCI-7841, "
42 "Adlink PCI-7841/cPCI-7841 SE, "
43 "Marathon CAN-bus-PCI, "
44 "TEWS TECHNOLOGIES TPMC810");
45MODULE_LICENSE("GPL v2");
46
47#define PLX_PCI_MAX_CHAN 2
48
49struct plx_pci_card {
50 int channels; /* detected channels count */
51 struct net_device *net_dev[PLX_PCI_MAX_CHAN];
52 void __iomem *conf_addr;
53};
54
55#define PLX_PCI_CAN_CLOCK (16000000 / 2)
56
57/* PLX90xx registers */
58#define PLX_INTCSR 0x4c /* Interrupt Control/Status */
59#define PLX_CNTRL 0x50 /* User I/O, Direct Slave Response,
60 * Serial EEPROM, and Initialization
61 * Control register
62 */
63
64#define PLX_LINT1_EN 0x1 /* Local interrupt 1 enable */
65#define PLX_LINT2_EN (1 << 3) /* Local interrupt 2 enable */
66#define PLX_PCI_INT_EN (1 << 6) /* PCI Interrupt Enable */
67#define PLX_PCI_RESET (1 << 30) /* PCI Adapter Software Reset */
68
69/*
70 * The board configuration is probably as follows:
71 * RX1 is connected to ground.
72 * TX1 is not connected.
73 * CLKO is not connected.
74 * Setting the OCR register to 0xDA is a good idea.
75 * This means normal output mode, push-pull and the correct polarity.
76 */
77#define PLX_PCI_OCR (OCR_TX0_PUSHPULL | OCR_TX1_PUSHPULL)
78
79/*
80 * In the CDR register, you should set CBP to 1.
81 * You will probably also want to set the clock divider value to 7
82 * (meaning direct oscillator output) because the second SJA1000 chip
83 * is driven by the first one's CLKOUT output.
84 */
85#define PLX_PCI_CDR (CDR_CBP | CDR_CLKOUT_MASK)
86
87/* SJA1000 Control Register in the BasicCAN Mode */
88#define REG_CR 0x00
89
90/* States of some SJA1000 registers after hardware reset in the BasicCAN mode*/
91#define REG_CR_BASICCAN_INITIAL 0x21
92#define REG_CR_BASICCAN_INITIAL_MASK 0xa1
93#define REG_SR_BASICCAN_INITIAL 0x0c
94#define REG_IR_BASICCAN_INITIAL 0xe0
95
96/* States of some SJA1000 registers after hardware reset in the PeliCAN mode*/
97#define REG_MOD_PELICAN_INITIAL 0x01
98#define REG_SR_PELICAN_INITIAL 0x3c
99#define REG_IR_PELICAN_INITIAL 0x00
100
101#define ADLINK_PCI_VENDOR_ID 0x144A
102#define ADLINK_PCI_DEVICE_ID 0x7841
103
104#define MARATHON_PCI_DEVICE_ID 0x2715
105
106#define TEWS_PCI_VENDOR_ID 0x1498
107#define TEWS_PCI_DEVICE_ID_TMPC810 0x032A
108
109static void plx_pci_reset_common(struct pci_dev *pdev);
110static void plx_pci_reset_marathon(struct pci_dev *pdev);
111
112struct plx_pci_channel_map {
113 u32 bar;
114 u32 offset;
115	u32 size;	/* 0x00 - auto, i.e. the length of the entire BAR */
116};
117
118struct plx_pci_card_info {
119 const char *name;
120 int channel_count;
121 u32 can_clock;
122 u8 ocr; /* output control register */
123 u8 cdr; /* clock divider register */
124
125 /* Parameters for mapping local configuration space */
126 struct plx_pci_channel_map conf_map;
127
128 /* Parameters for mapping the SJA1000 chips */
129 struct plx_pci_channel_map chan_map_tbl[PLX_PCI_MAX_CHAN];
130
131 /* Pointer to device-dependent reset function */
132 void (*reset_func)(struct pci_dev *pdev);
133};
134
135static struct plx_pci_card_info plx_pci_card_info_adlink __devinitdata = {
136 "Adlink PCI-7841/cPCI-7841", 2,
137 PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
138 {1, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x80, 0x80} },
139 &plx_pci_reset_common
140 /* based on PLX9052 */
141};
142
143static struct plx_pci_card_info plx_pci_card_info_adlink_se __devinitdata = {
144 "Adlink PCI-7841/cPCI-7841 SE", 2,
145 PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
146 {0, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x80, 0x80} },
147 &plx_pci_reset_common
148 /* based on PLX9052 */
149};
150
151static struct plx_pci_card_info plx_pci_card_info_marathon __devinitdata = {
152 "Marathon CAN-bus-PCI", 2,
153 PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
154 {0, 0x00, 0x00}, { {2, 0x00, 0x00}, {4, 0x00, 0x00} },
155 &plx_pci_reset_marathon
156 /* based on PLX9052 */
157};
158
159static struct plx_pci_card_info plx_pci_card_info_tews __devinitdata = {
160 "TEWS TECHNOLOGIES TPMC810", 2,
161 PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
162 {0, 0x00, 0x00}, { {2, 0x000, 0x80}, {2, 0x100, 0x80} },
163 &plx_pci_reset_common
164 /* based on PLX9030 */
165};
166
167static DEFINE_PCI_DEVICE_TABLE(plx_pci_tbl) = {
168 {
169 /* Adlink PCI-7841/cPCI-7841 */
170 ADLINK_PCI_VENDOR_ID, ADLINK_PCI_DEVICE_ID,
171 PCI_ANY_ID, PCI_ANY_ID,
172 PCI_CLASS_NETWORK_OTHER << 8, ~0,
173 (kernel_ulong_t)&plx_pci_card_info_adlink
174 },
175 {
176 /* Adlink PCI-7841/cPCI-7841 SE */
177 ADLINK_PCI_VENDOR_ID, ADLINK_PCI_DEVICE_ID,
178 PCI_ANY_ID, PCI_ANY_ID,
179 PCI_CLASS_COMMUNICATION_OTHER << 8, ~0,
180 (kernel_ulong_t)&plx_pci_card_info_adlink_se
181 },
182 {
183 /* Marathon CAN-bus-PCI card */
184 PCI_VENDOR_ID_PLX, MARATHON_PCI_DEVICE_ID,
185 PCI_ANY_ID, PCI_ANY_ID,
186 0, 0,
187 (kernel_ulong_t)&plx_pci_card_info_marathon
188 },
189 {
190 /* TEWS TECHNOLOGIES TPMC810 card */
191 TEWS_PCI_VENDOR_ID, TEWS_PCI_DEVICE_ID_TMPC810,
192 PCI_ANY_ID, PCI_ANY_ID,
193 0, 0,
194 (kernel_ulong_t)&plx_pci_card_info_tews
195 },
196 { 0,}
197};
198MODULE_DEVICE_TABLE(pci, plx_pci_tbl);
199
200static u8 plx_pci_read_reg(const struct sja1000_priv *priv, int port)
201{
202 return ioread8(priv->reg_base + port);
203}
204
205static void plx_pci_write_reg(const struct sja1000_priv *priv, int port, u8 val)
206{
207 iowrite8(val, priv->reg_base + port);
208}
209
210/*
211 * Check if a CAN controller is present at the specified location
212 * by trying to switch them from the Basic mode into the PeliCAN mode.
213 * Also check states of some registers in reset mode.
214 */
215static inline int plx_pci_check_sja1000(const struct sja1000_priv *priv)
216{
217 int flag = 0;
218
219 /*
220 * Check registers after hardware reset (the Basic mode)
221 * See states on p. 10 of the Datasheet.
222 */
223 if ((priv->read_reg(priv, REG_CR) & REG_CR_BASICCAN_INITIAL_MASK) ==
224 REG_CR_BASICCAN_INITIAL &&
225 (priv->read_reg(priv, REG_SR) == REG_SR_BASICCAN_INITIAL) &&
226 (priv->read_reg(priv, REG_IR) == REG_IR_BASICCAN_INITIAL))
227 flag = 1;
228
229 /* Bring the SJA1000 into the PeliCAN mode*/
230 priv->write_reg(priv, REG_CDR, CDR_PELICAN);
231
232 /*
233 * Check registers after reset in the PeliCAN mode.
234 * See states on p. 23 of the Datasheet.
235 */
236 if (priv->read_reg(priv, REG_MOD) == REG_MOD_PELICAN_INITIAL &&
237 priv->read_reg(priv, REG_SR) == REG_SR_PELICAN_INITIAL &&
238 priv->read_reg(priv, REG_IR) == REG_IR_PELICAN_INITIAL)
239 return flag;
240
241 return 0;
242}
243
244/*
245 * PLX90xx software reset
246 * It also asserts LRESET# and resets the device on the Local Bus (if wired).
247 * For most cards this is enough to reset the SJA1000 chips.
248 */
249static void plx_pci_reset_common(struct pci_dev *pdev)
250{
251 struct plx_pci_card *card = pci_get_drvdata(pdev);
252 u32 cntrl;
253
254 cntrl = ioread32(card->conf_addr + PLX_CNTRL);
255 cntrl |= PLX_PCI_RESET;
256 iowrite32(cntrl, card->conf_addr + PLX_CNTRL);
257 udelay(100);
258 cntrl ^= PLX_PCI_RESET;
259 iowrite32(cntrl, card->conf_addr + PLX_CNTRL);
260}
261
262/* Special reset function for Marathon card */
263static void plx_pci_reset_marathon(struct pci_dev *pdev)
264{
265 void __iomem *reset_addr;
266 int i;
267 int reset_bar[2] = {3, 5};
268
269 plx_pci_reset_common(pdev);
270
271 for (i = 0; i < 2; i++) {
272 reset_addr = pci_iomap(pdev, reset_bar[i], 0);
273 if (!reset_addr) {
274 dev_err(&pdev->dev, "Failed to remap reset "
275 "space %d (BAR%d)\n", i, reset_bar[i]);
276 } else {
277 /* reset the SJA1000 chip */
278 iowrite8(0x1, reset_addr);
279 udelay(100);
280 pci_iounmap(pdev, reset_addr);
281 }
282 }
283}
284
285static void plx_pci_del_card(struct pci_dev *pdev)
286{
287 struct plx_pci_card *card = pci_get_drvdata(pdev);
288 struct net_device *dev;
289 struct sja1000_priv *priv;
290 int i = 0;
291
292 for (i = 0; i < card->channels; i++) {
293 dev = card->net_dev[i];
294 if (!dev)
295 continue;
296
297 dev_info(&pdev->dev, "Removing %s\n", dev->name);
298 unregister_sja1000dev(dev);
299 priv = netdev_priv(dev);
300 if (priv->reg_base)
301 pci_iounmap(pdev, priv->reg_base);
302 free_sja1000dev(dev);
303 }
304
305 plx_pci_reset_common(pdev);
306
307 /*
308 * Disable interrupts from PCI-card (PLX90xx) and disable Local_1,
309 * Local_2 interrupts
310 */
311 iowrite32(0x0, card->conf_addr + PLX_INTCSR);
312
313 if (card->conf_addr)
314 pci_iounmap(pdev, card->conf_addr);
315
316 kfree(card);
317
318 pci_disable_device(pdev);
319 pci_set_drvdata(pdev, NULL);
320}
321
322/*
323 * Probe PLX90xx based device for the SJA1000 chips and register each
324 * available CAN channel to SJA1000 Socket-CAN subsystem.
325 */
326static int __devinit plx_pci_add_card(struct pci_dev *pdev,
327 const struct pci_device_id *ent)
328{
329 struct sja1000_priv *priv;
330 struct net_device *dev;
331 struct plx_pci_card *card;
332 struct plx_pci_card_info *ci;
333 int err, i;
334 u32 val;
335 void __iomem *addr;
336
337 ci = (struct plx_pci_card_info *)ent->driver_data;
338
339 if (pci_enable_device(pdev) < 0) {
340 dev_err(&pdev->dev, "Failed to enable PCI device\n");
341 return -ENODEV;
342 }
343
344 dev_info(&pdev->dev, "Detected \"%s\" card at slot #%i\n",
345 ci->name, PCI_SLOT(pdev->devfn));
346
347 /* Allocate card structures to hold addresses, ... */
348 card = kzalloc(sizeof(*card), GFP_KERNEL);
349 if (!card) {
350 dev_err(&pdev->dev, "Unable to allocate memory\n");
351 pci_disable_device(pdev);
352 return -ENOMEM;
353 }
354
355 pci_set_drvdata(pdev, card);
356
357 card->channels = 0;
358
359 /* Remap PLX90xx configuration space */
360 addr = pci_iomap(pdev, ci->conf_map.bar, ci->conf_map.size);
361 if (!addr) {
362 err = -ENOMEM;
363 dev_err(&pdev->dev, "Failed to remap configuration space "
364 "(BAR%d)\n", ci->conf_map.bar);
365 goto failure_cleanup;
366 }
367 card->conf_addr = addr + ci->conf_map.offset;
368
369 ci->reset_func(pdev);
370
371 /* Detect available channels */
372 for (i = 0; i < ci->channel_count; i++) {
373 struct plx_pci_channel_map *cm = &ci->chan_map_tbl[i];
374
375 dev = alloc_sja1000dev(0);
376 if (!dev) {
377 err = -ENOMEM;
378 goto failure_cleanup;
379 }
380
381 card->net_dev[i] = dev;
382 priv = netdev_priv(dev);
383 priv->priv = card;
384 priv->irq_flags = IRQF_SHARED;
385
386 dev->irq = pdev->irq;
387
388 /*
389 * Remap IO space of the SJA1000 chips
390 * This is device-dependent mapping
391 */
392 addr = pci_iomap(pdev, cm->bar, cm->size);
393 if (!addr) {
394 err = -ENOMEM;
395 dev_err(&pdev->dev, "Failed to remap BAR%d\n", cm->bar);
396 goto failure_cleanup;
397 }
398
399 priv->reg_base = addr + cm->offset;
400 priv->read_reg = plx_pci_read_reg;
401 priv->write_reg = plx_pci_write_reg;
402
403 /* Check if channel is present */
404 if (plx_pci_check_sja1000(priv)) {
405 priv->can.clock.freq = ci->can_clock;
406 priv->ocr = ci->ocr;
407 priv->cdr = ci->cdr;
408
409 SET_NETDEV_DEV(dev, &pdev->dev);
410
411 /* Register SJA1000 device */
412 err = register_sja1000dev(dev);
413 if (err) {
414 dev_err(&pdev->dev, "Registering device failed "
415 "(err=%d)\n", err);
416 free_sja1000dev(dev);
417 goto failure_cleanup;
418 }
419
420 card->channels++;
421
422 dev_info(&pdev->dev, "Channel #%d at 0x%p, irq %d "
423 "registered as %s\n", i + 1, priv->reg_base,
424 dev->irq, dev->name);
425 } else {
426 dev_err(&pdev->dev, "Channel #%d not detected\n",
427 i + 1);
428 free_sja1000dev(dev);
429 }
430 }
431
432 if (!card->channels) {
433 err = -ENODEV;
434 goto failure_cleanup;
435 }
436
437 /*
438 * Enable interrupts from PCI-card (PLX90xx) and enable Local_1,
439 * Local_2 interrupts from the SJA1000 chips
440 */
441 val = ioread32(card->conf_addr + PLX_INTCSR);
442 val |= PLX_LINT1_EN | PLX_LINT2_EN | PLX_PCI_INT_EN;
443 iowrite32(val, card->conf_addr + PLX_INTCSR);
444
445 return 0;
446
447failure_cleanup:
448 dev_err(&pdev->dev, "Error: %d. Cleaning Up.\n", err);
449
450 plx_pci_del_card(pdev);
451
452 return err;
453}
454
455static struct pci_driver plx_pci_driver = {
456 .name = DRV_NAME,
457 .id_table = plx_pci_tbl,
458 .probe = plx_pci_add_card,
459 .remove = plx_pci_del_card,
460};
461
462static int __init plx_pci_init(void)
463{
464 return pci_register_driver(&plx_pci_driver);
465}
466
467static void __exit plx_pci_exit(void)
468{
469 pci_unregister_driver(&plx_pci_driver);
470}
471
472module_init(plx_pci_init);
473module_exit(plx_pci_exit);
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 16d2ecd2a3b7..145b1a731a53 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -130,8 +130,12 @@ static void set_normal_mode(struct net_device *dev)
130 /* check reset bit */ 130 /* check reset bit */
131 if ((status & MOD_RM) == 0) { 131 if ((status & MOD_RM) == 0) {
132 priv->can.state = CAN_STATE_ERROR_ACTIVE; 132 priv->can.state = CAN_STATE_ERROR_ACTIVE;
133 /* enable all interrupts */ 133 /* enable interrupts */
134 priv->write_reg(priv, REG_IER, IRQ_ALL); 134 if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
135 priv->write_reg(priv, REG_IER, IRQ_ALL);
136 else
137 priv->write_reg(priv, REG_IER,
138 IRQ_ALL & ~IRQ_BEI);
135 return; 139 return;
136 } 140 }
137 141
@@ -203,6 +207,17 @@ static int sja1000_set_bittiming(struct net_device *dev)
203 return 0; 207 return 0;
204} 208}
205 209
210static int sja1000_get_berr_counter(const struct net_device *dev,
211 struct can_berr_counter *bec)
212{
213 struct sja1000_priv *priv = netdev_priv(dev);
214
215 bec->txerr = priv->read_reg(priv, REG_TXERR);
216 bec->rxerr = priv->read_reg(priv, REG_RXERR);
217
218 return 0;
219}
220
206/* 221/*
207 * initialize SJA1000 chip: 222 * initialize SJA1000 chip:
208 * - reset chip 223 * - reset chip
@@ -249,6 +264,9 @@ static netdev_tx_t sja1000_start_xmit(struct sk_buff *skb,
249 uint8_t dreg; 264 uint8_t dreg;
250 int i; 265 int i;
251 266
267 if (can_dropped_invalid_skb(dev, skb))
268 return NETDEV_TX_OK;
269
252 netif_stop_queue(dev); 270 netif_stop_queue(dev);
253 271
254 fi = dlc = cf->can_dlc; 272 fi = dlc = cf->can_dlc;
@@ -293,17 +311,14 @@ static void sja1000_rx(struct net_device *dev)
293 uint8_t fi; 311 uint8_t fi;
294 uint8_t dreg; 312 uint8_t dreg;
295 canid_t id; 313 canid_t id;
296 uint8_t dlc;
297 int i; 314 int i;
298 315
299 skb = dev_alloc_skb(sizeof(struct can_frame)); 316 /* create zero'ed CAN frame buffer */
317 skb = alloc_can_skb(dev, &cf);
300 if (skb == NULL) 318 if (skb == NULL)
301 return; 319 return;
302 skb->dev = dev;
303 skb->protocol = htons(ETH_P_CAN);
304 320
305 fi = priv->read_reg(priv, REG_FI); 321 fi = priv->read_reg(priv, REG_FI);
306 dlc = fi & 0x0F;
307 322
308 if (fi & FI_FF) { 323 if (fi & FI_FF) {
309 /* extended frame format (EFF) */ 324 /* extended frame format (EFF) */
@@ -320,18 +335,15 @@ static void sja1000_rx(struct net_device *dev)
320 | (priv->read_reg(priv, REG_ID2) >> 5); 335 | (priv->read_reg(priv, REG_ID2) >> 5);
321 } 336 }
322 337
323 if (fi & FI_RTR) 338 if (fi & FI_RTR) {
324 id |= CAN_RTR_FLAG; 339 id |= CAN_RTR_FLAG;
340 } else {
341 cf->can_dlc = get_can_dlc(fi & 0x0F);
342 for (i = 0; i < cf->can_dlc; i++)
343 cf->data[i] = priv->read_reg(priv, dreg++);
344 }
325 345
326 cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
327 memset(cf, 0, sizeof(struct can_frame));
328 cf->can_id = id; 346 cf->can_id = id;
329 cf->can_dlc = dlc;
330 for (i = 0; i < dlc; i++)
331 cf->data[i] = priv->read_reg(priv, dreg++);
332
333 while (i < 8)
334 cf->data[i++] = 0;
335 347
336 /* release receive buffer */ 348 /* release receive buffer */
337 priv->write_reg(priv, REG_CMR, CMD_RRB); 349 priv->write_reg(priv, REG_CMR, CMD_RRB);
@@ -339,7 +351,7 @@ static void sja1000_rx(struct net_device *dev)
339 netif_rx(skb); 351 netif_rx(skb);
340 352
341 stats->rx_packets++; 353 stats->rx_packets++;
342 stats->rx_bytes += dlc; 354 stats->rx_bytes += cf->can_dlc;
343} 355}
344 356
345static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status) 357static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
@@ -351,15 +363,9 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
351 enum can_state state = priv->can.state; 363 enum can_state state = priv->can.state;
352 uint8_t ecc, alc; 364 uint8_t ecc, alc;
353 365
354 skb = dev_alloc_skb(sizeof(struct can_frame)); 366 skb = alloc_can_err_skb(dev, &cf);
355 if (skb == NULL) 367 if (skb == NULL)
356 return -ENOMEM; 368 return -ENOMEM;
357 skb->dev = dev;
358 skb->protocol = htons(ETH_P_CAN);
359 cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
360 memset(cf, 0, sizeof(struct can_frame));
361 cf->can_id = CAN_ERR_FLAG;
362 cf->can_dlc = CAN_ERR_DLC;
363 369
364 if (isrc & IRQ_DOI) { 370 if (isrc & IRQ_DOI) {
365 /* data overrun interrupt */ 371 /* data overrun interrupt */
@@ -446,6 +452,8 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
446 CAN_ERR_CRTL_TX_PASSIVE : 452 CAN_ERR_CRTL_TX_PASSIVE :
447 CAN_ERR_CRTL_RX_PASSIVE; 453 CAN_ERR_CRTL_RX_PASSIVE;
448 } 454 }
455 cf->data[6] = txerr;
456 cf->data[7] = rxerr;
449 } 457 }
450 458
451 priv->can.state = state; 459 priv->can.state = state;
@@ -526,7 +534,7 @@ static int sja1000_open(struct net_device *dev)
526 534
527 /* register interrupt handler, if not done by the device driver */ 535 /* register interrupt handler, if not done by the device driver */
528 if (!(priv->flags & SJA1000_CUSTOM_IRQ_HANDLER)) { 536 if (!(priv->flags & SJA1000_CUSTOM_IRQ_HANDLER)) {
529 err = request_irq(dev->irq, &sja1000_interrupt, priv->irq_flags, 537 err = request_irq(dev->irq, sja1000_interrupt, priv->irq_flags,
530 dev->name, (void *)dev); 538 dev->name, (void *)dev);
531 if (err) { 539 if (err) {
532 close_candev(dev); 540 close_candev(dev);
@@ -565,7 +573,8 @@ struct net_device *alloc_sja1000dev(int sizeof_priv)
565 struct net_device *dev; 573 struct net_device *dev;
566 struct sja1000_priv *priv; 574 struct sja1000_priv *priv;
567 575
568 dev = alloc_candev(sizeof(struct sja1000_priv) + sizeof_priv); 576 dev = alloc_candev(sizeof(struct sja1000_priv) + sizeof_priv,
577 SJA1000_ECHO_SKB_MAX);
569 if (!dev) 578 if (!dev)
570 return NULL; 579 return NULL;
571 580
@@ -575,6 +584,9 @@ struct net_device *alloc_sja1000dev(int sizeof_priv)
575 priv->can.bittiming_const = &sja1000_bittiming_const; 584 priv->can.bittiming_const = &sja1000_bittiming_const;
576 priv->can.do_set_bittiming = sja1000_set_bittiming; 585 priv->can.do_set_bittiming = sja1000_set_bittiming;
577 priv->can.do_set_mode = sja1000_set_mode; 586 priv->can.do_set_mode = sja1000_set_mode;
587 priv->can.do_get_berr_counter = sja1000_get_berr_counter;
588 priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
589 CAN_CTRLMODE_BERR_REPORTING;
578 590
579 if (sizeof_priv) 591 if (sizeof_priv)
580 priv->priv = (void *)priv + sizeof(struct sja1000_priv); 592 priv->priv = (void *)priv + sizeof(struct sja1000_priv);
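The set_normal_mode() hunk above enables the SJA1000 bus error interrupt (IRQ_BEI) only when the new CAN_CTRLMODE_BERR_REPORTING control mode is requested. Below is a standalone sketch of that gating, not a drop-in for the driver: the three constants carry placeholder values purely so the snippet compiles on its own, while the real definitions live in sja1000.h and linux/can/netlink.h. With iproute2 this mode is normally toggled through the "berr-reporting on" option of "ip link set ... type can".

#include <stdio.h>

#define IRQ_ALL				0xff	/* placeholder value */
#define IRQ_BEI				0x80	/* placeholder value */
#define CAN_CTRLMODE_BERR_REPORTING	0x10	/* placeholder value */

/* Compute the IER mask the way the hunk above does. */
static unsigned char sja1000_ier_mask(unsigned int ctrlmode)
{
	/* Deliver bus error frames to user space only when asked for. */
	if (ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
		return IRQ_ALL;
	return IRQ_ALL & ~IRQ_BEI;
}

int main(void)
{
	printf("default IER mask:        0x%02x\n", sja1000_ier_mask(0));
	printf("berr-reporting IER mask: 0x%02x\n",
	       sja1000_ier_mask(CAN_CTRLMODE_BERR_REPORTING));
	return 0;
}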
diff --git a/drivers/net/can/sja1000/sja1000.h b/drivers/net/can/sja1000/sja1000.h
index 302d2c763ad7..97a622b9302f 100644
--- a/drivers/net/can/sja1000/sja1000.h
+++ b/drivers/net/can/sja1000/sja1000.h
@@ -50,6 +50,8 @@
50#include <linux/can/dev.h> 50#include <linux/can/dev.h>
51#include <linux/can/platform/sja1000.h> 51#include <linux/can/platform/sja1000.h>
52 52
53#define SJA1000_ECHO_SKB_MAX 1 /* the SJA1000 has one TX buffer object */
54
53#define SJA1000_MAX_IRQ 20 /* max. number of interrupts handled in ISR */ 55#define SJA1000_MAX_IRQ 20 /* max. number of interrupts handled in ISR */
54 56
55/* SJA1000 registers - manual section 6.4 (Pelican Mode) */ 57/* SJA1000 registers - manual section 6.4 (Pelican Mode) */
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
new file mode 100644
index 000000000000..0c3d2ba0d178
--- /dev/null
+++ b/drivers/net/can/ti_hecc.c
@@ -0,0 +1,1056 @@
1/*
2 * TI HECC (CAN) device driver
3 *
4 * This driver supports TI's HECC (High End CAN Controller module) and the
5 * specs for the same are available at <http://www.ti.com>
6 *
7 * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation version 2.
12 *
13 * This program is distributed as is WITHOUT ANY WARRANTY of any
14 * kind, whether express or implied; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 */
19
20/*
21 * Your platform definitions should specify module ram offsets and interrupt
22 * number to use as follows:
23 *
24 * static struct ti_hecc_platform_data am3517_evm_hecc_pdata = {
25 * .scc_hecc_offset = 0,
26 * .scc_ram_offset = 0x3000,
27 * .hecc_ram_offset = 0x3000,
28 * .mbx_offset = 0x2000,
29 * .int_line = 0,
30 * .revision = 1,
31 * .transceiver_switch = hecc_phy_control,
32 * };
33 *
34 * Please see include/linux/can/platform/ti_hecc.h for description of
35 * above fields.
36 *
37 */
38
39#include <linux/module.h>
40#include <linux/init.h>
41#include <linux/kernel.h>
42#include <linux/types.h>
43#include <linux/interrupt.h>
44#include <linux/errno.h>
45#include <linux/netdevice.h>
46#include <linux/skbuff.h>
47#include <linux/platform_device.h>
48#include <linux/clk.h>
49
50#include <linux/can.h>
51#include <linux/can/dev.h>
52#include <linux/can/error.h>
53#include <linux/can/platform/ti_hecc.h>
54
55#define DRV_NAME "ti_hecc"
56#define HECC_MODULE_VERSION "0.7"
57MODULE_VERSION(HECC_MODULE_VERSION);
58#define DRV_DESC "TI High End CAN Controller Driver " HECC_MODULE_VERSION
59
60/* TX / RX Mailbox Configuration */
61#define HECC_MAX_MAILBOXES 32 /* hardware mailboxes - do not change */
62#define MAX_TX_PRIO 0x3F /* hardware value - do not change */
63
64/*
65 * Important Note: TX mailbox configuration
66 * TX mailboxes should be restricted to the number of SKB buffers to avoid
67 * maintaining SKB buffers separately. TX mailboxes should be a power of 2
68 * for the mailbox logic to work. Top mailbox numbers are reserved for RX
69 * and lower mailboxes for TX.
70 *
71 * HECC_MAX_TX_MBOX HECC_MB_TX_SHIFT
72 * 4 (default) 2
73 * 8 3
74 * 16 4
75 */
76#define HECC_MB_TX_SHIFT 2 /* as per table above */
77#define HECC_MAX_TX_MBOX BIT(HECC_MB_TX_SHIFT)
78
79#define HECC_TX_PRIO_SHIFT (HECC_MB_TX_SHIFT)
80#define HECC_TX_PRIO_MASK (MAX_TX_PRIO << HECC_MB_TX_SHIFT)
81#define HECC_TX_MB_MASK (HECC_MAX_TX_MBOX - 1)
82#define HECC_TX_MASK ((HECC_MAX_TX_MBOX - 1) | HECC_TX_PRIO_MASK)
83#define HECC_TX_MBOX_MASK (~(BIT(HECC_MAX_TX_MBOX) - 1))
84#define HECC_DEF_NAPI_WEIGHT HECC_MAX_RX_MBOX
85
86/*
87 * Important Note: RX mailbox configuration
88 * RX mailboxes are further logically split into two - main and buffer
89 * mailboxes. The goal is to get all packets into main mailboxes as
90 * driven by mailbox number and receive priority (higher to lower) and
91 * buffer mailboxes are used to receive pkts while main mailboxes are being
92 * processed. This ensures in-order packet reception.
93 *
94 * Here are the recommended values for the buffer mailbox. Note that RX
95 * mailboxes start after TX mailboxes (a worked example follows the defines below):
96 *
97 * HECC_MAX_RX_MBOX HECC_RX_BUFFER_MBOX No of buffer mailboxes
98 * 28 12 8
99 * 16 20 4
100 */
101
102#define HECC_MAX_RX_MBOX (HECC_MAX_MAILBOXES - HECC_MAX_TX_MBOX)
103#define HECC_RX_BUFFER_MBOX 12 /* as per table above */
104#define HECC_RX_FIRST_MBOX (HECC_MAX_MAILBOXES - 1)
105#define HECC_RX_HIGH_MBOX_MASK (~(BIT(HECC_RX_BUFFER_MBOX) - 1))
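As referenced in the note above, here is a worked example of the mailbox partition these defines produce with the default HECC_MB_TX_SHIFT of 2 (four TX mailboxes). The macro bodies are copied from this file so the sketch is self-contained; it is illustrative only and not part of the driver.

#include <stdio.h>

#define BIT(n)			(1U << (n))
#define HECC_MAX_MAILBOXES	32
#define HECC_MB_TX_SHIFT	2
#define HECC_MAX_TX_MBOX	BIT(HECC_MB_TX_SHIFT)
#define HECC_TX_MB_MASK		(HECC_MAX_TX_MBOX - 1)
#define HECC_MAX_RX_MBOX	(HECC_MAX_MAILBOXES - HECC_MAX_TX_MBOX)
#define HECC_RX_BUFFER_MBOX	12
#define HECC_RX_FIRST_MBOX	(HECC_MAX_MAILBOXES - 1)
#define HECC_RX_HIGH_MBOX_MASK	(~(BIT(HECC_RX_BUFFER_MBOX) - 1))

int main(void)
{
	/* TX uses mailboxes 0..3; RX uses 4..31 and fills 31 first. */
	printf("TX mailboxes: 0..%u (mailbox mask 0x%02x)\n",
	       HECC_MAX_TX_MBOX - 1, HECC_TX_MB_MASK);
	printf("RX mailboxes: %u..%u (%u in total)\n",
	       (unsigned)HECC_MAX_TX_MBOX, (unsigned)HECC_RX_FIRST_MBOX,
	       (unsigned)HECC_MAX_RX_MBOX);
	/* Mailboxes 12..31 form the "main" bank, 4..11 the "buffer" bank. */
	printf("main RX bank enable mask: 0x%08x\n", HECC_RX_HIGH_MBOX_MASK);
	return 0;
}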
106
107/* TI HECC module registers */
108#define HECC_CANME 0x0 /* Mailbox enable */
109#define HECC_CANMD 0x4 /* Mailbox direction */
110#define HECC_CANTRS 0x8 /* Transmit request set */
111#define HECC_CANTRR 0xC /* Transmit request */
112#define HECC_CANTA 0x10 /* Transmission acknowledge */
113#define HECC_CANAA 0x14 /* Abort acknowledge */
114#define HECC_CANRMP 0x18 /* Receive message pending */
115#define HECC_CANRML 0x1C /* Remote message lost */
116#define HECC_CANRFP 0x20 /* Remote frame pending */
117#define HECC_CANGAM 0x24 /* SECC only:Global acceptance mask */
118#define HECC_CANMC 0x28 /* Master control */
119#define HECC_CANBTC 0x2C /* Bit timing configuration */
120#define HECC_CANES 0x30 /* Error and status */
121#define HECC_CANTEC 0x34 /* Transmit error counter */
122#define HECC_CANREC 0x38 /* Receive error counter */
123#define HECC_CANGIF0 0x3C /* Global interrupt flag 0 */
124#define HECC_CANGIM 0x40 /* Global interrupt mask */
125#define HECC_CANGIF1 0x44 /* Global interrupt flag 1 */
126#define HECC_CANMIM 0x48 /* Mailbox interrupt mask */
127#define HECC_CANMIL 0x4C /* Mailbox interrupt level */
128#define HECC_CANOPC 0x50 /* Overwrite protection control */
129#define HECC_CANTIOC 0x54 /* Transmit I/O control */
130#define HECC_CANRIOC 0x58 /* Receive I/O control */
131#define HECC_CANLNT 0x5C /* HECC only: Local network time */
132#define HECC_CANTOC 0x60 /* HECC only: Time-out control */
133#define HECC_CANTOS 0x64 /* HECC only: Time-out status */
134#define HECC_CANTIOCE 0x68 /* SCC only:Enhanced TX I/O control */
135#define HECC_CANRIOCE 0x6C /* SCC only:Enhanced RX I/O control */
136
137/* Mailbox registers */
138#define HECC_CANMID 0x0
139#define HECC_CANMCF 0x4
140#define HECC_CANMDL 0x8
141#define HECC_CANMDH 0xC
142
143#define HECC_SET_REG 0xFFFFFFFF
144#define HECC_CANID_MASK 0x3FF /* 18 bits mask for extended id's */
145#define HECC_CCE_WAIT_COUNT 100 /* Wait for ~1 sec for CCE bit */
146
147#define HECC_CANMC_SCM BIT(13) /* SCC compat mode */
148#define HECC_CANMC_CCR BIT(12) /* Change config request */
149#define HECC_CANMC_PDR BIT(11) /* Local Power down - for sleep mode */
150#define HECC_CANMC_ABO BIT(7) /* Auto Bus On */
151#define HECC_CANMC_STM BIT(6) /* Self test mode - loopback */
152#define HECC_CANMC_SRES BIT(5) /* Software reset */
153
154#define HECC_CANTIOC_EN BIT(3) /* Enable CAN TX I/O pin */
155#define HECC_CANRIOC_EN BIT(3) /* Enable CAN RX I/O pin */
156
157#define HECC_CANMID_IDE BIT(31) /* Extended frame format */
158#define HECC_CANMID_AME BIT(30) /* Acceptance mask enable */
159#define HECC_CANMID_AAM BIT(29) /* Auto answer mode */
160
161#define HECC_CANES_FE BIT(24) /* form error */
162#define HECC_CANES_BE BIT(23) /* bit error */
163#define HECC_CANES_SA1 BIT(22) /* stuck at dominant error */
164#define HECC_CANES_CRCE BIT(21) /* CRC error */
165#define HECC_CANES_SE BIT(20) /* stuff bit error */
166#define HECC_CANES_ACKE BIT(19) /* ack error */
167#define HECC_CANES_BO BIT(18) /* Bus off status */
168#define HECC_CANES_EP BIT(17) /* Error passive status */
169#define HECC_CANES_EW BIT(16) /* Error warning status */
170#define HECC_CANES_SMA BIT(5) /* suspend mode ack */
171#define HECC_CANES_CCE BIT(4) /* Change config enabled */
172#define HECC_CANES_PDA BIT(3) /* Power down mode ack */
173
174#define HECC_CANBTC_SAM BIT(7) /* sample points */
175
176#define HECC_BUS_ERROR (HECC_CANES_FE | HECC_CANES_BE |\
177 HECC_CANES_CRCE | HECC_CANES_SE |\
178 HECC_CANES_ACKE)
179
180#define HECC_CANMCF_RTR BIT(4) /* Remote transmit request */
181
182#define HECC_CANGIF_MAIF BIT(17) /* Message alarm interrupt */
183#define HECC_CANGIF_TCOIF BIT(16) /* Timer counter overflow int */
184#define HECC_CANGIF_GMIF BIT(15) /* Global mailbox interrupt */
185#define HECC_CANGIF_AAIF BIT(14) /* Abort ack interrupt */
186#define HECC_CANGIF_WDIF BIT(13) /* Write denied interrupt */
187#define HECC_CANGIF_WUIF BIT(12) /* Wake up interrupt */
188#define HECC_CANGIF_RMLIF BIT(11) /* Receive message lost interrupt */
189#define HECC_CANGIF_BOIF BIT(10) /* Bus off interrupt */
190#define HECC_CANGIF_EPIF BIT(9) /* Error passive interrupt */
191#define HECC_CANGIF_WLIF BIT(8) /* Warning level interrupt */
192#define HECC_CANGIF_MBOX_MASK 0x1F /* Mailbox number mask */
193#define HECC_CANGIM_I1EN BIT(1) /* Int line 1 enable */
194#define HECC_CANGIM_I0EN BIT(0) /* Int line 0 enable */
195#define HECC_CANGIM_DEF_MASK 0x700 /* only busoff/warning/passive */
196#define HECC_CANGIM_SIL BIT(2) /* system interrupts to int line 1 */
197
198/* CAN Bittiming constants as per HECC specs */
199static struct can_bittiming_const ti_hecc_bittiming_const = {
200 .name = DRV_NAME,
201 .tseg1_min = 1,
202 .tseg1_max = 16,
203 .tseg2_min = 1,
204 .tseg2_max = 8,
205 .sjw_max = 4,
206 .brp_min = 1,
207 .brp_max = 256,
208 .brp_inc = 1,
209};
210
211struct ti_hecc_priv {
212 struct can_priv can; /* MUST be first member/field */
213 struct napi_struct napi;
214 struct net_device *ndev;
215 struct clk *clk;
216 void __iomem *base;
217 u32 scc_ram_offset;
218 u32 hecc_ram_offset;
219 u32 mbx_offset;
220 u32 int_line;
221 spinlock_t mbx_lock; /* CANME register needs protection */
222 u32 tx_head;
223 u32 tx_tail;
224 u32 rx_next;
225 void (*transceiver_switch)(int);
226};
227
228static inline int get_tx_head_mb(struct ti_hecc_priv *priv)
229{
230 return priv->tx_head & HECC_TX_MB_MASK;
231}
232
233static inline int get_tx_tail_mb(struct ti_hecc_priv *priv)
234{
235 return priv->tx_tail & HECC_TX_MB_MASK;
236}
237
238static inline int get_tx_head_prio(struct ti_hecc_priv *priv)
239{
240 return (priv->tx_head >> HECC_TX_PRIO_SHIFT) & MAX_TX_PRIO;
241}
242
243static inline void hecc_write_lam(struct ti_hecc_priv *priv, u32 mbxno, u32 val)
244{
245 __raw_writel(val, priv->base + priv->hecc_ram_offset + mbxno * 4);
246}
247
248static inline void hecc_write_mbx(struct ti_hecc_priv *priv, u32 mbxno,
249 u32 reg, u32 val)
250{
251 __raw_writel(val, priv->base + priv->mbx_offset + mbxno * 0x10 +
252 reg);
253}
254
255static inline u32 hecc_read_mbx(struct ti_hecc_priv *priv, u32 mbxno, u32 reg)
256{
257 return __raw_readl(priv->base + priv->mbx_offset + mbxno * 0x10 +
258 reg);
259}
260
261static inline void hecc_write(struct ti_hecc_priv *priv, u32 reg, u32 val)
262{
263 __raw_writel(val, priv->base + reg);
264}
265
266static inline u32 hecc_read(struct ti_hecc_priv *priv, int reg)
267{
268 return __raw_readl(priv->base + reg);
269}
270
271static inline void hecc_set_bit(struct ti_hecc_priv *priv, int reg,
272 u32 bit_mask)
273{
274 hecc_write(priv, reg, hecc_read(priv, reg) | bit_mask);
275}
276
277static inline void hecc_clear_bit(struct ti_hecc_priv *priv, int reg,
278 u32 bit_mask)
279{
280 hecc_write(priv, reg, hecc_read(priv, reg) & ~bit_mask);
281}
282
283static inline u32 hecc_get_bit(struct ti_hecc_priv *priv, int reg, u32 bit_mask)
284{
285 return (hecc_read(priv, reg) & bit_mask) ? 1 : 0;
286}
287
288static int ti_hecc_get_state(const struct net_device *ndev,
289 enum can_state *state)
290{
291 struct ti_hecc_priv *priv = netdev_priv(ndev);
292
293 *state = priv->can.state;
294 return 0;
295}
296
297static int ti_hecc_set_btc(struct ti_hecc_priv *priv)
298{
299 struct can_bittiming *bit_timing = &priv->can.bittiming;
300 u32 can_btc;
301
302 can_btc = (bit_timing->phase_seg2 - 1) & 0x7;
303 can_btc |= ((bit_timing->phase_seg1 + bit_timing->prop_seg - 1)
304 & 0xF) << 3;
305 if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) {
306 if (bit_timing->brp > 4)
307 can_btc |= HECC_CANBTC_SAM;
308 else
309			dev_warn(priv->ndev->dev.parent, "WARN: Triple " \
310 "sampling not set due to h/w limitations");
311 }
312 can_btc |= ((bit_timing->sjw - 1) & 0x3) << 8;
313 can_btc |= ((bit_timing->brp - 1) & 0xFF) << 16;
314
315 /* ERM being set to 0 by default meaning resync at falling edge */
316
317 hecc_write(priv, HECC_CANBTC, can_btc);
318 dev_info(priv->ndev->dev.parent, "setting CANBTC=%#x\n", can_btc);
319
320 return 0;
321}
322
323static void ti_hecc_transceiver_switch(const struct ti_hecc_priv *priv,
324 int on)
325{
326 if (priv->transceiver_switch)
327 priv->transceiver_switch(on);
328}
329
330static void ti_hecc_reset(struct net_device *ndev)
331{
332 u32 cnt;
333 struct ti_hecc_priv *priv = netdev_priv(ndev);
334
335 dev_dbg(ndev->dev.parent, "resetting hecc ...\n");
336 hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_SRES);
337
338 /* Set change control request and wait till enabled */
339 hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_CCR);
340
341 /*
342 * INFO: It has been observed that at times CCE bit may not be
343 * set and hw seems to be ok even if this bit is not set so
344 * timing out with a timing of 1ms to respect the specs
345 */
346 cnt = HECC_CCE_WAIT_COUNT;
347 while (!hecc_get_bit(priv, HECC_CANES, HECC_CANES_CCE) && cnt != 0) {
348 --cnt;
349 udelay(10);
350 }
351
352 /*
353 * Note: On HECC, BTC can be programmed only in initialization mode, so
354 * it is expected that the can bittiming parameters are set via ip
355 * utility before the device is opened
356 */
357 ti_hecc_set_btc(priv);
358
359	/* Clear CCR (and the CANMC register) and wait for CCE = 0 */
360 hecc_write(priv, HECC_CANMC, 0);
361
362 /*
363 * INFO: CAN net stack handles bus off and hence disabling auto-bus-on
364 * hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_ABO);
365 */
366
367 /*
368	 * INFO: As above, at times the CCE bit may not clear in time even
369	 * though the hw seems to be ok, so time out here as well
370 */
371 cnt = HECC_CCE_WAIT_COUNT;
372 while (hecc_get_bit(priv, HECC_CANES, HECC_CANES_CCE) && cnt != 0) {
373 --cnt;
374 udelay(10);
375 }
376
377 /* Enable TX and RX I/O Control pins */
378 hecc_write(priv, HECC_CANTIOC, HECC_CANTIOC_EN);
379 hecc_write(priv, HECC_CANRIOC, HECC_CANRIOC_EN);
380
381 /* Clear registers for clean operation */
382 hecc_write(priv, HECC_CANTA, HECC_SET_REG);
383 hecc_write(priv, HECC_CANRMP, HECC_SET_REG);
384 hecc_write(priv, HECC_CANGIF0, HECC_SET_REG);
385 hecc_write(priv, HECC_CANGIF1, HECC_SET_REG);
386 hecc_write(priv, HECC_CANME, 0);
387 hecc_write(priv, HECC_CANMD, 0);
388
389	/* SCC compat mode NOT supported (and not needed either) */
390 hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_SCM);
391}
392
393static void ti_hecc_start(struct net_device *ndev)
394{
395 struct ti_hecc_priv *priv = netdev_priv(ndev);
396 u32 cnt, mbxno, mbx_mask;
397
398 /* put HECC in initialization mode and set btc */
399 ti_hecc_reset(ndev);
400
401 priv->tx_head = priv->tx_tail = HECC_TX_MASK;
402 priv->rx_next = HECC_RX_FIRST_MBOX;
403
404 /* Enable local and global acceptance mask registers */
405 hecc_write(priv, HECC_CANGAM, HECC_SET_REG);
406
407 /* Prepare configured mailboxes to receive messages */
408 for (cnt = 0; cnt < HECC_MAX_RX_MBOX; cnt++) {
409 mbxno = HECC_MAX_MAILBOXES - 1 - cnt;
410 mbx_mask = BIT(mbxno);
411 hecc_clear_bit(priv, HECC_CANME, mbx_mask);
412 hecc_write_mbx(priv, mbxno, HECC_CANMID, HECC_CANMID_AME);
413 hecc_write_lam(priv, mbxno, HECC_SET_REG);
414 hecc_set_bit(priv, HECC_CANMD, mbx_mask);
415 hecc_set_bit(priv, HECC_CANME, mbx_mask);
416 hecc_set_bit(priv, HECC_CANMIM, mbx_mask);
417 }
418
419 /* Prevent message over-write & Enable interrupts */
420 hecc_write(priv, HECC_CANOPC, HECC_SET_REG);
421 if (priv->int_line) {
422 hecc_write(priv, HECC_CANMIL, HECC_SET_REG);
423 hecc_write(priv, HECC_CANGIM, HECC_CANGIM_DEF_MASK |
424 HECC_CANGIM_I1EN | HECC_CANGIM_SIL);
425 } else {
426 hecc_write(priv, HECC_CANMIL, 0);
427 hecc_write(priv, HECC_CANGIM,
428 HECC_CANGIM_DEF_MASK | HECC_CANGIM_I0EN);
429 }
430 priv->can.state = CAN_STATE_ERROR_ACTIVE;
431}
432
433static void ti_hecc_stop(struct net_device *ndev)
434{
435 struct ti_hecc_priv *priv = netdev_priv(ndev);
436
437 /* Disable interrupts and disable mailboxes */
438 hecc_write(priv, HECC_CANGIM, 0);
439 hecc_write(priv, HECC_CANMIM, 0);
440 hecc_write(priv, HECC_CANME, 0);
441 priv->can.state = CAN_STATE_STOPPED;
442}
443
444static int ti_hecc_do_set_mode(struct net_device *ndev, enum can_mode mode)
445{
446 int ret = 0;
447
448 switch (mode) {
449 case CAN_MODE_START:
450 ti_hecc_start(ndev);
451 netif_wake_queue(ndev);
452 break;
453 default:
454 ret = -EOPNOTSUPP;
455 break;
456 }
457
458 return ret;
459}
460
461/*
462 * ti_hecc_xmit: HECC Transmit
463 *
464 * The transmit mailboxes range from 0 to HECC_MAX_TX_MBOX - 1. In HECC the
465 * priority of a mailbox for transmission depends upon the priority setting
466 * field in the mailbox registers. The mailbox with the highest value in the
467 * priority field is transmitted first. Only when two mailboxes have the same
468 * priority value is the highest numbered mailbox transmitted first.
469 *
470 * To utilize the HECC priority feature as described above we start with the
471 * highest numbered mailbox with highest priority level and move on to the next
472 * mailbox with the same priority level and so on. Once we loop through all the
473 * transmit mailboxes we choose the next priority level (lower) and so on
474 * until we reach the lowest priority level on the lowest numbered mailbox
475 * when we stop transmission until all mailboxes are transmitted and then
476 * restart at highest numbered mailbox with highest priority.
477 *
478 * Two counters (head and tail) are used to track the next mailbox to transmit
479 * and to track the echo buffer for already transmitted mailbox. The queue
480 * is stopped when all the mailboxes are busy or when a priority value
481 * roll-over happens (see the sketch after this function).
482 */
483static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev)
484{
485 struct ti_hecc_priv *priv = netdev_priv(ndev);
486 struct can_frame *cf = (struct can_frame *)skb->data;
487 u32 mbxno, mbx_mask, data;
488 unsigned long flags;
489
490 if (can_dropped_invalid_skb(ndev, skb))
491 return NETDEV_TX_OK;
492
493 mbxno = get_tx_head_mb(priv);
494 mbx_mask = BIT(mbxno);
495 spin_lock_irqsave(&priv->mbx_lock, flags);
496 if (unlikely(hecc_read(priv, HECC_CANME) & mbx_mask)) {
497 spin_unlock_irqrestore(&priv->mbx_lock, flags);
498 netif_stop_queue(ndev);
499 dev_err(priv->ndev->dev.parent,
500 "BUG: TX mbx not ready tx_head=%08X, tx_tail=%08X\n",
501 priv->tx_head, priv->tx_tail);
502 return NETDEV_TX_BUSY;
503 }
504 spin_unlock_irqrestore(&priv->mbx_lock, flags);
505
506 /* Prepare mailbox for transmission */
507	data = cf->can_dlc | (get_tx_head_prio(priv) << 8);
508	if (cf->can_id & CAN_RTR_FLAG) /* Remote transmission request */
509		data |= HECC_CANMCF_RTR;
510 hecc_write_mbx(priv, mbxno, HECC_CANMCF, data);
511
512 if (cf->can_id & CAN_EFF_FLAG) /* Extended frame format */
513 data = (cf->can_id & CAN_EFF_MASK) | HECC_CANMID_IDE;
514 else /* Standard frame format */
515 data = (cf->can_id & CAN_SFF_MASK) << 18;
516 hecc_write_mbx(priv, mbxno, HECC_CANMID, data);
517 hecc_write_mbx(priv, mbxno, HECC_CANMDL,
518 be32_to_cpu(*(u32 *)(cf->data)));
519 if (cf->can_dlc > 4)
520 hecc_write_mbx(priv, mbxno, HECC_CANMDH,
521 be32_to_cpu(*(u32 *)(cf->data + 4)));
522 else
523 *(u32 *)(cf->data + 4) = 0;
524 can_put_echo_skb(skb, ndev, mbxno);
525
526 spin_lock_irqsave(&priv->mbx_lock, flags);
527 --priv->tx_head;
528 if ((hecc_read(priv, HECC_CANME) & BIT(get_tx_head_mb(priv))) ||
529 (priv->tx_head & HECC_TX_MASK) == HECC_TX_MASK) {
530 netif_stop_queue(ndev);
531 }
532 hecc_set_bit(priv, HECC_CANME, mbx_mask);
533 spin_unlock_irqrestore(&priv->mbx_lock, flags);
534
535 hecc_clear_bit(priv, HECC_CANMD, mbx_mask);
536 hecc_set_bit(priv, HECC_CANMIM, mbx_mask);
537 hecc_write(priv, HECC_CANTRS, mbx_mask);
538
539 return NETDEV_TX_OK;
540}
541
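To make the head/tail scheme described above ti_hecc_xmit() concrete, the sketch below traces the tx_head counter encoding: the low HECC_MB_TX_SHIFT bits select the mailbox and the bits above them hold the transmit priority, so a plain decrement walks all TX mailboxes at one priority level before stepping down to the next. The constants are copied from the driver; the trace itself is illustrative and models no hardware.

#include <stdio.h>

#define HECC_MB_TX_SHIFT	2
#define HECC_MAX_TX_MBOX	(1U << HECC_MB_TX_SHIFT)
#define MAX_TX_PRIO		0x3F
#define HECC_TX_PRIO_SHIFT	HECC_MB_TX_SHIFT
#define HECC_TX_PRIO_MASK	(MAX_TX_PRIO << HECC_MB_TX_SHIFT)
#define HECC_TX_MB_MASK		(HECC_MAX_TX_MBOX - 1)
#define HECC_TX_MASK		((HECC_MAX_TX_MBOX - 1) | HECC_TX_PRIO_MASK)

int main(void)
{
	unsigned int tx_head = HECC_TX_MASK;	/* initial value set in ti_hecc_start() */
	int i;

	for (i = 0; i < 6; i++) {
		printf("tx_head=0x%02x -> mailbox %u, priority %u\n",
		       tx_head,
		       tx_head & HECC_TX_MB_MASK,
		       (tx_head >> HECC_TX_PRIO_SHIFT) & MAX_TX_PRIO);
		--tx_head;
	}
	/* The driver stops the queue once (tx_head & HECC_TX_MASK) wraps back to HECC_TX_MASK. */
	return 0;
}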
542static int ti_hecc_rx_pkt(struct ti_hecc_priv *priv, int mbxno)
543{
544 struct net_device_stats *stats = &priv->ndev->stats;
545 struct can_frame *cf;
546 struct sk_buff *skb;
547 u32 data, mbx_mask;
548 unsigned long flags;
549
550 skb = alloc_can_skb(priv->ndev, &cf);
551 if (!skb) {
552 if (printk_ratelimit())
553 dev_err(priv->ndev->dev.parent,
554 "ti_hecc_rx_pkt: alloc_can_skb() failed\n");
555 return -ENOMEM;
556 }
557
558 mbx_mask = BIT(mbxno);
559 data = hecc_read_mbx(priv, mbxno, HECC_CANMID);
560 if (data & HECC_CANMID_IDE)
561 cf->can_id = (data & CAN_EFF_MASK) | CAN_EFF_FLAG;
562 else
563 cf->can_id = (data >> 18) & CAN_SFF_MASK;
564 data = hecc_read_mbx(priv, mbxno, HECC_CANMCF);
565 if (data & HECC_CANMCF_RTR)
566 cf->can_id |= CAN_RTR_FLAG;
567 cf->can_dlc = get_can_dlc(data & 0xF);
568 data = hecc_read_mbx(priv, mbxno, HECC_CANMDL);
569 *(u32 *)(cf->data) = cpu_to_be32(data);
570 if (cf->can_dlc > 4) {
571 data = hecc_read_mbx(priv, mbxno, HECC_CANMDH);
572 *(u32 *)(cf->data + 4) = cpu_to_be32(data);
573 } else {
574 *(u32 *)(cf->data + 4) = 0;
575 }
576 spin_lock_irqsave(&priv->mbx_lock, flags);
577 hecc_clear_bit(priv, HECC_CANME, mbx_mask);
578 hecc_write(priv, HECC_CANRMP, mbx_mask);
579 /* enable mailbox only if it is part of rx buffer mailboxes */
580 if (priv->rx_next < HECC_RX_BUFFER_MBOX)
581 hecc_set_bit(priv, HECC_CANME, mbx_mask);
582 spin_unlock_irqrestore(&priv->mbx_lock, flags);
583
584 stats->rx_bytes += cf->can_dlc;
585 netif_receive_skb(skb);
586 stats->rx_packets++;
587
588 return 0;
589}
590
591/*
592 * ti_hecc_rx_poll - HECC receive pkts
593 *
594 * The receive mailboxes run from the highest numbered mailbox down to just
595 * above the last xmit mailbox. On CAN frame reception the hardware places the
596 * data into the highest numbered mailbox that matches the CAN ID filter. Since
597 * all receive mailboxes have the same filter (ALL CAN frames), packets arrive
598 * in the highest available RX mailbox and we need to ensure in-order reception.
599 *
600 * To ensure the packets are received in the right order we logically divide
601 * the RX mailboxes into main and buffer mailboxes. Packets are received as per
602 * mailbox priority (higher to lower) in the main bank and once it is full we
603 * disable further reception into main mailboxes. While the main mailboxes are
604 * processed in NAPI, further packets are received in buffer mailboxes.
605 *
606 * We maintain an RX next mailbox counter to process packets, and once all
607 * main mailbox packets are passed to the upper stack we enable all of them but
608 * continue to process packets received in buffer mailboxes. With each packet
609 * received from a buffer mailbox we re-enable it immediately so as to handle
610 * the overflow from higher mailboxes (see the sketch after this function).
611 */
612static int ti_hecc_rx_poll(struct napi_struct *napi, int quota)
613{
614 struct net_device *ndev = napi->dev;
615 struct ti_hecc_priv *priv = netdev_priv(ndev);
616 u32 num_pkts = 0;
617 u32 mbx_mask;
618 unsigned long pending_pkts, flags;
619
620 if (!netif_running(ndev))
621 return 0;
622
623 while ((pending_pkts = hecc_read(priv, HECC_CANRMP)) &&
624 num_pkts < quota) {
625 mbx_mask = BIT(priv->rx_next); /* next rx mailbox to process */
626 if (mbx_mask & pending_pkts) {
627 if (ti_hecc_rx_pkt(priv, priv->rx_next) < 0)
628 return num_pkts;
629 ++num_pkts;
630 } else if (priv->rx_next > HECC_RX_BUFFER_MBOX) {
631 break; /* pkt not received yet */
632 }
633 --priv->rx_next;
634 if (priv->rx_next == HECC_RX_BUFFER_MBOX) {
635 /* enable high bank mailboxes */
636 spin_lock_irqsave(&priv->mbx_lock, flags);
637 mbx_mask = hecc_read(priv, HECC_CANME);
638 mbx_mask |= HECC_RX_HIGH_MBOX_MASK;
639 hecc_write(priv, HECC_CANME, mbx_mask);
640 spin_unlock_irqrestore(&priv->mbx_lock, flags);
641 } else if (priv->rx_next == HECC_MAX_TX_MBOX - 1) {
642 priv->rx_next = HECC_RX_FIRST_MBOX;
643 break;
644 }
645 }
646
647 /* Enable packet interrupt if all pkts are handled */
648 if (hecc_read(priv, HECC_CANRMP) == 0) {
649 napi_complete(napi);
650 /* Re-enable RX mailbox interrupts */
651 mbx_mask = hecc_read(priv, HECC_CANMIM);
652 mbx_mask |= HECC_TX_MBOX_MASK;
653 hecc_write(priv, HECC_CANMIM, mbx_mask);
654 }
655
656 return num_pkts;
657}
658
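As referenced in the comment above ti_hecc_rx_poll(), the sketch below traces only the rx_next counter for the default partition (TX mailboxes 0..3, buffer RX mailboxes 4..11, main RX mailboxes 12..31). It models no hardware or NAPI interaction; it simply shows where the main bank gets re-enabled and where the counter wraps, matching the loop above under those assumptions.

#include <stdio.h>

#define HECC_MAX_MAILBOXES	32
#define HECC_MAX_TX_MBOX	4
#define HECC_RX_BUFFER_MBOX	12
#define HECC_RX_FIRST_MBOX	(HECC_MAX_MAILBOXES - 1)

int main(void)
{
	int rx_next = HECC_RX_FIRST_MBOX;

	for (;;) {
		printf("process mailbox %d\n", rx_next);
		--rx_next;
		if (rx_next == HECC_RX_BUFFER_MBOX)
			printf("  -> re-enable main RX bank (mailboxes 12..31)\n");
		if (rx_next == HECC_MAX_TX_MBOX - 1) {
			printf("  -> wrap back to mailbox %d\n", HECC_RX_FIRST_MBOX);
			break;
		}
	}
	return 0;
}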
659static int ti_hecc_error(struct net_device *ndev, int int_status,
660 int err_status)
661{
662 struct ti_hecc_priv *priv = netdev_priv(ndev);
663 struct net_device_stats *stats = &ndev->stats;
664 struct can_frame *cf;
665 struct sk_buff *skb;
666
667	/* propagate the error condition to the CAN stack */
668 skb = alloc_can_err_skb(ndev, &cf);
669 if (!skb) {
670 if (printk_ratelimit())
671 dev_err(priv->ndev->dev.parent,
672 "ti_hecc_error: alloc_can_err_skb() failed\n");
673 return -ENOMEM;
674 }
675
676 if (int_status & HECC_CANGIF_WLIF) { /* warning level int */
677 if ((int_status & HECC_CANGIF_BOIF) == 0) {
678 priv->can.state = CAN_STATE_ERROR_WARNING;
679 ++priv->can.can_stats.error_warning;
680 cf->can_id |= CAN_ERR_CRTL;
681 if (hecc_read(priv, HECC_CANTEC) > 96)
682 cf->data[1] |= CAN_ERR_CRTL_TX_WARNING;
683 if (hecc_read(priv, HECC_CANREC) > 96)
684 cf->data[1] |= CAN_ERR_CRTL_RX_WARNING;
685 }
686 hecc_set_bit(priv, HECC_CANES, HECC_CANES_EW);
687 dev_dbg(priv->ndev->dev.parent, "Error Warning interrupt\n");
688 hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_CCR);
689 }
690
691 if (int_status & HECC_CANGIF_EPIF) { /* error passive int */
692 if ((int_status & HECC_CANGIF_BOIF) == 0) {
693 priv->can.state = CAN_STATE_ERROR_PASSIVE;
694 ++priv->can.can_stats.error_passive;
695 cf->can_id |= CAN_ERR_CRTL;
696 if (hecc_read(priv, HECC_CANTEC) > 127)
697 cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
698 if (hecc_read(priv, HECC_CANREC) > 127)
699 cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
700 }
701 hecc_set_bit(priv, HECC_CANES, HECC_CANES_EP);
702 dev_dbg(priv->ndev->dev.parent, "Error passive interrupt\n");
703 hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_CCR);
704 }
705
706 /*
707 * Need to check busoff condition in error status register too to
708 * ensure warning interrupts don't hog the system
709 */
710 if ((int_status & HECC_CANGIF_BOIF) || (err_status & HECC_CANES_BO)) {
711 priv->can.state = CAN_STATE_BUS_OFF;
712 cf->can_id |= CAN_ERR_BUSOFF;
713 hecc_set_bit(priv, HECC_CANES, HECC_CANES_BO);
714 hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_CCR);
715 /* Disable all interrupts in bus-off to avoid int hog */
716 hecc_write(priv, HECC_CANGIM, 0);
717 can_bus_off(ndev);
718 }
719
720 if (err_status & HECC_BUS_ERROR) {
721 ++priv->can.can_stats.bus_error;
722 cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;
723 cf->data[2] |= CAN_ERR_PROT_UNSPEC;
724 if (err_status & HECC_CANES_FE) {
725 hecc_set_bit(priv, HECC_CANES, HECC_CANES_FE);
726 cf->data[2] |= CAN_ERR_PROT_FORM;
727 }
728 if (err_status & HECC_CANES_BE) {
729 hecc_set_bit(priv, HECC_CANES, HECC_CANES_BE);
730 cf->data[2] |= CAN_ERR_PROT_BIT;
731 }
732 if (err_status & HECC_CANES_SE) {
733 hecc_set_bit(priv, HECC_CANES, HECC_CANES_SE);
734 cf->data[2] |= CAN_ERR_PROT_STUFF;
735 }
736 if (err_status & HECC_CANES_CRCE) {
737 hecc_set_bit(priv, HECC_CANES, HECC_CANES_CRCE);
738 cf->data[2] |= CAN_ERR_PROT_LOC_CRC_SEQ |
739 CAN_ERR_PROT_LOC_CRC_DEL;
740 }
741 if (err_status & HECC_CANES_ACKE) {
742 hecc_set_bit(priv, HECC_CANES, HECC_CANES_ACKE);
743 cf->data[2] |= CAN_ERR_PROT_LOC_ACK |
744 CAN_ERR_PROT_LOC_ACK_DEL;
745 }
746 }
747
748 netif_receive_skb(skb);
749 stats->rx_packets++;
750 stats->rx_bytes += cf->can_dlc;
751 return 0;
752}
753
754static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id)
755{
756 struct net_device *ndev = (struct net_device *)dev_id;
757 struct ti_hecc_priv *priv = netdev_priv(ndev);
758 struct net_device_stats *stats = &ndev->stats;
759 u32 mbxno, mbx_mask, int_status, err_status;
760 unsigned long ack, flags;
761
762 int_status = hecc_read(priv,
763 (priv->int_line) ? HECC_CANGIF1 : HECC_CANGIF0);
764
765 if (!int_status)
766 return IRQ_NONE;
767
768 err_status = hecc_read(priv, HECC_CANES);
769 if (err_status & (HECC_BUS_ERROR | HECC_CANES_BO |
770 HECC_CANES_EP | HECC_CANES_EW))
771 ti_hecc_error(ndev, int_status, err_status);
772
773 if (int_status & HECC_CANGIF_GMIF) {
774 while (priv->tx_tail - priv->tx_head > 0) {
775 mbxno = get_tx_tail_mb(priv);
776 mbx_mask = BIT(mbxno);
777 if (!(mbx_mask & hecc_read(priv, HECC_CANTA)))
778 break;
779 hecc_clear_bit(priv, HECC_CANMIM, mbx_mask);
780 hecc_write(priv, HECC_CANTA, mbx_mask);
781 spin_lock_irqsave(&priv->mbx_lock, flags);
782 hecc_clear_bit(priv, HECC_CANME, mbx_mask);
783 spin_unlock_irqrestore(&priv->mbx_lock, flags);
784 stats->tx_bytes += hecc_read_mbx(priv, mbxno,
785 HECC_CANMCF) & 0xF;
786 stats->tx_packets++;
787 can_get_echo_skb(ndev, mbxno);
788 --priv->tx_tail;
789 }
790
791 /* restart queue if wrap-up or if queue stalled on last pkt */
792 if (((priv->tx_head == priv->tx_tail) &&
793 ((priv->tx_head & HECC_TX_MASK) != HECC_TX_MASK)) ||
794 (((priv->tx_tail & HECC_TX_MASK) == HECC_TX_MASK) &&
795 ((priv->tx_head & HECC_TX_MASK) == HECC_TX_MASK)))
796 netif_wake_queue(ndev);
797
798 /* Disable RX mailbox interrupts and let NAPI reenable them */
799 if (hecc_read(priv, HECC_CANRMP)) {
800 ack = hecc_read(priv, HECC_CANMIM);
801 ack &= BIT(HECC_MAX_TX_MBOX) - 1;
802 hecc_write(priv, HECC_CANMIM, ack);
803 napi_schedule(&priv->napi);
804 }
805 }
806
807 /* clear all interrupt conditions - read back to avoid spurious ints */
808 if (priv->int_line) {
809 hecc_write(priv, HECC_CANGIF1, HECC_SET_REG);
810 int_status = hecc_read(priv, HECC_CANGIF1);
811 } else {
812 hecc_write(priv, HECC_CANGIF0, HECC_SET_REG);
813 int_status = hecc_read(priv, HECC_CANGIF0);
814 }
815
816 return IRQ_HANDLED;
817}
818
819static int ti_hecc_open(struct net_device *ndev)
820{
821 struct ti_hecc_priv *priv = netdev_priv(ndev);
822 int err;
823
824 err = request_irq(ndev->irq, ti_hecc_interrupt, IRQF_SHARED,
825 ndev->name, ndev);
826 if (err) {
827 dev_err(ndev->dev.parent, "error requesting interrupt\n");
828 return err;
829 }
830
831 ti_hecc_transceiver_switch(priv, 1);
832
833 /* Open common can device */
834 err = open_candev(ndev);
835 if (err) {
836 dev_err(ndev->dev.parent, "open_candev() failed %d\n", err);
837 ti_hecc_transceiver_switch(priv, 0);
838 free_irq(ndev->irq, ndev);
839 return err;
840 }
841
842 ti_hecc_start(ndev);
843 napi_enable(&priv->napi);
844 netif_start_queue(ndev);
845
846 return 0;
847}
848
849static int ti_hecc_close(struct net_device *ndev)
850{
851 struct ti_hecc_priv *priv = netdev_priv(ndev);
852
853 netif_stop_queue(ndev);
854 napi_disable(&priv->napi);
855 ti_hecc_stop(ndev);
856 free_irq(ndev->irq, ndev);
857 close_candev(ndev);
858 ti_hecc_transceiver_switch(priv, 0);
859
860 return 0;
861}
862
863static const struct net_device_ops ti_hecc_netdev_ops = {
864 .ndo_open = ti_hecc_open,
865 .ndo_stop = ti_hecc_close,
866 .ndo_start_xmit = ti_hecc_xmit,
867};
868
869static int ti_hecc_probe(struct platform_device *pdev)
870{
871	struct net_device *ndev = NULL;
872 struct ti_hecc_priv *priv;
873 struct ti_hecc_platform_data *pdata;
874 struct resource *mem, *irq;
875 void __iomem *addr;
876 int err = -ENODEV;
877
878 pdata = pdev->dev.platform_data;
879 if (!pdata) {
880 dev_err(&pdev->dev, "No platform data\n");
881 goto probe_exit;
882 }
883
884 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
885 if (!mem) {
886 dev_err(&pdev->dev, "No mem resources\n");
887 goto probe_exit;
888 }
889 irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
890 if (!irq) {
891 dev_err(&pdev->dev, "No irq resource\n");
892 goto probe_exit;
893 }
894 if (!request_mem_region(mem->start, resource_size(mem), pdev->name)) {
895 dev_err(&pdev->dev, "HECC region already claimed\n");
896 err = -EBUSY;
897 goto probe_exit;
898 }
899 addr = ioremap(mem->start, resource_size(mem));
900 if (!addr) {
901 dev_err(&pdev->dev, "ioremap failed\n");
902 err = -ENOMEM;
903 goto probe_exit_free_region;
904 }
905
906 ndev = alloc_candev(sizeof(struct ti_hecc_priv), HECC_MAX_TX_MBOX);
907 if (!ndev) {
908 dev_err(&pdev->dev, "alloc_candev failed\n");
909 err = -ENOMEM;
910 goto probe_exit_iounmap;
911 }
912
913 priv = netdev_priv(ndev);
914 priv->ndev = ndev;
915 priv->base = addr;
916 priv->scc_ram_offset = pdata->scc_ram_offset;
917 priv->hecc_ram_offset = pdata->hecc_ram_offset;
918 priv->mbx_offset = pdata->mbx_offset;
919 priv->int_line = pdata->int_line;
920 priv->transceiver_switch = pdata->transceiver_switch;
921
922 priv->can.bittiming_const = &ti_hecc_bittiming_const;
923 priv->can.do_set_mode = ti_hecc_do_set_mode;
924 priv->can.do_get_state = ti_hecc_get_state;
925 priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
926
927 ndev->irq = irq->start;
928 ndev->flags |= IFF_ECHO;
929 platform_set_drvdata(pdev, ndev);
930 SET_NETDEV_DEV(ndev, &pdev->dev);
931 ndev->netdev_ops = &ti_hecc_netdev_ops;
932
933 priv->clk = clk_get(&pdev->dev, "hecc_ck");
934 if (IS_ERR(priv->clk)) {
935 dev_err(&pdev->dev, "No clock available\n");
936 err = PTR_ERR(priv->clk);
937 priv->clk = NULL;
938 goto probe_exit_candev;
939 }
940 priv->can.clock.freq = clk_get_rate(priv->clk);
941 netif_napi_add(ndev, &priv->napi, ti_hecc_rx_poll,
942 HECC_DEF_NAPI_WEIGHT);
943
944 clk_enable(priv->clk);
945 err = register_candev(ndev);
946 if (err) {
947 dev_err(&pdev->dev, "register_candev() failed\n");
948 goto probe_exit_clk;
949 }
950 dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%u)\n",
951 priv->base, (u32) ndev->irq);
952
953 return 0;
954
955probe_exit_clk:
956 clk_put(priv->clk);
957probe_exit_candev:
958 free_candev(ndev);
959probe_exit_iounmap:
960 iounmap(addr);
961probe_exit_free_region:
962 release_mem_region(mem->start, resource_size(mem));
963probe_exit:
964 return err;
965}
966
967static int __devexit ti_hecc_remove(struct platform_device *pdev)
968{
969 struct resource *res;
970 struct net_device *ndev = platform_get_drvdata(pdev);
971 struct ti_hecc_priv *priv = netdev_priv(ndev);
972
973	unregister_candev(ndev);
974	clk_disable(priv->clk);
975	clk_put(priv->clk);
976	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
977	iounmap(priv->base);
978	release_mem_region(res->start, resource_size(res));
979	free_candev(ndev);
980	platform_set_drvdata(pdev, NULL);
981
982 return 0;
983}
984
985
986#ifdef CONFIG_PM
987static int ti_hecc_suspend(struct platform_device *pdev, pm_message_t state)
988{
989 struct net_device *dev = platform_get_drvdata(pdev);
990 struct ti_hecc_priv *priv = netdev_priv(dev);
991
992 if (netif_running(dev)) {
993 netif_stop_queue(dev);
994 netif_device_detach(dev);
995 }
996
997 hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_PDR);
998 priv->can.state = CAN_STATE_SLEEPING;
999
1000 clk_disable(priv->clk);
1001
1002 return 0;
1003}
1004
1005static int ti_hecc_resume(struct platform_device *pdev)
1006{
1007 struct net_device *dev = platform_get_drvdata(pdev);
1008 struct ti_hecc_priv *priv = netdev_priv(dev);
1009
1010 clk_enable(priv->clk);
1011
1012 hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_PDR);
1013 priv->can.state = CAN_STATE_ERROR_ACTIVE;
1014
1015 if (netif_running(dev)) {
1016 netif_device_attach(dev);
1017 netif_start_queue(dev);
1018 }
1019
1020 return 0;
1021}
1022#else
1023#define ti_hecc_suspend NULL
1024#define ti_hecc_resume NULL
1025#endif
1026
1027/* TI HECC netdevice driver: platform driver structure */
1028static struct platform_driver ti_hecc_driver = {
1029 .driver = {
1030 .name = DRV_NAME,
1031 .owner = THIS_MODULE,
1032 },
1033 .probe = ti_hecc_probe,
1034 .remove = __devexit_p(ti_hecc_remove),
1035 .suspend = ti_hecc_suspend,
1036 .resume = ti_hecc_resume,
1037};
1038
1039static int __init ti_hecc_init_driver(void)
1040{
1041 printk(KERN_INFO DRV_DESC "\n");
1042 return platform_driver_register(&ti_hecc_driver);
1043}
1044
1045static void __exit ti_hecc_exit_driver(void)
1046{
1047 printk(KERN_INFO DRV_DESC " unloaded\n");
1048 platform_driver_unregister(&ti_hecc_driver);
1049}
1050
1051module_init(ti_hecc_init_driver);
1052module_exit(ti_hecc_exit_driver);
1053
1054MODULE_AUTHOR("Anant Gole <anantgole@ti.com>");
1055MODULE_LICENSE("GPL v2");
1056MODULE_DESCRIPTION(DRV_DESC);
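
Editor's note: ti_hecc_probe() above pulls scc_ram_offset, hecc_ram_offset, mbx_offset, int_line and the optional transceiver_switch hook out of platform data, so a board file has to register a matching platform device. The sketch below shows one plausible way to do that; the header path, base address, IRQ number and all offsets are illustrative assumptions rather than values from any real OMAP3 board, and the device name merely has to match the driver's DRV_NAME (presumably "ti_hecc").

/*
 * Hypothetical board-file snippet.  Addresses, offsets and the IRQ
 * number are placeholders; only the field names correspond to what
 * ti_hecc_probe() reads above.
 */
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/can/platform/ti_hecc.h>	/* assumed header location */

static struct resource board_hecc_resources[] = {
	{
		.start	= 0x5c050000,		/* placeholder HECC base */
		.end	= 0x5c050000 + 0x3fff,
		.flags	= IORESOURCE_MEM,
	}, {
		.start	= 24,			/* placeholder interrupt */
		.flags	= IORESOURCE_IRQ,
	},
};

static struct ti_hecc_platform_data board_hecc_pdata = {
	.scc_ram_offset		= 0x3000,	/* placeholder */
	.hecc_ram_offset	= 0x3000,	/* placeholder */
	.mbx_offset		= 0x2000,	/* placeholder */
	.int_line		= 0,		/* use HECC interrupt line 0 */
	/* .transceiver_switch	= board_can_phy_power,	optional hook */
};

static struct platform_device board_hecc_device = {
	.name		= "ti_hecc",		/* must match the driver's DRV_NAME */
	.id		= -1,
	.num_resources	= ARRAY_SIZE(board_hecc_resources),
	.resource	= board_hecc_resources,
	.dev		= {
		.platform_data	= &board_hecc_pdata,
	},
};

The board code would then call platform_device_register(&board_hecc_device) and also has to provide a clock named "hecc_ck", since that is what the clk_get() call in the probe routine looks up.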
diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig
index bbc78e0b8a15..97ff6febad63 100644
--- a/drivers/net/can/usb/Kconfig
+++ b/drivers/net/can/usb/Kconfig
@@ -5,6 +5,6 @@ config CAN_EMS_USB
5 tristate "EMS CPC-USB/ARM7 CAN/USB interface" 5 tristate "EMS CPC-USB/ARM7 CAN/USB interface"
6 ---help--- 6 ---help---
7 This driver is for the one channel CPC-USB/ARM7 CAN/USB interface 7 This driver is for the one channel CPC-USB/ARM7 CAN/USB interface
8 from from EMS Dr. Thomas Wuensche (http://www.ems-wuensche.de). 8 from EMS Dr. Thomas Wuensche (http://www.ems-wuensche.de).
9 9
10endmenu 10endmenu
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index abdbd9c2b788..d800b598ae3d 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -232,7 +232,7 @@ MODULE_DEVICE_TABLE(usb, ems_usb_table);
232#define INTR_IN_BUFFER_SIZE 4 232#define INTR_IN_BUFFER_SIZE 4
233 233
234#define MAX_RX_URBS 10 234#define MAX_RX_URBS 10
235#define MAX_TX_URBS CAN_ECHO_SKB_MAX 235#define MAX_TX_URBS 10
236 236
237struct ems_usb; 237struct ems_usb;
238 238
@@ -311,23 +311,19 @@ static void ems_usb_rx_can_msg(struct ems_usb *dev, struct ems_cpc_msg *msg)
311 int i; 311 int i;
312 struct net_device_stats *stats = &dev->netdev->stats; 312 struct net_device_stats *stats = &dev->netdev->stats;
313 313
314 skb = netdev_alloc_skb(dev->netdev, sizeof(struct can_frame)); 314 skb = alloc_can_skb(dev->netdev, &cf);
315 if (skb == NULL) 315 if (skb == NULL)
316 return; 316 return;
317 317
318 skb->protocol = htons(ETH_P_CAN);
319
320 cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
321
322 cf->can_id = le32_to_cpu(msg->msg.can_msg.id); 318 cf->can_id = le32_to_cpu(msg->msg.can_msg.id);
323 cf->can_dlc = min_t(u8, msg->msg.can_msg.length, 8); 319 cf->can_dlc = get_can_dlc(msg->msg.can_msg.length & 0xF);
324 320
325 if (msg->type == CPC_MSG_TYPE_EXT_CAN_FRAME 321 if (msg->type == CPC_MSG_TYPE_EXT_CAN_FRAME ||
326 || msg->type == CPC_MSG_TYPE_EXT_RTR_FRAME) 322 msg->type == CPC_MSG_TYPE_EXT_RTR_FRAME)
327 cf->can_id |= CAN_EFF_FLAG; 323 cf->can_id |= CAN_EFF_FLAG;
328 324
329 if (msg->type == CPC_MSG_TYPE_RTR_FRAME 325 if (msg->type == CPC_MSG_TYPE_RTR_FRAME ||
330 || msg->type == CPC_MSG_TYPE_EXT_RTR_FRAME) { 326 msg->type == CPC_MSG_TYPE_EXT_RTR_FRAME) {
331 cf->can_id |= CAN_RTR_FLAG; 327 cf->can_id |= CAN_RTR_FLAG;
332 } else { 328 } else {
333 for (i = 0; i < cf->can_dlc; i++) 329 for (i = 0; i < cf->can_dlc; i++)
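
Editor's note: this hunk (and the error-frame hunk that follows) drops the open-coded skb setup in favour of the shared alloc_can_skb()/alloc_can_err_skb() helpers added to drivers/net/can/dev.c by this series, and clamps the DLC with get_can_dlc(), which is essentially min_t(__u8, dlc, 8). The sketch below is reconstructed from the lines being removed here, not quoted from dev.c, so the real helpers may differ in detail.

/* Sketch only: reconstructed from the open-coded sequences removed above. */
struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb(dev, sizeof(struct can_frame));
	if (unlikely(!skb))
		return NULL;

	skb->protocol = htons(ETH_P_CAN);
	*cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
	memset(*cf, 0, sizeof(struct can_frame));

	return skb;
}

struct sk_buff *alloc_can_err_skb(struct net_device *dev, struct can_frame **cf)
{
	struct sk_buff *skb;

	skb = alloc_can_skb(dev, cf);
	if (unlikely(!skb))
		return NULL;

	(*cf)->can_id = CAN_ERR_FLAG;
	(*cf)->can_dlc = CAN_ERR_DLC;

	return skb;
}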
@@ -346,18 +342,10 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)
346 struct sk_buff *skb; 342 struct sk_buff *skb;
347 struct net_device_stats *stats = &dev->netdev->stats; 343 struct net_device_stats *stats = &dev->netdev->stats;
348 344
349 skb = netdev_alloc_skb(dev->netdev, sizeof(struct can_frame)); 345 skb = alloc_can_err_skb(dev->netdev, &cf);
350 if (skb == NULL) 346 if (skb == NULL)
351 return; 347 return;
352 348
353 skb->protocol = htons(ETH_P_CAN);
354
355 cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
356 memset(cf, 0, sizeof(struct can_frame));
357
358 cf->can_id = CAN_ERR_FLAG;
359 cf->can_dlc = CAN_ERR_DLC;
360
361 if (msg->type == CPC_MSG_TYPE_CAN_STATE) { 349 if (msg->type == CPC_MSG_TYPE_CAN_STATE) {
362 u8 state = msg->msg.can_state; 350 u8 state = msg->msg.can_state;
363 351
@@ -779,6 +767,9 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
779 size_t size = CPC_HEADER_SIZE + CPC_MSG_HEADER_LEN 767 size_t size = CPC_HEADER_SIZE + CPC_MSG_HEADER_LEN
780 + sizeof(struct cpc_can_msg); 768 + sizeof(struct cpc_can_msg);
781 769
770 if (can_dropped_invalid_skb(netdev, skb))
771 return NETDEV_TX_OK;
772
782 /* create a URB, and a buffer for it, and copy the data to the URB */ 773 /* create a URB, and a buffer for it, and copy the data to the URB */
783 urb = usb_alloc_urb(0, GFP_ATOMIC); 774 urb = usb_alloc_urb(0, GFP_ATOMIC);
784 if (!urb) { 775 if (!urb) {
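
Editor's note: both this transmit path and vcan_tx() further down now bail out early through can_dropped_invalid_skb(). Judging from how the two callers use it, the helper amounts to something like the sketch below (not quoted from the CAN dev header): reject anything that is not a well-formed struct can_frame, free it, and account it as a dropped TX packet.

/* Sketch: drop malformed CAN skbs and account them as tx_dropped. */
static inline int can_dropped_invalid_skb(struct net_device *dev,
					  struct sk_buff *skb)
{
	const struct can_frame *cf = (struct can_frame *)skb->data;

	if (unlikely(skb->len != sizeof(*cf) || cf->can_dlc > 8)) {
		kfree_skb(skb);
		dev->stats.tx_dropped++;
		return 1;	/* caller simply returns NETDEV_TX_OK */
	}

	return 0;
}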
@@ -885,9 +876,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
885 return NETDEV_TX_OK; 876 return NETDEV_TX_OK;
886 877
887nomem: 878nomem:
888 if (skb) 879 dev_kfree_skb(skb);
889 dev_kfree_skb(skb);
890
891 stats->tx_dropped++; 880 stats->tx_dropped++;
892 881
893 return NETDEV_TX_OK; 882 return NETDEV_TX_OK;
@@ -1015,9 +1004,9 @@ static int ems_usb_probe(struct usb_interface *intf,
1015 struct ems_usb *dev; 1004 struct ems_usb *dev;
1016 int i, err = -ENOMEM; 1005 int i, err = -ENOMEM;
1017 1006
1018 netdev = alloc_candev(sizeof(struct ems_usb)); 1007 netdev = alloc_candev(sizeof(struct ems_usb), MAX_TX_URBS);
1019 if (!netdev) { 1008 if (!netdev) {
1020 dev_err(netdev->dev.parent, "Couldn't alloc candev\n"); 1009 dev_err(&intf->dev, "ems_usb: Couldn't alloc candev\n");
1021 return -ENOMEM; 1010 return -ENOMEM;
1022 } 1011 }
1023 1012
@@ -1031,8 +1020,7 @@ static int ems_usb_probe(struct usb_interface *intf,
1031 dev->can.bittiming_const = &ems_usb_bittiming_const; 1020 dev->can.bittiming_const = &ems_usb_bittiming_const;
1032 dev->can.do_set_bittiming = ems_usb_set_bittiming; 1021 dev->can.do_set_bittiming = ems_usb_set_bittiming;
1033 dev->can.do_set_mode = ems_usb_set_mode; 1022 dev->can.do_set_mode = ems_usb_set_mode;
1034 1023 dev->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
1035 netdev->flags |= IFF_ECHO; /* we support local echo */
1036 1024
1037 netdev->netdev_ops = &ems_usb_netdev_ops; 1025 netdev->netdev_ops = &ems_usb_netdev_ops;
1038 1026
@@ -1048,20 +1036,20 @@ static int ems_usb_probe(struct usb_interface *intf,
1048 1036
1049 dev->intr_urb = usb_alloc_urb(0, GFP_KERNEL); 1037 dev->intr_urb = usb_alloc_urb(0, GFP_KERNEL);
1050 if (!dev->intr_urb) { 1038 if (!dev->intr_urb) {
1051 dev_err(netdev->dev.parent, "Couldn't alloc intr URB\n"); 1039 dev_err(&intf->dev, "Couldn't alloc intr URB\n");
1052 goto cleanup_candev; 1040 goto cleanup_candev;
1053 } 1041 }
1054 1042
1055 dev->intr_in_buffer = kzalloc(INTR_IN_BUFFER_SIZE, GFP_KERNEL); 1043 dev->intr_in_buffer = kzalloc(INTR_IN_BUFFER_SIZE, GFP_KERNEL);
1056 if (!dev->intr_in_buffer) { 1044 if (!dev->intr_in_buffer) {
1057 dev_err(netdev->dev.parent, "Couldn't alloc Intr buffer\n"); 1045 dev_err(&intf->dev, "Couldn't alloc Intr buffer\n");
1058 goto cleanup_intr_urb; 1046 goto cleanup_intr_urb;
1059 } 1047 }
1060 1048
1061 dev->tx_msg_buffer = kzalloc(CPC_HEADER_SIZE + 1049 dev->tx_msg_buffer = kzalloc(CPC_HEADER_SIZE +
1062 sizeof(struct ems_cpc_msg), GFP_KERNEL); 1050 sizeof(struct ems_cpc_msg), GFP_KERNEL);
1063 if (!dev->tx_msg_buffer) { 1051 if (!dev->tx_msg_buffer) {
1064 dev_err(netdev->dev.parent, "Couldn't alloc Tx buffer\n"); 1052 dev_err(&intf->dev, "Couldn't alloc Tx buffer\n");
1065 goto cleanup_intr_in_buffer; 1053 goto cleanup_intr_in_buffer;
1066 } 1054 }
1067 1055
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index 80ac56313981..a30b8f480f61 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -47,6 +47,8 @@
47#include <linux/if_arp.h> 47#include <linux/if_arp.h>
48#include <linux/if_ether.h> 48#include <linux/if_ether.h>
49#include <linux/can.h> 49#include <linux/can.h>
50#include <linux/can/dev.h>
51#include <linux/slab.h>
50#include <net/rtnetlink.h> 52#include <net/rtnetlink.h>
51 53
52static __initdata const char banner[] = 54static __initdata const char banner[] =
@@ -70,10 +72,11 @@ MODULE_PARM_DESC(echo, "Echo sent frames (for testing). Default: 0 (Off)");
70 72
71static void vcan_rx(struct sk_buff *skb, struct net_device *dev) 73static void vcan_rx(struct sk_buff *skb, struct net_device *dev)
72{ 74{
75 struct can_frame *cf = (struct can_frame *)skb->data;
73 struct net_device_stats *stats = &dev->stats; 76 struct net_device_stats *stats = &dev->stats;
74 77
75 stats->rx_packets++; 78 stats->rx_packets++;
76 stats->rx_bytes += skb->len; 79 stats->rx_bytes += cf->can_dlc;
77 80
78 skb->protocol = htons(ETH_P_CAN); 81 skb->protocol = htons(ETH_P_CAN);
79 skb->pkt_type = PACKET_BROADCAST; 82 skb->pkt_type = PACKET_BROADCAST;
@@ -85,11 +88,15 @@ static void vcan_rx(struct sk_buff *skb, struct net_device *dev)
85 88
86static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev) 89static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
87{ 90{
91 struct can_frame *cf = (struct can_frame *)skb->data;
88 struct net_device_stats *stats = &dev->stats; 92 struct net_device_stats *stats = &dev->stats;
89 int loop; 93 int loop;
90 94
95 if (can_dropped_invalid_skb(dev, skb))
96 return NETDEV_TX_OK;
97
91 stats->tx_packets++; 98 stats->tx_packets++;
92 stats->tx_bytes += skb->len; 99 stats->tx_bytes += cf->can_dlc;
93 100
94 /* set flag whether this packet has to be looped back */ 101 /* set flag whether this packet has to be looped back */
95 loop = skb->pkt_type == PACKET_LOOPBACK; 102 loop = skb->pkt_type == PACKET_LOOPBACK;
@@ -103,7 +110,7 @@ static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
103 * CAN core already did the echo for us 110 * CAN core already did the echo for us
104 */ 111 */
105 stats->rx_packets++; 112 stats->rx_packets++;
106 stats->rx_bytes += skb->len; 113 stats->rx_bytes += cf->can_dlc;
107 } 114 }
108 kfree_skb(skb); 115 kfree_skb(skb);
109 return NETDEV_TX_OK; 116 return NETDEV_TX_OK;
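
Editor's note: with these changes vcan_tx() only ever sees well-formed struct can_frame payloads and counts bytes by can_dlc rather than skb->len. After modprobe vcan, "ip link add dev vcan0 type vcan" and "ip link set up vcan0", a frame sent by the minimal SocketCAN program below ends up in vcan_tx() above; error handling is omitted for brevity and the interface name is an assumption.

/* Minimal RAW-socket sender; error handling omitted for brevity. */
#include <linux/can.h>
#include <linux/can/raw.h>
#include <net/if.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_can addr = { .can_family = AF_CAN };
	struct can_frame frame = { .can_id = 0x123, .can_dlc = 2,
				   .data = { 0xde, 0xad } };
	struct ifreq ifr;
	int s = socket(PF_CAN, SOCK_RAW, CAN_RAW);

	memset(&ifr, 0, sizeof(ifr));
	strcpy(ifr.ifr_name, "vcan0");		/* assumed interface name */
	ioctl(s, SIOCGIFINDEX, &ifr);
	addr.can_ifindex = ifr.ifr_ifindex;
	bind(s, (struct sockaddr *)&addr, sizeof(addr));

	write(s, &frame, sizeof(frame));	/* arrives at vcan_tx() */
	close(s);
	return 0;
}

A candump on the same interface then shows the looped-back frame, and rx_bytes/tx_bytes grow by the 2-byte can_dlc rather than by the full skb length.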