aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/can
diff options
context:
space:
mode:
authorBenjamin Herrenschmidt <benh@kernel.crashing.org>2009-12-09 01:14:38 -0500
committerBenjamin Herrenschmidt <benh@kernel.crashing.org>2009-12-09 01:14:38 -0500
commitbcd6acd51f3d4d1ada201e9bc5c40a31d6d80c71 (patch)
tree2f6dffd2d3e4dd67355a224de7e7a960335a92fd /drivers/net/can
parent11c34c7deaeeebcee342cbc35e1bb2a6711b2431 (diff)
parent3ff6a468b45b5dfeb0e903e56f4eb27d34b2437c (diff)
Merge commit 'origin/master' into next
Conflicts: include/linux/kvm.h
Diffstat (limited to 'drivers/net/can')
-rw-r--r--drivers/net/can/Kconfig15
-rw-r--r--drivers/net/can/Makefile3
-rw-r--r--drivers/net/can/at91_can.c34
-rw-r--r--drivers/net/can/dev.c76
-rw-r--r--drivers/net/can/mcp251x.c1166
-rw-r--r--drivers/net/can/mscan/Kconfig23
-rw-r--r--drivers/net/can/mscan/Makefile5
-rw-r--r--drivers/net/can/mscan/mpc5xxx_can.c259
-rw-r--r--drivers/net/can/mscan/mscan.c668
-rw-r--r--drivers/net/can/mscan/mscan.h296
-rw-r--r--drivers/net/can/sja1000/sja1000.c19
-rw-r--r--drivers/net/can/sja1000/sja1000.h2
-rw-r--r--drivers/net/can/ti_hecc.c993
-rw-r--r--drivers/net/can/usb/ems_usb.c28
14 files changed, 3506 insertions, 81 deletions
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 772f6d2489ce..bb803fa1e6a7 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -41,6 +41,21 @@ config CAN_AT91
41 ---help--- 41 ---help---
42 This is a driver for the SoC CAN controller in Atmel's AT91SAM9263. 42 This is a driver for the SoC CAN controller in Atmel's AT91SAM9263.
43 43
44config CAN_TI_HECC
45 depends on CAN_DEV && ARCH_OMAP3
46 tristate "TI High End CAN Controller"
47 ---help---
48 Driver for TI HECC (High End CAN Controller) module found on many
49 TI devices. The device specifications are available from www.ti.com
50
51config CAN_MCP251X
52 tristate "Microchip MCP251x SPI CAN controllers"
53 depends on CAN_DEV && SPI
54 ---help---
55 Driver for the Microchip MCP251x SPI CAN controllers.
56
57source "drivers/net/can/mscan/Kconfig"
58
44source "drivers/net/can/sja1000/Kconfig" 59source "drivers/net/can/sja1000/Kconfig"
45 60
46source "drivers/net/can/usb/Kconfig" 61source "drivers/net/can/usb/Kconfig"
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 0dea62721f2f..56899fef1c6a 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -10,6 +10,9 @@ can-dev-y := dev.o
10obj-y += usb/ 10obj-y += usb/
11 11
12obj-$(CONFIG_CAN_SJA1000) += sja1000/ 12obj-$(CONFIG_CAN_SJA1000) += sja1000/
13obj-$(CONFIG_CAN_MSCAN) += mscan/
13obj-$(CONFIG_CAN_AT91) += at91_can.o 14obj-$(CONFIG_CAN_AT91) += at91_can.o
15obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o
16obj-$(CONFIG_CAN_MCP251X) += mcp251x.o
14 17
15ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG 18ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index f67ae285a35a..cbe3fce53e3b 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -221,38 +221,6 @@ static inline void set_mb_mode(const struct at91_priv *priv, unsigned int mb,
221 set_mb_mode_prio(priv, mb, mode, 0); 221 set_mb_mode_prio(priv, mb, mode, 0);
222} 222}
223 223
224static struct sk_buff *alloc_can_skb(struct net_device *dev,
225 struct can_frame **cf)
226{
227 struct sk_buff *skb;
228
229 skb = netdev_alloc_skb(dev, sizeof(struct can_frame));
230 if (unlikely(!skb))
231 return NULL;
232
233 skb->protocol = htons(ETH_P_CAN);
234 skb->ip_summed = CHECKSUM_UNNECESSARY;
235 *cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
236
237 return skb;
238}
239
240static struct sk_buff *alloc_can_err_skb(struct net_device *dev,
241 struct can_frame **cf)
242{
243 struct sk_buff *skb;
244
245 skb = alloc_can_skb(dev, cf);
246 if (unlikely(!skb))
247 return NULL;
248
249 memset(*cf, 0, sizeof(struct can_frame));
250 (*cf)->can_id = CAN_ERR_FLAG;
251 (*cf)->can_dlc = CAN_ERR_DLC;
252
253 return skb;
254}
255
256/* 224/*
257 * Swtich transceiver on or off 225 * Swtich transceiver on or off
258 */ 226 */
@@ -1087,7 +1055,7 @@ static int __init at91_can_probe(struct platform_device *pdev)
1087 goto exit_release; 1055 goto exit_release;
1088 } 1056 }
1089 1057
1090 dev = alloc_candev(sizeof(struct at91_priv)); 1058 dev = alloc_candev(sizeof(struct at91_priv), AT91_MB_TX_NUM);
1091 if (!dev) { 1059 if (!dev) {
1092 err = -ENOMEM; 1060 err = -ENOMEM;
1093 goto exit_iounmap; 1061 goto exit_iounmap;
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 2868fe842a41..c1bb29f0322b 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -245,7 +245,7 @@ static void can_flush_echo_skb(struct net_device *dev)
245 struct net_device_stats *stats = &dev->stats; 245 struct net_device_stats *stats = &dev->stats;
246 int i; 246 int i;
247 247
248 for (i = 0; i < CAN_ECHO_SKB_MAX; i++) { 248 for (i = 0; i < priv->echo_skb_max; i++) {
249 if (priv->echo_skb[i]) { 249 if (priv->echo_skb[i]) {
250 kfree_skb(priv->echo_skb[i]); 250 kfree_skb(priv->echo_skb[i]);
251 priv->echo_skb[i] = NULL; 251 priv->echo_skb[i] = NULL;
@@ -262,10 +262,13 @@ static void can_flush_echo_skb(struct net_device *dev)
262 * of the device driver. The driver must protect access to 262 * of the device driver. The driver must protect access to
263 * priv->echo_skb, if necessary. 263 * priv->echo_skb, if necessary.
264 */ 264 */
265void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev, int idx) 265void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
266 unsigned int idx)
266{ 267{
267 struct can_priv *priv = netdev_priv(dev); 268 struct can_priv *priv = netdev_priv(dev);
268 269
270 BUG_ON(idx >= priv->echo_skb_max);
271
269 /* check flag whether this packet has to be looped back */ 272 /* check flag whether this packet has to be looped back */
270 if (!(dev->flags & IFF_ECHO) || skb->pkt_type != PACKET_LOOPBACK) { 273 if (!(dev->flags & IFF_ECHO) || skb->pkt_type != PACKET_LOOPBACK) {
271 kfree_skb(skb); 274 kfree_skb(skb);
@@ -311,10 +314,12 @@ EXPORT_SYMBOL_GPL(can_put_echo_skb);
311 * is handled in the device driver. The driver must protect 314 * is handled in the device driver. The driver must protect
312 * access to priv->echo_skb, if necessary. 315 * access to priv->echo_skb, if necessary.
313 */ 316 */
314void can_get_echo_skb(struct net_device *dev, int idx) 317void can_get_echo_skb(struct net_device *dev, unsigned int idx)
315{ 318{
316 struct can_priv *priv = netdev_priv(dev); 319 struct can_priv *priv = netdev_priv(dev);
317 320
321 BUG_ON(idx >= priv->echo_skb_max);
322
318 if (priv->echo_skb[idx]) { 323 if (priv->echo_skb[idx]) {
319 netif_rx(priv->echo_skb[idx]); 324 netif_rx(priv->echo_skb[idx]);
320 priv->echo_skb[idx] = NULL; 325 priv->echo_skb[idx] = NULL;
@@ -327,10 +332,12 @@ EXPORT_SYMBOL_GPL(can_get_echo_skb);
327 * 332 *
328 * The function is typically called when TX failed. 333 * The function is typically called when TX failed.
329 */ 334 */
330void can_free_echo_skb(struct net_device *dev, int idx) 335void can_free_echo_skb(struct net_device *dev, unsigned int idx)
331{ 336{
332 struct can_priv *priv = netdev_priv(dev); 337 struct can_priv *priv = netdev_priv(dev);
333 338
339 BUG_ON(idx >= priv->echo_skb_max);
340
334 if (priv->echo_skb[idx]) { 341 if (priv->echo_skb[idx]) {
335 kfree_skb(priv->echo_skb[idx]); 342 kfree_skb(priv->echo_skb[idx]);
336 priv->echo_skb[idx] = NULL; 343 priv->echo_skb[idx] = NULL;
@@ -359,17 +366,12 @@ void can_restart(unsigned long data)
359 can_flush_echo_skb(dev); 366 can_flush_echo_skb(dev);
360 367
361 /* send restart message upstream */ 368 /* send restart message upstream */
362 skb = dev_alloc_skb(sizeof(struct can_frame)); 369 skb = alloc_can_err_skb(dev, &cf);
363 if (skb == NULL) { 370 if (skb == NULL) {
364 err = -ENOMEM; 371 err = -ENOMEM;
365 goto restart; 372 goto restart;
366 } 373 }
367 skb->dev = dev; 374 cf->can_id |= CAN_ERR_RESTARTED;
368 skb->protocol = htons(ETH_P_CAN);
369 cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
370 memset(cf, 0, sizeof(struct can_frame));
371 cf->can_id = CAN_ERR_FLAG | CAN_ERR_RESTARTED;
372 cf->can_dlc = CAN_ERR_DLC;
373 375
374 netif_rx(skb); 376 netif_rx(skb);
375 377
@@ -442,20 +444,66 @@ static void can_setup(struct net_device *dev)
442 dev->features = NETIF_F_NO_CSUM; 444 dev->features = NETIF_F_NO_CSUM;
443} 445}
444 446
447struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
448{
449 struct sk_buff *skb;
450
451 skb = netdev_alloc_skb(dev, sizeof(struct can_frame));
452 if (unlikely(!skb))
453 return NULL;
454
455 skb->protocol = htons(ETH_P_CAN);
456 skb->pkt_type = PACKET_BROADCAST;
457 skb->ip_summed = CHECKSUM_UNNECESSARY;
458 *cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
459 memset(*cf, 0, sizeof(struct can_frame));
460
461 return skb;
462}
463EXPORT_SYMBOL_GPL(alloc_can_skb);
464
465struct sk_buff *alloc_can_err_skb(struct net_device *dev, struct can_frame **cf)
466{
467 struct sk_buff *skb;
468
469 skb = alloc_can_skb(dev, cf);
470 if (unlikely(!skb))
471 return NULL;
472
473 (*cf)->can_id = CAN_ERR_FLAG;
474 (*cf)->can_dlc = CAN_ERR_DLC;
475
476 return skb;
477}
478EXPORT_SYMBOL_GPL(alloc_can_err_skb);
479
445/* 480/*
446 * Allocate and setup space for the CAN network device 481 * Allocate and setup space for the CAN network device
447 */ 482 */
448struct net_device *alloc_candev(int sizeof_priv) 483struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max)
449{ 484{
450 struct net_device *dev; 485 struct net_device *dev;
451 struct can_priv *priv; 486 struct can_priv *priv;
487 int size;
452 488
453 dev = alloc_netdev(sizeof_priv, "can%d", can_setup); 489 if (echo_skb_max)
490 size = ALIGN(sizeof_priv, sizeof(struct sk_buff *)) +
491 echo_skb_max * sizeof(struct sk_buff *);
492 else
493 size = sizeof_priv;
494
495 dev = alloc_netdev(size, "can%d", can_setup);
454 if (!dev) 496 if (!dev)
455 return NULL; 497 return NULL;
456 498
457 priv = netdev_priv(dev); 499 priv = netdev_priv(dev);
458 500
501 if (echo_skb_max) {
502 priv->echo_skb_max = echo_skb_max;
503 priv->echo_skb = (void *)priv +
504 ALIGN(sizeof_priv, sizeof(struct sk_buff *));
505 }
506
459 priv->state = CAN_STATE_STOPPED; 507 priv->state = CAN_STATE_STOPPED;
460 508
461 init_timer(&priv->restart_timer); 509 init_timer(&priv->restart_timer);
@@ -647,7 +695,7 @@ nla_put_failure:
647 return -EMSGSIZE; 695 return -EMSGSIZE;
648} 696}
649 697
650static int can_newlink(struct net_device *dev, 698static int can_newlink(struct net *src_net, struct net_device *dev,
651 struct nlattr *tb[], struct nlattr *data[]) 699 struct nlattr *tb[], struct nlattr *data[])
652{ 700{
653 return -EOPNOTSUPP; 701 return -EOPNOTSUPP;
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
new file mode 100644
index 000000000000..78b1b69b2921
--- /dev/null
+++ b/drivers/net/can/mcp251x.c
@@ -0,0 +1,1166 @@
1/*
2 * CAN bus driver for Microchip 251x CAN Controller with SPI Interface
3 *
4 * MCP2510 support and bug fixes by Christian Pellegrin
5 * <chripell@evolware.org>
6 *
7 * Copyright 2009 Christian Pellegrin EVOL S.r.l.
8 *
9 * Copyright 2007 Raymarine UK, Ltd. All Rights Reserved.
10 * Written under contract by:
11 * Chris Elston, Katalix Systems, Ltd.
12 *
13 * Based on Microchip MCP251x CAN controller driver written by
14 * David Vrabel, Copyright 2006 Arcom Control Systems Ltd.
15 *
16 * Based on CAN bus driver for the CCAN controller written by
17 * - Sascha Hauer, Marc Kleine-Budde, Pengutronix
18 * - Simon Kallweit, intefo AG
19 * Copyright 2007
20 *
21 * This program is free software; you can redistribute it and/or modify
22 * it under the terms of the version 2 of the GNU General Public License
23 * as published by the Free Software Foundation
24 *
25 * This program is distributed in the hope that it will be useful,
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
28 * GNU General Public License for more details.
29 *
30 * You should have received a copy of the GNU General Public License
31 * along with this program; if not, write to the Free Software
32 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
33 *
34 *
35 *
36 * Your platform definition file should specify something like:
37 *
38 * static struct mcp251x_platform_data mcp251x_info = {
39 * .oscillator_frequency = 8000000,
40 * .board_specific_setup = &mcp251x_setup,
41 * .model = CAN_MCP251X_MCP2510,
42 * .power_enable = mcp251x_power_enable,
43 * .transceiver_enable = NULL,
44 * };
45 *
46 * static struct spi_board_info spi_board_info[] = {
47 * {
48 * .modalias = "mcp251x",
49 * .platform_data = &mcp251x_info,
50 * .irq = IRQ_EINT13,
51 * .max_speed_hz = 2*1000*1000,
52 * .chip_select = 2,
53 * },
54 * };
55 *
56 * Please see mcp251x.h for a description of the fields in
57 * struct mcp251x_platform_data.
58 *
59 */
60
61#include <linux/can.h>
62#include <linux/can/core.h>
63#include <linux/can/dev.h>
64#include <linux/can/platform/mcp251x.h>
65#include <linux/completion.h>
66#include <linux/delay.h>
67#include <linux/device.h>
68#include <linux/dma-mapping.h>
69#include <linux/freezer.h>
70#include <linux/interrupt.h>
71#include <linux/io.h>
72#include <linux/kernel.h>
73#include <linux/module.h>
74#include <linux/netdevice.h>
75#include <linux/platform_device.h>
76#include <linux/spi/spi.h>
77#include <linux/uaccess.h>
78
79/* SPI interface instruction set */
80#define INSTRUCTION_WRITE 0x02
81#define INSTRUCTION_READ 0x03
82#define INSTRUCTION_BIT_MODIFY 0x05
83#define INSTRUCTION_LOAD_TXB(n) (0x40 + 2 * (n))
84#define INSTRUCTION_READ_RXB(n) (((n) == 0) ? 0x90 : 0x94)
85#define INSTRUCTION_RESET 0xC0
86
87/* MPC251x registers */
88#define CANSTAT 0x0e
89#define CANCTRL 0x0f
90# define CANCTRL_REQOP_MASK 0xe0
91# define CANCTRL_REQOP_CONF 0x80
92# define CANCTRL_REQOP_LISTEN_ONLY 0x60
93# define CANCTRL_REQOP_LOOPBACK 0x40
94# define CANCTRL_REQOP_SLEEP 0x20
95# define CANCTRL_REQOP_NORMAL 0x00
96# define CANCTRL_OSM 0x08
97# define CANCTRL_ABAT 0x10
98#define TEC 0x1c
99#define REC 0x1d
100#define CNF1 0x2a
101# define CNF1_SJW_SHIFT 6
102#define CNF2 0x29
103# define CNF2_BTLMODE 0x80
104# define CNF2_SAM 0x40
105# define CNF2_PS1_SHIFT 3
106#define CNF3 0x28
107# define CNF3_SOF 0x08
108# define CNF3_WAKFIL 0x04
109# define CNF3_PHSEG2_MASK 0x07
110#define CANINTE 0x2b
111# define CANINTE_MERRE 0x80
112# define CANINTE_WAKIE 0x40
113# define CANINTE_ERRIE 0x20
114# define CANINTE_TX2IE 0x10
115# define CANINTE_TX1IE 0x08
116# define CANINTE_TX0IE 0x04
117# define CANINTE_RX1IE 0x02
118# define CANINTE_RX0IE 0x01
119#define CANINTF 0x2c
120# define CANINTF_MERRF 0x80
121# define CANINTF_WAKIF 0x40
122# define CANINTF_ERRIF 0x20
123# define CANINTF_TX2IF 0x10
124# define CANINTF_TX1IF 0x08
125# define CANINTF_TX0IF 0x04
126# define CANINTF_RX1IF 0x02
127# define CANINTF_RX0IF 0x01
128#define EFLG 0x2d
129# define EFLG_EWARN 0x01
130# define EFLG_RXWAR 0x02
131# define EFLG_TXWAR 0x04
132# define EFLG_RXEP 0x08
133# define EFLG_TXEP 0x10
134# define EFLG_TXBO 0x20
135# define EFLG_RX0OVR 0x40
136# define EFLG_RX1OVR 0x80
137#define TXBCTRL(n) (((n) * 0x10) + 0x30 + TXBCTRL_OFF)
138# define TXBCTRL_ABTF 0x40
139# define TXBCTRL_MLOA 0x20
140# define TXBCTRL_TXERR 0x10
141# define TXBCTRL_TXREQ 0x08
142#define TXBSIDH(n) (((n) * 0x10) + 0x30 + TXBSIDH_OFF)
143# define SIDH_SHIFT 3
144#define TXBSIDL(n) (((n) * 0x10) + 0x30 + TXBSIDL_OFF)
145# define SIDL_SID_MASK 7
146# define SIDL_SID_SHIFT 5
147# define SIDL_EXIDE_SHIFT 3
148# define SIDL_EID_SHIFT 16
149# define SIDL_EID_MASK 3
150#define TXBEID8(n) (((n) * 0x10) + 0x30 + TXBEID8_OFF)
151#define TXBEID0(n) (((n) * 0x10) + 0x30 + TXBEID0_OFF)
152#define TXBDLC(n) (((n) * 0x10) + 0x30 + TXBDLC_OFF)
153# define DLC_RTR_SHIFT 6
154#define TXBCTRL_OFF 0
155#define TXBSIDH_OFF 1
156#define TXBSIDL_OFF 2
157#define TXBEID8_OFF 3
158#define TXBEID0_OFF 4
159#define TXBDLC_OFF 5
160#define TXBDAT_OFF 6
161#define RXBCTRL(n) (((n) * 0x10) + 0x60 + RXBCTRL_OFF)
162# define RXBCTRL_BUKT 0x04
163# define RXBCTRL_RXM0 0x20
164# define RXBCTRL_RXM1 0x40
165#define RXBSIDH(n) (((n) * 0x10) + 0x60 + RXBSIDH_OFF)
166# define RXBSIDH_SHIFT 3
167#define RXBSIDL(n) (((n) * 0x10) + 0x60 + RXBSIDL_OFF)
168# define RXBSIDL_IDE 0x08
169# define RXBSIDL_EID 3
170# define RXBSIDL_SHIFT 5
171#define RXBEID8(n) (((n) * 0x10) + 0x60 + RXBEID8_OFF)
172#define RXBEID0(n) (((n) * 0x10) + 0x60 + RXBEID0_OFF)
173#define RXBDLC(n) (((n) * 0x10) + 0x60 + RXBDLC_OFF)
174# define RXBDLC_LEN_MASK 0x0f
175# define RXBDLC_RTR 0x40
176#define RXBCTRL_OFF 0
177#define RXBSIDH_OFF 1
178#define RXBSIDL_OFF 2
179#define RXBEID8_OFF 3
180#define RXBEID0_OFF 4
181#define RXBDLC_OFF 5
182#define RXBDAT_OFF 6
183
184#define GET_BYTE(val, byte) \
185 (((val) >> ((byte) * 8)) & 0xff)
186#define SET_BYTE(val, byte) \
187 (((val) & 0xff) << ((byte) * 8))
188
189/*
190 * Buffer size required for the largest SPI transfer (i.e., reading a
191 * frame)
192 */
193#define CAN_FRAME_MAX_DATA_LEN 8
194#define SPI_TRANSFER_BUF_LEN (6 + CAN_FRAME_MAX_DATA_LEN)
195#define CAN_FRAME_MAX_BITS 128
196
197#define TX_ECHO_SKB_MAX 1
198
199#define DEVICE_NAME "mcp251x"
200
201static int mcp251x_enable_dma; /* Enable SPI DMA. Default: 0 (Off) */
202module_param(mcp251x_enable_dma, int, S_IRUGO);
203MODULE_PARM_DESC(mcp251x_enable_dma, "Enable SPI DMA. Default: 0 (Off)");
204
205static struct can_bittiming_const mcp251x_bittiming_const = {
206 .name = DEVICE_NAME,
207 .tseg1_min = 3,
208 .tseg1_max = 16,
209 .tseg2_min = 2,
210 .tseg2_max = 8,
211 .sjw_max = 4,
212 .brp_min = 1,
213 .brp_max = 64,
214 .brp_inc = 1,
215};
216
217struct mcp251x_priv {
218 struct can_priv can;
219 struct net_device *net;
220 struct spi_device *spi;
221
222 struct mutex spi_lock; /* SPI buffer lock */
223 u8 *spi_tx_buf;
224 u8 *spi_rx_buf;
225 dma_addr_t spi_tx_dma;
226 dma_addr_t spi_rx_dma;
227
228 struct sk_buff *tx_skb;
229 int tx_len;
230 struct workqueue_struct *wq;
231 struct work_struct tx_work;
232 struct work_struct irq_work;
233 struct completion awake;
234 int wake;
235 int force_quit;
236 int after_suspend;
237#define AFTER_SUSPEND_UP 1
238#define AFTER_SUSPEND_DOWN 2
239#define AFTER_SUSPEND_POWER 4
240#define AFTER_SUSPEND_RESTART 8
241 int restart_tx;
242};
243
244static void mcp251x_clean(struct net_device *net)
245{
246 struct mcp251x_priv *priv = netdev_priv(net);
247
248 net->stats.tx_errors++;
249 if (priv->tx_skb)
250 dev_kfree_skb(priv->tx_skb);
251 if (priv->tx_len)
252 can_free_echo_skb(priv->net, 0);
253 priv->tx_skb = NULL;
254 priv->tx_len = 0;
255}
256
257/*
258 * Note about handling of error return of mcp251x_spi_trans: accessing
259 * registers via SPI is not really different conceptually than using
260 * normal I/O assembler instructions, although it's much more
261 * complicated from a practical POV. So it's not advisable to always
262 * check the return value of this function. Imagine that every
263 * read{b,l}, write{b,l} and friends would be bracketed in "if ( < 0)
264 * error();", it would be a great mess (well there are some situation
265 * when exception handling C++ like could be useful after all). So we
266 * just check that transfers are OK at the beginning of our
267 * conversation with the chip and to avoid doing really nasty things
268 * (like injecting bogus packets in the network stack).
269 */
270static int mcp251x_spi_trans(struct spi_device *spi, int len)
271{
272 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
273 struct spi_transfer t = {
274 .tx_buf = priv->spi_tx_buf,
275 .rx_buf = priv->spi_rx_buf,
276 .len = len,
277 .cs_change = 0,
278 };
279 struct spi_message m;
280 int ret;
281
282 spi_message_init(&m);
283
284 if (mcp251x_enable_dma) {
285 t.tx_dma = priv->spi_tx_dma;
286 t.rx_dma = priv->spi_rx_dma;
287 m.is_dma_mapped = 1;
288 }
289
290 spi_message_add_tail(&t, &m);
291
292 ret = spi_sync(spi, &m);
293 if (ret)
294 dev_err(&spi->dev, "spi transfer failed: ret = %d\n", ret);
295 return ret;
296}
297
298static u8 mcp251x_read_reg(struct spi_device *spi, uint8_t reg)
299{
300 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
301 u8 val = 0;
302
303 mutex_lock(&priv->spi_lock);
304
305 priv->spi_tx_buf[0] = INSTRUCTION_READ;
306 priv->spi_tx_buf[1] = reg;
307
308 mcp251x_spi_trans(spi, 3);
309 val = priv->spi_rx_buf[2];
310
311 mutex_unlock(&priv->spi_lock);
312
313 return val;
314}
315
316static void mcp251x_write_reg(struct spi_device *spi, u8 reg, uint8_t val)
317{
318 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
319
320 mutex_lock(&priv->spi_lock);
321
322 priv->spi_tx_buf[0] = INSTRUCTION_WRITE;
323 priv->spi_tx_buf[1] = reg;
324 priv->spi_tx_buf[2] = val;
325
326 mcp251x_spi_trans(spi, 3);
327
328 mutex_unlock(&priv->spi_lock);
329}
330
331static void mcp251x_write_bits(struct spi_device *spi, u8 reg,
332 u8 mask, uint8_t val)
333{
334 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
335
336 mutex_lock(&priv->spi_lock);
337
338 priv->spi_tx_buf[0] = INSTRUCTION_BIT_MODIFY;
339 priv->spi_tx_buf[1] = reg;
340 priv->spi_tx_buf[2] = mask;
341 priv->spi_tx_buf[3] = val;
342
343 mcp251x_spi_trans(spi, 4);
344
345 mutex_unlock(&priv->spi_lock);
346}
347
348static void mcp251x_hw_tx_frame(struct spi_device *spi, u8 *buf,
349 int len, int tx_buf_idx)
350{
351 struct mcp251x_platform_data *pdata = spi->dev.platform_data;
352 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
353
354 if (pdata->model == CAN_MCP251X_MCP2510) {
355 int i;
356
357 for (i = 1; i < TXBDAT_OFF + len; i++)
358 mcp251x_write_reg(spi, TXBCTRL(tx_buf_idx) + i,
359 buf[i]);
360 } else {
361 mutex_lock(&priv->spi_lock);
362 memcpy(priv->spi_tx_buf, buf, TXBDAT_OFF + len);
363 mcp251x_spi_trans(spi, TXBDAT_OFF + len);
364 mutex_unlock(&priv->spi_lock);
365 }
366}
367
368static void mcp251x_hw_tx(struct spi_device *spi, struct can_frame *frame,
369 int tx_buf_idx)
370{
371 u32 sid, eid, exide, rtr;
372 u8 buf[SPI_TRANSFER_BUF_LEN];
373
374 exide = (frame->can_id & CAN_EFF_FLAG) ? 1 : 0; /* Extended ID Enable */
375 if (exide)
376 sid = (frame->can_id & CAN_EFF_MASK) >> 18;
377 else
378 sid = frame->can_id & CAN_SFF_MASK; /* Standard ID */
379 eid = frame->can_id & CAN_EFF_MASK; /* Extended ID */
380 rtr = (frame->can_id & CAN_RTR_FLAG) ? 1 : 0; /* Remote transmission */
381
382 buf[TXBCTRL_OFF] = INSTRUCTION_LOAD_TXB(tx_buf_idx);
383 buf[TXBSIDH_OFF] = sid >> SIDH_SHIFT;
384 buf[TXBSIDL_OFF] = ((sid & SIDL_SID_MASK) << SIDL_SID_SHIFT) |
385 (exide << SIDL_EXIDE_SHIFT) |
386 ((eid >> SIDL_EID_SHIFT) & SIDL_EID_MASK);
387 buf[TXBEID8_OFF] = GET_BYTE(eid, 1);
388 buf[TXBEID0_OFF] = GET_BYTE(eid, 0);
389 buf[TXBDLC_OFF] = (rtr << DLC_RTR_SHIFT) | frame->can_dlc;
390 memcpy(buf + TXBDAT_OFF, frame->data, frame->can_dlc);
391 mcp251x_hw_tx_frame(spi, buf, frame->can_dlc, tx_buf_idx);
392 mcp251x_write_reg(spi, TXBCTRL(tx_buf_idx), TXBCTRL_TXREQ);
393}
394
395static void mcp251x_hw_rx_frame(struct spi_device *spi, u8 *buf,
396 int buf_idx)
397{
398 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
399 struct mcp251x_platform_data *pdata = spi->dev.platform_data;
400
401 if (pdata->model == CAN_MCP251X_MCP2510) {
402 int i, len;
403
404 for (i = 1; i < RXBDAT_OFF; i++)
405 buf[i] = mcp251x_read_reg(spi, RXBCTRL(buf_idx) + i);
406 len = buf[RXBDLC_OFF] & RXBDLC_LEN_MASK;
407 if (len > 8)
408 len = 8;
409 for (; i < (RXBDAT_OFF + len); i++)
410 buf[i] = mcp251x_read_reg(spi, RXBCTRL(buf_idx) + i);
411 } else {
412 mutex_lock(&priv->spi_lock);
413
414 priv->spi_tx_buf[RXBCTRL_OFF] = INSTRUCTION_READ_RXB(buf_idx);
415 mcp251x_spi_trans(spi, SPI_TRANSFER_BUF_LEN);
416 memcpy(buf, priv->spi_rx_buf, SPI_TRANSFER_BUF_LEN);
417
418 mutex_unlock(&priv->spi_lock);
419 }
420}
421
422static void mcp251x_hw_rx(struct spi_device *spi, int buf_idx)
423{
424 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
425 struct sk_buff *skb;
426 struct can_frame *frame;
427 u8 buf[SPI_TRANSFER_BUF_LEN];
428
429 skb = alloc_can_skb(priv->net, &frame);
430 if (!skb) {
431 dev_err(&spi->dev, "cannot allocate RX skb\n");
432 priv->net->stats.rx_dropped++;
433 return;
434 }
435
436 mcp251x_hw_rx_frame(spi, buf, buf_idx);
437 if (buf[RXBSIDL_OFF] & RXBSIDL_IDE) {
438 /* Extended ID format */
439 frame->can_id = CAN_EFF_FLAG;
440 frame->can_id |=
441 /* Extended ID part */
442 SET_BYTE(buf[RXBSIDL_OFF] & RXBSIDL_EID, 2) |
443 SET_BYTE(buf[RXBEID8_OFF], 1) |
444 SET_BYTE(buf[RXBEID0_OFF], 0) |
445 /* Standard ID part */
446 (((buf[RXBSIDH_OFF] << RXBSIDH_SHIFT) |
447 (buf[RXBSIDL_OFF] >> RXBSIDL_SHIFT)) << 18);
448 /* Remote transmission request */
449 if (buf[RXBDLC_OFF] & RXBDLC_RTR)
450 frame->can_id |= CAN_RTR_FLAG;
451 } else {
452 /* Standard ID format */
453 frame->can_id =
454 (buf[RXBSIDH_OFF] << RXBSIDH_SHIFT) |
455 (buf[RXBSIDL_OFF] >> RXBSIDL_SHIFT);
456 }
457 /* Data length */
458 frame->can_dlc = buf[RXBDLC_OFF] & RXBDLC_LEN_MASK;
459 if (frame->can_dlc > 8) {
460 dev_warn(&spi->dev, "invalid frame recevied\n");
461 priv->net->stats.rx_errors++;
462 dev_kfree_skb(skb);
463 return;
464 }
465 memcpy(frame->data, buf + RXBDAT_OFF, frame->can_dlc);
466
467 priv->net->stats.rx_packets++;
468 priv->net->stats.rx_bytes += frame->can_dlc;
469 netif_rx(skb);
470}
471
472static void mcp251x_hw_sleep(struct spi_device *spi)
473{
474 mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_SLEEP);
475}
476
477static void mcp251x_hw_wakeup(struct spi_device *spi)
478{
479 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
480
481 priv->wake = 1;
482
483 /* Can only wake up by generating a wake-up interrupt. */
484 mcp251x_write_bits(spi, CANINTE, CANINTE_WAKIE, CANINTE_WAKIE);
485 mcp251x_write_bits(spi, CANINTF, CANINTF_WAKIF, CANINTF_WAKIF);
486
487 /* Wait until the device is awake */
488 if (!wait_for_completion_timeout(&priv->awake, HZ))
489 dev_err(&spi->dev, "MCP251x didn't wake-up\n");
490}
491
492static netdev_tx_t mcp251x_hard_start_xmit(struct sk_buff *skb,
493 struct net_device *net)
494{
495 struct mcp251x_priv *priv = netdev_priv(net);
496 struct spi_device *spi = priv->spi;
497
498 if (priv->tx_skb || priv->tx_len) {
499 dev_warn(&spi->dev, "hard_xmit called while tx busy\n");
500 netif_stop_queue(net);
501 return NETDEV_TX_BUSY;
502 }
503
504 if (skb->len != sizeof(struct can_frame)) {
505 dev_err(&spi->dev, "dropping packet - bad length\n");
506 dev_kfree_skb(skb);
507 net->stats.tx_dropped++;
508 return NETDEV_TX_OK;
509 }
510
511 netif_stop_queue(net);
512 priv->tx_skb = skb;
513 net->trans_start = jiffies;
514 queue_work(priv->wq, &priv->tx_work);
515
516 return NETDEV_TX_OK;
517}
518
519static int mcp251x_do_set_mode(struct net_device *net, enum can_mode mode)
520{
521 struct mcp251x_priv *priv = netdev_priv(net);
522
523 switch (mode) {
524 case CAN_MODE_START:
525 /* We have to delay work since SPI I/O may sleep */
526 priv->can.state = CAN_STATE_ERROR_ACTIVE;
527 priv->restart_tx = 1;
528 if (priv->can.restart_ms == 0)
529 priv->after_suspend = AFTER_SUSPEND_RESTART;
530 queue_work(priv->wq, &priv->irq_work);
531 break;
532 default:
533 return -EOPNOTSUPP;
534 }
535
536 return 0;
537}
538
539static void mcp251x_set_normal_mode(struct spi_device *spi)
540{
541 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
542 unsigned long timeout;
543
544 /* Enable interrupts */
545 mcp251x_write_reg(spi, CANINTE,
546 CANINTE_ERRIE | CANINTE_TX2IE | CANINTE_TX1IE |
547 CANINTE_TX0IE | CANINTE_RX1IE | CANINTE_RX0IE |
548 CANINTF_MERRF);
549
550 if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
551 /* Put device into loopback mode */
552 mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_LOOPBACK);
553 } else {
554 /* Put device into normal mode */
555 mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_NORMAL);
556
557 /* Wait for the device to enter normal mode */
558 timeout = jiffies + HZ;
559 while (mcp251x_read_reg(spi, CANSTAT) & CANCTRL_REQOP_MASK) {
560 schedule();
561 if (time_after(jiffies, timeout)) {
562 dev_err(&spi->dev, "MCP251x didn't"
563 " enter in normal mode\n");
564 return;
565 }
566 }
567 }
568 priv->can.state = CAN_STATE_ERROR_ACTIVE;
569}
570
571static int mcp251x_do_set_bittiming(struct net_device *net)
572{
573 struct mcp251x_priv *priv = netdev_priv(net);
574 struct can_bittiming *bt = &priv->can.bittiming;
575 struct spi_device *spi = priv->spi;
576
577 mcp251x_write_reg(spi, CNF1, ((bt->sjw - 1) << CNF1_SJW_SHIFT) |
578 (bt->brp - 1));
579 mcp251x_write_reg(spi, CNF2, CNF2_BTLMODE |
580 (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES ?
581 CNF2_SAM : 0) |
582 ((bt->phase_seg1 - 1) << CNF2_PS1_SHIFT) |
583 (bt->prop_seg - 1));
584 mcp251x_write_bits(spi, CNF3, CNF3_PHSEG2_MASK,
585 (bt->phase_seg2 - 1));
586 dev_info(&spi->dev, "CNF: 0x%02x 0x%02x 0x%02x\n",
587 mcp251x_read_reg(spi, CNF1),
588 mcp251x_read_reg(spi, CNF2),
589 mcp251x_read_reg(spi, CNF3));
590
591 return 0;
592}
593
594static int mcp251x_setup(struct net_device *net, struct mcp251x_priv *priv,
595 struct spi_device *spi)
596{
597 mcp251x_do_set_bittiming(net);
598
599 /* Enable RX0->RX1 buffer roll over and disable filters */
600 mcp251x_write_bits(spi, RXBCTRL(0),
601 RXBCTRL_BUKT | RXBCTRL_RXM0 | RXBCTRL_RXM1,
602 RXBCTRL_BUKT | RXBCTRL_RXM0 | RXBCTRL_RXM1);
603 mcp251x_write_bits(spi, RXBCTRL(1),
604 RXBCTRL_RXM0 | RXBCTRL_RXM1,
605 RXBCTRL_RXM0 | RXBCTRL_RXM1);
606 return 0;
607}
608
609static void mcp251x_hw_reset(struct spi_device *spi)
610{
611 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
612 int ret;
613
614 mutex_lock(&priv->spi_lock);
615
616 priv->spi_tx_buf[0] = INSTRUCTION_RESET;
617
618 ret = spi_write(spi, priv->spi_tx_buf, 1);
619
620 mutex_unlock(&priv->spi_lock);
621
622 if (ret)
623 dev_err(&spi->dev, "reset failed: ret = %d\n", ret);
624 /* Wait for reset to finish */
625 mdelay(10);
626}
627
628static int mcp251x_hw_probe(struct spi_device *spi)
629{
630 int st1, st2;
631
632 mcp251x_hw_reset(spi);
633
634 /*
635 * Please note that these are "magic values" based on after
636 * reset defaults taken from data sheet which allows us to see
637 * if we really have a chip on the bus (we avoid common all
638 * zeroes or all ones situations)
639 */
640 st1 = mcp251x_read_reg(spi, CANSTAT) & 0xEE;
641 st2 = mcp251x_read_reg(spi, CANCTRL) & 0x17;
642
643 dev_dbg(&spi->dev, "CANSTAT 0x%02x CANCTRL 0x%02x\n", st1, st2);
644
645 /* Check for power up default values */
646 return (st1 == 0x80 && st2 == 0x07) ? 1 : 0;
647}
648
649static irqreturn_t mcp251x_can_isr(int irq, void *dev_id)
650{
651 struct net_device *net = (struct net_device *)dev_id;
652 struct mcp251x_priv *priv = netdev_priv(net);
653
654 /* Schedule bottom half */
655 if (!work_pending(&priv->irq_work))
656 queue_work(priv->wq, &priv->irq_work);
657
658 return IRQ_HANDLED;
659}
660
/*
 * Bring the interface up: open the CAN device layer, switch the
 * transceiver on, grab the SPI interrupt and take the chip from sleep
 * through reset into normal operating mode.
 *
 * Returns 0 on success or a negative errno; on failure every step
 * already taken is rolled back (transceiver off, irq freed, candev
 * closed).
 */
static int mcp251x_open(struct net_device *net)
{
	struct mcp251x_priv *priv = netdev_priv(net);
	struct spi_device *spi = priv->spi;
	struct mcp251x_platform_data *pdata = spi->dev.platform_data;
	int ret;

	ret = open_candev(net);
	if (ret) {
		dev_err(&spi->dev, "unable to set initial baudrate!\n");
		return ret;
	}

	/* Power up the external CAN transceiver, if the board has one */
	if (pdata->transceiver_enable)
		pdata->transceiver_enable(1);

	/* Reset TX bookkeeping before the interrupt can fire */
	priv->force_quit = 0;
	priv->tx_skb = NULL;
	priv->tx_len = 0;

	/* The MCP251x INT pin is active low -> falling-edge triggered */
	ret = request_irq(spi->irq, mcp251x_can_isr,
			  IRQF_TRIGGER_FALLING, DEVICE_NAME, net);
	if (ret) {
		dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);
		if (pdata->transceiver_enable)
			pdata->transceiver_enable(0);
		close_candev(net);
		return ret;
	}

	/* Wake from sleep, then reset to get a well-defined register state */
	mcp251x_hw_wakeup(spi);
	mcp251x_hw_reset(spi);
	ret = mcp251x_setup(net, priv, spi);
	if (ret) {
		free_irq(spi->irq, net);
		mcp251x_hw_sleep(spi);
		if (pdata->transceiver_enable)
			pdata->transceiver_enable(0);
		close_candev(net);
		return ret;
	}
	mcp251x_set_normal_mode(spi);
	netif_wake_queue(net);

	return 0;
}
707
/*
 * Bring the interface down: mask chip interrupts, stop the IRQ worker,
 * abort any pending transmission and put chip and transceiver to sleep.
 */
static int mcp251x_stop(struct net_device *net)
{
	struct mcp251x_priv *priv = netdev_priv(net);
	struct spi_device *spi = priv->spi;
	struct mcp251x_platform_data *pdata = spi->dev.platform_data;

	close_candev(net);

	/* Disable and clear pending interrupts */
	mcp251x_write_reg(spi, CANINTE, 0x00);
	mcp251x_write_reg(spi, CANINTF, 0x00);

	/* Make the irq worker exit its loop, then wait for it to finish */
	priv->force_quit = 1;
	free_irq(spi->irq, net);
	flush_workqueue(priv->wq);

	/* Abort whatever is still sitting in hardware TX buffer 0 */
	mcp251x_write_reg(spi, TXBCTRL(0), 0);
	if (priv->tx_skb || priv->tx_len)
		mcp251x_clean(net);

	mcp251x_hw_sleep(spi);

	if (pdata->transceiver_enable)
		pdata->transceiver_enable(0);

	priv->can.state = CAN_STATE_STOPPED;

	return 0;
}
737
/*
 * Workqueue bottom half for transmission: loads the frame queued by
 * the xmit path into hardware TX buffer 0 and stores the echo skb.
 */
static void mcp251x_tx_work_handler(struct work_struct *ws)
{
	struct mcp251x_priv *priv = container_of(ws, struct mcp251x_priv,
						 tx_work);
	struct spi_device *spi = priv->spi;
	struct net_device *net = priv->net;
	struct can_frame *frame;

	if (priv->tx_skb) {
		frame = (struct can_frame *)priv->tx_skb->data;

		/* Bus is off: nothing can be sent, drop and restart queue */
		if (priv->can.state == CAN_STATE_BUS_OFF) {
			mcp251x_clean(net);
			netif_wake_queue(net);
			return;
		}
		/* Clamp the DLC to what the hardware can carry */
		if (frame->can_dlc > CAN_FRAME_MAX_DATA_LEN)
			frame->can_dlc = CAN_FRAME_MAX_DATA_LEN;
		mcp251x_hw_tx(spi, frame, 0);
		/* Non-zero tx_len marks a transmission in flight */
		priv->tx_len = 1 + frame->can_dlc;
		can_put_echo_skb(priv->tx_skb, net, 0);
		priv->tx_skb = NULL;
	}
}
762
/*
 * Workqueue bottom half for the CAN interrupt (also finishes deferred
 * resume work).
 *
 * Loops until all interrupt sources in CANINTF are serviced: derives
 * the CAN error state from EFLG, emits error frames, completes the TX
 * echo and drains both hardware RX buffers.
 */
static void mcp251x_irq_work_handler(struct work_struct *ws)
{
	struct mcp251x_priv *priv = container_of(ws, struct mcp251x_priv,
						 irq_work);
	struct spi_device *spi = priv->spi;
	struct net_device *net = priv->net;
	u8 txbnctrl;
	u8 intf;
	enum can_state new_state;

	/* First handle chip re-init deferred from the resume callback */
	if (priv->after_suspend) {
		mdelay(10);
		mcp251x_hw_reset(spi);
		mcp251x_setup(net, priv, spi);
		if (priv->after_suspend & AFTER_SUSPEND_RESTART) {
			mcp251x_set_normal_mode(spi);
		} else if (priv->after_suspend & AFTER_SUSPEND_UP) {
			netif_device_attach(net);
			/* Clean since we lost tx buffer */
			if (priv->tx_skb || priv->tx_len) {
				mcp251x_clean(net);
				netif_wake_queue(net);
			}
			mcp251x_set_normal_mode(spi);
		} else {
			mcp251x_hw_sleep(spi);
		}
		priv->after_suspend = 0;
	}

	/* Bus-off with no automatic restart: nothing more to service */
	if (priv->can.restart_ms == 0 && priv->can.state == CAN_STATE_BUS_OFF)
		return;

	while (!priv->force_quit && !freezing(current)) {
		u8 eflag = mcp251x_read_reg(spi, EFLG);
		int can_id = 0, data1 = 0;

		mcp251x_write_reg(spi, EFLG, 0x00);

		/* Restart requested (e.g. after bus-off recovery) */
		if (priv->restart_tx) {
			priv->restart_tx = 0;
			mcp251x_write_reg(spi, TXBCTRL(0), 0);
			if (priv->tx_skb || priv->tx_len)
				mcp251x_clean(net);
			netif_wake_queue(net);
			can_id |= CAN_ERR_RESTARTED;
		}

		if (priv->wake) {
			/* Wait whilst the device wakes up */
			mdelay(10);
			priv->wake = 0;
		}

		/* Read and acknowledge all pending interrupt flags */
		intf = mcp251x_read_reg(spi, CANINTF);
		mcp251x_write_bits(spi, CANINTF, intf, 0x00);

		/* Update can state */
		if (eflag & EFLG_TXBO) {
			new_state = CAN_STATE_BUS_OFF;
			can_id |= CAN_ERR_BUSOFF;
		} else if (eflag & EFLG_TXEP) {
			new_state = CAN_STATE_ERROR_PASSIVE;
			can_id |= CAN_ERR_CRTL;
			data1 |= CAN_ERR_CRTL_TX_PASSIVE;
		} else if (eflag & EFLG_RXEP) {
			new_state = CAN_STATE_ERROR_PASSIVE;
			can_id |= CAN_ERR_CRTL;
			data1 |= CAN_ERR_CRTL_RX_PASSIVE;
		} else if (eflag & EFLG_TXWAR) {
			new_state = CAN_STATE_ERROR_WARNING;
			can_id |= CAN_ERR_CRTL;
			data1 |= CAN_ERR_CRTL_TX_WARNING;
		} else if (eflag & EFLG_RXWAR) {
			new_state = CAN_STATE_ERROR_WARNING;
			can_id |= CAN_ERR_CRTL;
			data1 |= CAN_ERR_CRTL_RX_WARNING;
		} else {
			new_state = CAN_STATE_ERROR_ACTIVE;
		}

		/* Update can state statistics (deliberate fallthrough:
		 * from ACTIVE both warning and passive may apply) */
		switch (priv->can.state) {
		case CAN_STATE_ERROR_ACTIVE:
			if (new_state >= CAN_STATE_ERROR_WARNING &&
			    new_state <= CAN_STATE_BUS_OFF)
				priv->can.can_stats.error_warning++;
		case CAN_STATE_ERROR_WARNING:	/* fallthrough */
			if (new_state >= CAN_STATE_ERROR_PASSIVE &&
			    new_state <= CAN_STATE_BUS_OFF)
				priv->can.can_stats.error_passive++;
			break;
		default:
			break;
		}
		priv->can.state = new_state;

		if ((intf & CANINTF_ERRIF) || (can_id & CAN_ERR_RESTARTED)) {
			struct sk_buff *skb;
			struct can_frame *frame;

			/* Create error frame */
			skb = alloc_can_err_skb(net, &frame);
			if (skb) {
				/* Set error frame flags based on bus state */
				frame->can_id = can_id;
				frame->data[1] = data1;

				/* Update net stats for overflows */
				if (eflag & (EFLG_RX0OVR | EFLG_RX1OVR)) {
					if (eflag & EFLG_RX0OVR)
						net->stats.rx_over_errors++;
					if (eflag & EFLG_RX1OVR)
						net->stats.rx_over_errors++;
					frame->can_id |= CAN_ERR_CRTL;
					frame->data[1] |=
						CAN_ERR_CRTL_RX_OVERFLOW;
				}

				netif_rx(skb);
			} else {
				dev_info(&spi->dev,
					 "cannot allocate error skb\n");
			}
		}

		/* No auto-restart configured: park the chip until ifdown */
		if (priv->can.state == CAN_STATE_BUS_OFF) {
			if (priv->can.restart_ms == 0) {
				can_bus_off(net);
				mcp251x_hw_sleep(spi);
				return;
			}
		}

		/* All interrupt sources serviced */
		if (intf == 0)
			break;

		if (intf & CANINTF_WAKIF)
			complete(&priv->awake);

		if (intf & CANINTF_MERRF) {
			/* If there are pending Tx buffers, restart queue */
			txbnctrl = mcp251x_read_reg(spi, TXBCTRL(0));
			if (!(txbnctrl & TXBCTRL_TXREQ)) {
				if (priv->tx_skb || priv->tx_len)
					mcp251x_clean(net);
				netif_wake_queue(net);
			}
		}

		/* TX done: account, release the echo skb, restart queue */
		if (intf & (CANINTF_TX2IF | CANINTF_TX1IF | CANINTF_TX0IF)) {
			net->stats.tx_packets++;
			net->stats.tx_bytes += priv->tx_len - 1;
			if (priv->tx_len) {
				can_get_echo_skb(net, 0);
				priv->tx_len = 0;
			}
			netif_wake_queue(net);
		}

		if (intf & CANINTF_RX0IF)
			mcp251x_hw_rx(spi, 0);

		if (intf & CANINTF_RX1IF)
			mcp251x_hw_rx(spi, 1);
	}
}
930
/* net_device callbacks; xmit hands frames to the tx workqueue */
static const struct net_device_ops mcp251x_netdev_ops = {
	.ndo_open = mcp251x_open,
	.ndo_stop = mcp251x_stop,
	.ndo_start_xmit = mcp251x_hard_start_xmit,
};
936
937static int __devinit mcp251x_can_probe(struct spi_device *spi)
938{
939 struct net_device *net;
940 struct mcp251x_priv *priv;
941 struct mcp251x_platform_data *pdata = spi->dev.platform_data;
942 int ret = -ENODEV;
943
944 if (!pdata)
945 /* Platform data is required for osc freq */
946 goto error_out;
947
948 /* Allocate can/net device */
949 net = alloc_candev(sizeof(struct mcp251x_priv), TX_ECHO_SKB_MAX);
950 if (!net) {
951 ret = -ENOMEM;
952 goto error_alloc;
953 }
954
955 net->netdev_ops = &mcp251x_netdev_ops;
956 net->flags |= IFF_ECHO;
957
958 priv = netdev_priv(net);
959 priv->can.bittiming_const = &mcp251x_bittiming_const;
960 priv->can.do_set_mode = mcp251x_do_set_mode;
961 priv->can.clock.freq = pdata->oscillator_frequency / 2;
962 priv->net = net;
963 dev_set_drvdata(&spi->dev, priv);
964
965 priv->spi = spi;
966 mutex_init(&priv->spi_lock);
967
968 /* If requested, allocate DMA buffers */
969 if (mcp251x_enable_dma) {
970 spi->dev.coherent_dma_mask = ~0;
971
972 /*
973 * Minimum coherent DMA allocation is PAGE_SIZE, so allocate
974 * that much and share it between Tx and Rx DMA buffers.
975 */
976 priv->spi_tx_buf = dma_alloc_coherent(&spi->dev,
977 PAGE_SIZE,
978 &priv->spi_tx_dma,
979 GFP_DMA);
980
981 if (priv->spi_tx_buf) {
982 priv->spi_rx_buf = (u8 *)(priv->spi_tx_buf +
983 (PAGE_SIZE / 2));
984 priv->spi_rx_dma = (dma_addr_t)(priv->spi_tx_dma +
985 (PAGE_SIZE / 2));
986 } else {
987 /* Fall back to non-DMA */
988 mcp251x_enable_dma = 0;
989 }
990 }
991
992 /* Allocate non-DMA buffers */
993 if (!mcp251x_enable_dma) {
994 priv->spi_tx_buf = kmalloc(SPI_TRANSFER_BUF_LEN, GFP_KERNEL);
995 if (!priv->spi_tx_buf) {
996 ret = -ENOMEM;
997 goto error_tx_buf;
998 }
999 priv->spi_rx_buf = kmalloc(SPI_TRANSFER_BUF_LEN, GFP_KERNEL);
1000 if (!priv->spi_tx_buf) {
1001 ret = -ENOMEM;
1002 goto error_rx_buf;
1003 }
1004 }
1005
1006 if (pdata->power_enable)
1007 pdata->power_enable(1);
1008
1009 /* Call out to platform specific setup */
1010 if (pdata->board_specific_setup)
1011 pdata->board_specific_setup(spi);
1012
1013 SET_NETDEV_DEV(net, &spi->dev);
1014
1015 priv->wq = create_freezeable_workqueue("mcp251x_wq");
1016
1017 INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
1018 INIT_WORK(&priv->irq_work, mcp251x_irq_work_handler);
1019
1020 init_completion(&priv->awake);
1021
1022 /* Configure the SPI bus */
1023 spi->mode = SPI_MODE_0;
1024 spi->bits_per_word = 8;
1025 spi_setup(spi);
1026
1027 if (!mcp251x_hw_probe(spi)) {
1028 dev_info(&spi->dev, "Probe failed\n");
1029 goto error_probe;
1030 }
1031 mcp251x_hw_sleep(spi);
1032
1033 if (pdata->transceiver_enable)
1034 pdata->transceiver_enable(0);
1035
1036 ret = register_candev(net);
1037 if (!ret) {
1038 dev_info(&spi->dev, "probed\n");
1039 return ret;
1040 }
1041error_probe:
1042 if (!mcp251x_enable_dma)
1043 kfree(priv->spi_rx_buf);
1044error_rx_buf:
1045 if (!mcp251x_enable_dma)
1046 kfree(priv->spi_tx_buf);
1047error_tx_buf:
1048 free_candev(net);
1049 if (mcp251x_enable_dma)
1050 dma_free_coherent(&spi->dev, PAGE_SIZE,
1051 priv->spi_tx_buf, priv->spi_tx_dma);
1052error_alloc:
1053 if (pdata->power_enable)
1054 pdata->power_enable(0);
1055 dev_err(&spi->dev, "probe failed\n");
1056error_out:
1057 return ret;
1058}
1059
1060static int __devexit mcp251x_can_remove(struct spi_device *spi)
1061{
1062 struct mcp251x_platform_data *pdata = spi->dev.platform_data;
1063 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
1064 struct net_device *net = priv->net;
1065
1066 unregister_candev(net);
1067 free_candev(net);
1068
1069 priv->force_quit = 1;
1070 flush_workqueue(priv->wq);
1071 destroy_workqueue(priv->wq);
1072
1073 if (mcp251x_enable_dma) {
1074 dma_free_coherent(&spi->dev, PAGE_SIZE,
1075 priv->spi_tx_buf, priv->spi_tx_dma);
1076 } else {
1077 kfree(priv->spi_tx_buf);
1078 kfree(priv->spi_rx_buf);
1079 }
1080
1081 if (pdata->power_enable)
1082 pdata->power_enable(0);
1083
1084 return 0;
1085}
1086
1087#ifdef CONFIG_PM
/*
 * System suspend: detach a running interface, put the chip to sleep
 * and record in priv->after_suspend what has to be redone on resume
 * (the actual re-init happens in the irq_work handler).
 */
static int mcp251x_can_suspend(struct spi_device *spi, pm_message_t state)
{
	struct mcp251x_platform_data *pdata = spi->dev.platform_data;
	struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
	struct net_device *net = priv->net;

	if (netif_running(net)) {
		netif_device_detach(net);

		mcp251x_hw_sleep(spi);
		if (pdata->transceiver_enable)
			pdata->transceiver_enable(0);
		priv->after_suspend = AFTER_SUSPEND_UP;
	} else {
		priv->after_suspend = AFTER_SUSPEND_DOWN;
	}

	/* Cut board power entirely where the platform supports it */
	if (pdata->power_enable) {
		pdata->power_enable(0);
		priv->after_suspend |= AFTER_SUSPEND_POWER;
	}

	return 0;
}
1112
/*
 * System resume: restore power and/or the transceiver and kick the
 * irq_work handler, which performs the actual chip re-initialization
 * based on priv->after_suspend.
 *
 * Note: power_enable is only dereferenced here when
 * AFTER_SUSPEND_POWER was set, which suspend only does when the
 * callback exists - no NULL check needed.
 */
static int mcp251x_can_resume(struct spi_device *spi)
{
	struct mcp251x_platform_data *pdata = spi->dev.platform_data;
	struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);

	if (priv->after_suspend & AFTER_SUSPEND_POWER) {
		pdata->power_enable(1);
		queue_work(priv->wq, &priv->irq_work);
	} else {
		if (priv->after_suspend & AFTER_SUSPEND_UP) {
			if (pdata->transceiver_enable)
				pdata->transceiver_enable(1);
			queue_work(priv->wq, &priv->irq_work);
		} else {
			priv->after_suspend = 0;
		}
	}
	return 0;
}
1132#else
1133#define mcp251x_can_suspend NULL
1134#define mcp251x_can_resume NULL
1135#endif
1136
/* SPI driver glue; suspend/resume are NULL when CONFIG_PM is off */
static struct spi_driver mcp251x_can_driver = {
	.driver = {
		.name = DEVICE_NAME,
		.bus = &spi_bus_type,
		.owner = THIS_MODULE,
	},

	.probe = mcp251x_can_probe,
	.remove = __devexit_p(mcp251x_can_remove),
	.suspend = mcp251x_can_suspend,
	.resume = mcp251x_can_resume,
};
1149
1150static int __init mcp251x_can_init(void)
1151{
1152 return spi_register_driver(&mcp251x_can_driver);
1153}
1154
/* Module exit point: unregister the driver from the SPI core. */
static void __exit mcp251x_can_exit(void)
{
	spi_unregister_driver(&mcp251x_can_driver);
}

module_init(mcp251x_can_init);
module_exit(mcp251x_can_exit);

MODULE_AUTHOR("Chris Elston <celston@katalix.com>, "
	      "Christian Pellegrin <chripell@evolware.org>");
MODULE_DESCRIPTION("Microchip 251x CAN driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/mscan/Kconfig b/drivers/net/can/mscan/Kconfig
new file mode 100644
index 000000000000..cd0f2d6f375d
--- /dev/null
+++ b/drivers/net/can/mscan/Kconfig
@@ -0,0 +1,23 @@
1config CAN_MSCAN
2 depends on CAN_DEV && (PPC || M68K || M68KNOMMU)
3 tristate "Support for Freescale MSCAN based chips"
4 ---help---
5 The Motorola Scalable Controller Area Network (MSCAN) definition
6 is based on the MSCAN12 definition which is the specific
7 implementation of the Motorola Scalable CAN concept targeted for
8 the Motorola MC68HC12 Microcontroller Family.
9
10if CAN_MSCAN
11
12config CAN_MPC5XXX
13 tristate "Freescale MPC5xxx onboard CAN controller"
14 depends on PPC_MPC52xx
15 ---help---
16 If you say yes here you get support for Freescale's MPC5xxx
17 onboard CAN controller.
18
19 This driver can also be built as a module. If so, the module
20 will be called mscan-mpc5xxx.ko.
21
22endif
23
diff --git a/drivers/net/can/mscan/Makefile b/drivers/net/can/mscan/Makefile
new file mode 100644
index 000000000000..c9fab17cd8b4
--- /dev/null
+++ b/drivers/net/can/mscan/Makefile
@@ -0,0 +1,5 @@
1
2obj-$(CONFIG_CAN_MPC5XXX) += mscan-mpc5xxx.o
3mscan-mpc5xxx-objs := mscan.o mpc5xxx_can.o
4
5ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
new file mode 100644
index 000000000000..1de6f6349b16
--- /dev/null
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -0,0 +1,259 @@
1/*
2 * CAN bus driver for the Freescale MPC5xxx embedded CPU.
3 *
4 * Copyright (C) 2004-2005 Andrey Volkov <avolkov@varma-el.com>,
5 * Varma Electronics Oy
6 * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
7 * Copyright (C) 2009 Wolfram Sang, Pengutronix <w.sang@pengutronix.de>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the version 2 of the GNU General Public License
11 * as published by the Free Software Foundation
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/interrupt.h>
26#include <linux/platform_device.h>
27#include <linux/netdevice.h>
28#include <linux/can.h>
29#include <linux/can/dev.h>
30#include <linux/of_platform.h>
31#include <sysdev/fsl_soc.h>
32#include <linux/io.h>
33#include <asm/mpc52xx.h>
34
35#include "mscan.h"
36
37#define DRV_NAME "mpc5xxx_can"
38
/* OF match for the MPC5200 clock distribution module (CDM) node */
static struct of_device_id mpc52xx_cdm_ids[] __devinitdata = {
	{ .compatible = "fsl,mpc5200-cdm", },
	{}
};
43
/*
 * Get frequency of the MSCAN clock source
 *
 * Either the oscillator clock (SYS_XTAL_IN) or the IP bus clock (IP_CLK)
 * can be selected. According to the MPC5200 user's manual, the oscillator
 * clock is the better choice as it has less jitter but due to a hardware
 * bug, it can not be selected for the old MPC5200 Rev. A chips.
 *
 * Returns the clock frequency in Hz, or 0 on failure.
 */

static unsigned int __devinit mpc52xx_can_clock_freq(struct of_device *of,
						     int clock_src)
{
	unsigned int pvr;
	struct mpc52xx_cdm __iomem *cdm;
	struct device_node *np_cdm;
	unsigned int freq;
	u32 val;

	pvr = mfspr(SPRN_PVR);

	freq = mpc5xxx_get_bus_frequency(of->node);
	if (!freq)
		return 0;

	/*
	 * Bus clock requested, or PVR 0x80822011 (presumably the Rev. A
	 * part mentioned above -- TODO confirm) which must use it.
	 */
	if (clock_src == MSCAN_CLKSRC_BUS || pvr == 0x80822011)
		return freq;

	/* Determine SYS_XTAL_IN frequency from the clock domain settings */
	np_cdm = of_find_matching_node(NULL, mpc52xx_cdm_ids);
	if (!np_cdm) {
		dev_err(&of->dev, "can't get clock node!\n");
		return 0;
	}
	/* NOTE(review): of_iomap() result is not checked for NULL */
	cdm = of_iomap(np_cdm, 0);
	of_node_put(np_cdm);

	if (in_8(&cdm->ipb_clk_sel) & 0x1)
		freq *= 2;
	val = in_be32(&cdm->rstcfg);

	/* Back-compute SYS_XTAL_IN from the PLL divider configuration */
	freq *= (val & (1 << 5)) ? 8 : 4;
	freq /= (val & (1 << 6)) ? 12 : 16;

	iounmap(cdm);

	return freq;
}
91
/*
 * OF platform probe: map the MSCAN registers, resolve the interrupt,
 * allocate the candev, work out the clock frequency from the device
 * tree and register with the MSCAN core.
 */
static int __devinit mpc5xxx_can_probe(struct of_device *ofdev,
				       const struct of_device_id *id)
{
	struct device_node *np = ofdev->node;
	struct net_device *dev;
	struct mscan_priv *priv;
	void __iomem *base;
	const char *clk_src;
	int err, irq, clock_src;

	base = of_iomap(ofdev->node, 0);
	if (!base) {
		dev_err(&ofdev->dev, "couldn't ioremap\n");
		err = -ENOMEM;
		goto exit_release_mem;
	}

	irq = irq_of_parse_and_map(np, 0);
	if (!irq) {
		dev_err(&ofdev->dev, "no irq found\n");
		err = -ENODEV;
		goto exit_unmap_mem;
	}

	dev = alloc_mscandev();
	if (!dev) {
		err = -ENOMEM;
		goto exit_dispose_irq;
	}

	priv = netdev_priv(dev);
	priv->reg_base = base;
	dev->irq = irq;

	/*
	 * Either the oscillator clock (SYS_XTAL_IN) or the IP bus clock
	 * (IP_CLK) can be selected as MSCAN clock source. According to
	 * the MPC5200 user's manual, the oscillator clock is the better
	 * choice as it has less jitter. For this reason, it is selected
	 * by default.
	 */
	clk_src = of_get_property(np, "fsl,mscan-clock-source", NULL);
	if (clk_src && strcmp(clk_src, "ip") == 0)
		clock_src = MSCAN_CLKSRC_BUS;
	else
		clock_src = MSCAN_CLKSRC_XTAL;
	priv->can.clock.freq = mpc52xx_can_clock_freq(ofdev, clock_src);
	if (!priv->can.clock.freq) {
		dev_err(&ofdev->dev, "couldn't get MSCAN clock frequency\n");
		err = -ENODEV;
		goto exit_free_mscan;
	}

	SET_NETDEV_DEV(dev, &ofdev->dev);

	err = register_mscandev(dev, clock_src);
	if (err) {
		dev_err(&ofdev->dev, "registering %s failed (err=%d)\n",
			DRV_NAME, err);
		goto exit_free_mscan;
	}

	dev_set_drvdata(&ofdev->dev, dev);

	dev_info(&ofdev->dev, "MSCAN at 0x%p, irq %d, clock %d Hz\n",
		 priv->reg_base, dev->irq, priv->can.clock.freq);

	return 0;

/* Unwind in reverse order of acquisition */
exit_free_mscan:
	free_candev(dev);
exit_dispose_irq:
	irq_dispose_mapping(irq);
exit_unmap_mem:
	iounmap(base);
exit_release_mem:
	return err;
}
170
/* OF platform remove: release everything acquired in probe. */
static int __devexit mpc5xxx_can_remove(struct of_device *ofdev)
{
	struct net_device *dev = dev_get_drvdata(&ofdev->dev);
	struct mscan_priv *priv = netdev_priv(dev);

	dev_set_drvdata(&ofdev->dev, NULL);

	unregister_mscandev(dev);
	iounmap(priv->reg_base);
	irq_dispose_mapping(dev->irq);
	free_candev(dev);

	return 0;
}
185
186#ifdef CONFIG_PM
/*
 * Snapshot of the whole register file taken at suspend time.
 * NOTE(review): a single static instance only supports one MSCAN
 * controller -- verify against boards with multiple instances.
 */
static struct mscan_regs saved_regs;

/* Save the complete MSCAN register file for restoration on resume. */
static int mpc5xxx_can_suspend(struct of_device *ofdev, pm_message_t state)
{
	struct net_device *dev = dev_get_drvdata(&ofdev->dev);
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;

	_memcpy_fromio(&saved_regs, regs, sizeof(*regs));

	return 0;
}
198
/*
 * Restore the register file saved at suspend time. The controller is
 * first put into init mode (INITRQ set, wait for INITAK) because most
 * configuration registers are only writable in that mode; interrupt
 * enables and control register 0 are restored last, after leaving
 * init mode.
 */
static int mpc5xxx_can_resume(struct of_device *ofdev)
{
	struct net_device *dev = dev_get_drvdata(&ofdev->dev);
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;

	/* Request init mode and wait for the acknowledge */
	regs->canctl0 |= MSCAN_INITRQ;
	while (!(regs->canctl1 & MSCAN_INITAK))
		udelay(10);

	regs->canctl1 = saved_regs.canctl1;
	regs->canbtr0 = saved_regs.canbtr0;
	regs->canbtr1 = saved_regs.canbtr1;
	regs->canidac = saved_regs.canidac;

	/* restore masks, buffers etc. */
	_memcpy_toio(&regs->canidar1_0, (void *)&saved_regs.canidar1_0,
		     sizeof(*regs) - offsetof(struct mscan_regs, canidar1_0));

	regs->canctl0 &= ~MSCAN_INITRQ;
	regs->cantbsel = saved_regs.cantbsel;
	regs->canrier = saved_regs.canrier;
	regs->cantier = saved_regs.cantier;
	regs->canctl0 = saved_regs.canctl0;

	return 0;
}
226#endif
227
/* Device-tree compatibles handled by this driver */
static struct of_device_id __devinitdata mpc5xxx_can_table[] = {
	{.compatible = "fsl,mpc5200-mscan"},
	{},
};
232
/* OF platform driver glue; PM callbacks compiled in only with CONFIG_PM */
static struct of_platform_driver mpc5xxx_can_driver = {
	.owner = THIS_MODULE,
	.name = "mpc5xxx_can",
	.probe = mpc5xxx_can_probe,
	.remove = __devexit_p(mpc5xxx_can_remove),
#ifdef CONFIG_PM
	.suspend = mpc5xxx_can_suspend,
	.resume = mpc5xxx_can_resume,
#endif
	.match_table = mpc5xxx_can_table,
};
244
245static int __init mpc5xxx_can_init(void)
246{
247 return of_register_platform_driver(&mpc5xxx_can_driver);
248}
249module_init(mpc5xxx_can_init);
250
251static void __exit mpc5xxx_can_exit(void)
252{
253 return of_unregister_platform_driver(&mpc5xxx_can_driver);
254};
255module_exit(mpc5xxx_can_exit);
256
257MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
258MODULE_DESCRIPTION("Freescale MPC5200 CAN driver");
259MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
new file mode 100644
index 000000000000..bb06dfb58f25
--- /dev/null
+++ b/drivers/net/can/mscan/mscan.c
@@ -0,0 +1,668 @@
1/*
2 * CAN bus driver for the alone generic (as possible as) MSCAN controller.
3 *
4 * Copyright (C) 2005-2006 Andrey Volkov <avolkov@varma-el.com>,
5 * Varma Electronics Oy
6 * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
 * Copyright (C) 2008-2009 Pengutronix <kernel@pengutronix.de>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the version 2 of the GNU General Public License
11 * as published by the Free Software Foundation
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/interrupt.h>
26#include <linux/delay.h>
27#include <linux/netdevice.h>
28#include <linux/if_arp.h>
29#include <linux/if_ether.h>
30#include <linux/list.h>
31#include <linux/can.h>
32#include <linux/can/dev.h>
33#include <linux/can/error.h>
34#include <linux/io.h>
35
36#include "mscan.h"
37
/* Bit-timing limits advertised to the CAN core for the MSCAN cell */
static struct can_bittiming_const mscan_bittiming_const = {
	.name = "mscan",
	.tseg1_min = 4,
	.tseg1_max = 16,
	.tseg2_min = 2,
	.tseg2_max = 8,
	.sjw_max = 4,
	.brp_min = 1,
	.brp_max = 64,
	.brp_inc = 1,
};
49
/*
 * Snapshot of controller mode and interrupt-enable registers
 * (canrier/cantier); `mode` presumably holds the INITRQ/SLPRQ/CSWAI
 * request bits -- usage not visible in this chunk, confirm elsewhere.
 */
struct mscan_state {
	u8 mode;
	u8 canrier;
	u8 cantier;
};
55
/*
 * Maps the 2-bit RSTAT/TSTAT status encoding (0..3, as extracted by
 * MSCAN_STATE_RX()/MSCAN_STATE_TX()) onto the socketcan bus states.
 */
static enum can_state state_map[] = {
	CAN_STATE_ERROR_ACTIVE,
	CAN_STATE_ERROR_WARNING,
	CAN_STATE_ERROR_PASSIVE,
	CAN_STATE_BUS_OFF
};
62
63static int mscan_set_mode(struct net_device *dev, u8 mode)
64{
65 struct mscan_priv *priv = netdev_priv(dev);
66 struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
67 int ret = 0;
68 int i;
69 u8 canctl1;
70
71 if (mode != MSCAN_NORMAL_MODE) {
72 if (priv->tx_active) {
73 /* Abort transfers before going to sleep */#
74 out_8(&regs->cantarq, priv->tx_active);
75 /* Suppress TX done interrupts */
76 out_8(&regs->cantier, 0);
77 }
78
79 canctl1 = in_8(&regs->canctl1);
80 if ((mode & MSCAN_SLPRQ) && !(canctl1 & MSCAN_SLPAK)) {
81 setbits8(&regs->canctl0, MSCAN_SLPRQ);
82 for (i = 0; i < MSCAN_SET_MODE_RETRIES; i++) {
83 if (in_8(&regs->canctl1) & MSCAN_SLPAK)
84 break;
85 udelay(100);
86 }
87 /*
88 * The mscan controller will fail to enter sleep mode,
89 * while there are irregular activities on bus, like
90 * somebody keeps retransmitting. This behavior is
91 * undocumented and seems to differ between mscan built
92 * in mpc5200b and mpc5200. We proceed in that case,
93 * since otherwise the slprq will be kept set and the
94 * controller will get stuck. NOTE: INITRQ or CSWAI
95 * will abort all active transmit actions, if still
96 * any, at once.
97 */
98 if (i >= MSCAN_SET_MODE_RETRIES)
99 dev_dbg(dev->dev.parent,
100 "device failed to enter sleep mode. "
101 "We proceed anyhow.\n");
102 else
103 priv->can.state = CAN_STATE_SLEEPING;
104 }
105
106 if ((mode & MSCAN_INITRQ) && !(canctl1 & MSCAN_INITAK)) {
107 setbits8(&regs->canctl0, MSCAN_INITRQ);
108 for (i = 0; i < MSCAN_SET_MODE_RETRIES; i++) {
109 if (in_8(&regs->canctl1) & MSCAN_INITAK)
110 break;
111 }
112 if (i >= MSCAN_SET_MODE_RETRIES)
113 ret = -ENODEV;
114 }
115 if (!ret)
116 priv->can.state = CAN_STATE_STOPPED;
117
118 if (mode & MSCAN_CSWAI)
119 setbits8(&regs->canctl0, MSCAN_CSWAI);
120
121 } else {
122 canctl1 = in_8(&regs->canctl1);
123 if (canctl1 & (MSCAN_SLPAK | MSCAN_INITAK)) {
124 clrbits8(&regs->canctl0, MSCAN_SLPRQ | MSCAN_INITRQ);
125 for (i = 0; i < MSCAN_SET_MODE_RETRIES; i++) {
126 canctl1 = in_8(&regs->canctl1);
127 if (!(canctl1 & (MSCAN_INITAK | MSCAN_SLPAK)))
128 break;
129 }
130 if (i >= MSCAN_SET_MODE_RETRIES)
131 ret = -ENODEV;
132 else
133 priv->can.state = CAN_STATE_ERROR_ACTIVE;
134 }
135 }
136 return ret;
137}
138
/*
 * Start the controller: reset the driver's TX bookkeeping, enter
 * normal mode, derive the initial CAN state from CANRFLG and enable
 * the receive/status-change interrupts.
 */
static int mscan_start(struct net_device *dev)
{
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
	u8 canrflg;
	int err;

	/* Mask receive interrupts while (re)initializing */
	out_8(&regs->canrier, 0);

	INIT_LIST_HEAD(&priv->tx_head);
	priv->prev_buf_id = 0;
	priv->cur_pri = 0;
	priv->tx_active = 0;
	priv->shadow_canrier = 0;
	priv->flags = 0;

	err = mscan_set_mode(dev, MSCAN_NORMAL_MODE);
	if (err)
		return err;

	/* Seed the CAN state from the current RSTAT/TSTAT bits */
	canrflg = in_8(&regs->canrflg);
	priv->shadow_statflg = canrflg & MSCAN_STAT_MSK;
	priv->can.state = state_map[max(MSCAN_STATE_RX(canrflg),
				       MSCAN_STATE_TX(canrflg))];
	out_8(&regs->cantier, 0);

	/* Enable receive interrupts. */
	out_8(&regs->canrier, MSCAN_OVRIE | MSCAN_RXFIE | MSCAN_CSCIE |
	      MSCAN_RSTATE1 | MSCAN_RSTATE0 | MSCAN_TSTATE1 | MSCAN_TSTATE0);

	return 0;
}
171
/*
 * Queue a CAN frame into one of the three hardware TX buffers.
 *
 * Buffers with a lower id have higher bus priority, so the software
 * priority byte (tbpr/cur_pri) is bumped whenever a lower-numbered
 * buffer is reused, to keep frames on the wire in submission order.
 */
static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct can_frame *frame = (struct can_frame *)skb->data;
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
	int i, rtr, buf_id;
	u32 can_id;

	if (frame->can_dlc > 8)
		return -EINVAL;

	out_8(&regs->cantier, 0);

	/* Pick the lowest-numbered free TX buffer (TXE bit = empty) */
	i = ~priv->tx_active & MSCAN_TXE;
	buf_id = ffs(i) - 1;
	switch (hweight8(i)) {
	case 0:
		netif_stop_queue(dev);
		dev_err(dev->dev.parent, "Tx Ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	case 1:
		/*
		 * if buf_id < 3, then current frame will be send out of order,
		 * since buffer with lower id have higher priority (hell..)
		 */
		netif_stop_queue(dev);
		/* fall through */
	case 2:
		if (buf_id < priv->prev_buf_id) {
			priv->cur_pri++;
			if (priv->cur_pri == 0xff) {
				/* Priority byte exhausted: drain all buffers */
				set_bit(F_TX_WAIT_ALL, &priv->flags);
				netif_stop_queue(dev);
			}
		}
		set_bit(F_TX_PROGRESS, &priv->flags);
		break;
	}
	priv->prev_buf_id = buf_id;
	out_8(&regs->cantbsel, i);

	rtr = frame->can_id & CAN_RTR_FLAG;

	/* RTR is always the lowest bit of interest, then IDs follow */
	if (frame->can_id & CAN_EFF_FLAG) {
		can_id = (frame->can_id & CAN_EFF_MASK)
			 << (MSCAN_EFF_RTR_SHIFT + 1);
		if (rtr)
			can_id |= 1 << MSCAN_EFF_RTR_SHIFT;
		out_be16(&regs->tx.idr3_2, can_id);

		can_id >>= 16;
		/* EFF_FLAGS are inbetween the IDs :( */
		can_id = (can_id & 0x7) | ((can_id << 2) & 0xffe0)
			 | MSCAN_EFF_FLAGS;
	} else {
		can_id = (frame->can_id & CAN_SFF_MASK)
			 << (MSCAN_SFF_RTR_SHIFT + 1);
		if (rtr)
			can_id |= 1 << MSCAN_SFF_RTR_SHIFT;
	}
	out_be16(&regs->tx.idr1_0, can_id);

	if (!rtr) {
		void __iomem *data = &regs->tx.dsr1_0;
		u16 *payload = (u16 *)frame->data;

		/* It is safe to write into dsr[dlc+1] */
		for (i = 0; i < (frame->can_dlc + 1) / 2; i++) {
			out_be16(data, *payload++);
			/* Data registers are not contiguous in the map */
			data += 2 + _MSCAN_RESERVED_DSR_SIZE;
		}
	}

	out_8(&regs->tx.dlr, frame->can_dlc);
	out_8(&regs->tx.tbpr, priv->cur_pri);

	/* Start transmission. */
	out_8(&regs->cantflg, 1 << buf_id);

	if (!test_bit(F_TX_PROGRESS, &priv->flags))
		dev->trans_start = jiffies;

	list_add_tail(&priv->tx_queue[buf_id].list, &priv->tx_head);

	can_put_echo_skb(skb, dev, buf_id);

	/* Enable interrupt. */
	priv->tx_active |= 1 << buf_id;
	out_8(&regs->cantier, priv->tx_active);

	return NETDEV_TX_OK;
}
264
/*
 * On a controller status-change interrupt (CSCIF), update priv->can.state
 * from the RSTAT/TSTAT bits in canrflg.
 *
 * This function returns the old state to see where we came from.
 */
static enum can_state check_set_state(struct net_device *dev, u8 canrflg)
{
	struct mscan_priv *priv = netdev_priv(dev);
	enum can_state state, old_state = priv->can.state;

	if (canrflg & MSCAN_CSCIF && old_state <= CAN_STATE_BUS_OFF) {
		state = state_map[max(MSCAN_STATE_RX(canrflg),
				      MSCAN_STATE_TX(canrflg))];
		priv->can.state = state;
	}
	return old_state;
}
278
/*
 * Read one received frame from the RX buffer into `frame` and release
 * the buffer by acknowledging the RXF flag.
 */
static void mscan_get_rx_frame(struct net_device *dev, struct can_frame *frame)
{
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
	u32 can_id;
	int i;

	can_id = in_be16(&regs->rx.idr1_0);
	/* Bit 3 of idr1_0 distinguishes extended from standard frames */
	if (can_id & (1 << 3)) {
		frame->can_id = CAN_EFF_FLAG;
		can_id = ((can_id << 16) | in_be16(&regs->rx.idr3_2));
		/* Squeeze the flag bits back out of the 29-bit identifier */
		can_id = ((can_id & 0xffe00000) |
			  ((can_id & 0x7ffff) << 2)) >> 2;
	} else {
		can_id >>= 4;
		frame->can_id = 0;
	}

	/* Lowest bit is RTR, the identifier sits above it */
	frame->can_id |= can_id >> 1;
	if (can_id & 1)
		frame->can_id |= CAN_RTR_FLAG;
	frame->can_dlc = in_8(&regs->rx.dlr) & 0xf;

	if (!(frame->can_id & CAN_RTR_FLAG)) {
		void __iomem *data = &regs->rx.dsr1_0;
		u16 *payload = (u16 *)frame->data;

		for (i = 0; i < (frame->can_dlc + 1) / 2; i++) {
			*payload++ = in_be16(data);
			/* Data registers are not contiguous in the map */
			data += 2 + _MSCAN_RESERVED_DSR_SIZE;
		}
	}

	/* Release the hardware RX buffer */
	out_8(&regs->canrflg, MSCAN_RXF);
}
314
/*
 * Build an error frame from the CANRFLG error/status-change bits,
 * update statistics and the CAN state, and acknowledge the error
 * interrupt sources.
 */
static void mscan_get_err_frame(struct net_device *dev, struct can_frame *frame,
				u8 canrflg)
{
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
	struct net_device_stats *stats = &dev->stats;
	enum can_state old_state;

	dev_dbg(dev->dev.parent, "error interrupt (canrflg=%#x)\n", canrflg);
	frame->can_id = CAN_ERR_FLAG;

	/* RX overrun */
	if (canrflg & MSCAN_OVRIF) {
		frame->can_id |= CAN_ERR_CRTL;
		frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
		stats->rx_over_errors++;
		stats->rx_errors++;
	} else {
		frame->data[1] = 0;
	}

	old_state = check_set_state(dev, canrflg);
	/* State changed */
	if (old_state != priv->can.state) {
		switch (priv->can.state) {
		case CAN_STATE_ERROR_WARNING:
			frame->can_id |= CAN_ERR_CRTL;
			priv->can.can_stats.error_warning++;
			/* Compare against the previous RSTAT/TSTAT shadow
			 * to tell RX from TX warnings */
			if ((priv->shadow_statflg & MSCAN_RSTAT_MSK) <
			    (canrflg & MSCAN_RSTAT_MSK))
				frame->data[1] |= CAN_ERR_CRTL_RX_WARNING;
			if ((priv->shadow_statflg & MSCAN_TSTAT_MSK) <
			    (canrflg & MSCAN_TSTAT_MSK))
				frame->data[1] |= CAN_ERR_CRTL_TX_WARNING;
			break;
		case CAN_STATE_ERROR_PASSIVE:
			frame->can_id |= CAN_ERR_CRTL;
			priv->can.can_stats.error_passive++;
			frame->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
			break;
		case CAN_STATE_BUS_OFF:
			frame->can_id |= CAN_ERR_BUSOFF;
			/*
			 * The MSCAN on the MPC5200 does recover from bus-off
			 * automatically. To avoid that we stop the chip doing
			 * a light-weight stop (we are in irq-context).
			 */
			out_8(&regs->cantier, 0);
			out_8(&regs->canrier, 0);
			setbits8(&regs->canctl0, MSCAN_SLPRQ | MSCAN_INITRQ);
			can_bus_off(dev);
			break;
		default:
			break;
		}
	}
	priv->shadow_statflg = canrflg & MSCAN_STAT_MSK;
	frame->can_dlc = CAN_ERR_DLC;
	/* Acknowledge the serviced error interrupt flags */
	out_8(&regs->canrflg, MSCAN_ERR_IF);
}
374
375static int mscan_rx_poll(struct napi_struct *napi, int quota)
376{
377 struct mscan_priv *priv = container_of(napi, struct mscan_priv, napi);
378 struct net_device *dev = napi->dev;
379 struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
380 struct net_device_stats *stats = &dev->stats;
381 int npackets = 0;
382 int ret = 1;
383 struct sk_buff *skb;
384 struct can_frame *frame;
385 u8 canrflg;
386
387 while (npackets < quota) {
388 canrflg = in_8(&regs->canrflg);
389 if (!(canrflg & (MSCAN_RXF | MSCAN_ERR_IF)))
390 break;
391
392 skb = alloc_can_skb(dev, &frame);
393 if (!skb) {
394 if (printk_ratelimit())
395 dev_notice(dev->dev.parent, "packet dropped\n");
396 stats->rx_dropped++;
397 out_8(&regs->canrflg, canrflg);
398 continue;
399 }
400
401 if (canrflg & MSCAN_RXF)
402 mscan_get_rx_frame(dev, frame);
403 else if (canrflg & MSCAN_ERR_IF)
404 mscan_get_err_frame(dev, frame, canrflg);
405
406 stats->rx_packets++;
407 stats->rx_bytes += frame->can_dlc;
408 npackets++;
409 netif_receive_skb(skb);
410 }
411
412 if (!(in_8(&regs->canrflg) & (MSCAN_RXF | MSCAN_ERR_IF))) {
413 napi_complete(&priv->napi);
414 clear_bit(F_RX_PROGRESS, &priv->flags);
415 if (priv->can.state < CAN_STATE_BUS_OFF)
416 out_8(&regs->canrier, priv->shadow_canrier);
417 ret = 0;
418 }
419 return ret;
420}
421
422static irqreturn_t mscan_isr(int irq, void *dev_id)
423{
424 struct net_device *dev = (struct net_device *)dev_id;
425 struct mscan_priv *priv = netdev_priv(dev);
426 struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
427 struct net_device_stats *stats = &dev->stats;
428 u8 cantier, cantflg, canrflg;
429 irqreturn_t ret = IRQ_NONE;
430
431 cantier = in_8(&regs->cantier) & MSCAN_TXE;
432 cantflg = in_8(&regs->cantflg) & cantier;
433
434 if (cantier && cantflg) {
435 struct list_head *tmp, *pos;
436
437 list_for_each_safe(pos, tmp, &priv->tx_head) {
438 struct tx_queue_entry *entry =
439 list_entry(pos, struct tx_queue_entry, list);
440 u8 mask = entry->mask;
441
442 if (!(cantflg & mask))
443 continue;
444
445 out_8(&regs->cantbsel, mask);
446 stats->tx_bytes += in_8(&regs->tx.dlr);
447 stats->tx_packets++;
448 can_get_echo_skb(dev, entry->id);
449 priv->tx_active &= ~mask;
450 list_del(pos);
451 }
452
453 if (list_empty(&priv->tx_head)) {
454 clear_bit(F_TX_WAIT_ALL, &priv->flags);
455 clear_bit(F_TX_PROGRESS, &priv->flags);
456 priv->cur_pri = 0;
457 } else {
458 dev->trans_start = jiffies;
459 }
460
461 if (!test_bit(F_TX_WAIT_ALL, &priv->flags))
462 netif_wake_queue(dev);
463
464 out_8(&regs->cantier, priv->tx_active);
465 ret = IRQ_HANDLED;
466 }
467
468 canrflg = in_8(&regs->canrflg);
469 if ((canrflg & ~MSCAN_STAT_MSK) &&
470 !test_and_set_bit(F_RX_PROGRESS, &priv->flags)) {
471 if (canrflg & ~MSCAN_STAT_MSK) {
472 priv->shadow_canrier = in_8(&regs->canrier);
473 out_8(&regs->canrier, 0);
474 napi_schedule(&priv->napi);
475 ret = IRQ_HANDLED;
476 } else {
477 clear_bit(F_RX_PROGRESS, &priv->flags);
478 }
479 }
480 return ret;
481}
482
483static int mscan_do_set_mode(struct net_device *dev, enum can_mode mode)
484{
485 struct mscan_priv *priv = netdev_priv(dev);
486 int ret = 0;
487
488 if (!priv->open_time)
489 return -EINVAL;
490
491 switch (mode) {
492 case CAN_MODE_START:
493 if (priv->can.state <= CAN_STATE_BUS_OFF)
494 mscan_set_mode(dev, MSCAN_INIT_MODE);
495 ret = mscan_start(dev);
496 if (ret)
497 break;
498 if (netif_queue_stopped(dev))
499 netif_wake_queue(dev);
500 break;
501
502 default:
503 ret = -EOPNOTSUPP;
504 break;
505 }
506 return ret;
507}
508
509static int mscan_do_set_bittiming(struct net_device *dev)
510{
511 struct mscan_priv *priv = netdev_priv(dev);
512 struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
513 struct can_bittiming *bt = &priv->can.bittiming;
514 u8 btr0, btr1;
515
516 btr0 = BTR0_SET_BRP(bt->brp) | BTR0_SET_SJW(bt->sjw);
517 btr1 = (BTR1_SET_TSEG1(bt->prop_seg + bt->phase_seg1) |
518 BTR1_SET_TSEG2(bt->phase_seg2) |
519 BTR1_SET_SAM(priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES));
520
521 dev_info(dev->dev.parent, "setting BTR0=0x%02x BTR1=0x%02x\n",
522 btr0, btr1);
523
524 out_8(&regs->canbtr0, btr0);
525 out_8(&regs->canbtr1, btr1);
526
527 return 0;
528}
529
530static int mscan_open(struct net_device *dev)
531{
532 int ret;
533 struct mscan_priv *priv = netdev_priv(dev);
534 struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
535
536 /* common open */
537 ret = open_candev(dev);
538 if (ret)
539 return ret;
540
541 napi_enable(&priv->napi);
542
543 ret = request_irq(dev->irq, mscan_isr, 0, dev->name, dev);
544 if (ret < 0) {
545 dev_err(dev->dev.parent, "failed to attach interrupt\n");
546 goto exit_napi_disable;
547 }
548
549 priv->open_time = jiffies;
550
551 clrbits8(&regs->canctl1, MSCAN_LISTEN);
552
553 ret = mscan_start(dev);
554 if (ret)
555 goto exit_free_irq;
556
557 netif_start_queue(dev);
558
559 return 0;
560
561exit_free_irq:
562 priv->open_time = 0;
563 free_irq(dev->irq, dev);
564exit_napi_disable:
565 napi_disable(&priv->napi);
566 close_candev(dev);
567 return ret;
568}
569
570static int mscan_close(struct net_device *dev)
571{
572 struct mscan_priv *priv = netdev_priv(dev);
573 struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
574
575 netif_stop_queue(dev);
576 napi_disable(&priv->napi);
577
578 out_8(&regs->cantier, 0);
579 out_8(&regs->canrier, 0);
580 mscan_set_mode(dev, MSCAN_INIT_MODE);
581 close_candev(dev);
582 free_irq(dev->irq, dev);
583 priv->open_time = 0;
584
585 return 0;
586}
587
/* Network device callbacks; mscan_start_xmit is defined earlier in this file */
static const struct net_device_ops mscan_netdev_ops = {
	.ndo_open	= mscan_open,
	.ndo_stop	= mscan_close,
	.ndo_start_xmit	= mscan_start_xmit,
};
593
594int register_mscandev(struct net_device *dev, int clock_src)
595{
596 struct mscan_priv *priv = netdev_priv(dev);
597 struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
598 u8 ctl1;
599
600 ctl1 = in_8(&regs->canctl1);
601 if (clock_src)
602 ctl1 |= MSCAN_CLKSRC;
603 else
604 ctl1 &= ~MSCAN_CLKSRC;
605
606 ctl1 |= MSCAN_CANE;
607 out_8(&regs->canctl1, ctl1);
608 udelay(100);
609
610 /* acceptance mask/acceptance code (accept everything) */
611 out_be16(&regs->canidar1_0, 0);
612 out_be16(&regs->canidar3_2, 0);
613 out_be16(&regs->canidar5_4, 0);
614 out_be16(&regs->canidar7_6, 0);
615
616 out_be16(&regs->canidmr1_0, 0xffff);
617 out_be16(&regs->canidmr3_2, 0xffff);
618 out_be16(&regs->canidmr5_4, 0xffff);
619 out_be16(&regs->canidmr7_6, 0xffff);
620 /* Two 32 bit Acceptance Filters */
621 out_8(&regs->canidac, MSCAN_AF_32BIT);
622
623 mscan_set_mode(dev, MSCAN_INIT_MODE);
624
625 return register_candev(dev);
626}
627
628void unregister_mscandev(struct net_device *dev)
629{
630 struct mscan_priv *priv = netdev_priv(dev);
631 struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
632 mscan_set_mode(dev, MSCAN_INIT_MODE);
633 clrbits8(&regs->canctl1, MSCAN_CANE);
634 unregister_candev(dev);
635}
636
637struct net_device *alloc_mscandev(void)
638{
639 struct net_device *dev;
640 struct mscan_priv *priv;
641 int i;
642
643 dev = alloc_candev(sizeof(struct mscan_priv), MSCAN_ECHO_SKB_MAX);
644 if (!dev)
645 return NULL;
646 priv = netdev_priv(dev);
647
648 dev->netdev_ops = &mscan_netdev_ops;
649
650 dev->flags |= IFF_ECHO; /* we support local echo */
651
652 netif_napi_add(dev, &priv->napi, mscan_rx_poll, 8);
653
654 priv->can.bittiming_const = &mscan_bittiming_const;
655 priv->can.do_set_bittiming = mscan_do_set_bittiming;
656 priv->can.do_set_mode = mscan_do_set_mode;
657
658 for (i = 0; i < TX_QUEUE_SIZE; i++) {
659 priv->tx_queue[i].id = i;
660 priv->tx_queue[i].mask = 1 << i;
661 }
662
663 return dev;
664}
665
666MODULE_AUTHOR("Andrey Volkov <avolkov@varma-el.com>");
667MODULE_LICENSE("GPL v2");
668MODULE_DESCRIPTION("CAN port driver for a MSCAN based chips");
diff --git a/drivers/net/can/mscan/mscan.h b/drivers/net/can/mscan/mscan.h
new file mode 100644
index 000000000000..00fc4aaf1ed8
--- /dev/null
+++ b/drivers/net/can/mscan/mscan.h
@@ -0,0 +1,296 @@
1/*
2 * Definitions of consts/structs to drive the Freescale MSCAN.
3 *
4 * Copyright (C) 2005-2006 Andrey Volkov <avolkov@varma-el.com>,
5 * Varma Electronics Oy
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the version 2 of the GNU General Public License
9 * as published by the Free Software Foundation
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#ifndef __MSCAN_H__
22#define __MSCAN_H__
23
24#include <linux/types.h>
25
26/* MSCAN control register 0 (CANCTL0) bits */
27#define MSCAN_RXFRM 0x80
28#define MSCAN_RXACT 0x40
29#define MSCAN_CSWAI 0x20
30#define MSCAN_SYNCH 0x10
31#define MSCAN_TIME 0x08
32#define MSCAN_WUPE 0x04
33#define MSCAN_SLPRQ 0x02
34#define MSCAN_INITRQ 0x01
35
36/* MSCAN control register 1 (CANCTL1) bits */
37#define MSCAN_CANE 0x80
38#define MSCAN_CLKSRC 0x40
39#define MSCAN_LOOPB 0x20
40#define MSCAN_LISTEN 0x10
41#define MSCAN_WUPM 0x04
42#define MSCAN_SLPAK 0x02
43#define MSCAN_INITAK 0x01
44
45/* Use the MPC5200 MSCAN variant? */
46#ifdef CONFIG_PPC
47#define MSCAN_FOR_MPC5200
48#endif
49
50#ifdef MSCAN_FOR_MPC5200
51#define MSCAN_CLKSRC_BUS 0
52#define MSCAN_CLKSRC_XTAL MSCAN_CLKSRC
53#else
54#define MSCAN_CLKSRC_BUS MSCAN_CLKSRC
55#define MSCAN_CLKSRC_XTAL 0
56#endif
57
58/* MSCAN receiver flag register (CANRFLG) bits */
59#define MSCAN_WUPIF 0x80
60#define MSCAN_CSCIF 0x40
61#define MSCAN_RSTAT1 0x20
62#define MSCAN_RSTAT0 0x10
63#define MSCAN_TSTAT1 0x08
64#define MSCAN_TSTAT0 0x04
65#define MSCAN_OVRIF 0x02
66#define MSCAN_RXF 0x01
67#define MSCAN_ERR_IF (MSCAN_OVRIF | MSCAN_CSCIF)
68#define MSCAN_RSTAT_MSK (MSCAN_RSTAT1 | MSCAN_RSTAT0)
69#define MSCAN_TSTAT_MSK (MSCAN_TSTAT1 | MSCAN_TSTAT0)
70#define MSCAN_STAT_MSK (MSCAN_RSTAT_MSK | MSCAN_TSTAT_MSK)
71
72#define MSCAN_STATE_BUS_OFF (MSCAN_RSTAT1 | MSCAN_RSTAT0 | \
73 MSCAN_TSTAT1 | MSCAN_TSTAT0)
74#define MSCAN_STATE_TX(canrflg) (((canrflg)&MSCAN_TSTAT_MSK)>>2)
75#define MSCAN_STATE_RX(canrflg) (((canrflg)&MSCAN_RSTAT_MSK)>>4)
76#define MSCAN_STATE_ACTIVE 0
77#define MSCAN_STATE_WARNING 1
78#define MSCAN_STATE_PASSIVE 2
79#define MSCAN_STATE_BUSOFF 3
80
81/* MSCAN receiver interrupt enable register (CANRIER) bits */
82#define MSCAN_WUPIE 0x80
83#define MSCAN_CSCIE 0x40
84#define MSCAN_RSTATE1 0x20
85#define MSCAN_RSTATE0 0x10
86#define MSCAN_TSTATE1 0x08
87#define MSCAN_TSTATE0 0x04
88#define MSCAN_OVRIE 0x02
89#define MSCAN_RXFIE 0x01
90
91/* MSCAN transmitter flag register (CANTFLG) bits */
92#define MSCAN_TXE2 0x04
93#define MSCAN_TXE1 0x02
94#define MSCAN_TXE0 0x01
95#define MSCAN_TXE (MSCAN_TXE2 | MSCAN_TXE1 | MSCAN_TXE0)
96
97/* MSCAN transmitter interrupt enable register (CANTIER) bits */
98#define MSCAN_TXIE2 0x04
99#define MSCAN_TXIE1 0x02
100#define MSCAN_TXIE0 0x01
101#define MSCAN_TXIE (MSCAN_TXIE2 | MSCAN_TXIE1 | MSCAN_TXIE0)
102
103/* MSCAN transmitter message abort request (CANTARQ) bits */
104#define MSCAN_ABTRQ2 0x04
105#define MSCAN_ABTRQ1 0x02
106#define MSCAN_ABTRQ0 0x01
107
108/* MSCAN transmitter message abort ack (CANTAAK) bits */
109#define MSCAN_ABTAK2 0x04
110#define MSCAN_ABTAK1 0x02
111#define MSCAN_ABTAK0 0x01
112
113/* MSCAN transmit buffer selection (CANTBSEL) bits */
114#define MSCAN_TX2 0x04
115#define MSCAN_TX1 0x02
116#define MSCAN_TX0 0x01
117
118/* MSCAN ID acceptance control register (CANIDAC) bits */
119#define MSCAN_IDAM1 0x20
120#define MSCAN_IDAM0 0x10
121#define MSCAN_IDHIT2 0x04
122#define MSCAN_IDHIT1 0x02
123#define MSCAN_IDHIT0 0x01
124
125#define MSCAN_AF_32BIT 0x00
126#define MSCAN_AF_16BIT MSCAN_IDAM0
127#define MSCAN_AF_8BIT MSCAN_IDAM1
128#define MSCAN_AF_CLOSED (MSCAN_IDAM0|MSCAN_IDAM1)
129#define MSCAN_AF_MASK (~(MSCAN_IDAM0|MSCAN_IDAM1))
130
131/* MSCAN Miscellaneous Register (CANMISC) bits */
132#define MSCAN_BOHOLD 0x01
133
134/* MSCAN Identifier Register (IDR) bits */
135#define MSCAN_SFF_RTR_SHIFT 4
136#define MSCAN_EFF_RTR_SHIFT 0
137#define MSCAN_EFF_FLAGS 0x18 /* IDE + SRR */
138
139#ifdef MSCAN_FOR_MPC5200
140#define _MSCAN_RESERVED_(n, num) u8 _res##n[num]
141#define _MSCAN_RESERVED_DSR_SIZE 2
142#else
143#define _MSCAN_RESERVED_(n, num)
144#define _MSCAN_RESERVED_DSR_SIZE 0
145#endif
146
/*
 * Structure of the hardware registers.
 *
 * Two offset columns are given in the member comments: the left one is
 * the byte offset on the MPC5200, where each register pair is padded by
 * the _MSCAN_RESERVED_() gaps; the right one is the offset on a plain
 * MSCAN (see doc S12MSCANV3/D).
 */
struct mscan_regs {
	/* (see doc S12MSCANV3/D) MPC5200 MSCAN */
	u8 canctl0;			/* + 0x00 0x00 */
	u8 canctl1;			/* + 0x01 0x01 */
	_MSCAN_RESERVED_(1, 2);		/* + 0x02 */
	u8 canbtr0;			/* + 0x04 0x02 */
	u8 canbtr1;			/* + 0x05 0x03 */
	_MSCAN_RESERVED_(2, 2);		/* + 0x06 */
	u8 canrflg;			/* + 0x08 0x04 */
	u8 canrier;			/* + 0x09 0x05 */
	_MSCAN_RESERVED_(3, 2);		/* + 0x0a */
	u8 cantflg;			/* + 0x0c 0x06 */
	u8 cantier;			/* + 0x0d 0x07 */
	_MSCAN_RESERVED_(4, 2);		/* + 0x0e */
	u8 cantarq;			/* + 0x10 0x08 */
	u8 cantaak;			/* + 0x11 0x09 */
	_MSCAN_RESERVED_(5, 2);		/* + 0x12 */
	u8 cantbsel;			/* + 0x14 0x0a */
	u8 canidac;			/* + 0x15 0x0b */
	u8 reserved;			/* + 0x16 0x0c */
	_MSCAN_RESERVED_(6, 5);		/* + 0x17 */
#ifndef MSCAN_FOR_MPC5200
	u8 canmisc;			/*        0x0d */
#endif
	u8 canrxerr;			/* + 0x1c 0x0e */
	u8 cantxerr;			/* + 0x1d 0x0f */
	_MSCAN_RESERVED_(7, 2);		/* + 0x1e */
	u16 canidar1_0;			/* + 0x20 0x10 */
	_MSCAN_RESERVED_(8, 2);		/* + 0x22 */
	u16 canidar3_2;			/* + 0x24 0x12 */
	_MSCAN_RESERVED_(9, 2);		/* + 0x26 */
	u16 canidmr1_0;			/* + 0x28 0x14 */
	_MSCAN_RESERVED_(10, 2);	/* + 0x2a */
	u16 canidmr3_2;			/* + 0x2c 0x16 */
	_MSCAN_RESERVED_(11, 2);	/* + 0x2e */
	u16 canidar5_4;			/* + 0x30 0x18 */
	_MSCAN_RESERVED_(12, 2);	/* + 0x32 */
	u16 canidar7_6;			/* + 0x34 0x1a */
	_MSCAN_RESERVED_(13, 2);	/* + 0x36 */
	u16 canidmr5_4;			/* + 0x38 0x1c */
	_MSCAN_RESERVED_(14, 2);	/* + 0x3a */
	u16 canidmr7_6;			/* + 0x3c 0x1e */
	_MSCAN_RESERVED_(15, 2);	/* + 0x3e */
	struct {
		u16 idr1_0;		/* + 0x40 0x20 */
		_MSCAN_RESERVED_(16, 2);	/* + 0x42 */
		u16 idr3_2;		/* + 0x44 0x22 */
		_MSCAN_RESERVED_(17, 2);	/* + 0x46 */
		u16 dsr1_0;		/* + 0x48 0x24 */
		_MSCAN_RESERVED_(18, 2);	/* + 0x4a */
		u16 dsr3_2;		/* + 0x4c 0x26 */
		_MSCAN_RESERVED_(19, 2);	/* + 0x4e */
		u16 dsr5_4;		/* + 0x50 0x28 */
		_MSCAN_RESERVED_(20, 2);	/* + 0x52 */
		u16 dsr7_6;		/* + 0x54 0x2a */
		_MSCAN_RESERVED_(21, 2);	/* + 0x56 */
		u8 dlr;			/* + 0x58 0x2c */
		u8:8;			/* + 0x59 0x2d */
		_MSCAN_RESERVED_(22, 2);	/* + 0x5a */
		u16 time;		/* + 0x5c 0x2e */
	} rx;
	_MSCAN_RESERVED_(23, 2);	/* + 0x5e */
	struct {
		u16 idr1_0;		/* + 0x60 0x30 */
		_MSCAN_RESERVED_(24, 2);	/* + 0x62 */
		u16 idr3_2;		/* + 0x64 0x32 */
		_MSCAN_RESERVED_(25, 2);	/* + 0x66 */
		u16 dsr1_0;		/* + 0x68 0x34 */
		_MSCAN_RESERVED_(26, 2);	/* + 0x6a */
		u16 dsr3_2;		/* + 0x6c 0x36 */
		_MSCAN_RESERVED_(27, 2);	/* + 0x6e */
		u16 dsr5_4;		/* + 0x70 0x38 */
		_MSCAN_RESERVED_(28, 2);	/* + 0x72 */
		u16 dsr7_6;		/* + 0x74 0x3a */
		_MSCAN_RESERVED_(29, 2);	/* + 0x76 */
		u8 dlr;			/* + 0x78 0x3c */
		u8 tbpr;		/* + 0x79 0x3d */
		_MSCAN_RESERVED_(30, 2);	/* + 0x7a */
		u16 time;		/* + 0x7c 0x3e */
	} tx;
	_MSCAN_RESERVED_(31, 2);	/* + 0x7e */
} __attribute__ ((packed));
230
#undef _MSCAN_RESERVED_
/*
 * Size of the register window.  The register structure above is named
 * "struct mscan_regs"; the previous "sizeof(struct mscan)" referred to
 * a nonexistent type and could never compile when the macro was used.
 */
#define MSCAN_REGION	sizeof(struct mscan_regs)
233
234#define MSCAN_NORMAL_MODE 0
235#define MSCAN_SLEEP_MODE MSCAN_SLPRQ
236#define MSCAN_INIT_MODE (MSCAN_INITRQ | MSCAN_SLPRQ)
237#define MSCAN_POWEROFF_MODE (MSCAN_CSWAI | MSCAN_SLPRQ)
238#define MSCAN_SET_MODE_RETRIES 255
239#define MSCAN_ECHO_SKB_MAX 3
240
241#define BTR0_BRP_MASK 0x3f
242#define BTR0_SJW_SHIFT 6
243#define BTR0_SJW_MASK (0x3 << BTR0_SJW_SHIFT)
244
245#define BTR1_TSEG1_MASK 0xf
246#define BTR1_TSEG2_SHIFT 4
247#define BTR1_TSEG2_MASK (0x7 << BTR1_TSEG2_SHIFT)
248#define BTR1_SAM_SHIFT 7
249
250#define BTR0_SET_BRP(brp) (((brp) - 1) & BTR0_BRP_MASK)
251#define BTR0_SET_SJW(sjw) ((((sjw) - 1) << BTR0_SJW_SHIFT) & \
252 BTR0_SJW_MASK)
253
254#define BTR1_SET_TSEG1(tseg1) (((tseg1) - 1) & BTR1_TSEG1_MASK)
255#define BTR1_SET_TSEG2(tseg2) ((((tseg2) - 1) << BTR1_TSEG2_SHIFT) & \
256 BTR1_TSEG2_MASK)
257#define BTR1_SET_SAM(sam) ((sam) ? 1 << BTR1_SAM_SHIFT : 0)
258
259#define F_RX_PROGRESS 0
260#define F_TX_PROGRESS 1
261#define F_TX_WAIT_ALL 2
262
263#define TX_QUEUE_SIZE 3
264
/* One in-flight TX buffer, linked into mscan_priv.tx_head. */
struct tx_queue_entry {
	struct list_head list;	/* link in the tx_head list */
	u8 mask;		/* this buffer's bit in CANTFLG/CANTIER/CANTBSEL */
	u8 id;			/* echo-skb index passed to can_get_echo_skb() */
};
270
struct mscan_priv {
	struct can_priv can;	/* must be the first member */
	long open_time;		/* jiffies at open time; 0 while closed */
	unsigned long flags;	/* F_RX_PROGRESS/F_TX_PROGRESS/F_TX_WAIT_ALL */
	void __iomem *reg_base;	/* ioremap'ed address to registers */
	u8 shadow_statflg;	/* last seen CANRFLG status bits (MSCAN_STAT_MSK) */
	u8 shadow_canrier;	/* CANRIER value saved while NAPI poll runs */
	u8 cur_pri;
	u8 prev_buf_id;
	u8 tx_active;		/* bitmask of TX buffers currently in flight */

	struct list_head tx_head;	/* list of tx_queue_entry in flight */
	struct tx_queue_entry tx_queue[TX_QUEUE_SIZE];
	struct napi_struct napi;
};
286
287extern struct net_device *alloc_mscandev(void);
288/*
289 * clock_src:
290 * 1 = The MSCAN clock source is the onchip Bus Clock.
291 * 0 = The MSCAN clock source is the chip Oscillator Clock.
292 */
293extern int register_mscandev(struct net_device *dev, int clock_src);
294extern void unregister_mscandev(struct net_device *dev);
295
296#endif /* __MSCAN_H__ */
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 16d2ecd2a3b7..b4ba88a31075 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -296,11 +296,9 @@ static void sja1000_rx(struct net_device *dev)
296 uint8_t dlc; 296 uint8_t dlc;
297 int i; 297 int i;
298 298
299 skb = dev_alloc_skb(sizeof(struct can_frame)); 299 skb = alloc_can_skb(dev, &cf);
300 if (skb == NULL) 300 if (skb == NULL)
301 return; 301 return;
302 skb->dev = dev;
303 skb->protocol = htons(ETH_P_CAN);
304 302
305 fi = priv->read_reg(priv, REG_FI); 303 fi = priv->read_reg(priv, REG_FI);
306 dlc = fi & 0x0F; 304 dlc = fi & 0x0F;
@@ -323,8 +321,6 @@ static void sja1000_rx(struct net_device *dev)
323 if (fi & FI_RTR) 321 if (fi & FI_RTR)
324 id |= CAN_RTR_FLAG; 322 id |= CAN_RTR_FLAG;
325 323
326 cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
327 memset(cf, 0, sizeof(struct can_frame));
328 cf->can_id = id; 324 cf->can_id = id;
329 cf->can_dlc = dlc; 325 cf->can_dlc = dlc;
330 for (i = 0; i < dlc; i++) 326 for (i = 0; i < dlc; i++)
@@ -351,15 +347,9 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
351 enum can_state state = priv->can.state; 347 enum can_state state = priv->can.state;
352 uint8_t ecc, alc; 348 uint8_t ecc, alc;
353 349
354 skb = dev_alloc_skb(sizeof(struct can_frame)); 350 skb = alloc_can_err_skb(dev, &cf);
355 if (skb == NULL) 351 if (skb == NULL)
356 return -ENOMEM; 352 return -ENOMEM;
357 skb->dev = dev;
358 skb->protocol = htons(ETH_P_CAN);
359 cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
360 memset(cf, 0, sizeof(struct can_frame));
361 cf->can_id = CAN_ERR_FLAG;
362 cf->can_dlc = CAN_ERR_DLC;
363 353
364 if (isrc & IRQ_DOI) { 354 if (isrc & IRQ_DOI) {
365 /* data overrun interrupt */ 355 /* data overrun interrupt */
@@ -526,7 +516,7 @@ static int sja1000_open(struct net_device *dev)
526 516
527 /* register interrupt handler, if not done by the device driver */ 517 /* register interrupt handler, if not done by the device driver */
528 if (!(priv->flags & SJA1000_CUSTOM_IRQ_HANDLER)) { 518 if (!(priv->flags & SJA1000_CUSTOM_IRQ_HANDLER)) {
529 err = request_irq(dev->irq, &sja1000_interrupt, priv->irq_flags, 519 err = request_irq(dev->irq, sja1000_interrupt, priv->irq_flags,
530 dev->name, (void *)dev); 520 dev->name, (void *)dev);
531 if (err) { 521 if (err) {
532 close_candev(dev); 522 close_candev(dev);
@@ -565,7 +555,8 @@ struct net_device *alloc_sja1000dev(int sizeof_priv)
565 struct net_device *dev; 555 struct net_device *dev;
566 struct sja1000_priv *priv; 556 struct sja1000_priv *priv;
567 557
568 dev = alloc_candev(sizeof(struct sja1000_priv) + sizeof_priv); 558 dev = alloc_candev(sizeof(struct sja1000_priv) + sizeof_priv,
559 SJA1000_ECHO_SKB_MAX);
569 if (!dev) 560 if (!dev)
570 return NULL; 561 return NULL;
571 562
diff --git a/drivers/net/can/sja1000/sja1000.h b/drivers/net/can/sja1000/sja1000.h
index 302d2c763ad7..97a622b9302f 100644
--- a/drivers/net/can/sja1000/sja1000.h
+++ b/drivers/net/can/sja1000/sja1000.h
@@ -50,6 +50,8 @@
50#include <linux/can/dev.h> 50#include <linux/can/dev.h>
51#include <linux/can/platform/sja1000.h> 51#include <linux/can/platform/sja1000.h>
52 52
53#define SJA1000_ECHO_SKB_MAX 1 /* the SJA1000 has one TX buffer object */
54
53#define SJA1000_MAX_IRQ 20 /* max. number of interrupts handled in ISR */ 55#define SJA1000_MAX_IRQ 20 /* max. number of interrupts handled in ISR */
54 56
55/* SJA1000 registers - manual section 6.4 (Pelican Mode) */ 57/* SJA1000 registers - manual section 6.4 (Pelican Mode) */
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
new file mode 100644
index 000000000000..07e8016b17ec
--- /dev/null
+++ b/drivers/net/can/ti_hecc.c
@@ -0,0 +1,993 @@
1/*
2 * TI HECC (CAN) device driver
3 *
4 * This driver supports TI's HECC (High End CAN Controller module) and the
5 * specs for the same is available at <http://www.ti.com>
6 *
7 * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation version 2.
12 *
13 * This program is distributed as is WITHOUT ANY WARRANTY of any
14 * kind, whether express or implied; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 */
19
20/*
21 * Your platform definitions should specify module ram offsets and interrupt
22 * number to use as follows:
23 *
24 * static struct ti_hecc_platform_data am3517_evm_hecc_pdata = {
25 * .scc_hecc_offset = 0,
26 * .scc_ram_offset = 0x3000,
27 * .hecc_ram_offset = 0x3000,
28 * .mbx_offset = 0x2000,
29 * .int_line = 0,
30 * .revision = 1,
31 * };
32 *
33 * Please see include/can/platform/ti_hecc.h for description of above fields
34 *
35 */
36
37#include <linux/module.h>
38#include <linux/init.h>
39#include <linux/kernel.h>
40#include <linux/types.h>
41#include <linux/interrupt.h>
42#include <linux/errno.h>
43#include <linux/netdevice.h>
44#include <linux/skbuff.h>
45#include <linux/platform_device.h>
46#include <linux/clk.h>
47
48#include <linux/can.h>
49#include <linux/can/dev.h>
50#include <linux/can/error.h>
51#include <linux/can/platform/ti_hecc.h>
52
53#define DRV_NAME "ti_hecc"
54#define HECC_MODULE_VERSION "0.7"
55MODULE_VERSION(HECC_MODULE_VERSION);
56#define DRV_DESC "TI High End CAN Controller Driver " HECC_MODULE_VERSION
57
58/* TX / RX Mailbox Configuration */
59#define HECC_MAX_MAILBOXES 32 /* hardware mailboxes - do not change */
60#define MAX_TX_PRIO 0x3F /* hardware value - do not change */
61
62/*
63 * Important Note: TX mailbox configuration
64 * TX mailboxes should be restricted to the number of SKB buffers to avoid
65 * maintaining SKB buffers separately. TX mailboxes should be a power of 2
66 * for the mailbox logic to work. Top mailbox numbers are reserved for RX
67 * and lower mailboxes for TX.
68 *
69 * HECC_MAX_TX_MBOX HECC_MB_TX_SHIFT
70 * 4 (default) 2
71 * 8 3
72 * 16 4
73 */
74#define HECC_MB_TX_SHIFT 2 /* as per table above */
75#define HECC_MAX_TX_MBOX BIT(HECC_MB_TX_SHIFT)
76
77#define HECC_TX_PRIO_SHIFT (HECC_MB_TX_SHIFT)
78#define HECC_TX_PRIO_MASK (MAX_TX_PRIO << HECC_MB_TX_SHIFT)
79#define HECC_TX_MB_MASK (HECC_MAX_TX_MBOX - 1)
80#define HECC_TX_MASK ((HECC_MAX_TX_MBOX - 1) | HECC_TX_PRIO_MASK)
81#define HECC_TX_MBOX_MASK (~(BIT(HECC_MAX_TX_MBOX) - 1))
82#define HECC_DEF_NAPI_WEIGHT HECC_MAX_RX_MBOX
83
84/*
85 * Important Note: RX mailbox configuration
86 * RX mailboxes are further logically split into two - main and buffer
87 * mailboxes. The goal is to get all packets into main mailboxes as
88 * driven by mailbox number and receive priority (higher to lower) and
89 * buffer mailboxes are used to receive pkts while main mailboxes are being
90 * processed. This ensures in-order packet reception.
91 *
92 * Here are the recommended values for buffer mailbox. Note that RX mailboxes
93 * start after TX mailboxes:
94 *
95 * HECC_MAX_RX_MBOX HECC_RX_BUFFER_MBOX No of buffer mailboxes
96 * 28 12 8
97 * 16 20 4
98 */
99
100#define HECC_MAX_RX_MBOX (HECC_MAX_MAILBOXES - HECC_MAX_TX_MBOX)
101#define HECC_RX_BUFFER_MBOX 12 /* as per table above */
102#define HECC_RX_FIRST_MBOX (HECC_MAX_MAILBOXES - 1)
103#define HECC_RX_HIGH_MBOX_MASK (~(BIT(HECC_RX_BUFFER_MBOX) - 1))
104
105/* TI HECC module registers */
106#define HECC_CANME 0x0 /* Mailbox enable */
107#define HECC_CANMD 0x4 /* Mailbox direction */
108#define HECC_CANTRS 0x8 /* Transmit request set */
109#define HECC_CANTRR 0xC /* Transmit request */
110#define HECC_CANTA 0x10 /* Transmission acknowledge */
111#define HECC_CANAA 0x14 /* Abort acknowledge */
112#define HECC_CANRMP 0x18 /* Receive message pending */
113#define HECC_CANRML 0x1C /* Remote message lost */
114#define HECC_CANRFP 0x20 /* Remote frame pending */
115#define HECC_CANGAM 0x24 /* SECC only:Global acceptance mask */
116#define HECC_CANMC 0x28 /* Master control */
117#define HECC_CANBTC 0x2C /* Bit timing configuration */
118#define HECC_CANES 0x30 /* Error and status */
119#define HECC_CANTEC 0x34 /* Transmit error counter */
120#define HECC_CANREC 0x38 /* Receive error counter */
121#define HECC_CANGIF0 0x3C /* Global interrupt flag 0 */
122#define HECC_CANGIM 0x40 /* Global interrupt mask */
123#define HECC_CANGIF1 0x44 /* Global interrupt flag 1 */
124#define HECC_CANMIM 0x48 /* Mailbox interrupt mask */
125#define HECC_CANMIL 0x4C /* Mailbox interrupt level */
126#define HECC_CANOPC 0x50 /* Overwrite protection control */
127#define HECC_CANTIOC 0x54 /* Transmit I/O control */
128#define HECC_CANRIOC 0x58 /* Receive I/O control */
129#define HECC_CANLNT 0x5C /* HECC only: Local network time */
130#define HECC_CANTOC 0x60 /* HECC only: Time-out control */
131#define HECC_CANTOS 0x64 /* HECC only: Time-out status */
132#define HECC_CANTIOCE 0x68 /* SCC only:Enhanced TX I/O control */
133#define HECC_CANRIOCE 0x6C /* SCC only:Enhanced RX I/O control */
134
135/* Mailbox registers */
136#define HECC_CANMID 0x0
137#define HECC_CANMCF 0x4
138#define HECC_CANMDL 0x8
139#define HECC_CANMDH 0xC
140
141#define HECC_SET_REG 0xFFFFFFFF
142#define HECC_CANID_MASK 0x3FF /* 18 bits mask for extended id's */
143#define HECC_CCE_WAIT_COUNT 100 /* Wait for ~1 sec for CCE bit */
144
145#define HECC_CANMC_SCM BIT(13) /* SCC compat mode */
146#define HECC_CANMC_CCR BIT(12) /* Change config request */
147#define HECC_CANMC_PDR BIT(11) /* Local Power down - for sleep mode */
148#define HECC_CANMC_ABO BIT(7) /* Auto Bus On */
149#define HECC_CANMC_STM BIT(6) /* Self test mode - loopback */
150#define HECC_CANMC_SRES BIT(5) /* Software reset */
151
152#define HECC_CANTIOC_EN BIT(3) /* Enable CAN TX I/O pin */
153#define HECC_CANRIOC_EN BIT(3) /* Enable CAN RX I/O pin */
154
155#define HECC_CANMID_IDE BIT(31) /* Extended frame format */
156#define HECC_CANMID_AME BIT(30) /* Acceptance mask enable */
157#define HECC_CANMID_AAM BIT(29) /* Auto answer mode */
158
159#define HECC_CANES_FE BIT(24) /* form error */
160#define HECC_CANES_BE BIT(23) /* bit error */
161#define HECC_CANES_SA1 BIT(22) /* stuck at dominant error */
162#define HECC_CANES_CRCE BIT(21) /* CRC error */
163#define HECC_CANES_SE BIT(20) /* stuff bit error */
164#define HECC_CANES_ACKE BIT(19) /* ack error */
165#define HECC_CANES_BO BIT(18) /* Bus off status */
166#define HECC_CANES_EP BIT(17) /* Error passive status */
167#define HECC_CANES_EW BIT(16) /* Error warning status */
168#define HECC_CANES_SMA BIT(5) /* suspend mode ack */
169#define HECC_CANES_CCE BIT(4) /* Change config enabled */
170#define HECC_CANES_PDA BIT(3) /* Power down mode ack */
171
172#define HECC_CANBTC_SAM BIT(7) /* sample points */
173
174#define HECC_BUS_ERROR (HECC_CANES_FE | HECC_CANES_BE |\
175 HECC_CANES_CRCE | HECC_CANES_SE |\
176 HECC_CANES_ACKE)
177
178#define HECC_CANMCF_RTR BIT(4) /* Remote transmit request */
179
180#define HECC_CANGIF_MAIF BIT(17) /* Message alarm interrupt */
181#define HECC_CANGIF_TCOIF BIT(16) /* Timer counter overflow int */
182#define HECC_CANGIF_GMIF BIT(15) /* Global mailbox interrupt */
183#define HECC_CANGIF_AAIF BIT(14) /* Abort ack interrupt */
184#define HECC_CANGIF_WDIF BIT(13) /* Write denied interrupt */
185#define HECC_CANGIF_WUIF BIT(12) /* Wake up interrupt */
186#define HECC_CANGIF_RMLIF BIT(11) /* Receive message lost interrupt */
187#define HECC_CANGIF_BOIF BIT(10) /* Bus off interrupt */
188#define HECC_CANGIF_EPIF BIT(9) /* Error passive interrupt */
189#define HECC_CANGIF_WLIF BIT(8) /* Warning level interrupt */
190#define HECC_CANGIF_MBOX_MASK 0x1F /* Mailbox number mask */
191#define HECC_CANGIM_I1EN BIT(1) /* Int line 1 enable */
192#define HECC_CANGIM_I0EN BIT(0) /* Int line 0 enable */
193#define HECC_CANGIM_DEF_MASK 0x700 /* only busoff/warning/passive */
194#define HECC_CANGIM_SIL BIT(2) /* system interrupts to int line 1 */
195
/*
 * CAN bittiming constants as per HECC specs: the allowed ranges for the
 * TSEG1/TSEG2/SJW/BRP fields programmed into CANBTC by ti_hecc_set_btc().
 */
static struct can_bittiming_const ti_hecc_bittiming_const = {
	.name = DRV_NAME,
	.tseg1_min = 1,
	.tseg1_max = 16,
	.tseg2_min = 1,
	.tseg2_max = 8,
	.sjw_max = 4,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};
208
struct ti_hecc_priv {
	struct can_priv can;	/* MUST be first member/field */
	struct napi_struct napi;
	struct net_device *ndev;	/* back pointer to the net device */
	struct clk *clk;
	void __iomem *base;	/* ioremapped module registers */
	u32 scc_ram_offset;	/* offsets from platform data (see ti_hecc.h) */
	u32 hecc_ram_offset;
	u32 mbx_offset;
	u32 int_line;		/* which of the two interrupt lines to use */
	spinlock_t mbx_lock;	/* CANME register needs protection */
	u32 tx_head;		/* encodes both mailbox number and priority */
	u32 tx_tail;
	u32 rx_next;
};
224
225static inline int get_tx_head_mb(struct ti_hecc_priv *priv)
226{
227 return priv->tx_head & HECC_TX_MB_MASK;
228}
229
230static inline int get_tx_tail_mb(struct ti_hecc_priv *priv)
231{
232 return priv->tx_tail & HECC_TX_MB_MASK;
233}
234
235static inline int get_tx_head_prio(struct ti_hecc_priv *priv)
236{
237 return (priv->tx_head >> HECC_TX_PRIO_SHIFT) & MAX_TX_PRIO;
238}
239
240static inline void hecc_write_lam(struct ti_hecc_priv *priv, u32 mbxno, u32 val)
241{
242 __raw_writel(val, priv->base + priv->hecc_ram_offset + mbxno * 4);
243}
244
245static inline void hecc_write_mbx(struct ti_hecc_priv *priv, u32 mbxno,
246 u32 reg, u32 val)
247{
248 __raw_writel(val, priv->base + priv->mbx_offset + mbxno * 0x10 +
249 reg);
250}
251
252static inline u32 hecc_read_mbx(struct ti_hecc_priv *priv, u32 mbxno, u32 reg)
253{
254 return __raw_readl(priv->base + priv->mbx_offset + mbxno * 0x10 +
255 reg);
256}
257
258static inline void hecc_write(struct ti_hecc_priv *priv, u32 reg, u32 val)
259{
260 __raw_writel(val, priv->base + reg);
261}
262
263static inline u32 hecc_read(struct ti_hecc_priv *priv, int reg)
264{
265 return __raw_readl(priv->base + reg);
266}
267
268static inline void hecc_set_bit(struct ti_hecc_priv *priv, int reg,
269 u32 bit_mask)
270{
271 hecc_write(priv, reg, hecc_read(priv, reg) | bit_mask);
272}
273
274static inline void hecc_clear_bit(struct ti_hecc_priv *priv, int reg,
275 u32 bit_mask)
276{
277 hecc_write(priv, reg, hecc_read(priv, reg) & ~bit_mask);
278}
279
280static inline u32 hecc_get_bit(struct ti_hecc_priv *priv, int reg, u32 bit_mask)
281{
282 return (hecc_read(priv, reg) & bit_mask) ? 1 : 0;
283}
284
285static int ti_hecc_get_state(const struct net_device *ndev,
286 enum can_state *state)
287{
288 struct ti_hecc_priv *priv = netdev_priv(ndev);
289
290 *state = priv->can.state;
291 return 0;
292}
293
294static int ti_hecc_set_btc(struct ti_hecc_priv *priv)
295{
296 struct can_bittiming *bit_timing = &priv->can.bittiming;
297 u32 can_btc;
298
299 can_btc = (bit_timing->phase_seg2 - 1) & 0x7;
300 can_btc |= ((bit_timing->phase_seg1 + bit_timing->prop_seg - 1)
301 & 0xF) << 3;
302 if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) {
303 if (bit_timing->brp > 4)
304 can_btc |= HECC_CANBTC_SAM;
305 else
306 dev_warn(priv->ndev->dev.parent, "WARN: Triple" \
307 "sampling not set due to h/w limitations");
308 }
309 can_btc |= ((bit_timing->sjw - 1) & 0x3) << 8;
310 can_btc |= ((bit_timing->brp - 1) & 0xFF) << 16;
311
312 /* ERM being set to 0 by default meaning resync at falling edge */
313
314 hecc_write(priv, HECC_CANBTC, can_btc);
315 dev_info(priv->ndev->dev.parent, "setting CANBTC=%#x\n", can_btc);
316
317 return 0;
318}
319
/*
 * Soft-reset the controller and bring it through its configuration-mode
 * sequence: program bit timing, enable the TX/RX pins and clear all
 * mailbox / interrupt-flag registers.  The register write order follows
 * the hardware's change-configuration handshake (CCR/CCE) and should
 * not be rearranged.
 */
static void ti_hecc_reset(struct net_device *ndev)
{
	u32 cnt;
	struct ti_hecc_priv *priv = netdev_priv(ndev);

	dev_dbg(ndev->dev.parent, "resetting hecc ...\n");
	hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_SRES);

	/* Set change control request and wait till enabled */
	hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_CCR);

	/*
	 * INFO: It has been observed that at times CCE bit may not be
	 * set and hw seems to be ok even if this bit is not set so
	 * timing out with a timing of 1ms to respect the specs
	 */
	cnt = HECC_CCE_WAIT_COUNT;
	while (!hecc_get_bit(priv, HECC_CANES, HECC_CANES_CCE) && cnt != 0) {
		--cnt;
		udelay(10);
	}

	/*
	 * Note: On HECC, BTC can be programmed only in initialization mode, so
	 * it is expected that the can bittiming parameters are set via ip
	 * utility before the device is opened
	 */
	ti_hecc_set_btc(priv);

	/* Clear CCR (and CANMC register) and wait for CCE = 0 enable */
	hecc_write(priv, HECC_CANMC, 0);

	/*
	 * INFO: CAN net stack handles bus off and hence disabling auto-bus-on
	 * hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_ABO);
	 */

	/*
	 * INFO: It has been observed that at times CCE bit may not clear
	 * promptly and hw seems to be ok even so — time out as above
	 */
	cnt = HECC_CCE_WAIT_COUNT;
	while (hecc_get_bit(priv, HECC_CANES, HECC_CANES_CCE) && cnt != 0) {
		--cnt;
		udelay(10);
	}

	/* Enable TX and RX I/O Control pins */
	hecc_write(priv, HECC_CANTIOC, HECC_CANTIOC_EN);
	hecc_write(priv, HECC_CANRIOC, HECC_CANRIOC_EN);

	/* Clear registers for clean operation */
	hecc_write(priv, HECC_CANTA, HECC_SET_REG);
	hecc_write(priv, HECC_CANRMP, HECC_SET_REG);
	hecc_write(priv, HECC_CANGIF0, HECC_SET_REG);
	hecc_write(priv, HECC_CANGIF1, HECC_SET_REG);
	hecc_write(priv, HECC_CANME, 0);
	hecc_write(priv, HECC_CANMD, 0);

	/* SCC compat mode NOT supported (and not needed too) */
	hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_SCM);
}
382
/*
 * Reset the controller, then arm the receive mailboxes (highest numbered
 * first, matching the in-order reception scheme in ti_hecc_rx_poll())
 * and enable interrupts on the configured interrupt line.
 */
static void ti_hecc_start(struct net_device *ndev)
{
	struct ti_hecc_priv *priv = netdev_priv(ndev);
	u32 cnt, mbxno, mbx_mask;

	/* put HECC in initialization mode and set btc */
	ti_hecc_reset(ndev);

	/* reset TX bookkeeping to "all mailboxes free, top priority" */
	priv->tx_head = priv->tx_tail = HECC_TX_MASK;
	priv->rx_next = HECC_RX_FIRST_MBOX;

	/* Enable local and global acceptance mask registers */
	hecc_write(priv, HECC_CANGAM, HECC_SET_REG);

	/* Prepare configured mailboxes to receive messages */
	for (cnt = 0; cnt < HECC_MAX_RX_MBOX; cnt++) {
		mbxno = HECC_MAX_MAILBOXES - 1 - cnt;
		mbx_mask = BIT(mbxno);
		/* disable first; CANMID/LAM may only change while disabled */
		hecc_clear_bit(priv, HECC_CANME, mbx_mask);
		hecc_write_mbx(priv, mbxno, HECC_CANMID, HECC_CANMID_AME);
		hecc_write_lam(priv, mbxno, HECC_SET_REG);
		hecc_set_bit(priv, HECC_CANMD, mbx_mask);
		hecc_set_bit(priv, HECC_CANME, mbx_mask);
		hecc_set_bit(priv, HECC_CANMIM, mbx_mask);
	}

	/* Prevent message over-write & Enable interrupts */
	hecc_write(priv, HECC_CANOPC, HECC_SET_REG);
	if (priv->int_line) {
		/* route all mailbox interrupts to line 1 */
		hecc_write(priv, HECC_CANMIL, HECC_SET_REG);
		hecc_write(priv, HECC_CANGIM, HECC_CANGIM_DEF_MASK |
			HECC_CANGIM_I1EN | HECC_CANGIM_SIL);
	} else {
		/* route all mailbox interrupts to line 0 */
		hecc_write(priv, HECC_CANMIL, 0);
		hecc_write(priv, HECC_CANGIM,
			HECC_CANGIM_DEF_MASK | HECC_CANGIM_I0EN);
	}
	priv->can.state = CAN_STATE_ERROR_ACTIVE;
}
422
423static void ti_hecc_stop(struct net_device *ndev)
424{
425 struct ti_hecc_priv *priv = netdev_priv(ndev);
426
427 /* Disable interrupts and disable mailboxes */
428 hecc_write(priv, HECC_CANGIM, 0);
429 hecc_write(priv, HECC_CANMIM, 0);
430 hecc_write(priv, HECC_CANME, 0);
431 priv->can.state = CAN_STATE_STOPPED;
432}
433
434static int ti_hecc_do_set_mode(struct net_device *ndev, enum can_mode mode)
435{
436 int ret = 0;
437
438 switch (mode) {
439 case CAN_MODE_START:
440 ti_hecc_start(ndev);
441 netif_wake_queue(ndev);
442 break;
443 default:
444 ret = -EOPNOTSUPP;
445 break;
446 }
447
448 return ret;
449}
450
/*
 * ti_hecc_xmit: HECC Transmit
 *
 * The transmit mailboxes start from 0 to HECC_MAX_TX_MBOX. In HECC the
 * priority of the mailbox for transmission is dependent upon priority setting
 * field in mailbox registers. The mailbox with highest value in priority field
 * is transmitted first. Only when two mailboxes have the same value in
 * priority field the highest numbered mailbox is transmitted first.
 *
 * To utilize the HECC priority feature as described above we start with the
 * highest numbered mailbox with highest priority level and move on to the next
 * mailbox with the same priority level and so on. Once we loop through all the
 * transmit mailboxes we choose the next priority level (lower) and so on
 * until we reach the lowest priority level on the lowest numbered mailbox
 * when we stop transmission until all mailboxes are transmitted and then
 * restart at highest numbered mailbox with highest priority.
 *
 * Two counters (head and tail) are used to track the next mailbox to transmit
 * and to track the echo buffer for already transmitted mailbox. The queue
 * is stopped when all the mailboxes are busy or when there is a priority
 * value roll-over happens.
 */
static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct ti_hecc_priv *priv = netdev_priv(ndev);
	struct can_frame *cf = (struct can_frame *)skb->data;
	u32 mbxno, mbx_mask, data;
	unsigned long flags;

	mbxno = get_tx_head_mb(priv);
	mbx_mask = BIT(mbxno);
	spin_lock_irqsave(&priv->mbx_lock, flags);
	/* the head mailbox still enabled means its previous TX is pending */
	if (unlikely(hecc_read(priv, HECC_CANME) & mbx_mask)) {
		spin_unlock_irqrestore(&priv->mbx_lock, flags);
		netif_stop_queue(ndev);
		dev_err(priv->ndev->dev.parent,
			"BUG: TX mbx not ready tx_head=%08X, tx_tail=%08X\n",
			priv->tx_head, priv->tx_tail);
		return NETDEV_TX_BUSY;
	}
	spin_unlock_irqrestore(&priv->mbx_lock, flags);

	/* Prepare mailbox for transmission */
	data = min_t(u8, cf->can_dlc, 8);
	if (cf->can_id & CAN_RTR_FLAG) /* Remote transmission request */
		data |= HECC_CANMCF_RTR;
	/* hardware transmit priority field lives at bits [12:8] of CANMCF */
	data |= get_tx_head_prio(priv) << 8;
	hecc_write_mbx(priv, mbxno, HECC_CANMCF, data);

	if (cf->can_id & CAN_EFF_FLAG) /* Extended frame format */
		data = (cf->can_id & CAN_EFF_MASK) | HECC_CANMID_IDE;
	else /* Standard frame format */
		data = (cf->can_id & CAN_SFF_MASK) << 18;
	hecc_write_mbx(priv, mbxno, HECC_CANMID, data);
	/* mailbox data registers are big-endian relative to the CPU */
	hecc_write_mbx(priv, mbxno, HECC_CANMDL,
		be32_to_cpu(*(u32 *)(cf->data)));
	if (cf->can_dlc > 4)
		hecc_write_mbx(priv, mbxno, HECC_CANMDH,
			be32_to_cpu(*(u32 *)(cf->data + 4)));
	else
		/* zero the unused tail so the echoed skb carries no junk */
		*(u32 *)(cf->data + 4) = 0;
	can_put_echo_skb(skb, ndev, mbxno);

	spin_lock_irqsave(&priv->mbx_lock, flags);
	--priv->tx_head;
	/*
	 * stop the queue if the next head mailbox is still busy or if the
	 * priority counter just rolled over (all slots consumed)
	 */
	if ((hecc_read(priv, HECC_CANME) & BIT(get_tx_head_mb(priv))) ||
		(priv->tx_head & HECC_TX_MASK) == HECC_TX_MASK) {
		netif_stop_queue(ndev);
	}
	hecc_set_bit(priv, HECC_CANME, mbx_mask);
	spin_unlock_irqrestore(&priv->mbx_lock, flags);

	/* set direction to transmit, unmask its interrupt and fire it off */
	hecc_clear_bit(priv, HECC_CANMD, mbx_mask);
	hecc_set_bit(priv, HECC_CANMIM, mbx_mask);
	hecc_write(priv, HECC_CANTRS, mbx_mask);

	return NETDEV_TX_OK;
}
529
/*
 * Pull one received frame out of mailbox @mbxno, hand it to the stack
 * and re-arm the mailbox when appropriate.  Called from NAPI context.
 * Returns 0 on success, -ENOMEM if no skb could be allocated.
 */
static int ti_hecc_rx_pkt(struct ti_hecc_priv *priv, int mbxno)
{
	struct net_device_stats *stats = &priv->ndev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;
	u32 data, mbx_mask;
	unsigned long flags;

	skb = alloc_can_skb(priv->ndev, &cf);
	if (!skb) {
		if (printk_ratelimit())
			dev_err(priv->ndev->dev.parent,
				"ti_hecc_rx_pkt: alloc_can_skb() failed\n");
		return -ENOMEM;
	}

	mbx_mask = BIT(mbxno);
	data = hecc_read_mbx(priv, mbxno, HECC_CANMID);
	if (data & HECC_CANMID_IDE)
		cf->can_id = (data & CAN_EFF_MASK) | CAN_EFF_FLAG;
	else
		/* standard IDs sit in bits [28:18] of CANMID */
		cf->can_id = (data >> 18) & CAN_SFF_MASK;
	data = hecc_read_mbx(priv, mbxno, HECC_CANMCF);
	if (data & HECC_CANMCF_RTR)
		cf->can_id |= CAN_RTR_FLAG;
	cf->can_dlc = data & 0xF;
	/* mailbox data registers are big-endian relative to the CPU */
	data = hecc_read_mbx(priv, mbxno, HECC_CANMDL);
	*(u32 *)(cf->data) = cpu_to_be32(data);
	if (cf->can_dlc > 4) {
		data = hecc_read_mbx(priv, mbxno, HECC_CANMDH);
		*(u32 *)(cf->data + 4) = cpu_to_be32(data);
	} else {
		*(u32 *)(cf->data + 4) = 0;
	}
	spin_lock_irqsave(&priv->mbx_lock, flags);
	hecc_clear_bit(priv, HECC_CANME, mbx_mask);
	/* ack the received-message-pending flag */
	hecc_write(priv, HECC_CANRMP, mbx_mask);
	/* enable mailbox only if it is part of rx buffer mailboxes */
	if (priv->rx_next < HECC_RX_BUFFER_MBOX)
		hecc_set_bit(priv, HECC_CANME, mbx_mask);
	spin_unlock_irqrestore(&priv->mbx_lock, flags);

	stats->rx_bytes += cf->can_dlc;
	netif_receive_skb(skb);
	stats->rx_packets++;

	return 0;
}
578
/*
 * ti_hecc_rx_poll - HECC receive pkts
 *
 * The receive mailboxes start from highest numbered mailbox till last xmit
 * mailbox. On CAN frame reception the hardware places the data into highest
 * numbered mailbox that matches the CAN ID filter. Since all receive mailboxes
 * have same filtering (ALL CAN frames) packets will arrive in the highest
 * available RX mailbox and we need to ensure in-order packet reception.
 *
 * To ensure the packets are received in the right order we logically divide
 * the RX mailboxes into main and buffer mailboxes. Packets are received as per
 * mailbox priority (higher to lower) in the main bank and once it is full we
 * disable further reception into main mailboxes. While the main mailboxes are
 * processed in NAPI, further packets are received in buffer mailboxes.
 *
 * We maintain a RX next mailbox counter to process packets and once all main
 * mailbox packets are passed to the upper stack we enable all of them but
 * continue to process packets received in buffer mailboxes. With each packet
 * received from buffer mailbox we enable it immediately so as to handle the
 * overflow from higher mailboxes.
 */
static int ti_hecc_rx_poll(struct napi_struct *napi, int quota)
{
	struct net_device *ndev = napi->dev;
	struct ti_hecc_priv *priv = netdev_priv(ndev);
	u32 num_pkts = 0;
	u32 mbx_mask;
	unsigned long pending_pkts, flags;

	if (!netif_running(ndev))
		return 0;

	/* CANRMP holds one pending bit per mailbox with a received frame */
	while ((pending_pkts = hecc_read(priv, HECC_CANRMP)) &&
		num_pkts < quota) {
		mbx_mask = BIT(priv->rx_next); /* next rx mailbox to process */
		if (mbx_mask & pending_pkts) {
			if (ti_hecc_rx_pkt(priv, priv->rx_next) < 0)
				return num_pkts;
			++num_pkts;
		} else if (priv->rx_next > HECC_RX_BUFFER_MBOX) {
			break; /* pkt not received yet */
		}
		--priv->rx_next;
		if (priv->rx_next == HECC_RX_BUFFER_MBOX) {
			/* enable high bank mailboxes */
			spin_lock_irqsave(&priv->mbx_lock, flags);
			mbx_mask = hecc_read(priv, HECC_CANME);
			mbx_mask |= HECC_RX_HIGH_MBOX_MASK;
			hecc_write(priv, HECC_CANME, mbx_mask);
			spin_unlock_irqrestore(&priv->mbx_lock, flags);
		} else if (priv->rx_next == HECC_MAX_TX_MBOX - 1) {
			/* wrapped past the lowest RX mailbox; start over */
			priv->rx_next = HECC_RX_FIRST_MBOX;
			break;
		}
	}

	/* Enable packet interrupt if all pkts are handled */
	if (hecc_read(priv, HECC_CANRMP) == 0) {
		napi_complete(napi);
		/* Re-enable RX mailbox interrupts */
		mbx_mask = hecc_read(priv, HECC_CANMIM);
		mbx_mask |= HECC_TX_MBOX_MASK;
		hecc_write(priv, HECC_CANMIM, mbx_mask);
	}

	return num_pkts;
}
646
647static int ti_hecc_error(struct net_device *ndev, int int_status,
648 int err_status)
649{
650 struct ti_hecc_priv *priv = netdev_priv(ndev);
651 struct net_device_stats *stats = &ndev->stats;
652 struct can_frame *cf;
653 struct sk_buff *skb;
654
655 /* propogate the error condition to the can stack */
656 skb = alloc_can_err_skb(ndev, &cf);
657 if (!skb) {
658 if (printk_ratelimit())
659 dev_err(priv->ndev->dev.parent,
660 "ti_hecc_error: alloc_can_err_skb() failed\n");
661 return -ENOMEM;
662 }
663
664 if (int_status & HECC_CANGIF_WLIF) { /* warning level int */
665 if ((int_status & HECC_CANGIF_BOIF) == 0) {
666 priv->can.state = CAN_STATE_ERROR_WARNING;
667 ++priv->can.can_stats.error_warning;
668 cf->can_id |= CAN_ERR_CRTL;
669 if (hecc_read(priv, HECC_CANTEC) > 96)
670 cf->data[1] |= CAN_ERR_CRTL_TX_WARNING;
671 if (hecc_read(priv, HECC_CANREC) > 96)
672 cf->data[1] |= CAN_ERR_CRTL_RX_WARNING;
673 }
674 hecc_set_bit(priv, HECC_CANES, HECC_CANES_EW);
675 dev_dbg(priv->ndev->dev.parent, "Error Warning interrupt\n");
676 hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_CCR);
677 }
678
679 if (int_status & HECC_CANGIF_EPIF) { /* error passive int */
680 if ((int_status & HECC_CANGIF_BOIF) == 0) {
681 priv->can.state = CAN_STATE_ERROR_PASSIVE;
682 ++priv->can.can_stats.error_passive;
683 cf->can_id |= CAN_ERR_CRTL;
684 if (hecc_read(priv, HECC_CANTEC) > 127)
685 cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
686 if (hecc_read(priv, HECC_CANREC) > 127)
687 cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
688 }
689 hecc_set_bit(priv, HECC_CANES, HECC_CANES_EP);
690 dev_dbg(priv->ndev->dev.parent, "Error passive interrupt\n");
691 hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_CCR);
692 }
693
694 /*
695 * Need to check busoff condition in error status register too to
696 * ensure warning interrupts don't hog the system
697 */
698 if ((int_status & HECC_CANGIF_BOIF) || (err_status & HECC_CANES_BO)) {
699 priv->can.state = CAN_STATE_BUS_OFF;
700 cf->can_id |= CAN_ERR_BUSOFF;
701 hecc_set_bit(priv, HECC_CANES, HECC_CANES_BO);
702 hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_CCR);
703 /* Disable all interrupts in bus-off to avoid int hog */
704 hecc_write(priv, HECC_CANGIM, 0);
705 can_bus_off(ndev);
706 }
707
708 if (err_status & HECC_BUS_ERROR) {
709 ++priv->can.can_stats.bus_error;
710 cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;
711 cf->data[2] |= CAN_ERR_PROT_UNSPEC;
712 if (err_status & HECC_CANES_FE) {
713 hecc_set_bit(priv, HECC_CANES, HECC_CANES_FE);
714 cf->data[2] |= CAN_ERR_PROT_FORM;
715 }
716 if (err_status & HECC_CANES_BE) {
717 hecc_set_bit(priv, HECC_CANES, HECC_CANES_BE);
718 cf->data[2] |= CAN_ERR_PROT_BIT;
719 }
720 if (err_status & HECC_CANES_SE) {
721 hecc_set_bit(priv, HECC_CANES, HECC_CANES_SE);
722 cf->data[2] |= CAN_ERR_PROT_STUFF;
723 }
724 if (err_status & HECC_CANES_CRCE) {
725 hecc_set_bit(priv, HECC_CANES, HECC_CANES_CRCE);
726 cf->data[2] |= CAN_ERR_PROT_LOC_CRC_SEQ |
727 CAN_ERR_PROT_LOC_CRC_DEL;
728 }
729 if (err_status & HECC_CANES_ACKE) {
730 hecc_set_bit(priv, HECC_CANES, HECC_CANES_ACKE);
731 cf->data[2] |= CAN_ERR_PROT_LOC_ACK |
732 CAN_ERR_PROT_LOC_ACK_DEL;
733 }
734 }
735
736 netif_receive_skb(skb);
737 stats->rx_packets++;
738 stats->rx_bytes += cf->can_dlc;
739 return 0;
740}
741
/*
 * Interrupt handler: reports error conditions, reaps completed TX
 * mailboxes (draining tx_tail towards tx_head), defers RX to NAPI and
 * acknowledges the global interrupt flags.
 */
static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct ti_hecc_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	u32 mbxno, mbx_mask, int_status, err_status;
	unsigned long ack, flags;

	int_status = hecc_read(priv,
		(priv->int_line) ? HECC_CANGIF1 : HECC_CANGIF0);

	/* shared IRQ: nothing pending means it wasn't ours */
	if (!int_status)
		return IRQ_NONE;

	err_status = hecc_read(priv, HECC_CANES);
	if (err_status & (HECC_BUS_ERROR | HECC_CANES_BO |
		HECC_CANES_EP | HECC_CANES_EW))
		ti_hecc_error(ndev, int_status, err_status);

	if (int_status & HECC_CANGIF_GMIF) {
		/* reap every completed TX mailbox between tail and head */
		while (priv->tx_tail - priv->tx_head > 0) {
			mbxno = get_tx_tail_mb(priv);
			mbx_mask = BIT(mbxno);
			/* CANTA bit not set: this TX hasn't completed yet */
			if (!(mbx_mask & hecc_read(priv, HECC_CANTA)))
				break;
			hecc_clear_bit(priv, HECC_CANMIM, mbx_mask);
			hecc_write(priv, HECC_CANTA, mbx_mask);
			spin_lock_irqsave(&priv->mbx_lock, flags);
			hecc_clear_bit(priv, HECC_CANME, mbx_mask);
			spin_unlock_irqrestore(&priv->mbx_lock, flags);
			/* low nibble of CANMCF is the transmitted DLC */
			stats->tx_bytes += hecc_read_mbx(priv, mbxno,
						HECC_CANMCF) & 0xF;
			stats->tx_packets++;
			can_get_echo_skb(ndev, mbxno);
			--priv->tx_tail;
		}

		/* restart queue if wrap-up or if queue stalled on last pkt */
		if (((priv->tx_head == priv->tx_tail) &&
		((priv->tx_head & HECC_TX_MASK) != HECC_TX_MASK)) ||
		(((priv->tx_tail & HECC_TX_MASK) == HECC_TX_MASK) &&
		((priv->tx_head & HECC_TX_MASK) == HECC_TX_MASK)))
			netif_wake_queue(ndev);

		/* Disable RX mailbox interrupts and let NAPI reenable them */
		if (hecc_read(priv, HECC_CANRMP)) {
			ack = hecc_read(priv, HECC_CANMIM);
			ack &= BIT(HECC_MAX_TX_MBOX) - 1;
			hecc_write(priv, HECC_CANMIM, ack);
			napi_schedule(&priv->napi);
		}
	}

	/* clear all interrupt conditions - read back to avoid spurious ints */
	if (priv->int_line) {
		hecc_write(priv, HECC_CANGIF1, HECC_SET_REG);
		int_status = hecc_read(priv, HECC_CANGIF1);
	} else {
		hecc_write(priv, HECC_CANGIF0, HECC_SET_REG);
		int_status = hecc_read(priv, HECC_CANGIF0);
	}

	return IRQ_HANDLED;
}
806
807static int ti_hecc_open(struct net_device *ndev)
808{
809 struct ti_hecc_priv *priv = netdev_priv(ndev);
810 int err;
811
812 err = request_irq(ndev->irq, ti_hecc_interrupt, IRQF_SHARED,
813 ndev->name, ndev);
814 if (err) {
815 dev_err(ndev->dev.parent, "error requesting interrupt\n");
816 return err;
817 }
818
819 /* Open common can device */
820 err = open_candev(ndev);
821 if (err) {
822 dev_err(ndev->dev.parent, "open_candev() failed %d\n", err);
823 free_irq(ndev->irq, ndev);
824 return err;
825 }
826
827 clk_enable(priv->clk);
828 ti_hecc_start(ndev);
829 napi_enable(&priv->napi);
830 netif_start_queue(ndev);
831
832 return 0;
833}
834
/*
 * ndo_stop: quiesce the TX queue and NAPI first, then stop the
 * controller, release the IRQ, gate the clock and close the common CAN
 * device.  Teardown order mirrors ti_hecc_open().
 */
static int ti_hecc_close(struct net_device *ndev)
{
	struct ti_hecc_priv *priv = netdev_priv(ndev);

	netif_stop_queue(ndev);
	napi_disable(&priv->napi);
	ti_hecc_stop(ndev);
	free_irq(ndev->irq, ndev);
	clk_disable(priv->clk);
	close_candev(ndev);

	return 0;
}
848
/* net_device callbacks: bring-up/teardown and frame transmission */
static const struct net_device_ops ti_hecc_netdev_ops = {
	.ndo_open		= ti_hecc_open,
	.ndo_stop		= ti_hecc_close,
	.ndo_start_xmit		= ti_hecc_xmit,
};
854
855static int ti_hecc_probe(struct platform_device *pdev)
856{
857 struct net_device *ndev = (struct net_device *)0;
858 struct ti_hecc_priv *priv;
859 struct ti_hecc_platform_data *pdata;
860 struct resource *mem, *irq;
861 void __iomem *addr;
862 int err = -ENODEV;
863
864 pdata = pdev->dev.platform_data;
865 if (!pdata) {
866 dev_err(&pdev->dev, "No platform data\n");
867 goto probe_exit;
868 }
869
870 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
871 if (!mem) {
872 dev_err(&pdev->dev, "No mem resources\n");
873 goto probe_exit;
874 }
875 irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
876 if (!irq) {
877 dev_err(&pdev->dev, "No irq resource\n");
878 goto probe_exit;
879 }
880 if (!request_mem_region(mem->start, resource_size(mem), pdev->name)) {
881 dev_err(&pdev->dev, "HECC region already claimed\n");
882 err = -EBUSY;
883 goto probe_exit;
884 }
885 addr = ioremap(mem->start, resource_size(mem));
886 if (!addr) {
887 dev_err(&pdev->dev, "ioremap failed\n");
888 err = -ENOMEM;
889 goto probe_exit_free_region;
890 }
891
892 ndev = alloc_candev(sizeof(struct ti_hecc_priv), HECC_MAX_TX_MBOX);
893 if (!ndev) {
894 dev_err(&pdev->dev, "alloc_candev failed\n");
895 err = -ENOMEM;
896 goto probe_exit_iounmap;
897 }
898
899 priv = netdev_priv(ndev);
900 priv->ndev = ndev;
901 priv->base = addr;
902 priv->scc_ram_offset = pdata->scc_ram_offset;
903 priv->hecc_ram_offset = pdata->hecc_ram_offset;
904 priv->mbx_offset = pdata->mbx_offset;
905 priv->int_line = pdata->int_line;
906
907 priv->can.bittiming_const = &ti_hecc_bittiming_const;
908 priv->can.do_set_mode = ti_hecc_do_set_mode;
909 priv->can.do_get_state = ti_hecc_get_state;
910
911 ndev->irq = irq->start;
912 ndev->flags |= IFF_ECHO;
913 platform_set_drvdata(pdev, ndev);
914 SET_NETDEV_DEV(ndev, &pdev->dev);
915 ndev->netdev_ops = &ti_hecc_netdev_ops;
916
917 priv->clk = clk_get(&pdev->dev, "hecc_ck");
918 if (IS_ERR(priv->clk)) {
919 dev_err(&pdev->dev, "No clock available\n");
920 err = PTR_ERR(priv->clk);
921 priv->clk = NULL;
922 goto probe_exit_candev;
923 }
924 priv->can.clock.freq = clk_get_rate(priv->clk);
925 netif_napi_add(ndev, &priv->napi, ti_hecc_rx_poll,
926 HECC_DEF_NAPI_WEIGHT);
927
928 err = register_candev(ndev);
929 if (err) {
930 dev_err(&pdev->dev, "register_candev() failed\n");
931 goto probe_exit_clk;
932 }
933 dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%u)\n",
934 priv->base, (u32) ndev->irq);
935
936 return 0;
937
938probe_exit_clk:
939 clk_put(priv->clk);
940probe_exit_candev:
941 free_candev(ndev);
942probe_exit_iounmap:
943 iounmap(addr);
944probe_exit_free_region:
945 release_mem_region(mem->start, resource_size(mem));
946probe_exit:
947 return err;
948}
949
950static int __devexit ti_hecc_remove(struct platform_device *pdev)
951{
952 struct resource *res;
953 struct net_device *ndev = platform_get_drvdata(pdev);
954 struct ti_hecc_priv *priv = netdev_priv(ndev);
955
956 clk_put(priv->clk);
957 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
958 iounmap(priv->base);
959 release_mem_region(res->start, resource_size(res));
960 unregister_candev(ndev);
961 free_candev(ndev);
962 platform_set_drvdata(pdev, NULL);
963
964 return 0;
965}
966
/* TI HECC netdevice driver: platform driver structure */
static struct platform_driver ti_hecc_driver = {
	.driver = {
		.name    = DRV_NAME,
		.owner   = THIS_MODULE,
	},
	.probe = ti_hecc_probe,
	.remove = __devexit_p(ti_hecc_remove),
};
976
977static int __init ti_hecc_init_driver(void)
978{
979 printk(KERN_INFO DRV_DESC "\n");
980 return platform_driver_register(&ti_hecc_driver);
981}
982module_init(ti_hecc_init_driver);
983
984static void __exit ti_hecc_exit_driver(void)
985{
986 printk(KERN_INFO DRV_DESC " unloaded\n");
987 platform_driver_unregister(&ti_hecc_driver);
988}
989module_exit(ti_hecc_exit_driver);
990
/* Module metadata */
MODULE_AUTHOR("Anant Gole <anantgole@ti.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION(DRV_DESC);
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index abdbd9c2b788..591eb0eb1c2b 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -232,7 +232,7 @@ MODULE_DEVICE_TABLE(usb, ems_usb_table);
232#define INTR_IN_BUFFER_SIZE 4 232#define INTR_IN_BUFFER_SIZE 4
233 233
234#define MAX_RX_URBS 10 234#define MAX_RX_URBS 10
235#define MAX_TX_URBS CAN_ECHO_SKB_MAX 235#define MAX_TX_URBS 10
236 236
237struct ems_usb; 237struct ems_usb;
238 238
@@ -311,23 +311,19 @@ static void ems_usb_rx_can_msg(struct ems_usb *dev, struct ems_cpc_msg *msg)
311 int i; 311 int i;
312 struct net_device_stats *stats = &dev->netdev->stats; 312 struct net_device_stats *stats = &dev->netdev->stats;
313 313
314 skb = netdev_alloc_skb(dev->netdev, sizeof(struct can_frame)); 314 skb = alloc_can_skb(dev->netdev, &cf);
315 if (skb == NULL) 315 if (skb == NULL)
316 return; 316 return;
317 317
318 skb->protocol = htons(ETH_P_CAN);
319
320 cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
321
322 cf->can_id = le32_to_cpu(msg->msg.can_msg.id); 318 cf->can_id = le32_to_cpu(msg->msg.can_msg.id);
323 cf->can_dlc = min_t(u8, msg->msg.can_msg.length, 8); 319 cf->can_dlc = min_t(u8, msg->msg.can_msg.length, 8);
324 320
325 if (msg->type == CPC_MSG_TYPE_EXT_CAN_FRAME 321 if (msg->type == CPC_MSG_TYPE_EXT_CAN_FRAME ||
326 || msg->type == CPC_MSG_TYPE_EXT_RTR_FRAME) 322 msg->type == CPC_MSG_TYPE_EXT_RTR_FRAME)
327 cf->can_id |= CAN_EFF_FLAG; 323 cf->can_id |= CAN_EFF_FLAG;
328 324
329 if (msg->type == CPC_MSG_TYPE_RTR_FRAME 325 if (msg->type == CPC_MSG_TYPE_RTR_FRAME ||
330 || msg->type == CPC_MSG_TYPE_EXT_RTR_FRAME) { 326 msg->type == CPC_MSG_TYPE_EXT_RTR_FRAME) {
331 cf->can_id |= CAN_RTR_FLAG; 327 cf->can_id |= CAN_RTR_FLAG;
332 } else { 328 } else {
333 for (i = 0; i < cf->can_dlc; i++) 329 for (i = 0; i < cf->can_dlc; i++)
@@ -346,18 +342,10 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)
346 struct sk_buff *skb; 342 struct sk_buff *skb;
347 struct net_device_stats *stats = &dev->netdev->stats; 343 struct net_device_stats *stats = &dev->netdev->stats;
348 344
349 skb = netdev_alloc_skb(dev->netdev, sizeof(struct can_frame)); 345 skb = alloc_can_err_skb(dev->netdev, &cf);
350 if (skb == NULL) 346 if (skb == NULL)
351 return; 347 return;
352 348
353 skb->protocol = htons(ETH_P_CAN);
354
355 cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
356 memset(cf, 0, sizeof(struct can_frame));
357
358 cf->can_id = CAN_ERR_FLAG;
359 cf->can_dlc = CAN_ERR_DLC;
360
361 if (msg->type == CPC_MSG_TYPE_CAN_STATE) { 349 if (msg->type == CPC_MSG_TYPE_CAN_STATE) {
362 u8 state = msg->msg.can_state; 350 u8 state = msg->msg.can_state;
363 351
@@ -1015,7 +1003,7 @@ static int ems_usb_probe(struct usb_interface *intf,
1015 struct ems_usb *dev; 1003 struct ems_usb *dev;
1016 int i, err = -ENOMEM; 1004 int i, err = -ENOMEM;
1017 1005
1018 netdev = alloc_candev(sizeof(struct ems_usb)); 1006 netdev = alloc_candev(sizeof(struct ems_usb), MAX_TX_URBS);
1019 if (!netdev) { 1007 if (!netdev) {
1020 dev_err(netdev->dev.parent, "Couldn't alloc candev\n"); 1008 dev_err(netdev->dev.parent, "Couldn't alloc candev\n");
1021 return -ENOMEM; 1009 return -ENOMEM;