Diffstat (limited to 'drivers/net')
-rw-r--r-- | drivers/net/can/at91_can.c | 1186
1 files changed, 1186 insertions, 0 deletions
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
new file mode 100644
index 000000000000..f67ae285a35a
--- /dev/null
+++ b/drivers/net/can/at91_can.c
@@ -0,0 +1,1186 @@
1 | /* | ||
2 | * at91_can.c - CAN network driver for AT91 SoC CAN controller | ||
3 | * | ||
4 | * (C) 2007 by Hans J. Koch <hjk@linutronix.de> | ||
5 | * (C) 2008, 2009 by Marc Kleine-Budde <kernel@pengutronix.de> | ||
6 | * | ||
7 | * This software may be distributed under the terms of the GNU General | ||
8 | * Public License ("GPL") version 2 as distributed in the 'COPYING' | ||
9 | * file from the main directory of the linux kernel source. | ||
10 | * | ||
11 | * Send feedback to <socketcan-users@lists.berlios.de> | ||
12 | * | ||
13 | * | ||
14 | * Your platform definition file should specify something like: | ||
15 | * | ||
16 | * static struct at91_can_data ek_can_data = { | ||
17 | * transceiver_switch = sam9263ek_transceiver_switch, | ||
18 | * }; | ||
19 | * | ||
20 | * at91_add_device_can(&ek_can_data); | ||
21 | * | ||
22 | */ | ||
23 | |||
24 | #include <linux/clk.h> | ||
25 | #include <linux/errno.h> | ||
26 | #include <linux/if_arp.h> | ||
27 | #include <linux/init.h> | ||
28 | #include <linux/interrupt.h> | ||
29 | #include <linux/kernel.h> | ||
30 | #include <linux/module.h> | ||
31 | #include <linux/netdevice.h> | ||
32 | #include <linux/platform_device.h> | ||
33 | #include <linux/skbuff.h> | ||
34 | #include <linux/spinlock.h> | ||
35 | #include <linux/string.h> | ||
36 | #include <linux/types.h> | ||
37 | |||
38 | #include <linux/can.h> | ||
39 | #include <linux/can/dev.h> | ||
40 | #include <linux/can/error.h> | ||
41 | |||
42 | #include <mach/board.h> | ||
43 | |||
44 | #define DRV_NAME "at91_can" | ||
45 | #define AT91_NAPI_WEIGHT 12 | ||
46 | |||
47 | /* | ||
48 | * RX/TX Mailbox split | ||
49 | * don't dare to touch | ||
50 | */ | ||
51 | #define AT91_MB_RX_NUM 12 | ||
52 | #define AT91_MB_TX_SHIFT 2 | ||
53 | |||
54 | #define AT91_MB_RX_FIRST 0 | ||
55 | #define AT91_MB_RX_LAST (AT91_MB_RX_FIRST + AT91_MB_RX_NUM - 1) | ||
56 | |||
57 | #define AT91_MB_RX_MASK(i) ((1 << (i)) - 1) | ||
58 | #define AT91_MB_RX_SPLIT 8 | ||
59 | #define AT91_MB_RX_LOW_LAST (AT91_MB_RX_SPLIT - 1) | ||
60 | #define AT91_MB_RX_LOW_MASK (AT91_MB_RX_MASK(AT91_MB_RX_SPLIT)) | ||
61 | |||
62 | #define AT91_MB_TX_NUM (1 << AT91_MB_TX_SHIFT) | ||
63 | #define AT91_MB_TX_FIRST (AT91_MB_RX_LAST + 1) | ||
64 | #define AT91_MB_TX_LAST (AT91_MB_TX_FIRST + AT91_MB_TX_NUM - 1) | ||
65 | |||
66 | #define AT91_NEXT_PRIO_SHIFT (AT91_MB_TX_SHIFT) | ||
67 | #define AT91_NEXT_PRIO_MASK (0xf << AT91_MB_TX_SHIFT) | ||
68 | #define AT91_NEXT_MB_MASK (AT91_MB_TX_NUM - 1) | ||
69 | #define AT91_NEXT_MASK ((AT91_MB_TX_NUM - 1) | AT91_NEXT_PRIO_MASK) | ||
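For reference, the split defined above works out to the following concrete values (derived directly from the macros):

/*
 * AT91_MB_RX_NUM   = 12  ->  RX mailboxes 0..11 (AT91_MB_RX_LAST = 11)
 * AT91_MB_RX_SPLIT =  8  ->  lower RX group 0..7, upper RX group 8..11
 * AT91_MB_RX_LOW_MASK    =   0x00ff
 * AT91_MB_TX_SHIFT =  2  ->  AT91_MB_TX_NUM = 4, TX mailboxes 12..15
 * AT91_NEXT_PRIO_MASK    =   0xf << 2 = 0x3c
 * AT91_NEXT_MB_MASK      =   0x03
 * AT91_NEXT_MASK         =   0x3f
 */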
70 | |||
71 | /* Common registers */ | ||
72 | enum at91_reg { | ||
73 | AT91_MR = 0x000, | ||
74 | AT91_IER = 0x004, | ||
75 | AT91_IDR = 0x008, | ||
76 | AT91_IMR = 0x00C, | ||
77 | AT91_SR = 0x010, | ||
78 | AT91_BR = 0x014, | ||
79 | AT91_TIM = 0x018, | ||
80 | AT91_TIMESTP = 0x01C, | ||
81 | AT91_ECR = 0x020, | ||
82 | AT91_TCR = 0x024, | ||
83 | AT91_ACR = 0x028, | ||
84 | }; | ||
85 | |||
86 | /* Mailbox registers (0 <= i <= 15) */ | ||
87 | #define AT91_MMR(i) (enum at91_reg)(0x200 + ((i) * 0x20)) | ||
88 | #define AT91_MAM(i) (enum at91_reg)(0x204 + ((i) * 0x20)) | ||
89 | #define AT91_MID(i) (enum at91_reg)(0x208 + ((i) * 0x20)) | ||
90 | #define AT91_MFID(i) (enum at91_reg)(0x20C + ((i) * 0x20)) | ||
91 | #define AT91_MSR(i) (enum at91_reg)(0x210 + ((i) * 0x20)) | ||
92 | #define AT91_MDL(i) (enum at91_reg)(0x214 + ((i) * 0x20)) | ||
93 | #define AT91_MDH(i) (enum at91_reg)(0x218 + ((i) * 0x20)) | ||
94 | #define AT91_MCR(i) (enum at91_reg)(0x21C + ((i) * 0x20)) | ||
95 | |||
96 | /* Register bits */ | ||
97 | #define AT91_MR_CANEN BIT(0) | ||
98 | #define AT91_MR_LPM BIT(1) | ||
99 | #define AT91_MR_ABM BIT(2) | ||
100 | #define AT91_MR_OVL BIT(3) | ||
101 | #define AT91_MR_TEOF BIT(4) | ||
102 | #define AT91_MR_TTM BIT(5) | ||
103 | #define AT91_MR_TIMFRZ BIT(6) | ||
104 | #define AT91_MR_DRPT BIT(7) | ||
105 | |||
106 | #define AT91_SR_RBSY BIT(29) | ||
107 | |||
108 | #define AT91_MMR_PRIO_SHIFT (16) | ||
109 | |||
110 | #define AT91_MID_MIDE BIT(29) | ||
111 | |||
112 | #define AT91_MSR_MRTR BIT(20) | ||
113 | #define AT91_MSR_MABT BIT(22) | ||
114 | #define AT91_MSR_MRDY BIT(23) | ||
115 | #define AT91_MSR_MMI BIT(24) | ||
116 | |||
117 | #define AT91_MCR_MRTR BIT(20) | ||
118 | #define AT91_MCR_MTCR BIT(23) | ||
119 | |||
120 | /* Mailbox Modes */ | ||
121 | enum at91_mb_mode { | ||
122 | AT91_MB_MODE_DISABLED = 0, | ||
123 | AT91_MB_MODE_RX = 1, | ||
124 | AT91_MB_MODE_RX_OVRWR = 2, | ||
125 | AT91_MB_MODE_TX = 3, | ||
126 | AT91_MB_MODE_CONSUMER = 4, | ||
127 | AT91_MB_MODE_PRODUCER = 5, | ||
128 | }; | ||
129 | |||
130 | /* Interrupt mask bits */ | ||
131 | #define AT91_IRQ_MB_RX ((1 << (AT91_MB_RX_LAST + 1)) \ | ||
132 | - (1 << AT91_MB_RX_FIRST)) | ||
133 | #define AT91_IRQ_MB_TX ((1 << (AT91_MB_TX_LAST + 1)) \ | ||
134 | - (1 << AT91_MB_TX_FIRST)) | ||
135 | #define AT91_IRQ_MB_ALL (AT91_IRQ_MB_RX | AT91_IRQ_MB_TX) | ||
136 | |||
137 | #define AT91_IRQ_ERRA (1 << 16) | ||
138 | #define AT91_IRQ_WARN (1 << 17) | ||
139 | #define AT91_IRQ_ERRP (1 << 18) | ||
140 | #define AT91_IRQ_BOFF (1 << 19) | ||
141 | #define AT91_IRQ_SLEEP (1 << 20) | ||
142 | #define AT91_IRQ_WAKEUP (1 << 21) | ||
143 | #define AT91_IRQ_TOVF (1 << 22) | ||
144 | #define AT91_IRQ_TSTP (1 << 23) | ||
145 | #define AT91_IRQ_CERR (1 << 24) | ||
146 | #define AT91_IRQ_SERR (1 << 25) | ||
147 | #define AT91_IRQ_AERR (1 << 26) | ||
148 | #define AT91_IRQ_FERR (1 << 27) | ||
149 | #define AT91_IRQ_BERR (1 << 28) | ||
150 | |||
151 | #define AT91_IRQ_ERR_ALL (0x1fff0000) | ||
152 | #define AT91_IRQ_ERR_FRAME (AT91_IRQ_CERR | AT91_IRQ_SERR | \ | ||
153 | AT91_IRQ_AERR | AT91_IRQ_FERR | AT91_IRQ_BERR) | ||
154 | #define AT91_IRQ_ERR_LINE (AT91_IRQ_ERRA | AT91_IRQ_WARN | \ | ||
155 | AT91_IRQ_ERRP | AT91_IRQ_BOFF) | ||
156 | |||
157 | #define AT91_IRQ_ALL (0x1fffffff) | ||
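With the mailbox split above, the per-mailbox interrupt masks evaluate to:

/*
 * AT91_IRQ_MB_RX  = (1 << 12) - (1 << 0)  = 0x00000fff  (mailboxes 0..11)
 * AT91_IRQ_MB_TX  = (1 << 16) - (1 << 12) = 0x0000f000  (mailboxes 12..15)
 * AT91_IRQ_MB_ALL =                         0x0000ffff
 * AT91_IRQ_ERR_ALL covers the error bits 16..28; AT91_IRQ_ALL adds the
 * mailbox bits on top: 0x1fff0000 | 0x0000ffff = 0x1fffffff.
 */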
158 | |||
159 | struct at91_priv { | ||
160 | struct can_priv can; /* must be the first member! */ | ||
161 | struct net_device *dev; | ||
162 | struct napi_struct napi; | ||
163 | |||
164 | void __iomem *reg_base; | ||
165 | |||
166 | u32 reg_sr; | ||
167 | unsigned int tx_next; | ||
168 | unsigned int tx_echo; | ||
169 | unsigned int rx_next; | ||
170 | |||
171 | struct clk *clk; | ||
172 | struct at91_can_data *pdata; | ||
173 | }; | ||
174 | |||
175 | static struct can_bittiming_const at91_bittiming_const = { | ||
176 | .tseg1_min = 4, | ||
177 | .tseg1_max = 16, | ||
178 | .tseg2_min = 2, | ||
179 | .tseg2_max = 8, | ||
180 | .sjw_max = 4, | ||
181 | .brp_min = 2, | ||
182 | .brp_max = 128, | ||
183 | .brp_inc = 1, | ||
184 | }; | ||
185 | |||
186 | static inline int get_tx_next_mb(const struct at91_priv *priv) | ||
187 | { | ||
188 | return (priv->tx_next & AT91_NEXT_MB_MASK) + AT91_MB_TX_FIRST; | ||
189 | } | ||
190 | |||
191 | static inline int get_tx_next_prio(const struct at91_priv *priv) | ||
192 | { | ||
193 | return (priv->tx_next >> AT91_NEXT_PRIO_SHIFT) & 0xf; | ||
194 | } | ||
195 | |||
196 | static inline int get_tx_echo_mb(const struct at91_priv *priv) | ||
197 | { | ||
198 | return (priv->tx_echo & AT91_NEXT_MB_MASK) + AT91_MB_TX_FIRST; | ||
199 | } | ||
200 | |||
201 | static inline u32 at91_read(const struct at91_priv *priv, enum at91_reg reg) | ||
202 | { | ||
203 | return readl(priv->reg_base + reg); | ||
204 | } | ||
205 | |||
206 | static inline void at91_write(const struct at91_priv *priv, enum at91_reg reg, | ||
207 | u32 value) | ||
208 | { | ||
209 | writel(value, priv->reg_base + reg); | ||
210 | } | ||
211 | |||
212 | static inline void set_mb_mode_prio(const struct at91_priv *priv, | ||
213 | unsigned int mb, enum at91_mb_mode mode, int prio) | ||
214 | { | ||
215 | at91_write(priv, AT91_MMR(mb), (mode << 24) | (prio << 16)); | ||
216 | } | ||
217 | |||
218 | static inline void set_mb_mode(const struct at91_priv *priv, unsigned int mb, | ||
219 | enum at91_mb_mode mode) | ||
220 | { | ||
221 | set_mb_mode_prio(priv, mb, mode, 0); | ||
222 | } | ||
223 | |||
224 | static struct sk_buff *alloc_can_skb(struct net_device *dev, | ||
225 | struct can_frame **cf) | ||
226 | { | ||
227 | struct sk_buff *skb; | ||
228 | |||
229 | skb = netdev_alloc_skb(dev, sizeof(struct can_frame)); | ||
230 | if (unlikely(!skb)) | ||
231 | return NULL; | ||
232 | |||
233 | skb->protocol = htons(ETH_P_CAN); | ||
234 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
235 | *cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame)); | ||
236 | |||
237 | return skb; | ||
238 | } | ||
239 | |||
240 | static struct sk_buff *alloc_can_err_skb(struct net_device *dev, | ||
241 | struct can_frame **cf) | ||
242 | { | ||
243 | struct sk_buff *skb; | ||
244 | |||
245 | skb = alloc_can_skb(dev, cf); | ||
246 | if (unlikely(!skb)) | ||
247 | return NULL; | ||
248 | |||
249 | memset(*cf, 0, sizeof(struct can_frame)); | ||
250 | (*cf)->can_id = CAN_ERR_FLAG; | ||
251 | (*cf)->can_dlc = CAN_ERR_DLC; | ||
252 | |||
253 | return skb; | ||
254 | } | ||
255 | |||
256 | /* | ||
257 | * Switch transceiver on or off | ||
258 | */ | ||
259 | static void at91_transceiver_switch(const struct at91_priv *priv, int on) | ||
260 | { | ||
261 | if (priv->pdata && priv->pdata->transceiver_switch) | ||
262 | priv->pdata->transceiver_switch(on); | ||
263 | } | ||
264 | |||
265 | static void at91_setup_mailboxes(struct net_device *dev) | ||
266 | { | ||
267 | struct at91_priv *priv = netdev_priv(dev); | ||
268 | unsigned int i; | ||
269 | |||
270 | /* | ||
271 | * The first 12 mailboxes are used as a reception FIFO. The | ||
272 | * last mailbox is configured with the overwrite option. The | ||
273 | * overwrite flag indicates a FIFO overflow. | ||
274 | */ | ||
275 | for (i = AT91_MB_RX_FIRST; i < AT91_MB_RX_LAST; i++) | ||
276 | set_mb_mode(priv, i, AT91_MB_MODE_RX); | ||
277 | set_mb_mode(priv, AT91_MB_RX_LAST, AT91_MB_MODE_RX_OVRWR); | ||
278 | |||
279 | /* The last 4 mailboxes are used for transmitting. */ | ||
280 | for (i = AT91_MB_TX_FIRST; i <= AT91_MB_TX_LAST; i++) | ||
281 | set_mb_mode_prio(priv, i, AT91_MB_MODE_TX, 0); | ||
282 | |||
283 | /* Reset tx and rx helper pointers */ | ||
284 | priv->tx_next = priv->tx_echo = priv->rx_next = 0; | ||
285 | } | ||
286 | |||
287 | static int at91_set_bittiming(struct net_device *dev) | ||
288 | { | ||
289 | const struct at91_priv *priv = netdev_priv(dev); | ||
290 | const struct can_bittiming *bt = &priv->can.bittiming; | ||
291 | u32 reg_br; | ||
292 | |||
293 | reg_br = ((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) << 24) | | ||
294 | ((bt->brp - 1) << 16) | ((bt->sjw - 1) << 12) | | ||
295 | ((bt->prop_seg - 1) << 8) | ((bt->phase_seg1 - 1) << 4) | | ||
296 | ((bt->phase_seg2 - 1) << 0); | ||
297 | |||
298 | dev_info(dev->dev.parent, "writing AT91_BR: 0x%08x\n", reg_br); | ||
299 | |||
300 | at91_write(priv, AT91_BR, reg_br); | ||
301 | |||
302 | return 0; | ||
303 | } | ||
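As a worked example (hypothetical bit-timing values, not taken from the driver): with triple sampling disabled, brp = 6, sjw = 1, prop_seg = 8, phase_seg1 = 7 and phase_seg2 = 4, the value written to AT91_BR is

	reg_br = ((6 - 1) << 16) | ((1 - 1) << 12) | ((8 - 1) << 8) |
		 ((7 - 1) << 4)  | ((4 - 1) << 0);	/* = 0x00050763 */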
304 | |||
305 | static void at91_chip_start(struct net_device *dev) | ||
306 | { | ||
307 | struct at91_priv *priv = netdev_priv(dev); | ||
308 | u32 reg_mr, reg_ier; | ||
309 | |||
310 | /* disable interrupts */ | ||
311 | at91_write(priv, AT91_IDR, AT91_IRQ_ALL); | ||
312 | |||
313 | /* disable chip */ | ||
314 | reg_mr = at91_read(priv, AT91_MR); | ||
315 | at91_write(priv, AT91_MR, reg_mr & ~AT91_MR_CANEN); | ||
316 | |||
317 | at91_setup_mailboxes(dev); | ||
318 | at91_transceiver_switch(priv, 1); | ||
319 | |||
320 | /* enable chip */ | ||
321 | at91_write(priv, AT91_MR, AT91_MR_CANEN); | ||
322 | |||
323 | priv->can.state = CAN_STATE_ERROR_ACTIVE; | ||
324 | |||
325 | /* Enable interrupts */ | ||
326 | reg_ier = AT91_IRQ_MB_RX | AT91_IRQ_ERRP | AT91_IRQ_ERR_FRAME; | ||
327 | at91_write(priv, AT91_IDR, AT91_IRQ_ALL); | ||
328 | at91_write(priv, AT91_IER, reg_ier); | ||
329 | } | ||
330 | |||
331 | static void at91_chip_stop(struct net_device *dev, enum can_state state) | ||
332 | { | ||
333 | struct at91_priv *priv = netdev_priv(dev); | ||
334 | u32 reg_mr; | ||
335 | |||
336 | /* disable interrupts */ | ||
337 | at91_write(priv, AT91_IDR, AT91_IRQ_ALL); | ||
338 | |||
339 | reg_mr = at91_read(priv, AT91_MR); | ||
340 | at91_write(priv, AT91_MR, reg_mr & ~AT91_MR_CANEN); | ||
341 | |||
342 | at91_transceiver_switch(priv, 0); | ||
343 | priv->can.state = state; | ||
344 | } | ||
345 | |||
346 | /* | ||
347 | * theory of operation: | ||
348 | * | ||
349 | * According to the datasheet priority 0 is the highest priority, 15 | ||
350 | * is the lowest. If two mailboxes have the same priority level the | ||
351 | * message of the mailbox with the lowest number is sent first. | ||
352 | * | ||
353 | * We use the first TX mailbox (AT91_MB_TX_FIRST) with prio 0, then | ||
354 | * the next mailbox with prio 0, and so on, until all mailboxes are | ||
355 | * used. Then we start from the beginning with mailbox | ||
356 | * AT91_MB_TX_FIRST, but with prio 1, then mailbox AT91_MB_TX_FIRST + 1 | ||
357 | * with prio 1, and so on. When we reach the last mailbox with prio 15, | ||
358 | * we have to stop sending, wait for all messages to be delivered, then | ||
359 | * start again with mailbox AT91_MB_TX_FIRST and prio 0. | ||
360 | * | ||
361 | * We use priv->tx_next as a counter for the next transmission | ||
362 | * mailbox, but without the offset AT91_MB_TX_FIRST. The lower bits | ||
363 | * encode the mailbox number, the upper 4 bits the mailbox priority: | ||
364 | * | ||
365 | * priv->tx_next = (prio << AT91_NEXT_PRIO_SHIFT) | | ||
366 | * (mb - AT91_MB_TX_FIRST); | ||
367 | * | ||
368 | */ | ||
369 | static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev) | ||
370 | { | ||
371 | struct at91_priv *priv = netdev_priv(dev); | ||
372 | struct net_device_stats *stats = &dev->stats; | ||
373 | struct can_frame *cf = (struct can_frame *)skb->data; | ||
374 | unsigned int mb, prio; | ||
375 | u32 reg_mid, reg_mcr; | ||
376 | |||
377 | mb = get_tx_next_mb(priv); | ||
378 | prio = get_tx_next_prio(priv); | ||
379 | |||
380 | if (unlikely(!(at91_read(priv, AT91_MSR(mb)) & AT91_MSR_MRDY))) { | ||
381 | netif_stop_queue(dev); | ||
382 | |||
383 | dev_err(dev->dev.parent, | ||
384 | "BUG! TX buffer full when queue awake!\n"); | ||
385 | return NETDEV_TX_BUSY; | ||
386 | } | ||
387 | |||
388 | if (cf->can_id & CAN_EFF_FLAG) | ||
389 | reg_mid = (cf->can_id & CAN_EFF_MASK) | AT91_MID_MIDE; | ||
390 | else | ||
391 | reg_mid = (cf->can_id & CAN_SFF_MASK) << 18; | ||
392 | |||
393 | reg_mcr = ((cf->can_id & CAN_RTR_FLAG) ? AT91_MCR_MRTR : 0) | | ||
394 | (cf->can_dlc << 16) | AT91_MCR_MTCR; | ||
395 | |||
396 | /* disable MB while writing ID (see datasheet) */ | ||
397 | set_mb_mode(priv, mb, AT91_MB_MODE_DISABLED); | ||
398 | at91_write(priv, AT91_MID(mb), reg_mid); | ||
399 | set_mb_mode_prio(priv, mb, AT91_MB_MODE_TX, prio); | ||
400 | |||
401 | at91_write(priv, AT91_MDL(mb), *(u32 *)(cf->data + 0)); | ||
402 | at91_write(priv, AT91_MDH(mb), *(u32 *)(cf->data + 4)); | ||
403 | |||
404 | /* This triggers transmission */ | ||
405 | at91_write(priv, AT91_MCR(mb), reg_mcr); | ||
406 | |||
407 | stats->tx_bytes += cf->can_dlc; | ||
408 | dev->trans_start = jiffies; | ||
409 | |||
410 | /* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */ | ||
411 | can_put_echo_skb(skb, dev, mb - AT91_MB_TX_FIRST); | ||
412 | |||
413 | /* | ||
414 | * We have to stop the queue and deliver all messages in case | ||
415 | * of a prio+mb counter wrap around. This is the case when both | ||
416 | * the prio and mailbox fields of tx_next are 0. | ||
417 | * | ||
418 | * also stop the queue if next buffer is still in use | ||
419 | * (== not ready) | ||
420 | */ | ||
421 | priv->tx_next++; | ||
422 | if (!(at91_read(priv, AT91_MSR(get_tx_next_mb(priv))) & | ||
423 | AT91_MSR_MRDY) || | ||
424 | (priv->tx_next & AT91_NEXT_MASK) == 0) | ||
425 | netif_stop_queue(dev); | ||
426 | |||
427 | /* Enable interrupt for this mailbox */ | ||
428 | at91_write(priv, AT91_IER, 1 << mb); | ||
429 | |||
430 | return NETDEV_TX_OK; | ||
431 | } | ||
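To make the tx_next bookkeeping concrete, here is a small stand-alone sketch (illustration only, built in user space with the same constants the driver derives from AT91_MB_TX_SHIFT = 2; it is not part of the driver):

#include <stdio.h>

#define MB_TX_FIRST     12	/* AT91_MB_TX_FIRST */
#define NEXT_MB_MASK    0x03	/* AT91_NEXT_MB_MASK */
#define NEXT_PRIO_SHIFT 2	/* AT91_NEXT_PRIO_SHIFT */
#define NEXT_MASK       0x3f	/* AT91_NEXT_MASK */

int main(void)
{
	/* a few sample values of the tx_next counter */
	unsigned int samples[] = { 0, 1, 2, 3, 4, 5, 62, 63 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		unsigned int tx_next = samples[i];
		unsigned int mb   = (tx_next & NEXT_MB_MASK) + MB_TX_FIRST;
		unsigned int prio = (tx_next >> NEXT_PRIO_SHIFT) & 0xf;

		/* when the increment wraps the counter to 0, the driver
		 * stops the queue until all frames have been echoed back */
		printf("tx_next=%2u -> mailbox %u, prio %2u%s\n",
		       tx_next, mb, prio,
		       ((tx_next + 1) & NEXT_MASK) == 0 ?
		       "  (wrap: queue stops)" : "");
	}

	return 0;
}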
432 | |||
433 | /** | ||
434 | * at91_activate_rx_low - activate lower rx mailboxes | ||
435 | * @priv: at91 context | ||
436 | * | ||
437 | * Reenables the lower mailboxes for reception of new CAN messages | ||
438 | */ | ||
439 | static inline void at91_activate_rx_low(const struct at91_priv *priv) | ||
440 | { | ||
441 | u32 mask = AT91_MB_RX_LOW_MASK; | ||
442 | at91_write(priv, AT91_TCR, mask); | ||
443 | } | ||
444 | |||
445 | /** | ||
446 | * at91_activate_rx_mb - reactivate a single rx mailbox | ||
447 | * @priv: at91 context | ||
448 | * @mb: mailbox to reactivate | ||
449 | * | ||
450 | * Reenables given mailbox for reception of new CAN messages | ||
451 | */ | ||
452 | static inline void at91_activate_rx_mb(const struct at91_priv *priv, | ||
453 | unsigned int mb) | ||
454 | { | ||
455 | u32 mask = 1 << mb; | ||
456 | at91_write(priv, AT91_TCR, mask); | ||
457 | } | ||
458 | |||
459 | /** | ||
460 | * at91_rx_overflow_err - send error frame due to rx overflow | ||
461 | * @dev: net device | ||
462 | */ | ||
463 | static void at91_rx_overflow_err(struct net_device *dev) | ||
464 | { | ||
465 | struct net_device_stats *stats = &dev->stats; | ||
466 | struct sk_buff *skb; | ||
467 | struct can_frame *cf; | ||
468 | |||
469 | dev_dbg(dev->dev.parent, "RX buffer overflow\n"); | ||
470 | stats->rx_over_errors++; | ||
471 | stats->rx_errors++; | ||
472 | |||
473 | skb = alloc_can_err_skb(dev, &cf); | ||
474 | if (unlikely(!skb)) | ||
475 | return; | ||
476 | |||
477 | cf->can_id |= CAN_ERR_CRTL; | ||
478 | cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; | ||
479 | netif_receive_skb(skb); | ||
480 | |||
481 | stats->rx_packets++; | ||
482 | stats->rx_bytes += cf->can_dlc; | ||
483 | } | ||
484 | |||
485 | /** | ||
486 | * at91_read_mb - read CAN msg from mailbox (lowlevel impl) | ||
487 | * @dev: net device | ||
488 | * @mb: mailbox number to read from | ||
489 | * @cf: can frame where to store message | ||
490 | * | ||
491 | * Reads a CAN message from the given mailbox and stores data into | ||
492 | * the given can frame. "mb" and "cf" must be valid. | ||
493 | */ | ||
494 | static void at91_read_mb(struct net_device *dev, unsigned int mb, | ||
495 | struct can_frame *cf) | ||
496 | { | ||
497 | const struct at91_priv *priv = netdev_priv(dev); | ||
498 | u32 reg_msr, reg_mid; | ||
499 | |||
500 | reg_mid = at91_read(priv, AT91_MID(mb)); | ||
501 | if (reg_mid & AT91_MID_MIDE) | ||
502 | cf->can_id = ((reg_mid >> 0) & CAN_EFF_MASK) | CAN_EFF_FLAG; | ||
503 | else | ||
504 | cf->can_id = (reg_mid >> 18) & CAN_SFF_MASK; | ||
505 | |||
506 | reg_msr = at91_read(priv, AT91_MSR(mb)); | ||
507 | if (reg_msr & AT91_MSR_MRTR) | ||
508 | cf->can_id |= CAN_RTR_FLAG; | ||
509 | cf->can_dlc = min_t(__u8, (reg_msr >> 16) & 0xf, 8); | ||
510 | |||
511 | *(u32 *)(cf->data + 0) = at91_read(priv, AT91_MDL(mb)); | ||
512 | *(u32 *)(cf->data + 4) = at91_read(priv, AT91_MDH(mb)); | ||
513 | |||
514 | if (unlikely(mb == AT91_MB_RX_LAST && reg_msr & AT91_MSR_MMI)) | ||
515 | at91_rx_overflow_err(dev); | ||
516 | } | ||
517 | |||
518 | /** | ||
519 | * at91_read_msg - read CAN message from mailbox | ||
520 | * @dev: net device | ||
521 | * @mb: mailbox to read from | ||
522 | * | ||
523 | * Reads a CAN message from the given mailbox, puts it into the Linux | ||
524 | * network RX queue, and does all housekeeping chores (stats, ...) | ||
525 | */ | ||
526 | static void at91_read_msg(struct net_device *dev, unsigned int mb) | ||
527 | { | ||
528 | struct net_device_stats *stats = &dev->stats; | ||
529 | struct can_frame *cf; | ||
530 | struct sk_buff *skb; | ||
531 | |||
532 | skb = alloc_can_skb(dev, &cf); | ||
533 | if (unlikely(!skb)) { | ||
534 | stats->rx_dropped++; | ||
535 | return; | ||
536 | } | ||
537 | |||
538 | at91_read_mb(dev, mb, cf); | ||
539 | netif_receive_skb(skb); | ||
540 | |||
541 | stats->rx_packets++; | ||
542 | stats->rx_bytes += cf->can_dlc; | ||
543 | } | ||
544 | |||
545 | /** | ||
546 | * at91_poll_rx - read multiple CAN messages from mailboxes | ||
547 | * @dev: net device | ||
548 | * @quota: max number of packets we're allowed to receive | ||
549 | * | ||
550 | * Theory of Operation: | ||
551 | * | ||
552 | * 12 of the 16 mailboxes on the chip are reserved for RX. We split | ||
553 | * them into 2 groups. The lower group holds 8 mailboxes, the upper 4. | ||
554 | * | ||
555 | * Like it or not, the chip always saves a received CAN message | ||
556 | * into the first free mailbox it finds (starting with the | ||
557 | * lowest). This makes it very difficult to read the messages in the | ||
558 | * right order from the chip. This is how we work around that problem: | ||
559 | * | ||
560 | * The first message goes into mb nr. 0 and issues an interrupt. All | ||
561 | * rx ints are disabled in the interrupt handler and a napi poll is | ||
562 | * scheduled. We read the mailbox, but do _not_ reenable the mb (to | ||
563 | * receive another message). | ||
564 | * | ||
565 | * lower mbxs upper | ||
566 | * ______^______ __^__ | ||
567 | * / \ / \ | ||
568 | * +-+-+-+-+-+-+-+-++-+-+-+-+ | ||
569 | * |x|x|x|x|x|x|x|x|| | | | | | ||
570 | * +-+-+-+-+-+-+-+-++-+-+-+-+ | ||
571 | * 0 0 0 0 0 0 0 0 0 0 1 1 \ mail | ||
572 | * 0 1 2 3 4 5 6 7 8 9 0 1 / box | ||
573 | * | ||
574 | * The variable priv->rx_next points to the next mailbox to read a | ||
575 | * message from. As long as we're in the lower mailboxes we just read | ||
576 | * the mailbox but do not reenable it. | ||
577 | * | ||
578 | * After completing the last of the lower mailboxes, we reenable the | ||
579 | * whole first group, but continue to look for filled mailboxes in the | ||
580 | * upper mailboxes. Think of the second group as overflow mailboxes | ||
581 | * which take CAN messages if the lower group is full. While in the | ||
582 | * upper group we reenable each mailbox right after reading it, giving | ||
583 | * the chip more room to store messages. | ||
584 | * | ||
585 | * After finishing we look at the lower group again, if we still have | ||
586 | * quota left. | ||
587 | * | ||
588 | */ | ||
589 | static int at91_poll_rx(struct net_device *dev, int quota) | ||
590 | { | ||
591 | struct at91_priv *priv = netdev_priv(dev); | ||
592 | u32 reg_sr = at91_read(priv, AT91_SR); | ||
593 | const unsigned long *addr = (unsigned long *)®_sr; | ||
594 | unsigned int mb; | ||
595 | int received = 0; | ||
596 | |||
597 | if (priv->rx_next > AT91_MB_RX_LOW_LAST && | ||
598 | reg_sr & AT91_MB_RX_LOW_MASK) | ||
599 | dev_info(dev->dev.parent, | ||
600 | "order of incoming frames cannot be guaranteed\n"); | ||
601 | |||
602 | again: | ||
603 | for (mb = find_next_bit(addr, AT91_MB_RX_NUM, priv->rx_next); | ||
604 | mb < AT91_MB_RX_NUM && quota > 0; | ||
605 | reg_sr = at91_read(priv, AT91_SR), | ||
606 | mb = find_next_bit(addr, AT91_MB_RX_NUM, ++priv->rx_next)) { | ||
607 | at91_read_msg(dev, mb); | ||
608 | |||
609 | /* reactivate mailboxes */ | ||
610 | if (mb == AT91_MB_RX_LOW_LAST) | ||
611 | /* all lower mailboxes, we just finished reading the last one */ | ||
612 | at91_activate_rx_low(priv); | ||
613 | else if (mb > AT91_MB_RX_LOW_LAST) | ||
614 | /* only the mailbox we read */ | ||
615 | at91_activate_rx_mb(priv, mb); | ||
616 | |||
617 | received++; | ||
618 | quota--; | ||
619 | } | ||
620 | |||
621 | /* upper group completed, look again in lower */ | ||
622 | if (priv->rx_next > AT91_MB_RX_LOW_LAST && | ||
623 | quota > 0 && mb >= AT91_MB_RX_NUM) { | ||
624 | priv->rx_next = 0; | ||
625 | goto again; | ||
626 | } | ||
627 | |||
628 | return received; | ||
629 | } | ||
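A worked pass through at91_poll_rx(), assuming a hypothetical status register value (not taken from the source):

/*
 * Assume the low 12 bits of AT91_SR read 0x0fff (all RX mailboxes full),
 * priv->rx_next == 0 and quota >= 12:
 *
 *   mb 0..6  : read message, no reactivation yet
 *   mb 7     : read message, then AT91_TCR <- AT91_MB_RX_LOW_MASK (0x00ff),
 *              reenabling the whole lower group in one go
 *   mb 8..11 : read message, then AT91_TCR <- (1 << mb), reenabling only
 *              that single mailbox
 *
 * If quota is left once rx_next has passed the lower group, the loop
 * restarts at mailbox 0 (the "again:" label) to drain frames that
 * arrived in the meantime.
 */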
630 | |||
631 | static void at91_poll_err_frame(struct net_device *dev, | ||
632 | struct can_frame *cf, u32 reg_sr) | ||
633 | { | ||
634 | struct at91_priv *priv = netdev_priv(dev); | ||
635 | |||
636 | /* CRC error */ | ||
637 | if (reg_sr & AT91_IRQ_CERR) { | ||
638 | dev_dbg(dev->dev.parent, "CERR irq\n"); | ||
639 | dev->stats.rx_errors++; | ||
640 | priv->can.can_stats.bus_error++; | ||
641 | cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; | ||
642 | } | ||
643 | |||
644 | /* Stuffing Error */ | ||
645 | if (reg_sr & AT91_IRQ_SERR) { | ||
646 | dev_dbg(dev->dev.parent, "SERR irq\n"); | ||
647 | dev->stats.rx_errors++; | ||
648 | priv->can.can_stats.bus_error++; | ||
649 | cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; | ||
650 | cf->data[2] |= CAN_ERR_PROT_STUFF; | ||
651 | } | ||
652 | |||
653 | /* Acknowledgement Error */ | ||
654 | if (reg_sr & AT91_IRQ_AERR) { | ||
655 | dev_dbg(dev->dev.parent, "AERR irq\n"); | ||
656 | dev->stats.tx_errors++; | ||
657 | cf->can_id |= CAN_ERR_ACK; | ||
658 | } | ||
659 | |||
660 | /* Form error */ | ||
661 | if (reg_sr & AT91_IRQ_FERR) { | ||
662 | dev_dbg(dev->dev.parent, "FERR irq\n"); | ||
663 | dev->stats.rx_errors++; | ||
664 | priv->can.can_stats.bus_error++; | ||
665 | cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; | ||
666 | cf->data[2] |= CAN_ERR_PROT_FORM; | ||
667 | } | ||
668 | |||
669 | /* Bit Error */ | ||
670 | if (reg_sr & AT91_IRQ_BERR) { | ||
671 | dev_dbg(dev->dev.parent, "BERR irq\n"); | ||
672 | dev->stats.tx_errors++; | ||
673 | priv->can.can_stats.bus_error++; | ||
674 | cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; | ||
675 | cf->data[2] |= CAN_ERR_PROT_BIT; | ||
676 | } | ||
677 | } | ||
678 | |||
679 | static int at91_poll_err(struct net_device *dev, int quota, u32 reg_sr) | ||
680 | { | ||
681 | struct sk_buff *skb; | ||
682 | struct can_frame *cf; | ||
683 | |||
684 | if (quota == 0) | ||
685 | return 0; | ||
686 | |||
687 | skb = alloc_can_err_skb(dev, &cf); | ||
688 | if (unlikely(!skb)) | ||
689 | return 0; | ||
690 | |||
691 | at91_poll_err_frame(dev, cf, reg_sr); | ||
692 | netif_receive_skb(skb); | ||
693 | |||
694 | dev->last_rx = jiffies; | ||
695 | dev->stats.rx_packets++; | ||
696 | dev->stats.rx_bytes += cf->can_dlc; | ||
697 | |||
698 | return 1; | ||
699 | } | ||
700 | |||
701 | static int at91_poll(struct napi_struct *napi, int quota) | ||
702 | { | ||
703 | struct net_device *dev = napi->dev; | ||
704 | const struct at91_priv *priv = netdev_priv(dev); | ||
705 | u32 reg_sr = at91_read(priv, AT91_SR); | ||
706 | int work_done = 0; | ||
707 | |||
708 | if (reg_sr & AT91_IRQ_MB_RX) | ||
709 | work_done += at91_poll_rx(dev, quota - work_done); | ||
710 | |||
711 | /* | ||
712 | * The error bits are clear on read, | ||
713 | * so use saved value from irq handler. | ||
714 | */ | ||
715 | reg_sr |= priv->reg_sr; | ||
716 | if (reg_sr & AT91_IRQ_ERR_FRAME) | ||
717 | work_done += at91_poll_err(dev, quota - work_done, reg_sr); | ||
718 | |||
719 | if (work_done < quota) { | ||
720 | /* enable IRQs for frame errors and all mailboxes >= rx_next */ | ||
721 | u32 reg_ier = AT91_IRQ_ERR_FRAME; | ||
722 | reg_ier |= AT91_IRQ_MB_RX & ~AT91_MB_RX_MASK(priv->rx_next); | ||
723 | |||
724 | napi_complete(napi); | ||
725 | at91_write(priv, AT91_IER, reg_ier); | ||
726 | } | ||
727 | |||
728 | return work_done; | ||
729 | } | ||
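For illustration, the interrupt re-enable mask at the end of at91_poll() with a hypothetical rx_next of 5:

/*
 * AT91_MB_RX_MASK(5)       = 0x001f   (mailboxes 0..4, already read)
 * AT91_IRQ_MB_RX & ~0x001f = 0x0fe0   (mailboxes 5..11)
 * reg_ier = AT91_IRQ_ERR_FRAME | 0x0fe0
 *
 * i.e. only the RX mailboxes that have not been read in this poll pass
 * get their receive interrupt re-armed.
 */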
730 | |||
731 | /* | ||
732 | * theory of operation: | ||
733 | * | ||
734 | * priv->tx_echo holds the number of the oldest can_frame put for | ||
735 | * transmission into the hardware, but not yet ACKed by the CAN tx | ||
736 | * complete IRQ. | ||
737 | * | ||
738 | * We iterate from priv->tx_echo to priv->tx_next and check if the | ||
739 | * packet has been transmitted; if so, we echo it back to the CAN | ||
740 | * framework. If we discover a not yet transmitted packet, we stop | ||
741 | * looking for more. | ||
742 | */ | ||
743 | static void at91_irq_tx(struct net_device *dev, u32 reg_sr) | ||
744 | { | ||
745 | struct at91_priv *priv = netdev_priv(dev); | ||
746 | u32 reg_msr; | ||
747 | unsigned int mb; | ||
748 | |||
749 | /* masking of reg_sr not needed, already done by at91_irq */ | ||
750 | |||
751 | for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) { | ||
752 | mb = get_tx_echo_mb(priv); | ||
753 | |||
754 | /* no event in mailbox? */ | ||
755 | if (!(reg_sr & (1 << mb))) | ||
756 | break; | ||
757 | |||
758 | /* Disable irq for this TX mailbox */ | ||
759 | at91_write(priv, AT91_IDR, 1 << mb); | ||
760 | |||
761 | /* | ||
762 | * only echo if mailbox signals us a transfer | ||
763 | * complete (MSR_MRDY). Otherwise it's a transfer | ||
764 | * abort. "can_bus_off()" takes care of the skbs | ||
765 | * parked in the echo queue. | ||
766 | */ | ||
767 | reg_msr = at91_read(priv, AT91_MSR(mb)); | ||
768 | if (likely(reg_msr & AT91_MSR_MRDY && | ||
769 | ~reg_msr & AT91_MSR_MABT)) { | ||
770 | /* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */ | ||
771 | can_get_echo_skb(dev, mb - AT91_MB_TX_FIRST); | ||
772 | dev->stats.tx_packets++; | ||
773 | } | ||
774 | } | ||
775 | |||
776 | /* | ||
777 | * restart the queue if we don't have a wrap around, but also | ||
778 | * restart it if we get a TX int for the last can frame directly | ||
779 | * before a wrap around. | ||
780 | */ | ||
781 | if ((priv->tx_next & AT91_NEXT_MASK) != 0 || | ||
782 | (priv->tx_echo & AT91_NEXT_MASK) == 0) | ||
783 | netif_wake_queue(dev); | ||
784 | } | ||
785 | |||
786 | static void at91_irq_err_state(struct net_device *dev, | ||
787 | struct can_frame *cf, enum can_state new_state) | ||
788 | { | ||
789 | struct at91_priv *priv = netdev_priv(dev); | ||
790 | u32 reg_idr, reg_ier, reg_ecr; | ||
791 | u8 tec, rec; | ||
792 | |||
793 | reg_ecr = at91_read(priv, AT91_ECR); | ||
794 | rec = reg_ecr & 0xff; | ||
795 | tec = reg_ecr >> 16; | ||
796 | |||
797 | switch (priv->can.state) { | ||
798 | case CAN_STATE_ERROR_ACTIVE: | ||
799 | /* | ||
800 | * from: ERROR_ACTIVE | ||
801 | * to : ERROR_WARNING, ERROR_PASSIVE, BUS_OFF | ||
802 | * => : there was a warning int | ||
803 | */ | ||
804 | if (new_state >= CAN_STATE_ERROR_WARNING && | ||
805 | new_state <= CAN_STATE_BUS_OFF) { | ||
806 | dev_dbg(dev->dev.parent, "Error Warning IRQ\n"); | ||
807 | priv->can.can_stats.error_warning++; | ||
808 | |||
809 | cf->can_id |= CAN_ERR_CRTL; | ||
810 | cf->data[1] = (tec > rec) ? | ||
811 | CAN_ERR_CRTL_TX_WARNING : | ||
812 | CAN_ERR_CRTL_RX_WARNING; | ||
813 | } | ||
814 | case CAN_STATE_ERROR_WARNING: /* fallthrough */ | ||
815 | /* | ||
816 | * from: ERROR_ACTIVE, ERROR_WARNING | ||
817 | * to : ERROR_PASSIVE, BUS_OFF | ||
818 | * => : error passive int | ||
819 | */ | ||
820 | if (new_state >= CAN_STATE_ERROR_PASSIVE && | ||
821 | new_state <= CAN_STATE_BUS_OFF) { | ||
822 | dev_dbg(dev->dev.parent, "Error Passive IRQ\n"); | ||
823 | priv->can.can_stats.error_passive++; | ||
824 | |||
825 | cf->can_id |= CAN_ERR_CRTL; | ||
826 | cf->data[1] = (tec > rec) ? | ||
827 | CAN_ERR_CRTL_TX_PASSIVE : | ||
828 | CAN_ERR_CRTL_RX_PASSIVE; | ||
829 | } | ||
830 | break; | ||
831 | case CAN_STATE_BUS_OFF: | ||
832 | /* | ||
833 | * from: BUS_OFF | ||
834 | * to : ERROR_ACTIVE, ERROR_WARNING, ERROR_PASSIVE | ||
835 | */ | ||
836 | if (new_state <= CAN_STATE_ERROR_PASSIVE) { | ||
837 | cf->can_id |= CAN_ERR_RESTARTED; | ||
838 | |||
839 | dev_dbg(dev->dev.parent, "restarted\n"); | ||
840 | priv->can.can_stats.restarts++; | ||
841 | |||
842 | netif_carrier_on(dev); | ||
843 | netif_wake_queue(dev); | ||
844 | } | ||
845 | break; | ||
846 | default: | ||
847 | break; | ||
848 | } | ||
849 | |||
850 | |||
851 | /* process state changes depending on the new state */ | ||
852 | switch (new_state) { | ||
853 | case CAN_STATE_ERROR_ACTIVE: | ||
854 | /* | ||
855 | * actually we want to enable AT91_IRQ_WARN here, but | ||
856 | * it screws up the system under certain | ||
857 | * circumstances, so just enable AT91_IRQ_ERRP, thus | ||
858 | * the "fallthrough" | ||
859 | */ | ||
860 | dev_dbg(dev->dev.parent, "Error Active\n"); | ||
861 | cf->can_id |= CAN_ERR_PROT; | ||
862 | cf->data[2] = CAN_ERR_PROT_ACTIVE; | ||
863 | case CAN_STATE_ERROR_WARNING: /* fallthrough */ | ||
864 | reg_idr = AT91_IRQ_ERRA | AT91_IRQ_WARN | AT91_IRQ_BOFF; | ||
865 | reg_ier = AT91_IRQ_ERRP; | ||
866 | break; | ||
867 | case CAN_STATE_ERROR_PASSIVE: | ||
868 | reg_idr = AT91_IRQ_ERRA | AT91_IRQ_WARN | AT91_IRQ_ERRP; | ||
869 | reg_ier = AT91_IRQ_BOFF; | ||
870 | break; | ||
871 | case CAN_STATE_BUS_OFF: | ||
872 | reg_idr = AT91_IRQ_ERRA | AT91_IRQ_ERRP | | ||
873 | AT91_IRQ_WARN | AT91_IRQ_BOFF; | ||
874 | reg_ier = 0; | ||
875 | |||
876 | cf->can_id |= CAN_ERR_BUSOFF; | ||
877 | |||
878 | dev_dbg(dev->dev.parent, "bus-off\n"); | ||
879 | netif_carrier_off(dev); | ||
880 | priv->can.can_stats.bus_off++; | ||
881 | |||
882 | /* turn off chip, if restart is disabled */ | ||
883 | if (!priv->can.restart_ms) { | ||
884 | at91_chip_stop(dev, CAN_STATE_BUS_OFF); | ||
885 | return; | ||
886 | } | ||
887 | break; | ||
888 | default: | ||
889 | break; | ||
890 | } | ||
891 | |||
892 | at91_write(priv, AT91_IDR, reg_idr); | ||
893 | at91_write(priv, AT91_IER, reg_ier); | ||
894 | } | ||
895 | |||
896 | static void at91_irq_err(struct net_device *dev) | ||
897 | { | ||
898 | struct at91_priv *priv = netdev_priv(dev); | ||
899 | struct sk_buff *skb; | ||
900 | struct can_frame *cf; | ||
901 | enum can_state new_state; | ||
902 | u32 reg_sr; | ||
903 | |||
904 | reg_sr = at91_read(priv, AT91_SR); | ||
905 | |||
906 | /* we need to look at the unmasked reg_sr */ | ||
907 | if (unlikely(reg_sr & AT91_IRQ_BOFF)) | ||
908 | new_state = CAN_STATE_BUS_OFF; | ||
909 | else if (unlikely(reg_sr & AT91_IRQ_ERRP)) | ||
910 | new_state = CAN_STATE_ERROR_PASSIVE; | ||
911 | else if (unlikely(reg_sr & AT91_IRQ_WARN)) | ||
912 | new_state = CAN_STATE_ERROR_WARNING; | ||
913 | else if (likely(reg_sr & AT91_IRQ_ERRA)) | ||
914 | new_state = CAN_STATE_ERROR_ACTIVE; | ||
915 | else { | ||
916 | dev_err(dev->dev.parent, "BUG! hardware in undefined state\n"); | ||
917 | return; | ||
918 | } | ||
919 | |||
920 | /* state hasn't changed */ | ||
921 | if (likely(new_state == priv->can.state)) | ||
922 | return; | ||
923 | |||
924 | skb = alloc_can_err_skb(dev, &cf); | ||
925 | if (unlikely(!skb)) | ||
926 | return; | ||
927 | |||
928 | at91_irq_err_state(dev, cf, new_state); | ||
929 | netif_rx(skb); | ||
930 | |||
931 | dev->last_rx = jiffies; | ||
932 | dev->stats.rx_packets++; | ||
933 | dev->stats.rx_bytes += cf->can_dlc; | ||
934 | |||
935 | priv->can.state = new_state; | ||
936 | } | ||
937 | |||
938 | /* | ||
939 | * interrupt handler | ||
940 | */ | ||
941 | static irqreturn_t at91_irq(int irq, void *dev_id) | ||
942 | { | ||
943 | struct net_device *dev = dev_id; | ||
944 | struct at91_priv *priv = netdev_priv(dev); | ||
945 | irqreturn_t handled = IRQ_NONE; | ||
946 | u32 reg_sr, reg_imr; | ||
947 | |||
948 | reg_sr = at91_read(priv, AT91_SR); | ||
949 | reg_imr = at91_read(priv, AT91_IMR); | ||
950 | |||
951 | /* Ignore masked interrupts */ | ||
952 | reg_sr &= reg_imr; | ||
953 | if (!reg_sr) | ||
954 | goto exit; | ||
955 | |||
956 | handled = IRQ_HANDLED; | ||
957 | |||
958 | /* Receive or error interrupt? -> napi */ | ||
959 | if (reg_sr & (AT91_IRQ_MB_RX | AT91_IRQ_ERR_FRAME)) { | ||
960 | /* | ||
961 | * The error bits are clear on read, | ||
962 | * save for later use. | ||
963 | */ | ||
964 | priv->reg_sr = reg_sr; | ||
965 | at91_write(priv, AT91_IDR, | ||
966 | AT91_IRQ_MB_RX | AT91_IRQ_ERR_FRAME); | ||
967 | napi_schedule(&priv->napi); | ||
968 | } | ||
969 | |||
970 | /* Transmission complete interrupt */ | ||
971 | if (reg_sr & AT91_IRQ_MB_TX) | ||
972 | at91_irq_tx(dev, reg_sr); | ||
973 | |||
974 | at91_irq_err(dev); | ||
975 | |||
976 | exit: | ||
977 | return handled; | ||
978 | } | ||
979 | |||
980 | static int at91_open(struct net_device *dev) | ||
981 | { | ||
982 | struct at91_priv *priv = netdev_priv(dev); | ||
983 | int err; | ||
984 | |||
985 | clk_enable(priv->clk); | ||
986 | |||
987 | /* check or determine and set bittime */ | ||
988 | err = open_candev(dev); | ||
989 | if (err) | ||
990 | goto out; | ||
991 | |||
992 | /* register interrupt handler */ | ||
993 | if (request_irq(dev->irq, at91_irq, IRQF_SHARED, | ||
994 | dev->name, dev)) { | ||
995 | err = -EAGAIN; | ||
996 | goto out_close; | ||
997 | } | ||
998 | |||
999 | /* start chip and queuing */ | ||
1000 | at91_chip_start(dev); | ||
1001 | napi_enable(&priv->napi); | ||
1002 | netif_start_queue(dev); | ||
1003 | |||
1004 | return 0; | ||
1005 | |||
1006 | out_close: | ||
1007 | close_candev(dev); | ||
1008 | out: | ||
1009 | clk_disable(priv->clk); | ||
1010 | |||
1011 | return err; | ||
1012 | } | ||
1013 | |||
1014 | /* | ||
1015 | * stop CAN bus activity | ||
1016 | */ | ||
1017 | static int at91_close(struct net_device *dev) | ||
1018 | { | ||
1019 | struct at91_priv *priv = netdev_priv(dev); | ||
1020 | |||
1021 | netif_stop_queue(dev); | ||
1022 | napi_disable(&priv->napi); | ||
1023 | at91_chip_stop(dev, CAN_STATE_STOPPED); | ||
1024 | |||
1025 | free_irq(dev->irq, dev); | ||
1026 | clk_disable(priv->clk); | ||
1027 | |||
1028 | close_candev(dev); | ||
1029 | |||
1030 | return 0; | ||
1031 | } | ||
1032 | |||
1033 | static int at91_set_mode(struct net_device *dev, enum can_mode mode) | ||
1034 | { | ||
1035 | switch (mode) { | ||
1036 | case CAN_MODE_START: | ||
1037 | at91_chip_start(dev); | ||
1038 | netif_wake_queue(dev); | ||
1039 | break; | ||
1040 | |||
1041 | default: | ||
1042 | return -EOPNOTSUPP; | ||
1043 | } | ||
1044 | |||
1045 | return 0; | ||
1046 | } | ||
1047 | |||
1048 | static const struct net_device_ops at91_netdev_ops = { | ||
1049 | .ndo_open = at91_open, | ||
1050 | .ndo_stop = at91_close, | ||
1051 | .ndo_start_xmit = at91_start_xmit, | ||
1052 | }; | ||
1053 | |||
1054 | static int __init at91_can_probe(struct platform_device *pdev) | ||
1055 | { | ||
1056 | struct net_device *dev; | ||
1057 | struct at91_priv *priv; | ||
1058 | struct resource *res; | ||
1059 | struct clk *clk; | ||
1060 | void __iomem *addr; | ||
1061 | int err, irq; | ||
1062 | |||
1063 | clk = clk_get(&pdev->dev, "can_clk"); | ||
1064 | if (IS_ERR(clk)) { | ||
1065 | dev_err(&pdev->dev, "no clock defined\n"); | ||
1066 | err = -ENODEV; | ||
1067 | goto exit; | ||
1068 | } | ||
1069 | |||
1070 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1071 | irq = platform_get_irq(pdev, 0); | ||
1072 | if (!res || !irq) { | ||
1073 | err = -ENODEV; | ||
1074 | goto exit_put; | ||
1075 | } | ||
1076 | |||
1077 | if (!request_mem_region(res->start, | ||
1078 | resource_size(res), | ||
1079 | pdev->name)) { | ||
1080 | err = -EBUSY; | ||
1081 | goto exit_put; | ||
1082 | } | ||
1083 | |||
1084 | addr = ioremap_nocache(res->start, resource_size(res)); | ||
1085 | if (!addr) { | ||
1086 | err = -ENOMEM; | ||
1087 | goto exit_release; | ||
1088 | } | ||
1089 | |||
1090 | dev = alloc_candev(sizeof(struct at91_priv)); | ||
1091 | if (!dev) { | ||
1092 | err = -ENOMEM; | ||
1093 | goto exit_iounmap; | ||
1094 | } | ||
1095 | |||
1096 | dev->netdev_ops = &at91_netdev_ops; | ||
1097 | dev->irq = irq; | ||
1098 | dev->flags |= IFF_ECHO; | ||
1099 | |||
1100 | priv = netdev_priv(dev); | ||
1101 | priv->can.clock.freq = clk_get_rate(clk); | ||
1102 | priv->can.bittiming_const = &at91_bittiming_const; | ||
1103 | priv->can.do_set_bittiming = at91_set_bittiming; | ||
1104 | priv->can.do_set_mode = at91_set_mode; | ||
1105 | priv->reg_base = addr; | ||
1106 | priv->dev = dev; | ||
1107 | priv->clk = clk; | ||
1108 | priv->pdata = pdev->dev.platform_data; | ||
1109 | |||
1110 | netif_napi_add(dev, &priv->napi, at91_poll, AT91_NAPI_WEIGHT); | ||
1111 | |||
1112 | dev_set_drvdata(&pdev->dev, dev); | ||
1113 | SET_NETDEV_DEV(dev, &pdev->dev); | ||
1114 | |||
1115 | err = register_candev(dev); | ||
1116 | if (err) { | ||
1117 | dev_err(&pdev->dev, "registering netdev failed\n"); | ||
1118 | goto exit_free; | ||
1119 | } | ||
1120 | |||
1121 | dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%d)\n", | ||
1122 | priv->reg_base, dev->irq); | ||
1123 | |||
1124 | return 0; | ||
1125 | |||
1126 | exit_free: | ||
1127 | free_netdev(dev); | ||
1128 | exit_iounmap: | ||
1129 | iounmap(addr); | ||
1130 | exit_release: | ||
1131 | release_mem_region(res->start, resource_size(res)); | ||
1132 | exit_put: | ||
1133 | clk_put(clk); | ||
1134 | exit: | ||
1135 | return err; | ||
1136 | } | ||
1137 | |||
1138 | static int __devexit at91_can_remove(struct platform_device *pdev) | ||
1139 | { | ||
1140 | struct net_device *dev = platform_get_drvdata(pdev); | ||
1141 | struct at91_priv *priv = netdev_priv(dev); | ||
1142 | struct resource *res; | ||
1143 | |||
1144 | unregister_netdev(dev); | ||
1145 | |||
1146 | platform_set_drvdata(pdev, NULL); | ||
1147 | |||
1148 | free_netdev(dev); | ||
1149 | |||
1150 | iounmap(priv->reg_base); | ||
1151 | |||
1152 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1153 | release_mem_region(res->start, resource_size(res)); | ||
1154 | |||
1155 | clk_put(priv->clk); | ||
1156 | |||
1157 | return 0; | ||
1158 | } | ||
1159 | |||
1160 | static struct platform_driver at91_can_driver = { | ||
1161 | .probe = at91_can_probe, | ||
1162 | .remove = __devexit_p(at91_can_remove), | ||
1163 | .driver = { | ||
1164 | .name = DRV_NAME, | ||
1165 | .owner = THIS_MODULE, | ||
1166 | }, | ||
1167 | }; | ||
1168 | |||
1169 | static int __init at91_can_module_init(void) | ||
1170 | { | ||
1171 | printk(KERN_INFO "%s netdevice driver\n", DRV_NAME); | ||
1172 | return platform_driver_register(&at91_can_driver); | ||
1173 | } | ||
1174 | |||
1175 | static void __exit at91_can_module_exit(void) | ||
1176 | { | ||
1177 | platform_driver_unregister(&at91_can_driver); | ||
1178 | printk(KERN_INFO "%s: driver removed\n", DRV_NAME); | ||
1179 | } | ||
1180 | |||
1181 | module_init(at91_can_module_init); | ||
1182 | module_exit(at91_can_module_exit); | ||
1183 | |||
1184 | MODULE_AUTHOR("Marc Kleine-Budde <mkl@pengutronix.de>"); | ||
1185 | MODULE_LICENSE("GPL v2"); | ||
1186 | MODULE_DESCRIPTION(DRV_NAME " CAN netdevice driver"); | ||