author		David S. Miller <davem@davemloft.net>	2011-06-07 04:03:56 -0400
committer	David S. Miller <davem@davemloft.net>	2011-06-07 04:03:56 -0400
commit		cb2bad862e66eafbc97c26011ab7ab6cedc82810 (patch)
tree		b3c0d31062642421c3d5af17a61b6f89fa3d6816 /drivers/net/can
parent		a6b7a407865aab9f849dd99a71072b7cd1175116 (diff)
parent		b9e379bccda6913f7baece42cd249824c0758b97 (diff)
Merge branch 'can/at91-sam9x5' of git://git.pengutronix.de/git/mkl/linux-2.6
Diffstat (limited to 'drivers/net/can')
-rw-r--r--	drivers/net/can/Kconfig		  5
-rw-r--r--	drivers/net/can/at91_can.c	366
2 files changed, 259 insertions, 112 deletions
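
The at91_can.c changes below replace the compile-time AT91_MB_*/AT91_NEXT_* layout macros with per-SoC mailbox geometry (rx_first, rx_split, rx_last, tx_shift) selected at probe time. As a quick sanity check, the following standalone user-space sketch (not part of the patch; the program and its names are illustrative only) recomputes the derived masks from those four values for both supported SoCs, so they can be compared against the old SAM9263-only constants.

/*
 * Illustrative sketch only -- not kernel code and not part of this patch.
 * It mirrors the fields added to struct at91_devtype_data and recomputes
 * the values the removed compile-time macros used to encode.
 */
#include <stdio.h>

#define AT91_MB_MASK(i)	(((unsigned int)1 << (i)) - 1)

struct devtype_data {
	const char *name;
	unsigned int rx_first, rx_split, rx_last, tx_shift;
};

static const struct devtype_data devs[] = {
	{ "sam9263", 1, 8, 11, 2 },	/* matches the old AT91_MB_* macros */
	{ "sam9x5",  0, 4,  5, 1 },	/* newly added, smaller mailbox set */
};

int main(void)
{
	unsigned int i;

	for (i = 0; i < sizeof(devs) / sizeof(devs[0]); i++) {
		const struct devtype_data *d = &devs[i];
		unsigned int tx_first = d->rx_last + 1;
		unsigned int tx_last = tx_first + (1u << d->tx_shift) - 1;

		/* mailbox ranges: RX low/high split and TX window */
		printf("%s: rx mailboxes %u..%u (low group ends at %u), tx %u..%u\n",
		       d->name, d->rx_first, d->rx_last, d->rx_split - 1,
		       tx_first, tx_last);
		/* per-mailbox interrupt masks and the low-RX reactivation mask */
		printf("  irq_mb_rx=0x%04x irq_mb_tx=0x%04x rx_low_mask=0x%04x\n",
		       AT91_MB_MASK(d->rx_last + 1) & ~AT91_MB_MASK(d->rx_first),
		       AT91_MB_MASK(tx_last + 1) & ~AT91_MB_MASK(tx_first),
		       AT91_MB_MASK(d->rx_split) & ~AT91_MB_MASK(d->rx_first));
	}
	return 0;
}

For the SAM9263 entry this reproduces the old AT91_IRQ_MB_RX (0x0ffe), AT91_IRQ_MB_TX (0xf000) and AT91_MB_RX_LOW_MASK (0x00fe) values removed by the patch.
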
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 1d699e3df547..bbf06f77ee8c 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -58,9 +58,10 @@ config CAN_CALC_BITTIMING
 
 config CAN_AT91
 	tristate "Atmel AT91 onchip CAN controller"
-	depends on CAN_DEV && ARCH_AT91SAM9263
+	depends on CAN_DEV && (ARCH_AT91SAM9263 || ARCH_AT91SAM9X5)
 	---help---
-	  This is a driver for the SoC CAN controller in Atmel's AT91SAM9263.
+	  This is a driver for the SoC CAN controller in Atmel's AT91SAM9263
+	  and AT91SAM9X5 processors.
 
 config CAN_TI_HECC
 	depends on CAN_DEV && ARCH_OMAP3
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 74efb5a2ad41..121ede663e20 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -41,32 +41,7 @@
 
 #include <mach/board.h>
 
-#define AT91_NAPI_WEIGHT	11
-
-/*
- * RX/TX Mailbox split
- * don't dare to touch
- */
-#define AT91_MB_RX_NUM		11
-#define AT91_MB_TX_SHIFT	2
-
-#define AT91_MB_RX_FIRST	1
-#define AT91_MB_RX_LAST		(AT91_MB_RX_FIRST + AT91_MB_RX_NUM - 1)
-
-#define AT91_MB_RX_MASK(i)	((1 << (i)) - 1)
-#define AT91_MB_RX_SPLIT	8
-#define AT91_MB_RX_LOW_LAST	(AT91_MB_RX_SPLIT - 1)
-#define AT91_MB_RX_LOW_MASK	(AT91_MB_RX_MASK(AT91_MB_RX_SPLIT) & \
-				 ~AT91_MB_RX_MASK(AT91_MB_RX_FIRST))
-
-#define AT91_MB_TX_NUM		(1 << AT91_MB_TX_SHIFT)
-#define AT91_MB_TX_FIRST	(AT91_MB_RX_LAST + 1)
-#define AT91_MB_TX_LAST		(AT91_MB_TX_FIRST + AT91_MB_TX_NUM - 1)
-
-#define AT91_NEXT_PRIO_SHIFT	(AT91_MB_TX_SHIFT)
-#define AT91_NEXT_PRIO_MASK	(0xf << AT91_MB_TX_SHIFT)
-#define AT91_NEXT_MB_MASK	(AT91_MB_TX_NUM - 1)
-#define AT91_NEXT_MASK		((AT91_MB_TX_NUM - 1) | AT91_NEXT_PRIO_MASK)
+#define AT91_MB_MASK(i)		((1 << (i)) - 1)
 
 /* Common registers */
 enum at91_reg {
@@ -128,12 +103,6 @@ enum at91_mb_mode {
 };
 
 /* Interrupt mask bits */
-#define AT91_IRQ_MB_RX		((1 << (AT91_MB_RX_LAST + 1)) \
-				 - (1 << AT91_MB_RX_FIRST))
-#define AT91_IRQ_MB_TX		((1 << (AT91_MB_TX_LAST + 1)) \
-				 - (1 << AT91_MB_TX_FIRST))
-#define AT91_IRQ_MB_ALL		(AT91_IRQ_MB_RX | AT91_IRQ_MB_TX)
-
 #define AT91_IRQ_ERRA		(1 << 16)
 #define AT91_IRQ_WARN		(1 << 17)
 #define AT91_IRQ_ERRP		(1 << 18)
@@ -156,22 +125,51 @@ enum at91_mb_mode {
 
 #define AT91_IRQ_ALL		(0x1fffffff)
 
+enum at91_devtype {
+	AT91_DEVTYPE_SAM9263,
+	AT91_DEVTYPE_SAM9X5,
+};
+
+struct at91_devtype_data {
+	unsigned int rx_first;
+	unsigned int rx_split;
+	unsigned int rx_last;
+	unsigned int tx_shift;
+	enum at91_devtype type;
+};
+
 struct at91_priv {
 	struct can_priv can;		/* must be the first member! */
 	struct net_device *dev;
 	struct napi_struct napi;
 
 	void __iomem *reg_base;
 
 	u32 reg_sr;
 	unsigned int tx_next;
 	unsigned int tx_echo;
 	unsigned int rx_next;
+	struct at91_devtype_data devtype_data;
 
 	struct clk *clk;
 	struct at91_can_data *pdata;
 
 	canid_t mb0_id;
+};
+
+static const struct at91_devtype_data at91_devtype_data[] __devinitconst = {
+	[AT91_DEVTYPE_SAM9263] = {
+		.rx_first = 1,
+		.rx_split = 8,
+		.rx_last = 11,
+		.tx_shift = 2,
+	},
+	[AT91_DEVTYPE_SAM9X5] = {
+		.rx_first = 0,
+		.rx_split = 4,
+		.rx_last = 5,
+		.tx_shift = 1,
+	},
 };
 
 static struct can_bittiming_const at91_bittiming_const = {
@@ -186,19 +184,111 @@ static struct can_bittiming_const at91_bittiming_const = {
 	.brp_inc = 1,
 };
 
-static inline int get_tx_next_mb(const struct at91_priv *priv)
-{
-	return (priv->tx_next & AT91_NEXT_MB_MASK) + AT91_MB_TX_FIRST;
-}
-
-static inline int get_tx_next_prio(const struct at91_priv *priv)
-{
-	return (priv->tx_next >> AT91_NEXT_PRIO_SHIFT) & 0xf;
-}
-
-static inline int get_tx_echo_mb(const struct at91_priv *priv)
-{
-	return (priv->tx_echo & AT91_NEXT_MB_MASK) + AT91_MB_TX_FIRST;
+#define AT91_IS(_model) \
+static inline int at91_is_sam##_model(const struct at91_priv *priv) \
+{ \
+	return priv->devtype_data.type == AT91_DEVTYPE_SAM##_model; \
+}
+
+AT91_IS(9263);
+AT91_IS(9X5);
+
+static inline unsigned int get_mb_rx_first(const struct at91_priv *priv)
+{
+	return priv->devtype_data.rx_first;
+}
+
+static inline unsigned int get_mb_rx_last(const struct at91_priv *priv)
+{
+	return priv->devtype_data.rx_last;
+}
+
+static inline unsigned int get_mb_rx_split(const struct at91_priv *priv)
+{
+	return priv->devtype_data.rx_split;
+}
+
+static inline unsigned int get_mb_rx_num(const struct at91_priv *priv)
+{
+	return get_mb_rx_last(priv) - get_mb_rx_first(priv) + 1;
+}
+
+static inline unsigned int get_mb_rx_low_last(const struct at91_priv *priv)
+{
+	return get_mb_rx_split(priv) - 1;
+}
+
+static inline unsigned int get_mb_rx_low_mask(const struct at91_priv *priv)
+{
+	return AT91_MB_MASK(get_mb_rx_split(priv)) &
+		~AT91_MB_MASK(get_mb_rx_first(priv));
+}
+
+static inline unsigned int get_mb_tx_shift(const struct at91_priv *priv)
+{
+	return priv->devtype_data.tx_shift;
+}
+
+static inline unsigned int get_mb_tx_num(const struct at91_priv *priv)
+{
+	return 1 << get_mb_tx_shift(priv);
+}
+
+static inline unsigned int get_mb_tx_first(const struct at91_priv *priv)
+{
+	return get_mb_rx_last(priv) + 1;
+}
+
+static inline unsigned int get_mb_tx_last(const struct at91_priv *priv)
+{
+	return get_mb_tx_first(priv) + get_mb_tx_num(priv) - 1;
+}
+
+static inline unsigned int get_next_prio_shift(const struct at91_priv *priv)
+{
+	return get_mb_tx_shift(priv);
+}
+
+static inline unsigned int get_next_prio_mask(const struct at91_priv *priv)
+{
+	return 0xf << get_mb_tx_shift(priv);
+}
+
+static inline unsigned int get_next_mb_mask(const struct at91_priv *priv)
+{
+	return AT91_MB_MASK(get_mb_tx_shift(priv));
+}
+
+static inline unsigned int get_next_mask(const struct at91_priv *priv)
+{
+	return get_next_mb_mask(priv) | get_next_prio_mask(priv);
+}
+
+static inline unsigned int get_irq_mb_rx(const struct at91_priv *priv)
+{
+	return AT91_MB_MASK(get_mb_rx_last(priv) + 1) &
+		~AT91_MB_MASK(get_mb_rx_first(priv));
+}
+
+static inline unsigned int get_irq_mb_tx(const struct at91_priv *priv)
+{
+	return AT91_MB_MASK(get_mb_tx_last(priv) + 1) &
+		~AT91_MB_MASK(get_mb_tx_first(priv));
+}
+
+static inline unsigned int get_tx_next_mb(const struct at91_priv *priv)
+{
+	return (priv->tx_next & get_next_mb_mask(priv)) + get_mb_tx_first(priv);
+}
+
+static inline unsigned int get_tx_next_prio(const struct at91_priv *priv)
+{
+	return (priv->tx_next >> get_next_prio_shift(priv)) & 0xf;
+}
+
+static inline unsigned int get_tx_echo_mb(const struct at91_priv *priv)
+{
+	return (priv->tx_echo & get_next_mb_mask(priv)) + get_mb_tx_first(priv);
 }
 
 static inline u32 at91_read(const struct at91_priv *priv, enum at91_reg reg)
@@ -259,29 +349,29 @@ static void at91_setup_mailboxes(struct net_device *dev)
 	 * overflow.
 	 */
 	reg_mid = at91_can_id_to_reg_mid(priv->mb0_id);
-	for (i = 0; i < AT91_MB_RX_FIRST; i++) {
+	for (i = 0; i < get_mb_rx_first(priv); i++) {
 		set_mb_mode(priv, i, AT91_MB_MODE_DISABLED);
 		at91_write(priv, AT91_MID(i), reg_mid);
 		at91_write(priv, AT91_MCR(i), 0x0);	/* clear dlc */
 	}
 
-	for (i = AT91_MB_RX_FIRST; i < AT91_MB_RX_LAST; i++)
+	for (i = get_mb_rx_first(priv); i < get_mb_rx_last(priv); i++)
 		set_mb_mode(priv, i, AT91_MB_MODE_RX);
-	set_mb_mode(priv, AT91_MB_RX_LAST, AT91_MB_MODE_RX_OVRWR);
+	set_mb_mode(priv, get_mb_rx_last(priv), AT91_MB_MODE_RX_OVRWR);
 
 	/* reset acceptance mask and id register */
-	for (i = AT91_MB_RX_FIRST; i <= AT91_MB_RX_LAST; i++) {
-		at91_write(priv, AT91_MAM(i), 0x0 );
+	for (i = get_mb_rx_first(priv); i <= get_mb_rx_last(priv); i++) {
+		at91_write(priv, AT91_MAM(i), 0x0);
 		at91_write(priv, AT91_MID(i), AT91_MID_MIDE);
 	}
 
 	/* The last 4 mailboxes are used for transmitting. */
-	for (i = AT91_MB_TX_FIRST; i <= AT91_MB_TX_LAST; i++)
+	for (i = get_mb_tx_first(priv); i <= get_mb_tx_last(priv); i++)
 		set_mb_mode_prio(priv, i, AT91_MB_MODE_TX, 0);
 
 	/* Reset tx and rx helper pointers */
 	priv->tx_next = priv->tx_echo = 0;
-	priv->rx_next = AT91_MB_RX_FIRST;
+	priv->rx_next = get_mb_rx_first(priv);
 }
 
 static int at91_set_bittiming(struct net_device *dev)
@@ -336,7 +426,7 @@ static void at91_chip_start(struct net_device *dev)
 	priv->can.state = CAN_STATE_ERROR_ACTIVE;
 
 	/* Enable interrupts */
-	reg_ier = AT91_IRQ_MB_RX | AT91_IRQ_ERRP | AT91_IRQ_ERR_FRAME;
+	reg_ier = get_irq_mb_rx(priv) | AT91_IRQ_ERRP | AT91_IRQ_ERR_FRAME;
 	at91_write(priv, AT91_IDR, AT91_IRQ_ALL);
 	at91_write(priv, AT91_IER, reg_ier);
 }
@@ -375,8 +465,8 @@ static void at91_chip_stop(struct net_device *dev, enum can_state state)
 * mailbox, but without the offset AT91_MB_TX_FIRST. The lower bits
 * encode the mailbox number, the upper 4 bits the mailbox priority:
 *
- * priv->tx_next = (prio << AT91_NEXT_PRIO_SHIFT) ||
- *                 (mb - AT91_MB_TX_FIRST);
+ * priv->tx_next = (prio << get_next_prio_shift(priv)) |
+ *                 (mb - get_mb_tx_first(priv));
 *
 */
 static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -417,7 +507,7 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	stats->tx_bytes += cf->can_dlc;
 
 	/* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */
-	can_put_echo_skb(skb, dev, mb - AT91_MB_TX_FIRST);
+	can_put_echo_skb(skb, dev, mb - get_mb_tx_first(priv));
 
 	/*
 	 * we have to stop the queue and deliver all messages in case
@@ -430,7 +520,7 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	priv->tx_next++;
 	if (!(at91_read(priv, AT91_MSR(get_tx_next_mb(priv))) &
 	      AT91_MSR_MRDY) ||
-	    (priv->tx_next & AT91_NEXT_MASK) == 0)
+	    (priv->tx_next & get_next_mask(priv)) == 0)
 		netif_stop_queue(dev);
 
 	/* Enable interrupt for this mailbox */
@@ -447,7 +537,7 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
 */
 static inline void at91_activate_rx_low(const struct at91_priv *priv)
 {
-	u32 mask = AT91_MB_RX_LOW_MASK;
+	u32 mask = get_mb_rx_low_mask(priv);
 	at91_write(priv, AT91_TCR, mask);
 }
 
@@ -513,17 +603,19 @@ static void at91_read_mb(struct net_device *dev, unsigned int mb,
 	cf->can_id = (reg_mid >> 18) & CAN_SFF_MASK;
 
 	reg_msr = at91_read(priv, AT91_MSR(mb));
-	if (reg_msr & AT91_MSR_MRTR)
-		cf->can_id |= CAN_RTR_FLAG;
 	cf->can_dlc = get_can_dlc((reg_msr >> 16) & 0xf);
 
-	*(u32 *)(cf->data + 0) = at91_read(priv, AT91_MDL(mb));
-	*(u32 *)(cf->data + 4) = at91_read(priv, AT91_MDH(mb));
+	if (reg_msr & AT91_MSR_MRTR)
+		cf->can_id |= CAN_RTR_FLAG;
+	else {
+		*(u32 *)(cf->data + 0) = at91_read(priv, AT91_MDL(mb));
+		*(u32 *)(cf->data + 4) = at91_read(priv, AT91_MDH(mb));
+	}
 
 	/* allow RX of extended frames */
 	at91_write(priv, AT91_MID(mb), AT91_MID_MIDE);
 
-	if (unlikely(mb == AT91_MB_RX_LAST && reg_msr & AT91_MSR_MMI))
+	if (unlikely(mb == get_mb_rx_last(priv) && reg_msr & AT91_MSR_MMI))
 		at91_rx_overflow_err(dev);
 }
 
@@ -561,8 +653,9 @@ static void at91_read_msg(struct net_device *dev, unsigned int mb)
 *
 * Theory of Operation:
 *
- * 11 of the 16 mailboxes on the chip are reserved for RX. we split
- * them into 2 groups. The lower group holds 7 and upper 4 mailboxes.
+ * About 3/4 of the mailboxes (get_mb_rx_first()...get_mb_rx_last())
+ * on the chip are reserved for RX. We split them into 2 groups. The
+ * lower group ranges from get_mb_rx_first() to get_mb_rx_low_last().
 *
 * Like it or not, but the chip always saves a received CAN message
 * into the first free mailbox it finds (starting with the
@@ -610,23 +703,23 @@ static int at91_poll_rx(struct net_device *dev, int quota)
 	unsigned int mb;
 	int received = 0;
 
-	if (priv->rx_next > AT91_MB_RX_LOW_LAST &&
-	    reg_sr & AT91_MB_RX_LOW_MASK)
+	if (priv->rx_next > get_mb_rx_low_last(priv) &&
+	    reg_sr & get_mb_rx_low_mask(priv))
 		netdev_info(dev,
 			"order of incoming frames cannot be guaranteed\n");
 
 again:
-	for (mb = find_next_bit(addr, AT91_MB_RX_LAST + 1, priv->rx_next);
-	     mb < AT91_MB_RX_LAST + 1 && quota > 0;
+	for (mb = find_next_bit(addr, get_mb_tx_first(priv), priv->rx_next);
+	     mb < get_mb_tx_first(priv) && quota > 0;
 	     reg_sr = at91_read(priv, AT91_SR),
-	     mb = find_next_bit(addr, AT91_MB_RX_LAST + 1, ++priv->rx_next)) {
+	     mb = find_next_bit(addr, get_mb_tx_first(priv), ++priv->rx_next)) {
 		at91_read_msg(dev, mb);
 
 		/* reactivate mailboxes */
-		if (mb == AT91_MB_RX_LOW_LAST)
+		if (mb == get_mb_rx_low_last(priv))
 			/* all lower mailboxed, if just finished it */
 			at91_activate_rx_low(priv);
-		else if (mb > AT91_MB_RX_LOW_LAST)
+		else if (mb > get_mb_rx_low_last(priv))
 			/* only the mailbox we read */
 			at91_activate_rx_mb(priv, mb);
 
@@ -635,9 +728,9 @@ static int at91_poll_rx(struct net_device *dev, int quota)
 	}
 
 	/* upper group completed, look again in lower */
-	if (priv->rx_next > AT91_MB_RX_LOW_LAST &&
-	    quota > 0 && mb > AT91_MB_RX_LAST) {
-		priv->rx_next = AT91_MB_RX_FIRST;
+	if (priv->rx_next > get_mb_rx_low_last(priv) &&
+	    quota > 0 && mb > get_mb_rx_last(priv)) {
+		priv->rx_next = get_mb_rx_first(priv);
 		goto again;
 	}
 
@@ -720,7 +813,7 @@ static int at91_poll(struct napi_struct *napi, int quota)
 	u32 reg_sr = at91_read(priv, AT91_SR);
 	int work_done = 0;
 
-	if (reg_sr & AT91_IRQ_MB_RX)
+	if (reg_sr & get_irq_mb_rx(priv))
 		work_done += at91_poll_rx(dev, quota - work_done);
 
 	/*
@@ -734,7 +827,7 @@ static int at91_poll(struct napi_struct *napi, int quota)
 	if (work_done < quota) {
 		/* enable IRQs for frame errors and all mailboxes >= rx_next */
 		u32 reg_ier = AT91_IRQ_ERR_FRAME;
-		reg_ier |= AT91_IRQ_MB_RX & ~AT91_MB_RX_MASK(priv->rx_next);
+		reg_ier |= get_irq_mb_rx(priv) & ~AT91_MB_MASK(priv->rx_next);
 
 		napi_complete(napi);
 		at91_write(priv, AT91_IER, reg_ier);
@@ -783,7 +876,7 @@ static void at91_irq_tx(struct net_device *dev, u32 reg_sr)
 		if (likely(reg_msr & AT91_MSR_MRDY &&
 			   ~reg_msr & AT91_MSR_MABT)) {
 			/* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */
-			can_get_echo_skb(dev, mb - AT91_MB_TX_FIRST);
+			can_get_echo_skb(dev, mb - get_mb_tx_first(priv));
 			dev->stats.tx_packets++;
 		}
 	}
@@ -793,8 +886,8 @@ static void at91_irq_tx(struct net_device *dev, u32 reg_sr)
 	 * we get a TX int for the last can frame directly before a
 	 * wrap around.
 	 */
-	if ((priv->tx_next & AT91_NEXT_MASK) != 0 ||
-	    (priv->tx_echo & AT91_NEXT_MASK) == 0)
+	if ((priv->tx_next & get_next_mask(priv)) != 0 ||
+	    (priv->tx_echo & get_next_mask(priv)) == 0)
 		netif_wake_queue(dev);
 }
 
@@ -906,6 +999,29 @@ static void at91_irq_err_state(struct net_device *dev,
 	at91_write(priv, AT91_IER, reg_ier);
 }
 
+static int at91_get_state_by_bec(const struct net_device *dev,
+		enum can_state *state)
+{
+	struct can_berr_counter bec;
+	int err;
+
+	err = at91_get_berr_counter(dev, &bec);
+	if (err)
+		return err;
+
+	if (bec.txerr < 96 && bec.rxerr < 96)
+		*state = CAN_STATE_ERROR_ACTIVE;
+	else if (bec.txerr < 128 && bec.rxerr < 128)
+		*state = CAN_STATE_ERROR_WARNING;
+	else if (bec.txerr < 256 && bec.rxerr < 256)
+		*state = CAN_STATE_ERROR_PASSIVE;
+	else
+		*state = CAN_STATE_BUS_OFF;
+
+	return 0;
+}
+
+
 static void at91_irq_err(struct net_device *dev)
 {
 	struct at91_priv *priv = netdev_priv(dev);
@@ -913,21 +1029,28 @@ static void at91_irq_err(struct net_device *dev)
 	struct can_frame *cf;
 	enum can_state new_state;
 	u32 reg_sr;
+	int err;
 
-	reg_sr = at91_read(priv, AT91_SR);
-
-	/* we need to look at the unmasked reg_sr */
-	if (unlikely(reg_sr & AT91_IRQ_BOFF))
-		new_state = CAN_STATE_BUS_OFF;
-	else if (unlikely(reg_sr & AT91_IRQ_ERRP))
-		new_state = CAN_STATE_ERROR_PASSIVE;
-	else if (unlikely(reg_sr & AT91_IRQ_WARN))
-		new_state = CAN_STATE_ERROR_WARNING;
-	else if (likely(reg_sr & AT91_IRQ_ERRA))
-		new_state = CAN_STATE_ERROR_ACTIVE;
-	else {
-		netdev_err(dev, "BUG! hardware in undefined state\n");
-		return;
+	if (at91_is_sam9263(priv)) {
+		reg_sr = at91_read(priv, AT91_SR);
+
+		/* we need to look at the unmasked reg_sr */
+		if (unlikely(reg_sr & AT91_IRQ_BOFF))
+			new_state = CAN_STATE_BUS_OFF;
+		else if (unlikely(reg_sr & AT91_IRQ_ERRP))
+			new_state = CAN_STATE_ERROR_PASSIVE;
+		else if (unlikely(reg_sr & AT91_IRQ_WARN))
+			new_state = CAN_STATE_ERROR_WARNING;
+		else if (likely(reg_sr & AT91_IRQ_ERRA))
+			new_state = CAN_STATE_ERROR_ACTIVE;
+		else {
+			netdev_err(dev, "BUG! hardware in undefined state\n");
+			return;
+		}
+	} else {
+		err = at91_get_state_by_bec(dev, &new_state);
+		if (err)
+			return;
 	}
 
 	/* state hasn't changed */
@@ -968,19 +1091,19 @@ static irqreturn_t at91_irq(int irq, void *dev_id)
 	handled = IRQ_HANDLED;
 
 	/* Receive or error interrupt? -> napi */
-	if (reg_sr & (AT91_IRQ_MB_RX | AT91_IRQ_ERR_FRAME)) {
+	if (reg_sr & (get_irq_mb_rx(priv) | AT91_IRQ_ERR_FRAME)) {
 		/*
 		 * The error bits are clear on read,
 		 * save for later use.
 		 */
 		priv->reg_sr = reg_sr;
 		at91_write(priv, AT91_IDR,
-			   AT91_IRQ_MB_RX | AT91_IRQ_ERR_FRAME);
+			   get_irq_mb_rx(priv) | AT91_IRQ_ERR_FRAME);
 		napi_schedule(&priv->napi);
 	}
 
 	/* Transmission complete interrupt */
-	if (reg_sr & AT91_IRQ_MB_TX)
+	if (reg_sr & get_irq_mb_tx(priv))
 		at91_irq_tx(dev, reg_sr);
 
 	at91_irq_err(dev);
@@ -1123,6 +1246,8 @@ static struct attribute_group at91_sysfs_attr_group = {
 
 static int __devinit at91_can_probe(struct platform_device *pdev)
 {
+	const struct at91_devtype_data *devtype_data;
+	enum at91_devtype devtype;
 	struct net_device *dev;
 	struct at91_priv *priv;
 	struct resource *res;
@@ -1130,6 +1255,9 @@ static int __devinit at91_can_probe(struct platform_device *pdev)
 	void __iomem *addr;
 	int err, irq;
 
+	devtype = pdev->id_entry->driver_data;
+	devtype_data = &at91_devtype_data[devtype];
+
 	clk = clk_get(&pdev->dev, "can_clk");
 	if (IS_ERR(clk)) {
 		dev_err(&pdev->dev, "no clock defined\n");
@@ -1157,7 +1285,8 @@ static int __devinit at91_can_probe(struct platform_device *pdev)
 		goto exit_release;
 	}
 
-	dev = alloc_candev(sizeof(struct at91_priv), AT91_MB_TX_NUM);
+	dev = alloc_candev(sizeof(struct at91_priv),
+			   1 << devtype_data->tx_shift);
 	if (!dev) {
 		err = -ENOMEM;
 		goto exit_iounmap;
@@ -1166,7 +1295,6 @@ static int __devinit at91_can_probe(struct platform_device *pdev)
 	dev->netdev_ops	= &at91_netdev_ops;
 	dev->irq = irq;
 	dev->flags |= IFF_ECHO;
-	dev->sysfs_groups[0] = &at91_sysfs_attr_group;
 
 	priv = netdev_priv(dev);
 	priv->can.clock.freq = clk_get_rate(clk);
@@ -1174,13 +1302,18 @@ static int __devinit at91_can_probe(struct platform_device *pdev)
 	priv->can.do_set_mode = at91_set_mode;
 	priv->can.do_get_berr_counter = at91_get_berr_counter;
 	priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
-	priv->reg_base = addr;
 	priv->dev = dev;
+	priv->reg_base = addr;
+	priv->devtype_data = *devtype_data;
+	priv->devtype_data.type = devtype;
 	priv->clk = clk;
 	priv->pdata = pdev->dev.platform_data;
 	priv->mb0_id = 0x7ff;
 
-	netif_napi_add(dev, &priv->napi, at91_poll, AT91_NAPI_WEIGHT);
+	netif_napi_add(dev, &priv->napi, at91_poll, get_mb_rx_num(priv));
+
+	if (at91_is_sam9263(priv))
+		dev->sysfs_groups[0] = &at91_sysfs_attr_group;
 
 	dev_set_drvdata(&pdev->dev, dev);
 	SET_NETDEV_DEV(dev, &pdev->dev);
@@ -1230,13 +1363,26 @@ static int __devexit at91_can_remove(struct platform_device *pdev)
 	return 0;
 }
 
+static const struct platform_device_id at91_can_id_table[] = {
+	{
+		.name = "at91_can",
+		.driver_data = AT91_DEVTYPE_SAM9263,
+	}, {
+		.name = "at91sam9x5_can",
+		.driver_data = AT91_DEVTYPE_SAM9X5,
+	}, {
+		/* sentinel */
+	}
+};
+
 static struct platform_driver at91_can_driver = {
 	.probe = at91_can_probe,
 	.remove = __devexit_p(at91_can_remove),
 	.driver = {
 		.name = KBUILD_MODNAME,
 		.owner = THIS_MODULE,
 	},
+	.id_table = at91_can_id_table,
 };
 
 static int __init at91_can_module_init(void)