author    Marc Kleine-Budde <mkl@pengutronix.de>    2011-05-03 11:31:40 -0400
committer Marc Kleine-Budde <mkl@pengutronix.de>    2011-06-06 05:02:17 -0400
commit    79008997e232ad1d871bb6fedfb7fbd77ea95af8 (patch)
tree      7be3b22fa90ea906398a25cfee6b240630e48463 /drivers/net/can
parent    b049994d0f3a19c1706627117a7269ce5bd335ab (diff)
can: at91_can: convert derived mailbox constants into functions
This is the first of two patches converting the at91_can driver from a
compile-time mailbox setup to a dynamic one. This patch converts all derived
mailbox constants to functions.

Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
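In outline, the patch replaces each derived #define with a static inline helper
that takes the driver's private data, so that the follow-up patch can return
values chosen at runtime instead of at build time. The sketch below is
illustration only, not part of the driver: AT91_MB_MASK() and AT91_MB_RX_SPLIT
are copied from the hunks further down, while struct at91_priv is reduced to an
opaque stub.

/* Illustration only: the "constant becomes accessor" pattern of this patch. */
#define AT91_MB_MASK(i)         ((1 << (i)) - 1)
#define AT91_MB_RX_SPLIT        8

struct at91_priv;       /* driver private data, left opaque in this sketch */

/* before: #define AT91_MB_RX_LOW_LAST  (AT91_MB_RX_SPLIT - 1) */
static inline unsigned int get_mb_rx_low_last(const struct at91_priv *priv)
{
        /* still a compile-time value here; taking priv lets a later
         * patch derive it from per-device configuration instead */
        return AT91_MB_RX_SPLIT - 1;
}

Callers change accordingly: a comparison against AT91_MB_RX_LOW_LAST becomes a
call to get_mb_rx_low_last(priv), as in the at91_poll_rx() hunk below.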
Diffstat (limited to 'drivers/net/can')
-rw-r--r--    drivers/net/can/at91_can.c    125
1 file changed, 83 insertions(+), 42 deletions(-)
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 86994845a1d4..900ff67ed7f3 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -54,18 +54,7 @@
 
 #define AT91_MB_MASK(i)		((1 << (i)) - 1)
 #define AT91_MB_RX_SPLIT	8
-#define AT91_MB_RX_LOW_LAST	(AT91_MB_RX_SPLIT - 1)
-#define AT91_MB_RX_LOW_MASK	(AT91_MB_MASK(AT91_MB_RX_SPLIT) & \
-				 ~AT91_MB_MASK(AT91_MB_RX_FIRST))
 
-#define AT91_MB_TX_NUM		(1 << AT91_MB_TX_SHIFT)
-#define AT91_MB_TX_FIRST	(AT91_MB_RX_LAST + 1)
-#define AT91_MB_TX_LAST		(AT91_MB_TX_FIRST + AT91_MB_TX_NUM - 1)
-
-#define AT91_NEXT_PRIO_SHIFT	(AT91_MB_TX_SHIFT)
-#define AT91_NEXT_PRIO_MASK	(0xf << AT91_MB_TX_SHIFT)
-#define AT91_NEXT_MB_MASK	(AT91_MB_MASK(AT91_MB_TX_SHIFT))
-#define AT91_NEXT_MASK		((AT91_MB_TX_NUM - 1) | AT91_NEXT_PRIO_MASK)
 
 /* Common registers */
 enum at91_reg {
@@ -127,12 +116,6 @@ enum at91_mb_mode {
 };
 
 /* Interrupt mask bits */
-#define AT91_IRQ_MB_RX		(AT91_MB_MASK(AT91_MB_RX_LAST + 1) & \
-				 ~AT91_MB_MASK(AT91_MB_RX_FIRST))
-#define AT91_IRQ_MB_TX		(AT91_MB_MASK(AT91_MB_TX_LAST + 1) & \
-				 ~AT91_MB_MASK(AT91_MB_TX_FIRST))
-#define AT91_IRQ_MB_ALL		(AT91_IRQ_MB_RX | AT91_IRQ_MB_TX)
-
 #define AT91_IRQ_ERRA		(1 << 16)
 #define AT91_IRQ_WARN		(1 << 17)
 #define AT91_IRQ_ERRP		(1 << 18)
@@ -185,19 +168,77 @@ static struct can_bittiming_const at91_bittiming_const = {
 	.brp_inc	= 1,
 };
 
+static inline unsigned int get_mb_rx_low_last(const struct at91_priv *priv)
+{
+	return AT91_MB_RX_SPLIT - 1;
+}
+
+static inline unsigned int get_mb_rx_low_mask(const struct at91_priv *priv)
+{
+	return AT91_MB_MASK(AT91_MB_RX_SPLIT) &
+		~AT91_MB_MASK(AT91_MB_RX_FIRST);
+}
+
+static inline unsigned int get_mb_tx_num(const struct at91_priv *priv)
+{
+	return 1 << AT91_MB_TX_SHIFT;
+}
+
+static inline unsigned int get_mb_tx_first(const struct at91_priv *priv)
+{
+	return AT91_MB_RX_LAST + 1;
+}
+
+static inline unsigned int get_mb_tx_last(const struct at91_priv *priv)
+{
+	return get_mb_tx_first(priv) + get_mb_tx_num(priv) - 1;
+}
+
+static inline unsigned int get_next_prio_shift(const struct at91_priv *priv)
+{
+	return AT91_MB_TX_SHIFT;
+}
+
+static inline unsigned int get_next_prio_mask(const struct at91_priv *priv)
+{
+	return 0xf << AT91_MB_TX_SHIFT;
+}
+
+static inline unsigned int get_next_mb_mask(const struct at91_priv *priv)
+{
+	return AT91_MB_MASK(AT91_MB_TX_SHIFT);
+}
+
+static inline unsigned int get_next_mask(const struct at91_priv *priv)
+{
+	return get_next_mb_mask(priv) | get_next_prio_mask(priv);
+}
+
+static inline unsigned int get_irq_mb_rx(const struct at91_priv *priv)
+{
+	return AT91_MB_MASK(AT91_MB_RX_LAST + 1) &
+		~AT91_MB_MASK(AT91_MB_RX_FIRST);
+}
+
+static inline unsigned int get_irq_mb_tx(const struct at91_priv *priv)
+{
+	return AT91_MB_MASK(get_mb_tx_last(priv) + 1) &
+		~AT91_MB_MASK(get_mb_tx_first(priv));
+}
+
 static inline unsigned int get_tx_next_mb(const struct at91_priv *priv)
 {
-	return (priv->tx_next & AT91_NEXT_MB_MASK) + AT91_MB_TX_FIRST;
+	return (priv->tx_next & get_next_mb_mask(priv)) + get_mb_tx_first(priv);
 }
 
 static inline unsigned int get_tx_next_prio(const struct at91_priv *priv)
 {
-	return (priv->tx_next >> AT91_NEXT_PRIO_SHIFT) & 0xf;
+	return (priv->tx_next >> get_next_prio_shift(priv)) & 0xf;
 }
 
 static inline unsigned int get_tx_echo_mb(const struct at91_priv *priv)
 {
-	return (priv->tx_echo & AT91_NEXT_MB_MASK) + AT91_MB_TX_FIRST;
+	return (priv->tx_echo & get_next_mb_mask(priv)) + get_mb_tx_first(priv);
 }
 
 static inline u32 at91_read(const struct at91_priv *priv, enum at91_reg reg)
@@ -275,7 +316,7 @@ static void at91_setup_mailboxes(struct net_device *dev)
 	}
 
 	/* The last 4 mailboxes are used for transmitting. */
-	for (i = AT91_MB_TX_FIRST; i <= AT91_MB_TX_LAST; i++)
+	for (i = get_mb_tx_first(priv); i <= get_mb_tx_last(priv); i++)
 		set_mb_mode_prio(priv, i, AT91_MB_MODE_TX, 0);
 
 	/* Reset tx and rx helper pointers */
@@ -335,7 +376,7 @@ static void at91_chip_start(struct net_device *dev)
 	priv->can.state = CAN_STATE_ERROR_ACTIVE;
 
 	/* Enable interrupts */
-	reg_ier = AT91_IRQ_MB_RX | AT91_IRQ_ERRP | AT91_IRQ_ERR_FRAME;
+	reg_ier = get_irq_mb_rx(priv) | AT91_IRQ_ERRP | AT91_IRQ_ERR_FRAME;
 	at91_write(priv, AT91_IDR, AT91_IRQ_ALL);
 	at91_write(priv, AT91_IER, reg_ier);
 }
@@ -416,7 +457,7 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	stats->tx_bytes += cf->can_dlc;
 
 	/* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */
-	can_put_echo_skb(skb, dev, mb - AT91_MB_TX_FIRST);
+	can_put_echo_skb(skb, dev, mb - get_mb_tx_first(priv));
 
 	/*
 	 * we have to stop the queue and deliver all messages in case
@@ -429,7 +470,7 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	priv->tx_next++;
 	if (!(at91_read(priv, AT91_MSR(get_tx_next_mb(priv))) &
 	      AT91_MSR_MRDY) ||
-	    (priv->tx_next & AT91_NEXT_MASK) == 0)
+	    (priv->tx_next & get_next_mask(priv)) == 0)
 		netif_stop_queue(dev);
 
 	/* Enable interrupt for this mailbox */
@@ -446,7 +487,7 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
  */
 static inline void at91_activate_rx_low(const struct at91_priv *priv)
 {
-	u32 mask = AT91_MB_RX_LOW_MASK;
+	u32 mask = get_mb_rx_low_mask(priv);
 	at91_write(priv, AT91_TCR, mask);
 }
 
@@ -611,23 +652,23 @@ static int at91_poll_rx(struct net_device *dev, int quota)
 	unsigned int mb;
 	int received = 0;
 
-	if (priv->rx_next > AT91_MB_RX_LOW_LAST &&
-	    reg_sr & AT91_MB_RX_LOW_MASK)
+	if (priv->rx_next > get_mb_rx_low_last(priv) &&
+	    reg_sr & get_mb_rx_low_mask(priv))
 		netdev_info(dev,
 			"order of incoming frames cannot be guaranteed\n");
 
  again:
-	for (mb = find_next_bit(addr, AT91_MB_RX_LAST + 1, priv->rx_next);
-	     mb < AT91_MB_RX_LAST + 1 && quota > 0;
+	for (mb = find_next_bit(addr, get_mb_tx_first(priv), priv->rx_next);
+	     mb < get_mb_tx_first(priv) && quota > 0;
 	     reg_sr = at91_read(priv, AT91_SR),
-	     mb = find_next_bit(addr, AT91_MB_RX_LAST + 1, ++priv->rx_next)) {
+	     mb = find_next_bit(addr, get_mb_tx_first(priv), ++priv->rx_next)) {
 		at91_read_msg(dev, mb);
 
 		/* reactivate mailboxes */
-		if (mb == AT91_MB_RX_LOW_LAST)
+		if (mb == get_mb_rx_low_last(priv))
 			/* all lower mailboxed, if just finished it */
 			at91_activate_rx_low(priv);
-		else if (mb > AT91_MB_RX_LOW_LAST)
+		else if (mb > get_mb_rx_low_last(priv))
 			/* only the mailbox we read */
 			at91_activate_rx_mb(priv, mb);
 
@@ -636,7 +677,7 @@ static int at91_poll_rx(struct net_device *dev, int quota)
 	}
 
 	/* upper group completed, look again in lower */
-	if (priv->rx_next > AT91_MB_RX_LOW_LAST &&
+	if (priv->rx_next > get_mb_rx_low_last(priv) &&
 	    quota > 0 && mb > AT91_MB_RX_LAST) {
 		priv->rx_next = AT91_MB_RX_FIRST;
 		goto again;
@@ -721,7 +762,7 @@ static int at91_poll(struct napi_struct *napi, int quota)
 	u32 reg_sr = at91_read(priv, AT91_SR);
 	int work_done = 0;
 
-	if (reg_sr & AT91_IRQ_MB_RX)
+	if (reg_sr & get_irq_mb_rx(priv))
 		work_done += at91_poll_rx(dev, quota - work_done);
 
 	/*
@@ -735,7 +776,7 @@ static int at91_poll(struct napi_struct *napi, int quota)
 	if (work_done < quota) {
 		/* enable IRQs for frame errors and all mailboxes >= rx_next */
 		u32 reg_ier = AT91_IRQ_ERR_FRAME;
-		reg_ier |= AT91_IRQ_MB_RX & ~AT91_MB_MASK(priv->rx_next);
+		reg_ier |= get_irq_mb_rx(priv) & ~AT91_MB_MASK(priv->rx_next);
 
 		napi_complete(napi);
 		at91_write(priv, AT91_IER, reg_ier);
@@ -784,7 +825,7 @@ static void at91_irq_tx(struct net_device *dev, u32 reg_sr)
 		if (likely(reg_msr & AT91_MSR_MRDY &&
 			   ~reg_msr & AT91_MSR_MABT)) {
 			/* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */
-			can_get_echo_skb(dev, mb - AT91_MB_TX_FIRST);
+			can_get_echo_skb(dev, mb - get_mb_tx_first(priv));
 			dev->stats.tx_packets++;
 		}
 	}
@@ -794,8 +835,8 @@ static void at91_irq_tx(struct net_device *dev, u32 reg_sr)
 	 * we get a TX int for the last can frame directly before a
 	 * wrap around.
 	 */
-	if ((priv->tx_next & AT91_NEXT_MASK) != 0 ||
-	    (priv->tx_echo & AT91_NEXT_MASK) == 0)
+	if ((priv->tx_next & get_next_mask(priv)) != 0 ||
+	    (priv->tx_echo & get_next_mask(priv)) == 0)
 		netif_wake_queue(dev);
 }
 
@@ -969,19 +1010,19 @@ static irqreturn_t at91_irq(int irq, void *dev_id)
 	handled = IRQ_HANDLED;
 
 	/* Receive or error interrupt? -> napi */
-	if (reg_sr & (AT91_IRQ_MB_RX | AT91_IRQ_ERR_FRAME)) {
+	if (reg_sr & (get_irq_mb_rx(priv) | AT91_IRQ_ERR_FRAME)) {
 		/*
 		 * The error bits are clear on read,
 		 * save for later use.
 		 */
 		priv->reg_sr = reg_sr;
 		at91_write(priv, AT91_IDR,
-			   AT91_IRQ_MB_RX | AT91_IRQ_ERR_FRAME);
+			   get_irq_mb_rx(priv) | AT91_IRQ_ERR_FRAME);
 		napi_schedule(&priv->napi);
 	}
 
 	/* Transmission complete interrupt */
-	if (reg_sr & AT91_IRQ_MB_TX)
+	if (reg_sr & get_irq_mb_tx(priv))
 		at91_irq_tx(dev, reg_sr);
 
 	at91_irq_err(dev);
@@ -1158,7 +1199,7 @@ static int __devinit at91_can_probe(struct platform_device *pdev)
 		goto exit_release;
 	}
 
-	dev = alloc_candev(sizeof(struct at91_priv), AT91_MB_TX_NUM);
+	dev = alloc_candev(sizeof(struct at91_priv), 1 << AT91_MB_TX_SHIFT);
 	if (!dev) {
 		err = -ENOMEM;
 		goto exit_iounmap;