author    Thomas Gleixner <tglx@linutronix.de>      2014-04-11 04:13:22 -0400
committer Marc Kleine-Budde <mkl@pengutronix.de>    2014-04-24 16:09:01 -0400
commit    35bdafb576c5c0a06815e7a681571c3ab950ff7e
tree      4dfc42fc1a7181c5bb49114a5ec1816d3278e38c
parent    d48071be6cb94912cf3c3ac0b4d520438fab4778
can: c_can: Remove tx locking
Mark suggested using one IF for the softirq and the other for the xmit function to avoid the xmit lock. That requires writing the frame into the interface first, then handling the echo skb and storing the dlc before committing the TX request to the message RAM. We use an atomic to track the active buffers instead of reading the MSGVAL register, as that's way faster, especially on PCH/x86.

Suggested-by: Mark <mark5@del-llc.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Alexander Stein <alexander.stein@systec-electronic.com>
Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
-rw-r--r--  drivers/net/can/c_can/c_can.c | 127
-rw-r--r--  drivers/net/can/c_can/c_can.h |   8
2 files changed, 43 insertions(+), 92 deletions(-)
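The scheme the commit message describes — claim the lowest free TX buffer from an atomic bitmask on the xmit side, stop the queue when the last buffer is taken, and release all completed buffers with one atomic subtraction on the TX-complete side — can be modelled outside the kernel. The sketch below is a minimal, standalone illustration of that bookkeeping, not the driver code: C11 atomics and a GCC builtin stand in for the kernel's atomic_t, fls() and ffs(), and a buffer count of 16 is assumed for the example.

/*
 * Minimal standalone model of the tx_active bookkeeping (illustration
 * only, not driver code): C11 atomics and a GCC builtin stand in for
 * the kernel's atomic_t, fls() and ffs(); 16 TX objects are assumed.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define MSG_OBJ_TX_NUM 16               /* assumed number of TX message objects */

static atomic_uint tx_active;           /* bit n set => TX buffer n is in flight */

/* kernel-style fls(): position of the most significant set bit, 0 if none */
static int fls_u32(unsigned int x)
{
        return x ? 32 - __builtin_clz(x) : 0;
}

/* xmit path: claim the lowest free buffer; C/D_CAN transmits lowest-first */
static int claim_tx_buffer(bool *stop_queue)
{
        int idx = fls_u32(atomic_load(&tx_active));

        *stop_queue = (idx == MSG_OBJ_TX_NUM - 1);      /* last free buffer used */
        /* the frame, dlc and echo skb are set up here, before ... */
        atomic_fetch_add(&tx_active, 1u << idx);        /* ... marking it active */
        return idx;
}

/* TX-complete path: drop all finished buffers at once, as c_can_do_tx() does */
static bool release_tx_buffers(unsigned int done_mask)
{
        atomic_fetch_sub(&tx_active, done_mask);
        /* wake the queue only if the highest (last) buffer was among them */
        return done_mask & (1u << (MSG_OBJ_TX_NUM - 1));
}

int main(void)
{
        bool stop;

        for (int i = 0; i < 3; i++)
                printf("claimed buffer %d, stop queue: %d\n",
                       claim_tx_buffer(&stop), stop);
        printf("wake queue after completion: %d\n",
               (int)release_tx_buffers(0x7));   /* buffers 0..2 completed */
        return 0;
}

The point of the atomic is that the xmit path and the TX-complete handler touch tx_active without a shared lock; because the hardware sends the lowest-numbered pending buffer first, buffers are claimed and retired in order, so fls() on the active mask always yields the next free slot.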
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index 61fde414bb1f..99d36bf1ba21 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -237,24 +237,6 @@ static inline void c_can_reset_ram(const struct c_can_priv *priv, bool enable)
         priv->raminit(priv, enable);
 }
 
-static inline int get_tx_next_msg_obj(const struct c_can_priv *priv)
-{
-        return (priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) +
-                        C_CAN_MSG_OBJ_TX_FIRST;
-}
-
-static inline int get_tx_echo_msg_obj(int txecho)
-{
-        return (txecho & C_CAN_NEXT_MSG_OBJ_MASK) + C_CAN_MSG_OBJ_TX_FIRST;
-}
-
-static u32 c_can_read_reg32(struct c_can_priv *priv, enum reg index)
-{
-        u32 val = priv->read_reg(priv, index);
-        val |= ((u32) priv->read_reg(priv, index + 1)) << 16;
-        return val;
-}
-
 static void c_can_irq_control(struct c_can_priv *priv, bool enable)
 {
         u32 ctrl = priv->read_reg(priv, C_CAN_CTRL_REG) & ~CONTROL_IRQMSK;
@@ -294,8 +276,8 @@ static inline void c_can_object_put(struct net_device *dev, int iface,
         c_can_obj_update(dev, iface, cmd | IF_COMM_WR, obj);
 }
 
-static void c_can_write_msg_object(struct net_device *dev, int iface,
-                                   struct can_frame *frame, int obj)
+static void c_can_setup_tx_object(struct net_device *dev, int iface,
+                                  struct can_frame *frame, int obj)
 {
         struct c_can_priv *priv = netdev_priv(dev);
         u16 ctrl = IF_MCONT_TX | frame->can_dlc;
@@ -321,8 +303,6 @@ static void c_can_write_msg_object(struct net_device *dev, int iface,
                 priv->write_reg(priv, C_CAN_IFACE(DATA1_REG, iface) + i / 2,
                                 frame->data[i] | (frame->data[i + 1] << 8));
         }
-
-        c_can_object_put(dev, iface, obj, IF_COMM_TX);
 }
 
 static inline void c_can_activate_all_lower_rx_msg_obj(struct net_device *dev,
@@ -432,47 +412,38 @@ static void c_can_inval_msg_object(struct net_device *dev, int iface, int obj)
         c_can_object_put(dev, iface, obj, IF_COMM_INVAL);
 }
 
-static inline int c_can_is_next_tx_obj_busy(struct c_can_priv *priv, int objno)
-{
-        int val = c_can_read_reg32(priv, C_CAN_TXRQST1_REG);
-
-        /*
-         * as transmission request register's bit n-1 corresponds to
-         * message object n, we need to handle the same properly.
-         */
-        if (val & (1 << (objno - 1)))
-                return 1;
-
-        return 0;
-}
-
 static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
                                     struct net_device *dev)
 {
-        u32 msg_obj_no;
-        struct c_can_priv *priv = netdev_priv(dev);
         struct can_frame *frame = (struct can_frame *)skb->data;
+        struct c_can_priv *priv = netdev_priv(dev);
+        u32 idx, obj;
 
         if (can_dropped_invalid_skb(dev, skb))
                 return NETDEV_TX_OK;
-
-        spin_lock_bh(&priv->xmit_lock);
-        msg_obj_no = get_tx_next_msg_obj(priv);
-
-        /* prepare message object for transmission */
-        c_can_write_msg_object(dev, IF_TX, frame, msg_obj_no);
-        priv->dlc[msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST] = frame->can_dlc;
-        can_put_echo_skb(skb, dev, msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);
-
         /*
-         * we have to stop the queue in case of a wrap around or
-         * if the next TX message object is still in use
+         * This is not a FIFO. C/D_CAN sends out the buffers
+         * prioritized. The lowest buffer number wins.
          */
-        priv->tx_next++;
-        if (c_can_is_next_tx_obj_busy(priv, get_tx_next_msg_obj(priv)) ||
-            (priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) == 0)
+        idx = fls(atomic_read(&priv->tx_active));
+        obj = idx + C_CAN_MSG_OBJ_TX_FIRST;
+
+        /* If this is the last buffer, stop the xmit queue */
+        if (idx == C_CAN_MSG_OBJ_TX_NUM - 1)
                 netif_stop_queue(dev);
-        spin_unlock_bh(&priv->xmit_lock);
+        /*
+         * Store the message in the interface so we can call
+         * can_put_echo_skb(). We must do this before we enable
+         * transmit as we might race against do_tx().
+         */
+        c_can_setup_tx_object(dev, IF_TX, frame, obj);
+        priv->dlc[idx] = frame->can_dlc;
+        can_put_echo_skb(skb, dev, idx);
+
+        /* Update the active bits */
+        atomic_add((1 << idx), &priv->tx_active);
+        /* Start transmission */
+        c_can_object_put(dev, IF_TX, obj, IF_COMM_TX);
 
         return NETDEV_TX_OK;
 }
@@ -589,6 +560,10 @@ static int c_can_chip_config(struct net_device *dev)
         /* set a `lec` value so that we can check for updates later */
         priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);
 
+        /* Clear all internal status */
+        atomic_set(&priv->tx_active, 0);
+        priv->rxmasked = 0;
+
         /* set bittiming params */
         return c_can_set_bittiming(dev);
 }
@@ -609,10 +584,6 @@ static int c_can_start(struct net_device *dev)
 
         priv->can.state = CAN_STATE_ERROR_ACTIVE;
 
-        /* reset tx helper pointers and the rx mask */
-        priv->tx_next = priv->tx_echo = 0;
-        priv->rxmasked = 0;
-
         return 0;
 }
 
@@ -671,42 +642,29 @@ static int c_can_get_berr_counter(const struct net_device *dev,
         return err;
 }
 
-/*
- * priv->tx_echo holds the number of the oldest can_frame put for
- * transmission into the hardware, but not yet ACKed by the CAN tx
- * complete IRQ.
- *
- * We iterate from priv->tx_echo to priv->tx_next and check if the
- * packet has been transmitted, echo it back to the CAN framework.
- * If we discover a not yet transmitted packet, stop looking for more.
- */
 static void c_can_do_tx(struct net_device *dev)
 {
         struct c_can_priv *priv = netdev_priv(dev);
         struct net_device_stats *stats = &dev->stats;
-        u32 val, obj, pkts = 0, bytes = 0;
+        u32 idx, obj, pkts = 0, bytes = 0, pend, clr;
 
-        spin_lock_bh(&priv->xmit_lock);
+        clr = pend = priv->read_reg(priv, C_CAN_INTPND2_REG);
 
-        for (; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
-                obj = get_tx_echo_msg_obj(priv->tx_echo);
-                val = c_can_read_reg32(priv, C_CAN_TXRQST1_REG);
-
-                if (val & (1 << (obj - 1)))
-                        break;
-
-                can_get_echo_skb(dev, obj - C_CAN_MSG_OBJ_TX_FIRST);
-                bytes += priv->dlc[obj - C_CAN_MSG_OBJ_TX_FIRST];
+        while ((idx = ffs(pend))) {
+                idx--;
+                pend &= ~(1 << idx);
+                obj = idx + C_CAN_MSG_OBJ_TX_FIRST;
+                c_can_inval_msg_object(dev, IF_RX, obj);
+                can_get_echo_skb(dev, idx);
+                bytes += priv->dlc[idx];
                 pkts++;
-                c_can_inval_msg_object(dev, IF_TX, obj);
         }
 
-        /* restart queue if wrap-up or if queue stalled on last pkt */
-        if (((priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) != 0) ||
-            ((priv->tx_echo & C_CAN_NEXT_MSG_OBJ_MASK) == 0))
-                netif_wake_queue(dev);
+        /* Clear the bits in the tx_active mask */
+        atomic_sub(clr, &priv->tx_active);
 
-        spin_unlock_bh(&priv->xmit_lock);
+        if (clr & (1 << (C_CAN_MSG_OBJ_TX_NUM - 1)))
+                netif_wake_queue(dev);
 
         if (pkts) {
                 stats->tx_bytes += bytes;
@@ -1192,7 +1150,6 @@ struct net_device *alloc_c_can_dev(void)
                 return NULL;
 
         priv = netdev_priv(dev);
-        spin_lock_init(&priv->xmit_lock);
         netif_napi_add(dev, &priv->napi, c_can_poll, C_CAN_NAPI_WEIGHT);
 
         priv->dev = dev;
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
index d9f59cc4fcb5..a5f10a01e49f 100644
--- a/drivers/net/can/c_can/c_can.h
+++ b/drivers/net/can/c_can/c_can.h
@@ -37,8 +37,6 @@
 
 #define C_CAN_MSG_OBJ_RX_SPLIT   9
 #define C_CAN_MSG_RX_LOW_LAST    (C_CAN_MSG_OBJ_RX_SPLIT - 1)
-
-#define C_CAN_NEXT_MSG_OBJ_MASK  (C_CAN_MSG_OBJ_TX_NUM - 1)
 #define RECEIVE_OBJECT_BITS      0x0000ffff
 
 enum reg {
@@ -175,16 +173,12 @@ struct c_can_priv {
         struct napi_struct napi;
         struct net_device *dev;
         struct device *device;
-        spinlock_t xmit_lock;
-        int tx_object;
+        atomic_t tx_active;
         int last_status;
         u16 (*read_reg) (struct c_can_priv *priv, enum reg index);
         void (*write_reg) (struct c_can_priv *priv, enum reg index, u16 val);
         void __iomem *base;
         const u16 *regs;
-        unsigned long irq_flags; /* for request_irq() */
-        unsigned int tx_next;
-        unsigned int tx_echo;
         void *priv; /* for board-specific data */
         enum c_can_dev_id type;
         u32 __iomem *raminit_ctrlreg;