aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/can
diff options
context:
space:
mode:
authorAhmed S. Darwish <ahmed.darwish@valeo.com>2015-03-14 09:02:49 -0400
committerMarc Kleine-Budde <mkl@pengutronix.de>2015-03-14 04:20:07 -0400
commita9dc960c37b0d4eb192598dc4c94276270454514 (patch)
tree769341072aeb277f71beed94b5b73f00b9dce46f /drivers/net/can
parent963a822b6d5fece27c88522ac34dd48928571c8b (diff)
can: kvaser_usb: Fix tx queue start/stop race conditions
A number of tx queue wake-up events went missing due to the outlined
scenario below. Start state is a pool of 16 tx URBs, active tx_urbs
count = 15, with the netdev tx queue open.

CPU #1 [softirq]                         CPU #2 [softirq]
start_xmit()                             tx_acknowledge()
................                         ................

atomic_inc(&tx_urbs);
if (atomic_read(&tx_urbs) >= 16) {
                                -->
                                         atomic_dec(&tx_urbs);
                                         netif_wake_queue();
                                         return;
                                <--
    netif_stop_queue();
}

At the end, the correct state expected is a 15 tx_urbs count value with
the tx queue state _open_. Due to the race, we get the same tx_urbs
value but with the tx queue state _stopped_. The wake-up event is
completely lost.

Thus avoid hand-rolled concurrency mechanisms and use a proper lock for
contexts and tx queue protection.

Signed-off-by: Ahmed S. Darwish <ahmed.darwish@valeo.com> Cc: linux-stable <stable@vger.kernel.org> Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
Diffstat (limited to 'drivers/net/can')
-rw-r--r--drivers/net/can/usb/kvaser_usb.c83
1 files changed, 51 insertions, 32 deletions
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index a316fa4b91ab..e97a08ce0b90 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -14,6 +14,7 @@
14 * Copyright (C) 2015 Valeo S.A. 14 * Copyright (C) 2015 Valeo S.A.
15 */ 15 */
16 16
17#include <linux/spinlock.h>
17#include <linux/kernel.h> 18#include <linux/kernel.h>
18#include <linux/completion.h> 19#include <linux/completion.h>
19#include <linux/module.h> 20#include <linux/module.h>
@@ -467,10 +468,11 @@ struct kvaser_usb {
467struct kvaser_usb_net_priv { 468struct kvaser_usb_net_priv {
468 struct can_priv can; 469 struct can_priv can;
469 470
470 atomic_t active_tx_urbs; 471 spinlock_t tx_contexts_lock;
471 struct usb_anchor tx_submitted; 472 int active_tx_contexts;
472 struct kvaser_usb_tx_urb_context tx_contexts[MAX_TX_URBS]; 473 struct kvaser_usb_tx_urb_context tx_contexts[MAX_TX_URBS];
473 474
475 struct usb_anchor tx_submitted;
474 struct completion start_comp, stop_comp; 476 struct completion start_comp, stop_comp;
475 477
476 struct kvaser_usb *dev; 478 struct kvaser_usb *dev;
@@ -694,6 +696,7 @@ static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev,
694 struct kvaser_usb_net_priv *priv; 696 struct kvaser_usb_net_priv *priv;
695 struct sk_buff *skb; 697 struct sk_buff *skb;
696 struct can_frame *cf; 698 struct can_frame *cf;
699 unsigned long flags;
697 u8 channel, tid; 700 u8 channel, tid;
698 701
699 channel = msg->u.tx_acknowledge_header.channel; 702 channel = msg->u.tx_acknowledge_header.channel;
@@ -737,12 +740,15 @@ static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev,
737 740
738 stats->tx_packets++; 741 stats->tx_packets++;
739 stats->tx_bytes += context->dlc; 742 stats->tx_bytes += context->dlc;
740 can_get_echo_skb(priv->netdev, context->echo_index);
741 743
742 context->echo_index = MAX_TX_URBS; 744 spin_lock_irqsave(&priv->tx_contexts_lock, flags);
743 atomic_dec(&priv->active_tx_urbs);
744 745
746 can_get_echo_skb(priv->netdev, context->echo_index);
747 context->echo_index = MAX_TX_URBS;
748 --priv->active_tx_contexts;
745 netif_wake_queue(priv->netdev); 749 netif_wake_queue(priv->netdev);
750
751 spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
746} 752}
747 753
748static void kvaser_usb_simple_msg_callback(struct urb *urb) 754static void kvaser_usb_simple_msg_callback(struct urb *urb)
@@ -803,17 +809,6 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv,
803 return 0; 809 return 0;
804} 810}
805 811
806static void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv)
807{
808 int i;
809
810 usb_kill_anchored_urbs(&priv->tx_submitted);
811 atomic_set(&priv->active_tx_urbs, 0);
812
813 for (i = 0; i < MAX_TX_URBS; i++)
814 priv->tx_contexts[i].echo_index = MAX_TX_URBS;
815}
816
817static void kvaser_usb_rx_error_update_can_state(struct kvaser_usb_net_priv *priv, 812static void kvaser_usb_rx_error_update_can_state(struct kvaser_usb_net_priv *priv,
818 const struct kvaser_usb_error_summary *es, 813 const struct kvaser_usb_error_summary *es,
819 struct can_frame *cf) 814 struct can_frame *cf)
@@ -1515,6 +1510,24 @@ error:
1515 return err; 1510 return err;
1516} 1511}
1517 1512
1513static void kvaser_usb_reset_tx_urb_contexts(struct kvaser_usb_net_priv *priv)
1514{
1515 int i;
1516
1517 priv->active_tx_contexts = 0;
1518 for (i = 0; i < MAX_TX_URBS; i++)
1519 priv->tx_contexts[i].echo_index = MAX_TX_URBS;
1520}
1521
1522/* This method might sleep. Do not call it in the atomic context
1523 * of URB completions.
1524 */
1525static void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv)
1526{
1527 usb_kill_anchored_urbs(&priv->tx_submitted);
1528 kvaser_usb_reset_tx_urb_contexts(priv);
1529}
1530
1518static void kvaser_usb_unlink_all_urbs(struct kvaser_usb *dev) 1531static void kvaser_usb_unlink_all_urbs(struct kvaser_usb *dev)
1519{ 1532{
1520 int i; 1533 int i;
@@ -1634,6 +1647,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
1634 struct kvaser_msg *msg; 1647 struct kvaser_msg *msg;
1635 int i, err, ret = NETDEV_TX_OK; 1648 int i, err, ret = NETDEV_TX_OK;
1636 u8 *msg_tx_can_flags = NULL; /* GCC */ 1649 u8 *msg_tx_can_flags = NULL; /* GCC */
1650 unsigned long flags;
1637 1651
1638 if (can_dropped_invalid_skb(netdev, skb)) 1652 if (can_dropped_invalid_skb(netdev, skb))
1639 return NETDEV_TX_OK; 1653 return NETDEV_TX_OK;
@@ -1687,12 +1701,21 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
1687 if (cf->can_id & CAN_RTR_FLAG) 1701 if (cf->can_id & CAN_RTR_FLAG)
1688 *msg_tx_can_flags |= MSG_FLAG_REMOTE_FRAME; 1702 *msg_tx_can_flags |= MSG_FLAG_REMOTE_FRAME;
1689 1703
1704 spin_lock_irqsave(&priv->tx_contexts_lock, flags);
1690 for (i = 0; i < ARRAY_SIZE(priv->tx_contexts); i++) { 1705 for (i = 0; i < ARRAY_SIZE(priv->tx_contexts); i++) {
1691 if (priv->tx_contexts[i].echo_index == MAX_TX_URBS) { 1706 if (priv->tx_contexts[i].echo_index == MAX_TX_URBS) {
1692 context = &priv->tx_contexts[i]; 1707 context = &priv->tx_contexts[i];
1708
1709 context->echo_index = i;
1710 can_put_echo_skb(skb, netdev, context->echo_index);
1711 ++priv->active_tx_contexts;
1712 if (priv->active_tx_contexts >= MAX_TX_URBS)
1713 netif_stop_queue(netdev);
1714
1693 break; 1715 break;
1694 } 1716 }
1695 } 1717 }
1718 spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
1696 1719
1697 /* This should never happen; it implies a flow control bug */ 1720 /* This should never happen; it implies a flow control bug */
1698 if (!context) { 1721 if (!context) {
@@ -1704,7 +1727,6 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
1704 } 1727 }
1705 1728
1706 context->priv = priv; 1729 context->priv = priv;
1707 context->echo_index = i;
1708 context->dlc = cf->can_dlc; 1730 context->dlc = cf->can_dlc;
1709 1731
1710 msg->u.tx_can.tid = context->echo_index; 1732 msg->u.tx_can.tid = context->echo_index;
@@ -1716,18 +1738,17 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
1716 kvaser_usb_write_bulk_callback, context); 1738 kvaser_usb_write_bulk_callback, context);
1717 usb_anchor_urb(urb, &priv->tx_submitted); 1739 usb_anchor_urb(urb, &priv->tx_submitted);
1718 1740
1719 can_put_echo_skb(skb, netdev, context->echo_index);
1720
1721 atomic_inc(&priv->active_tx_urbs);
1722
1723 if (atomic_read(&priv->active_tx_urbs) >= MAX_TX_URBS)
1724 netif_stop_queue(netdev);
1725
1726 err = usb_submit_urb(urb, GFP_ATOMIC); 1741 err = usb_submit_urb(urb, GFP_ATOMIC);
1727 if (unlikely(err)) { 1742 if (unlikely(err)) {
1743 spin_lock_irqsave(&priv->tx_contexts_lock, flags);
1744
1728 can_free_echo_skb(netdev, context->echo_index); 1745 can_free_echo_skb(netdev, context->echo_index);
1746 context->echo_index = MAX_TX_URBS;
1747 --priv->active_tx_contexts;
1748 netif_wake_queue(netdev);
1749
1750 spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
1729 1751
1730 atomic_dec(&priv->active_tx_urbs);
1731 usb_unanchor_urb(urb); 1752 usb_unanchor_urb(urb);
1732 1753
1733 stats->tx_dropped++; 1754 stats->tx_dropped++;
@@ -1854,7 +1875,7 @@ static int kvaser_usb_init_one(struct usb_interface *intf,
1854 struct kvaser_usb *dev = usb_get_intfdata(intf); 1875 struct kvaser_usb *dev = usb_get_intfdata(intf);
1855 struct net_device *netdev; 1876 struct net_device *netdev;
1856 struct kvaser_usb_net_priv *priv; 1877 struct kvaser_usb_net_priv *priv;
1857 int i, err; 1878 int err;
1858 1879
1859 err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, channel); 1880 err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, channel);
1860 if (err) 1881 if (err)
@@ -1868,19 +1889,17 @@ static int kvaser_usb_init_one(struct usb_interface *intf,
1868 1889
1869 priv = netdev_priv(netdev); 1890 priv = netdev_priv(netdev);
1870 1891
1892 init_usb_anchor(&priv->tx_submitted);
1871 init_completion(&priv->start_comp); 1893 init_completion(&priv->start_comp);
1872 init_completion(&priv->stop_comp); 1894 init_completion(&priv->stop_comp);
1873 1895
1874 init_usb_anchor(&priv->tx_submitted);
1875 atomic_set(&priv->active_tx_urbs, 0);
1876
1877 for (i = 0; i < ARRAY_SIZE(priv->tx_contexts); i++)
1878 priv->tx_contexts[i].echo_index = MAX_TX_URBS;
1879
1880 priv->dev = dev; 1896 priv->dev = dev;
1881 priv->netdev = netdev; 1897 priv->netdev = netdev;
1882 priv->channel = channel; 1898 priv->channel = channel;
1883 1899
1900 spin_lock_init(&priv->tx_contexts_lock);
1901 kvaser_usb_reset_tx_urb_contexts(priv);
1902
1884 priv->can.state = CAN_STATE_STOPPED; 1903 priv->can.state = CAN_STATE_STOPPED;
1885 priv->can.clock.freq = CAN_USB_CLOCK; 1904 priv->can.clock.freq = CAN_USB_CLOCK;
1886 priv->can.bittiming_const = &kvaser_usb_bittiming_const; 1905 priv->can.bittiming_const = &kvaser_usb_bittiming_const;