author     Dominic Curran <dominic.curran@citrix.com>    2014-01-21 22:03:23 -0500
committer  David S. Miller <davem@davemloft.net>         2014-01-23 00:32:56 -0500
commit     fa35864e0bb7f7c13b9c6d6751ddac9b42d4810f
tree       50f705ece98607b7a54161554b486d65c426ac2e
parent     bdf4351bbc62f3b24151cf19ca6e531b68d8c340
tuntap: Fix for a race in accessing numqueues
A patch fixing a race between queue selection and changing queues
was introduced in commit 92bb73ea2 ("tuntap: fix a possible race between
queue selection and changing queues").
The fix was to prevent the driver from re-reading tun->numqueues
more than once within tun_select_queue(), using ACCESS_ONCE().
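For context, ACCESS_ONCE() in kernels of this era is a volatile cast that
forces the compiler to emit exactly one load for the expression, so all
later uses see the same snapshot. A minimal sketch of the idiom
(simplified; not the driver's exact code):

    /* include/linux/compiler.h (v3.x) */
    #define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

    /* Racy: the compiler may reload tun->numqueues at each use, so a
     * concurrent queue change can alter the value between the bounds
     * check and any arithmetic that follows.
     */
    if (txq >= tun->numqueues)
            goto drop;

    /* Safe: one load, one consistent snapshot for all later uses. */
    u32 numqueues = ACCESS_ONCE(tun->numqueues);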
We have been experiencing 'Divide-by-zero' errors in tun_net_xmit()
since we moved from 3.6 to 3.10, and believe that they come from a
similar source, where the value of tun->numqueues changes to zero
between the first and a subsequent read of tun->numqueues.
The fix is a similar use of ACCESS_ONCE(), as well as a multiply
instead of a divide in the if statement.
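Concretely, the queue-length check is rearranged so that a zero snapshot
can never appear as a divisor. A sketch of the transformation (equivalent
up to integer-division rounding):

    /* Before: faults if tun->numqueues drops to 0 under us. */
    if (skb_queue_len(&tfile->socket.sk->sk_receive_queue)
        >= dev->tx_queue_len / tun->numqueues)
            goto drop;

    /* After: with numqueues == 0 the left-hand side is 0 and the
     * comparison is simply false; the earlier txq >= numqueues test
     * has already dropped the packet in that case anyway.
     */
    if (skb_queue_len(&tfile->socket.sk->sk_receive_queue) * numqueues
        >= dev->tx_queue_len)
            goto drop;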
Signed-off-by: Dominic Curran <dominic.curran@citrix.com>
Cc: Jason Wang <jasowang@redhat.com>
Cc: Maxim Krasnyansky <maxk@qti.qualcomm.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Acked-by: Max Krasnyansky <maxk@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
 drivers/net/tun.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 34cca74c99ed..bcf01af4b879 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -738,15 +738,17 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct tun_struct *tun = netdev_priv(dev);
 	int txq = skb->queue_mapping;
 	struct tun_file *tfile;
+	u32 numqueues = 0;
 
 	rcu_read_lock();
 	tfile = rcu_dereference(tun->tfiles[txq]);
+	numqueues = ACCESS_ONCE(tun->numqueues);
 
 	/* Drop packet if interface is not attached */
-	if (txq >= tun->numqueues)
+	if (txq >= numqueues)
 		goto drop;
 
-	if (tun->numqueues == 1) {
+	if (numqueues == 1) {
 		/* Select queue was not called for the skbuff, so we extract the
 		 * RPS hash and save it into the flow_table here.
 		 */
@@ -779,8 +781,8 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* Limit the number of packets queued by dividing txq length with the
 	 * number of queues.
 	 */
-	if (skb_queue_len(&tfile->socket.sk->sk_receive_queue)
-	    >= dev->tx_queue_len / tun->numqueues)
+	if (skb_queue_len(&tfile->socket.sk->sk_receive_queue) * numqueues
+	    >= dev->tx_queue_len)
 		goto drop;
 
 	if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))