diff options
author | Jesper Dangaard Brouer <brouer@redhat.com> | 2014-09-03 11:56:09 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2014-09-03 23:41:42 -0400 |
commit | 1f59533f9ca5634e7b8914252e48aee9d9cbe501 (patch) | |
tree | 380ed2cf4938401f34b9658bb550ba48539fbd34 /net/core | |
parent | 3f3c7eec60ad4f990d7bcbc41a1597a4fc7268f6 (diff) |
qdisc: validate frames going through the direct_xmit path
In commit 50cbe9ab5f8d ("net: Validate xmit SKBs right when we
pull them out of the qdisc") the validation code was moved out of
dev_hard_start_xmit and into dequeue_skb.
However, this overlooked the fact that we do not always enqueue
the skb onto a qdisc. The first situation is if the qdisc has the
flag TCQ_F_CAN_BYPASS and the qdisc is empty. The second situation
is if there is no qdisc on the device, which is a common case for
software devices.
Originally spotted, and initial patch written, by Alexander Duyck.
As a result Alex was seeing issues trying to connect to a
vhost_net interface after commit 50cbe9ab5f8d was applied.
Added a call to validate_xmit_skb() in __dev_xmit_skb(), in the
code path for qdiscs with TCQ_F_CAN_BYPASS flag, and in
__dev_queue_xmit() when no qdisc.
Also handle the error situation where dev_hard_start_xmit() could
return an skb list and does not return dev_xmit_complete(rc),
falling through to the kfree_skb(); in that situation it should
call kfree_skb_list() instead.
Fixes: 50cbe9ab5f8d ("net: Validate xmit SKBs right when we pull them out of the qdisc")
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core')
-rw-r--r-- | net/core/dev.c | 10 |
1 files changed, 8 insertions, 2 deletions
diff --git a/net/core/dev.c b/net/core/dev.c index 3774afc3bebf..2f3dbd657570 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -2739,7 +2739,8 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, | |||
2739 | 2739 | ||
2740 | qdisc_bstats_update(q, skb); | 2740 | qdisc_bstats_update(q, skb); |
2741 | 2741 | ||
2742 | if (sch_direct_xmit(skb, q, dev, txq, root_lock)) { | 2742 | skb = validate_xmit_skb(skb, dev); |
2743 | if (skb && sch_direct_xmit(skb, q, dev, txq, root_lock)) { | ||
2743 | if (unlikely(contended)) { | 2744 | if (unlikely(contended)) { |
2744 | spin_unlock(&q->busylock); | 2745 | spin_unlock(&q->busylock); |
2745 | contended = false; | 2746 | contended = false; |
@@ -2879,6 +2880,10 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv) | |||
2879 | if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT) | 2880 | if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT) |
2880 | goto recursion_alert; | 2881 | goto recursion_alert; |
2881 | 2882 | ||
2883 | skb = validate_xmit_skb(skb, dev); | ||
2884 | if (!skb) | ||
2885 | goto drop; | ||
2886 | |||
2882 | HARD_TX_LOCK(dev, txq, cpu); | 2887 | HARD_TX_LOCK(dev, txq, cpu); |
2883 | 2888 | ||
2884 | if (!netif_xmit_stopped(txq)) { | 2889 | if (!netif_xmit_stopped(txq)) { |
@@ -2904,10 +2909,11 @@ recursion_alert: | |||
2904 | } | 2909 | } |
2905 | 2910 | ||
2906 | rc = -ENETDOWN; | 2911 | rc = -ENETDOWN; |
2912 | drop: | ||
2907 | rcu_read_unlock_bh(); | 2913 | rcu_read_unlock_bh(); |
2908 | 2914 | ||
2909 | atomic_long_inc(&dev->tx_dropped); | 2915 | atomic_long_inc(&dev->tx_dropped); |
2910 | kfree_skb(skb); | 2916 | kfree_skb_list(skb); |
2911 | return rc; | 2917 | return rc; |
2912 | out: | 2918 | out: |
2913 | rcu_read_unlock_bh(); | 2919 | rcu_read_unlock_bh(); |