Diffstat (limited to 'net/core/dev.c')
 net/core/dev.c | 38 +++++++++++++++++++++++++++++++-------
 1 file changed, 31 insertions(+), 7 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 2b3bf53bc687..0ea10f849be8 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1553,6 +1553,24 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
         rcu_read_unlock();
 }
 
+/*
+ * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
+ * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
+ */
+void netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
+{
+        unsigned int real_num = dev->real_num_tx_queues;
+
+        if (unlikely(txq > dev->num_tx_queues))
+                ;
+        else if (txq > real_num)
+                dev->real_num_tx_queues = txq;
+        else if (txq < real_num) {
+                dev->real_num_tx_queues = txq;
+                qdisc_reset_all_tx_gt(dev, txq);
+        }
+}
+EXPORT_SYMBOL(netif_set_real_num_tx_queues);
 
 static inline void __netif_reschedule(struct Qdisc *q)
 {
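The helper gives drivers one sanctioned way to resize real_num_tx_queues at runtime; the subtle case is shrinking, where skbs already queued for the disappearing queues must be flushed via qdisc_reset_all_tx_gt() so nothing stale remains mapped to a queue that no longer exists. Below is a minimal userspace model of that three-way decision (fake_dev and the printf stand in for the kernel structures and the qdisc flush; a sketch, not kernel code):

#include <stdio.h>

struct fake_dev {
        unsigned int num_tx_queues;      /* queues allocated at register time */
        unsigned int real_num_tx_queues; /* queues currently in use */
};

static void flush_queues_above(unsigned int txq, unsigned int old_real)
{
        /* Stands in for qdisc_reset_all_tx_gt(): drop stale skbs queued
         * for tx queues >= txq, which just became unreachable. */
        printf("flushing qdiscs for queues %u..%u\n", txq, old_real - 1);
}

static void set_real_num_tx_queues(struct fake_dev *dev, unsigned int txq)
{
        unsigned int real_num = dev->real_num_tx_queues;

        if (txq > dev->num_tx_queues)
                return;                          /* can't exceed allocation */
        if (txq > real_num) {
                dev->real_num_tx_queues = txq;   /* growing: nothing stale */
        } else if (txq < real_num) {
                /* shrinking: update first, then flush the dead range */
                dev->real_num_tx_queues = txq;
                flush_queues_above(txq, real_num);
        }
}

int main(void)
{
        struct fake_dev dev = { .num_tx_queues = 8, .real_num_tx_queues = 8 };

        set_real_num_tx_queues(&dev, 4);  /* shrink: triggers the flush */
        set_real_num_tx_queues(&dev, 6);  /* grow: just widens the range */
        return 0;
}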
@@ -1893,8 +1911,16 @@ static int dev_gso_segment(struct sk_buff *skb)
  */
 static inline void skb_orphan_try(struct sk_buff *skb)
 {
-        if (!skb_tx(skb)->flags)
+        struct sock *sk = skb->sk;
+
+        if (sk && !skb_tx(skb)->flags) {
+                /* skb_tx_hash() won't be able to get sk.
+                 * We copy sk_hash into skb->rxhash
+                 */
+                if (!skb->rxhash)
+                        skb->rxhash = sk->sk_hash;
                 skb_orphan(skb);
+        }
 }
 
 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
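The reason for the copy: skb_orphan() drops the skb's reference to its socket, so a later skb_tx_hash() call can no longer read sk->sk_hash and flows would collapse onto the skb->protocol fallback. Stashing sk_hash in skb->rxhash first preserves the flow key across the orphan. A standalone model of that ordering (fake_sock/fake_skb are stand-ins, not kernel code):

#include <stdio.h>

struct fake_sock { unsigned int sk_hash; };

struct fake_skb {
        struct fake_sock *sk;
        unsigned int rxhash;
};

static void orphan(struct fake_skb *skb)
{
        skb->sk = NULL;   /* models skb_orphan(): socket reference dropped */
}

static void orphan_try(struct fake_skb *skb)
{
        struct fake_sock *sk = skb->sk;

        if (sk) {
                /* Preserve the flow key before the socket goes away,
                 * mirroring the skb->rxhash = sk->sk_hash copy above. */
                if (!skb->rxhash)
                        skb->rxhash = sk->sk_hash;
                orphan(skb);
        }
}

static unsigned int pick_hash(const struct fake_skb *skb)
{
        /* Models the skb_tx_hash() fallback: use sk_hash while the
         * socket is attached, else fall back to rxhash. */
        return skb->sk ? skb->sk->sk_hash : skb->rxhash;
}

int main(void)
{
        struct fake_sock sk = { .sk_hash = 0xabcd };
        struct fake_skb skb = { .sk = &sk, .rxhash = 0 };

        orphan_try(&skb);
        printf("hash after orphan: %#x\n", pick_hash(&skb)); /* 0xabcd */
        return 0;
}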
@@ -1980,8 +2006,7 @@ u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
         if (skb->sk && skb->sk->sk_hash)
                 hash = skb->sk->sk_hash;
         else
-                hash = (__force u16) skb->protocol;
-
+                hash = (__force u16) skb->protocol ^ skb->rxhash;
         hash = jhash_1word(hash, hashrnd);
 
         return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
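The return line is worth a note: rather than computing hash % real_num_tx_queues, the kernel maps the 32-bit hash onto the queue range by taking the high 32 bits of a 64-bit product, which avoids a division and still spreads values uniformly. A standalone demo of that multiply-shift reduction (sample values are mine, not from the patch):

#include <stdio.h>
#include <stdint.h>

/* Map a 32-bit hash onto [0, n_queues) by taking the top 32 bits of
 * hash * n_queues: a range reduction without a modulo. */
static uint16_t map_to_queue(uint32_t hash, unsigned int n_queues)
{
        return (uint16_t)(((uint64_t)hash * n_queues) >> 32);
}

int main(void)
{
        unsigned int n = 8;
        uint32_t samples[] = { 0x00000000u, 0x20000000u, 0x80000000u,
                               0xdeadbeefu, 0xffffffffu };

        /* Hashes scale linearly onto queues 0..7: 0x20000000 -> 1,
         * 0x80000000 -> 4, 0xffffffff -> 7, and so on. */
        for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                printf("hash %#010x -> queue %u of %u\n",
                       (unsigned)samples[i],
                       (unsigned)map_to_queue(samples[i], n), n);
        return 0;
}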
@@ -2004,12 +2029,11 @@ static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
 static struct netdev_queue *dev_pick_tx(struct net_device *dev,
                                         struct sk_buff *skb)
 {
-        u16 queue_index;
+        int queue_index;
         struct sock *sk = skb->sk;
 
-        if (sk_tx_queue_recorded(sk)) {
-                queue_index = sk_tx_queue_get(sk);
-        } else {
+        queue_index = sk_tx_queue_get(sk);
+        if (queue_index < 0) {
                 const struct net_device_ops *ops = dev->netdev_ops;
 
                 if (ops->ndo_select_queue) {
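This simplification works because sk_tx_queue_get() can return -1 for "no queue recorded" (including a NULL sk), so widening queue_index to int lets one signed test replace the separate sk_tx_queue_recorded() branch. A standalone model of the shape of the new fast path (stand-in types; the hash fallback is reduced to queue 0 for brevity):

#include <stdio.h>

struct fake_sock { int tx_queue; };   /* -1 means "not recorded" */

/* Models sk_tx_queue_get(): one call covers both the "no socket"
 * and "no recorded queue" cases by returning a negative value. */
static int tx_queue_get(const struct fake_sock *sk)
{
        return sk ? sk->tx_queue : -1;
}

static int pick_tx_queue(const struct fake_sock *sk)
{
        int queue_index = tx_queue_get(sk);

        if (queue_index < 0) {
                /* No cached mapping: the kernel would consult
                 * ndo_select_queue() or skb_tx_hash() here. */
                queue_index = 0;
        }
        return queue_index;
}

int main(void)
{
        struct fake_sock cached = { .tx_queue = 3 };

        printf("cached socket -> queue %d\n", pick_tx_queue(&cached));
        printf("no socket     -> queue %d\n", pick_tx_queue(NULL));
        return 0;
}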