diff options
Diffstat (limited to 'net/core/dev.c')
-rw-r--r--	net/core/dev.c | 20 +++++++++++++-------
 1 file changed, 13 insertions(+), 7 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 723a34710ad4..0ea10f849be8 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1911,8 +1911,16 @@ static int dev_gso_segment(struct sk_buff *skb)
  */
 static inline void skb_orphan_try(struct sk_buff *skb)
 {
-	if (!skb_tx(skb)->flags)
+	struct sock *sk = skb->sk;
+
+	if (sk && !skb_tx(skb)->flags) {
+		/* skb_tx_hash() wont be able to get sk.
+		 * We copy sk_hash into skb->rxhash
+		 */
+		if (!skb->rxhash)
+			skb->rxhash = sk->sk_hash;
 		skb_orphan(skb);
+	}
 }
 
 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
@@ -1998,8 +2006,7 @@ u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
 	if (skb->sk && skb->sk->sk_hash)
 		hash = skb->sk->sk_hash;
 	else
-		hash = (__force u16) skb->protocol;
-
+		hash = (__force u16) skb->protocol ^ skb->rxhash;
 	hash = jhash_1word(hash, hashrnd);
 
 	return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
@@ -2022,12 +2029,11 @@ static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
 static struct netdev_queue *dev_pick_tx(struct net_device *dev,
 					struct sk_buff *skb)
 {
-	u16 queue_index;
+	int queue_index;
 	struct sock *sk = skb->sk;
 
-	if (sk_tx_queue_recorded(sk)) {
-		queue_index = sk_tx_queue_get(sk);
-	} else {
+	queue_index = sk_tx_queue_get(sk);
+	if (queue_index < 0) {
 		const struct net_device_ops *ops = dev->netdev_ops;
 
 		if (ops->ndo_select_queue) {