Diffstat (limited to 'net'):

 net/core/dev.c | 58 +++++++++++++++++++++++++++++++++++-----------------------
 1 file changed, 35 insertions(+), 23 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index cc1d6bba017a..09a7cc2f3c55 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2786,24 +2786,26 @@ EXPORT_SYMBOL(netif_device_attach);
  * Returns a Tx hash based on the given packet descriptor a Tx queues' number
  * to be used as a distribution range.
  */
-static u16 skb_tx_hash(const struct net_device *dev, struct sk_buff *skb)
+static u16 skb_tx_hash(const struct net_device *dev,
+		       const struct net_device *sb_dev,
+		       struct sk_buff *skb)
 {
 	u32 hash;
 	u16 qoffset = 0;
 	u16 qcount = dev->real_num_tx_queues;
 
+	if (dev->num_tc) {
+		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
+
+		qoffset = sb_dev->tc_to_txq[tc].offset;
+		qcount = sb_dev->tc_to_txq[tc].count;
+	}
+
 	if (skb_rx_queue_recorded(skb)) {
 		hash = skb_get_rx_queue(skb);
 		while (unlikely(hash >= qcount))
 			hash -= qcount;
-		return hash;
-	}
-
-	if (dev->num_tc) {
-		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
-
-		qoffset = dev->tc_to_txq[tc].offset;
-		qcount = dev->tc_to_txq[tc].count;
+		return hash + qoffset;
 	}
 
 	return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
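
The net effect of this hunk: the traffic-class offset and count are now taken from the subordinate device's tc_to_txq table and computed up front, so they apply on both paths, including the recorded-rx-queue path, which previously returned the raw hash without qoffset. Below is a minimal standalone sketch of just that arithmetic; in the kernel, reciprocal_scale() is ((u64)val * range) >> 32, and the map values here are invented for illustration:

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as the kernel's reciprocal_scale(): maps a 32-bit
 * value uniformly into [0, range) without a modulo. */
static uint16_t reciprocal_scale(uint32_t val, uint32_t range)
{
	return (uint16_t)(((uint64_t)val * range) >> 32);
}

/* Illustrative stand-in for one tc_to_txq entry: assume traffic
 * class 1 owns queues [8, 12). Values invented for the example. */
static const uint16_t qoffset = 8, qcount = 4;

int main(void)
{
	uint32_t hash = 0xdeadbeef;	/* stands in for skb_get_hash() */

	/* Queue chosen within the class's range, then shifted by offset,
	 * mirroring the final return in skb_tx_hash(); prints "txq = 11". */
	printf("txq = %u\n", reciprocal_scale(hash, qcount) + qoffset);
	return 0;
}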
@@ -3573,7 +3575,8 @@ static int __get_xps_queue_idx(struct net_device *dev, struct sk_buff *skb,
 }
 #endif
 
-static int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
+static int get_xps_queue(struct net_device *dev, struct net_device *sb_dev,
+			 struct sk_buff *skb)
 {
 #ifdef CONFIG_XPS
 	struct xps_dev_maps *dev_maps;
@@ -3587,7 +3590,7 @@ static int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
 	if (!static_key_false(&xps_rxqs_needed))
 		goto get_cpus_map;
 
-	dev_maps = rcu_dereference(dev->xps_rxqs_map);
+	dev_maps = rcu_dereference(sb_dev->xps_rxqs_map);
 	if (dev_maps) {
 		int tci = sk_rx_queue_get(sk);
 
@@ -3598,7 +3601,7 @@ static int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
 
 get_cpus_map:
 	if (queue_index < 0) {
-		dev_maps = rcu_dereference(dev->xps_cpus_map);
+		dev_maps = rcu_dereference(sb_dev->xps_cpus_map);
 		if (dev_maps) {
 			unsigned int tci = skb->sender_cpu - 1;
 
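
These two hunks make both XPS lookups consult the subordinate device's maps instead of the root device's. The control flow is unchanged: try the receive-queue-keyed map (xps_rxqs_map) first, and fall back to the CPU-keyed map (xps_cpus_map) only if that yields no queue. A minimal userspace model of that fallback, with invented map contents:

#include <stdio.h>

/* Toy stand-ins for the two XPS maps; -1 means "no entry". The
 * indices and contents are invented for illustration. */
static const int rxqs_map[4] = { -1, -1, 6, 7 };	/* keyed by rx queue */
static const int cpus_map[4] = {  0,  1, 2, 3 };	/* keyed by CPU */

static int get_xps_queue_model(int rx_queue, int cpu)
{
	int queue_index = -1;

	if (rx_queue >= 0)		/* sk_rx_queue_get() had a result */
		queue_index = rxqs_map[rx_queue];

	if (queue_index < 0)		/* the get_cpus_map fallback */
		queue_index = cpus_map[cpu];

	return queue_index;
}

int main(void)
{
	printf("%d\n", get_xps_queue_model(0, 2));	/* rxqs miss -> cpu map: 2 */
	printf("%d\n", get_xps_queue_model(3, 2));	/* rxqs hit: 7 */
	return 0;
}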
@@ -3614,17 +3617,20 @@ get_cpus_map:
 #endif
 }
 
-static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
+static u16 ___netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
+			     struct net_device *sb_dev)
 {
 	struct sock *sk = skb->sk;
 	int queue_index = sk_tx_queue_get(sk);
 
+	sb_dev = sb_dev ? : dev;
+
 	if (queue_index < 0 || skb->ooo_okay ||
 	    queue_index >= dev->real_num_tx_queues) {
-		int new_index = get_xps_queue(dev, skb);
+		int new_index = get_xps_queue(dev, sb_dev, skb);
 
 		if (new_index < 0)
-			new_index = skb_tx_hash(dev, skb);
+			new_index = skb_tx_hash(dev, sb_dev, skb);
 
 		if (queue_index != new_index && sk &&
 		    sk_fullsock(sk) &&
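
The `sb_dev = sb_dev ? : dev;` line uses the GNU conditional-with-omitted-middle-operand extension: `a ? : b` is equivalent to `a ? a : b` with `a` evaluated once. Here it means "fall back to the root device when no subordinate device was supplied", so every later sb_dev dereference is safe. A two-line illustration (compiles with GCC or Clang, which accept the extension by default):

#include <stdio.h>

int main(void)
{
	const char *sb_dev = NULL, *dev = "eth0";

	/* GNU "elvis" operator: a ? : b  ==  a ? a : b. */
	sb_dev = sb_dev ? : dev;
	printf("%s\n", sb_dev);	/* prints "eth0" */
	return 0;
}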
@@ -3637,9 +3643,15 @@ static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
 	return queue_index;
 }
 
+static u16 __netdev_pick_tx(struct net_device *dev,
+			    struct sk_buff *skb)
+{
+	return ___netdev_pick_tx(dev, skb, NULL);
+}
+
 struct netdev_queue *netdev_pick_tx(struct net_device *dev,
 				    struct sk_buff *skb,
-				    void *accel_priv)
+				    struct net_device *sb_dev)
 {
 	int queue_index = 0;
 
@@ -3654,10 +3666,10 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
 		const struct net_device_ops *ops = dev->netdev_ops;
 
 		if (ops->ndo_select_queue)
-			queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
+			queue_index = ops->ndo_select_queue(dev, skb, sb_dev,
 							    __netdev_pick_tx);
 		else
-			queue_index = __netdev_pick_tx(dev, skb);
+			queue_index = ___netdev_pick_tx(dev, skb, sb_dev);
 
 		queue_index = netdev_cap_txqueue(dev, queue_index);
 	}
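
These two hunks explain why both __netdev_pick_tx() and ___netdev_pick_tx() exist: drivers' ndo_select_queue() implementations still receive a fallback callback with the old two-argument signature, so __netdev_pick_tx() survives as a thin wrapper that forwards to the three-argument version with a NULL subordinate device, while the in-stack path calls ___netdev_pick_tx() with sb_dev directly. A compact userspace model of that compatibility-wrapper pattern, with stub types standing in for the kernel structures:

#include <stdio.h>

struct net_device { int id; };
struct sk_buff { int prio; };

/* Legacy callback signature that existing drivers still expect. */
typedef unsigned short (*select_queue_fallback_t)(struct net_device *,
						  struct sk_buff *);

static unsigned short pick_tx3(struct net_device *dev, struct sk_buff *skb,
			       struct net_device *sb_dev)
{
	sb_dev = sb_dev ? sb_dev : dev;	/* same fallback as the patch */
	return (unsigned short)(sb_dev->id + skb->prio);	/* toy policy */
}

/* Thin wrapper preserving the two-argument signature. */
static unsigned short pick_tx2(struct net_device *dev, struct sk_buff *skb)
{
	return pick_tx3(dev, skb, NULL);
}

int main(void)
{
	struct net_device dev = { 0 }, sb = { 8 };
	struct sk_buff skb = { 1 };
	select_queue_fallback_t fallback = pick_tx2;	/* handed to drivers */

	printf("%u %u\n", fallback(&dev, &skb), pick_tx3(&dev, &skb, &sb));
	return 0;
}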
@@ -3669,7 +3681,7 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
 /**
  *	__dev_queue_xmit - transmit a buffer
  *	@skb: buffer to transmit
- *	@accel_priv: private data used for L2 forwarding offload
+ *	@sb_dev: suboordinate device used for L2 forwarding offload
  *
  *	Queue a buffer for transmission to a network device. The caller must
  *	have set the device and priority and built the buffer before calling
@@ -3692,7 +3704,7 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
  *	the BH enable code must have IRQs enabled so that it will not deadlock.
  *			--BLG
  */
-static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
+static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
 {
 	struct net_device *dev = skb->dev;
 	struct netdev_queue *txq;
@@ -3731,7 +3743,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
 	else
 		skb_dst_force(skb);
 
-	txq = netdev_pick_tx(dev, skb, accel_priv);
+	txq = netdev_pick_tx(dev, skb, sb_dev);
 	q = rcu_dereference_bh(txq->qdisc);
 
 	trace_net_dev_queue(skb);
@@ -3805,9 +3817,9 @@ int dev_queue_xmit(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(dev_queue_xmit);
 
-int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
+int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev)
 {
-	return __dev_queue_xmit(skb, accel_priv);
+	return __dev_queue_xmit(skb, sb_dev);
 }
 EXPORT_SYMBOL(dev_queue_xmit_accel);
 
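For callers, the visible change is that dev_queue_xmit_accel() now takes the upper net_device itself rather than an opaque accel_priv pointer. A hypothetical L2-forwarding-offload transmit path might look like the sketch below; the function and the lower_dev_of() helper are invented for illustration, and only the final dev_queue_xmit_accel() call is the API from this patch:

/* Hypothetical upper-device transmit path (names invented). The skb
 * is retargeted at the lower device while the upper device rides
 * along as sb_dev, so skb_tx_hash()/get_xps_queue() consult the
 * upper device's tc_to_txq and XPS maps during queue selection. */
static int forward_via_lower_dev(struct sk_buff *skb,
				 struct net_device *upper)
{
	skb->dev = lower_dev_of(upper);	/* invented helper */
	return dev_queue_xmit_accel(skb, upper);
}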