-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 19
-rw-r--r--  drivers/net/macvlan.c                         | 10
-rw-r--r--  include/linux/netdevice.h                     |  4
-rw-r--r--  net/core/dev.c                                | 58
4 files changed, 45 insertions, 46 deletions
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 80225af2acb1..abb176df2e7f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -8208,20 +8208,17 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
 					      input, common, ring->queue_index);
 }
 
+#ifdef IXGBE_FCOE
 static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
 			      void *accel_priv, select_queue_fallback_t fallback)
 {
-	struct ixgbe_fwd_adapter *fwd_adapter = accel_priv;
-#ifdef IXGBE_FCOE
 	struct ixgbe_adapter *adapter;
 	struct ixgbe_ring_feature *f;
-#endif
 	int txq;
 
-	if (fwd_adapter) {
-		u8 tc = netdev_get_num_tc(dev) ?
-			netdev_get_prio_tc_map(dev, skb->priority) : 0;
-		struct net_device *vdev = fwd_adapter->netdev;
+	if (accel_priv) {
+		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
+		struct net_device *vdev = accel_priv;
 
 		txq = vdev->tc_to_txq[tc].offset;
 		txq += reciprocal_scale(skb_get_hash(skb),
@@ -8230,8 +8227,6 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
 		return txq;
 	}
 
-#ifdef IXGBE_FCOE
-
 	/*
 	 * only execute the code below if protocol is FCoE
 	 * or FIP and we have FCoE enabled on the adapter
@@ -8257,11 +8252,9 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
 		txq -= f->indices;
 
 	return txq + f->offset;
-#else
-	return fallback(dev, skb);
-#endif
 }
 
+#endif
 static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
 			       struct xdp_frame *xdpf)
 {
@@ -10058,7 +10051,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
 	.ndo_open		= ixgbe_open,
 	.ndo_stop		= ixgbe_close,
 	.ndo_start_xmit		= ixgbe_xmit_frame,
-	.ndo_select_queue	= ixgbe_select_queue,
 	.ndo_set_rx_mode	= ixgbe_set_rx_mode,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_set_mac_address	= ixgbe_set_mac,
@@ -10081,6 +10073,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
 	.ndo_poll_controller	= ixgbe_netpoll,
 #endif
 #ifdef IXGBE_FCOE
+	.ndo_select_queue	= ixgbe_select_queue,
 	.ndo_fcoe_ddp_setup	= ixgbe_fcoe_ddp_get,
 	.ndo_fcoe_ddp_target	= ixgbe_fcoe_ddp_target,
 	.ndo_fcoe_ddp_done	= ixgbe_fcoe_ddp_put,
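
With the ixgbe rework above, .ndo_select_queue is only registered when FCoE is built in, and its third argument is now interpreted as the subordinate net_device itself rather than a driver-private ixgbe_fwd_adapter. A minimal sketch of that convention for a hypothetical driver (the function name is invented; netdev_get_prio_tc_map(), skb_get_hash() and reciprocal_scale() are existing kernel helpers):

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static u16 example_select_queue(struct net_device *dev, struct sk_buff *skb,
				void *accel_priv,
				select_queue_fallback_t fallback)
{
	struct net_device *sb_dev = accel_priv;	/* subordinate dev or NULL */

	if (sb_dev) {
		/* Scale the flow hash into the subordinate device's
		 * slice of the lower device's TX queues. */
		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);

		return sb_dev->tc_to_txq[tc].offset +
		       reciprocal_scale(skb_get_hash(skb),
					sb_dev->tc_to_txq[tc].count);
	}

	return fallback(dev, skb);
}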
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index adde8fc45588..401e1d1ce1ec 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -514,7 +514,6 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
 	const struct macvlan_dev *vlan = netdev_priv(dev);
 	const struct macvlan_port *port = vlan->port;
 	const struct macvlan_dev *dest;
-	void *accel_priv = NULL;
 
 	if (vlan->mode == MACVLAN_MODE_BRIDGE) {
 		const struct ethhdr *eth = (void *)skb->data;
@@ -533,15 +532,10 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
 			return NET_XMIT_SUCCESS;
 		}
 	}
-
-	/* For packets that are non-multicast and not bridged we will pass
-	 * the necessary information so that the lowerdev can distinguish
-	 * the source of the packets via the accel_priv value.
-	 */
-	accel_priv = vlan->accel_priv;
 xmit_world:
 	skb->dev = vlan->lowerdev;
-	return dev_queue_xmit_accel(skb, accel_priv);
+	return dev_queue_xmit_accel(skb,
+				    netdev_get_sb_channel(dev) ? dev : NULL);
 }
 
 static inline netdev_tx_t macvlan_netpoll_send_skb(struct macvlan_dev *vlan, struct sk_buff *skb)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index b1ff77276bc4..fda0bcda7a42 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2103,7 +2103,7 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
 
 struct netdev_queue *netdev_pick_tx(struct net_device *dev,
 				    struct sk_buff *skb,
-				    void *accel_priv);
+				    struct net_device *sb_dev);
 
 /* returns the headroom that the master device needs to take in account
  * when forwarding to this dev
@@ -2568,7 +2568,7 @@ void dev_close_many(struct list_head *head, bool unlink);
 void dev_disable_lro(struct net_device *dev);
 int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
 int dev_queue_xmit(struct sk_buff *skb);
-int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv);
+int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev);
 int dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
 int register_netdevice(struct net_device *dev);
 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
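
With the prototypes above, dev_queue_xmit_accel() takes the subordinate net_device directly. A hedged sketch of the caller side, modeled on the macvlan change earlier in this patch; the driver name and priv layout are hypothetical, and netdev_get_sb_channel() is the helper macvlan uses to test whether the upper device owns a subordinate channel:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct upper_priv {
	struct net_device *lowerdev;	/* hypothetical priv layout */
};

static int upper_queue_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct upper_priv *priv = netdev_priv(dev);

	skb->dev = priv->lowerdev;
	/* Pass ourselves as sb_dev only if we own a subordinate channel;
	 * otherwise queue selection falls back to the plain path. */
	return dev_queue_xmit_accel(skb,
				    netdev_get_sb_channel(dev) ? dev : NULL);
}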
diff --git a/net/core/dev.c b/net/core/dev.c
index cc1d6bba017a..09a7cc2f3c55 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2786,24 +2786,26 @@ EXPORT_SYMBOL(netif_device_attach);
  * Returns a Tx hash based on the given packet descriptor a Tx queues' number
  * to be used as a distribution range.
  */
-static u16 skb_tx_hash(const struct net_device *dev, struct sk_buff *skb)
+static u16 skb_tx_hash(const struct net_device *dev,
+		       const struct net_device *sb_dev,
+		       struct sk_buff *skb)
 {
 	u32 hash;
 	u16 qoffset = 0;
 	u16 qcount = dev->real_num_tx_queues;
 
+	if (dev->num_tc) {
+		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
+
+		qoffset = sb_dev->tc_to_txq[tc].offset;
+		qcount = sb_dev->tc_to_txq[tc].count;
+	}
+
 	if (skb_rx_queue_recorded(skb)) {
 		hash = skb_get_rx_queue(skb);
 		while (unlikely(hash >= qcount))
 			hash -= qcount;
-		return hash;
-	}
-
-	if (dev->num_tc) {
-		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
-
-		qoffset = dev->tc_to_txq[tc].offset;
-		qcount = dev->tc_to_txq[tc].count;
+		return hash + qoffset;
 	}
 
 	return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
@@ -3573,7 +3575,8 @@ static int __get_xps_queue_idx(struct net_device *dev, struct sk_buff *skb,
 }
 #endif
 
-static int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
+static int get_xps_queue(struct net_device *dev, struct net_device *sb_dev,
+			 struct sk_buff *skb)
 {
 #ifdef CONFIG_XPS
 	struct xps_dev_maps *dev_maps;
@@ -3587,7 +3590,7 @@ static int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
 	if (!static_key_false(&xps_rxqs_needed))
 		goto get_cpus_map;
 
-	dev_maps = rcu_dereference(dev->xps_rxqs_map);
+	dev_maps = rcu_dereference(sb_dev->xps_rxqs_map);
 	if (dev_maps) {
 		int tci = sk_rx_queue_get(sk);
 
@@ -3598,7 +3601,7 @@ static int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
 
 get_cpus_map:
 	if (queue_index < 0) {
-		dev_maps = rcu_dereference(dev->xps_cpus_map);
+		dev_maps = rcu_dereference(sb_dev->xps_cpus_map);
 		if (dev_maps) {
 			unsigned int tci = skb->sender_cpu - 1;
 
@@ -3614,17 +3617,20 @@ get_cpus_map:
 #endif
 }
 
-static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
+static u16 ___netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
+			     struct net_device *sb_dev)
 {
 	struct sock *sk = skb->sk;
 	int queue_index = sk_tx_queue_get(sk);
 
+	sb_dev = sb_dev ? : dev;
+
 	if (queue_index < 0 || skb->ooo_okay ||
 	    queue_index >= dev->real_num_tx_queues) {
-		int new_index = get_xps_queue(dev, skb);
+		int new_index = get_xps_queue(dev, sb_dev, skb);
 
 		if (new_index < 0)
-			new_index = skb_tx_hash(dev, skb);
+			new_index = skb_tx_hash(dev, sb_dev, skb);
 
 		if (queue_index != new_index && sk &&
 		    sk_fullsock(sk) &&
@@ -3637,9 +3643,15 @@ static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
 	return queue_index;
 }
 
+static u16 __netdev_pick_tx(struct net_device *dev,
+			    struct sk_buff *skb)
+{
+	return ___netdev_pick_tx(dev, skb, NULL);
+}
+
 struct netdev_queue *netdev_pick_tx(struct net_device *dev,
 				    struct sk_buff *skb,
-				    void *accel_priv)
+				    struct net_device *sb_dev)
 {
 	int queue_index = 0;
 
@@ -3654,10 +3666,10 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
 		const struct net_device_ops *ops = dev->netdev_ops;
 
 		if (ops->ndo_select_queue)
-			queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
+			queue_index = ops->ndo_select_queue(dev, skb, sb_dev,
 							    __netdev_pick_tx);
 		else
-			queue_index = __netdev_pick_tx(dev, skb);
+			queue_index = ___netdev_pick_tx(dev, skb, sb_dev);
 
 		queue_index = netdev_cap_txqueue(dev, queue_index);
 	}
@@ -3669,7 +3681,7 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
 /**
  * __dev_queue_xmit - transmit a buffer
  * @skb: buffer to transmit
- * @accel_priv: private data used for L2 forwarding offload
+ * @sb_dev: subordinate device used for L2 forwarding offload
  *
  * Queue a buffer for transmission to a network device. The caller must
  * have set the device and priority and built the buffer before calling
@@ -3692,7 +3704,7 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
 * the BH enable code must have IRQs enabled so that it will not deadlock.
 * --BLG
 */
-static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
+static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
 {
 	struct net_device *dev = skb->dev;
 	struct netdev_queue *txq;
@@ -3731,7 +3743,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
 	else
 		skb_dst_force(skb);
 
-	txq = netdev_pick_tx(dev, skb, accel_priv);
+	txq = netdev_pick_tx(dev, skb, sb_dev);
 	q = rcu_dereference_bh(txq->qdisc);
 
 	trace_net_dev_queue(skb);
@@ -3805,9 +3817,9 @@ int dev_queue_xmit(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(dev_queue_xmit);
 
-int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
+int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev)
 {
-	return __dev_queue_xmit(skb, accel_priv);
+	return __dev_queue_xmit(skb, sb_dev);
 }
 EXPORT_SYMBOL(dev_queue_xmit_accel);
 
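Note the behavioral change folded into skb_tx_hash(): the tc_to_txq window is now computed up front, so the rx-queue-recorded path also lands inside the subordinate device's queue range (return hash + qoffset) instead of the device-wide range. A standalone illustration of the new math in plain C, with the kernel helpers open-coded; all names here are for illustration only:

#include <stdbool.h>
#include <stdint.h>

/* Open-coded reciprocal_scale(): map a 32-bit hash into [0, n). */
static uint16_t scale(uint32_t hash, uint32_t n)
{
	return (uint16_t)(((uint64_t)hash * n) >> 32);
}

/* New skb_tx_hash() logic: the tc window applies to both paths. */
static uint16_t tx_hash(uint16_t real_num_tx_queues,
			bool has_tc, uint16_t tc_offset, uint16_t tc_count,
			bool rx_recorded, uint16_t rx_queue,
			uint32_t flow_hash)
{
	uint16_t qoffset = 0;
	uint16_t qcount = real_num_tx_queues;	/* assumed > 0 */

	if (has_tc) {			/* window from sb_dev->tc_to_txq[tc] */
		qoffset = tc_offset;
		qcount = tc_count;
	}

	if (rx_recorded) {
		uint16_t hash = rx_queue;

		while (hash >= qcount)	/* fold into [0, qcount) */
			hash -= qcount;
		return hash + qoffset;	/* now offset into the window */
	}

	return scale(flow_hash, qcount) + qoffset;
}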