-rw-r--r--  drivers/net/bonding/bond_main.c                  |  3
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c  |  3
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h  |  3
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c    | 33
-rw-r--r--  drivers/net/ethernet/lantiq_etop.c               |  3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c       |  3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h     |  3
-rw-r--r--  drivers/net/ethernet/tile/tilegx.c               |  3
-rw-r--r--  drivers/net/macvlan.c                            |  9
-rw-r--r--  drivers/net/team/team.c                          |  3
-rw-r--r--  drivers/net/tun.c                                |  3
-rw-r--r--  drivers/net/wireless/mwifiex/main.c              |  3
-rw-r--r--  drivers/staging/bcm/Bcmnet.c                     |  3
-rw-r--r--  drivers/staging/netlogic/xlr_net.c               |  3
-rw-r--r--  drivers/staging/rtl8188eu/os_dep/os_intfs.c      |  3
-rw-r--r--  include/linux/netdevice.h                        | 12
-rw-r--r--  net/core/dev.c                                   | 29
-rw-r--r--  net/core/flow_dissector.c                        | 10
-rw-r--r--  net/core/netpoll.c                               |  2
-rw-r--r--  net/mac80211/iface.c                             |  6
-rw-r--r--  net/sched/sch_generic.c                          |  2
21 files changed, 80 insertions, 62 deletions
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 398e299ee1bd..4b8c58b0ec24 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3732,7 +3732,8 @@ static inline int bond_slave_override(struct bonding *bond,
 }
 
 
-static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb,
+			     void *accel_priv)
 {
 	/*
 	 * This helper function exists to help dev_pick_tx get the correct
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 550412088dd0..bf811565ee24 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1833,7 +1833,8 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
 	bnx2x_napi_disable_cnic(bp);
 }
 
-u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
+u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
+		       void *accel_priv)
 {
 	struct bnx2x *bp = netdev_priv(dev);
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index da8fcaa74495..41f3ca5ad972 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -524,7 +524,8 @@ int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac);
 int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos);
 
 /* select_queue callback */
-u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb);
+u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
+		       void *accel_priv);
 
 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
 					struct bnx2x_fastpath *fp,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index cc06854296a3..5bcc870f8367 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -6827,12 +6827,20 @@ static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
 	return __ixgbe_maybe_stop_tx(tx_ring, size);
 }
 
-#ifdef IXGBE_FCOE
-static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
+			      void *accel_priv)
 {
+	struct ixgbe_fwd_adapter *fwd_adapter = accel_priv;
+#ifdef IXGBE_FCOE
 	struct ixgbe_adapter *adapter;
 	struct ixgbe_ring_feature *f;
 	int txq;
+#endif
+
+	if (fwd_adapter)
+		return skb->queue_mapping + fwd_adapter->tx_base_queue;
+
+#ifdef IXGBE_FCOE
 
 	/*
 	 * only execute the code below if protocol is FCoE
@@ -6858,9 +6866,11 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
 		txq -= f->indices;
 
 	return txq + f->offset;
+#else
+	return __netdev_pick_tx(dev, skb);
+#endif
 }
 
-#endif
 netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
 				  struct ixgbe_adapter *adapter,
 				  struct ixgbe_ring *tx_ring)
@@ -7629,27 +7639,11 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
 	kfree(fwd_adapter);
 }
 
-static netdev_tx_t ixgbe_fwd_xmit(struct sk_buff *skb,
-				  struct net_device *dev,
-				  void *priv)
-{
-	struct ixgbe_fwd_adapter *fwd_adapter = priv;
-	unsigned int queue;
-	struct ixgbe_ring *tx_ring;
-
-	queue = skb->queue_mapping + fwd_adapter->tx_base_queue;
-	tx_ring = fwd_adapter->real_adapter->tx_ring[queue];
-
-	return __ixgbe_xmit_frame(skb, dev, tx_ring);
-}
-
 static const struct net_device_ops ixgbe_netdev_ops = {
 	.ndo_open		= ixgbe_open,
 	.ndo_stop		= ixgbe_close,
 	.ndo_start_xmit		= ixgbe_xmit_frame,
-#ifdef IXGBE_FCOE
 	.ndo_select_queue	= ixgbe_select_queue,
-#endif
 	.ndo_set_rx_mode	= ixgbe_set_rx_mode,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_set_mac_address	= ixgbe_set_mac,
@@ -7689,7 +7683,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
 	.ndo_bridge_getlink	= ixgbe_ndo_bridge_getlink,
 	.ndo_dfwd_add_station	= ixgbe_fwd_add,
 	.ndo_dfwd_del_station	= ixgbe_fwd_del,
-	.ndo_dfwd_start_xmit	= ixgbe_fwd_xmit,
 };
 
 /**
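
Illustrative note, not part of the patch: the queue that ixgbe_select_queue() now returns for an accelerated macvlan (skb->queue_mapping + fwd_adapter->tx_base_queue) already lands the packet on the ring that the removed ixgbe_fwd_xmit() used to look up by hand, so the regular .ndo_start_xmit path can carry the traffic. The sketch below is hypothetical (example_xmit is not in the driver); ixgbe_xmit_frame_ring() and the adapter's tx_ring[] array are the real symbols visible in the hunks above.

    /* Hypothetical helper showing why a dedicated dfwd xmit routine is no
     * longer needed once the queue is chosen in ndo_select_queue().
     */
    static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *netdev)
    {
    	struct ixgbe_adapter *adapter = netdev_priv(netdev);
    	struct ixgbe_ring *tx_ring;

    	/* netdev_pick_tx() stored the value returned by ixgbe_select_queue(),
    	 * including fwd_adapter->tx_base_queue for offloaded macvlan traffic.
    	 */
    	tx_ring = adapter->tx_ring[skb->queue_mapping];

    	return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
    }
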
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 6a6c1f76d8e0..ec94a20d7099 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -619,7 +619,8 @@ ltq_etop_set_multicast_list(struct net_device *dev)
 }
 
 static u16
-ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb)
+ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb,
+		      void *accel_priv)
 {
 	/* we are currently only using the first queue */
 	return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index f54ebd5a1702..a7fcd593b2db 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -592,7 +592,8 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
 	}
 }
 
-u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
+u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
+			 void *accel_priv)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	u16 rings_p_up = priv->num_tx_rings_p_up;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index f3758de59c05..d5758adceaa2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -714,7 +714,8 @@ int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
 int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
 
 void mlx4_en_tx_irq(struct mlx4_cq *mcq);
-u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
+u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
+			 void *accel_priv);
 netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
 
 int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 628b736e5ae7..0e9fb3301b11 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -2080,7 +2080,8 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
 }
 
 /* Return subqueue id on this core (one per core). */
-static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb,
+				 void *accel_priv)
 {
 	return smp_processor_id();
 }
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 5360f73c9817..bc8faaec33f5 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -299,7 +299,7 @@ netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
 
 	if (vlan->fwd_priv) {
 		skb->dev = vlan->lowerdev;
-		ret = dev_hard_start_xmit(skb, skb->dev, NULL, vlan->fwd_priv);
+		ret = dev_queue_xmit_accel(skb, vlan->fwd_priv);
 	} else {
 		ret = macvlan_queue_xmit(skb, dev);
 	}
@@ -365,10 +365,8 @@ static int macvlan_open(struct net_device *dev)
 		 */
 		if (IS_ERR_OR_NULL(vlan->fwd_priv)) {
 			vlan->fwd_priv = NULL;
-		} else {
-			dev->features &= ~NETIF_F_LLTX;
+		} else
 			return 0;
-		}
 	}
 
 	err = -EBUSY;
@@ -702,8 +700,7 @@ static netdev_features_t macvlan_fix_features(struct net_device *dev,
 	features = netdev_increment_features(vlan->lowerdev->features,
 					     features,
 					     mask);
-	if (!vlan->fwd_priv)
-		features |= NETIF_F_LLTX;
+	features |= NETIF_F_LLTX;
 
 	return features;
 }
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 736050d6b451..b75ae5bde673 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1647,7 +1647,8 @@ static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
 	return NETDEV_TX_OK;
 }
 
-static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb,
+			     void *accel_priv)
 {
 	/*
 	 * This helper function exists to help dev_pick_tx get the correct
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 7c8343a4f918..ecec8029c5e8 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -348,7 +348,8 @@ unlock:
  * different rxq no. here. If we could not get rxhash, then we would
  * hope the rxq no. may help here.
  */
-static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
+			    void *accel_priv)
 {
 	struct tun_struct *tun = netdev_priv(dev);
 	struct tun_flow_entry *e;
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 78e8a6666cc6..8bb8988c435c 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -746,7 +746,8 @@ static struct net_device_stats *mwifiex_get_stats(struct net_device *dev)
 }
 
 static u16
-mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb)
+mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb,
+				void *accel_priv)
 {
 	skb->priority = cfg80211_classify8021d(skb);
 	return mwifiex_1d_to_wmm_queue[skb->priority];
diff --git a/drivers/staging/bcm/Bcmnet.c b/drivers/staging/bcm/Bcmnet.c
index 53fee2f9a498..8dfdd2732bdc 100644
--- a/drivers/staging/bcm/Bcmnet.c
+++ b/drivers/staging/bcm/Bcmnet.c
@@ -39,7 +39,8 @@ static INT bcm_close(struct net_device *dev)
 	return 0;
 }
 
-static u16 bcm_select_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 bcm_select_queue(struct net_device *dev, struct sk_buff *skb,
+			    void *accel_priv)
 {
 	return ClassifyPacket(netdev_priv(dev), skb);
 }
diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c
index 235d2b1ec593..eedffed17e39 100644
--- a/drivers/staging/netlogic/xlr_net.c
+++ b/drivers/staging/netlogic/xlr_net.c
@@ -306,7 +306,8 @@ static netdev_tx_t xlr_net_start_xmit(struct sk_buff *skb,
 	return NETDEV_TX_OK;
 }
 
-static u16 xlr_net_select_queue(struct net_device *ndev, struct sk_buff *skb)
+static u16 xlr_net_select_queue(struct net_device *ndev, struct sk_buff *skb,
+				void *accel_priv)
 {
 	return (u16)smp_processor_id();
 }
diff --git a/drivers/staging/rtl8188eu/os_dep/os_intfs.c b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
index 17659bb04bef..dd69e344e409 100644
--- a/drivers/staging/rtl8188eu/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
@@ -652,7 +652,8 @@ static unsigned int rtw_classify8021d(struct sk_buff *skb)
 	return dscp >> 5;
 }
 
-static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb,
+			    void *accel_priv)
 {
 	struct adapter *padapter = rtw_netdev_priv(dev);
 	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 5faaadb0c74f..ce2a1f5f9a1e 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -769,7 +769,8 @@ struct netdev_phys_port_id {
  *	(can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
  *	Required can not be NULL.
  *
- * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb);
+ * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
+ *                         void *accel_priv);
  *	Called to decide which queue to when device supports multiple
  *	transmit queues.
  *
@@ -990,7 +991,8 @@ struct net_device_ops {
 	netdev_tx_t		(*ndo_start_xmit) (struct sk_buff *skb,
 						   struct net_device *dev);
 	u16			(*ndo_select_queue)(struct net_device *dev,
-						    struct sk_buff *skb);
+						    struct sk_buff *skb,
+						    void *accel_priv);
 	void			(*ndo_change_rx_flags)(struct net_device *dev,
 						       int flags);
 	void			(*ndo_set_rx_mode)(struct net_device *dev);
@@ -1529,7 +1531,8 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
 }
 
 struct netdev_queue *netdev_pick_tx(struct net_device *dev,
-				    struct sk_buff *skb);
+				    struct sk_buff *skb,
+				    void *accel_priv);
 u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb);
 
 /*
@@ -1819,6 +1822,7 @@ int dev_close(struct net_device *dev);
 void dev_disable_lro(struct net_device *dev);
 int dev_loopback_xmit(struct sk_buff *newskb);
 int dev_queue_xmit(struct sk_buff *skb);
+int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv);
 int register_netdevice(struct net_device *dev);
 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
 void unregister_netdevice_many(struct list_head *head);
@@ -2426,7 +2430,7 @@ int dev_change_carrier(struct net_device *, bool new_carrier);
 int dev_get_phys_port_id(struct net_device *dev,
			 struct netdev_phys_port_id *ppid);
 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
-			struct netdev_queue *txq, void *accel_priv);
+			struct netdev_queue *txq);
 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
 
 extern int netdev_budget;
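
Illustrative note, not part of the patch: for drivers outside this series the conversion is mechanical, since the callback only gains a third argument, and a device with no L2 forwarding offload can ignore it. A minimal hypothetical conversion, using only the prototype and the exported __netdev_pick_tx() shown above:

    /* Hypothetical driver callback using the new three-argument prototype. */
    static u16 foo_select_queue(struct net_device *dev, struct sk_buff *skb,
    			    void *accel_priv)
    {
    	/* accel_priv is only non-NULL on the dev_queue_xmit_accel() path,
    	 * i.e. for upper devices that obtained a handle from
    	 * ndo_dfwd_add_station(); drivers without that offload ignore it.
    	 */
    	return __netdev_pick_tx(dev, skb);
    }
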
diff --git a/net/core/dev.c b/net/core/dev.c
index 4fc17221545d..0ce469e5ec80 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2539,7 +2539,7 @@ static inline int skb_needs_linearize(struct sk_buff *skb,
 }
 
 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
-			struct netdev_queue *txq, void *accel_priv)
+			struct netdev_queue *txq)
 {
 	const struct net_device_ops *ops = dev->netdev_ops;
 	int rc = NETDEV_TX_OK;
@@ -2605,13 +2605,10 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 			dev_queue_xmit_nit(skb, dev);
 
 		skb_len = skb->len;
-		if (accel_priv)
-			rc = ops->ndo_dfwd_start_xmit(skb, dev, accel_priv);
-		else
-			rc = ops->ndo_start_xmit(skb, dev);
+		rc = ops->ndo_start_xmit(skb, dev);
 
 		trace_net_dev_xmit(skb, rc, dev, skb_len);
-		if (rc == NETDEV_TX_OK && txq)
+		if (rc == NETDEV_TX_OK)
 			txq_trans_update(txq);
 		return rc;
 	}
@@ -2627,10 +2624,7 @@ gso:
 			dev_queue_xmit_nit(nskb, dev);
 
 		skb_len = nskb->len;
-		if (accel_priv)
-			rc = ops->ndo_dfwd_start_xmit(nskb, dev, accel_priv);
-		else
-			rc = ops->ndo_start_xmit(nskb, dev);
+		rc = ops->ndo_start_xmit(nskb, dev);
 		trace_net_dev_xmit(nskb, rc, dev, skb_len);
 		if (unlikely(rc != NETDEV_TX_OK)) {
 			if (rc & ~NETDEV_TX_MASK)
@@ -2811,7 +2805,7 @@ EXPORT_SYMBOL(dev_loopback_xmit);
  *      the BH enable code must have IRQs enabled so that it will not deadlock.
  *          --BLG
  */
-int dev_queue_xmit(struct sk_buff *skb)
+int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
 {
 	struct net_device *dev = skb->dev;
 	struct netdev_queue *txq;
@@ -2827,7 +2821,7 @@ int dev_queue_xmit(struct sk_buff *skb)
 
 	skb_update_prio(skb);
 
-	txq = netdev_pick_tx(dev, skb);
+	txq = netdev_pick_tx(dev, skb, accel_priv);
 	q = rcu_dereference_bh(txq->qdisc);
 
 #ifdef CONFIG_NET_CLS_ACT
@@ -2863,7 +2857,7 @@ int dev_queue_xmit(struct sk_buff *skb)
 
 			if (!netif_xmit_stopped(txq)) {
 				__this_cpu_inc(xmit_recursion);
-				rc = dev_hard_start_xmit(skb, dev, txq, NULL);
+				rc = dev_hard_start_xmit(skb, dev, txq);
 				__this_cpu_dec(xmit_recursion);
 				if (dev_xmit_complete(rc)) {
 					HARD_TX_UNLOCK(dev, txq);
@@ -2892,8 +2886,19 @@ out:
 	rcu_read_unlock_bh();
 	return rc;
 }
+
+int dev_queue_xmit(struct sk_buff *skb)
+{
+	return __dev_queue_xmit(skb, NULL);
+}
 EXPORT_SYMBOL(dev_queue_xmit);
 
+int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
+{
+	return __dev_queue_xmit(skb, accel_priv);
+}
+EXPORT_SYMBOL(dev_queue_xmit_accel);
+
 
 /*=======================================================================
 			Receiver routines
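
Illustrative note, not part of the patch: the macvlan hunk earlier is the in-tree caller of the new dev_queue_xmit_accel(). The general pattern for an upper device that obtained a forwarding handle from ndo_dfwd_add_station() looks roughly like the sketch below; the struct, its fields, and upper_start_xmit() are hypothetical, while dev_queue_xmit() and dev_queue_xmit_accel() are the two entry points added above.

    struct upper_priv {			/* hypothetical upper device state */
    	struct net_device *lowerdev;
    	void *fwd_handle;		/* returned by ndo_dfwd_add_station() */
    };

    static netdev_tx_t upper_start_xmit(struct sk_buff *skb, struct net_device *dev)
    {
    	struct upper_priv *priv = netdev_priv(dev);
    	int ret;

    	/* Transmit through the lower device in both cases. */
    	skb->dev = priv->lowerdev;
    	if (priv->fwd_handle)
    		/* __dev_queue_xmit() hands fwd_handle to the lower driver's
    		 * ndo_select_queue(), which maps it onto the reserved queues.
    		 */
    		ret = dev_queue_xmit_accel(skb, priv->fwd_handle);
    	else
    		ret = dev_queue_xmit(skb);

    	/* detailed error and statistics handling omitted in this sketch */
    	if (unlikely(ret != NET_XMIT_SUCCESS && ret != NET_XMIT_CN))
    		dev->stats.tx_dropped++;

    	return NETDEV_TX_OK;
    }
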
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index d6ef17322500..2fc5beaf5783 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -395,17 +395,21 @@ u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
 EXPORT_SYMBOL(__netdev_pick_tx);
 
 struct netdev_queue *netdev_pick_tx(struct net_device *dev,
-				    struct sk_buff *skb)
+				    struct sk_buff *skb,
+				    void *accel_priv)
 {
 	int queue_index = 0;
 
 	if (dev->real_num_tx_queues != 1) {
 		const struct net_device_ops *ops = dev->netdev_ops;
 		if (ops->ndo_select_queue)
-			queue_index = ops->ndo_select_queue(dev, skb);
+			queue_index = ops->ndo_select_queue(dev, skb,
+							    accel_priv);
 		else
 			queue_index = __netdev_pick_tx(dev, skb);
-		queue_index = dev_cap_txqueue(dev, queue_index);
+
+		if (!accel_priv)
+			queue_index = dev_cap_txqueue(dev, queue_index);
 	}
 
 	skb_set_queue_mapping(skb, queue_index);
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 303097874633..19fe9c717ced 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -375,7 +375,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
 	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
 		struct netdev_queue *txq;
 
-		txq = netdev_pick_tx(dev, skb);
+		txq = netdev_pick_tx(dev, skb, NULL);
 
 		/* try until next clock tick */
 		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 36c3a4cbcabf..a0757913046e 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1061,7 +1061,8 @@ static void ieee80211_uninit(struct net_device *dev)
 }
 
 static u16 ieee80211_netdev_select_queue(struct net_device *dev,
-					 struct sk_buff *skb)
+					 struct sk_buff *skb,
+					 void *accel_priv)
 {
 	return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb);
 }
@@ -1078,7 +1079,8 @@ static const struct net_device_ops ieee80211_dataif_ops = {
 };
 
 static u16 ieee80211_monitor_select_queue(struct net_device *dev,
-					  struct sk_buff *skb)
+					  struct sk_buff *skb,
+					  void *accel_priv)
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	struct ieee80211_local *local = sdata->local;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 922a09406ba7..7fc899a943a8 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -126,7 +126,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
 
 	HARD_TX_LOCK(dev, txq, smp_processor_id());
 	if (!netif_xmit_frozen_or_stopped(txq))
-		ret = dev_hard_start_xmit(skb, dev, txq, NULL);
+		ret = dev_hard_start_xmit(skb, dev, txq);
 
 	HARD_TX_UNLOCK(dev, txq);
 