author      Alexander Duyck <alexander.h.duyck@intel.com>   2018-07-09 12:20:04 -0400
committer   Jeff Kirsher <jeffrey.t.kirsher@intel.com>      2018-07-09 16:57:25 -0400
commit      8ec56fc3c5ee6f9700adac190e9ce5b8859a58b6 (patch)
tree        717c1c9bc43906f1f9e306ef1e0c298ea23536f7
parent      4f49dec9075aa0277b8c9c657ec31e6361f88724 (diff)
net: allow fallback function to pass netdev
For most of these calls we can just pass NULL through to the fallback
function as the sb_dev. The only cases where we cannot are the cases where
we might be dealing with either an upper device or a driver that would
have configured things to support an sb_dev itself.

The only driver that has any significant change in this patch set should
be ixgbe as we can drop the redundant functionality that existed in both
the ndo_select_queue function and the fallback function that was passed
through to us.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
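For illustration only (not part of the commit itself): a minimal sketch of how a driver-side ndo_select_queue callback would invoke the fallback under the new three-argument select_queue_fallback_t, passing NULL as the sb_dev when the driver has no subordinate-device handling of its own. The driver and function names below are hypothetical placeholders.

#include <linux/netdevice.h>

/* Sketch of a driver queue-selection callback against the updated
 * select_queue_fallback_t, which now takes (dev, skb, sb_dev).
 * "foo" is a placeholder driver name, not part of this patch.
 */
static u16 foo_select_queue(struct net_device *dev, struct sk_buff *skb,
			    struct net_device *sb_dev,
			    select_queue_fallback_t fallback)
{
	/* This driver does not configure subordinate devices, so it
	 * simply lets the core pick the queue and passes NULL through
	 * as the sb_dev, mirroring most drivers touched by this patch.
	 */
	return fallback(dev, skb, NULL);
}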
-rw-r--r--   drivers/net/ethernet/amazon/ena/ena_netdev.c          2
-rw-r--r--   drivers/net/ethernet/broadcom/bcmsysport.c            4
-rw-r--r--   drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c       3
-rw-r--r--   drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c       2
-rw-r--r--   drivers/net/ethernet/hisilicon/hns/hns_enet.c         2
-rw-r--r--   drivers/net/ethernet/intel/ixgbe/ixgbe_main.c         4
-rw-r--r--   drivers/net/ethernet/mellanox/mlx4/en_tx.c            4
-rw-r--r--   drivers/net/ethernet/mellanox/mlx5/core/en_tx.c       2
-rw-r--r--   drivers/net/hyperv/netvsc_drv.c                       2
-rw-r--r--   drivers/net/net_failover.c                            2
-rw-r--r--   drivers/net/xen-netback/interface.c                   2
-rw-r--r--   include/linux/netdevice.h                             3
-rw-r--r--   net/core/dev.c                                        12
-rw-r--r--   net/packet/af_packet.c                                7
14 files changed, 24 insertions, 27 deletions
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index e3befb1f9204..c673ac2df65b 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -2224,7 +2224,7 @@ static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
 	if (skb_rx_queue_recorded(skb))
 		qid = skb_get_rx_queue(skb);
 	else
-		qid = fallback(dev, skb);
+		qid = fallback(dev, skb, NULL);
 
 	return qid;
 }
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 32f548e6431d..eb890c4b3b2d 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -2116,7 +2116,7 @@ static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
 	unsigned int q, port;
 
 	if (!netdev_uses_dsa(dev))
-		return fallback(dev, skb);
+		return fallback(dev, skb, NULL);
 
 	/* DSA tagging layer will have configured the correct queue */
 	q = BRCM_TAG_GET_QUEUE(queue);
@@ -2124,7 +2124,7 @@ static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
 	tx_ring = priv->ring_map[q + port * priv->per_port_num_tx_queues];
 
 	if (unlikely(!tx_ring))
-		return fallback(dev, skb);
+		return fallback(dev, skb, NULL);
 
 	return tx_ring->index;
 }
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index e4e1cf907ac6..5a727d4729da 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1933,7 +1933,8 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
 	}
 
 	/* select a non-FCoE queue */
-	return fallback(dev, skb) % (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
+	return fallback(dev, skb, NULL) %
+	       (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
 }
 
 void bnx2x_set_num_queues(struct bnx2x *bp)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 5dc5e5604f05..40cf8dc9f163 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -973,7 +973,7 @@ static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
 		return txq;
 	}
 
-	return fallback(dev, skb) % dev->real_num_tx_queues;
+	return fallback(dev, skb, NULL) % dev->real_num_tx_queues;
 }
 
 static int closest_timer(const struct sge *s, int time)
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index ff7a74ec8f11..948b3e0d18f4 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -2033,7 +2033,7 @@ hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb,
 	    is_multicast_ether_addr(eth_hdr->h_dest))
 		return 0;
 	else
-		return fallback(ndev, skb);
+		return fallback(ndev, skb, NULL);
 }
 
 static const struct net_device_ops hns_nic_netdev_ops = {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 8c7a68c57afa..bd6d9ea27b4b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -8237,11 +8237,11 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
 	case htons(ETH_P_FIP):
 		adapter = netdev_priv(dev);
 
-		if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
+		if (!sb_dev && (adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
 			break;
 		/* fall through */
 	default:
-		return fallback(dev, skb);
+		return fallback(dev, skb, sb_dev);
 	}
 
 	f = &adapter->ring_feature[RING_F_FCOE];
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index df2996618cd1..1857ee0f0871 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -695,9 +695,9 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
 	u16 rings_p_up = priv->num_tx_rings_p_up;
 
 	if (netdev_get_num_tc(dev))
-		return fallback(dev, skb);
+		return fallback(dev, skb, NULL);
 
-	return fallback(dev, skb) % rings_p_up;
+	return fallback(dev, skb, NULL) % rings_p_up;
 }
 
 static void mlx4_bf_copy(void __iomem *dst, const void *src,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index dfcc3710b65f..9106ea45e3cb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -115,7 +115,7 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
 			  select_queue_fallback_t fallback)
 {
 	struct mlx5e_priv *priv = netdev_priv(dev);
-	int channel_ix = fallback(dev, skb);
+	int channel_ix = fallback(dev, skb, NULL);
 	u16 num_channels;
 	int up = 0;
 
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 98c0107d6ca1..cf4f40a04194 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -345,7 +345,7 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
 		txq = vf_ops->ndo_select_queue(vf_netdev, skb,
 					       sb_dev, fallback);
 	else
-		txq = fallback(vf_netdev, skb);
+		txq = fallback(vf_netdev, skb, NULL);
 
 	/* Record the queue selected by VF so that it can be
 	 * used for common case where VF has more queues than
diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c
index 78b549698b7b..d00d42c845b7 100644
--- a/drivers/net/net_failover.c
+++ b/drivers/net/net_failover.c
@@ -131,7 +131,7 @@ static u16 net_failover_select_queue(struct net_device *dev,
 			txq = ops->ndo_select_queue(primary_dev, skb,
 						    sb_dev, fallback);
 		else
-			txq = fallback(primary_dev, skb);
+			txq = fallback(primary_dev, skb, NULL);
 
 		qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;
 
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 19c4c585f472..92274c237200 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -155,7 +155,7 @@ static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
 	unsigned int size = vif->hash.size;
 
 	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
-		return fallback(dev, skb) % dev->real_num_tx_queues;
+		return fallback(dev, skb, NULL) % dev->real_num_tx_queues;
 
 	xenvif_set_skb_hash(vif, skb);
 
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index bbf062c1ca8a..2daf2fa6554f 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -793,7 +793,8 @@ static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
 }
 
 typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
-				       struct sk_buff *skb);
+				       struct sk_buff *skb,
+				       struct net_device *sb_dev);
 
 enum tc_setup_type {
 	TC_SETUP_QDISC_MQPRIO,
diff --git a/net/core/dev.c b/net/core/dev.c
index a051ce27198b..e18d81837a6c 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3633,8 +3633,8 @@ u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
 }
 EXPORT_SYMBOL(dev_pick_tx_cpu_id);
 
-static u16 ___netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
-			     struct net_device *sb_dev)
+static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
+			    struct net_device *sb_dev)
 {
 	struct sock *sk = skb->sk;
 	int queue_index = sk_tx_queue_get(sk);
@@ -3659,12 +3659,6 @@ static u16 ___netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
 	return queue_index;
 }
 
-static u16 __netdev_pick_tx(struct net_device *dev,
-			    struct sk_buff *skb)
-{
-	return ___netdev_pick_tx(dev, skb, NULL);
-}
-
 struct netdev_queue *netdev_pick_tx(struct net_device *dev,
 				    struct sk_buff *skb,
 				    struct net_device *sb_dev)
@@ -3685,7 +3679,7 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
 			queue_index = ops->ndo_select_queue(dev, skb, sb_dev,
 							    __netdev_pick_tx);
 		else
-			queue_index = ___netdev_pick_tx(dev, skb, sb_dev);
 
 		queue_index = netdev_cap_txqueue(dev, queue_index);
 	}
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index f37d087ae652..00189a3b07f2 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -275,9 +275,10 @@ static bool packet_use_direct_xmit(const struct packet_sock *po)
 	return po->xmit == packet_direct_xmit;
 }
 
-static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb,
+				  struct net_device *sb_dev)
 {
-	return dev_pick_tx_cpu_id(dev, skb, NULL, NULL);
+	return dev_pick_tx_cpu_id(dev, skb, sb_dev, NULL);
 }
 
 static u16 packet_pick_tx_queue(struct sk_buff *skb)
@@ -291,7 +292,7 @@ static u16 packet_pick_tx_queue(struct sk_buff *skb)
 						    __packet_pick_tx_queue);
 		queue_index = netdev_cap_txqueue(dev, queue_index);
 	} else {
-		queue_index = __packet_pick_tx_queue(dev, skb);
+		queue_index = __packet_pick_tx_queue(dev, skb, NULL);
 	}
 
 	return queue_index;