author     Amir Vadai <amirv@mellanox.com>        2012-05-16 20:58:10 -0400
committer  David S. Miller <davem@davemloft.net>  2012-05-17 16:17:50 -0400
commit     bc6a4744b827c5a78ca591acca81809bddb8b2db (patch)
tree       49328e3c1d680c5532605b2f701c01061958ebd3 /drivers/net
parent     cad456d5abbb6307be7a658d701bc44ca689e906 (diff)
net/mlx4_en: num cores tx rings for every UP
Change the TX ring scheme such that the number of rings for untagged packets
and for tagged packets (per each of the vlan priorities) is the same, unlike
the current situation where tagged traffic gets one ring per priority while
untagged traffic gets as many rings as there are cores.
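
As a rough illustration (not part of the patch), the per-port TX ring count
under the new scheme follows the logic added to mlx4_en_get_profile() in
en_main.c below; the CPU count here is only an example:

	/* per-UP ring count: number of online CPUs, capped at the new maximum */
	num_tx_rings_p_up = min_t(int, num_online_cpus(), MLX4_EN_MAX_TX_RING_P_UP);
	/* total TX rings per port: the same count for each of the 8 UPs */
	tx_ring_num = num_tx_rings_p_up * MLX4_EN_NUM_UP;	/* e.g. 8 CPUs -> 64 rings */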
Queue selection is done as follows:
If the mqprio qdisc operates on the interface, i.e. the core networking code
has invoked the device's setup_tc ndo callback, a mapping of skb->priority =>
queue set is enforced for both tagged and untagged traffic.
Otherwise, the egress map skb->priority => user priority is used for tagged
traffic, and all untagged traffic is sent through the tx rings of UP 0.
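
As a sketch, the resulting selection logic (mirroring the new
mlx4_en_select_queue() in en_tx.c below, where rings_p_up is the per-UP ring
count and up defaults to 0):

	if (dev->num_tc)			/* mqprio is active: use the forced mapping */
		return skb_tx_hash(dev, skb);

	if (vlan_tx_tag_present(skb))		/* tagged: take the UP from the VLAN PCP bits */
		up = vlan_tx_tag_get(skb) >> VLAN_PRIO_SHIFT;

	return __skb_tx_hash(dev, skb, rings_p_up) + up * rings_p_up;	/* untagged: up == 0 */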
The patch follows the conclusions reached in the discussion of this issue with
John Fastabend in this thread: http://comments.gmane.org/gmane.linux.network/229877
Cc: John Fastabend <john.r.fastabend@intel.com>
Cc: Liran Liss <liranl@mellanox.com>
Signed-off-by: Amir Vadai <amirv@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_main.c   |  6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 41
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c     | 15
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h   |  9
4 files changed, 47 insertions, 24 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index 346fdb2e92a6..988b2424e1c6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -101,6 +101,8 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
 	int i;
 
 	params->udp_rss = udp_rss;
+	params->num_tx_rings_p_up = min_t(int, num_online_cpus(),
+			MLX4_EN_MAX_TX_RING_P_UP);
 	if (params->udp_rss && !(mdev->dev->caps.flags
 					& MLX4_DEV_CAP_FLAG_UDP_RSS)) {
 		mlx4_warn(mdev, "UDP RSS is not supported on this device.\n");
@@ -113,8 +115,8 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
 		params->prof[i].tx_ppp = pfctx;
 		params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE;
 		params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE;
-		params->prof[i].tx_ring_num = MLX4_EN_NUM_TX_RINGS +
-			MLX4_EN_NUM_PPP_RINGS;
+		params->prof[i].tx_ring_num = params->num_tx_rings_p_up *
+			MLX4_EN_NUM_UP;
 		params->prof[i].rss_rings = 0;
 	}
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index eaa8fadf19c0..926d8aac941c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -47,9 +47,22 @@
 
 static int mlx4_en_setup_tc(struct net_device *dev, u8 up)
 {
-	if (up != MLX4_EN_NUM_UP)
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	int i;
+	unsigned int q, offset = 0;
+
+	if (up && up != MLX4_EN_NUM_UP)
 		return -EINVAL;
 
+	netdev_set_num_tc(dev, up);
+
+	/* Partition Tx queues evenly amongst UP's */
+	q = priv->tx_ring_num / up;
+	for (i = 0; i < up; i++) {
+		netdev_set_tc_queue(dev, i, q, offset);
+		offset += q;
+	}
+
 	return 0;
 }
 
@@ -661,7 +674,7 @@ int mlx4_en_start_port(struct net_device *dev)
 		/* Configure ring */
 		tx_ring = &priv->tx_ring[i];
 		err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
-			max(0, i - MLX4_EN_NUM_TX_RINGS));
+			i / priv->mdev->profile.num_tx_rings_p_up);
 		if (err) {
 			en_err(priv, "Failed allocating Tx ring\n");
 			mlx4_en_deactivate_cq(priv, cq);
@@ -986,6 +999,9 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
 
 	mlx4_en_free_resources(priv);
 
+	kfree(priv->tx_ring);
+	kfree(priv->tx_cq);
+
 	free_netdev(dev);
 }
 
@@ -1091,6 +1107,18 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
 			MLX4_WQE_CTRL_SOLICITED);
 	priv->tx_ring_num = prof->tx_ring_num;
+	priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring) *
+			priv->tx_ring_num, GFP_KERNEL);
+	if (!priv->tx_ring) {
+		err = -ENOMEM;
+		goto out;
+	}
+	priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq) * priv->tx_ring_num,
+			GFP_KERNEL);
+	if (!priv->tx_cq) {
+		err = -ENOMEM;
+		goto out;
+	}
 	priv->rx_ring_num = prof->rx_ring_num;
 	priv->mac_index = -1;
 	priv->msg_enable = MLX4_EN_MSG_LEVEL;
@@ -1138,15 +1166,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
 	netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
 
-	netdev_set_num_tc(dev, MLX4_EN_NUM_UP);
-
-	/* First 9 rings are for UP 0 */
-	netdev_set_tc_queue(dev, 0, MLX4_EN_NUM_TX_RINGS + 1, 0);
-
-	/* Partition Tx queues evenly amongst UP's 1-7 */
-	for (i = 1; i < MLX4_EN_NUM_UP; i++)
-		netdev_set_tc_queue(dev, i, 1, MLX4_EN_NUM_TX_RINGS + i);
-
 	SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);
 
 	/* Set defualt MAC */
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 9a38483feb92..019d856b1334 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -525,14 +525,17 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
 
 u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
 {
-	u16 vlan_tag = 0;
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	u16 rings_p_up = priv->mdev->profile.num_tx_rings_p_up;
+	u8 up = 0;
 
-	if (vlan_tx_tag_present(skb)) {
-		vlan_tag = vlan_tx_tag_get(skb);
-		return MLX4_EN_NUM_TX_RINGS + (vlan_tag >> 13);
-	}
+	if (dev->num_tc)
+		return skb_tx_hash(dev, skb);
 
-	return skb_tx_hash(dev, skb);
+	if (vlan_tx_tag_present(skb))
+		up = vlan_tx_tag_get(skb) >> VLAN_PRIO_SHIFT;
+
+	return __skb_tx_hash(dev, skb, rings_p_up) + up * rings_p_up;
 }
 
 static void mlx4_bf_copy(void __iomem *dst, unsigned long *src, unsigned bytecnt)
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 5d876375a132..6ae350921b1a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -111,9 +111,7 @@ enum {
 #define MLX4_EN_MIN_TX_SIZE	(4096 / TXBB_SIZE)
 
 #define MLX4_EN_SMALL_PKT_SIZE		64
-#define MLX4_EN_NUM_TX_RINGS		8
-#define MLX4_EN_NUM_PPP_RINGS		8
-#define MAX_TX_RINGS			(MLX4_EN_NUM_TX_RINGS + MLX4_EN_NUM_PPP_RINGS)
+#define MLX4_EN_MAX_TX_RING_P_UP	32
 #define MLX4_EN_NUM_UP			8
 #define MLX4_EN_DEF_TX_RING_SIZE	512
 #define MLX4_EN_DEF_RX_RING_SIZE	1024
@@ -339,6 +337,7 @@ struct mlx4_en_profile {
 	u32 active_ports;
 	u32 small_pkt_int;
 	u8 no_reset;
+	u8 num_tx_rings_p_up;
 	struct mlx4_en_port_profile prof[MLX4_MAX_PORTS + 1];
 };
 
@@ -477,9 +476,9 @@ struct mlx4_en_priv {
 	u16 num_frags;
 	u16 log_rx_info;
 
-	struct mlx4_en_tx_ring tx_ring[MAX_TX_RINGS];
+	struct mlx4_en_tx_ring *tx_ring;
 	struct mlx4_en_rx_ring rx_ring[MAX_RX_RINGS];
-	struct mlx4_en_cq tx_cq[MAX_TX_RINGS];
+	struct mlx4_en_cq *tx_cq;
 	struct mlx4_en_cq rx_cq[MAX_RX_RINGS];
 	struct work_struct mcast_task;
 	struct work_struct mac_task;