author		Yevgeny Petrilin <yevgenyp@mellanox.co.il>	2009-06-01 19:24:07 -0400
committer	David S. Miller <davem@davemloft.net>		2009-06-02 05:29:06 -0400
commit		f813cad836ab14b764cfe76f42a3b50bb9677b30 (patch)
tree		ccaafb7bcaa72e737fb8939e0c556f2466bb0d7d /drivers/net/mlx4/en_tx.c
parent		3db36fb2c88d68ee28d20845d5bb805ea9a7f6d8 (diff)
mlx4_en: multiqueue support
By default the driver opens 8 TX queues (defined by MLX4_EN_NUM_TX_RINGS).
If the driver is configured to support Per Priority Flow Control (PPFC),
we open 8 additional TX rings, one for each vlan priority.
dev->real_num_tx_queues is always set to MLX4_EN_NUM_TX_RINGS.
The mlx4_en_select_queue() function uses standard hashing (skb_tx_hash)
when PPFC is not supported or the skb carries no vlan tag; otherwise the
queue is selected according to the vlan priority.
Signed-off-by: Yevgeny Petrilin <yevgenyp@mellanox.co.il>
Signed-off-by: David S. Miller <davem@davemloft.net>
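
To make the described mapping concrete: rings 0 through MLX4_EN_NUM_TX_RINGS - 1
carry hashed traffic, and with PPFC enabled a packet with vlan priority prio
(the top 3 bits of the vlan tag, hence the ">> 13" in the diff below) goes to
ring MLX4_EN_NUM_TX_RINGS + prio. A minimal user-space sketch of that
arithmetic; the constant name and the bit layout come from the patch, the rest
is illustrative:

#include <stdio.h>
#include <stdint.h>

#define MLX4_EN_NUM_TX_RINGS 8	/* default TX rings, per the commit message */

/* The vlan tag carries the 3-bit priority (PCP) in its top bits,
 * so tag >> 13 yields a priority in 0..7. */
static uint16_t select_ring(uint16_t vlan_tag, int ppfc, int has_vlan)
{
	if (ppfc && has_vlan)
		return MLX4_EN_NUM_TX_RINGS + (vlan_tag >> 13);
	/* No PPFC or no vlan tag: the driver falls back to skb_tx_hash();
	 * ring 0 merely stands in for the hash here. */
	return 0;
}

int main(void)
{
	uint16_t tag = (5 << 13) | 100;	/* priority 5, vlan id 100 */

	printf("priority 5 -> ring %u\n", select_ring(tag, 1, 1));	/* 13 */
	printf("no vlan tag -> ring %u\n", select_ring(0, 1, 0));	/* 0 */
	return 0;
}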
Diffstat (limited to 'drivers/net/mlx4/en_tx.c')
-rw-r--r--	drivers/net/mlx4/en_tx.c	74
1 file changed, 18 insertions(+), 56 deletions(-)
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
index 95703f90c1b9..3719d1ac3950 100644
--- a/drivers/net/mlx4/en_tx.c
+++ b/drivers/net/mlx4/en_tx.c
@@ -297,34 +297,6 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
 	return cnt;
 }
 
-void mlx4_en_set_prio_map(struct mlx4_en_priv *priv, u16 *prio_map, u32 ring_num)
-{
-	int block = 8 / ring_num;
-	int extra = 8 - (block * ring_num);
-	int num = 0;
-	u16 ring = 1;
-	int prio;
-
-	if (ring_num == 1) {
-		for (prio = 0; prio < 8; prio++)
-			prio_map[prio] = 0;
-		return;
-	}
-
-	for (prio = 0; prio < 8; prio++) {
-		if (extra && (num == block + 1)) {
-			ring++;
-			num = 0;
-			extra--;
-		} else if (!extra && (num == block)) {
-			ring++;
-			num = 0;
-		}
-		prio_map[prio] = ring;
-		en_dbg(DRV, priv, " prio:%d --> ring:%d\n", prio, ring);
-		num++;
-	}
-}
-
 static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
 {
@@ -386,18 +358,8 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
 	if (unlikely(ring->blocked)) {
 		if ((u32) (ring->prod - ring->cons) <=
 		     ring->size - HEADROOM - MAX_DESC_TXBBS) {
-
-			/* TODO: support multiqueue netdevs. Currently, we block
-			 * when *any* ring is full. Note that:
-			 * - 2 Tx rings can unblock at the same time and call
-			 *   netif_wake_queue(), which is OK since this
-			 *   operation is idempotent.
-			 * - We might wake the queue just after another ring
-			 *   stopped it. This is no big deal because the next
-			 *   transmission on that ring would stop the queue.
-			 */
 			ring->blocked = 0;
-			netif_wake_queue(dev);
+			netif_tx_wake_queue(netdev_get_tx_queue(dev, cq->ring));
 			priv->port_stats.wake_queue++;
 		}
 	}
@@ -616,21 +578,20 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
 	tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
 }
 
-static int get_vlan_info(struct mlx4_en_priv *priv, struct sk_buff *skb,
-			 u16 *vlan_tag)
+u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
 {
-	int tx_ind;
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	u16 vlan_tag = 0;
 
-	/* Obtain VLAN information if present */
-	if (priv->vlgrp && vlan_tx_tag_present(skb)) {
-		*vlan_tag = vlan_tx_tag_get(skb);
-		/* Set the Tx ring to use according to vlan priority */
-		tx_ind = priv->tx_prio_map[*vlan_tag >> 13];
-	} else {
-		*vlan_tag = 0;
-		tx_ind = 0;
+	/* If we support per priority flow control and the packet contains
+	 * a vlan tag, send the packet to the TX ring assigned to that priority
+	 */
+	if (priv->prof->rx_ppp && priv->vlgrp && vlan_tx_tag_present(skb)) {
+		vlan_tag = vlan_tx_tag_get(skb);
+		return MLX4_EN_NUM_TX_RINGS + (vlan_tag >> 13);
 	}
-	return tx_ind;
+
+	return skb_tx_hash(dev, skb);
 }
 
 int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
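
For the stack to call mlx4_en_select_queue() at all, the netdev must be
allocated with multiple TX queues and the callback wired into the driver's
net_device_ops; those changes live in en_netdev.c and are outside this
diffstat-limited view. A sketch of what that hookup presumably looks like,
using the ndo_select_queue signature of this kernel generation (the ops-table
name and the subset of ops shown are assumptions):

static const struct net_device_ops mlx4_netdev_ops = {
	/* illustrative subset; the real table carries many more ops */
	.ndo_start_xmit		= mlx4_en_xmit,
	.ndo_select_queue	= mlx4_en_select_queue,
};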
@@ -650,7 +611,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 	dma_addr_t dma;
 	u32 index;
 	__be32 op_own;
-	u16 vlan_tag;
+	u16 vlan_tag = 0;
 	int i;
 	int lso_header_size;
 	void *fragptr;
@@ -673,15 +634,16 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 		return NETDEV_TX_OK;
 	}
 
-	tx_ind = get_vlan_info(priv, skb, &vlan_tag);
+	tx_ind = skb->queue_mapping;
 	ring = &priv->tx_ring[tx_ind];
+	if (priv->vlgrp && vlan_tx_tag_present(skb))
+		vlan_tag = vlan_tx_tag_get(skb);
 
 	/* Check available TXBBs And 2K spare for prefetch */
 	if (unlikely(((int)(ring->prod - ring->cons)) >
 		     ring->size - HEADROOM - MAX_DESC_TXBBS)) {
-		/* every full Tx ring stops queue.
-		 * TODO: implement multi-queue support (per-queue stop) */
-		netif_stop_queue(dev);
+		/* every full Tx ring stops queue */
+		netif_tx_stop_queue(netdev_get_tx_queue(dev, tx_ind));
 		ring->blocked = 1;
 		priv->port_stats.queue_stopped++;
 
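
A closing note on the stop/wake arithmetic: both the stop check above and the
wake check in mlx4_en_process_tx_cq() compare ring->prod - ring->cons against
ring->size - HEADROOM - MAX_DESC_TXBBS. prod and cons are free-running
unsigned counters, so the unsigned subtraction yields the number of
outstanding units even after either counter wraps. A standalone sketch of
that invariant (the constants are illustrative, not the driver's values):

#include <stdio.h>
#include <stdint.h>

#define RING_SIZE	256
#define HEADROOM	4	/* illustrative */
#define MAX_DESC_TXBBS	16	/* illustrative */

/* Ring is "full" when outstanding units exceed the size minus headroom
 * and the worst-case descriptor footprint. */
static int ring_full(uint32_t prod, uint32_t cons)
{
	return (int)(prod - cons) > RING_SIZE - HEADROOM - MAX_DESC_TXBBS;
}

int main(void)
{
	/* cons sits just below 2^32; prod has already wrapped past it,
	 * yet the difference is still an accurate 240. */
	uint32_t cons = 0xfffffff0u;
	uint32_t prod = cons + 240;

	printf("outstanding=%u full=%d\n", prod - cons, ring_full(prod, cons));
	return 0;
}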