author		Benjamin Li <benli@broadcom.com>	2008-07-18 20:55:11 -0400
committer	David S. Miller <davem@davemloft.net>	2008-07-18 20:55:11 -0400
commit		706bf24038ba88ec77503b4829b16da712a32f04 (patch)
tree		522c78dad8ebe426ce9c1244b260a102d50a8c7c
parent		4f83ec19bbd0c78a2158c7a5d28f70d8b4417803 (diff)
bnx2: Add TX multiqueue support.
Signed-off-by: Benjamin Li <benli@broadcom.com>
Signed-off-by: Michael Chan <mchan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	drivers/net/bnx2.c	53
-rw-r--r--	drivers/net/bnx2.h	3
2 files changed, 38 insertions(+), 18 deletions(-)
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 9ce30fc85ea0..dc6f26190f36 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -47,6 +47,7 @@
 #include <linux/prefetch.h>
 #include <linux/cache.h>
 #include <linux/zlib.h>
+#include <linux/log2.h>
 
 #include "bnx2.h"
 #include "bnx2_fw.h"
@@ -492,7 +493,7 @@ bnx2_netif_start(struct bnx2 *bp)
 {
 	if (atomic_dec_and_test(&bp->intr_sem)) {
 		if (netif_running(bp->dev)) {
-			netif_wake_queue(bp->dev);
+			netif_tx_wake_all_queues(bp->dev);
 			bnx2_napi_enable(bp);
 			bnx2_enable_int(bp);
 		}
@@ -2584,7 +2585,11 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 {
 	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
 	u16 hw_cons, sw_cons, sw_ring_cons;
-	int tx_pkt = 0;
+	int tx_pkt = 0, index;
+	struct netdev_queue *txq;
+
+	index = (bnapi - bp->bnx2_napi);
+	txq = netdev_get_tx_queue(bp->dev, index);
 
 	hw_cons = bnx2_get_hw_tx_cons(bnapi);
 	sw_cons = txr->tx_cons;
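Note: the added lines recover the instance index by pointer subtraction. Each bnx2_napi entry owns one TX ring, so (bnapi - bp->bnx2_napi) is the ring number and, with this patch, also the netdev TX queue number passed to netdev_get_tx_queue(). A standalone C sketch of that pointer arithmetic, not part of the patch (the demo struct and values are invented):

#include <stdio.h>

struct napi_demo { int dummy; };           /* stand-in for struct bnx2_napi */

int main(void)
{
	struct napi_demo vec[8];           /* stand-in for bp->bnx2_napi[] */
	struct napi_demo *bnapi = &vec[3]; /* the instance being serviced */

	/* Pointer subtraction yields the element index; the driver reuses
	 * it as the TX queue number. */
	printf("index = %td\n", bnapi - vec);   /* prints: index = 3 */
	return 0;
}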
@@ -2644,21 +2649,23 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 
 	txr->hw_tx_cons = hw_cons;
 	txr->tx_cons = sw_cons;
+
 	/* Need to make the tx_cons update visible to bnx2_start_xmit()
-	 * before checking for netif_queue_stopped(). Without the
+	 * before checking for netif_tx_queue_stopped(). Without the
 	 * memory barrier, there is a small possibility that bnx2_start_xmit()
 	 * will miss it and cause the queue to be stopped forever.
 	 */
 	smp_mb();
 
-	if (unlikely(netif_queue_stopped(bp->dev)) &&
+	if (unlikely(netif_tx_queue_stopped(txq)) &&
 		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
-		netif_tx_lock(bp->dev);
-		if ((netif_queue_stopped(bp->dev)) &&
+		__netif_tx_lock(txq, smp_processor_id());
+		if ((netif_tx_queue_stopped(txq)) &&
 		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
-			netif_wake_queue(bp->dev);
-		netif_tx_unlock(bp->dev);
+			netif_tx_wake_queue(txq);
+		__netif_tx_unlock(txq);
 	}
+
 	return tx_pkt;
 }
 
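Note: the comment above describes the classic lost-wakeup race between a producer that stops a full queue and a consumer that frees space and wakes it. Below is a self-contained userspace sketch of the same stop/wake handshake using C11 atomics and pthreads; it is illustrative only and not part of the patch (RING_SIZE, WAKE_THRESH, ring_avail() and so on are invented), and the explicit fence marks where the driver's smp_mb() sits even though the seq_cst atomics used here already provide that ordering.

/* Userspace illustration of the bnx2_start_xmit()/bnx2_tx_int() stop/wake
 * handshake; not kernel code.  All names and sizes are invented. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define RING_SIZE   64
#define WAKE_THRESH 16
#define PACKETS     100000u

static atomic_uint prod_idx, cons_idx;
static atomic_bool stopped;

static unsigned int ring_avail(void)
{
	return RING_SIZE - (atomic_load(&prod_idx) - atomic_load(&cons_idx));
}

/* Plays the role of bnx2_start_xmit(): stop when full, then re-check. */
static void *producer(void *arg)
{
	unsigned int sent = 0;

	(void)arg;
	while (sent < PACKETS) {
		if (atomic_load(&stopped)) {
			/* Mirrors the driver's defensive re-check after it
			 * stops the queue. */
			if (ring_avail() > WAKE_THRESH)
				atomic_store(&stopped, false);
			continue;
		}
		atomic_fetch_add(&prod_idx, 1);
		sent++;
		if (ring_avail() == 0)
			atomic_store(&stopped, true);
	}
	return NULL;
}

/* Plays the role of bnx2_tx_int(): free slots, then maybe wake the queue. */
static void *consumer(void *arg)
{
	(void)arg;
	while (atomic_load(&cons_idx) < PACKETS) {
		if (atomic_load(&cons_idx) == atomic_load(&prod_idx))
			continue;                       /* nothing to reap */
		atomic_fetch_add(&cons_idx, 1);         /* free one slot */
		/* Where the driver's smp_mb() sits: publish the new consumer
		 * index before re-reading the stopped flag, so a wakeup is
		 * never missed. */
		atomic_thread_fence(memory_order_seq_cst);
		if (atomic_load(&stopped) && ring_avail() > WAKE_THRESH)
			atomic_store(&stopped, false);  /* wake the queue */
	}
	return NULL;
}

int main(void)
{
	pthread_t p, c;

	pthread_create(&p, NULL, producer, NULL);
	pthread_create(&c, NULL, consumer, NULL);
	pthread_join(p, NULL);
	pthread_join(c, NULL);
	printf("moved %u packets through the ring\n", atomic_load(&cons_idx));
	return 0;
}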
@@ -5766,7 +5773,7 @@ static void
 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
 {
 	int cpus = num_online_cpus();
-	int msix_vecs = min(cpus + 1, RX_MAX_RSS_RINGS);
+	int msix_vecs = min(cpus + 1, RX_MAX_RINGS);
 
 	bp->irq_tbl[0].handler = bnx2_interrupt;
 	strcpy(bp->irq_tbl[0].name, bp->dev->name);
@@ -5789,7 +5796,10 @@ bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
 			bp->irq_tbl[0].vector = bp->pdev->irq;
 		}
 	}
-	bp->num_tx_rings = 1;
+
+	bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
+	bp->dev->real_num_tx_queues = bp->num_tx_rings;
+
 	bp->num_rx_rings = bp->irq_nvecs;
 }
 
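Note: rounddown_pow_of_two() (from the newly included <linux/log2.h>) clamps the TX ring count to the largest power of two that does not exceed the number of interrupt vectors, so for example 3 vectors give 2 TX rings and 9 would give 8. A small userspace sketch of the same arithmetic, not part of the patch (the helper below is a re-implementation for illustration, not the kernel's):

/* Illustration of the rounddown_pow_of_two() arithmetic; not kernel code. */
#include <stdio.h>

static unsigned long rounddown_pow_of_two_demo(unsigned long n)
{
	unsigned long p = 1;

	while (p * 2 <= n)        /* largest power of two <= n */
		p *= 2;
	return p;
}

int main(void)
{
	for (unsigned long vecs = 1; vecs <= 9; vecs++)
		printf("irq_nvecs=%lu -> num_tx_rings=%lu\n",
		       vecs, rounddown_pow_of_two_demo(vecs));
	return 0;
}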
@@ -5858,7 +5868,7 @@ bnx2_open(struct net_device *dev)
 	else if (bp->flags & BNX2_FLAG_USING_MSIX)
 		printk(KERN_INFO PFX "%s: using MSIX\n", dev->name);
 
-	netif_start_queue(dev);
+	netif_tx_start_all_queues(dev);
 
 	return 0;
 
@@ -5927,12 +5937,19 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	u32 len, vlan_tag_flags, last_frag, mss;
 	u16 prod, ring_prod;
 	int i;
-	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
-	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
+	struct bnx2_napi *bnapi;
+	struct bnx2_tx_ring_info *txr;
+	struct netdev_queue *txq;
+
+	/* Determine which tx ring we will be placed on */
+	i = skb_get_queue_mapping(skb);
+	bnapi = &bp->bnx2_napi[i];
+	txr = &bnapi->tx_ring;
+	txq = netdev_get_tx_queue(dev, i);
 
 	if (unlikely(bnx2_tx_avail(bp, txr) <
 	    (skb_shinfo(skb)->nr_frags + 1))) {
-		netif_stop_queue(dev);
+		netif_tx_stop_queue(txq);
 		printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
 			dev->name);
 
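Note: skb_get_queue_mapping() returns the TX queue the core stack already chose for the skb; the driver only has to index its per-queue state with it. For ordinary traffic the stack commonly derives that index by scaling a 32-bit flow hash into [0, real_num_tx_queues) with a multiply-and-shift rather than a modulo. A userspace sketch of that scaling, not part of the patch (hash values are made up):

/* Illustration of mapping a 32-bit flow hash to a TX queue index. */
#include <stdint.h>
#include <stdio.h>

static uint16_t pick_tx_queue(uint32_t flow_hash, unsigned int num_tx_queues)
{
	/* Multiply-and-shift maps the full 32-bit hash range evenly
	 * onto 0..num_tx_queues-1. */
	return (uint16_t)(((uint64_t)flow_hash * num_tx_queues) >> 32);
}

int main(void)
{
	uint32_t sample_hashes[] = { 0x00000000u, 0x3c6ef372u,
				     0x9e3779b9u, 0xffffffffu };
	unsigned int queues = 4;   /* e.g. bp->num_tx_rings after setup */

	for (unsigned int i = 0; i < 4; i++)
		printf("hash=0x%08x -> queue %u\n", sample_hashes[i],
		       pick_tx_queue(sample_hashes[i], queues));
	return 0;
}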
@@ -6047,9 +6064,9 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	dev->trans_start = jiffies;
 
 	if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
-		netif_stop_queue(dev);
+		netif_tx_stop_queue(txq);
 		if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
-			netif_wake_queue(dev);
+			netif_tx_wake_queue(txq);
 	}
 
 	return NETDEV_TX_OK;
@@ -7294,7 +7311,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 	INIT_WORK(&bp->reset_task, bnx2_reset_task);
 
 	dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
-	mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
+	mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS);
 	dev->mem_end = dev->mem_start + mem_len;
 	dev->irq = pdev->irq;
 
@@ -7647,7 +7664,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	printk(KERN_INFO "%s", version);
 
 	/* dev zeroed in init_etherdev */
-	dev = alloc_etherdev(sizeof(*bp));
+	dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
 
 	if (!dev)
 		return -ENOMEM;
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index bb7b5d5471dd..c3c579f98ed0 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -6509,6 +6509,9 @@ struct l2_fhdr {
 #define RX_CID			0
 #define RX_RSS_CID		4
 #define RX_MAX_RSS_RINGS	7
+#define RX_MAX_RINGS		(RX_MAX_RSS_RINGS + 1)
+#define TX_MAX_TSS_RINGS	7
+#define TX_MAX_RINGS		(TX_MAX_TSS_RINGS + 1)
 
 #define MB_TX_CID_ADDR	MB_GET_CID_ADDR(TX_CID)
 #define MB_RX_CID_ADDR	MB_GET_CID_ADDR(RX_CID)