author      Ron Mercer <ron.mercer@qlogic.com>      2009-03-09 06:59:21 -0400
committer   David S. Miller <davem@davemloft.net>   2009-03-10 07:43:53 -0400
commit      1e213303d8ef2a5d43fb64d2b373858ef70cc79b (patch)
tree        bf6ae0c1459a0db32d1179da9996ead5a7917a56 /drivers
parent      22bdd4f599b87734b7fc8137f47e62c13ab27e93 (diff)
qlge: Add tx multiqueue support.
Signed-off-by: Ron Mercer <ron.mercer@qlogic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers')
-rw-r--r--   drivers/net/qlge/qlge_main.c   31
1 file changed, 14 insertions(+), 17 deletions(-)
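
For orientation before the patch itself: the change converts qlge from the single-queue netdev API (netif_stop_queue()/netif_wake_queue()) to the per-subqueue multiqueue API (alloc_etherdev_mq(), skb->queue_mapping, netif_stop_subqueue()/netif_wake_subqueue()). Below is a minimal, self-contained sketch of that pattern. It is illustrative only, not qlge code: the example_* names, EXAMPLE_MAX_QUEUES bound, and the descriptor thresholds are hypothetical placeholders.

    /* Illustrative sketch only -- not qlge code. */
    #include <linux/cpumask.h>
    #include <linux/etherdevice.h>
    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    #define EXAMPLE_MAX_QUEUES      8       /* hypothetical hardware limit */

    struct example_tx_ring {
            u16 wq_id;              /* index of the hardware send queue */
            u32 wq_len;             /* number of descriptors in the ring */
            atomic_t tx_count;      /* descriptors currently free */
    };

    struct example_priv {
            struct example_tx_ring tx_ring[EXAMPLE_MAX_QUEUES];
    };

    /* One TX queue per online CPU, capped at what the hardware supports. */
    static struct net_device *example_alloc_netdev(void)
    {
            return alloc_etherdev_mq(sizeof(struct example_priv),
                                     min(EXAMPLE_MAX_QUEUES,
                                         (int)num_online_cpus()));
    }

    /* Transmit: the stack has already chosen a queue; stop only that
     * subqueue when its ring runs out of descriptors. */
    static int example_xmit(struct sk_buff *skb, struct net_device *ndev)
    {
            struct example_priv *priv = netdev_priv(ndev);
            struct example_tx_ring *tx_ring =
                    &priv->tx_ring[skb_get_queue_mapping(skb)];

            if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
                    netif_stop_subqueue(ndev, tx_ring->wq_id);
                    return NETDEV_TX_BUSY;
            }
            /* ... build and post descriptors to the hardware here ... */
            return NETDEV_TX_OK;
    }

    /* Completion: wake only the subqueue that was stopped, once its ring
     * is at least a quarter empty again. */
    static void example_tx_complete(struct net_device *ndev,
                                    struct example_tx_ring *tx_ring)
    {
            if (__netif_subqueue_stopped(ndev, tx_ring->wq_id) &&
                atomic_read(&tx_ring->tx_count) > tx_ring->wq_len / 4)
                    netif_wake_subqueue(ndev, tx_ring->wq_id);
    }

The point of the per-subqueue calls is that exhausting one hardware send ring only stalls the traffic mapped to that ring; the other queues keep transmitting, which is what the conversion below achieves.
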
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 339e1da77e6e..6da8901b0cc3 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -1627,14 +1627,12 @@ static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
 /* Fire up a handler to reset the MPI processor. */
 void ql_queue_fw_error(struct ql_adapter *qdev)
 {
-        netif_stop_queue(qdev->ndev);
         netif_carrier_off(qdev->ndev);
         queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
 }
 
 void ql_queue_asic_error(struct ql_adapter *qdev)
 {
-        netif_stop_queue(qdev->ndev);
         netif_carrier_off(qdev->ndev);
         ql_disable_interrupts(qdev);
         /* Clear adapter up bit to signal the recovery
@@ -1689,6 +1687,7 @@ static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
         struct ob_mac_iocb_rsp *net_rsp = NULL;
         int count = 0;
 
+        struct tx_ring *tx_ring;
         /* While there are entries in the completion queue. */
         while (prod != rx_ring->cnsmr_idx) {
 
@@ -1714,15 +1713,16 @@ static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
                 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
         }
         ql_write_cq_idx(rx_ring);
-        if (netif_queue_stopped(qdev->ndev) && net_rsp != NULL) {
-                struct tx_ring *tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
+        tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
+        if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id) &&
+                                        net_rsp != NULL) {
                 if (atomic_read(&tx_ring->queue_stopped) &&
                     (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
                         /*
                          * The queue got stopped because the tx_ring was full.
                          * Wake it up, because it's now at least 25% empty.
                          */
-                        netif_wake_queue(qdev->ndev);
+                        netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
         }
 
         return count;
@@ -2054,7 +2054,7 @@ static int qlge_send(struct sk_buff *skb, struct net_device *ndev)
         struct ql_adapter *qdev = netdev_priv(ndev);
         int tso;
         struct tx_ring *tx_ring;
-        u32 tx_ring_idx = (u32) QL_TXQ_IDX(qdev, skb);
+        u32 tx_ring_idx = (u32) skb->queue_mapping;
 
         tx_ring = &qdev->tx_ring[tx_ring_idx];
 
@@ -2062,7 +2062,7 @@ static int qlge_send(struct sk_buff *skb, struct net_device *ndev)
                 QPRINTK(qdev, TX_QUEUED, INFO,
                         "%s: shutting down tx queue %d du to lack of resources.\n",
                         __func__, tx_ring_idx);
-                netif_stop_queue(ndev);
+                netif_stop_subqueue(ndev, tx_ring->wq_id);
                 atomic_inc(&tx_ring->queue_stopped);
                 return NETDEV_TX_BUSY;
         }
@@ -3192,12 +3192,10 @@ static void ql_display_dev_info(struct net_device *ndev)
 
 static int ql_adapter_down(struct ql_adapter *qdev)
 {
-        struct net_device *ndev = qdev->ndev;
         int i, status = 0;
         struct rx_ring *rx_ring;
 
-        netif_stop_queue(ndev);
-        netif_carrier_off(ndev);
+        netif_carrier_off(qdev->ndev);
 
         /* Don't kill the reset worker thread if we
          * are in the process of recovery.
@@ -3261,12 +3259,11 @@ static int ql_adapter_up(struct ql_adapter *qdev)
         spin_unlock(&qdev->hw_lock);
         set_bit(QL_ADAPTER_UP, &qdev->flags);
         ql_alloc_rx_buffers(qdev);
+        if ((ql_read32(qdev, STS) & qdev->port_init))
+                netif_carrier_on(qdev->ndev);
         ql_enable_interrupts(qdev);
         ql_enable_all_completion_interrupts(qdev);
-        if ((ql_read32(qdev, STS) & qdev->port_init)) {
-                netif_carrier_on(qdev->ndev);
-                netif_start_queue(qdev->ndev);
-        }
+        netif_tx_start_all_queues(qdev->ndev);
 
         return 0;
 err_init:
@@ -3354,6 +3351,7 @@ static int ql_configure_rings(struct ql_adapter *qdev)
          * completion handler rx_rings.
          */
         qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count + 1;
+        netif_set_gso_max_size(qdev->ndev, 65536);
 
         for (i = 0; i < qdev->tx_ring_count; i++) {
                 tx_ring = &qdev->tx_ring[i];
@@ -3829,7 +3827,8 @@ static int __devinit qlge_probe(struct pci_dev *pdev,
         static int cards_found = 0;
         int err = 0;
 
-        ndev = alloc_etherdev(sizeof(struct ql_adapter));
+        ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
+                        min(MAX_CPUS, (int)num_online_cpus()));
         if (!ndev)
                 return -ENOMEM;
 
@@ -3872,7 +3871,6 @@ static int __devinit qlge_probe(struct pci_dev *pdev,
                 return err;
         }
         netif_carrier_off(ndev);
-        netif_stop_queue(ndev);
         ql_display_dev_info(ndev);
         cards_found++;
         return 0;
@@ -3926,7 +3924,6 @@ static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
         pci_set_master(pdev);
 
         netif_carrier_off(ndev);
-        netif_stop_queue(ndev);
         ql_adapter_reset(qdev);
 
         /* Make sure the EEPROM is good */