path: root/drivers/net/qlge
author    Ron Mercer <ron.mercer@qlogic.com>  2009-08-27 07:02:11 -0400
committer David S. Miller <davem@davemloft.net>  2009-08-29 02:22:34 -0400
commit    39aa81659353becbe4ee34d72cf79e02182e858a (patch)
tree      4b61c88bd140489be86296c2bfb4a501683b960b /drivers/net/qlge
parent    a4ab613717184138763c5fb4a4b4bbc354d5b0ee (diff)
qlge: Move TX completions from workqueue to NAPI.
TX completions were running in a workqueue queued by the ISR. This patch
moves the processing of TX completions to an existing RSS NAPI context.
Now each irq vector runs NAPI for one RSS ring and one or more TX
completion rings.

Signed-off-by: Ron Mercer <ron.mercer@qlogic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
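To make the new fan-out concrete, here is a small standalone C sketch. It is not part of the patch; the ring counts are example values chosen to match the worked example in the ql_set_tx_vect() comment below, and it only mimics the arithmetic of ql_set_irq_mask(). It computes which completion rings, and which irq_mask bits, each MSI-X vector would own:

#include <stdio.h>

/*
 * Standalone illustration (not driver code) of the ring layout this patch
 * sets up: rx_ring[0..rss_ring_count-1] are the RSS (inbound) rings, one
 * per MSI-X vector, and the TX completion rings follow immediately after.
 * Each vector services its own RSS ring plus an equal share of the TX
 * completion rings, and carries a bit mask of the cq_ids it owns
 * (cf. ql_set_irq_mask() in the diff below).  Counts are example values.
 */
int main(void)
{
	int intr_count = 2;               /* example: 2 MSI-X vectors       */
	int rss_ring_count = intr_count;  /* one RSS ring per vector        */
	int tx_ring_count = 8;            /* example: 8 TX completion rings */
	int tx_rings_per_vector = tx_ring_count / intr_count;
	int vect, j;

	for (vect = 0; vect < intr_count; vect++) {
		/* The RSS ring's cq_id equals the vector index. */
		unsigned int irq_mask = 1u << vect;
		int first_tx = rss_ring_count + vect * tx_rings_per_vector;

		/* Add the TX completion rings this vector services. */
		for (j = 0; j < tx_rings_per_vector; j++)
			irq_mask |= 1u << (first_tx + j);

		printf("vector %d: RSS cq_id %d, TX cq_ids %d..%d, irq_mask=0x%03x\n",
		       vect, vect, first_tx,
		       first_tx + tx_rings_per_vector - 1, irq_mask);
	}
	return 0;
}

For the 2-vector / 8-TX-ring case it prints vector 0 owning RSS ring 0 plus the TX completion rings with cq_ids 2..5, and vector 1 owning RSS ring 1 plus cq_ids 6..9; that per-vector mask is what ql_napi_poll_msix() now checks before cleaning a TX completion ring.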
Diffstat (limited to 'drivers/net/qlge')
-rw-r--r--  drivers/net/qlge/qlge.h      |   7
-rw-r--r--  drivers/net/qlge/qlge_main.c | 266
2 files changed, 137 insertions, 136 deletions
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index ed5dbca01bd1..a9845a2f243f 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -1292,7 +1292,6 @@ struct rx_ring {
 	u32 cpu;	/* Which CPU this should run on. */
 	char name[IFNAMSIZ + 5];
 	struct napi_struct napi;
-	struct delayed_work rx_work;
 	u8 reserved;
 	struct ql_adapter *qdev;
 };
@@ -1366,6 +1365,7 @@ struct nic_stats {
 struct intr_context {
 	struct ql_adapter *qdev;
 	u32 intr;
+	u32 irq_mask;		/* Mask of which rings the vector services. */
 	u32 hooked;
 	u32 intr_en_mask;	/* value/mask used to enable this intr */
 	u32 intr_dis_mask;	/* value/mask used to disable this intr */
@@ -1486,11 +1486,11 @@ struct ql_adapter {
 	struct intr_context intr_context[MAX_RX_RINGS];
 
 	int tx_ring_count;	/* One per online CPU. */
-	u32 rss_ring_count;	/* One per online CPU. */
+	u32 rss_ring_count;	/* One per irq vector. */
 	/*
 	 * rx_ring_count =
 	 *  (CPU count * outbound completion rx_ring) +
-	 *  (CPU count * inbound (RSS) completion rx_ring)
+	 *  (irq_vector_cnt * inbound (RSS) completion rx_ring)
 	 */
 	int rx_ring_count;
 	int ring_mem_size;
@@ -1517,7 +1517,6 @@ struct ql_adapter {
 	union flash_params flash;
 
 	struct net_device_stats stats;
-	struct workqueue_struct *q_workqueue;
 	struct workqueue_struct *workqueue;
 	struct delayed_work asic_reset_work;
 	struct delayed_work mpi_reset_work;
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 0cbda4d47dc7..8dd266befdc7 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -1859,11 +1859,41 @@ static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
 {
 	struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
 	struct ql_adapter *qdev = rx_ring->qdev;
-	int work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
+	struct rx_ring *trx_ring;
+	int i, work_done = 0;
+	struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
 
 	QPRINTK(qdev, RX_STATUS, DEBUG, "Enter, NAPI POLL cq_id = %d.\n",
 		rx_ring->cq_id);
 
+	/* Service the TX rings first.  They start
+	 * right after the RSS rings. */
+	for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
+		trx_ring = &qdev->rx_ring[i];
+		/* If this TX completion ring belongs to this vector and
+		 * it's not empty then service it.
+		 */
+		if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
+		    (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
+		     trx_ring->cnsmr_idx)) {
+			QPRINTK(qdev, INTR, DEBUG,
+				"%s: Servicing TX completion ring %d.\n",
+				__func__, trx_ring->cq_id);
+			ql_clean_outbound_rx_ring(trx_ring);
+		}
+	}
+
+	/*
+	 * Now service the RSS ring if it's active.
+	 */
+	if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
+	    rx_ring->cnsmr_idx) {
+		QPRINTK(qdev, INTR, DEBUG,
+			"%s: Servicing RX completion ring %d.\n",
+			__func__, rx_ring->cq_id);
+		work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
+	}
+
 	if (work_done < budget) {
 		napi_complete(napi);
 		ql_enable_completion_interrupt(qdev, rx_ring->irq);
@@ -1925,38 +1955,6 @@ static void ql_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
 
 }
 
-/* Worker thread to process a given rx_ring that is dedicated
- * to outbound completions.
- */
-static void ql_tx_clean(struct work_struct *work)
-{
-	struct rx_ring *rx_ring =
-	    container_of(work, struct rx_ring, rx_work.work);
-	ql_clean_outbound_rx_ring(rx_ring);
-	ql_enable_completion_interrupt(rx_ring->qdev, rx_ring->irq);
-
-}
-
-/* Worker thread to process a given rx_ring that is dedicated
- * to inbound completions.
- */
-static void ql_rx_clean(struct work_struct *work)
-{
-	struct rx_ring *rx_ring =
-	    container_of(work, struct rx_ring, rx_work.work);
-	ql_clean_inbound_rx_ring(rx_ring, 64);
-	ql_enable_completion_interrupt(rx_ring->qdev, rx_ring->irq);
-}
-
-/* MSI-X Multiple Vector Interrupt Handler for outbound completions. */
-static irqreturn_t qlge_msix_tx_isr(int irq, void *dev_id)
-{
-	struct rx_ring *rx_ring = dev_id;
-	queue_delayed_work_on(rx_ring->cpu, rx_ring->qdev->q_workqueue,
-			      &rx_ring->rx_work, 0);
-	return IRQ_HANDLED;
-}
-
 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
 static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
 {
@@ -1976,7 +1974,6 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
 	struct ql_adapter *qdev = rx_ring->qdev;
 	struct intr_context *intr_context = &qdev->intr_context[0];
 	u32 var;
-	int i;
 	int work_done = 0;
 
 	spin_lock(&qdev->hw_lock);
@@ -2017,41 +2014,18 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
 	}
 
 	/*
-	 * Check the default queue and wake handler if active.
+	 * Get the bit-mask that shows the active queues for this
+	 * pass.  Compare it to the queues that this irq services
+	 * and call napi if there's a match.
 	 */
-	rx_ring = &qdev->rx_ring[0];
-	if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) != rx_ring->cnsmr_idx) {
-		QPRINTK(qdev, INTR, INFO, "Waking handler for rx_ring[0].\n");
-		ql_disable_completion_interrupt(qdev, intr_context->intr);
-		queue_delayed_work_on(smp_processor_id(), qdev->q_workqueue,
-				      &rx_ring->rx_work, 0);
-		work_done++;
-	}
-
-	if (!test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
-		/*
-		 * Start the DPC for each active queue.
-		 */
-		for (i = 1; i < qdev->rx_ring_count; i++) {
-			rx_ring = &qdev->rx_ring[i];
-			if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
-			    rx_ring->cnsmr_idx) {
-				QPRINTK(qdev, INTR, INFO,
-					"Waking handler for rx_ring[%d].\n", i);
-				ql_disable_completion_interrupt(qdev,
-								intr_context->
-								intr);
-				if (i >= qdev->rss_ring_count)
-					queue_delayed_work_on(rx_ring->cpu,
-							      qdev->q_workqueue,
-							      &rx_ring->rx_work,
-							      0);
-				else
-					napi_schedule(&rx_ring->napi);
-				work_done++;
-			}
-		}
+	var = ql_read32(qdev, ISR1);
+	if (var & intr_context->irq_mask) {
+		QPRINTK(qdev, INTR, INFO,
+			"Waking handler for rx_ring[0].\n");
+		ql_disable_completion_interrupt(qdev, intr_context->intr);
+		napi_schedule(&rx_ring->napi);
+		work_done++;
 	}
 	ql_enable_completion_interrupt(qdev, intr_context->intr);
 	return work_done ? IRQ_HANDLED : IRQ_NONE;
 }
@@ -2703,35 +2677,9 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 	}
 	switch (rx_ring->type) {
 	case TX_Q:
-		/* If there's only one interrupt, then we use
-		 * worker threads to process the outbound
-		 * completion handling rx_rings.  We do this so
-		 * they can be run on multiple CPUs.  There is
-		 * room to play with this more where we would only
-		 * run in a worker if there are more than x number
-		 * of outbound completions on the queue and more
-		 * than one queue active.  Some threshold that
-		 * would indicate a benefit in spite of the cost
-		 * of a context switch.
-		 * If there's more than one interrupt, then the
-		 * outbound completions are processed in the ISR.
-		 */
-		if (!test_bit(QL_MSIX_ENABLED, &qdev->flags))
-			INIT_DELAYED_WORK(&rx_ring->rx_work, ql_tx_clean);
-		else {
-			/* With all debug warnings on we see a WARN_ON message
-			 * when we free the skb in the interrupt context.
-			 */
-			INIT_DELAYED_WORK(&rx_ring->rx_work, ql_tx_clean);
-		}
 		cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
 		cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
 		break;
-	case DEFAULT_Q:
-		INIT_DELAYED_WORK(&rx_ring->rx_work, ql_rx_clean);
-		cqicb->irq_delay = 0;
-		cqicb->pkt_delay = 0;
-		break;
 	case RX_Q:
 		/* Inbound completion handling rx_rings run in
 		 * separate NAPI contexts.
@@ -2878,6 +2826,71 @@ msi:
 	QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n");
 }
 
+/* Each vector services 1 RSS ring and 1 or more
+ * TX completion rings.  This function loops through
+ * the TX completion rings and assigns the vector that
+ * will service it.  An example would be if there are
+ * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
+ * This would mean that vector 0 would service RSS ring 0
+ * and TX completion rings 0,1,2 and 3.  Vector 1 would
+ * service RSS ring 1 and TX completion rings 4,5,6 and 7.
+ */
+static void ql_set_tx_vect(struct ql_adapter *qdev)
+{
+	int i, j, vect;
+	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
+
+	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
+		/* Assign irq vectors to TX rx_rings.*/
+		for (vect = 0, j = 0, i = qdev->rss_ring_count;
+		     i < qdev->rx_ring_count; i++) {
+			if (j == tx_rings_per_vector) {
+				vect++;
+				j = 0;
+			}
+			qdev->rx_ring[i].irq = vect;
+			j++;
+		}
+	} else {
+		/* For single vector all rings have an irq
+		 * of zero.
+		 */
+		for (i = 0; i < qdev->rx_ring_count; i++)
+			qdev->rx_ring[i].irq = 0;
+	}
+}
+
+/* Set the interrupt mask for this vector.  Each vector
+ * will service 1 RSS ring and 1 or more TX completion
+ * rings.  This function sets up a bit mask per vector
+ * that indicates which rings it services.
+ */
+static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
+{
+	int j, vect = ctx->intr;
+	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
+
+	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
+		/* Add the RSS ring serviced by this vector
+		 * to the mask.
+		 */
+		ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
+		/* Add the TX ring(s) serviced by this vector
+		 * to the mask. */
+		for (j = 0; j < tx_rings_per_vector; j++) {
+			ctx->irq_mask |=
+			(1 << qdev->rx_ring[qdev->rss_ring_count +
+			(vect * tx_rings_per_vector) + j].cq_id);
+		}
+	} else {
+		/* For single vector we just shift each queue's
+		 * ID into the mask.
+		 */
+		for (j = 0; j < qdev->rx_ring_count; j++)
+			ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
+	}
+}
+
 /*
  * Here we build the intr_context structures based on
  * our rx_ring count and intr vector count.
@@ -2893,12 +2906,15 @@ static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
 		/* Each rx_ring has it's
 		 * own intr_context since we have separate
 		 * vectors for each queue.
-		 * This only true when MSI-X is enabled.
 		 */
 		for (i = 0; i < qdev->intr_count; i++, intr_context++) {
 			qdev->rx_ring[i].irq = i;
 			intr_context->intr = i;
 			intr_context->qdev = qdev;
+			/* Set up this vector's bit-mask that indicates
+			 * which queues it services.
+			 */
+			ql_set_irq_mask(qdev, intr_context);
 			/*
 			 * We set up each vectors enable/disable/read bits so
 			 * there's no bit/mask calculations in the critical path.
@@ -2915,20 +2931,21 @@ static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
 			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
 			    INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
 			    i;
-
-			if (i < qdev->rss_ring_count) {
-				/*
-				 * Inbound queues handle unicast frames only.
-				 */
-				intr_context->handler = qlge_msix_rx_isr;
-				sprintf(intr_context->name, "%s-rx-%d",
+			if (i == 0) {
+				/* The first vector/queue handles
+				 * broadcast/multicast, fatal errors,
+				 * and firmware events.  This is in addition
+				 * to normal inbound NAPI processing.
+				 */
+				intr_context->handler = qlge_isr;
+				sprintf(intr_context->name, "%s-rx-%d",
 					qdev->ndev->name, i);
 			} else {
 				/*
-				 * Outbound queue is for outbound completions only.
+				 * Inbound queues handle unicast frames only.
 				 */
-				intr_context->handler = qlge_msix_tx_isr;
-				sprintf(intr_context->name, "%s-tx-%d",
+				intr_context->handler = qlge_msix_rx_isr;
+				sprintf(intr_context->name, "%s-rx-%d",
 					qdev->ndev->name, i);
 			}
 		}
@@ -2955,9 +2972,17 @@ static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
 		 */
 		intr_context->handler = qlge_isr;
 		sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
-		for (i = 0; i < qdev->rx_ring_count; i++)
-			qdev->rx_ring[i].irq = 0;
+		/* Set up this vector's bit-mask that indicates
+		 * which queues it services. In this case there is
+		 * a single vector so it will service all RSS and
+		 * TX completion rings.
+		 */
+		ql_set_irq_mask(qdev, intr_context);
 	}
+	/* Tell the TX completion rings which MSIx vector
+	 * they will be using.
+	 */
+	ql_set_tx_vect(qdev);
 }
 
 static void ql_free_irq(struct ql_adapter *qdev)
@@ -3326,7 +3351,6 @@ static void ql_display_dev_info(struct net_device *ndev)
 static int ql_adapter_down(struct ql_adapter *qdev)
 {
 	int i, status = 0;
-	struct rx_ring *rx_ring;
 
 	ql_link_off(qdev);
 
@@ -3340,27 +3364,8 @@ static int ql_adapter_down(struct ql_adapter *qdev)
 	cancel_delayed_work_sync(&qdev->mpi_idc_work);
 	cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
 
-	/* The default queue at index 0 is always processed in
-	 * a workqueue.
-	 */
-	cancel_delayed_work_sync(&qdev->rx_ring[0].rx_work);
-
-	/* The rest of the rx_rings are processed in
-	 * a workqueue only if it's a single interrupt
-	 * environment (MSI/Legacy).
-	 */
-	for (i = 1; i < qdev->rx_ring_count; i++) {
-		rx_ring = &qdev->rx_ring[i];
-		/* Only the RSS rings use NAPI on multi irq
-		 * environment.  Outbound completion processing
-		 * is done in interrupt context.
-		 */
-		if (i <= qdev->rss_ring_count) {
-			napi_disable(&rx_ring->napi);
-		} else {
-			cancel_delayed_work_sync(&rx_ring->rx_work);
-		}
-	}
+	for (i = 0; i < qdev->rss_ring_count; i++)
+		napi_disable(&qdev->rx_ring[i].napi);
 
 	clear_bit(QL_ADAPTER_UP, &qdev->flags);
 
@@ -3476,9 +3481,9 @@ static int ql_configure_rings(struct ql_adapter *qdev)
 
 		/*
 		 * The completion queue ID for the tx rings start
-		 * immediately after the default Q ID, which is zero.
+		 * immediately after the rss rings.
 		 */
-		tx_ring->cq_id = i + qdev->rss_ring_count;
+		tx_ring->cq_id = qdev->rss_ring_count + i;
 	}
 
 	for (i = 0; i < qdev->rx_ring_count; i++) {
@@ -3488,7 +3493,9 @@ static int ql_configure_rings(struct ql_adapter *qdev)
 		rx_ring->cq_id = i;
 		rx_ring->cpu = i % cpu_cnt;	/* CPU to run handler on. */
 		if (i < qdev->rss_ring_count) {
-			/* Inbound completions (RSS) queues */
+			/*
+			 * Inbound (RSS) queues.
+			 */
 			rx_ring->cq_len = qdev->rx_ring_size;
 			rx_ring->cq_size =
 			    rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
@@ -3804,10 +3811,7 @@ static void ql_release_all(struct pci_dev *pdev)
 		destroy_workqueue(qdev->workqueue);
 		qdev->workqueue = NULL;
 	}
-	if (qdev->q_workqueue) {
-		destroy_workqueue(qdev->q_workqueue);
-		qdev->q_workqueue = NULL;
-	}
+
 	if (qdev->reg_base)
 		iounmap(qdev->reg_base);
 	if (qdev->doorbell_area)
@@ -3920,8 +3924,6 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
 	 * Set up the operating parameters.
 	 */
 	qdev->rx_csum = 1;
-
-	qdev->q_workqueue = create_workqueue(ndev->name);
 	qdev->workqueue = create_singlethread_workqueue(ndev->name);
 	INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
 	INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);