about summary refs log tree commit diff stats
path: root/drivers/net/qlge/qlge_main.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/net/qlge/qlge_main.c')
-rw-r--r--  drivers/net/qlge/qlge_main.c  |  80
1 files changed, 21 insertions, 59 deletions
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index f4c016012f18..45421c8b6010 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -76,7 +76,6 @@ MODULE_PARM_DESC(irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
76 76
77static struct pci_device_id qlge_pci_tbl[] __devinitdata = { 77static struct pci_device_id qlge_pci_tbl[] __devinitdata = {
78 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID)}, 78 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID)},
79 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID1)},
80 /* required last entry */ 79 /* required last entry */
81 {0,} 80 {0,}
82}; 81};
@@ -127,12 +126,12 @@ static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
127 126
128int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask) 127int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
129{ 128{
130 unsigned int seconds = 3; 129 unsigned int wait_count = 30;
131 do { 130 do {
132 if (!ql_sem_trylock(qdev, sem_mask)) 131 if (!ql_sem_trylock(qdev, sem_mask))
133 return 0; 132 return 0;
134 ssleep(1); 133 udelay(100);
135 } while (--seconds); 134 } while (--wait_count);
136 return -ETIMEDOUT; 135 return -ETIMEDOUT;
137} 136}
138 137
@@ -1545,7 +1544,7 @@ static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
1545static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring) 1544static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
1546{ 1545{
1547 struct ql_adapter *qdev = rx_ring->qdev; 1546 struct ql_adapter *qdev = rx_ring->qdev;
1548 u32 prod = le32_to_cpu(*rx_ring->prod_idx_sh_reg); 1547 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
1549 struct ob_mac_iocb_rsp *net_rsp = NULL; 1548 struct ob_mac_iocb_rsp *net_rsp = NULL;
1550 int count = 0; 1549 int count = 0;
1551 1550
@@ -1571,7 +1570,7 @@ static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
1571 } 1570 }
1572 count++; 1571 count++;
1573 ql_update_cq(rx_ring); 1572 ql_update_cq(rx_ring);
1574 prod = le32_to_cpu(*rx_ring->prod_idx_sh_reg); 1573 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
1575 } 1574 }
1576 ql_write_cq_idx(rx_ring); 1575 ql_write_cq_idx(rx_ring);
1577 if (netif_queue_stopped(qdev->ndev) && net_rsp != NULL) { 1576 if (netif_queue_stopped(qdev->ndev) && net_rsp != NULL) {
@@ -1591,7 +1590,7 @@ static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
1591static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget) 1590static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
1592{ 1591{
1593 struct ql_adapter *qdev = rx_ring->qdev; 1592 struct ql_adapter *qdev = rx_ring->qdev;
1594 u32 prod = le32_to_cpu(*rx_ring->prod_idx_sh_reg); 1593 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
1595 struct ql_net_rsp_iocb *net_rsp; 1594 struct ql_net_rsp_iocb *net_rsp;
1596 int count = 0; 1595 int count = 0;
1597 1596
@@ -1624,7 +1623,7 @@ static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
1624 } 1623 }
1625 count++; 1624 count++;
1626 ql_update_cq(rx_ring); 1625 ql_update_cq(rx_ring);
1627 prod = le32_to_cpu(*rx_ring->prod_idx_sh_reg); 1626 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
1628 if (count == budget) 1627 if (count == budget)
1629 break; 1628 break;
1630 } 1629 }
@@ -1787,7 +1786,7 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
1787 * Check the default queue and wake handler if active. 1786 * Check the default queue and wake handler if active.
1788 */ 1787 */
1789 rx_ring = &qdev->rx_ring[0]; 1788 rx_ring = &qdev->rx_ring[0];
1790 if (le32_to_cpu(*rx_ring->prod_idx_sh_reg) != rx_ring->cnsmr_idx) { 1789 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) != rx_ring->cnsmr_idx) {
1791 QPRINTK(qdev, INTR, INFO, "Waking handler for rx_ring[0].\n"); 1790 QPRINTK(qdev, INTR, INFO, "Waking handler for rx_ring[0].\n");
1792 ql_disable_completion_interrupt(qdev, intr_context->intr); 1791 ql_disable_completion_interrupt(qdev, intr_context->intr);
1793 queue_delayed_work_on(smp_processor_id(), qdev->q_workqueue, 1792 queue_delayed_work_on(smp_processor_id(), qdev->q_workqueue,
@@ -1801,7 +1800,7 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
1801 */ 1800 */
1802 for (i = 1; i < qdev->rx_ring_count; i++) { 1801 for (i = 1; i < qdev->rx_ring_count; i++) {
1803 rx_ring = &qdev->rx_ring[i]; 1802 rx_ring = &qdev->rx_ring[i];
1804 if (le32_to_cpu(*rx_ring->prod_idx_sh_reg) != 1803 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
1805 rx_ring->cnsmr_idx) { 1804 rx_ring->cnsmr_idx) {
1806 QPRINTK(qdev, INTR, INFO, 1805 QPRINTK(qdev, INTR, INFO,
1807 "Waking handler for rx_ring[%d].\n", i); 1806 "Waking handler for rx_ring[%d].\n", i);
@@ -2356,28 +2355,6 @@ static void ql_tx_ring_clean(struct ql_adapter *qdev)
2356 } 2355 }
2357} 2356}
2358 2357
2359static void ql_free_ring_cb(struct ql_adapter *qdev)
2360{
2361 kfree(qdev->ring_mem);
2362}
2363
2364static int ql_alloc_ring_cb(struct ql_adapter *qdev)
2365{
2366 /* Allocate space for tx/rx ring control blocks. */
2367 qdev->ring_mem_size =
2368 (qdev->tx_ring_count * sizeof(struct tx_ring)) +
2369 (qdev->rx_ring_count * sizeof(struct rx_ring));
2370 qdev->ring_mem = kmalloc(qdev->ring_mem_size, GFP_KERNEL);
2371 if (qdev->ring_mem == NULL) {
2372 return -ENOMEM;
2373 } else {
2374 qdev->rx_ring = qdev->ring_mem;
2375 qdev->tx_ring = qdev->ring_mem +
2376 (qdev->rx_ring_count * sizeof(struct rx_ring));
2377 }
2378 return 0;
2379}
2380
2381static void ql_free_mem_resources(struct ql_adapter *qdev) 2358static void ql_free_mem_resources(struct ql_adapter *qdev)
2382{ 2359{
2383 int i; 2360 int i;
@@ -2467,12 +2444,9 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2467 bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len; 2444 bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
2468 cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT); 2445 cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
2469 2446
2470 cqicb->addr_lo = cpu_to_le32(rx_ring->cq_base_dma); 2447 cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
2471 cqicb->addr_hi = cpu_to_le32((u64) rx_ring->cq_base_dma >> 32);
2472 2448
2473 cqicb->prod_idx_addr_lo = cpu_to_le32(rx_ring->prod_idx_sh_reg_dma); 2449 cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
2474 cqicb->prod_idx_addr_hi =
2475 cpu_to_le32((u64) rx_ring->prod_idx_sh_reg_dma >> 32);
2476 2450
2477 /* 2451 /*
2478 * Set up the control block load flags. 2452 * Set up the control block load flags.
@@ -2483,10 +2457,8 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2483 if (rx_ring->lbq_len) { 2457 if (rx_ring->lbq_len) {
2484 cqicb->flags |= FLAGS_LL; /* Load lbq values */ 2458 cqicb->flags |= FLAGS_LL; /* Load lbq values */
2485 *((u64 *) rx_ring->lbq_base_indirect) = rx_ring->lbq_base_dma; 2459 *((u64 *) rx_ring->lbq_base_indirect) = rx_ring->lbq_base_dma;
2486 cqicb->lbq_addr_lo = 2460 cqicb->lbq_addr =
2487 cpu_to_le32(rx_ring->lbq_base_indirect_dma); 2461 cpu_to_le64(rx_ring->lbq_base_indirect_dma);
2488 cqicb->lbq_addr_hi =
2489 cpu_to_le32((u64) rx_ring->lbq_base_indirect_dma >> 32);
2490 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 : 2462 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
2491 (u16) rx_ring->lbq_buf_size; 2463 (u16) rx_ring->lbq_buf_size;
2492 cqicb->lbq_buf_size = cpu_to_le16(bq_len); 2464 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
@@ -2501,10 +2473,8 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2501 if (rx_ring->sbq_len) { 2473 if (rx_ring->sbq_len) {
2502 cqicb->flags |= FLAGS_LS; /* Load sbq values */ 2474 cqicb->flags |= FLAGS_LS; /* Load sbq values */
2503 *((u64 *) rx_ring->sbq_base_indirect) = rx_ring->sbq_base_dma; 2475 *((u64 *) rx_ring->sbq_base_indirect) = rx_ring->sbq_base_dma;
2504 cqicb->sbq_addr_lo = 2476 cqicb->sbq_addr =
2505 cpu_to_le32(rx_ring->sbq_base_indirect_dma); 2477 cpu_to_le64(rx_ring->sbq_base_indirect_dma);
2506 cqicb->sbq_addr_hi =
2507 cpu_to_le32((u64) rx_ring->sbq_base_indirect_dma >> 32);
2508 cqicb->sbq_buf_size = 2478 cqicb->sbq_buf_size =
2509 cpu_to_le16(((rx_ring->sbq_buf_size / 2) + 8) & 0xfffffff8); 2479 cpu_to_le16(((rx_ring->sbq_buf_size / 2) + 8) & 0xfffffff8);
2510 bq_len = (rx_ring->sbq_len == 65536) ? 0 : 2480 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
@@ -2611,12 +2581,9 @@ static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2611 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO); 2581 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
2612 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id); 2582 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
2613 wqicb->rid = 0; 2583 wqicb->rid = 0;
2614 wqicb->addr_lo = cpu_to_le32(tx_ring->wq_base_dma); 2584 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
2615 wqicb->addr_hi = cpu_to_le32((u64) tx_ring->wq_base_dma >> 32);
2616 2585
2617 wqicb->cnsmr_idx_addr_lo = cpu_to_le32(tx_ring->cnsmr_idx_sh_reg_dma); 2586 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
2618 wqicb->cnsmr_idx_addr_hi =
2619 cpu_to_le32((u64) tx_ring->cnsmr_idx_sh_reg_dma >> 32);
2620 2587
2621 ql_init_tx_ring(qdev, tx_ring); 2588 ql_init_tx_ring(qdev, tx_ring);
2622 2589
@@ -2746,14 +2713,14 @@ static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
2746 * Outbound queue is for outbound completions only. 2713 * Outbound queue is for outbound completions only.
2747 */ 2714 */
2748 intr_context->handler = qlge_msix_tx_isr; 2715 intr_context->handler = qlge_msix_tx_isr;
2749 sprintf(intr_context->name, "%s-txq-%d", 2716 sprintf(intr_context->name, "%s-tx-%d",
2750 qdev->ndev->name, i); 2717 qdev->ndev->name, i);
2751 } else { 2718 } else {
2752 /* 2719 /*
2753 * Inbound queues handle unicast frames only. 2720 * Inbound queues handle unicast frames only.
2754 */ 2721 */
2755 intr_context->handler = qlge_msix_rx_isr; 2722 intr_context->handler = qlge_msix_rx_isr;
2756 sprintf(intr_context->name, "%s-rxq-%d", 2723 sprintf(intr_context->name, "%s-rx-%d",
2757 qdev->ndev->name, i); 2724 qdev->ndev->name, i);
2758 } 2725 }
2759 } 2726 }
@@ -3247,7 +3214,6 @@ static int qlge_close(struct net_device *ndev)
3247 msleep(1); 3214 msleep(1);
3248 ql_adapter_down(qdev); 3215 ql_adapter_down(qdev);
3249 ql_release_adapter_resources(qdev); 3216 ql_release_adapter_resources(qdev);
3250 ql_free_ring_cb(qdev);
3251 return 0; 3217 return 0;
3252} 3218}
3253 3219
@@ -3273,8 +3239,8 @@ static int ql_configure_rings(struct ql_adapter *qdev)
3273 * This limitation can be removed when requested. 3239 * This limitation can be removed when requested.
3274 */ 3240 */
3275 3241
3276 if (cpu_cnt > 8) 3242 if (cpu_cnt > MAX_CPUS)
3277 cpu_cnt = 8; 3243 cpu_cnt = MAX_CPUS;
3278 3244
3279 /* 3245 /*
3280 * rx_ring[0] is always the default queue. 3246 * rx_ring[0] is always the default queue.
@@ -3294,9 +3260,6 @@ static int ql_configure_rings(struct ql_adapter *qdev)
3294 */ 3260 */
3295 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count + 1; 3261 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count + 1;
3296 3262
3297 if (ql_alloc_ring_cb(qdev))
3298 return -ENOMEM;
3299
3300 for (i = 0; i < qdev->tx_ring_count; i++) { 3263 for (i = 0; i < qdev->tx_ring_count; i++) {
3301 tx_ring = &qdev->tx_ring[i]; 3264 tx_ring = &qdev->tx_ring[i];
3302 memset((void *)tx_ring, 0, sizeof(tx_ring)); 3265 memset((void *)tx_ring, 0, sizeof(tx_ring));
@@ -3393,7 +3356,6 @@ static int qlge_open(struct net_device *ndev)
3393 3356
3394error_up: 3357error_up:
3395 ql_release_adapter_resources(qdev); 3358 ql_release_adapter_resources(qdev);
3396 ql_free_ring_cb(qdev);
3397 return err; 3359 return err;
3398} 3360}
3399 3361