Diffstat (limited to 'drivers/net/chelsio')
 -rw-r--r--  drivers/net/chelsio/sge.c | 54
 1 file changed, 19 insertions, 35 deletions
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index ac7c46b0eeb4..f94d63971642 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -85,10 +85,6 @@
  */
 #define TX_RECLAIM_PERIOD (HZ / 4)
 
-#ifndef NET_IP_ALIGN
-# define NET_IP_ALIGN 2
-#endif
-
 #define M_CMD_LEN 0x7fffffff
 #define V_CMD_LEN(v) (v)
 #define G_CMD_LEN(v) ((v) & M_CMD_LEN)
@@ -575,11 +571,10 @@ static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
 		q->size = p->freelQ_size[i];
 		q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN;
 		size = sizeof(struct freelQ_e) * q->size;
-		q->entries = (struct freelQ_e *)
-			pci_alloc_consistent(pdev, size, &q->dma_addr);
+		q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
 		if (!q->entries)
 			goto err_no_mem;
-		memset(q->entries, 0, size);
+
 		size = sizeof(struct freelQ_ce) * q->size;
 		q->centries = kzalloc(size, GFP_KERNEL);
 		if (!q->centries)
@@ -613,11 +608,10 @@ static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
 	sge->respQ.size = SGE_RESPQ_E_N;
 	sge->respQ.credits = 0;
 	size = sizeof(struct respQ_e) * sge->respQ.size;
-	sge->respQ.entries = (struct respQ_e *)
+	sge->respQ.entries =
 		pci_alloc_consistent(pdev, size, &sge->respQ.dma_addr);
 	if (!sge->respQ.entries)
 		goto err_no_mem;
-	memset(sge->respQ.entries, 0, size);
 	return 0;
 
 err_no_mem:
@@ -637,20 +631,12 @@ static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)
 	q->in_use -= n;
 	ce = &q->centries[cidx];
 	while (n--) {
-		if (q->sop) {
-			if (likely(pci_unmap_len(ce, dma_len))) {
-				pci_unmap_single(pdev,
-						 pci_unmap_addr(ce, dma_addr),
-						 pci_unmap_len(ce, dma_len),
-						 PCI_DMA_TODEVICE);
+		if (likely(pci_unmap_len(ce, dma_len))) {
+			pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
+					 pci_unmap_len(ce, dma_len),
+					 PCI_DMA_TODEVICE);
+			if (q->sop)
 				q->sop = 0;
-			}
-		} else {
-			if (likely(pci_unmap_len(ce, dma_len))) {
-				pci_unmap_page(pdev, pci_unmap_addr(ce, dma_addr),
-					       pci_unmap_len(ce, dma_len),
-					       PCI_DMA_TODEVICE);
-			}
 		}
 		if (ce->skb) {
 			dev_kfree_skb_any(ce->skb);
@@ -711,11 +697,10 @@ static int alloc_tx_resources(struct sge *sge, struct sge_params *p)
 		q->stop_thres = 0;
 		spin_lock_init(&q->lock);
 		size = sizeof(struct cmdQ_e) * q->size;
-		q->entries = (struct cmdQ_e *)
-			pci_alloc_consistent(pdev, size, &q->dma_addr);
+		q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
 		if (!q->entries)
 			goto err_no_mem;
-		memset(q->entries, 0, size);
+
 		size = sizeof(struct cmdQ_ce) * q->size;
 		q->centries = kzalloc(size, GFP_KERNEL);
 		if (!q->centries)
@@ -1447,19 +1432,18 @@ static inline int enough_free_Tx_descs(const struct cmdQ *q)
 static void restart_tx_queues(struct sge *sge)
 {
 	struct adapter *adap = sge->adapter;
+	int i;
 
-	if (enough_free_Tx_descs(&sge->cmdQ[0])) {
-		int i;
+	if (!enough_free_Tx_descs(&sge->cmdQ[0]))
+		return;
 
-		for_each_port(adap, i) {
-			struct net_device *nd = adap->port[i].dev;
+	for_each_port(adap, i) {
+		struct net_device *nd = adap->port[i].dev;
 
-			if (test_and_clear_bit(nd->if_port,
-					       &sge->stopped_tx_queues) &&
-			    netif_running(nd)) {
-				sge->stats.cmdQ_restarted[2]++;
-				netif_wake_queue(nd);
-			}
+		if (test_and_clear_bit(nd->if_port, &sge->stopped_tx_queues) &&
+		    netif_running(nd)) {
+			sge->stats.cmdQ_restarted[2]++;
+			netif_wake_queue(nd);
 		}
-	}
 	}
 }
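
Aside (not part of the commit): the casts dropped in the alloc_rx_resources() and alloc_tx_resources() hunks above rely on the fact that in C a void * return value converts implicitly to any object pointer type, so the result of pci_alloc_consistent() never needed an explicit cast. A minimal standalone sketch of that idiom, using plain malloc() rather than driver code (struct foo is purely illustrative):

	#include <stdlib.h>

	struct foo {
		int x;
	};

	int main(void)
	{
		/* malloc() returns void *, which assigns to a struct foo *
		 * without a cast -- the same idiom the patch applies to
		 * the pci_alloc_consistent() call sites.
		 */
		struct foo *p = malloc(sizeof(*p));

		if (!p)
			return 1;

		p->x = 1;
		free(p);
		return 0;
	}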