Diffstat (limited to 'drivers/net/tg3.c')
-rw-r--r-- | drivers/net/tg3.c | 231
1 files changed, 144 insertions, 87 deletions
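In outline, the hunks below pull the rx producer-ring handling out of tg3_free_rings(), tg3_init_rings(), tg3_alloc_consistent() and tg3_free_consistent() into four dedicated helpers (tg3_rx_prodring_init/alloc/free/fini), and gate the jumbo-ring work on TG3_FLAG_JUMBO_CAPABLE. A minimal sketch of how the helpers pair up with the existing entry points after the patch (sketch only, not driver code; the wrapper function name is made up and error handling plus the unrelated tx/status-block allocations are elided):

/* Sketch only: setup/teardown pairing after this patch. */
static int tg3_ring_lifecycle_sketch(struct tg3 *tp)
{
	if (tg3_alloc_consistent(tp))	/* now starts with tg3_rx_prodring_init() */
		return -ENOMEM;
	if (tg3_init_rings(tp))		/* now ends in tg3_rx_prodring_alloc() */
		return -ENOMEM;
	/* ... normal rx/tx operation ... */
	tg3_free_rings(tp);		/* now ends in tg3_rx_prodring_free() */
	tg3_free_consistent(tp);	/* now ends in tg3_rx_prodring_fini() */
	return 0;
}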
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index a2a5f318315b..5d0a1e6e0517 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -5517,14 +5517,7 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
 	return err;
 }
 
-/* Free up pending packets in all rx/tx rings.
- *
- * The chip has been shut down and the driver detached from
- * the networking, so no interrupts or new tx packets will
- * end up in the driver.  tp->{tx,}lock is not held and we are not
- * in an interrupt context and thus may sleep.
- */
-static void tg3_free_rings(struct tg3 *tp)
+static void tg3_rx_prodring_free(struct tg3 *tp)
 {
 	struct ring_info *rxp;
 	int i;
@@ -5534,46 +5527,29 @@ static void tg3_free_rings(struct tg3 *tp)
 
 		if (rxp->skb == NULL)
 			continue;
-		pci_unmap_single(tp->pdev,
-				 pci_unmap_addr(rxp, mapping),
-				 tp->rx_pkt_map_sz,
-				 PCI_DMA_FROMDEVICE);
-		dev_kfree_skb_any(rxp->skb);
-		rxp->skb = NULL;
-	}
-
-	for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
-		rxp = &tp->rx_jumbo_buffers[i];
 
-		if (rxp->skb == NULL)
-			continue;
 		pci_unmap_single(tp->pdev,
 				 pci_unmap_addr(rxp, mapping),
-				 TG3_RX_JMB_MAP_SZ,
+				 tp->rx_pkt_map_sz,
 				 PCI_DMA_FROMDEVICE);
 		dev_kfree_skb_any(rxp->skb);
 		rxp->skb = NULL;
 	}
 
-	for (i = 0; i < TG3_TX_RING_SIZE; ) {
-		struct tx_ring_info *txp;
-		struct sk_buff *skb;
+	if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
+		for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
+			rxp = &tp->rx_jumbo_buffers[i];
 
-		txp = &tp->tx_buffers[i];
-		skb = txp->skb;
+			if (rxp->skb == NULL)
+				continue;
 
-		if (skb == NULL) {
-			i++;
-			continue;
+			pci_unmap_single(tp->pdev,
+					 pci_unmap_addr(rxp, mapping),
+					 TG3_RX_JMB_MAP_SZ,
+					 PCI_DMA_FROMDEVICE);
+			dev_kfree_skb_any(rxp->skb);
+			rxp->skb = NULL;
 		}
-
-		skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
-
-		txp->skb = NULL;
-
-		i += skb_shinfo(skb)->nr_frags + 1;
-
-		dev_kfree_skb_any(skb);
 	}
 }
 
@@ -5584,18 +5560,12 @@ static void tg3_free_rings(struct tg3 *tp)
  * end up in the driver.  tp->{tx,}lock are held and thus
  * we may not sleep.
  */
-static int tg3_init_rings(struct tg3 *tp)
+static int tg3_rx_prodring_alloc(struct tg3 *tp)
 {
 	u32 i, rx_pkt_dma_sz;
 
-	/* Free up all the SKBs. */
-	tg3_free_rings(tp);
-
 	/* Zero out all descriptors. */
 	memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
-	memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
-	memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
-	memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
 
 	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
 	if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
@@ -5617,19 +5587,6 @@ static int tg3_init_rings(struct tg3 *tp)
 			       (i << RXD_OPAQUE_INDEX_SHIFT));
 	}
 
-	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
-		for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
-			struct tg3_rx_buffer_desc *rxd;
-
-			rxd = &tp->rx_jumbo[i];
-			rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
-			rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
-				RXD_FLAG_JUMBO;
-			rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
-				       (i << RXD_OPAQUE_INDEX_SHIFT));
-		}
-	}
-
 	/* Now allocate fresh SKBs for each rx ring. */
 	for (i = 0; i < tp->rx_pending; i++) {
 		if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
@@ -5639,13 +5596,29 @@ static int tg3_init_rings(struct tg3 *tp)
 			       "successfully.\n",
 			       tp->dev->name, i, tp->rx_pending);
 			if (i == 0)
-				return -ENOMEM;
+				goto initfail;
 			tp->rx_pending = i;
 			break;
 		}
 	}
 
+	if (!(tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE))
+		goto done;
+
+	memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
+
 	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
+		for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
+			struct tg3_rx_buffer_desc *rxd;
+
+			rxd = &tp->rx_jumbo[i];
+			rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
+			rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
+				RXD_FLAG_JUMBO;
+			rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
+				       (i << RXD_OPAQUE_INDEX_SHIFT));
+		}
+
 		for (i = 0; i < tp->rx_jumbo_pending; i++) {
 			if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
 					     -1, i) < 0) {
@@ -5654,26 +5627,28 @@ static int tg3_init_rings(struct tg3 *tp)
 				       "only %d out of %d buffers were "
 				       "allocated successfully.\n",
 				       tp->dev->name, i, tp->rx_jumbo_pending);
-				if (i == 0) {
-					tg3_free_rings(tp);
-					return -ENOMEM;
-				}
+				if (i == 0)
+					goto initfail;
 				tp->rx_jumbo_pending = i;
 				break;
 			}
 		}
 	}
+
+done:
 	return 0;
+
+initfail:
+	tg3_rx_prodring_free(tp);
+	return -ENOMEM;
 }
 
-/*
- * Must not be invoked with interrupt sources disabled and
- * the hardware shutdown down.
- */
-static void tg3_free_consistent(struct tg3 *tp)
+static void tg3_rx_prodring_fini(struct tg3 *tp)
 {
 	kfree(tp->rx_std_buffers);
 	tp->rx_std_buffers = NULL;
+	kfree(tp->rx_jumbo_buffers);
+	tp->rx_jumbo_buffers = NULL;
 	if (tp->rx_std) {
 		pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
 				    tp->rx_std, tp->rx_std_mapping);
@@ -5684,6 +5659,103 @@ static void tg3_free_consistent(struct tg3 *tp)
 				    tp->rx_jumbo, tp->rx_jumbo_mapping);
 		tp->rx_jumbo = NULL;
 	}
+}
+
+static int tg3_rx_prodring_init(struct tg3 *tp)
+{
+	tp->rx_std_buffers = kzalloc(sizeof(struct ring_info) *
+				     TG3_RX_RING_SIZE, GFP_KERNEL);
+	if (!tp->rx_std_buffers)
+		return -ENOMEM;
+
+	tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
+					  &tp->rx_std_mapping);
+	if (!tp->rx_std)
+		goto err_out;
+
+	if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
+		tp->rx_jumbo_buffers = kzalloc(sizeof(struct ring_info) *
+					       TG3_RX_JUMBO_RING_SIZE,
+					       GFP_KERNEL);
+		if (!tp->rx_jumbo_buffers)
+			goto err_out;
+
+		tp->rx_jumbo = pci_alloc_consistent(tp->pdev,
+						    TG3_RX_JUMBO_RING_BYTES,
+						    &tp->rx_jumbo_mapping);
+		if (!tp->rx_jumbo)
+			goto err_out;
+	}
+
+	return 0;
+
+err_out:
+	tg3_rx_prodring_fini(tp);
+	return -ENOMEM;
+}
+
+/* Free up pending packets in all rx/tx rings.
+ *
+ * The chip has been shut down and the driver detached from
+ * the networking, so no interrupts or new tx packets will
+ * end up in the driver.  tp->{tx,}lock is not held and we are not
+ * in an interrupt context and thus may sleep.
+ */
+static void tg3_free_rings(struct tg3 *tp)
+{
+	int i;
+
+	for (i = 0; i < TG3_TX_RING_SIZE; ) {
+		struct tx_ring_info *txp;
+		struct sk_buff *skb;
+
+		txp = &tp->tx_buffers[i];
+		skb = txp->skb;
+
+		if (skb == NULL) {
+			i++;
+			continue;
+		}
+
+		skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
+
+		txp->skb = NULL;
+
+		i += skb_shinfo(skb)->nr_frags + 1;
+
+		dev_kfree_skb_any(skb);
+	}
+
+	tg3_rx_prodring_free(tp);
+}
+
+/* Initialize tx/rx rings for packet processing.
+ *
+ * The chip has been shut down and the driver detached from
+ * the networking, so no interrupts or new tx packets will
+ * end up in the driver.  tp->{tx,}lock are held and thus
+ * we may not sleep.
+ */
+static int tg3_init_rings(struct tg3 *tp)
+{
+	/* Free up all the SKBs. */
+	tg3_free_rings(tp);
+
+	/* Zero out all descriptors. */
+	memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
+	memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
+
+	return tg3_rx_prodring_alloc(tp);
+}
+
+/*
+ * Must not be invoked with interrupt sources disabled and
+ * the hardware shutdown down.
+ */
+static void tg3_free_consistent(struct tg3 *tp)
+{
+	kfree(tp->tx_buffers);
+	tp->tx_buffers = NULL;
 	if (tp->rx_rcb) {
 		pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
 				    tp->rx_rcb, tp->rx_rcb_mapping);
@@ -5704,6 +5776,7 @@ static void tg3_free_consistent(struct tg3 *tp)
 				    tp->hw_stats, tp->stats_mapping);
 		tp->hw_stats = NULL;
 	}
+	tg3_rx_prodring_fini(tp);
 }
 
 /*
@@ -5712,28 +5785,12 @@ static void tg3_free_consistent(struct tg3 *tp)
  */
 static int tg3_alloc_consistent(struct tg3 *tp)
 {
-	tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
-				      (TG3_RX_RING_SIZE +
-				       TG3_RX_JUMBO_RING_SIZE)) +
-				     (sizeof(struct tx_ring_info) *
-				      TG3_TX_RING_SIZE),
-				     GFP_KERNEL);
-	if (!tp->rx_std_buffers)
+	if (tg3_rx_prodring_init(tp))
 		return -ENOMEM;
 
-	tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
-	tp->tx_buffers = (struct tx_ring_info *)
-		&tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
-
-	tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
-					  &tp->rx_std_mapping);
-	if (!tp->rx_std)
-		goto err_out;
-
-	tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
-					    &tp->rx_jumbo_mapping);
-
-	if (!tp->rx_jumbo)
+	tp->tx_buffers = kzalloc(sizeof(struct tx_ring_info) *
+				 TG3_TX_RING_SIZE, GFP_KERNEL);
+	if (!tp->tx_buffers)
 		goto err_out;
 
 	tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),