author		Jeff Kirsher <jeffrey.t.kirsher@intel.com>	2006-01-12 19:51:19 -0500
committer	Jeff Garzik <jgarzik@pobox.com>			2006-01-17 07:48:03 -0500
commit		72d64a436724da72f876c85a73895a622da6a673 (patch)
tree		c59dde1b1c116036dfd4400b35df002549e4c9ec /drivers/net/e1000/e1000_main.c
parent		1e613fd9d64aba9945ddb3b7f1107127ee8a9835 (diff)
[PATCH] e1000: Added cleaned_count to RX buffer allocation
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Signed-off-by: John Ronciak <john.ronciak@intel.com>
Signed-off-by: Jeff Garzik <jgarzik@pobox.com>
Diffstat (limited to 'drivers/net/e1000/e1000_main.c')
-rw-r--r--	drivers/net/e1000/e1000_main.c	68
1 file changed, 46 insertions(+), 22 deletions(-)
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 53f87fe511c5..72a80099f4ae 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -171,9 +171,11 @@ static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
                                        struct e1000_rx_ring *rx_ring);
 #endif
 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
-                                   struct e1000_rx_ring *rx_ring);
+                                   struct e1000_rx_ring *rx_ring,
+                                   int cleaned_count);
 static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
-                                      struct e1000_rx_ring *rx_ring);
+                                      struct e1000_rx_ring *rx_ring,
+                                      int cleaned_count);
 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
                            int cmd);
@@ -411,8 +413,12 @@ e1000_up(struct e1000_adapter *adapter)
         e1000_configure_tx(adapter);
         e1000_setup_rctl(adapter);
         e1000_configure_rx(adapter);
+        /* call E1000_DESC_UNUSED which always leaves
+         * at least 1 descriptor unused to make sure
+         * next_to_use != next_to_clean */
         for (i = 0; i < adapter->num_rx_queues; i++) {
-                adapter->alloc_rx_buf(adapter, &adapter->rx_ring[i]);
+                struct e1000_rx_ring *ring = &adapter->rx_ring[i];
+                adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
         }
 
 #ifdef CONFIG_PCI_MSI
@@ -2119,7 +2125,9 @@ e1000_leave_82542_rst(struct e1000_adapter *adapter)
 
         if(netif_running(netdev)) {
                 e1000_configure_rx(adapter);
-                e1000_alloc_rx_buffers(adapter, &adapter->rx_ring[0]);
+                /* No need to loop, because 82542 supports only 1 queue */
+                struct e1000_rx_ring *ring = &adapter->rx_ring[0];
+                adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
         }
 }
 
@@ -3539,6 +3547,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
         uint8_t last_byte;
         unsigned int i;
         boolean_t cleaned = FALSE;
+        int cleaned_count = 0;
 
         i = rx_ring->next_to_clean;
         rx_desc = E1000_RX_DESC(*rx_ring, i);
@@ -3550,11 +3559,10 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
                         break;
                 (*work_done)++;
 #endif
-                cleaned = TRUE;
 
-                pci_unmap_single(pdev,
-                                 buffer_info->dma,
-                                 buffer_info->length,
+                cleaned = TRUE;
+                cleaned_count++;
+                pci_unmap_single(pdev, buffer_info->dma, buffer_info->length,
                                  PCI_DMA_FROMDEVICE);
 
                 skb = buffer_info->skb;
@@ -3573,8 +3581,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
                         if(TBI_ACCEPT(&adapter->hw, rx_desc->status,
                                       rx_desc->errors, length, last_byte)) {
                                 spin_lock_irqsave(&adapter->stats_lock, flags);
-                                e1000_tbi_adjust_stats(&adapter->hw,
-                                                       &adapter->stats,
+                                e1000_tbi_adjust_stats(&adapter->hw, &adapter->stats,
                                                        length, skb->data);
                                 spin_unlock_irqrestore(&adapter->stats_lock,
                                                        flags);
@@ -3589,8 +3596,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
                 skb_put(skb, length - ETHERNET_FCS_SIZE);
 
                 /* Receive Checksum Offload */
-                e1000_rx_checksum(adapter,
-                                  (uint32_t)(rx_desc->status) |
+                e1000_rx_checksum(adapter, (uint32_t)(rx_desc->status) |
                                   ((uint32_t)(rx_desc->errors) << 24),
                                   rx_desc->csum, skb);
                 skb->protocol = eth_type_trans(skb, netdev);
@@ -3621,13 +3627,19 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
 
 next_desc:
                 rx_desc->status = 0;
-                buffer_info->skb = NULL;
-                if(unlikely(++i == rx_ring->count)) i = 0;
 
-                rx_desc = E1000_RX_DESC(*rx_ring, i);
+                /* return some buffers to hardware, one at a time is too slow */
+                if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
+                        adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
+                        cleaned_count = 0;
+                }
+
         }
         rx_ring->next_to_clean = i;
-        adapter->alloc_rx_buf(adapter, rx_ring);
+
+        cleaned_count = E1000_DESC_UNUSED(rx_ring);
+        if (cleaned_count)
+                adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
 
         return cleaned;
 }
@@ -3656,6 +3668,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
         struct sk_buff *skb;
         unsigned int i, j;
         uint32_t length, staterr;
+        int cleaned_count = 0;
         boolean_t cleaned = FALSE;
 
         i = rx_ring->next_to_clean;
@@ -3672,6 +3685,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
                 (*work_done)++;
 #endif
                 cleaned = TRUE;
+                cleaned_count++;
                 pci_unmap_single(pdev, buffer_info->dma,
                                  buffer_info->length,
                                  PCI_DMA_FROMDEVICE);
@@ -3756,13 +3770,20 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
 next_desc:
                 rx_desc->wb.middle.status_error &= ~0xFF;
                 buffer_info->skb = NULL;
-                if(unlikely(++i == rx_ring->count)) i = 0;
 
-                rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
+                /* return some buffers to hardware, one at a time is too slow */
+                if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
+                        adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
+                        cleaned_count = 0;
+                }
+
                 staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
         }
         rx_ring->next_to_clean = i;
-        adapter->alloc_rx_buf(adapter, rx_ring);
+
+        cleaned_count = E1000_DESC_UNUSED(rx_ring);
+        if (cleaned_count)
+                adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
 
         return cleaned;
 }
@@ -3774,7 +3795,8 @@ next_desc:
 
 static void
 e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
-                       struct e1000_rx_ring *rx_ring)
+                       struct e1000_rx_ring *rx_ring,
+                       int cleaned_count)
 {
         struct net_device *netdev = adapter->netdev;
         struct pci_dev *pdev = adapter->pdev;
@@ -3792,6 +3814,7 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 
                 if(unlikely(!skb)) {
                         /* Better luck next round */
+                        adapter->alloc_rx_buff_failed++;
                         break;
                 }
 
@@ -3876,7 +3899,8 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 
 static void
 e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
-                          struct e1000_rx_ring *rx_ring)
+                          struct e1000_rx_ring *rx_ring,
+                          int cleaned_count)
 {
         struct net_device *netdev = adapter->netdev;
         struct pci_dev *pdev = adapter->pdev;
@@ -3892,7 +3916,7 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
         ps_page = &rx_ring->ps_page[i];
         ps_page_dma = &rx_ring->ps_page_dma[i];
 
-        while(!buffer_info->skb) {
+        while (cleaned_count--) {
                 rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
 
                 for(j = 0; j < PS_PAGE_BUFFERS; j++) {
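
For readers who want the idea behind the diff without tracing every hunk: the patch makes the RX cleanup paths count how many descriptors they have processed (cleaned_count) and return buffers to the hardware in chunks of E1000_RX_BUFFER_WRITE, instead of refilling only once at the end of each cleanup pass. What follows is a minimal sketch of that pattern, not the driver code itself; the names rx_ring, desc_unused, alloc_rx_buffers, clean_rx and RX_BUFFER_WRITE are hypothetical stand-ins for e1000_rx_ring, E1000_DESC_UNUSED, adapter->alloc_rx_buf and E1000_RX_BUFFER_WRITE, and the skb/DMA handling is elided.

/* Sketch of the batched RX refill pattern this patch introduces.
 * All types and helpers here are simplified stand-ins, not e1000 code. */

#define RX_BUFFER_WRITE 16        /* refill threshold (like E1000_RX_BUFFER_WRITE) */

struct rx_ring {
        unsigned int count;          /* number of descriptors in the ring */
        unsigned int next_to_use;    /* next slot the driver hands to hardware */
        unsigned int next_to_clean;  /* next slot the driver will process */
};

/* Unused descriptors, always leaving one so next_to_use != next_to_clean
 * (the same idea as the driver's E1000_DESC_UNUSED macro). */
static unsigned int desc_unused(const struct rx_ring *ring)
{
        if (ring->next_to_clean > ring->next_to_use)
                return ring->next_to_clean - ring->next_to_use - 1;
        return ring->count + ring->next_to_clean - ring->next_to_use - 1;
}

/* Stand-in for adapter->alloc_rx_buf(): refill exactly n descriptors. */
static void alloc_rx_buffers(struct rx_ring *ring, unsigned int n)
{
        while (n--) {
                /* ...allocate a buffer, map it, write the descriptor... */
                if (++ring->next_to_use == ring->count)
                        ring->next_to_use = 0;
        }
}

/* Cleanup loop: count cleaned descriptors and hand buffers back to the
 * hardware in batches rather than only once at the end. */
static void clean_rx(struct rx_ring *ring, unsigned int frames_ready)
{
        unsigned int cleaned_count = 0;

        while (frames_ready--) {
                /* ...unmap the buffer, pass the packet up the stack... */
                cleaned_count++;
                if (++ring->next_to_clean == ring->count)
                        ring->next_to_clean = 0;

                /* return some buffers to hardware, one at a time is too slow */
                if (cleaned_count >= RX_BUFFER_WRITE) {
                        alloc_rx_buffers(ring, cleaned_count);
                        cleaned_count = 0;
                }
        }

        /* top the ring back up with whatever is still unused */
        cleaned_count = desc_unused(ring);
        if (cleaned_count)
                alloc_rx_buffers(ring, cleaned_count);
}

Refilling in chunks keeps the hardware from running short of buffers during a long cleanup pass (the in-diff comment's "one at a time is too slow" point), while the final desc_unused() top-up preserves the invariant that at least one descriptor stays unused so next_to_use never catches next_to_clean.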