author		Jeff Kirsher <jeffrey.t.kirsher@intel.com>	2006-01-12 19:51:30 -0500
committer	Jeff Garzik <jgarzik@pobox.com>	2006-01-17 07:51:03 -0500
commit		a292ca6efbc1f259ddfb9c902367f2588e0e8b0f
tree		02d26f87e4b3931c54923c5f97557eae0eff39f0 /drivers/net/e1000/e1000_main.c
parent		240b1710f56b359685bc698e2a1473a2f3f2b8d7
[PATCH] e1000: Added copy break code
Improves small packet performance with large amounts of reassembly being
done in the stack.

Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Signed-off-by: John Ronciak <john.ronciak@intel.com>
Signed-off-by: Jeff Garzik <jgarzik@pobox.com>
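For context when browsing the history, here is the copybreak idea as one stand-alone C sketch: when a received frame is much smaller than the ring's full-sized DMA buffer, copying it into a right-sized allocation lets the original buffer stay on the ring, so small-packet workloads stop churning large buffer allocations while the stack does reassembly. Everything below is illustrative (COPYBREAK_LEN, struct rx_buf, and rx_copybreak are made-up names), not the driver's code:

/* Illustrative stand-alone model of copybreak; not driver code.
 * COPYBREAK_LEN mirrors the E1000_CB_LENGTH threshold in the patch. */
#include <stdlib.h>
#include <string.h>

#define COPYBREAK_LEN 256

struct rx_buf {
	unsigned char *data;	/* full-sized DMA receive buffer */
	size_t len;		/* bytes the NIC wrote into it */
};

/* Returns the buffer to hand up the stack; sets *recycled when the
 * original DMA buffer can be reposted to the RX ring unchanged. */
static unsigned char *rx_copybreak(struct rx_buf *buf, int *recycled)
{
	*recycled = 0;
	if (buf->len < COPYBREAK_LEN) {
		unsigned char *copy = malloc(buf->len);
		if (copy) {
			memcpy(copy, buf->data, buf->len);
			*recycled = 1;	/* keep buf->data on the ring */
			return copy;
		}
	}
	/* Large frame (or allocation failed): pass the DMA buffer
	 * itself along and allocate a fresh one for the ring later. */
	return buf->data;
}

The patch does the same with sk_buffs: dev_alloc_skb(length + NET_IP_ALIGN), skb_reserve() for IP header alignment, memcpy() of the frame, and the original skb parked in buffer_info->skb so e1000_alloc_rx_buffers() can recycle it.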
Diffstat (limited to 'drivers/net/e1000/e1000_main.c')
-rw-r--r--	drivers/net/e1000/e1000_main.c	59
1 file changed, 47 insertions(+), 12 deletions(-)
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 8207db44b705..bef4d9d6f2d5 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -420,7 +420,8 @@ e1000_up(struct e1000_adapter *adapter)
 	 * next_to_use != next_to_clean */
 	for (i = 0; i < adapter->num_rx_queues; i++) {
 		struct e1000_rx_ring *ring = &adapter->rx_ring[i];
-		adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
+		adapter->alloc_rx_buf(adapter, ring,
+				      E1000_DESC_UNUSED(ring));
 	}
 
 #ifdef CONFIG_PCI_MSI
@@ -3567,23 +3568,26 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
 	uint32_t length;
 	uint8_t last_byte;
 	unsigned int i;
-	boolean_t cleaned = FALSE;
 	int cleaned_count = 0;
+	boolean_t cleaned = FALSE, multi_descriptor = FALSE;
 
 	i = rx_ring->next_to_clean;
 	rx_desc = E1000_RX_DESC(*rx_ring, i);
 
 	while(rx_desc->status & E1000_RXD_STAT_DD) {
 		buffer_info = &rx_ring->buffer_info[i];
+		u8 status;
 #ifdef CONFIG_E1000_NAPI
 		if(*work_done >= work_to_do)
 			break;
 		(*work_done)++;
 #endif
-
+		status = rx_desc->status;
 		cleaned = TRUE;
 		cleaned_count++;
-		pci_unmap_single(pdev, buffer_info->dma, buffer_info->length,
+		pci_unmap_single(pdev,
+				 buffer_info->dma,
+				 buffer_info->length,
 				 PCI_DMA_FROMDEVICE);
 
 		skb = buffer_info->skb;
@@ -3602,7 +3606,8 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
 			if(TBI_ACCEPT(&adapter->hw, rx_desc->status,
 				      rx_desc->errors, length, last_byte)) {
 				spin_lock_irqsave(&adapter->stats_lock, flags);
-				e1000_tbi_adjust_stats(&adapter->hw, &adapter->stats,
+				e1000_tbi_adjust_stats(&adapter->hw,
+						       &adapter->stats,
 						       length, skb->data);
 				spin_unlock_irqrestore(&adapter->stats_lock,
 						       flags);
@@ -3613,17 +3618,40 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
 			}
 		}
 
-		/* Good Receive */
-		skb_put(skb, length - ETHERNET_FCS_SIZE);
+		/* code added for copybreak, this should improve
+		 * performance for small packets with large amounts
+		 * of reassembly being done in the stack */
+#define E1000_CB_LENGTH 256
+		if ((length < E1000_CB_LENGTH) &&
+		    !rx_ring->rx_skb_top &&
+		    /* or maybe (status & E1000_RXD_STAT_EOP) && */
+		    !multi_descriptor) {
+			struct sk_buff *new_skb =
+				dev_alloc_skb(length + NET_IP_ALIGN);
+			if (new_skb) {
+				skb_reserve(new_skb, NET_IP_ALIGN);
+				new_skb->dev = netdev;
+				memcpy(new_skb->data - NET_IP_ALIGN,
+				       skb->data - NET_IP_ALIGN,
+				       length + NET_IP_ALIGN);
+				/* save the skb in buffer_info as good */
+				buffer_info->skb = skb;
+				skb = new_skb;
+				skb_put(skb, length);
+			}
+		}
+
+		/* end copybreak code */
 
 		/* Receive Checksum Offload */
-		e1000_rx_checksum(adapter, (uint32_t)(rx_desc->status) |
+		e1000_rx_checksum(adapter,
+				  (uint32_t)(status) |
 				  ((uint32_t)(rx_desc->errors) << 24),
 				  rx_desc->csum, skb);
 		skb->protocol = eth_type_trans(skb, netdev);
 #ifdef CONFIG_E1000_NAPI
 		if(unlikely(adapter->vlgrp &&
-			    (rx_desc->status & E1000_RXD_STAT_VP))) {
+			    (status & E1000_RXD_STAT_VP))) {
 			vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
 						 le16_to_cpu(rx_desc->special) &
 						 E1000_RXD_SPC_VLAN_MASK);
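A note on the status local introduced above: the RX descriptor sits in memory the adapter writes back over DMA, so snapshotting rx_desc->status once per descriptor keeps the later checksum and VLAN tests reading one consistent value instead of re-reading hardware-owned memory. That is a reading of the patch, shown as an illustrative sketch (the bit masks below stand in for E1000_RXD_STAT_DD and E1000_RXD_STAT_VP and are not driver definitions):

/* Minimal sketch of the snapshot pattern; not driver code. */
static void handle_rx_descriptor(volatile const unsigned char *desc_status)
{
	unsigned char status = *desc_status;	/* one read of DMA-written memory */

	if (status & 0x01) {	/* stand-in for the descriptor-done bit */
		/* process the frame */
	}
	if (status & 0x08) {	/* stand-in for the VLAN-present bit */
		/* tag handling sees the same snapshot, never a re-read */
	}
}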
@@ -3817,7 +3845,7 @@ next_desc:
 static void
 e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 		       struct e1000_rx_ring *rx_ring,
-                       int cleaned_count)
+		       int cleaned_count)
 {
 	struct net_device *netdev = adapter->netdev;
 	struct pci_dev *pdev = adapter->pdev;
@@ -3830,8 +3858,14 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 	i = rx_ring->next_to_use;
 	buffer_info = &rx_ring->buffer_info[i];
 
-	while(!buffer_info->skb) {
-		skb = dev_alloc_skb(bufsz);
+	while (cleaned_count--) {
+		if (!(skb = buffer_info->skb))
+			skb = dev_alloc_skb(bufsz);
+		else {
+			skb_trim(skb, 0);
+			goto map_skb;
+		}
+
 
 		if(unlikely(!skb)) {
 			/* Better luck next round */
@@ -3872,6 +3906,7 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 
 		buffer_info->skb = skb;
 		buffer_info->length = adapter->rx_buffer_len;
+map_skb:
 		buffer_info->dma = pci_map_single(pdev,
 						  skb->data,
 						  adapter->rx_buffer_len,
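The allocator half of the change closes the loop: e1000_clean_rx_irq() now passes the number of consumed descriptors down as cleaned_count, and any skb that copybreak parked in buffer_info->skb is reset with skb_trim(skb, 0) and jumps to the map_skb: label, so the recycle path skips allocation and redoes only the DMA mapping. A rough stand-alone model of that control flow (refill_ring, alloc_buf, reset_buf, and map_buf are stub names, not the driver's; ring wraparound is omitted):

#include <stddef.h>

struct ring_slot {
	void *skb;	/* non-NULL when copybreak left a reusable buffer */
};

/* Stubs standing in for dev_alloc_skb(), skb_trim(skb, 0) and
 * pci_map_single() in the real driver. */
extern void *alloc_buf(size_t bufsz);
extern void reset_buf(void *skb);
extern void map_buf(void *skb);

static void refill_ring(struct ring_slot *slots, int cleaned_count,
			size_t bufsz)
{
	for (int i = 0; cleaned_count-- > 0; i++) {
		void *skb = slots[i].skb;

		if (skb) {
			reset_buf(skb);		/* recycle: skip allocation */
		} else {
			skb = alloc_buf(bufsz);
			if (!skb)
				break;		/* better luck next round */
			slots[i].skb = skb;
		}
		map_buf(skb);			/* the shared map_skb: step */
	}
}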