| author | Tom Herbert <therbert@google.com> | 2010-05-05 10:02:49 -0400 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2010-05-06 00:11:25 -0400 |
| commit | 50849d792b97c546c45a6652a16ba9be7d635c69 | |
| tree | 16f7ec1cb98909e8f9ac7e9c85065cc1c69e236d | |
| parent | 9ed318d546a29d7a591dbe648fd1a2efe3be1180 | |
e1000e: reduce writes of RX producer ptr
Reduce the number of writes to the RX producer pointer. When allocating
RX buffers, write the RX producer pointer only once for every
E1000_RX_BUFFER_WRITE (16) buffers created.
Signed-off-by: Tom Herbert <therbert@google.com>
Acked-by: Bruce Allan <bruce.w.allan@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
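The effect of the batching is easy to see in isolation. The sketch below is a minimal user-space simulation, not driver code: `RX_RING_COUNT`, `mmio_write_tail()` and `alloc_rx_buffers()` are hypothetical stand-ins, and only `E1000_RX_BUFFER_WRITE` and the `!(i & (E1000_RX_BUFFER_WRITE - 1))` test are taken from the patch. It counts how often the tail register would be written while refilling the ring: at most once per 16 buffers, no matter how many buffers a single refill call hands back.

```c
#include <stdio.h>

#define RX_RING_COUNT         256  /* assumed ring size, only for this simulation */
#define E1000_RX_BUFFER_WRITE  16  /* batch size, same value the driver uses */

static unsigned long tail_writes;

/* Stand-in for writel(i, hw_addr + rx_ring->tail): only counts MMIO writes. */
static void mmio_write_tail(unsigned int tail)
{
	(void)tail;
	tail_writes++;
}

/*
 * Refill 'count' RX buffers starting at ring index 'next_to_use', updating
 * the hardware tail pointer only once per E1000_RX_BUFFER_WRITE buffers,
 * mirroring the condition added by this patch.
 */
static unsigned int alloc_rx_buffers(unsigned int next_to_use, unsigned int count)
{
	unsigned int i = next_to_use;

	while (count--) {
		/* ... descriptor setup for slot i would go here ... */

		/* Expose new descriptors to the NIC only every 16 buffers. */
		if (!(i & (E1000_RX_BUFFER_WRITE - 1)))
			mmio_write_tail(i);  /* the driver issues wmb() first */

		if (++i == RX_RING_COUNT)
			i = 0;
	}
	return i;  /* new next_to_use */
}

int main(void)
{
	unsigned int next = 0;

	/* Refill the whole 256-entry ring: expect 16 tail writes, one per batch. */
	next = alloc_rx_buffers(next, RX_RING_COUNT);
	printf("next_to_use=%u tail_writes=%lu\n", next, tail_writes);
	return 0;
}
```

Built with any C compiler, refilling all 256 slots reports 16 tail writes; a refill of fewer than 16 buffers that does not cross a multiple-of-16 index produces none at all.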
drivers/net/e1000e/netdev.c | 57
1 file changed, 23 insertions(+), 34 deletions(-)
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 8de64ed8762c..b049d1a3c861 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -548,26 +548,23 @@ map_skb:
 		rx_desc = E1000_RX_DESC(*rx_ring, i);
 		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
 
+		if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
+			/*
+			 * Force memory writes to complete before letting h/w
+			 * know there are new descriptors to fetch. (Only
+			 * applicable for weak-ordered memory model archs,
+			 * such as IA-64).
+			 */
+			wmb();
+			writel(i, adapter->hw.hw_addr + rx_ring->tail);
+		}
 		i++;
 		if (i == rx_ring->count)
 			i = 0;
 		buffer_info = &rx_ring->buffer_info[i];
 	}
 
-	if (rx_ring->next_to_use != i) {
-		rx_ring->next_to_use = i;
-		if (i-- == 0)
-			i = (rx_ring->count - 1);
-
-		/*
-		 * Force memory writes to complete before letting h/w
-		 * know there are new descriptors to fetch. (Only
-		 * applicable for weak-ordered memory model archs,
-		 * such as IA-64).
-		 */
-		wmb();
-		writel(i, adapter->hw.hw_addr + rx_ring->tail);
-	}
+	rx_ring->next_to_use = i;
 }
 
 /**
@@ -649,6 +646,17 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
 
 		rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);
 
+		if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
+			/*
+			 * Force memory writes to complete before letting h/w
+			 * know there are new descriptors to fetch. (Only
+			 * applicable for weak-ordered memory model archs,
+			 * such as IA-64).
+			 */
+			wmb();
+			writel(i<<1, adapter->hw.hw_addr + rx_ring->tail);
+		}
+
 		i++;
 		if (i == rx_ring->count)
 			i = 0;
@@ -656,26 +664,7 @@
 	}
 
 no_buffers:
-	if (rx_ring->next_to_use != i) {
-		rx_ring->next_to_use = i;
-
-		if (!(i--))
-			i = (rx_ring->count - 1);
-
-		/*
-		 * Force memory writes to complete before letting h/w
-		 * know there are new descriptors to fetch. (Only
-		 * applicable for weak-ordered memory model archs,
-		 * such as IA-64).
-		 */
-		wmb();
-		/*
-		 * Hardware increments by 16 bytes, but packet split
-		 * descriptors are 32 bytes...so we increment tail
-		 * twice as much.
-		 */
-		writel(i<<1, adapter->hw.hw_addr + rx_ring->tail);
-	}
+	rx_ring->next_to_use = i;
 }
 
 /**
