aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/ixgbe
diff options
context:
space:
mode:
authorAlexander Duyck <alexander.h.duyck@intel.com>2010-08-19 09:33:41 -0400
committerDavid S. Miller <davem@davemloft.net>2010-08-19 19:36:45 -0400
commitd716a7d88549c99f9afbfc1f75dda1c390e2dc3a (patch)
treee9450cb13cd9159191a81b676a6adf4501765914 /drivers/net/ixgbe
parent083fc582b8752c64b0ae73935ddb45a1dd794b4c (diff)
ixgbe: remove redundant DMA alignment code
This patch removes the redundant DMA alignment code from the Rx buffer allocation path. This code is no longer necessary since all x86 buffers are now DMA aligned due to recent changes to NET_IP_ALIGN and NET_SKB_PAD. It also moves the setting of the Rx queue value into the allocation path, since it is more likely that the queue mapping will still be in the cache at the time of allocation.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ixgbe')
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c24
1 files changed, 12 insertions, 12 deletions
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index e32af434cc9d..5dceaf3dbb7f 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -955,7 +955,6 @@ static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
955 bool is_vlan = (status & IXGBE_RXD_STAT_VP); 955 bool is_vlan = (status & IXGBE_RXD_STAT_VP);
956 u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan); 956 u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
957 957
958 skb_record_rx_queue(skb, ring->queue_index);
959 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) { 958 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
960 if (adapter->vlgrp && is_vlan && (tag & VLAN_VID_MASK)) 959 if (adapter->vlgrp && is_vlan && (tag & VLAN_VID_MASK))
961 vlan_gro_receive(napi, adapter->vlgrp, tag, skb); 960 vlan_gro_receive(napi, adapter->vlgrp, tag, skb);
@@ -1037,10 +1036,12 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
1037 struct ixgbe_ring *rx_ring, 1036 struct ixgbe_ring *rx_ring,
1038 int cleaned_count) 1037 int cleaned_count)
1039{ 1038{
1039 struct net_device *netdev = adapter->netdev;
1040 struct pci_dev *pdev = adapter->pdev; 1040 struct pci_dev *pdev = adapter->pdev;
1041 union ixgbe_adv_rx_desc *rx_desc; 1041 union ixgbe_adv_rx_desc *rx_desc;
1042 struct ixgbe_rx_buffer *bi; 1042 struct ixgbe_rx_buffer *bi;
1043 unsigned int i; 1043 unsigned int i;
1044 unsigned int bufsz = rx_ring->rx_buf_len;
1044 1045
1045 i = rx_ring->next_to_use; 1046 i = rx_ring->next_to_use;
1046 bi = &rx_ring->rx_buffer_info[i]; 1047 bi = &rx_ring->rx_buffer_info[i];
@@ -1051,7 +1052,7 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
1051 if (!bi->page_dma && 1052 if (!bi->page_dma &&
1052 (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)) { 1053 (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)) {
1053 if (!bi->page) { 1054 if (!bi->page) {
1054 bi->page = alloc_page(GFP_ATOMIC); 1055 bi->page = netdev_alloc_page(netdev);
1055 if (!bi->page) { 1056 if (!bi->page) {
1056 adapter->alloc_rx_page_failed++; 1057 adapter->alloc_rx_page_failed++;
1057 goto no_buffers; 1058 goto no_buffers;
@@ -1069,22 +1070,21 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
1069 } 1070 }
1070 1071
1071 if (!bi->skb) { 1072 if (!bi->skb) {
1072 struct sk_buff *skb; 1073 struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev,
1073 /* netdev_alloc_skb reserves 32 bytes up front!! */ 1074 bufsz);
1074 uint bufsz = rx_ring->rx_buf_len + SMP_CACHE_BYTES; 1075 bi->skb = skb;
1075 skb = netdev_alloc_skb(adapter->netdev, bufsz);
1076 1076
1077 if (!skb) { 1077 if (!skb) {
1078 adapter->alloc_rx_buff_failed++; 1078 adapter->alloc_rx_buff_failed++;
1079 goto no_buffers; 1079 goto no_buffers;
1080 } 1080 }
1081 /* initialize queue mapping */
1082 skb_record_rx_queue(skb, rx_ring->queue_index);
1083 }
1081 1084
1082 /* advance the data pointer to the next cache line */ 1085 if (!bi->dma) {
1083 skb_reserve(skb, (PTR_ALIGN(skb->data, SMP_CACHE_BYTES) 1086 bi->dma = dma_map_single(&pdev->dev,
1084 - skb->data)); 1087 bi->skb->data,
1085
1086 bi->skb = skb;
1087 bi->dma = dma_map_single(&pdev->dev, skb->data,
1088 rx_ring->rx_buf_len, 1088 rx_ring->rx_buf_len,
1089 DMA_FROM_DEVICE); 1089 DMA_FROM_DEVICE);
1090 } 1090 }