about | summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
authorAlexander Duyck <alexander.h.duyck@intel.com>2012-05-23 21:59:27 -0400
committerJeff Kirsher <jeffrey.t.kirsher@intel.com>2012-07-21 19:04:51 -0400
commit252562c207a850106d9d5b41a41d29f96c0530b7 (patch)
tree7584cd94d2069a9466c6640b70189a7254d58b0e
parentce422606696f137e610fd0e677ec72ac33c17842 (diff)
ixgbe: Reduce Rx header size to what is actually used
The recent changes to netdev_alloc_skb actually make it so that the size of
the buffer now actually has a more direct input on the truesize.  So in order
to make best use of the piece of a page we are allocated I am reducing the
IXGBE_RX_HDR_SIZE to 256 so that our truesize will be reduced by 256 bytes
as well.

This should result in performance improvements since the number of uses per
page should increase from 4 to 6 in the case of a 4K page.  In addition we
should see socket performance improvements due to the truesize dropping
to less than 1K for buffers less than 256 bytes.

Cc: Eric Dumazet <edumazet@google.com>
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Phil Schmitt <phillip.j.schmitt@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe.h      | 15
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c |  4
2 files changed, 10 insertions(+), 9 deletions(-)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index eb5928228670..b9623e9ea895 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -77,17 +77,18 @@
 #define IXGBE_MAX_FCPAUSE 0xFFFF
 
 /* Supported Rx Buffer Sizes */
-#define IXGBE_RXBUFFER_512    512  /* Used for packet split */
+#define IXGBE_RXBUFFER_256    256  /* Used for skb receive header */
 #define IXGBE_MAX_RXBUFFER  16384  /* largest size for a single descriptor */
 
 /*
- * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN mans we
- * reserve 2 more, and skb_shared_info adds an additional 384 bytes more,
- * this adds up to 512 bytes of extra data meaning the smallest allocation
- * we could have is 1K.
- * i.e. RXBUFFER_512 --> size-1024 slab
+ * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
+ * reserve 64 more, and skb_shared_info adds an additional 320 bytes more,
+ * this adds up to 448 bytes of extra data.
+ *
+ * Since netdev_alloc_skb now allocates a page fragment we can use a value
+ * of 256 and the resultant skb will have a truesize of 960 or less.
  */
-#define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_512
+#define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_256
 
 #define MAXIMUM_ETHERNET_VLAN_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 7be35043b751..b376926af890 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1517,8 +1517,8 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
 	 * 60 bytes if the skb->len is less than 60 for skb_pad.
 	 */
 	pull_len = skb_frag_size(frag);
-	if (pull_len > 256)
-		pull_len = ixgbe_get_headlen(va, pull_len);
+	if (pull_len > IXGBE_RX_HDR_SIZE)
+		pull_len = ixgbe_get_headlen(va, IXGBE_RX_HDR_SIZE);
 
 	/* align pull length to size of long to optimize memcpy performance */
 	skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));