author    Eric Dumazet <eric.dumazet@gmail.com>  2010-06-15 21:16:43 -0400
committer David S. Miller <davem@davemloft.net>  2010-06-15 21:16:43 -0400
commit    5933dd2f028cdcbb4b3169dca594324704ba10ae (patch)
tree      c49d33589cf1ee2047ed4aa00f700e7ddb090447 /include/linux/skbuff.h
parent    a95d8c88bea0c93505e1d143d075f112be2b25e3 (diff)
net: NET_SKB_PAD should depend on L1_CACHE_BYTES
In old kernels, NET_SKB_PAD was defined to 16. Commit d6301d3dd1c2
("net: Increase default NET_SKB_PAD to 32") and commit 18e8c134f4e9
("net: Increase NET_SKB_PAD to 64 bytes") then raised it to 64. While
the first patch was governed by network stack needs, the second was
driven more by performance issues on current hardware. The real intent
was to align data on a cache line boundary, so use
max(32, L1_CACHE_BYTES) instead of a hardcoded 64 to be more generic.
Also remove the microblaze and powerpc architectures' own NET_SKB_PAD
definitions.

Thanks to Alexander Duyck and David Miller for their comments.

Suggested-by: David Miller <davem@davemloft.net>
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
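To see what the new definition resolves to, here is a minimal userspace
sketch (not kernel code): L1_CACHE_BYTES and the simplified max() below
stand in for the kernel's <asm/cache.h> constant and its type-checked
max() macro, and 64 is an assumed cache line size typical of x86-64 and
arm64 parts.

#include <stdio.h>

/* Stand-in for the kernel's L1_CACHE_BYTES from <asm/cache.h>;
 * 64 bytes is an assumption, typical of modern x86-64/arm64 CPUs. */
#define L1_CACHE_BYTES 64

/* Simplified stand-in for the kernel's max(), which also type-checks. */
#define max(a, b) ((a) > (b) ? (a) : (b))

#define NET_SKB_PAD max(32, L1_CACHE_BYTES)

int main(void)
{
	/* With 64-byte cache lines the pad stays 64, matching the old
	 * hardcoded value; on a 32-byte-line machine it drops to 32. */
	printf("NET_SKB_PAD = %d\n", NET_SKB_PAD);
	return 0;
}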
Diffstat (limited to 'include/linux/skbuff.h')
-rw-r--r--  include/linux/skbuff.h | 8
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 122d08396e56..ac74ee085d74 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1414,12 +1414,14 @@ static inline int skb_network_offset(const struct sk_buff *skb)
  *
  * Various parts of the networking layer expect at least 32 bytes of
  * headroom, you should not reduce this.
- * With RPS, we raised NET_SKB_PAD to 64 so that get_rps_cpus() fetches span
- * a 64 bytes aligned block to fit modern (>= 64 bytes) cache line sizes
+ *
+ * Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS)
+ * to reduce average number of cache lines per packet.
+ * get_rps_cpus() for example only access one 64 bytes aligned block :
  * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
  */
 #ifndef NET_SKB_PAD
-#define NET_SKB_PAD	64
+#define NET_SKB_PAD	max(32, L1_CACHE_BYTES)
 #endif
 
 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
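As a quick check of the arithmetic in the patched comment, this small
sketch (field sizes copied from the comment itself) confirms that the
block get_rps_cpus() touches fits exactly one 64-byte aligned cache
line in the IPv6 worst case.

#include <stdio.h>

int main(void)
{
	int net_ip_align = 2;	/* NET_IP_ALIGN */
	int eth_header   = 14;	/* Ethernet header */
	int ip_header    = 40;	/* IPv6 worst case; IPv4 would be 20 */
	int ports        = 8;	/* transport ports field, per the comment */

	/* 2 + 14 + 40 + 8 = 64 bytes: one aligned cache line. */
	printf("%d bytes\n", net_ip_align + eth_header + ip_header + ports);
	return 0;
}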