author	Anton Blanchard <anton@samba.org>	2011-03-27 10:57:26 -0400
committer	David S. Miller <davem@davemloft.net>	2011-03-29 01:26:32 -0400
commit	a715dea3c8e9ef2771c534e05ee1d36f65987e64 (patch)
tree	7734586473abed25f7ec4b8423adbdba3d829a61 /include
parent	4910ac6c526d2868adcb5893e0c428473de862b5 (diff)
net: Always allocate at least 16 skb frags regardless of page size
When analysing performance of the cxgb3 on a ppc64 box I noticed that we
weren't doing much GRO merging. It turns out we are limited by the number
of SKB frags:

#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 2)

With a 4kB page size we have 18 frags, but with a 64kB page size we only
have 3 frags.

I ran a single stream TCP bandwidth test to compare the performance of

Signed-off-by: David S. Miller <davem@davemloft.net>
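As a rough illustration of the arithmetic above, here is a standalone C sketch
(not kernel code; the page sizes are hard-coded purely for the example) that
evaluates the pre-patch formula for 4kB and 64kB pages:

/* Standalone sketch: how the old MAX_SKB_FRAGS formula behaves
 * for the two page sizes mentioned in the commit message. */
#include <stdio.h>

#define OLD_MAX_SKB_FRAGS(page_size) (65536 / (page_size) + 2)

int main(void)
{
	/* 4kB pages:  65536/4096  + 2 = 18 frags */
	printf("4kB pages:  %d frags\n", OLD_MAX_SKB_FRAGS(4096));
	/* 64kB pages: 65536/65536 + 2 = 3 frags, too few for effective GRO */
	printf("64kB pages: %d frags\n", OLD_MAX_SKB_FRAGS(65536));
	return 0;
}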
Diffstat (limited to 'include')
-rw-r--r--	include/linux/skbuff.h	8
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 24cfa626931e..239083bfea13 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -122,8 +122,14 @@ struct sk_buff_head {
 
 struct sk_buff;
 
-/* To allow 64K frame to be packed as single skb without frag_list */
+/* To allow 64K frame to be packed as single skb without frag_list. Since
+ * GRO uses frags we allocate at least 16 regardless of page size.
+ */
+#if (65536/PAGE_SIZE + 2) < 16
+#define MAX_SKB_FRAGS 16
+#else
 #define MAX_SKB_FRAGS (65536/PAGE_SIZE + 2)
+#endif
 
 typedef struct skb_frag_struct skb_frag_t;
 
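For reference, a minimal user-space sketch of the patched conditional
(PAGE_SIZE fixed to 64kB here only for illustration; in the kernel it comes
from the architecture headers) shows the new floor of 16 taking effect:

/* User-space sketch of the patched logic; not the kernel header itself. */
#include <stdio.h>

#define PAGE_SIZE 65536	/* assumed value, e.g. ppc64 with 64kB pages */

#if (65536/PAGE_SIZE + 2) < 16
#define MAX_SKB_FRAGS 16
#else
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 2)
#endif

int main(void)
{
	/* With 64kB pages the old formula gave 3; the floor raises it to 16. */
	printf("MAX_SKB_FRAGS = %d\n", MAX_SKB_FRAGS);
	return 0;
}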