author		Eric Dumazet <eric.dumazet@gmail.com>	2011-10-13 03:28:54 -0400
committer	David S. Miller <davem@davemloft.net>	2011-10-13 16:05:07 -0400
commit		87fb4b7b533073eeeaed0b6bf7c2328995f6c075
tree		be4b37f08d7fe2d018ae68bae4577b1b2bafd0fc /net/core
parent		97ba0eb64ca690165f945a83e609102fcefa99cb
net: more accurate skb truesize
skb truesize currently accounts for the sk_buff struct and part of the skb head.
kmalloc() roundings are also ignored.

Considering that skb_shared_info is larger than sk_buff, it's time to take it
into account for better memory accounting.

This patch introduces the SKB_TRUESIZE(X) macro to centralize various
assumptions into a single place.

At skb alloc phase, we put the skb_shared_info struct at the exact end of the
skb head, to allow a better use of memory (lowering the number of
reallocations), since kmalloc() gives us power-of-two memory blocks.

Unless SLUB/SLAB debug is active, both skb->head and skb_shared_info are
aligned to cache lines, as before.

Note: This patch might trigger performance regressions because of
misconfigured protocol stacks, hitting per socket or global memory limits
that were previously not reached. But it's a necessary step for a more
accurate memory accounting.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
CC: Andi Kleen <ak@linux.intel.com>
CC: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
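The SKB_TRUESIZE() and SKB_WITH_OVERHEAD() helpers referenced by this change live in include/linux/skbuff.h, so they do not appear in the net/core diffstat below. A minimal userspace sketch of the arithmetic, assuming a 64-byte cache line and placeholder struct sizes (the real values depend on the kernel configuration):

#include <stdio.h>

#define SMP_CACHE_BYTES			64	/* assumed cache line size */
#define SIZEOF_SK_BUFF			232	/* placeholder for sizeof(struct sk_buff) */
#define SIZEOF_SKB_SHARED_INFO		320	/* placeholder for sizeof(struct skb_shared_info) */

/* round X up to the next cache line multiple */
#define SKB_DATA_ALIGN(X)	(((X) + (SMP_CACHE_BYTES - 1)) & \
				 ~(SMP_CACHE_BYTES - 1))
/* head room left once skb_shared_info is reserved at the end of the block */
#define SKB_WITH_OVERHEAD(X)	((X) - SKB_DATA_ALIGN(SIZEOF_SKB_SHARED_INFO))
/* minimum truesize of one skb carrying X bytes of data */
#define SKB_TRUESIZE(X)		((X) + \
				 SKB_DATA_ALIGN(SIZEOF_SK_BUFF) + \
				 SKB_DATA_ALIGN(SIZEOF_SKB_SHARED_INFO))

int main(void)
{
	/* with these placeholders: 256 + 256 + 320 = 832 bytes */
	printf("SKB_TRUESIZE(256) = %d\n", SKB_TRUESIZE(256));
	return 0;
}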
Diffstat (limited to 'net/core')
 net/core/skbuff.c | 18 ++++++++++++++----
 net/core/sock.c   |  2 +-
 2 files changed, 15 insertions(+), 5 deletions(-)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 5b2c5f1d4dba..a7f855dca922 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -184,11 +184,20 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 		goto out;
 	prefetchw(skb);
 
-	size = SKB_DATA_ALIGN(size);
-	data = kmalloc_node_track_caller(size + sizeof(struct skb_shared_info),
-			gfp_mask, node);
+	/* We do our best to align skb_shared_info on a separate cache
+	 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
+	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
+	 * Both skb->head and skb_shared_info are cache line aligned.
+	 */
+	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+	data = kmalloc_node_track_caller(size, gfp_mask, node);
 	if (!data)
 		goto nodata;
+	/* kmalloc(size) might give us more room than requested.
+	 * Put skb_shared_info exactly at the end of allocated zone,
+	 * to allow max possible filling before reallocation.
+	 */
+	size = SKB_WITH_OVERHEAD(ksize(data));
 	prefetchw(data + size);
 
 	/*
@@ -197,7 +206,8 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 	 * the tail pointer in struct sk_buff!
 	 */
 	memset(skb, 0, offsetof(struct sk_buff, tail));
-	skb->truesize = size + sizeof(struct sk_buff);
+	/* Account for allocated memory : skb + skb->head */
+	skb->truesize = SKB_TRUESIZE(size);
 	atomic_set(&skb->users, 1);
 	skb->head = data;
 	skb->data = data;
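To make the new sizing in __alloc_skb() concrete, here is a small userspace walk-through under the same placeholder sizes. kmalloc()/ksize() are approximated by rounding the request up to the next power of two, which is what generic kmalloc caches usually return; real behaviour varies with the allocator and debug options:

#include <stdio.h>

#define SMP_CACHE_BYTES			64
#define SIZEOF_SKB_SHARED_INFO		320	/* placeholder */

#define SKB_DATA_ALIGN(X)	(((X) + (SMP_CACHE_BYTES - 1)) & \
				 ~(SMP_CACHE_BYTES - 1))
#define SKB_WITH_OVERHEAD(X)	((X) - SKB_DATA_ALIGN(SIZEOF_SKB_SHARED_INFO))

/* stand-in for ksize(): round up to the next power-of-two kmalloc block */
static unsigned int fake_ksize(unsigned int request)
{
	unsigned int block = 32;

	while (block < request)
		block <<= 1;
	return block;
}

int main(void)
{
	unsigned int size = 1500;	/* head room requested by the caller */
	unsigned int request, allocated;

	/* reserve room for skb_shared_info at the end of skb->head */
	size += SKB_DATA_ALIGN(SIZEOF_SKB_SHARED_INFO);
	request = size;				/* 1500 + 320 = 1820 */
	allocated = fake_ksize(request);	/* kmalloc block is 2048 */
	/* recompute usable head room from what was really allocated */
	size = SKB_WITH_OVERHEAD(allocated);	/* 2048 - 320 = 1728 */

	printf("asked for %u, block is %u, usable head room %u\n",
	       request, allocated, size);
	return 0;
}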
diff --git a/net/core/sock.c b/net/core/sock.c
index 83c462d3f451..5a087626bb3a 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -207,7 +207,7 @@ static struct lock_class_key af_callback_keys[AF_MAX];
  * not depend upon such differences.
  */
 #define _SK_MEM_PACKETS		256
-#define _SK_MEM_OVERHEAD	(sizeof(struct sk_buff) + 256)
+#define _SK_MEM_OVERHEAD	SKB_TRUESIZE(256)
 #define SK_WMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
 #define SK_RMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
 
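The sock.c hunk is what the note about memory limits refers to: the default per-socket limits are derived from _SK_MEM_OVERHEAD, so the larger truesize raises SK_WMEM_MAX and SK_RMEM_MAX accordingly. A rough before/after comparison with the same placeholder sizes (real values differ per build):

#include <stdio.h>

#define SMP_CACHE_BYTES			64
#define SIZEOF_SK_BUFF			232	/* placeholder */
#define SIZEOF_SKB_SHARED_INFO		320	/* placeholder */

#define SKB_DATA_ALIGN(X)	(((X) + (SMP_CACHE_BYTES - 1)) & \
				 ~(SMP_CACHE_BYTES - 1))
#define SKB_TRUESIZE(X)		((X) + \
				 SKB_DATA_ALIGN(SIZEOF_SK_BUFF) + \
				 SKB_DATA_ALIGN(SIZEOF_SKB_SHARED_INFO))

#define _SK_MEM_PACKETS		256
#define OLD_SK_MEM_OVERHEAD	(SIZEOF_SK_BUFF + 256)	/* previous formula */
#define NEW_SK_MEM_OVERHEAD	SKB_TRUESIZE(256)	/* formula after this patch */

int main(void)
{
	printf("old SK_WMEM_MAX: %d bytes\n", OLD_SK_MEM_OVERHEAD * _SK_MEM_PACKETS);
	printf("new SK_WMEM_MAX: %d bytes\n", NEW_SK_MEM_OVERHEAD * _SK_MEM_PACKETS);
	return 0;
}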