author		Eric Dumazet <edumazet@google.com>	2012-05-17 03:34:16 -0400
committer	David S. Miller <davem@davemloft.net>	2012-05-17 15:52:40 -0400
commit		a1c7fff7e18f59e684e07b0f9a770561cd39f395 (patch)
tree		99b006ffc7423205ded8616a21eb955f1fe046dd /net/core
parent		1de5a71c3e6eae2fbf15e9a9e13a8fc269bb82bc (diff)
net: netdev_alloc_skb() use build_skb()
netdev_alloc_skb() is used by network drivers in their RX path to
allocate an skb to receive an incoming frame.
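
For context, a driver's receive handler typically allocates like this; the
following is a hypothetical sketch (my_rx(), hw_buf, and the memcpy() are
illustrative, not part of this patch):

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>

/* Hypothetical RX handler: copy a received frame out of a hardware
 * buffer into a freshly allocated skb and hand it to the stack. */
static int my_rx(struct net_device *dev, const void *hw_buf,
		 unsigned int len)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, len);

	if (unlikely(!skb)) {
		dev->stats.rx_dropped++;
		return NET_RX_DROP;
	}
	memcpy(skb_put(skb, len), hw_buf, len);	/* frame payload */
	skb->protocol = eth_type_trans(skb, dev);
	return netif_rx(skb);
}

Every such allocation funnels through __netdev_alloc_skb(), which is what
the patch below changes.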
With the recent skb->head_frag infrastructure, it makes sense to change
netdev_alloc_skb() to use build_skb() and a frag allocator.
This permits zero-copy splice(socket->pipe) and better GRO or TCP
coalescing.
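
To see why the fast path covers the common case: with a 1500-byte MTU on
x86_64 (where NET_SKB_PAD and the SKB_DATA_ALIGN granularity are both
typically 64 bytes), fragsz = SKB_DATA_ALIGN(1500 + 64) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) = 1600 plus a few hundred
bytes of shared info, well under PAGE_SIZE (4096), so standard-MTU
receives take the new build_skb() path and two such buffers share one
page. (Exact sizes depend on kernel config; these numbers are
illustrative.) Because the skb head is then page-backed (skb->head_frag),
consumers such as splice() and GRO can take a page reference instead of
copying the data.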
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core')
-rw-r--r--	net/core/skbuff.c	32
1 file changed, 31 insertions(+), 1 deletion(-)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 7a10f0894152..7645df1bada0 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -293,6 +293,12 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)
 }
 EXPORT_SYMBOL(build_skb);
 
+struct netdev_alloc_cache {
+	struct page *page;
+	unsigned int offset;
+};
+static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache);
+
 /**
  * __netdev_alloc_skb - allocate an skbuff for rx on a specific device
  * @dev: network device to receive on
@@ -310,8 +316,32 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
 				   unsigned int length, gfp_t gfp_mask)
 {
 	struct sk_buff *skb;
+	unsigned int fragsz = SKB_DATA_ALIGN(length + NET_SKB_PAD) +
+			      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 
-	skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, NUMA_NO_NODE);
+	if (fragsz <= PAGE_SIZE && !(gfp_mask & __GFP_WAIT)) {
+		struct netdev_alloc_cache *nc;
+		void *data = NULL;
+
+		nc = &get_cpu_var(netdev_alloc_cache);
+		if (!nc->page) {
+refill:		nc->page = alloc_page(gfp_mask);
+			nc->offset = 0;
+		}
+		if (likely(nc->page)) {
+			if (nc->offset + fragsz > PAGE_SIZE) {
+				put_page(nc->page);
+				goto refill;
+			}
+			data = page_address(nc->page) + nc->offset;
+			nc->offset += fragsz;
+			get_page(nc->page);
+		}
+		put_cpu_var(netdev_alloc_cache);
+		skb = data ? build_skb(data, fragsz) : NULL;
+	} else {
+		skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, NUMA_NO_NODE);
+	}
 	if (likely(skb)) {
 		skb_reserve(skb, NET_SKB_PAD);
 		skb->dev = dev;
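
To make the new allocation scheme concrete, here is a minimal user-space
analogue of the fragment cache above; all names (page_buf, frag_cache,
frag_alloc) are invented for illustration, a plain integer refcount stands
in for get_page()/put_page() on the backing struct page, and the per-CPU
handling is omitted:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096u

/* Stand-in for struct page: raw storage plus a reference count. */
struct page_buf {
	unsigned int refcount;
	unsigned char data[PAGE_SIZE];
};

/* Stand-in for struct netdev_alloc_cache. */
struct frag_cache {
	struct page_buf *page;	/* current backing page */
	unsigned int offset;	/* next free byte within it */
};

/* Stand-in for put_page(): free once the last reference drops. */
static void page_put(struct page_buf *p)
{
	if (p && --p->refcount == 0)
		free(p);
}

static void *frag_alloc(struct frag_cache *nc, unsigned int fragsz)
{
	if (!nc->page || nc->offset + fragsz > PAGE_SIZE) {
		page_put(nc->page);		/* drop the cache's reference */
		nc->page = malloc(sizeof(*nc->page));	/* alloc_page() */
		if (!nc->page)
			return NULL;
		nc->page->refcount = 1;		/* the cache's own reference */
		nc->offset = 0;
	}
	nc->page->refcount++;		/* get_page() for this fragment */
	nc->offset += fragsz;
	return nc->page->data + nc->offset - fragsz;
}

int main(void)
{
	struct frag_cache nc = { NULL, 0 };
	void *a = frag_alloc(&nc, 1920);	/* two standard-MTU buffers... */
	void *b = frag_alloc(&nc, 1920);	/* ...fit in one 4 KB page */

	printf("same page: %s\n",
	       a && b && b == (unsigned char *)a + 1920 ? "yes" : "no");
	return 0;
}

The cache holds one reference of its own and takes an extra one per
fragment handed out, so a page is freed only after the cache has moved on
to a new page and every consumer of the old one has released its
reference (in the kernel, when each skb built from it is freed; a real
consumer here would call page_put() on the fragment's page when done).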