author	Eric Dumazet <edumazet@google.com>	2012-05-18 01:12:12 -0400
committer	David S. Miller <davem@davemloft.net>	2012-05-18 13:31:25 -0400
commit	6f532612cc2410a5079ea0f83e7a5011adfbf70d
tree	0459fada6287120f5c47ed56c440c4d1c3e3eaa2 /net/core
parent	56138f50d1900b0c3d8647376e37b488b23ba53d
net: introduce netdev_alloc_frag()
Fix two issues introduced in commit a1c7fff7e18f5 ("net: netdev_alloc_skb() use build_skb()"):

- Must be IRQ safe (non-NAPI drivers can use it)
- Must not leak the frag if build_skb() fails to allocate an sk_buff

This patch introduces netdev_alloc_frag() for drivers willing to use build_skb() instead of the __netdev_alloc_skb() variants.

Factorize code so that __dev_alloc_skb() is a wrapper around __netdev_alloc_skb(), and dev_alloc_skb() a wrapper around netdev_alloc_skb().

Use the __GFP_COLD flag.

Almost all network drivers now benefit from the skb->head_frag infrastructure.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
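As a rough illustration of how a driver is expected to consume the new helper (this sketch is not part of the patch; example_rx_copybreak(), ring_buf and len are hypothetical, while netdev_alloc_frag(), build_skb(), virt_to_head_page() and put_page() are the interfaces discussed above):

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/string.h>

/* Hypothetical RX path building an skb from a per-cpu page fragment.
 * Safe from hard-IRQ context: netdev_alloc_frag() uses GFP_ATOMIC and
 * protects its per-cpu cache with local_irq_save()/local_irq_restore().
 */
static struct sk_buff *example_rx_copybreak(const void *ring_buf,
					    unsigned int len)
{
	unsigned int fragsz = SKB_DATA_ALIGN(len + NET_SKB_PAD) +
			      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	struct sk_buff *skb;
	void *data;

	data = netdev_alloc_frag(fragsz);
	if (!data)
		return NULL;

	memcpy(data + NET_SKB_PAD, ring_buf, len);

	skb = build_skb(data, fragsz);
	if (unlikely(!skb)) {
		/* Do not leak the fragment when no sk_buff can be built. */
		put_page(virt_to_head_page(data));
		return NULL;
	}

	skb_reserve(skb, NET_SKB_PAD);
	skb_put(skb, len);
	return skb;
}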
Diffstat (limited to 'net/core')
-rw-r--r--	net/core/skbuff.c	82
1 file changed, 41 insertions(+), 41 deletions(-)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 7645df1bada0..7ceb673d622f 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -300,6 +300,40 @@ struct netdev_alloc_cache {
 static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache);
 
 /**
+ * netdev_alloc_frag - allocate a page fragment
+ * @fragsz: fragment size
+ *
+ * Allocates a frag from a page for receive buffer.
+ * Uses GFP_ATOMIC allocations.
+ */
+void *netdev_alloc_frag(unsigned int fragsz)
+{
+	struct netdev_alloc_cache *nc;
+	void *data = NULL;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	nc = &__get_cpu_var(netdev_alloc_cache);
+	if (unlikely(!nc->page)) {
+refill:
+		nc->page = alloc_page(GFP_ATOMIC | __GFP_COLD);
+		nc->offset = 0;
+	}
+	if (likely(nc->page)) {
+		if (nc->offset + fragsz > PAGE_SIZE) {
+			put_page(nc->page);
+			goto refill;
+		}
+		data = page_address(nc->page) + nc->offset;
+		nc->offset += fragsz;
+		get_page(nc->page);
+	}
+	local_irq_restore(flags);
+	return data;
+}
+EXPORT_SYMBOL(netdev_alloc_frag);
+
+/**
  * __netdev_alloc_skb - allocate an skbuff for rx on a specific device
  * @dev: network device to receive on
  * @length: length to allocate
@@ -313,32 +347,20 @@ static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache);
  *	%NULL is returned if there is no free memory.
  */
 struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
				   unsigned int length, gfp_t gfp_mask)
 {
-	struct sk_buff *skb;
+	struct sk_buff *skb = NULL;
 	unsigned int fragsz = SKB_DATA_ALIGN(length + NET_SKB_PAD) +
			      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 
 	if (fragsz <= PAGE_SIZE && !(gfp_mask & __GFP_WAIT)) {
-		struct netdev_alloc_cache *nc;
-		void *data = NULL;
+		void *data = netdev_alloc_frag(fragsz);
 
-		nc = &get_cpu_var(netdev_alloc_cache);
-		if (!nc->page) {
-refill:		nc->page = alloc_page(gfp_mask);
-			nc->offset = 0;
-		}
-		if (likely(nc->page)) {
-			if (nc->offset + fragsz > PAGE_SIZE) {
-				put_page(nc->page);
-				goto refill;
-			}
-			data = page_address(nc->page) + nc->offset;
-			nc->offset += fragsz;
-			get_page(nc->page);
-		}
-		put_cpu_var(netdev_alloc_cache);
-		skb = data ? build_skb(data, fragsz) : NULL;
+		if (likely(data)) {
+			skb = build_skb(data, fragsz);
+			if (unlikely(!skb))
+				put_page(virt_to_head_page(data));
+		}
 	} else {
 		skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, NUMA_NO_NODE);
 	}
@@ -360,28 +382,6 @@ void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
 }
 EXPORT_SYMBOL(skb_add_rx_frag);
 
-/**
- * dev_alloc_skb - allocate an skbuff for receiving
- * @length: length to allocate
- *
- * Allocate a new &sk_buff and assign it a usage count of one. The
- * buffer has unspecified headroom built in. Users should allocate
- * the headroom they think they need without accounting for the
- * built in space. The built in space is used for optimisations.
- *
- * %NULL is returned if there is no free memory. Although this function
- * allocates memory it can be called from an interrupt.
- */
-struct sk_buff *dev_alloc_skb(unsigned int length)
-{
-	/*
-	 * There is more code here than it seems:
-	 *	__dev_alloc_skb is an inline
-	 */
-	return __dev_alloc_skb(length, GFP_ATOMIC);
-}
-EXPORT_SYMBOL(dev_alloc_skb);
-
 static void skb_drop_list(struct sk_buff **listp)
 {
 	struct sk_buff *list = *listp;
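Removing the out-of-line dev_alloc_skb() above does not drop the API: per the commit message it becomes a thin wrapper on the header side, which is outside the net/core diffstat shown on this page. A sketch of that factorization, approximated from the changelog rather than copied from the include/linux/skbuff.h hunk:

/* Approximation of the header-side wrappers described in the changelog;
 * the exact hunk lives in include/linux/skbuff.h and is not shown here.
 */
static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
					      gfp_t gfp_mask)
{
	return __netdev_alloc_skb(NULL, length, gfp_mask);
}

static inline struct sk_buff *dev_alloc_skb(unsigned int length)
{
	return netdev_alloc_skb(NULL, length);
}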