Diffstat (limited to 'net')
 net/core/skbuff.c | 83 +++++++++++++++++++++++++++++++++++--------------------
 1 file changed, 53 insertions(+), 30 deletions(-)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 758bbef506d3..c3aa68ceed62 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -400,37 +400,8 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 	skb_copy_secmark(new, old);
 }
 
-/**
- *	skb_clone	-	duplicate an sk_buff
- *	@skb: buffer to clone
- *	@gfp_mask: allocation priority
- *
- *	Duplicate an &sk_buff. The new one is not owned by a socket. Both
- *	copies share the same packet data but not structure. The new
- *	buffer has a reference count of 1. If the allocation fails the
- *	function returns %NULL otherwise the new buffer is returned.
- *
- *	If this function is called from an interrupt gfp_mask() must be
- *	%GFP_ATOMIC.
- */
-
-struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
+static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
 {
-	struct sk_buff *n;
-
-	n = skb + 1;
-	if (skb->fclone == SKB_FCLONE_ORIG &&
-	    n->fclone == SKB_FCLONE_UNAVAILABLE) {
-		atomic_t *fclone_ref = (atomic_t *) (n + 1);
-		n->fclone = SKB_FCLONE_CLONE;
-		atomic_inc(fclone_ref);
-	} else {
-		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
-		if (!n)
-			return NULL;
-		n->fclone = SKB_FCLONE_UNAVAILABLE;
-	}
-
 #define C(x) n->x = skb->x
 
 	n->next = n->prev = NULL;
@@ -462,6 +433,58 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
 	skb->cloned = 1;
 
 	return n;
+#undef C
+}
+
+/**
+ *	skb_morph	-	morph one skb into another
+ *	@dst: the skb to receive the contents
+ *	@src: the skb to supply the contents
+ *
+ *	This is identical to skb_clone except that the target skb is
+ *	supplied by the user.
+ *
+ *	The target skb is returned upon exit.
+ */
+struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
+{
+	skb_release_data(dst);
+	return __skb_clone(dst, src);
+}
+EXPORT_SYMBOL_GPL(skb_morph);
+
+/**
+ *	skb_clone	-	duplicate an sk_buff
+ *	@skb: buffer to clone
+ *	@gfp_mask: allocation priority
+ *
+ *	Duplicate an &sk_buff. The new one is not owned by a socket. Both
+ *	copies share the same packet data but not structure. The new
+ *	buffer has a reference count of 1. If the allocation fails the
+ *	function returns %NULL otherwise the new buffer is returned.
+ *
+ *	If this function is called from an interrupt gfp_mask() must be
+ *	%GFP_ATOMIC.
+ */
+
+struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
+{
+	struct sk_buff *n;
+
+	n = skb + 1;
+	if (skb->fclone == SKB_FCLONE_ORIG &&
+	    n->fclone == SKB_FCLONE_UNAVAILABLE) {
+		atomic_t *fclone_ref = (atomic_t *) (n + 1);
+		n->fclone = SKB_FCLONE_CLONE;
+		atomic_inc(fclone_ref);
+	} else {
+		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
+		if (!n)
+			return NULL;
+		n->fclone = SKB_FCLONE_UNAVAILABLE;
+	}
+
+	return __skb_clone(n, skb);
 }
 
 static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
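
What the first hunk does: the old skb_clone() body is split so that the field-by-field copy, written once behind the local C(x) macro (now paired with an explicit #undef C), lives in __skb_clone(), which both skb_clone() and the new skb_morph() call. The C(x) idiom is easy to miss in the hunk; here is a minimal user-space sketch of it, using a hypothetical pkt struct rather than sk_buff:

#include <stdio.h>

/* Sketch of the field-copy idiom __skb_clone() uses: a local macro names
 * the two structs once, so each shared field is listed a single time, and
 * the trailing #undef keeps the macro from leaking past the function. */
struct pkt { int len; int priority; void *head; };

static void pkt_copy_fields(struct pkt *n, const struct pkt *skb)
{
#define C(x) n->x = skb->x
	C(len);
	C(priority);
	C(head);
#undef C
}

int main(void)
{
	struct pkt a = { 64, 1, NULL }, b = { 0, 0, NULL };

	pkt_copy_fields(&b, &a);
	printf("len=%d priority=%d\n", b.len, b.priority);
	return 0;
}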
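
skb_morph() is documented above as skb_clone() with a caller-supplied target: it releases whatever data the target head was holding, then rebuilds that head as a clone of the source, so both heads share the source's packet data. The contract can be mimicked in user space; the sketch below uses stand-in buf/buf_data types invented for illustration (they are not kernel types), with a plain int standing in for the kernel's reference counting:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf_data { int refcnt; char payload[32]; };
struct buf { struct buf_data *data; };

static void buf_release_data(struct buf *b)
{
	if (b->data && --b->data->refcnt == 0)
		free(b->data);
	b->data = NULL;
}

/* Analogue of skb_morph(): the caller supplies the target head. */
static struct buf *buf_morph(struct buf *dst, struct buf *src)
{
	buf_release_data(dst);	/* drop whatever dst held before     */
	dst->data = src->data;	/* share src's payload, like a clone */
	dst->data->refcnt++;
	return dst;
}

int main(void)
{
	struct buf a = { malloc(sizeof(*a.data)) };
	struct buf b = { malloc(sizeof(*b.data)) };

	a.data->refcnt = 1; strcpy(a.data->payload, "old contents");
	b.data->refcnt = 1; strcpy(b.data->payload, "new contents");

	buf_morph(&a, &b);			/* a's old payload is freed here */
	printf("%s\n", a.data->payload);	/* prints "new contents"         */

	buf_release_data(&a);
	buf_release_data(&b);
	return 0;
}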
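
The re-added skb_clone() body keeps the fast-clone path: "n = skb + 1" and "(atomic_t *)(n + 1)" only work because, for fast-cloneable skbs, the parent head, a spare head reserved for the first clone, and a shared reference count are carved out of one contiguous allocation. A user-space sketch of that layout trick, assuming that contiguous layout and using stand-in types (head, fclone_state, and a plain int refcount are inventions for this example):

#include <stdio.h>
#include <stdlib.h>

enum fclone_state { FCLONE_UNAVAILABLE, FCLONE_ORIG, FCLONE_CLONE };

struct head { enum fclone_state fclone; };

int main(void)
{
	/* One block: [ parent head | spare head | shared refcount ] */
	void *block = malloc(2 * sizeof(struct head) + sizeof(int));
	struct head *skb = block;
	struct head *n = skb + 1;		/* the spare head      */
	int *fclone_ref = (int *)(n + 1);	/* the shared refcount */

	skb->fclone = FCLONE_ORIG;
	n->fclone = FCLONE_UNAVAILABLE;
	*fclone_ref = 1;

	/* The fast path in skb_clone(): claim the spare head instead of
	 * allocating a fresh one from the slab cache. */
	if (skb->fclone == FCLONE_ORIG && n->fclone == FCLONE_UNAVAILABLE) {
		n->fclone = FCLONE_CLONE;
		(*fclone_ref)++;
	}

	printf("refcount after clone: %d\n", *fclone_ref);	/* prints 2 */
	free(block);
	return 0;
}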