author    Arnaldo Carvalho de Melo <acme@redhat.com>  2007-04-19 23:43:29 -0400
committer David S. Miller <davem@sunset.davemloft.net>  2007-04-26 01:26:29 -0400
commit    4305b541357ddbd205aa145dc378926b7cb12283 (patch)
tree      9b1f57ee4ee757a9324c48a7dea84bc8c279ad82 /net/core/skbuff.c
parent    27a884dc3cb63b93c2b3b643f5b31eed5f8a4d26 (diff)
[SK_BUFF]: Convert skb->end to sk_buff_data_t
Now to convert the last one, skb->data, which will allow many simplifications and removal of some of the offset helpers.

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
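Background note (not part of the original patch description): the #ifdef NET_SKBUFF_DATA_USES_OFFSET branches in the diff below exist because sk_buff_data_t is an offset on 64-bit builds and a plain pointer on 32-bit ones. Roughly, paraphrased from include/linux/skbuff.h of this era:

    #if BITS_PER_LONG > 32
    #define NET_SKBUFF_DATA_USES_OFFSET 1
    #endif

    #ifdef NET_SKBUFF_DATA_USES_OFFSET
    typedef unsigned int sk_buff_data_t;    /* offset relative to skb->head */
    #else
    typedef unsigned char *sk_buff_data_t;  /* plain pointer into the buffer */
    #endif

With the offset representation, expressions like skb->end - skb->head reduce to just skb->end, which is exactly the simplification most hunks below apply.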
Diffstat (limited to 'net/core/skbuff.c')
-rw-r--r--  net/core/skbuff.c  |  51
1 file changed, 33 insertions(+), 18 deletions(-)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index ddcbc4d10dab..a203bedefe09 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -87,9 +87,9 @@ static struct kmem_cache *skbuff_fclone_cache __read_mostly;
 void skb_over_panic(struct sk_buff *skb, int sz, void *here)
 {
 	printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p "
-			  "data:%p tail:%#lx end:%p dev:%s\n",
+			  "data:%p tail:%#lx end:%#lx dev:%s\n",
 	       here, skb->len, sz, skb->head, skb->data,
-	       (unsigned long)skb->tail, skb->end,
+	       (unsigned long)skb->tail, (unsigned long)skb->end,
 	       skb->dev ? skb->dev->name : "<NULL>");
 	BUG();
 }
@@ -106,9 +106,9 @@ void skb_over_panic(struct sk_buff *skb, int sz, void *here)
 void skb_under_panic(struct sk_buff *skb, int sz, void *here)
 {
 	printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p "
-			  "data:%p tail:%#lx end:%p dev:%s\n",
+			  "data:%p tail:%#lx end:%#lx dev:%s\n",
 	       here, skb->len, sz, skb->head, skb->data,
-	       (unsigned long)skb->tail, skb->end,
+	       (unsigned long)skb->tail, (unsigned long)skb->end,
 	       skb->dev ? skb->dev->name : "<NULL>");
 	BUG();
 }
@@ -170,7 +170,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 	skb->head = data;
 	skb->data = data;
 	skb_reset_tail_pointer(skb);
-	skb->end = data + size;
+	skb->end = skb->tail + size;
 	/* make sure we initialize shinfo sequentially */
 	shinfo = skb_shinfo(skb);
 	atomic_set(&shinfo->dataref, 1);
@@ -520,8 +520,12 @@ struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
 	/*
 	 *	Allocate the copy buffer
 	 */
-	struct sk_buff *n = alloc_skb(skb->end - skb->head + skb->data_len,
-				      gfp_mask);
+	struct sk_buff *n;
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+	n = alloc_skb(skb->end + skb->data_len, gfp_mask);
+#else
+	n = alloc_skb(skb->end - skb->head + skb->data_len, gfp_mask);
+#endif
 	if (!n)
 		return NULL;
 
@@ -558,8 +562,12 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
 	/*
 	 *	Allocate the copy buffer
 	 */
-	struct sk_buff *n = alloc_skb(skb->end - skb->head, gfp_mask);
-
+	struct sk_buff *n;
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+	n = alloc_skb(skb->end, gfp_mask);
+#else
+	n = alloc_skb(skb->end - skb->head, gfp_mask);
+#endif
 	if (!n)
 		goto out;
 
@@ -617,7 +625,11 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
 {
 	int i;
 	u8 *data;
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+	int size = nhead + skb->end + ntail;
+#else
 	int size = nhead + (skb->end - skb->head) + ntail;
+#endif
 	long off;
 
 	if (skb_shared(skb))
@@ -632,12 +644,13 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
 	/* Copy only real data... and, alas, header. This should be
 	 * optimized for the cases when header is void. */
 	memcpy(data + nhead, skb->head,
-		skb->tail
-#ifndef NET_SKBUFF_DATA_USES_OFFSET
-			    - skb->head
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+	       skb->tail);
+#else
+	       skb->tail - skb->head);
 #endif
-		);
-	memcpy(data + size, skb->end, sizeof(struct skb_shared_info));
+	memcpy(data + size, skb_end_pointer(skb),
+	       sizeof(struct skb_shared_info));
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
 		get_page(skb_shinfo(skb)->frags[i].page);
@@ -650,9 +663,11 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
 	off = (data + nhead) - skb->head;
 
 	skb->head = data;
-	skb->end = data + size;
 	skb->data += off;
-#ifndef NET_SKBUFF_DATA_USES_OFFSET
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+	skb->end = size;
+#else
+	skb->end = skb->head + size;
 	/* {transport,network,mac}_header and tail are relative to skb->head */
 	skb->tail += off;
 	skb->transport_header += off;
@@ -769,7 +784,7 @@ int skb_pad(struct sk_buff *skb, int pad)
 		return 0;
 	}
 
-	ntail = skb->data_len + pad - (skb->end - skb_tail_pointer(skb));
+	ntail = skb->data_len + pad - (skb->end - skb->tail);
 	if (likely(skb_cloned(skb) || ntail > 0)) {
 		err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
 		if (unlikely(err))
@@ -907,7 +922,7 @@ unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
 	 * plus 128 bytes for future expansions. If we have enough
 	 * room at tail, reallocate without expansion only if skb is cloned.
 	 */
-	int i, k, eat = (skb_tail_pointer(skb) + delta) - skb->end;
+	int i, k, eat = (skb->tail + delta) - skb->end;
 
 	if (eat > 0 || skb_cloned(skb)) {
 		if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
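The skb_end_pointer() helper used in the pskb_expand_head() hunk above hides the two representations of skb->end; roughly, as this series adds it to include/linux/skbuff.h (paraphrased, not part of this file's diff):

    #ifdef NET_SKBUFF_DATA_USES_OFFSET
    static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
    {
            return skb->head + skb->end;    /* end stored as an offset */
    }
    #else
    static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
    {
            return skb->end;                /* end stored as a pointer */
    }
    #endif

Callers such as the memcpy() of the skb_shared_info area can then take the end pointer without caring which representation was compiled in.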