author     Arnaldo Carvalho de Melo <acme@redhat.com>      2007-04-19 23:29:13 -0400
committer  David S. Miller <davem@sunset.davemloft.net>    2007-04-26 01:26:28 -0400
commit     27a884dc3cb63b93c2b3b643f5b31eed5f8a4d26
tree       5a267e40f9b94014be38dad5de0a52b6628834e0 /net/core/skbuff.c
parent     be8bd86321fa7f06359d866ef61fb4d2f3e9dce9
[SK_BUFF]: Convert skb->tail to sk_buff_data_t
So that it is also an offset from skb->head. This reduces its size from 8 to
4 bytes on 64-bit architectures and lets it fill the 4-byte hole left by the
layer-headers conversion, shrinking struct sk_buff to 256 bytes, i.e. four
64-byte cachelines, and since the sk_buff slab cache is SLAB_HWCACHE_ALIGN...
:-)
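For reference, a minimal sketch of the type behind this change, paraphrased
from the series' include/linux/skbuff.h (layout simplified, not quoted
verbatim): on 64-bit builds sk_buff_data_t becomes a 4-byte offset, elsewhere
it stays a plain pointer.

    #if BITS_PER_LONG > 32
    #define NET_SKBUFF_DATA_USES_OFFSET 1
    #endif

    #ifdef NET_SKBUFF_DATA_USES_OFFSET
    typedef unsigned int sk_buff_data_t;    /* 4-byte offset from skb->head */
    #else
    typedef unsigned char *sk_buff_data_t;  /* plain pointer on 32-bit */
    #endif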
Many calculations that previously required converting skb->{transport,network,
mac}_header to a pointer first can now be done directly, since those fields
are meaningful both as offsets and as pointers.
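The conversions in the diff below go through the new tail accessors. A sketch
of the offset-based variants, paraphrased (under NET_SKBUFF_DATA_USES_OFFSET;
the pointer-based variants are the obvious identities):

    /* Paraphrased: on offset-based builds skb->tail is relative to skb->head. */
    static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
    {
            return skb->head + skb->tail;      /* offset -> pointer */
    }

    static inline void skb_reset_tail_pointer(struct sk_buff *skb)
    {
            skb->tail = skb->data - skb->head; /* tail points at skb->data */
    }

    static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
    {
            skb_reset_tail_pointer(skb);
            skb->tail += offset;               /* tail = skb->data + offset */
    }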
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core/skbuff.c')
-rw-r--r--   net/core/skbuff.c   35
1 file changed, 21 insertions, 14 deletions
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index a48b08681261..ddcbc4d10dab 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -87,8 +87,9 @@ static struct kmem_cache *skbuff_fclone_cache __read_mostly;
 void skb_over_panic(struct sk_buff *skb, int sz, void *here)
 {
         printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p "
-                          "data:%p tail:%p end:%p dev:%s\n",
-                here, skb->len, sz, skb->head, skb->data, skb->tail, skb->end,
+                          "data:%p tail:%#lx end:%p dev:%s\n",
+                here, skb->len, sz, skb->head, skb->data,
+                (unsigned long)skb->tail, skb->end,
                 skb->dev ? skb->dev->name : "<NULL>");
         BUG();
 }
@@ -105,8 +106,9 @@ void skb_over_panic(struct sk_buff *skb, int sz, void *here)
 void skb_under_panic(struct sk_buff *skb, int sz, void *here)
 {
         printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p "
-                          "data:%p tail:%p end:%p dev:%s\n",
-                here, skb->len, sz, skb->head, skb->data, skb->tail, skb->end,
+                          "data:%p tail:%#lx end:%p dev:%s\n",
+                here, skb->len, sz, skb->head, skb->data,
+                (unsigned long)skb->tail, skb->end,
                 skb->dev ? skb->dev->name : "<NULL>");
         BUG();
 }
@@ -167,7 +169,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
         atomic_set(&skb->users, 1);
         skb->head = data;
         skb->data = data;
-        skb->tail = data;
+        skb_reset_tail_pointer(skb);
         skb->end = data + size;
         /* make sure we initialize shinfo sequentially */
         shinfo = skb_shinfo(skb);
@@ -629,7 +631,12 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,

         /* Copy only real data... and, alas, header. This should be
          * optimized for the cases when header is void. */
-        memcpy(data + nhead, skb->head, skb->tail - skb->head);
+        memcpy(data + nhead, skb->head,
+               skb->tail
+#ifndef NET_SKBUFF_DATA_USES_OFFSET
+               - skb->head
+#endif
+               );
         memcpy(data + size, skb->end, sizeof(struct skb_shared_info));

         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
@@ -645,9 +652,9 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
         skb->head = data;
         skb->end = data + size;
         skb->data += off;
-        skb->tail += off;
 #ifndef NET_SKBUFF_DATA_USES_OFFSET
-        /* {transport,network,mac}_header are relative to skb->head */
+        /* {transport,network,mac}_header and tail are relative to skb->head */
+        skb->tail += off;
         skb->transport_header += off;
         skb->network_header += off;
         skb->mac_header += off;
@@ -762,7 +769,7 @@ int skb_pad(struct sk_buff *skb, int pad)
                 return 0;
         }

-        ntail = skb->data_len + pad - (skb->end - skb->tail);
+        ntail = skb->data_len + pad - (skb->end - skb_tail_pointer(skb));
         if (likely(skb_cloned(skb) || ntail > 0)) {
                 err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
                 if (unlikely(err))
@@ -863,7 +870,7 @@ done:
         } else {
                 skb->len = len;
                 skb->data_len = 0;
-                skb->tail = skb->data + len;
+                skb_set_tail_pointer(skb, len);
         }

         return 0;
@@ -900,7 +907,7 @@ unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
          * plus 128 bytes for future expansions. If we have enough
          * room at tail, reallocate without expansion only if skb is cloned.
          */
-        int i, k, eat = (skb->tail + delta) - skb->end;
+        int i, k, eat = (skb_tail_pointer(skb) + delta) - skb->end;

         if (eat > 0 || skb_cloned(skb)) {
                 if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
@@ -908,7 +915,7 @@ unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
                         return NULL;
         }

-        if (skb_copy_bits(skb, skb_headlen(skb), skb->tail, delta))
+        if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta))
                 BUG();

         /* Optimization: no fragments, no reasons to preestimate
@@ -1004,7 +1011,7 @@ pull_pages:
         skb->tail += delta;
         skb->data_len -= delta;

-        return skb->tail;
+        return skb_tail_pointer(skb);
 }

 /* Copy some data bits from skb to kernel buffer. */
@@ -1539,7 +1546,7 @@ static inline void skb_split_inside_header(struct sk_buff *skb,
         skb1->len += skb1->data_len;
         skb->data_len = 0;
         skb->len = len;
-        skb->tail = skb->data + len;
+        skb_set_tail_pointer(skb, len);
 }

 static inline void skb_split_no_header(struct sk_buff *skb,
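For code outside this file following the same conversion, the rewrite pattern
is mechanical. A hypothetical before/after fragment (illustrative only, not
part of this commit; note skb->end is still a pointer at this point in the
series):

    /* Before: direct pointer arithmetic on skb->tail. */
    skb->tail = skb->data + len;
    ntail = skb->end - skb->tail;

    /* After: go through the accessors so both the offset and the
     * pointer representations of skb->tail keep working. */
    skb_set_tail_pointer(skb, len);
    ntail = skb->end - skb_tail_pointer(skb);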