author    Benjamin LaHaise <bcrl@kvack.org>    2006-01-03 17:06:50 -0500
committer David S. Miller <davem@davemloft.net>    2006-01-03 17:06:50 -0500
commit    4947d3ef8de7b4f42aed6ea9ba689dc8fb45b5a5 (patch)
tree      a4e77f0271702e4ff34a7a9e0c9598a3807204ee /net/core/skbuff.c
parent    17ba15fb6264f27374bc87f4c3f8519b80289d85 (diff)
[NET]: Speed up __alloc_skb()
From: Benjamin LaHaise <bcrl@kvack.org>

In __alloc_skb(), the use of skb_shinfo(), which casts a u8 * to the shared info structure, results in gcc being forced to reload the pointer since it has no information on possible aliasing. Fix this by using a local pointer to refer to the skb_shared_info.

By initializing skb_shared_info sequentially, the write-combining buffers can reduce the number of memory transactions to a single write. Reorder the initialization in __alloc_skb() to match the structure definition.

There is also an alignment issue with skb_shared_info on 64-bit systems; by converting nr_frags to a short, everything packs up nicely.

Also, pass the slab cache pointer according to the fclone flag instead of using two almost identical function calls.

This raises bw_unix performance up to a peak of 707KB/s when combined with the spinlock patch. It should help other networking protocols, too.

Signed-off-by: David S. Miller <davem@davemloft.net>
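[Editor's note] A minimal standalone sketch of the reload problem the first paragraph describes; struct buf, struct shinfo and buf_shinfo() are hypothetical stand-ins, not the kernel's types. The kernel is built with -fno-strict-aliasing, so every store made through the pointer produced by the casting macro could, as far as gcc can tell, have clobbered the field the pointer was loaded from, forcing a reload before each access; caching the cast in a local variable avoids that.

/* Hypothetical types -- a sketch of the reload issue, not kernel code. */
struct shinfo {
        int             dataref;
        unsigned short  nr_frags;
        unsigned short  tso_size;
};

struct buf {
        unsigned char   *end;           /* the shinfo lives at buf->end */
};

#define buf_shinfo(b)   ((struct shinfo *)((b)->end))

/* With -fno-strict-aliasing, the compiler must assume each store below may
 * have modified b->end, so b->end is reloaded before every buf_shinfo() use. */
void init_reloads(struct buf *b)
{
        buf_shinfo(b)->dataref = 1;
        buf_shinfo(b)->nr_frags = 0;
        buf_shinfo(b)->tso_size = 0;
}

/* Caching the cast in a local pointer keeps it in a register and lets the
 * stores go out back-to-back. */
void init_sequential(struct buf *b)
{
        struct shinfo *shinfo = buf_shinfo(b);

        shinfo->dataref = 1;
        shinfo->nr_frags = 0;
        shinfo->tso_size = 0;
}

This hoisting is what the patch below does with skb_shinfo(skb) in __alloc_skb(), which is also what allows the shinfo stores to be issued sequentially.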
Diffstat (limited to 'net/core/skbuff.c')
-rw-r--r--  net/core/skbuff.c  |  27
1 files changed, 13 insertions, 14 deletions
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 83fee37de38e..070f91cfde59 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -135,17 +135,13 @@ void skb_under_panic(struct sk_buff *skb, int sz, void *here)
 struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
                             int fclone)
 {
+        struct skb_shared_info *shinfo;
         struct sk_buff *skb;
         u8 *data;
 
         /* Get the HEAD */
-        if (fclone)
-                skb = kmem_cache_alloc(skbuff_fclone_cache,
-                                       gfp_mask & ~__GFP_DMA);
-        else
-                skb = kmem_cache_alloc(skbuff_head_cache,
-                                       gfp_mask & ~__GFP_DMA);
-
+        skb = kmem_cache_alloc(fclone ? skbuff_fclone_cache : skbuff_head_cache,
+                               gfp_mask & ~__GFP_DMA);
         if (!skb)
                 goto out;
 
@@ -162,6 +158,16 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
         skb->data = data;
         skb->tail = data;
         skb->end = data + size;
+        /* make sure we initialize shinfo sequentially */
+        shinfo = skb_shinfo(skb);
+        atomic_set(&shinfo->dataref, 1);
+        shinfo->nr_frags = 0;
+        shinfo->tso_size = 0;
+        shinfo->tso_segs = 0;
+        shinfo->ufo_size = 0;
+        shinfo->ip6_frag_id = 0;
+        shinfo->frag_list = NULL;
+
         if (fclone) {
                 struct sk_buff *child = skb + 1;
                 atomic_t *fclone_ref = (atomic_t *) (child + 1);
@@ -171,13 +177,6 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 
                 child->fclone = SKB_FCLONE_UNAVAILABLE;
         }
-        atomic_set(&(skb_shinfo(skb)->dataref), 1);
-        skb_shinfo(skb)->nr_frags = 0;
-        skb_shinfo(skb)->tso_size = 0;
-        skb_shinfo(skb)->tso_segs = 0;
-        skb_shinfo(skb)->frag_list = NULL;
-        skb_shinfo(skb)->ufo_size = 0;
-        skb_shinfo(skb)->ip6_frag_id = 0;
 out:
         return skb;
 nodata:
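[Editor's note] The nr_frags type change mentioned in the commit message is made in include/linux/skbuff.h and so does not appear in this file's diff. A standalone sketch of the 64-bit packing effect it refers to, using illustrative layouts of the same shape rather than the kernel's verbatim definitions:

#include <stdio.h>

struct shinfo_wide {                    /* nr_frags as a 4-byte int */
        int             dataref;
        unsigned int    nr_frags;
        unsigned short  tso_size;
        unsigned short  tso_segs;
        unsigned short  ufo_size;       /* 2 padding bytes follow */
        unsigned int    ip6_frag_id;
        void            *frag_list;     /* needs 8-byte alignment: 4 more padding bytes before it */
};

struct shinfo_packed {                  /* nr_frags as a short */
        int             dataref;
        unsigned short  nr_frags;
        unsigned short  tso_size;
        unsigned short  tso_segs;
        unsigned short  ufo_size;
        unsigned int    ip6_frag_id;
        void            *frag_list;     /* already 8-byte aligned, no padding */
};

int main(void)
{
        /* On a typical LP64 target this prints 32 vs 24. */
        printf("wide:   %zu bytes\n", sizeof(struct shinfo_wide));
        printf("packed: %zu bytes\n", sizeof(struct shinfo_packed));
        return 0;
}

With the narrower nr_frags the shorts pack together, the padding holes ahead of the pointer member disappear, and the structure shrinks on 64-bit systems.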