author	Herbert Xu <herbert@gondor.apana.org.au>	2007-04-17 15:28:27 -0400
committer	David S. Miller <davem@sunset.davemloft.net>	2007-04-17 16:13:16 -0400
commit	b4dfa0b1fb39c7ffe74741d60668825de6a47b69 (patch)
tree	1866a6b51f666b4969f6c0be55c25f31e4d42a51 /net
parent	6f29e35e2d4cdbc3e8785982314e54ec5df4ad37 (diff)
[NET]: Get rid of alloc_skb_from_cache
Since this was added originally for Xen, and Xen has recently (~2.6.18) stopped using this function, we can safely get rid of it. Good timing too since this function has started to bit rot.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
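For context, anything still carrying an out-of-tree caller can fall back to plain alloc_skb(), which returns the same kind of buffer (no headroom, the requested bytes of tailroom, reference count of one) but draws the data area from the generic kmalloc caches instead of a caller-supplied kmem_cache. A minimal sketch, not part of this patch; the wrapper name and @len parameter are illustrative:

	#include <linux/skbuff.h>

	/*
	 * Hypothetical stand-in for a former
	 * alloc_skb_from_cache(cp, len, GFP_ATOMIC) call site: alloc_skb()
	 * gives the same no-headroom, len-bytes-of-tailroom sk_buff, just
	 * without the custom slab cache for the data area.
	 */
	static struct sk_buff *example_rx_alloc(unsigned int len)
	{
		return alloc_skb(len, GFP_ATOMIC);
	}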
Diffstat (limited to 'net')
-rw-r--r--	net/core/skbuff.c	55
1 file changed, 0 insertions, 55 deletions
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 87573ae35b02..336958fbbcb2 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -197,61 +197,6 @@ nodata:
 }
 
 /**
- * alloc_skb_from_cache - allocate a network buffer
- * @cp: kmem_cache from which to allocate the data area
- *	(object size must be big enough for @size bytes + skb overheads)
- * @size: size to allocate
- * @gfp_mask: allocation mask
- *
- * Allocate a new &sk_buff. The returned buffer has no headroom and
- * tail room of size bytes. The object has a reference count of one.
- * The return is the buffer. On a failure the return is %NULL.
- *
- * Buffers may only be allocated from interrupts using a @gfp_mask of
- * %GFP_ATOMIC.
- */
-struct sk_buff *alloc_skb_from_cache(struct kmem_cache *cp,
-				     unsigned int size,
-				     gfp_t gfp_mask)
-{
-	struct sk_buff *skb;
-	u8 *data;
-
-	/* Get the HEAD */
-	skb = kmem_cache_alloc(skbuff_head_cache,
-			       gfp_mask & ~__GFP_DMA);
-	if (!skb)
-		goto out;
-
-	/* Get the DATA. */
-	size = SKB_DATA_ALIGN(size);
-	data = kmem_cache_alloc(cp, gfp_mask);
-	if (!data)
-		goto nodata;
-
-	memset(skb, 0, offsetof(struct sk_buff, truesize));
-	skb->truesize = size + sizeof(struct sk_buff);
-	atomic_set(&skb->users, 1);
-	skb->head = data;
-	skb->data = data;
-	skb->tail = data;
-	skb->end = data + size;
-
-	atomic_set(&(skb_shinfo(skb)->dataref), 1);
-	skb_shinfo(skb)->nr_frags = 0;
-	skb_shinfo(skb)->gso_size = 0;
-	skb_shinfo(skb)->gso_segs = 0;
-	skb_shinfo(skb)->gso_type = 0;
-	skb_shinfo(skb)->frag_list = NULL;
-out:
-	return skb;
-nodata:
-	kmem_cache_free(skbuff_head_cache, skb);
-	skb = NULL;
-	goto out;
-}
-
-/**
  * __netdev_alloc_skb - allocate an skbuff for rx on a specific device
  * @dev: network device to receive on
  * @length: length to allocate