Diffstat (limited to 'net/core/skbuff.c')
 net/core/skbuff.c | 57
 1 file changed, 24 insertions(+), 33 deletions(-)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 096991cb09d9..e6564b0a6839 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -281,8 +281,6 @@ void kfree_skbmem(struct sk_buff *skb)
 
 void __kfree_skb(struct sk_buff *skb)
 {
-	BUG_ON(skb->list != NULL);
-
 	dst_release(skb->dst);
 #ifdef CONFIG_XFRM
 	secpath_put(skb->sp);
@@ -333,7 +331,6 @@ struct sk_buff *skb_clone(struct sk_buff *skb, unsigned int __nocast gfp_mask)
 #define C(x) n->x = skb->x
 
 	n->next = n->prev = NULL;
-	n->list = NULL;
 	n->sk = NULL;
 	C(stamp);
 	C(dev);
@@ -403,7 +400,6 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 	 */
 	unsigned long offset = new->data - old->data;
 
-	new->list = NULL;
 	new->sk = NULL;
 	new->dev = old->dev;
 	new->real_dev = old->real_dev;
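
Taken together, the three hunks above remove all bookkeeping for skb->list: __kfree_skb() no longer asserts that a freed buffer is off-list, and skb_clone() and copy_skb_header() no longer clear the field in the new buffer. The field itself is declared in include/linux/skbuff.h and is deleted by the same patch; that header hunk is not shown on this page. A rough sketch of the struct change (member layout abbreviated, so treat it as illustrative only):

	struct sk_buff {
		struct sk_buff		*next;	/* the queue links themselves stay */
		struct sk_buff		*prev;
	-	struct sk_buff_head	*list;	/* removed: the owning queue is no
						 * longer recorded inside the skb */
		struct sock		*sk;
		/* ... remaining members unchanged ... */
	};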
@@ -1342,50 +1338,43 @@ void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
 	__skb_queue_tail(list, newsk);
 	spin_unlock_irqrestore(&list->lock, flags);
 }
+
 /**
  *	skb_unlink	-	remove a buffer from a list
  *	@skb: buffer to remove
+ *	@list: list to use
  *
- *	Place a packet after a given packet in a list. The list locks are taken
- *	and this function is atomic with respect to other list locked calls
+ *	Remove a packet from a list. The list locks are taken and this
+ *	function is atomic with respect to other list locked calls
  *
- *	Works even without knowing the list it is sitting on, which can be
- *	handy at times. It also means that THE LIST MUST EXIST when you
- *	unlink. Thus a list must have its contents unlinked before it is
- *	destroyed.
+ *	You must know what list the SKB is on.
  */
-void skb_unlink(struct sk_buff *skb)
+void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
 {
-	struct sk_buff_head *list = skb->list;
-
-	if (list) {
-		unsigned long flags;
+	unsigned long flags;
 
-		spin_lock_irqsave(&list->lock, flags);
-		if (skb->list == list)
-			__skb_unlink(skb, skb->list);
-		spin_unlock_irqrestore(&list->lock, flags);
-	}
+	spin_lock_irqsave(&list->lock, flags);
+	__skb_unlink(skb, list);
+	spin_unlock_irqrestore(&list->lock, flags);
 }
 
-
 /**
  *	skb_append	-	append a buffer
  *	@old: buffer to insert after
  *	@newsk: buffer to insert
+ *	@list: list to use
  *
  *	Place a packet after a given packet in a list. The list locks are taken
  *	and this function is atomic with respect to other list locked calls.
  *	A buffer cannot be placed on two lists at the same time.
  */
-
-void skb_append(struct sk_buff *old, struct sk_buff *newsk)
+void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&old->list->lock, flags);
-	__skb_append(old, newsk);
-	spin_unlock_irqrestore(&old->list->lock, flags);
+	spin_lock_irqsave(&list->lock, flags);
+	__skb_append(old, newsk, list);
+	spin_unlock_irqrestore(&list->lock, flags);
 }
 
 
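Because skb_unlink() and skb_append() now take the queue explicitly, every caller has to name the sk_buff_head the buffer lives on; the old "works without knowing the list" behaviour is gone. A hedged sketch of the caller-side migration, using a socket receive queue as an example (the surrounding caller code is hypothetical, not part of this patch):

	/* Before: the skb remembered its owning list. */
	skb_unlink(skb);
	skb_append(old_skb, new_skb);

	/* After: the owning queue is an explicit argument. */
	skb_unlink(skb, &sk->sk_receive_queue);
	skb_append(old_skb, new_skb, &sk->sk_receive_queue);

Dropping the back-pointer also removes the racy "if (skb->list == list)" re-check under the lock: the caller is now responsible for knowing the queue, so the locked region shrinks to the bare __skb_unlink() call.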
@@ -1393,19 +1382,21 @@ void skb_append(struct sk_buff *old, struct sk_buff *newsk)
  *	skb_insert	-	insert a buffer
  *	@old: buffer to insert before
  *	@newsk: buffer to insert
+ *	@list: list to use
+ *
+ *	Place a packet before a given packet in a list. The list locks are
+ *	taken and this function is atomic with respect to other list locked
+ *	calls.
  *
- *	Place a packet before a given packet in a list. The list locks are taken
- *	and this function is atomic with respect to other list locked calls
  *	A buffer cannot be placed on two lists at the same time.
  */
-
-void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
+void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&old->list->lock, flags);
-	__skb_insert(newsk, old->prev, old, old->list);
-	spin_unlock_irqrestore(&old->list->lock, flags);
+	spin_lock_irqsave(&list->lock, flags);
+	__skb_insert(newsk, old->prev, old, list);
+	spin_unlock_irqrestore(&list->lock, flags);
 }
 
 #if 0
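
The unlocked helpers used above (__skb_unlink(), __skb_append(), __skb_insert()) are inline functions in include/linux/skbuff.h, and the new call sites in this file imply their signatures gain the same explicit list parameter. A sketch of what the reworked __skb_unlink() looks like, assuming the usual circular doubly linked sk_buff_head layout (the caller must already hold list->lock):

	static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
	{
		struct sk_buff *next, *prev;

		list->qlen--;			/* account for the removal */
		next = skb->next;
		prev = skb->prev;
		skb->next = skb->prev = NULL;	/* skb is now on no list */
		next->prev = prev;
		prev->next = next;
	}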