author		David S. Miller <davem@davemloft.net>		2005-08-09 22:25:21 -0400
committer	David S. Miller <davem@sunset.davemloft.net>	2005-08-29 18:31:14 -0400
commit		8728b834b226ffcf2c94a58530090e292af2a7bf (patch)
tree		2fd51ff3b7097eb3ffc41ea3a1d8b3ba04715b4c /net/core/skbuff.c
parent		6869c4d8e066e21623c812c448a05f1ed931c9c6 (diff)
[NET]: Kill skb->list
Remove the "list" member of struct sk_buff, as it is entirely
redundant. All SKB list removal callers know which list the
SKB is on, so storing this in sk_buff does nothing other than
taking up some space.
Two tricky bits were SCTP, which I took care of, and two ATM
drivers which Francois Romieu <romieu@fr.zoreil.com> fixed
up.
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Francois Romieu <romieu@fr.zoreil.com>
Diffstat (limited to 'net/core/skbuff.c')
-rw-r--r--	net/core/skbuff.c	57
1 file changed, 24 insertions, 33 deletions
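For context, a minimal sketch of the caller-visible change, using a hypothetical driver-private queue (the queue and function names here are illustrative, not part of this patch). With skb->list gone, every unlink site must name the queue the SKB is known to sit on:

	#include <linux/skbuff.h>

	/* Hypothetical driver-private queue, illustrative only;
	 * assumed initialized elsewhere with skb_queue_head_init(). */
	static struct sk_buff_head tx_pending;

	static void flush_one(struct sk_buff *skb)
	{
		/*
		 * Before this patch the SKB carried its owner in
		 * skb->list, so callers could write skb_unlink(skb).
		 * Now the caller passes the queue explicitly:
		 */
		skb_unlink(skb, &tx_pending);
		kfree_skb(skb);
	}

The diff below makes exactly this signature change in skb_unlink(), skb_append() and skb_insert().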
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 096991cb09d9..e6564b0a6839 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -281,8 +281,6 @@ void kfree_skbmem(struct sk_buff *skb)
 
 void __kfree_skb(struct sk_buff *skb)
 {
-	BUG_ON(skb->list != NULL);
-
 	dst_release(skb->dst);
 #ifdef CONFIG_XFRM
 	secpath_put(skb->sp);
@@ -333,7 +331,6 @@ struct sk_buff *skb_clone(struct sk_buff *skb, unsigned int __nocast gfp_mask)
 #define C(x) n->x = skb->x
 
 	n->next = n->prev = NULL;
-	n->list = NULL;
 	n->sk = NULL;
 	C(stamp);
 	C(dev);
@@ -403,7 +400,6 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 	 */
 	unsigned long offset = new->data - old->data;
 
-	new->list = NULL;
 	new->sk = NULL;
 	new->dev = old->dev;
 	new->real_dev = old->real_dev;
@@ -1342,50 +1338,43 @@ void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
 	__skb_queue_tail(list, newsk);
 	spin_unlock_irqrestore(&list->lock, flags);
 }
+
 /**
  * skb_unlink - remove a buffer from a list
  * @skb: buffer to remove
+ * @list: list to use
  *
- * Place a packet after a given packet in a list. The list locks are taken
- * and this function is atomic with respect to other list locked calls
+ * Remove a packet from a list. The list locks are taken and this
+ * function is atomic with respect to other list locked calls
  *
- * Works even without knowing the list it is sitting on, which can be
- * handy at times. It also means that THE LIST MUST EXIST when you
- * unlink. Thus a list must have its contents unlinked before it is
- * destroyed.
+ * You must know what list the SKB is on.
  */
-void skb_unlink(struct sk_buff *skb)
+void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
 {
-	struct sk_buff_head *list = skb->list;
-
-	if (list) {
-		unsigned long flags;
+	unsigned long flags;
 
-		spin_lock_irqsave(&list->lock, flags);
-		if (skb->list == list)
-			__skb_unlink(skb, skb->list);
-		spin_unlock_irqrestore(&list->lock, flags);
-	}
+	spin_lock_irqsave(&list->lock, flags);
+	__skb_unlink(skb, list);
+	spin_unlock_irqrestore(&list->lock, flags);
 }
 
-
 /**
  * skb_append - append a buffer
  * @old: buffer to insert after
  * @newsk: buffer to insert
+ * @list: list to use
  *
  * Place a packet after a given packet in a list. The list locks are taken
  * and this function is atomic with respect to other list locked calls.
  * A buffer cannot be placed on two lists at the same time.
  */
-
-void skb_append(struct sk_buff *old, struct sk_buff *newsk)
+void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&old->list->lock, flags);
-	__skb_append(old, newsk);
-	spin_unlock_irqrestore(&old->list->lock, flags);
+	spin_lock_irqsave(&list->lock, flags);
+	__skb_append(old, newsk, list);
+	spin_unlock_irqrestore(&list->lock, flags);
 }
 
 
@@ -1393,19 +1382,21 @@ void skb_append(struct sk_buff *old, struct sk_buff *newsk)
  * skb_insert - insert a buffer
  * @old: buffer to insert before
  * @newsk: buffer to insert
+ * @list: list to use
+ *
+ * Place a packet before a given packet in a list. The list locks are
+ * taken and this function is atomic with respect to other list locked
+ * calls.
  *
- * Place a packet before a given packet in a list. The list locks are taken
- * and this function is atomic with respect to other list locked calls
  * A buffer cannot be placed on two lists at the same time.
  */
-
-void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
+void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&old->list->lock, flags);
-	__skb_insert(newsk, old->prev, old, old->list);
-	spin_unlock_irqrestore(&old->list->lock, flags);
+	spin_lock_irqsave(&list->lock, flags);
+	__skb_insert(newsk, old->prev, old, list);
+	spin_unlock_irqrestore(&list->lock, flags);
 }
 
 #if 0
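A matching sketch for the insert side, again with illustrative names: skb_insert() and skb_append() now take the queue as an explicit argument and lock that queue's spinlock, where they previously chased old->list:

	#include <linux/skbuff.h>

	/* Hypothetical reassembly queue, illustrative only;
	 * assumed initialized elsewhere with skb_queue_head_init(). */
	static struct sk_buff_head reasm_queue;

	static void queue_in_order(struct sk_buff *newsk, struct sk_buff *pos)
	{
		/*
		 * Both helpers take reasm_queue.lock internally; the
		 * caller supplies the list instead of the old old->list
		 * lookup.
		 */
		if (pos)
			skb_insert(pos, newsk, &reasm_queue);	/* before pos */
		else
			skb_queue_tail(&reasm_queue, newsk);	/* queue empty */
	}

This is also why the old "works even without knowing the list" guarantee disappears from the skb_unlink() documentation: with the back-pointer removed, correctness now rests on the caller knowing which queue the SKB is on.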